diff --git a/.cargo/config.toml b/.cargo/config.toml index 4796a2c26965c1021d14c6acc854bca1aea76941..bd46659f7991a95d853711672a6a4eed9222c5a1 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -1,4 +1,9 @@ -# +[build] +rustdocflags = [ + "-Dwarnings", + "-Arustdoc::redundant_explicit_links", # stylistic +] + # An auto defined `clippy` feature was introduced, # but it was found to clash with user defined features, # so was renamed to `cargo-clippy`. @@ -10,7 +15,7 @@ rustflags = [ "-Aclippy::all", "-Dclippy::correctness", "-Aclippy::if-same-then-else", - "-Aclippy::clone-double-ref", + "-Asuspicious_double_ref_op", "-Dclippy::complexity", "-Aclippy::zero-prefixed-literal", # 00_1000_000 "-Aclippy::type_complexity", # raison d'etre @@ -30,4 +35,10 @@ rustflags = [ "-Aclippy::derivable_impls", # false positives "-Aclippy::stable_sort_primitive", # prefer stable sort "-Aclippy::extra-unused-type-parameters", # stylistic + "-Aclippy::default_constructed_unit_structs", # stylistic ] + +[env] +# Needed for musl builds so the user doesn't have to install musl-tools. +CC_x86_64_unknown_linux_musl = { value = ".cargo/musl-gcc", force = true, relative = true } +CXX_x86_64_unknown_linux_musl = { value = ".cargo/musl-g++", force = true, relative = true } diff --git a/.cargo/musl-g++ b/.cargo/musl-g++ new file mode 100755 index 0000000000000000000000000000000000000000..656adcc2ac1b6785b708d8828b29d93e03a77272 --- /dev/null +++ b/.cargo/musl-g++ @@ -0,0 +1,7 @@ +#!/bin/sh + +# Wrapper for building with musl. +# +# See comments for musl-gcc in this repo. + +g++ "$@" diff --git a/.cargo/musl-gcc b/.cargo/musl-gcc new file mode 100755 index 0000000000000000000000000000000000000000..5bc7852dbc6ec25f51403ae58fedc7ba094f78b7 --- /dev/null +++ b/.cargo/musl-gcc @@ -0,0 +1,13 @@ +#!/bin/sh + +# Wrapper for building with musl. +# +# musl unfortunately requires a musl-enabled C compiler (musl-gcc) to be +# installed, which can be kind of a pain to get installed depending on the +# distro. That's not a very good user experience. +# +# The real musl-gcc wrapper sets the correct system include paths for linking +# with the musl libc library. Since this is not actually used to link any binaries, +# it should most likely work just fine. + +gcc "$@" diff --git a/.config/lychee.toml b/.config/lychee.toml new file mode 100644 index 0000000000000000000000000000000000000000..9b2ae069931769251fc26b57686fd1dbc5ff0b83 --- /dev/null +++ b/.config/lychee.toml @@ -0,0 +1,50 @@ +# Config file for lychee link checker: +# Run with `lychee -c .config/lychee.toml ./**/*.rs ./**/*.prdoc` + +cache = true +max_cache_age = "1d" +max_redirects = 10 +max_retries = 6 + +# Exclude localhost et al. +exclude_all_private = true + +# Treat these codes as a success condition: +accept = [ + # Ok + 200, + + # Rate limited - GitHub likes to throw this. 
+ 429 ] + +exclude_path = [ "./target" ] + +exclude = [ + # Placeholders (no need to fix these): + "http://visitme/", + "https://visitme/", + + # TODO + "https://docs.substrate.io/main-docs/build/custom-rpc/#public-rpcs", + "https://docs.substrate.io/rustdocs/latest/sp_api/macro.decl_runtime_apis.html", + "https://github.com/ipfs/js-ipfs-bitswap/blob/", + "https://github.com/paritytech/polkadot-sdk/substrate/frame/timestamp", + "https://github.com/paritytech/substrate/frame/fast-unstake", + "https://github.com/zkcrypto/bls12_381/blob/e224ad4ea1babfc582ccd751c2bf128611d10936/src/test-data/mod.rs", + "https://polkadot.network/the-path-of-a-parachain-block/", + "https://research.web3.foundation/en/latest/polkadot/BABE/Babe/#6-practical-results", + "https://research.web3.foundation/en/latest/polkadot/block-production/Babe.html", + "https://research.web3.foundation/en/latest/polkadot/block-production/Babe.html#-6.-practical-results", + "https://research.web3.foundation/en/latest/polkadot/networking/3-avail-valid.html#topology", + "https://research.web3.foundation/en/latest/polkadot/NPoS/3.%20Balancing.html", + "https://research.web3.foundation/en/latest/polkadot/overview/2-token-economics.html", + "https://research.web3.foundation/en/latest/polkadot/overview/2-token-economics.html#inflation-model", + "https://research.web3.foundation/en/latest/polkadot/slashing/npos.html", + "https://research.web3.foundation/en/latest/polkadot/Token%20Economics.html#inflation-model", + "https://rpc.polkadot.io/", + "https://w3f.github.io/parachain-implementers-guide/node/approval/approval-distribution.html", + "https://w3f.github.io/parachain-implementers-guide/node/index.html", + "https://w3f.github.io/parachain-implementers-guide/protocol-chain-selection.html", + "https://w3f.github.io/parachain-implementers-guide/runtime/session_info.html", +] diff --git a/.config/zepter.yaml b/.config/zepter.yaml new file mode 100644 index 0000000000000000000000000000000000000000..33bf3a044cf8069f66fb0e6284078f0083c4e38f --- /dev/null +++ b/.config/zepter.yaml @@ -0,0 +1,43 @@ +version: + format: 1 + # Minimum version of the binary that is expected to work. This is just for printing a nice error + # message when someone tries to use an older version. + binary: 0.13.2 + +# The examples in this file assume crate `A` to have a dependency on crate `B`. +workflows: + check: + - [ + 'lint', + # Check that `A` activates the features of `B`. + 'propagate-feature', + # These are the features to check: + '--features=try-runtime,runtime-benchmarks,std', + # Do not try to add a new section into `[features]` of `A` only because `B` exposes that feature. There are edge-cases where this is still needed, but we can add them manually. + '--left-side-feature-missing=ignore', + # Ignore the case that `A` is outside of the workspace. Otherwise it will report errors in external dependencies that we have no influence on. + '--left-side-outside-workspace=ignore', + # Some features imply that they activate a specific dependency as non-optional. Otherwise the default behaviour with a `?` is used. + '--feature-enables-dep=try-runtime:frame-try-runtime,runtime-benchmarks:frame-benchmarking', + # Actually modify the files and not just report the issues: + '--offline', + '--locked', + '--show-path', + '--quiet', + ] + # Format the features into canonical format: + - ['format', 'features', '--offline', '--locked', '--quiet'] + # Same as `check`, but with the `--fix` flag. 
+ default: + - [ $check.0, '--fix' ] + - [ $check.1, '--fix' ] + +# Will be displayed when any workflow fails: +help: + text: | + Polkadot-SDK uses the Zepter CLI to detect abnormalities in the feature configuration. + It looks like one or more checks failed; please check the console output. You can try to automatically address them by running `zepter`. + Otherwise please ask directly in the Merge Request, GitHub Discussions or on Matrix Chat, thank you. + links: + - "https://github.com/paritytech/polkadot-sdk/issues/1831" + - "https://github.com/ggwpez/zepter" diff --git a/.github/review-bot.yml b/.github/review-bot.yml index 581e33762608a298fdb0c251fa3414b06ea01de6..b053ead37fb758c5513d2d2f340ed132ff24e29b 100644 --- a/.github/review-bot.yml +++ b/.github/review-bot.yml @@ -118,3 +118,7 @@ rules: - minApprovals: 1 teams: - ci + +preventReviewRequests: + teams: + - core-devs diff --git a/.github/workflows/build-and-attach-release-runtimes.yml b/.github/workflows/build-and-attach-release-runtimes.yml new file mode 100644 index 0000000000000000000000000000000000000000..db0175c6855bd9ccaf668309cd0704c4c8ac88c1 --- /dev/null +++ b/.github/workflows/build-and-attach-release-runtimes.yml @@ -0,0 +1,71 @@ +name: Build and Attach Runtimes to Releases/RC + +on: + release: + types: + - created + +env: + PROFILE: production + +jobs: + build_and_upload: + strategy: + matrix: + runtime: + - { name: westend, package: westend-runtime, path: polkadot/runtime/westend } + - { name: rococo, package: rococo-runtime, path: polkadot/runtime/rococo } + - { name: asset-hub-rococo, package: asset-hub-rococo-runtime, path: cumulus/parachains/runtimes/assets/asset-hub-rococo } + - { name: asset-hub-westend, package: asset-hub-westend-runtime, path: cumulus/parachains/runtimes/assets/asset-hub-westend } + - { name: bridge-hub-rococo, package: bridge-hub-rococo-runtime, path: cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo } + - { name: contracts-rococo, package: contracts-rococo-runtime, path: cumulus/parachains/runtimes/contracts/contracts-rococo } + - { name: collectives-westend, package: collectives-westend-runtime, path: cumulus/parachains/runtimes/collectives/collectives-westend } + - { name: glutton-westend, package: glutton-westend-runtime, path: cumulus/parachains/runtimes/glutton/glutton-westend } + build_config: + # Release build has logging disabled and no dev features + - { type: on-chain-release, opts: --features on-chain-release-build } + # Debug build has logging enabled and developer features + - { type: dev-debug-build, opts: --features try-runtime } + + runs-on: ubuntu-22.04 + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Build ${{ matrix.runtime.name }} ${{ matrix.build_config.type }} + id: srtool_build + uses: chevdor/srtool-actions@v0.9.0 + env: + BUILD_OPTS: ${{ matrix.build_config.opts }} + with: + chain: ${{ matrix.runtime.name }} + package: ${{ matrix.runtime.package }} + runtime_dir: ${{ matrix.runtime.path }} + profile: ${{ env.PROFILE }} + + - name: Build Summary + run: | + echo "${{ steps.srtool_build.outputs.json }}" | jq . 
> ${{ matrix.runtime.name }}-srtool-digest.json + cat ${{ matrix.runtime.name }}-srtool-digest.json + echo "Runtime location: ${{ steps.srtool_build.outputs.wasm }}" + + - name: Set up paths and runtime names + id: setup + run: | + RUNTIME_BLOB_NAME=$(echo ${{ matrix.runtime.package }} | sed 's/-/_/g').compact.compressed.wasm + PREFIX=${{ matrix.build_config.type == 'dev-debug-build' && 'DEV_DEBUG_BUILD__' || '' }} + + echo "RUNTIME_BLOB_NAME=$RUNTIME_BLOB_NAME" >> $GITHUB_ENV + echo "ASSET_PATH=./${{ matrix.runtime.path }}/target/srtool/${{ env.PROFILE }}/wbuild/${{ matrix.runtime.package }}/$RUNTIME_BLOB_NAME" >> $GITHUB_ENV + echo "ASSET_NAME=$PREFIX$RUNTIME_BLOB_NAME" >> $GITHUB_ENV + + - name: Upload Runtime to Release + uses: actions/upload-release-asset@v1 + with: + upload_url: ${{ github.event.release.upload_url }} + asset_path: ${{ env.ASSET_PATH }} + asset_name: ${{ env.ASSET_NAME }} + asset_content_type: application/octet-stream + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/check-labels.yml b/.github/workflows/check-labels.yml index 83b52e82313257affaea41eb66da44b164544116..97562f0da09569931582864bd764e6724900d619 100644 --- a/.github/workflows/check-labels.yml +++ b/.github/workflows/check-labels.yml @@ -3,11 +3,15 @@ name: Check labels on: pull_request: types: [labeled, opened, synchronize, unlabeled] + merge_group: jobs: check-labels: runs-on: ubuntu-latest steps: + - name: Skip merge queue + if: ${{ contains(github.ref, 'gh-readonly-queue') }} + run: exit 0 - name: Pull image env: IMAGE: paritytech/ruled_labels:0.4.0 diff --git a/.github/workflows/check-licenses.yml b/.github/workflows/check-licenses.yml index a5c4f2577c0019b91d4ba94da46b2dc7f15e2ec4..a66e3a539984ac7e96728d9812b5cdb9b63e26d7 100644 --- a/.github/workflows/check-licenses.yml +++ b/.github/workflows/check-licenses.yml @@ -2,6 +2,7 @@ name: Check licenses on: pull_request: + merge_group: permissions: packages: read @@ -14,13 +15,13 @@ jobs: NODE_AUTH_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - name: Checkout sources - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 - - uses: actions/setup-node@v3.8.1 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/setup-node@v4.0.0 with: node-version: "18.x" registry-url: "https://npm.pkg.github.com" scope: "@paritytech" - + - name: Check the licenses in Polkadot run: | shopt -s globstar diff --git a/.github/workflows/check-links.yml b/.github/workflows/check-links.yml new file mode 100644 index 0000000000000000000000000000000000000000..0932d38c9adda4e170745deaecb0cb18bec67ed8 --- /dev/null +++ b/.github/workflows/check-links.yml @@ -0,0 +1,41 @@ +name: Check links + +on: + pull_request: + paths: + - "*.rs" + - "*.prdoc" + - ".github/workflows/check-links.yml" + - ".config/lychee.toml" + types: [opened, synchronize, reopened, ready_for_review] + merge_group: + +permissions: + packages: read + +jobs: + link-checker: + runs-on: ubuntu-latest + steps: + - name: Restore lychee cache + uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 (7. Sep 2023) + with: + path: .lycheecache + key: cache-lychee-${{ github.sha }} + # This should restore from the most recent one: + restore-keys: cache-lychee- + + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.0 (22. Sep 2023) + + - name: Lychee link checker + uses: lycheeverse/lychee-action@2ac9f030ccdea0033e2510a23a67da2a2da98492 # for v1.8.0 (15. 
May 2023) + with: + args: >- + --config .config/lychee.toml + --no-progress + './**/*.rs' + './**/*.prdoc' + fail: true + env: + # To bypass GitHub rate-limit: + GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} diff --git a/.github/workflows/check-markdown.yml b/.github/workflows/check-markdown.yml index be676d3c1e8c85afc4ee39a6427105487c6a5d53..2108f94209003c55c0965eeedb09a1336e340242 100644 --- a/.github/workflows/check-markdown.yml +++ b/.github/workflows/check-markdown.yml @@ -3,6 +3,7 @@ name: Check Markdown on: pull_request: types: [opened, synchronize, reopened, ready_for_review] + merge_group: permissions: packages: read @@ -13,9 +14,9 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - uses: actions/setup-node@v3.8.1 + - uses: actions/setup-node@v4.0.0 with: node-version: "18.x" registry-url: "https://npm.pkg.github.com" @@ -23,8 +24,8 @@ jobs: - name: Install tooling run: | - npm install -g markdownlint-cli - markdownlint --version + npm install -g markdownlint-cli + markdownlint --version - name: Check Markdown env: diff --git a/.github/workflows/check-prdoc.yml b/.github/workflows/check-prdoc.yml index 23ae2d1aca9dc7e9cdaa099402332dfb543e9641..54e4d2b9680e107e63559fbe578878e3ef45225b 100644 --- a/.github/workflows/check-prdoc.yml +++ b/.github/workflows/check-prdoc.yml @@ -3,9 +3,10 @@ name: Check PRdoc on: pull_request: types: [labeled, opened, synchronize, unlabeled] + merge_group: env: - IMAGE: paritytech/prdoc:latest + IMAGE: paritytech/prdoc:v0.0.5 API_BASE: https://api.github.com/repos REPO: ${{ github.repository }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} @@ -17,6 +18,9 @@ jobs: check-prdoc: runs-on: ubuntu-latest steps: + - name: Skip merge queue + if: ${{ contains(github.ref, 'gh-readonly-queue') }} + run: exit 0 - name: Pull image run: | echo "Pulling $IMAGE" @@ -40,7 +44,7 @@ jobs: - name: Checkout repo if: ${{ !contains(steps.get-labels.outputs.labels, 'R0') }} - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 #v4.1.0 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 #v4.1.1 - name: PRdoc check for PR#${{ github.event.pull_request.number }} if: ${{ !contains(steps.get-labels.outputs.labels, 'R0') }} diff --git a/.github/workflows/check-publish.yml b/.github/workflows/check-publish.yml new file mode 100644 index 0000000000000000000000000000000000000000..c0d2b889381343a10f84a23d4ebf5fe6d10b4742 --- /dev/null +++ b/.github/workflows/check-publish.yml @@ -0,0 +1,29 @@ +name: Check publish + +on: + push: + branches: + - master + pull_request: + types: [opened, synchronize, reopened, ready_for_review] + merge_group: + +jobs: + check-publish: + strategy: + matrix: + os: ["ubuntu-latest"] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 + + - name: Rust Cache + uses: Swatinem/rust-cache@3cf7f8cc28d1b4e7d01e3783be10a97d55d483c8 # v2.7.1 + with: + cache-on-failure: true + + - name: install parity-publish + run: cargo install parity-publish --profile dev + + - name: parity-publish check + run: parity-publish check --allow-unpublished diff --git a/.github/workflows/claim-crates.yml b/.github/workflows/claim-crates.yml new file mode 100644 index 0000000000000000000000000000000000000000..345d24c7566748db92fd7c28b840bdac13d6b1d6 --- /dev/null +++ b/.github/workflows/claim-crates.yml @@ -0,0 +1,26 @@ +name: Claim Crates + +on: + push: + branches: + - 
master + +jobs: + claim-crates: + runs-on: ubuntu-latest + environment: master + steps: + - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 + + - name: Rust Cache + uses: Swatinem/rust-cache@3cf7f8cc28d1b4e7d01e3783be10a97d55d483c8 # v2.7.1 + with: + cache-on-failure: true + + - name: install parity-publish + run: cargo install parity-publish@0.2.0 + + - name: parity-publish claim + env: + PARITY_PUBLISH_CRATESIO_TOKEN: ${{ secrets.CRATESIO_PUBLISH_CLAIM_TOKEN }} + run: parity-publish claim diff --git a/.github/workflows/fmt-check.yml b/.github/workflows/fmt-check.yml index db93a3d53a76ceab86879ffa8740df380ddeb2d2..e4d39acabfd776b67ac37df28b0905b0517967ef 100644 --- a/.github/workflows/fmt-check.yml +++ b/.github/workflows/fmt-check.yml @@ -6,6 +6,7 @@ on: - master pull_request: types: [opened, synchronize, reopened, ready_for_review] + merge_group: jobs: quick_check: @@ -14,9 +15,9 @@ jobs: os: ["ubuntu-latest"] runs-on: ${{ matrix.os }} container: - image: paritytech/ci-unified:bullseye-1.70.0-2023-05-23-v20230706 + image: paritytech/ci-unified:bullseye-1.73.0-2023-11-01-v20231025 steps: - - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Cargo fmt run: cargo +nightly fmt --all -- --check diff --git a/.github/workflows/gitspiegel-trigger.yml b/.github/workflows/gitspiegel-trigger.yml new file mode 100644 index 0000000000000000000000000000000000000000..59347fad6d6fc663861c3ca3ddd256239987ff55 --- /dev/null +++ b/.github/workflows/gitspiegel-trigger.yml @@ -0,0 +1,23 @@ +name: gitspiegel sync + +# This workflow doesn't do anything, its only use is to trigger the "workflow_run" +# webhook that'll be consumed by gitspiegel. +# This way, gitspiegel won't do mirroring unless this workflow runs, +# and running the workflow is protected by GitHub + +on: + pull_request: + types: + - opened + - synchronize + - unlocked + - ready_for_review + - reopened + merge_group: + +jobs: + sync: + runs-on: ubuntu-latest + steps: + - name: Do nothing + run: echo "let's go" diff --git a/.github/workflows/merge-queue.yml b/.github/workflows/merge-queue.yml new file mode 100644 index 0000000000000000000000000000000000000000..f3fb7765ca67f9b56aa5afaa0ee63dacba761321 --- /dev/null +++ b/.github/workflows/merge-queue.yml @@ -0,0 +1,24 @@ +name: Merge-Queue + +on: + merge_group: + +jobs: + trigger-merge-queue-action: + runs-on: ubuntu-latest + environment: master + steps: + - name: Generate token + id: app_token + uses: tibdex/github-app-token@3beb63f4bd073e61482598c45c71c1019b59b73a # v2.1.0 + with: + app_id: ${{ secrets.REVIEW_APP_ID }} + private_key: ${{ secrets.REVIEW_APP_KEY }} + - name: Add Merge Queue status check + uses: billyjbryant/create-status-check@3e6fa0ac599d10d9588cf9516ca4330ef669b858 # v2 + with: + authToken: ${{ steps.app_token.outputs.token }} + context: 'review-bot' + description: 'PRs for merge queue get approved' + state: 'success' + sha: ${{ github.event.merge_group.head_commit.id }} diff --git a/.github/workflows/pr-custom-review.yml b/.github/workflows/pr-custom-review.yml index b15d20c696fe83fd5c19cf834328458c8a5c6c3c..4e0809cbfdc3743e0b01f0e023ad74a88a28fed2 100644 --- a/.github/workflows/pr-custom-review.yml +++ b/.github/workflows/pr-custom-review.yml @@ -14,11 +14,15 @@ on: - ready_for_review - converted_to_draft pull_request_review: + merge_group: jobs: pr-custom-review: runs-on: ubuntu-latest steps: + - name: Skip merge queue + if: ${{ 
contains(github.ref, 'gh-readonly-queue') }} + run: exit 0 - name: Skip if pull request is in Draft # `if: github.event.pull_request.draft == true` should be kept here, at # the step level, rather than at the job level. The latter is not diff --git a/.github/workflows/release-50_publish-docker.yml b/.github/workflows/release-50_publish-docker.yml index 15631c172b9fe1fe2027d661149615435a3b975d..567e996b8fd94a61f92e457435a05c57469179f2 100644 --- a/.github/workflows/release-50_publish-docker.yml +++ b/.github/workflows/release-50_publish-docker.yml @@ -79,7 +79,7 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 #TODO: this step will be needed when automated triggering will work #this step runs only if the workflow is triggered automatically when new release is published @@ -114,10 +114,11 @@ jobs: if: ${{ inputs.binary == 'polkadot-parachain' || inputs.image_type == 'rc' }} runs-on: ubuntu-latest needs: fetch-artifacts + environment: release steps: - name: Checkout sources - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Get artifacts from cache uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 @@ -237,9 +238,10 @@ jobs: if: ${{ inputs.binary == 'polkadot' && inputs.image_type == 'release' }} runs-on: ubuntu-latest needs: fetch-latest-debian-package-version + environment: release steps: - name: Checkout sources - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Set up Docker Buildx uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0 @@ -266,7 +268,7 @@ jobs: - name: Build and push id: docker_build - uses: docker/build-push-action@0565240e2d4ab88bba5387d719585280857ece09 # v5.0.0 + uses: docker/build-push-action@4a13e500e55cf31b7a5d59a38ab2040ab0f42f56 # v5.1.0 with: push: true file: docker/dockerfiles/polkadot/polkadot_injected_debian.Dockerfile diff --git a/.github/workflows/review-bot.yml b/.github/workflows/review-bot.yml index b9799935abe678dea2600298cf5bbe5309f08112..5970989cde09374743bde8ac8890eae192527cec 100644 --- a/.github/workflows/review-bot.yml +++ b/.github/workflows/review-bot.yml @@ -1,31 +1,32 @@ -name: Review PR +name: Review Bot on: - pull_request_target: + workflow_run: + workflows: + - Review-Trigger types: - - opened - - reopened - - synchronize - - review_requested - - review_request_removed - - ready_for_review - pull_request_review: - -permissions: - contents: read + - completed jobs: review-approvals: runs-on: ubuntu-latest + environment: master steps: + - name: Extract content of artifact + id: number + uses: Bullrich/extract-text-from-artifact@v1.0.0 + with: + artifact-name: pr_number - name: Generate token - id: team_token + id: app_token uses: tibdex/github-app-token@v1 with: app_id: ${{ secrets.REVIEW_APP_ID }} private_key: ${{ secrets.REVIEW_APP_KEY }} - name: "Evaluates PR reviews and assigns reviewers" - uses: paritytech/review-bot@v2.0.1 + uses: paritytech/review-bot@v2.2.0 with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - team-token: ${{ steps.team_token.outputs.token }} - checks-token: ${{ steps.team_token.outputs.token }} + repo-token: ${{ steps.app_token.outputs.token }} + team-token: ${{ steps.app_token.outputs.token }} + checks-token: ${{ 
steps.app_token.outputs.token }} + pr-number: ${{ steps.number.outputs.content }} + request-reviewers: true diff --git a/.github/workflows/review-trigger.yml b/.github/workflows/review-trigger.yml new file mode 100644 index 0000000000000000000000000000000000000000..e5fcb434fd360bd229cbc9e18a5588c24afac2fb --- /dev/null +++ b/.github/workflows/review-trigger.yml @@ -0,0 +1,37 @@ +name: Review-Trigger + +on: + pull_request_target: + types: + - opened + - reopened + - synchronize + - review_requested + - review_request_removed + - ready_for_review + pull_request_review: + merge_group: + +jobs: + trigger-review-bot: + # (It is not a draft) && (it is not a review || it is an approving review) + if: ${{ github.event.pull_request.draft != true && (github.event_name != 'pull_request_review' || (github.event.review && github.event.review.state == 'APPROVED')) }} + runs-on: ubuntu-latest + name: trigger review bot + steps: + - name: Skip merge queue + if: ${{ contains(github.ref, 'gh-readonly-queue') }} + run: exit 0 + - name: Get PR number + env: + PR_NUMBER: ${{ github.event.pull_request.number }} + run: | + echo "Saving PR number: $PR_NUMBER" + mkdir -p ./pr + echo $PR_NUMBER > ./pr/pr_number + - uses: actions/upload-artifact@v3 + name: Save PR number + with: + name: pr_number + path: pr/ + retention-days: 5 diff --git a/.gitignore b/.gitignore index 35e02e706b4261dcac2a3f9c6686245a01de9a8a..581c417cb85408bee2e13bedefe5aa1edf0d3d33 100644 --- a/.gitignore +++ b/.gitignore @@ -5,6 +5,7 @@ .env* .idea .local +.lycheecache .vscode .wasm-binaries *.adoc @@ -16,7 +17,6 @@ **/._* **/.criterion/ **/*.rs.bk -**/chains/ **/hfuzz_target/ **/hfuzz_workspace/ **/node_modules diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 61451a9c462011369bc7ade4b593347f4a92f9f0..f7e54961fa987a1ba7cb130ec5fee5d0271fc76e 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -30,7 +30,6 @@ variables: RUSTY_CACHIER_COMPRESSION_METHOD: zstd NEXTEST_FAILURE_OUTPUT: immediate-final NEXTEST_SUCCESS_OUTPUT: final - ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.69" DOCKER_IMAGES_VERSION: "${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHORT_SHA}" default: @@ -136,11 +135,13 @@ default: - if: $CI_PIPELINE_SOURCE == "schedule" - if: $CI_COMMIT_REF_NAME == "master" - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs + - if: $CI_COMMIT_REF_NAME =~ /^gh-readonly-queue.*$/ # merge queues - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 .test-pr-refs: rules: - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs + - if: $CI_COMMIT_REF_NAME =~ /^gh-readonly-queue.*$/ # merge queues # handle the specific case where benches could store incorrect bench data because of the downstream staging runs # exclude cargo-check-benches from such runs @@ -152,6 +153,7 @@ default: - if: $CI_PIPELINE_SOURCE == "schedule" - if: $CI_COMMIT_REF_NAME == "master" - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs + - if: $CI_COMMIT_REF_NAME =~ /^gh-readonly-queue.*$/ # merge queues - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 .test-refs-no-trigger: @@ -162,6 +164,7 @@ default: - if: $CI_PIPELINE_SOURCE == "schedule" - if: $CI_COMMIT_REF_NAME == "master" - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs + - if: $CI_COMMIT_REF_NAME =~ /^gh-readonly-queue.*$/ # merge queues - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. 
v1.0, v2.1rc1 - if: $CI_COMMIT_REF_NAME =~ /^ci-release-.*$/ @@ -172,6 +175,7 @@ default: - if: $CI_PIPELINE_SOURCE == "web" - if: $CI_PIPELINE_SOURCE == "schedule" - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs + - if: $CI_COMMIT_REF_NAME =~ /^gh-readonly-queue.*$/ # merge queues .publish-refs: rules: @@ -192,9 +196,7 @@ default: - if: $CI_COMMIT_REF_NAME == "master" - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs - -.zombienet-refs: - extends: .build-refs + - if: $CI_COMMIT_REF_NAME =~ /^gh-readonly-queue.*$/ # merge queues include: # check jobs diff --git a/.gitlab/check-each-crate.py b/.gitlab/check-each-crate.py index adad4f5bd583543b4610cd010a9cefbd04bc5401..da2eaad36c522e5ebfdc0d43e78c38507807e1a6 100755 --- a/.gitlab/check-each-crate.py +++ b/.gitlab/check-each-crate.py @@ -19,7 +19,11 @@ output = subprocess.check_output(["cargo", "tree", "--locked", "--workspace", "- crates = [] for line in output.splitlines(): if line != b"": - crates.append(line.decode('utf8').split(" ")[0]) + line = line.decode('utf8').split(" ") + crate_name = line[0] + # The crate path is always the last element in the line. + crate_path = line[len(line) - 1].replace("(", "").replace(")", "") + crates.append((crate_name, crate_path)) # Make the list unique and sorted crates = list(set(crates)) @@ -49,9 +53,9 @@ print(f"Crates per group: {crates_per_group}", file=sys.stderr) for i in range(0, crates_per_group + overflow_crates): crate = crates_per_group * target_group + i - print(f"Checking {crates[crate]}", file=sys.stderr) + print(f"Checking {crates[crate][0]}", file=sys.stderr) - res = subprocess.run(["cargo", "check", "--locked", "-p", crates[crate]]) + res = subprocess.run(["cargo", "check", "--locked"], cwd = crates[crate][1]) if res.returncode != 0: sys.exit(1) diff --git a/.gitlab/pipeline/build.yml b/.gitlab/pipeline/build.yml index 029c0f6a3cddd168edd83e645022f29155f9136a..5c13045706c40e2a6049e67e3a5bb2a1b140fddb 100644 --- a/.gitlab/pipeline/build.yml +++ b/.gitlab/pipeline/build.yml @@ -91,6 +91,7 @@ build-rustdoc: - .run-immediately variables: SKIP_WASM_BUILD: 1 + RUSTDOCFLAGS: "" artifacts: name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}-doc" when: on_success @@ -99,7 +100,6 @@ build-rustdoc: - ./crate-docs/ script: # FIXME: it fails with `RUSTDOCFLAGS="-Dwarnings"` and `--all-features` - # FIXME: return to stable when https://github.com/rust-lang/rust/issues/96937 gets into stable - time cargo doc --features try-runtime,experimental --workspace --no-deps - rm -f ./target/doc/.lock - mv ./target/doc ./crate-docs @@ -305,7 +305,7 @@ build-linux-substrate: # see https://github.com/paritytech/ci_cd/issues/682#issuecomment-1340953589 - git checkout -B "$CI_COMMIT_REF_NAME" "$CI_COMMIT_SHA" script: - - WASM_BUILD_NO_COLOR=1 time cargo build --locked --release -p node-cli + - WASM_BUILD_NO_COLOR=1 time cargo build --locked --release -p staging-node-cli - mv $CARGO_TARGET_DIR/release/substrate-node ./artifacts/substrate/substrate - echo -n "Substrate version = " - if [ "${CI_COMMIT_TAG}" ]; then diff --git a/.gitlab/pipeline/check.yml b/.gitlab/pipeline/check.yml index 4f92e6c15d2b07d93a3e16fabfc92369e0ee5e99..429491fb1742800487fd251fdaf3a58ba3c63a9e 100644 --- a/.gitlab/pipeline/check.yml +++ b/.gitlab/pipeline/check.yml @@ -21,17 +21,6 @@ check-try-runtime: # experimental code may rely on try-runtime and vice-versa - time cargo check --locked --all --features try-runtime,experimental -cargo-fmt-manifest: - stage: check - extends: - - 
.docker-env - - .common-refs - script: - - cargo install zepter --locked --version 0.11.0 -q -f --no-default-features && zepter --version - - echo "👉 Hello developer! If you see this CI check failing then it means that one of the your changes in a Cargo.toml file introduced ill-formatted or unsorted features. Please take a look at 'docs/STYLE_GUIDE.md#manifest-formatting' to find out more." - - zepter format features --check - allow_failure: true # Experimental - # FIXME .cargo-deny-licenses: stage: check @@ -92,17 +81,14 @@ job-starter: script: - echo ok -test-rust-feature-propagation: +check-rust-feature-propagation: stage: check extends: - .kubernetes-env - - .test-pr-refs + - .common-refs script: - - cargo install --locked --version 0.11.1 -q -f zepter && zepter --version - - echo "👉 Hello developer! If you see this CI check failing then it means that one of the crates is missing a feature for one of its dependencies. The output below tells you which feature needs to be added for which dependency to which crate. You can do this by modifying the Cargo.toml file. For more context see the MR where this check was introduced https://github.com/paritytech/substrate/pull/14660" - - zepter lint propagate-feature --feature try-runtime --left-side-feature-missing=ignore --workspace --feature-enables-dep="try-runtime:frame-try-runtime" --locked - - zepter lint propagate-feature --feature runtime-benchmarks --left-side-feature-missing=ignore --workspace --feature-enables-dep="runtime-benchmarks:frame-benchmarking" --locked - - zepter lint propagate-feature --feature std --left-side-feature-missing=ignore --workspace --locked + - cargo install --locked --version 0.13.3 -q -f zepter && zepter --version + - zepter run check # More info can be found here: https://github.com/paritytech/polkadot/pull/5865 .check-runtime-migration: @@ -113,12 +99,16 @@ test-rust-feature-propagation: script: - | export RUST_LOG=remote-ext=debug,runtime=debug - echo "---------- Installing try-runtime-cli ----------" - time cargo install --locked --git https://github.com/paritytech/try-runtime-cli --tag v0.3.0 + + echo "---------- Downloading try-runtime CLI ----------" + curl -sL https://github.com/paritytech/try-runtime-cli/releases/download/v0.3.3/try-runtime-x86_64-unknown-linux-musl -o try-runtime + chmod +x ./try-runtime + echo "---------- Building ${PACKAGE} runtime ----------" time cargo build --release --locked -p "$PACKAGE" --features try-runtime - echo "---------- Executing `on-runtime-upgrade` for ${NETWORK} ----------" - time try-runtime \ + + echo "---------- Executing on-runtime-upgrade for ${NETWORK} ----------" + time ./try-runtime \ --runtime ./target/release/wbuild/"$PACKAGE"/"$WASM" \ on-runtime-upgrade --checks=pre-and-post ${EXTRA_ARGS} live --uri ${URI} @@ -161,6 +151,31 @@ check-runtime-migration-asset-hub-westend: PACKAGE: "asset-hub-westend-runtime" WASM: "asset_hub_westend_runtime.compact.compressed.wasm" URI: "wss://westend-asset-hub-rpc.polkadot.io:443" + +check-runtime-migration-asset-hub-rococo: + stage: check + extends: + - .docker-env + - .test-pr-refs + - .check-runtime-migration + variables: + NETWORK: "asset-hub-rococo" + PACKAGE: "asset-hub-rococo-runtime" + WASM: "asset_hub_rococo_runtime.compact.compressed.wasm" + URI: "wss://rococo-asset-hub-rpc.polkadot.io:443" + +# Check runtime migrations for Parity managed bridge hub chains +check-runtime-migration-bridge-hub-westend: + stage: check + extends: + - .docker-env + - .test-pr-refs + - .check-runtime-migration + variables: + NETWORK: 
"bridge-hub-westend" + PACKAGE: "bridge-hub-westend-runtime" + WASM: "bridge_hub_westend_runtime.compact.compressed.wasm" + URI: "wss://westend-bridge-hub-rpc.polkadot.io:443" check-runtime-migration-bridge-hub-rococo: stage: check @@ -187,6 +202,19 @@ check-runtime-migration-contracts-rococo: WASM: "contracts_rococo_runtime.compact.compressed.wasm" URI: "wss://rococo-contracts-rpc.polkadot.io:443" +# Check runtime migrations for Parity managed collectives chains +check-runtime-migration-collectives-westend: + stage: check + extends: + - .docker-env + - .test-pr-refs + - .check-runtime-migration + variables: + NETWORK: "collectives-westend" + PACKAGE: "collectives-westend-runtime" + WASM: "collectives_westend_runtime.compact.compressed.wasm" + URI: "wss://westend-collectives-rpc.polkadot.io:443" + find-fail-ci-phrase: stage: check variables: diff --git a/.gitlab/pipeline/publish.yml b/.gitlab/pipeline/publish.yml index a03d407c040904a6a1a4fd7095052a039d0019e0..92ebc9eea1faad8a6ce87b1bb322431de1126aa4 100644 --- a/.gitlab/pipeline/publish.yml +++ b/.gitlab/pipeline/publish.yml @@ -63,16 +63,16 @@ publish-rustdoc: after_script: - rm -rf .git/ ./* -# cumulus - +# note: images are used not only in zombienet but also in rococo, wococo and versi .build-push-image: image: $BUILDAH_IMAGE variables: DOCKERFILE: "" # docker/path-to.Dockerfile IMAGE_NAME: "" # docker.io/paritypr/image_name script: - # - test "$PARITYPR_USER" -a "$PARITYPR_PASS" || - # ( echo "no docker credentials provided"; exit 1 ) + # Dockertag should differ in a merge queue + # TODO: test this + # - if [[ $CI_COMMIT_REF_NAME == *"gh-readonly-queue"* ]]; export DOCKER_IMAGES_VERSION="${CI_COMMIT_SHORT_SHA}"; fi - $BUILDAH_COMMAND build --format=docker --build-arg VCS_REF="${CI_COMMIT_SHA}" @@ -112,59 +112,6 @@ build-push-image-test-parachain: variables: DOCKERFILE: "docker/dockerfiles/test-parachain_injected.Dockerfile" IMAGE_NAME: "docker.io/paritypr/test-parachain" -# publish-s3: -# stage: publish -# extends: -# - .kubernetes-env -# - .publish-refs -# image: paritytech/awscli:latest -# needs: -# - job: build-linux-stable-cumulus -# artifacts: true -# variables: -# GIT_STRATEGY: none -# BUCKET: "releases.parity.io" -# PREFIX: "cumulus/${ARCH}-${DOCKER_OS}" -# script: -# - echo "___Publishing a binary with debug assertions!___" -# - echo "___VERSION = $(cat ./artifacts/VERSION) ___" -# - aws s3 sync ./artifacts/ s3://${BUCKET}/${PREFIX}/$(cat ./artifacts/VERSION)/ -# - echo "___Updating objects in latest path___" -# - aws s3 sync s3://${BUCKET}/${PREFIX}/$(cat ./artifacts/VERSION)/ s3://${BUCKET}/${PREFIX}/latest/ -# after_script: -# - aws s3 ls s3://${BUCKET}/${PREFIX}/latest/ -# --recursive --human-readable --summarize - -# publish-benchmarks-assets-s3: &publish-benchmarks -# stage: publish -# extends: -# - .kubernetes-env -# - .benchmarks-refs -# image: paritytech/awscli:latest -# needs: -# - job: benchmarks-assets -# artifacts: true -# variables: -# GIT_STRATEGY: none -# BUCKET: "releases.parity.io" -# PREFIX: "cumulus/$CI_COMMIT_REF_NAME/benchmarks-assets" -# script: -# - echo "___Publishing benchmark results___" -# - aws s3 sync ./artifacts/ s3://${BUCKET}/${PREFIX}/ -# after_script: -# - aws s3 ls s3://${BUCKET}/${PREFIX}/ --recursive --human-readable --summarize - -# publish-benchmarks-collectives-s3: -# <<: *publish-benchmarks -# variables: -# GIT_STRATEGY: none -# BUCKET: "releases.parity.io" -# PREFIX: "cumulus/$CI_COMMIT_REF_NAME/benchmarks-collectives" -# needs: -# - job: benchmarks-collectives -# artifacts: true - 
-### Polkadot build-push-image-polkadot-debug: stage: publish @@ -217,143 +164,3 @@ build-push-image-substrate-pr: variables: DOCKERFILE: "docker/dockerfiles/substrate_injected.Dockerfile" IMAGE_NAME: "docker.io/paritypr/substrate" -# old way - -# .build-push-image-polkadot: -# before_script: -# # - test -s ./artifacts/VERSION || exit 1 -# # - test -s ./artifacts/EXTRATAG || exit 1 -# - VERSION="$(cat ./artifacts/VERSION)" -# - EXTRATAG="$(cat ./artifacts/EXTRATAG)" -# - echo "Polkadot version = ${VERSION} (EXTRATAG = ${EXTRATAG})" -# script: -# # - test "$DOCKER_USER" -a "$DOCKER_PASS" || -# # ( echo "no docker credentials provided"; exit 1 ) -# - cd ./artifacts -# - $BUILDAH_COMMAND build -# --format=docker -# --build-arg VCS_REF="${CI_COMMIT_SHA}" -# --build-arg BUILD_DATE="$(date -u '+%Y-%m-%dT%H:%M:%SZ')" -# --build-arg IMAGE_NAME="${IMAGE_NAME}" -# --tag "$IMAGE_NAME:$VERSION" -# --tag "$IMAGE_NAME:$EXTRATAG" -# --file ${DOCKERFILE} . -# # The job will success only on the protected branch -# # - echo "$DOCKER_PASS" | -# # buildah login --username "$DOCKER_USER" --password-stdin docker.io -# # - $BUILDAH_COMMAND info -# # - $BUILDAH_COMMAND push --format=v2s2 "$IMAGE_NAME:$VERSION" -# # - $BUILDAH_COMMAND push --format=v2s2 "$IMAGE_NAME:$EXTRATAG" -# after_script: -# - buildah logout --all - -# publish-polkadot-debug-image: -# stage: publish -# image: ${BUILDAH_IMAGE} -# extends: -# - .kubernetes-env -# - .build-push-image-polkadot -# rules: -# - if: $CI_PIPELINE_SOURCE == "web" -# - if: $CI_PIPELINE_SOURCE == "schedule" -# - if: $CI_COMMIT_REF_NAME == "master" -# - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs -# - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 -# variables: -# GIT_STRATEGY: none -# DOCKER_USER: ${PARITYPR_USER} -# DOCKER_PASS: ${PARITYPR_PASS} -# # docker/dockerfiles/polkadot/polkadot_injected_debug.Dockerfile -# DOCKERFILE: polkadot_injected_debug.Dockerfile -# IMAGE_NAME: docker.io/paritypr/polkadot-debug -# needs: -# - job: build-linux-stable -# artifacts: true -# after_script: -# # pass artifacts to the zombienet-tests job -# # https://docs.gitlab.com/ee/ci/multi_project_pipelines.html#with-variable-inheritance -# - echo "PARACHAINS_IMAGE_NAME=${IMAGE_NAME}" > ./artifacts/parachains.env -# - echo "PARACHAINS_IMAGE_TAG=$(cat ./artifacts/EXTRATAG)" >> ./artifacts/parachains.env -# artifacts: -# reports: -# # this artifact is used in zombienet-tests job -# dotenv: ./artifacts/parachains.env -# expire_in: 1 days - -# publish-test-collators-image: -# # service image for zombienet -# stage: publish -# extends: -# - .kubernetes-env -# - .build-push-image-polkadot -# - .zombienet-refs -# variables: -# CI_IMAGE: ${BUILDAH_IMAGE} -# GIT_STRATEGY: none -# DOCKER_USER: ${PARITYPR_USER} -# DOCKER_PASS: ${PARITYPR_PASS} -# # docker/dockerfiles/collator_injected.Dockerfile -# DOCKERFILE: collator_injected.Dockerfile -# IMAGE_NAME: docker.io/paritypr/colander -# needs: -# - job: build-test-collators -# artifacts: true -# after_script: -# - buildah logout --all -# # pass artifacts to the zombienet-tests job -# - echo "COLLATOR_IMAGE_NAME=${IMAGE_NAME}" > ./artifacts/collator.env -# - echo "COLLATOR_IMAGE_TAG=$(cat ./artifacts/EXTRATAG)" >> ./artifacts/collator.env -# artifacts: -# reports: -# # this artifact is used in zombienet-tests job -# dotenv: ./artifacts/collator.env - -# publish-malus-image: -# # service image for Simnet -# stage: publish -# extends: -# - .kubernetes-env -# - .build-push-image-polkadot -# - .zombienet-refs -# variables: -# CI_IMAGE: 
${BUILDAH_IMAGE} -# GIT_STRATEGY: none -# DOCKER_USER: ${PARITYPR_USER} -# DOCKER_PASS: ${PARITYPR_PASS} -# # docker/dockerfiles/malus_injected.Dockerfile -# DOCKERFILE: malus_injected.Dockerfile -# IMAGE_NAME: docker.io/paritypr/malus -# needs: -# - job: build-malus -# artifacts: true -# after_script: -# - buildah logout "$IMAGE_NAME" -# # pass artifacts to the zombienet-tests job -# - echo "MALUS_IMAGE_NAME=${IMAGE_NAME}" > ./artifacts/malus.env -# - echo "MALUS_IMAGE_TAG=$(cat ./artifacts/EXTRATAG)" >> ./artifacts/malus.env -# artifacts: -# reports: -# # this artifact is used in zombienet-tests job -# dotenv: ./artifacts/malus.env - -# substrate - -# publish-substrate-image-pr: -# # service image for zombienet -# stage: publish -# extends: -# - .kubernetes-env -# - .build-push-image-polkadot -# - .zombienet-refs -# variables: -# CI_IMAGE: ${BUILDAH_IMAGE} -# GIT_STRATEGY: none -# DOCKER_USER: ${PARITYPR_USER} -# DOCKER_PASS: ${PARITYPR_PASS} -# DOCKERFILE: substrate_injected.Dockerfile -# IMAGE_NAME: docker.io/paritypr/substrate -# needs: -# - job: build-linux-substrate -# artifacts: true -# after_script: -# - buildah logout "$IMAGE_NAME" diff --git a/.gitlab/pipeline/short-benchmarks.yml b/.gitlab/pipeline/short-benchmarks.yml index 5bfe4b729e6204a9899e6b1abd0c70d5a1f400f4..0218d3fdac062593d527514e40c104e6baeaa232 100644 --- a/.gitlab/pipeline/short-benchmarks.yml +++ b/.gitlab/pipeline/short-benchmarks.yml @@ -59,6 +59,11 @@ short-benchmark-asset-hub-kusama: variables: RUNTIME_CHAIN: asset-hub-kusama-dev +short-benchmark-asset-hub-rococo: + <<: *short-bench-cumulus + variables: + RUNTIME_CHAIN: asset-hub-rococo-dev + short-benchmark-asset-hub-westend: <<: *short-bench-cumulus variables: @@ -79,12 +84,27 @@ short-benchmark-bridge-hub-rococo: variables: RUNTIME_CHAIN: bridge-hub-rococo-dev +short-benchmark-bridge-hub-westend: + <<: *short-bench-cumulus + variables: + RUNTIME_CHAIN: bridge-hub-westend-dev + short-benchmark-collectives-polkadot: <<: *short-bench-cumulus variables: RUNTIME_CHAIN: collectives-polkadot-dev +short-benchmark-collectives-westend: + <<: *short-bench-cumulus + variables: + RUNTIME_CHAIN: collectives-westend-dev + short-benchmark-glutton-kusama: <<: *short-bench-cumulus variables: RUNTIME_CHAIN: glutton-kusama-dev-1300 + +short-benchmark-glutton-westend: + <<: *short-bench-cumulus + variables: + RUNTIME_CHAIN: glutton-westend-dev-1300 diff --git a/.gitlab/pipeline/test.yml b/.gitlab/pipeline/test.yml index e1e8b96bca5d6b0071e433c27bc5b147924dc8a1..79ef9070dba9e7a078940feede3b8a33f645f24e 100644 --- a/.gitlab/pipeline/test.yml +++ b/.gitlab/pipeline/test.yml @@ -29,7 +29,7 @@ test-linux-stable: --locked \ --release \ --no-fail-fast \ - --features try-runtime,experimental \ + --features try-runtime,experimental,ci-only-tests \ --partition count:${CI_NODE_INDEX}/${CI_NODE_TOTAL} # Upload tests results to Elasticsearch - echo "Upload test results to Elasticsearch" @@ -181,10 +181,8 @@ test-rustdoc: - .run-immediately variables: SKIP_WASM_BUILD: 1 - RUSTDOCFLAGS: "-Dwarnings" script: - time cargo doc --workspace --all-features --no-deps - allow_failure: true cargo-check-all-benches: stage: test @@ -354,7 +352,7 @@ quick-benchmarks: WASM_BUILD_NO_COLOR: 1 WASM_BUILD_RUSTFLAGS: "-C debug-assertions -D warnings" script: - - time cargo run --locked --release -p node-cli --bin substrate-node --features runtime-benchmarks -- benchmark pallet --execution wasm --wasm-execution compiled --chain dev --pallet "*" --extrinsic "*" --steps 2 --repeat 1 + - time cargo run --locked 
--release -p staging-node-cli --bin substrate-node --features runtime-benchmarks -- benchmark pallet --execution wasm --wasm-execution compiled --chain dev --pallet "*" --extrinsic "*" --steps 2 --repeat 1 test-frame-examples-compile-to-wasm: # into one job @@ -398,7 +396,7 @@ test-linux-stable-int: script: - WASM_BUILD_NO_COLOR=1 RUST_LOG=sync=trace,consensus=trace,client=trace,state-db=trace,db=trace,forks=trace,state_db=trace,storage_cache=trace - time cargo test -p node-cli --release --locked -- --ignored + time cargo test -p staging-node-cli --release --locked -- --ignored # more information about this job can be found here: # https://github.com/paritytech/substrate/pull/6916 @@ -505,3 +503,23 @@ cargo-hfuzz: - cargo hfuzz build - for target in $(cargo read-manifest | jq -r '.targets | .[] | .name'); do cargo hfuzz run "$target" || { printf "fuzzing failure for %s\n" "$target"; exit 1; }; done + +# cf https://github.com/paritytech/polkadot-sdk/issues/1652 +test-syscalls: + stage: test + extends: + - .docker-env + - .common-refs + - .run-immediately + variables: + SKIP_WASM_BUILD: 1 + script: + - cargo build --locked --profile production --target x86_64-unknown-linux-musl --bin polkadot-execute-worker --bin polkadot-prepare-worker + - cd polkadot/scripts/list-syscalls + - ./list-syscalls.rb ../../../target/x86_64-unknown-linux-musl/production/polkadot-execute-worker --only-used-syscalls | diff -u execute-worker-syscalls - + - ./list-syscalls.rb ../../../target/x86_64-unknown-linux-musl/production/polkadot-prepare-worker --only-used-syscalls | diff -u prepare-worker-syscalls - + after_script: + - if [[ "$CI_JOB_STATUS" == "failed" ]]; then + printf "The x86_64 syscalls used by the worker binaries have changed. Please review if this is expected and update polkadot/scripts/list-syscalls/*-worker-syscalls as needed.\n"; + fi + allow_failure: true # TODO: remove this once we have an idea how often the syscall lists will change diff --git a/.gitlab/pipeline/zombienet.yml b/.gitlab/pipeline/zombienet.yml index 64210d6a00ab5b83b6b018ca3e0c27d827f74de1..fc3fd1256194623eeba26d516a7f9fd84970a215 100644 --- a/.gitlab/pipeline/zombienet.yml +++ b/.gitlab/pipeline/zombienet.yml @@ -1,3 +1,8 @@ +.zombienet-refs: + extends: .build-refs + variables: + ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.82" + include: # substrate tests - .gitlab/pipeline/zombienet/substrate.yml diff --git a/.gitlab/pipeline/zombienet/cumulus.yml b/.gitlab/pipeline/zombienet/cumulus.yml index 3f2c6f64fbfe120ae15bf394982d6dbdd29405b0..3cac67c2966e11d8e41300d8983d8cd96acfe67e 100644 --- a/.gitlab/pipeline/zombienet/cumulus.yml +++ b/.gitlab/pipeline/zombienet/cumulus.yml @@ -3,6 +3,8 @@ .zombienet-before-script: before_script: + # Exit if the job is not merge queue + # - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi - echo "Zombie-net Tests Config" - echo "${ZOMBIENET_IMAGE}" - echo "${POLKADOT_IMAGE}" diff --git a/.gitlab/pipeline/zombienet/polkadot.yml b/.gitlab/pipeline/zombienet/polkadot.yml index 0402c194134b78256aaee00999e87c7a18397bf0..995dd9825320a6108f6320c74dc61eaf8f192a47 100644 --- a/.gitlab/pipeline/zombienet/polkadot.yml +++ b/.gitlab/pipeline/zombienet/polkadot.yml @@ -4,6 +4,8 @@ # common settings for all zombienet jobs .zombienet-polkadot-common: before_script: + # Exit if the job is not merge queue + # - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi - export 
BUILD_RELEASE_VERSION="$(cat ./artifacts/BUILD_RELEASE_VERSION)" # from build-linux-stable job - export DEBUG=zombie,zombie::network-node - export ZOMBIENET_INTEGRATION_TEST_IMAGE="${POLKADOT_IMAGE}":${PIPELINE_IMAGE_TAG} @@ -12,12 +14,12 @@ - export MALUS_IMAGE="${MALUS_IMAGE}":${PIPELINE_IMAGE_TAG} - IMAGE_AVAILABLE=$(curl -o /dev/null -w "%{http_code}" -I -L -s https://registry.hub.docker.com/v2/repositories/parity/polkadot/tags/${BUILD_RELEASE_VERSION}) - if [ $IMAGE_AVAILABLE -eq 200 ]; then - export ZOMBIENET_INTEGRATION_TEST_SECONDARY_IMAGE="docker.io/parity/polkadot:${BUILD_RELEASE_VERSION}"; + export ZOMBIENET_INTEGRATION_TEST_SECONDARY_IMAGE="docker.io/parity/polkadot:${BUILD_RELEASE_VERSION}"; else - echo "Getting the image to use as SECONDARY, using ${BUILD_RELEASE_VERSION} as base"; - VERSIONS=$(curl -L -s 'https://registry.hub.docker.com/v2/repositories/parity/polkadot/tags/' | jq -r '.results[].name'| grep -E "v[0-9]" |grep -vE "[0-9]-"); - VERSION_TO_USE=$(echo "${BUILD_RELEASE_VERSION}\n$VERSIONS"|sort -r|grep -A1 "${BUILD_RELEASE_VERSION}"|tail -1); - export ZOMBIENET_INTEGRATION_TEST_SECONDARY_IMAGE="docker.io/parity/polkadot:${VERSION_TO_USE}"; + echo "Getting the image to use as SECONDARY, using ${BUILD_RELEASE_VERSION} as base"; + VERSIONS=$(curl -L -s 'https://registry.hub.docker.com/v2/repositories/parity/polkadot/tags/' | jq -r '.results[].name'| grep -E "v[0-9]" |grep -vE "[0-9]-"); + VERSION_TO_USE=$(echo "${BUILD_RELEASE_VERSION}\n$VERSIONS"|sort -r|grep -A1 "${BUILD_RELEASE_VERSION}"|tail -1); + export ZOMBIENET_INTEGRATION_TEST_SECONDARY_IMAGE="docker.io/parity/polkadot:${VERSION_TO_USE}"; fi - echo "Zombienet Tests Config" - echo "gh-dir ${GH_DIR}" @@ -105,10 +107,20 @@ zombienet-polkadot-functional-0005-parachains-disputes-past-session: --local-dir="${LOCAL_DIR}/functional" --test="0005-parachains-disputes-past-session.zndsl" +zombienet-polkadot-functional-0006-parachains-max-tranche0: + extends: + - .zombienet-polkadot-common + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}/functional" + --test="0006-parachains-max-tranche0.zndsl" + zombienet-polkadot-smoke-0001-parachains-smoke-test: extends: - .zombienet-polkadot-common before_script: + # Exit if the job is not merge queue + # - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi - export ZOMBIENET_INTEGRATION_TEST_IMAGE="${POLKADOT_IMAGE}":${PIPELINE_IMAGE_TAG} - export COL_IMAGE="${COLANDER_IMAGE}":${PIPELINE_IMAGE_TAG} - echo "Zombienet Tests Config" @@ -126,6 +138,8 @@ zombienet-polkadot-smoke-0002-parachains-parachains-upgrade-smoke: extends: - .zombienet-polkadot-common before_script: + # Exit if the job is not merge queue + # - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi - export ZOMBIENET_INTEGRATION_TEST_IMAGE="${POLKADOT_IMAGE}":${PIPELINE_IMAGE_TAG} - export CUMULUS_IMAGE="docker.io/paritypr/polkadot-parachain-debug:${DOCKER_IMAGES_VERSION}" - echo "Zombienet Tests Config" @@ -168,6 +182,8 @@ zombienet-polkadot-misc-0002-upgrade-node: - job: build-linux-stable artifacts: true before_script: + # Exit if the job is not merge queue + # - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi - export ZOMBIENET_INTEGRATION_TEST_IMAGE="docker.io/parity/polkadot:latest" - echo "Overrided poladot image ${ZOMBIENET_INTEGRATION_TEST_IMAGE}" - export 
COL_IMAGE="${COLANDER_IMAGE}":${PIPELINE_IMAGE_TAG} diff --git a/.gitlab/pipeline/zombienet/substrate.yml b/.gitlab/pipeline/zombienet/substrate.yml index 9fb2f161ad73367a86279a6e7268f9116eae8f54..b687576267de5b40bab9fb1f544bb0afbb1959a0 100644 --- a/.gitlab/pipeline/zombienet/substrate.yml +++ b/.gitlab/pipeline/zombienet/substrate.yml @@ -4,6 +4,8 @@ # common settings for all zombienet jobs .zombienet-substrate-common: before_script: + # Exit if the job is not merge queue + # - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi - echo "Zombienet Tests Config" - echo "${ZOMBIENET_IMAGE}" - echo "${GH_DIR}" diff --git a/Cargo.lock b/Cargo.lock index cc80e2542fb063181700aee41685156559db3733..bfe24e528ebeb870fc75c79cf24150cfc084edcf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -317,33 +317,25 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" [[package]] -name = "ark-algebra-test-templates" -version = "0.4.2" +name = "ark-bls12-377" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "400bd3a79c741b1832f1416d4373ae077ef82ca14a8b4cee1248a2f11c8b9172" +checksum = "fb00293ba84f51ce3bd026bd0de55899c4e68f0a39a5728cebae3a73ffdc0a4f" dependencies = [ "ark-ec", "ark-ff", - "ark-serialize", "ark-std", - "hex", - "num-bigint", - "num-integer", - "num-traits", - "serde", - "serde_derive", - "serde_json", - "sha2 0.10.7", ] [[package]] -name = "ark-bls12-377" -version = "0.4.0" +name = "ark-bls12-377-ext" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb00293ba84f51ce3bd026bd0de55899c4e68f0a39a5728cebae3a73ffdc0a4f" +checksum = "20c7021f180a0cbea0380eba97c2af3c57074cdaffe0eef7e840e1c9f2841e55" dependencies = [ + "ark-bls12-377", "ark-ec", - "ark-ff", + "ark-models-ext", "ark-std", ] @@ -359,6 +351,20 @@ dependencies = [ "ark-std", ] +[[package]] +name = "ark-bls12-381-ext" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1dc4b3d08f19e8ec06e949712f95b8361e43f1391d94f65e4234df03480631c" +dependencies = [ + "ark-bls12-381", + "ark-ec", + "ark-ff", + "ark-models-ext", + "ark-serialize", + "ark-std", +] + [[package]] name = "ark-bw6-761" version = "0.4.0" @@ -371,6 +377,19 @@ dependencies = [ "ark-std", ] +[[package]] +name = "ark-bw6-761-ext" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccee5fba47266f460067588ee1bf070a9c760bf2050c1c509982c5719aadb4f2" +dependencies = [ + "ark-bw6-761", + "ark-ec", + "ark-ff", + "ark-models-ext", + "ark-std", +] + [[package]] name = "ark-ec" version = "0.4.2" @@ -385,6 +404,7 @@ dependencies = [ "hashbrown 0.13.2", "itertools 0.10.5", "num-traits", + "rayon", "zeroize", ] @@ -400,6 +420,19 @@ dependencies = [ "ark-std", ] +[[package]] +name = "ark-ed-on-bls12-377-ext" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "524a4fb7540df2e1a8c2e67a83ba1d1e6c3947f4f9342cc2359fc2e789ad731d" +dependencies = [ + "ark-ec", + "ark-ed-on-bls12-377", + "ark-ff", + "ark-models-ext", + "ark-std", +] + [[package]] name = "ark-ed-on-bls12-381-bandersnatch" version = "0.4.0" @@ -412,6 +445,19 @@ dependencies = [ "ark-std", ] +[[package]] +name = "ark-ed-on-bls12-381-bandersnatch-ext" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d15185f1acb49a07ff8cbe5f11a1adc5a93b19e211e325d826ae98e98e124346" +dependencies = [ + "ark-ec", + "ark-ed-on-bls12-381-bandersnatch", + "ark-ff", + "ark-models-ext", + "ark-std", +] + [[package]] name = "ark-ff" version = "0.4.2" @@ -456,64 +502,49 @@ dependencies = [ ] [[package]] -name = "ark-poly" -version = "0.4.2" +name = "ark-models-ext" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d320bfc44ee185d899ccbadfa8bc31aab923ce1558716e1997a1e74057fe86bf" +checksum = "3e9eab5d4b5ff2f228b763d38442adc9b084b0a465409b059fac5c2308835ec2" dependencies = [ + "ark-ec", "ark-ff", "ark-serialize", "ark-std", "derivative", - "hashbrown 0.13.2", ] [[package]] -name = "ark-r1cs-std" -version = "0.4.0" +name = "ark-poly" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de1d1472e5cb020cb3405ce2567c91c8d43f21b674aef37b0202f5c3304761db" +checksum = "d320bfc44ee185d899ccbadfa8bc31aab923ce1558716e1997a1e74057fe86bf" dependencies = [ - "ark-ec", "ark-ff", - "ark-relations", + "ark-serialize", "ark-std", "derivative", - "num-bigint", - "num-integer", - "num-traits", - "tracing", -] - -[[package]] -name = "ark-relations" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00796b6efc05a3f48225e59cb6a2cda78881e7c390872d5786aaf112f31fb4f0" -dependencies = [ - "ark-ff", - "ark-std", - "tracing", - "tracing-subscriber", + "hashbrown 0.13.2", ] [[package]] name = "ark-scale" -version = "0.0.10" +version = "0.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49b08346a3e38e2be792ef53ee168623c9244d968ff00cd70fb9932f6fe36393" +checksum = "51bd73bb6ddb72630987d37fa963e99196896c0d0ea81b7c894567e74a2f83af" dependencies = [ "ark-ec", "ark-ff", "ark-serialize", "ark-std", "parity-scale-codec", + "scale-info", ] [[package]] name = "ark-secret-scalar" version = "0.0.2" -source = "git+https://github.com/w3f/ring-vrf?rev=f4fe253#f4fe2534ccc6d916cd10d9c16891e673728ec8b4" +source = "git+https://github.com/w3f/ring-vrf?rev=3ddc205#3ddc2051066c4b3f0eadd0ba5700df12500d9754" dependencies = [ "ark-ec", "ark-ff", @@ -556,12 +587,13 @@ checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" dependencies = [ "num-traits", "rand 0.8.5", + "rayon", ] [[package]] name = "ark-transcript" version = "0.0.2" -source = "git+https://github.com/w3f/ring-vrf?rev=f4fe253#f4fe2534ccc6d916cd10d9c16891e673728ec8b4" +source = "git+https://github.com/w3f/ring-vrf?rev=3ddc205#3ddc2051066c4b3f0eadd0ba5700df12500d9754" dependencies = [ "ark-ff", "ark-serialize", @@ -623,7 +655,7 @@ dependencies = [ "num-traits", "rusticata-macros", "thiserror", - "time", + "time 0.3.27", ] [[package]] @@ -639,7 +671,7 @@ dependencies = [ "num-traits", "rusticata-macros", "thiserror", - "time", + "time 0.3.27", ] [[package]] @@ -728,6 +760,7 @@ dependencies = [ "pallet-authorship", "pallet-balances", "pallet-collator-selection", + "pallet-message-queue", "pallet-multisig", "pallet-nft-fractionalization", "pallet-nfts", @@ -742,7 +775,6 @@ dependencies = [ "pallet-utility", "pallet-xcm", "pallet-xcm-benchmarks", - "parachain-info", "parachains-common", "parity-scale-codec", "polkadot-core-primitives", @@ -760,11 +792,12 @@ dependencies = [ "sp-offchain", "sp-runtime", "sp-session", - "sp-std", - "sp-storage", + "sp-std 8.0.0", + "sp-storage 13.0.0", "sp-transaction-pool", "sp-version", "sp-weights", + "staging-parachain-info", "staging-xcm", "staging-xcm-builder", 
"staging-xcm-executor", @@ -800,6 +833,7 @@ dependencies = [ "pallet-authorship", "pallet-balances", "pallet-collator-selection", + "pallet-message-queue", "pallet-multisig", "pallet-nfts", "pallet-nfts-runtime-api", @@ -812,7 +846,6 @@ dependencies = [ "pallet-utility", "pallet-xcm", "pallet-xcm-benchmarks", - "parachain-info", "parachains-common", "parity-scale-codec", "polkadot-core-primitives", @@ -829,45 +862,181 @@ dependencies = [ "sp-offchain", "sp-runtime", "sp-session", - "sp-std", - "sp-storage", + "sp-std 8.0.0", + "sp-storage 13.0.0", "sp-transaction-pool", "sp-version", "sp-weights", + "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", "substrate-wasm-builder", ] +[[package]] +name = "asset-hub-rococo-emulated-chain" +version = "0.0.0" +dependencies = [ + "asset-hub-rococo-runtime", + "cumulus-primitives-core", + "emulated-integration-tests-common", + "frame-support", + "parachains-common", + "rococo-emulated-chain", + "serde_json", + "sp-core", + "sp-runtime", +] + +[[package]] +name = "asset-hub-rococo-integration-tests" +version = "1.0.0" +dependencies = [ + "assert_matches", + "asset-hub-rococo-runtime", + "asset-test-utils", + "emulated-integration-tests-common", + "frame-support", + "pallet-asset-conversion", + "pallet-assets", + "pallet-balances", + "pallet-message-queue", + "pallet-xcm", + "parachains-common", + "parity-scale-codec", + "penpal-runtime", + "rococo-runtime", + "rococo-system-emulated-network", + "sp-runtime", + "staging-xcm", + "staging-xcm-executor", +] + +[[package]] +name = "asset-hub-rococo-runtime" +version = "0.9.420" +dependencies = [ + "asset-test-utils", + "assets-common", + "bp-asset-hub-rococo", + "bp-asset-hub-westend", + "bp-bridge-hub-rococo", + "bp-bridge-hub-westend", + "cumulus-pallet-aura-ext", + "cumulus-pallet-dmp-queue", + "cumulus-pallet-parachain-system", + "cumulus-pallet-session-benchmarking", + "cumulus-pallet-xcm", + "cumulus-pallet-xcmp-queue", + "cumulus-primitives-core", + "cumulus-primitives-utility", + "frame-benchmarking", + "frame-executive", + "frame-support", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "frame-try-runtime", + "hex-literal", + "log", + "pallet-asset-conversion", + "pallet-asset-conversion-tx-payment", + "pallet-assets", + "pallet-aura", + "pallet-authorship", + "pallet-balances", + "pallet-collator-selection", + "pallet-message-queue", + "pallet-multisig", + "pallet-nft-fractionalization", + "pallet-nfts", + "pallet-nfts-runtime-api", + "pallet-proxy", + "pallet-session", + "pallet-state-trie-migration", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "pallet-uniques", + "pallet-utility", + "pallet-xcm", + "pallet-xcm-benchmarks", + "pallet-xcm-bridge-hub-router", + "parachains-common", + "parity-scale-codec", + "polkadot-core-primitives", + "polkadot-parachain-primitives", + "polkadot-runtime-common", + "primitive-types", + "rococo-runtime-constants", + "scale-info", + "smallvec", + "sp-api", + "sp-block-builder", + "sp-consensus-aura", + "sp-core", + "sp-genesis-builder", + "sp-inherents", + "sp-offchain", + "sp-runtime", + "sp-session", + "sp-std 8.0.0", + "sp-storage 13.0.0", + "sp-transaction-pool", + "sp-version", + "sp-weights", + "staging-parachain-info", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "substrate-wasm-builder", +] + +[[package]] +name = "asset-hub-westend-emulated-chain" +version = "0.0.0" +dependencies = [ + 
"asset-hub-westend-runtime", + "cumulus-primitives-core", + "emulated-integration-tests-common", + "frame-support", + "parachains-common", + "serde_json", + "sp-core", + "sp-runtime", + "westend-emulated-chain", +] + [[package]] name = "asset-hub-westend-integration-tests" version = "1.0.0" dependencies = [ "assert_matches", "asset-hub-westend-runtime", + "asset-test-utils", "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", + "emulated-integration-tests-common", "frame-support", "frame-system", - "integration-tests-common", "pallet-asset-conversion", "pallet-asset-rate", "pallet-assets", "pallet-balances", + "pallet-message-queue", "pallet-treasury", "pallet-xcm", "parachains-common", "parity-scale-codec", - "polkadot-core-primitives", - "polkadot-parachain-primitives", "polkadot-runtime-common", - "polkadot-runtime-parachains", "sp-runtime", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", - "xcm-emulator", + "westend-runtime", + "westend-runtime-constants", + "westend-system-emulated-network", ] [[package]] @@ -876,6 +1045,10 @@ version = "0.9.420" dependencies = [ "asset-test-utils", "assets-common", + "bp-asset-hub-rococo", + "bp-asset-hub-westend", + "bp-bridge-hub-rococo", + "bp-bridge-hub-westend", "cumulus-pallet-aura-ext", "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", @@ -900,6 +1073,7 @@ dependencies = [ "pallet-authorship", "pallet-balances", "pallet-collator-selection", + "pallet-message-queue", "pallet-multisig", "pallet-nft-fractionalization", "pallet-nfts", @@ -913,7 +1087,7 @@ dependencies = [ "pallet-utility", "pallet-xcm", "pallet-xcm-benchmarks", - "parachain-info", + "pallet-xcm-bridge-hub-router", "parachains-common", "parity-scale-codec", "polkadot-core-primitives", @@ -928,14 +1102,14 @@ dependencies = [ "sp-core", "sp-genesis-builder", "sp-inherents", - "sp-io", "sp-offchain", "sp-runtime", "sp-session", - "sp-std", - "sp-storage", + "sp-std 8.0.0", + "sp-storage 13.0.0", "sp-transaction-pool", "sp-version", + "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", @@ -948,7 +1122,6 @@ name = "asset-test-utils" version = "1.0.0" dependencies = [ "assets-common", - "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-xcmp-queue", "cumulus-primitives-core", @@ -962,7 +1135,7 @@ dependencies = [ "pallet-collator-selection", "pallet-session", "pallet-xcm", - "parachain-info", + "pallet-xcm-bridge-hub-router", "parachains-common", "parachains-runtimes-test-utils", "parity-scale-codec", @@ -971,8 +1144,10 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", + "staging-parachain-info", "staging-xcm", + "staging-xcm-builder", "staging-xcm-executor", "substrate-wasm-builder", ] @@ -993,7 +1168,7 @@ dependencies = [ "scale-info", "sp-api", "sp-runtime", - "sp-std", + "sp-std 8.0.0", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", @@ -1096,17 +1271,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "async-recursion" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e97ce7de6cf12de5d7226c73f5ba9811622f4db3a5b91b55c53e987e5f91cba" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.38", -] - [[package]] name = "async-stream" version = "0.3.5" @@ -1115,7 +1279,7 @@ checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" dependencies = [ "async-stream-impl", "futures-core", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.12", ] [[package]] @@ 
-1156,7 +1320,7 @@ dependencies = [ "futures-sink", "futures-util", "memchr", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.12", ] [[package]] @@ -1205,8 +1369,8 @@ dependencies = [ [[package]] name = "bandersnatch_vrfs" -version = "0.0.1" -source = "git+https://github.com/w3f/ring-vrf?rev=f4fe253#f4fe2534ccc6d916cd10d9c16891e673728ec8b4" +version = "0.0.4" +source = "git+https://github.com/w3f/ring-vrf?rev=3ddc205#3ddc2051066c4b3f0eadd0ba5700df12500d9754" dependencies = [ "ark-bls12-381", "ark-ec", @@ -1221,6 +1385,8 @@ dependencies = [ "rand_core 0.6.4", "ring 0.1.0", "sha2 0.10.7", + "sp-ark-bls12-381", + "sp-ark-ed-on-bls12-381-bandersnatch", "zeroize", ] @@ -1327,6 +1493,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93f2635620bf0b9d4576eb7bb9a38a55df78bd1205d26fa994b25911a69f212f" dependencies = [ "bitcoin_hashes", + "rand 0.7.3", + "rand_core 0.5.1", + "serde", + "unicode-normalization", ] [[package]] @@ -1355,6 +1525,7 @@ checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" dependencies = [ "funty", "radium", + "serde", "tap", "wyz", ] @@ -1414,16 +1585,15 @@ dependencies = [ [[package]] name = "blake3" -version = "1.4.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "199c42ab6972d92c9f8995f086273d25c42fc0f7b2a1fcefba465c1352d25ba5" +checksum = "0231f06152bf547e9c2b5194f247cd97aacf6dcd8b15d8e5ec0663f64580da87" dependencies = [ "arrayref", "arrayvec 0.7.4", "cc", "cfg-if", "constant_time_eq 0.3.0", - "digest 0.10.7", ] [[package]] @@ -1518,7 +1688,7 @@ dependencies = [ ] [[package]] -name = "bp-asset-hub-kusama" +name = "bp-asset-hub-rococo" version = "0.1.0" dependencies = [ "bp-xcm-bridge-hub-router", @@ -1528,14 +1698,13 @@ dependencies = [ ] [[package]] -name = "bp-asset-hub-polkadot" +name = "bp-asset-hub-westend" version = "0.1.0" dependencies = [ "bp-xcm-bridge-hub-router", "frame-support", "parity-scale-codec", "scale-info", - "sp-runtime", ] [[package]] @@ -1549,7 +1718,7 @@ dependencies = [ "frame-system", "polkadot-primitives", "sp-api", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -1562,7 +1731,7 @@ dependencies = [ "frame-support", "sp-api", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -1575,7 +1744,7 @@ dependencies = [ "frame-support", "sp-api", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -1588,11 +1757,11 @@ dependencies = [ "frame-support", "sp-api", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] -name = "bp-bridge-hub-wococo" +name = "bp-bridge-hub-westend" version = "0.1.0" dependencies = [ "bp-bridge-hub-cumulus", @@ -1601,7 +1770,7 @@ dependencies = [ "frame-support", "sp-api", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -1620,7 +1789,7 @@ dependencies = [ "sp-consensus-grandpa", "sp-core", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -1632,7 +1801,7 @@ dependencies = [ "bp-runtime", "frame-support", "sp-api", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -1648,7 +1817,7 @@ dependencies = [ "scale-info", "serde", "sp-core", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -1664,7 +1833,7 @@ dependencies = [ "scale-info", "sp-core", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -1676,7 +1845,7 @@ dependencies = [ "bp-runtime", "frame-support", "sp-api", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -1693,7 +1862,7 @@ dependencies = [ "scale-info", "sp-api", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -1711,7 +1880,7 @@ dependencies = [ 
"serde", "sp-core", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -1726,7 +1895,7 @@ dependencies = [ "parity-scale-codec", "scale-info", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -1738,7 +1907,7 @@ dependencies = [ "bp-runtime", "frame-support", "sp-api", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -1759,7 +1928,7 @@ dependencies = [ "sp-io", "sp-runtime", "sp-state-machine", - "sp-std", + "sp-std 8.0.0", "sp-trie", "trie-db", ] @@ -1779,21 +1948,20 @@ dependencies = [ "sp-consensus-grandpa", "sp-core", "sp-runtime", - "sp-std", + "sp-std 8.0.0", "sp-trie", ] [[package]] -name = "bp-wococo" +name = "bp-westend" version = "0.1.0" dependencies = [ "bp-header-chain", "bp-polkadot-core", - "bp-rococo", "bp-runtime", "frame-support", "sp-api", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -1832,6 +2000,7 @@ dependencies = [ "pallet-authorship", "pallet-balances", "pallet-collator-selection", + "pallet-message-queue", "pallet-multisig", "pallet-session", "pallet-timestamp", @@ -1840,7 +2009,6 @@ dependencies = [ "pallet-utility", "pallet-xcm", "pallet-xcm-benchmarks", - "parachain-info", "parachains-common", "parity-scale-codec", "polkadot-core-primitives", @@ -1859,10 +2027,11 @@ dependencies = [ "sp-offchain", "sp-runtime", "sp-session", - "sp-std", - "sp-storage", + "sp-std 8.0.0", + "sp-storage 13.0.0", "sp-transaction-pool", "sp-version", + "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", @@ -1895,6 +2064,7 @@ dependencies = [ "pallet-authorship", "pallet-balances", "pallet-collator-selection", + "pallet-message-queue", "pallet-multisig", "pallet-session", "pallet-timestamp", @@ -1903,7 +2073,6 @@ dependencies = [ "pallet-utility", "pallet-xcm", "pallet-xcm-benchmarks", - "parachain-info", "parachains-common", "parity-scale-codec", "polkadot-core-primitives", @@ -1922,42 +2091,62 @@ dependencies = [ "sp-offchain", "sp-runtime", "sp-session", - "sp-std", - "sp-storage", + "sp-std 8.0.0", + "sp-storage 13.0.0", "sp-transaction-pool", "sp-version", + "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", "substrate-wasm-builder", ] +[[package]] +name = "bridge-hub-rococo-emulated-chain" +version = "0.0.0" +dependencies = [ + "bridge-hub-rococo-runtime", + "cumulus-primitives-core", + "emulated-integration-tests-common", + "frame-support", + "parachains-common", + "serde_json", + "sp-core", + "sp-runtime", +] + [[package]] name = "bridge-hub-rococo-integration-tests" version = "1.0.0" dependencies = [ + "asset-test-utils", "bp-messages", + "bridge-hub-rococo-runtime", "cumulus-pallet-dmp-queue", "cumulus-pallet-xcmp-queue", + "emulated-integration-tests-common", "frame-support", - "integration-tests-common", + "pallet-assets", + "pallet-balances", "pallet-bridge-messages", + "pallet-message-queue", "pallet-xcm", "parachains-common", "parity-scale-codec", - "polkadot-core-primitives", - "polkadot-parachain-primitives", - "polkadot-runtime-parachains", + "rococo-westend-system-emulated-network", "staging-xcm", - "xcm-emulator", + "staging-xcm-executor", ] [[package]] name = "bridge-hub-rococo-runtime" version = "0.1.0" dependencies = [ + "bp-asset-hub-rococo", + "bp-asset-hub-westend", "bp-bridge-hub-rococo", - "bp-bridge-hub-wococo", + "bp-bridge-hub-westend", "bp-header-chain", "bp-messages", "bp-parachains", @@ -1965,7 +2154,7 @@ dependencies = [ "bp-relayers", "bp-rococo", "bp-runtime", - "bp-wococo", + "bp-westend", "bridge-hub-test-utils", "bridge-runtime-common", 
"cumulus-pallet-aura-ext", @@ -1993,6 +2182,7 @@ dependencies = [ "pallet-bridge-parachains", "pallet-bridge-relayers", "pallet-collator-selection", + "pallet-message-queue", "pallet-multisig", "pallet-session", "pallet-timestamp", @@ -2001,7 +2191,6 @@ dependencies = [ "pallet-utility", "pallet-xcm", "pallet-xcm-benchmarks", - "parachain-info", "parachains-common", "parity-scale-codec", "polkadot-core-primitives", @@ -2022,10 +2211,11 @@ dependencies = [ "sp-offchain", "sp-runtime", "sp-session", - "sp-std", - "sp-storage", + "sp-std 8.0.0", + "sp-storage 13.0.0", "sp-transaction-pool", "sp-version", + "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", @@ -2037,10 +2227,7 @@ dependencies = [ name = "bridge-hub-test-utils" version = "0.1.0" dependencies = [ - "assert_matches", "asset-test-utils", - "bp-bridge-hub-rococo", - "bp-bridge-hub-wococo", "bp-header-chain", "bp-messages", "bp-parachains", @@ -2049,7 +2236,6 @@ dependencies = [ "bp-runtime", "bp-test-utils", "bridge-runtime-common", - "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-xcmp-queue", "frame-benchmarking", @@ -2067,7 +2253,6 @@ dependencies = [ "pallet-utility", "pallet-xcm", "pallet-xcm-benchmarks", - "parachain-info", "parachains-common", "parachains-runtimes-test-utils", "parity-scale-codec", @@ -2075,9 +2260,131 @@ dependencies = [ "sp-io", "sp-keyring", "sp-runtime", + "sp-tracing 10.0.0", + "staging-parachain-info", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", +] + +[[package]] +name = "bridge-hub-westend-emulated-chain" +version = "0.0.0" +dependencies = [ + "bridge-hub-westend-runtime", + "cumulus-primitives-core", + "emulated-integration-tests-common", + "frame-support", + "parachains-common", + "serde_json", + "sp-core", + "sp-runtime", +] + +[[package]] +name = "bridge-hub-westend-integration-tests" +version = "1.0.0" +dependencies = [ + "asset-test-utils", + "bp-messages", + "bridge-hub-westend-runtime", + "cumulus-pallet-dmp-queue", + "cumulus-pallet-xcmp-queue", + "emulated-integration-tests-common", + "frame-support", + "pallet-assets", + "pallet-balances", + "pallet-bridge-messages", + "pallet-message-queue", + "pallet-xcm", + "parachains-common", + "parity-scale-codec", + "rococo-westend-system-emulated-network", + "staging-xcm", + "staging-xcm-executor", +] + +[[package]] +name = "bridge-hub-westend-runtime" +version = "0.1.0" +dependencies = [ + "bp-asset-hub-westend", + "bp-bridge-hub-rococo", + "bp-bridge-hub-westend", + "bp-header-chain", + "bp-messages", + "bp-parachains", + "bp-polkadot-core", + "bp-relayers", + "bp-rococo", + "bp-runtime", + "bp-westend", + "bridge-hub-test-utils", + "bridge-runtime-common", + "cumulus-pallet-aura-ext", + "cumulus-pallet-dmp-queue", + "cumulus-pallet-parachain-system", + "cumulus-pallet-session-benchmarking", + "cumulus-pallet-xcm", + "cumulus-pallet-xcmp-queue", + "cumulus-primitives-core", + "cumulus-primitives-utility", + "frame-benchmarking", + "frame-executive", + "frame-support", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "frame-try-runtime", + "hex-literal", + "log", + "pallet-aura", + "pallet-authorship", + "pallet-balances", + "pallet-bridge-grandpa", + "pallet-bridge-messages", + "pallet-bridge-parachains", + "pallet-bridge-relayers", + "pallet-collator-selection", + "pallet-message-queue", + "pallet-multisig", + "pallet-session", + "pallet-timestamp", + "pallet-transaction-payment", + 
"pallet-transaction-payment-rpc-runtime-api", + "pallet-utility", + "pallet-xcm", + "pallet-xcm-benchmarks", + "parachains-common", + "parity-scale-codec", + "polkadot-core-primitives", + "polkadot-parachain-primitives", + "polkadot-runtime-common", + "scale-info", + "serde", + "smallvec", + "sp-api", + "sp-block-builder", + "sp-consensus-aura", + "sp-core", + "sp-genesis-builder", + "sp-inherents", + "sp-io", + "sp-keyring", + "sp-offchain", + "sp-runtime", + "sp-session", + "sp-std 8.0.0", + "sp-storage 13.0.0", + "sp-transaction-pool", + "sp-version", + "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", + "static_assertions", + "substrate-wasm-builder", + "westend-runtime-constants", ] [[package]] @@ -2109,7 +2416,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", "sp-trie", "staging-xcm", "staging-xcm-builder", @@ -2343,30 +2650,17 @@ dependencies = [ "zeroize", ] -[[package]] -name = "chain-spec-builder" -version = "2.0.0" -dependencies = [ - "ansi_term", - "clap 4.4.6", - "node-cli", - "rand 0.8.5", - "sc-chain-spec", - "sc-keystore", - "sp-core", - "sp-keystore", -] - [[package]] name = "chrono" -version = "0.4.30" +version = "0.4.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "defd4e7873dbddba6c7c91e199c7fcb946abc4a6a4ac3195400bcfb01b5de877" +checksum = "f56b4c72906975ca04becb8a30e102dfecddd0c06181e3e95ddc444be28881f8" dependencies = [ "android-tzdata", "iana-time-zone", "js-sys", "num-traits", + "time 0.1.45", "wasm-bindgen", "windows-targets 0.48.5", ] @@ -2496,6 +2790,7 @@ dependencies = [ "anstyle", "clap_lex 0.5.1", "strsim", + "terminal_size", ] [[package]] @@ -2570,7 +2865,79 @@ dependencies = [ ] [[package]] -name = "collectives-polkadot-runtime" +name = "collectives-polkadot-runtime" +version = "1.0.0" +dependencies = [ + "cumulus-pallet-aura-ext", + "cumulus-pallet-dmp-queue", + "cumulus-pallet-parachain-system", + "cumulus-pallet-session-benchmarking", + "cumulus-pallet-xcm", + "cumulus-pallet-xcmp-queue", + "cumulus-primitives-core", + "cumulus-primitives-utility", + "frame-benchmarking", + "frame-executive", + "frame-support", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "frame-try-runtime", + "hex-literal", + "log", + "pallet-alliance", + "pallet-aura", + "pallet-authorship", + "pallet-balances", + "pallet-collator-selection", + "pallet-collective", + "pallet-collective-content", + "pallet-core-fellowship", + "pallet-message-queue", + "pallet-multisig", + "pallet-preimage", + "pallet-proxy", + "pallet-ranked-collective", + "pallet-referenda", + "pallet-salary", + "pallet-scheduler", + "pallet-session", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "pallet-utility", + "pallet-xcm", + "parachains-common", + "parity-scale-codec", + "polkadot-core-primitives", + "polkadot-parachain-primitives", + "polkadot-runtime-common", + "scale-info", + "smallvec", + "sp-api", + "sp-arithmetic", + "sp-block-builder", + "sp-consensus-aura", + "sp-core", + "sp-genesis-builder", + "sp-inherents", + "sp-io", + "sp-offchain", + "sp-runtime", + "sp-session", + "sp-std 8.0.0", + "sp-storage 13.0.0", + "sp-transaction-pool", + "sp-version", + "staging-parachain-info", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "substrate-wasm-builder", +] + +[[package]] +name = "collectives-westend-runtime" version = "1.0.0" dependencies = [ "cumulus-pallet-aura-ext", @@ -2598,6 
+2965,7 @@ dependencies = [ "pallet-collective", "pallet-collective-content", "pallet-core-fellowship", + "pallet-message-queue", "pallet-multisig", "pallet-preimage", "pallet-proxy", @@ -2611,7 +2979,6 @@ dependencies = [ "pallet-transaction-payment-rpc-runtime-api", "pallet-utility", "pallet-xcm", - "parachain-info", "parachains-common", "parity-scale-codec", "polkadot-core-primitives", @@ -2630,14 +2997,17 @@ dependencies = [ "sp-offchain", "sp-runtime", "sp-session", - "sp-std", - "sp-storage", + "sp-std 8.0.0", + "sp-storage 13.0.0", "sp-transaction-pool", "sp-version", + "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", "substrate-wasm-builder", + "testnets-common", + "westend-runtime-constants", ] [[package]] @@ -2694,7 +3064,7 @@ dependencies = [ [[package]] name = "common" version = "0.1.0" -source = "git+https://github.com/w3f/ring-proof?rev=8657210#86572101f4210647984ab4efedba6b3fcc890895" +source = "git+https://github.com/burdges/ring-proof?branch=patch-1#05a756076cb20f981a52afea3a620168de49f95f" dependencies = [ "ark-ec", "ark-ff", @@ -2814,6 +3184,7 @@ dependencies = [ "pallet-contracts", "pallet-contracts-primitives", "pallet-insecure-randomness-collective-flip", + "pallet-message-queue", "pallet-multisig", "pallet-session", "pallet-sudo", @@ -2822,12 +3193,12 @@ dependencies = [ "pallet-transaction-payment-rpc-runtime-api", "pallet-utility", "pallet-xcm", - "parachain-info", "parachains-common", "parity-scale-codec", "polkadot-core-primitives", "polkadot-parachain-primitives", "polkadot-runtime-common", + "rococo-runtime-constants", "scale-info", "smallvec", "sp-api", @@ -2839,10 +3210,11 @@ dependencies = [ "sp-offchain", "sp-runtime", "sp-session", - "sp-std", - "sp-storage", + "sp-std 8.0.0", + "sp-storage 13.0.0", "sp-transaction-pool", "sp-version", + "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", @@ -3288,7 +3660,7 @@ dependencies = [ "sp-maybe-compressed-blob", "sp-runtime", "sp-state-machine", - "sp-tracing", + "sp-tracing 10.0.0", "tracing", ] @@ -3359,7 +3731,7 @@ dependencies = [ "sp-core", "sp-runtime", "sp-timestamp", - "sp-tracing", + "sp-tracing 10.0.0", "sp-trie", "substrate-prometheus-endpoint", "tracing", @@ -3510,7 +3882,7 @@ dependencies = [ "sp-application-crypto", "sp-consensus-aura", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -3518,6 +3890,7 @@ name = "cumulus-pallet-dmp-queue" version = "0.1.0" dependencies = [ "cumulus-primitives-core", + "frame-benchmarking", "frame-support", "frame-system", "log", @@ -3526,8 +3899,8 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", - "sp-version", + "sp-std 8.0.0", + "sp-tracing 10.0.0", "staging-xcm", ] @@ -3543,25 +3916,30 @@ dependencies = [ "cumulus-test-client", "cumulus-test-relay-sproof-builder", "environmental", + "frame-benchmarking", "frame-support", "frame-system", + "futures", "hex-literal", "impl-trait-for-tuples", "lazy_static", "log", + "pallet-message-queue", "parity-scale-codec", "polkadot-parachain-primitives", + "polkadot-runtime-parachains", + "rand 0.8.5", "sc-client-api", "scale-info", "sp-core", - "sp-externalities", + "sp-externalities 0.19.0", "sp-inherents", "sp-io", "sp-keyring", "sp-runtime", "sp-state-machine", - "sp-std", - "sp-tracing", + "sp-std 8.0.0", + "sp-tracing 10.0.0", "sp-trie", "sp-version", "staging-xcm", @@ -3588,7 +3966,7 @@ dependencies = [ "pallet-session", "parity-scale-codec", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -3603,7 +3981,7 @@ 
dependencies = [ "polkadot-primitives", "scale-info", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -3617,7 +3995,7 @@ dependencies = [ "scale-info", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", "staging-xcm", ] @@ -3625,6 +4003,8 @@ dependencies = [ name = "cumulus-pallet-xcmp-queue" version = "0.1.0" dependencies = [ + "bounded-collections", + "bp-xcm-bridge-hub-router", "cumulus-pallet-parachain-system", "cumulus-primitives-core", "frame-benchmarking", @@ -3632,14 +4012,15 @@ dependencies = [ "frame-system", "log", "pallet-balances", + "pallet-message-queue", "parity-scale-codec", "polkadot-runtime-common", - "rand_chacha 0.3.1", + "polkadot-runtime-parachains", "scale-info", "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", @@ -3656,7 +4037,7 @@ dependencies = [ "parity-scale-codec", "scale-info", "sp-runtime", - "sp-std", + "sp-std 8.0.0", "staging-xcm", ] @@ -3670,7 +4051,7 @@ dependencies = [ "sp-api", "sp-consensus-aura", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -3684,7 +4065,7 @@ dependencies = [ "scale-info", "sp-api", "sp-runtime", - "sp-std", + "sp-std 8.0.0", "sp-trie", "staging-xcm", ] @@ -3705,8 +4086,8 @@ dependencies = [ "sp-inherents", "sp-runtime", "sp-state-machine", - "sp-std", - "sp-storage", + "sp-std 8.0.0", + "sp-storage 13.0.0", "sp-trie", "tracing", ] @@ -3719,7 +4100,7 @@ dependencies = [ "futures", "parity-scale-codec", "sp-inherents", - "sp-std", + "sp-std 8.0.0", "sp-timestamp", ] @@ -3730,11 +4111,13 @@ dependencies = [ "cumulus-primitives-core", "frame-support", "log", + "pallet-xcm-benchmarks", "parity-scale-codec", "polkadot-runtime-common", + "polkadot-runtime-parachains", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", @@ -3800,6 +4183,7 @@ dependencies = [ "polkadot-core-primitives", "polkadot-network-bridge", "polkadot-node-collation-generation", + "polkadot-node-core-prospective-parachains", "polkadot-node-core-runtime-api", "polkadot-node-network-protocol", "polkadot-node-subsystem-util", @@ -3811,11 +4195,11 @@ dependencies = [ "sc-service", "sc-tracing", "sc-utils", - "schnellru", "sp-api", "sp-consensus", "sp-consensus-babe", "sp-runtime", + "substrate-prometheus-endpoint", "tracing", ] @@ -3848,7 +4232,7 @@ dependencies = [ "sp-core", "sp-runtime", "sp-state-machine", - "sp-storage", + "sp-storage 13.0.0", "thiserror", "tokio", "tokio-util", @@ -3896,7 +4280,7 @@ dependencies = [ "polkadot-primitives", "sp-runtime", "sp-state-machine", - "sp-std", + "sp-std 8.0.0", "sp-trie", ] @@ -3912,6 +4296,7 @@ dependencies = [ "frame-system-rpc-runtime-api", "pallet-balances", "pallet-glutton", + "pallet-message-queue", "pallet-sudo", "pallet-timestamp", "pallet-transaction-payment", @@ -3920,12 +4305,13 @@ dependencies = [ "sp-api", "sp-block-builder", "sp-core", + "sp-genesis-builder", "sp-inherents", "sp-io", "sp-offchain", "sp-runtime", "sp-session", - "sp-std", + "sp-std 8.0.0", "sp-transaction-pool", "sp-version", "substrate-wasm-builder", @@ -3986,6 +4372,7 @@ dependencies = [ "sc-transaction-pool", "sc-transaction-pool-api", "serde", + "serde_json", "sp-api", "sp-arithmetic", "sp-authority-discovery", @@ -3998,7 +4385,7 @@ dependencies = [ "sp-runtime", "sp-state-machine", "sp-timestamp", - "sp-tracing", + "sp-tracing 10.0.0", "substrate-test-client", "substrate-test-utils", "tempfile", @@ -4426,7 +4813,7 @@ checksum = 
"86e3bdc80eee6e16b2b6b0f87fbc98c04bee3455e35174c0de1a125d0688c632" [[package]] name = "dleq_vrf" version = "0.0.2" -source = "git+https://github.com/w3f/ring-vrf?rev=f4fe253#f4fe2534ccc6d916cd10d9c16891e673728ec8b4" +source = "git+https://github.com/w3f/ring-vrf?rev=3ddc205#3ddc2051066c4b3f0eadd0ba5700df12500d9754" dependencies = [ "ark-ec", "ark-ff", @@ -4457,18 +4844,18 @@ checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" [[package]] name = "docify" -version = "0.2.4" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76ee528c501ddd15d5181997e9518e59024844eac44fd1e40cb20ddb2a8562fa" +checksum = "4235e9b248e2ba4b92007fe9c646f3adf0ffde16dc74713eacc92b8bc58d8d2f" dependencies = [ "docify_macros", ] [[package]] name = "docify_macros" -version = "0.2.4" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ca01728ab2679c464242eca99f94e2ce0514b52ac9ad950e2ed03fca991231c" +checksum = "47020e12d7c7505670d1363dd53d6c23724f71a90a3ae32ff8eba40de8404626" dependencies = [ "common-path", "derive-syn-parse", @@ -4653,6 +5040,40 @@ dependencies = [ "zeroize", ] +[[package]] +name = "emulated-integration-tests-common" +version = "1.0.0" +dependencies = [ + "asset-test-utils", + "bp-messages", + "bridge-runtime-common", + "cumulus-pallet-parachain-system", + "cumulus-pallet-xcmp-queue", + "cumulus-primitives-core", + "frame-support", + "pallet-assets", + "pallet-balances", + "pallet-bridge-messages", + "pallet-im-online", + "pallet-message-queue", + "pallet-xcm", + "parachains-common", + "parity-scale-codec", + "paste", + "polkadot-primitives", + "polkadot-runtime-parachains", + "polkadot-service", + "sc-consensus-grandpa", + "serde_json", + "sp-authority-discovery", + "sp-consensus-babe", + "sp-consensus-beefy", + "sp-core", + "sp-runtime", + "staging-xcm", + "xcm-emulator", +] + [[package]] name = "encode_unicode" version = "0.3.6" @@ -4920,11 +5341,12 @@ dependencies = [ [[package]] name = "fdlimit" -version = "0.2.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c4c9e43643f5a3be4ca5b67d26b98031ff9db6806c3440ae32e02e3ceac3f1b" +checksum = "e182f7dbc2ef73d9ef67351c5fbbea084729c48362d3ce9dd44c28e32e277fe5" dependencies = [ "libc", + "thiserror", ] [[package]] @@ -4966,7 +5388,7 @@ dependencies = [ [[package]] name = "fflonk" version = "0.1.0" -source = "git+https://github.com/w3f/fflonk#26a5045b24e169cffc1f9328ca83d71061145c40" +source = "git+https://github.com/w3f/fflonk#1beb0585e1c8488956fac7f05da061f9b41e8948" dependencies = [ "ark-ec", "ark-ff", @@ -5109,6 +5531,36 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" +[[package]] +name = "frame" +version = "0.0.1-dev" +dependencies = [ + "docify", + "frame-executive", + "frame-support", + "frame-system", + "frame-system-rpc-runtime-api", + "log", + "pallet-examples", + "parity-scale-codec", + "scale-info", + "simple-mermaid", + "sp-api", + "sp-arithmetic", + "sp-block-builder", + "sp-consensus-aura", + "sp-consensus-grandpa", + "sp-core", + "sp-inherents", + "sp-io", + "sp-offchain", + "sp-runtime", + "sp-session", + "sp-std 8.0.0", + "sp-transaction-pool", + "sp-version", +] + [[package]] name = "frame-benchmarking" version = "4.0.0-dev" @@ -5130,9 +5582,9 @@ dependencies = [ "sp-io", "sp-keystore", "sp-runtime", - "sp-runtime-interface", - "sp-std", - "sp-storage", + 
"sp-runtime-interface 17.0.0", + "sp-std 8.0.0", + "sp-storage 13.0.0", "static_assertions", ] @@ -5170,15 +5622,15 @@ dependencies = [ "sp-blockchain", "sp-core", "sp-database", - "sp-externalities", + "sp-externalities 0.19.0", "sp-inherents", "sp-io", "sp-keystore", "sp-runtime", "sp-state-machine", - "sp-storage", + "sp-storage 13.0.0", "sp-trie", - "sp-wasm-interface", + "sp-wasm-interface 14.0.0", "thiserror", "thousands", ] @@ -5194,7 +5646,7 @@ dependencies = [ "scale-info", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -5228,7 +5680,7 @@ dependencies = [ "sp-io", "sp-npos-elections", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -5265,8 +5717,8 @@ dependencies = [ "sp-inherents", "sp-io", "sp-runtime", - "sp-std", - "sp-tracing", + "sp-std 8.0.0", + "sp-tracing 10.0.0", "sp-version", ] @@ -5286,7 +5738,6 @@ dependencies = [ name = "frame-remote-externalities" version = "0.10.0-dev" dependencies = [ - "async-recursion", "futures", "indicatif", "jsonrpsee", @@ -5297,7 +5748,7 @@ dependencies = [ "sp-io", "sp-runtime", "sp-state-machine", - "sp-tracing", + "sp-tracing 10.0.0", "spinners", "substrate-rpc-client", "tokio", @@ -5332,7 +5783,7 @@ dependencies = [ "sp-arithmetic", "sp-core", "sp-core-hashing-proc-macro", - "sp-debug-derive", + "sp-debug-derive 8.0.0", "sp-genesis-builder", "sp-inherents", "sp-io", @@ -5340,8 +5791,8 @@ dependencies = [ "sp-runtime", "sp-staking", "sp-state-machine", - "sp-std", - "sp-tracing", + "sp-std 8.0.0", + "sp-tracing 10.0.0", "sp-weights", "static_assertions", "tt-call", @@ -5407,7 +5858,7 @@ dependencies = [ "sp-metadata-ir", "sp-runtime", "sp-state-machine", - "sp-std", + "sp-std 8.0.0", "sp-version", "static_assertions", "trybuild", @@ -5438,22 +5889,32 @@ dependencies = [ "sp-runtime", ] +[[package]] +name = "frame-support-test-stg-frame-crate" +version = "0.1.0" +dependencies = [ + "frame", + "parity-scale-codec", + "scale-info", +] + [[package]] name = "frame-system" version = "4.0.0-dev" dependencies = [ "cfg-if", "criterion 0.4.0", + "docify", "frame-support", "log", "parity-scale-codec", "scale-info", "serde", "sp-core", - "sp-externalities", + "sp-externalities 0.19.0", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", "sp-version", "sp-weights", "substrate-test-runtime-client", @@ -5469,10 +5930,10 @@ dependencies = [ "parity-scale-codec", "scale-info", "sp-core", - "sp-externalities", + "sp-externalities 0.19.0", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", "sp-version", ] @@ -5492,7 +5953,7 @@ dependencies = [ "parity-scale-codec", "sp-api", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -5517,7 +5978,7 @@ version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2eeb4ed9e12f43b7fa0baae3f9cdda28352770132ef2e09a23760c29cae8bd47" dependencies = [ - "rustix 0.38.8", + "rustix 0.38.21", "windows-sys 0.48.0", ] @@ -5593,7 +6054,7 @@ dependencies = [ "futures-io", "memchr", "parking", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.12", "waker-fn", ] @@ -5650,7 +6111,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.12", "pin-utils", "slab", ] @@ -5725,10 +6186,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" dependencies = [ "cfg-if", - "js-sys", "libc", "wasi 0.11.0+wasi-snapshot-preview1", - "wasm-bindgen", ] [[package]] @@ -5806,9 +6265,54 @@ dependencies = [ 
"frame-try-runtime", "pallet-aura", "pallet-glutton", + "pallet-message-queue", + "pallet-sudo", + "pallet-timestamp", + "parachains-common", + "parity-scale-codec", + "scale-info", + "sp-api", + "sp-block-builder", + "sp-consensus-aura", + "sp-core", + "sp-genesis-builder", + "sp-inherents", + "sp-offchain", + "sp-runtime", + "sp-session", + "sp-std 8.0.0", + "sp-storage 13.0.0", + "sp-transaction-pool", + "sp-version", + "staging-parachain-info", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "substrate-wasm-builder", +] + +[[package]] +name = "glutton-westend-runtime" +version = "1.0.0" +dependencies = [ + "cumulus-pallet-aura-ext", + "cumulus-pallet-parachain-system", + "cumulus-pallet-xcm", + "cumulus-primitives-aura", + "cumulus-primitives-core", + "cumulus-primitives-timestamp", + "frame-benchmarking", + "frame-executive", + "frame-support", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "frame-try-runtime", + "pallet-aura", + "pallet-glutton", + "pallet-message-queue", "pallet-sudo", "pallet-timestamp", - "parachain-info", "parachains-common", "parity-scale-codec", "scale-info", @@ -5821,10 +6325,11 @@ dependencies = [ "sp-offchain", "sp-runtime", "sp-session", - "sp-std", - "sp-storage", + "sp-std 8.0.0", + "sp-storage 13.0.0", "sp-transaction-pool", "sp-version", + "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", @@ -6069,7 +6574,7 @@ checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ "bytes", "http", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.12", ] [[package]] @@ -6112,7 +6617,7 @@ dependencies = [ "httparse", "httpdate", "itoa", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.12", "socket2 0.4.9", "tokio", "tower-service", @@ -6330,84 +6835,37 @@ dependencies = [ "unicode-width", ] -[[package]] -name = "inout" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" -dependencies = [ - "generic-array 0.14.7", -] - -[[package]] -name = "instant" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "integer-encoding" -version = "3.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02" - -[[package]] -name = "integer-sqrt" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "276ec31bcb4a9ee45f58bec6f9ec700ae4cf4f4f8f2fa7e06cb406bd5ffdd770" -dependencies = [ - "num-traits", -] - -[[package]] -name = "integration-tests-common" -version = "1.0.0" -dependencies = [ - "asset-hub-kusama-runtime", - "asset-hub-polkadot-runtime", - "asset-hub-westend-runtime", - "bp-messages", - "bridge-hub-kusama-runtime", - "bridge-hub-polkadot-runtime", - "bridge-hub-rococo-runtime", - "bridge-runtime-common", - "collectives-polkadot-runtime", - "cumulus-pallet-dmp-queue", - "cumulus-pallet-parachain-system", - "cumulus-pallet-xcmp-queue", - "cumulus-primitives-core", - "frame-support", - "pallet-assets", - "pallet-bridge-messages", - "pallet-im-online", - "pallet-message-queue", - "pallet-staking", - "pallet-xcm", - "parachains-common", - "parity-scale-codec", - "paste", - "penpal-runtime", - "polkadot-core-primitives", - 
"polkadot-parachain-primitives", - "polkadot-primitives", - "polkadot-runtime-parachains", - "polkadot-service", - "rococo-runtime", - "rococo-runtime-constants", - "sc-consensus-grandpa", - "sp-authority-discovery", - "sp-consensus-babe", - "sp-consensus-beefy", - "sp-core", - "sp-runtime", - "staging-xcm", - "westend-runtime", - "westend-runtime-constants", - "xcm-emulator", +[[package]] +name = "inout" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" +dependencies = [ + "generic-array 0.14.7", +] + +[[package]] +name = "instant" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "integer-encoding" +version = "3.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02" + +[[package]] +name = "integer-sqrt" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "276ec31bcb4a9ee45f58bec6f9ec700ae4cf4f4f8f2fa7e06cb406bd5ffdd770" +dependencies = [ + "num-traits", ] [[package]] @@ -6471,7 +6929,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi 0.3.2", - "rustix 0.38.8", + "rustix 0.38.21", "windows-sys 0.48.0", ] @@ -6798,6 +7256,7 @@ dependencies = [ "pallet-scheduler", "pallet-session", "pallet-session-benchmarking", + "pallet-skip-feeless-payment", "pallet-society", "pallet-staking", "pallet-staking-reward-curve", @@ -6819,6 +7278,7 @@ dependencies = [ "parity-scale-codec", "primitive-types", "scale-info", + "serde_json", "sp-api", "sp-authority-discovery", "sp-block-builder", @@ -6834,8 +7294,8 @@ dependencies = [ "sp-session", "sp-staking", "sp-statement-store", - "sp-std", - "sp-storage", + "sp-std 8.0.0", + "sp-storage 13.0.0", "sp-transaction-pool", "sp-version", "static_assertions", @@ -6886,9 +7346,9 @@ dependencies = [ [[package]] name = "landlock" -version = "0.2.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "520baa32708c4e957d2fc3a186bc5bd8d26637c33137f399ddfc202adb240068" +checksum = "1530c5b973eeed4ac216af7e24baf5737645a6272e361f1fb95710678b67d9cc" dependencies = [ "enumflags2", "libc", @@ -6924,9 +7384,9 @@ checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" [[package]] name = "libc" -version = "0.2.147" +version = "0.2.149" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" +checksum = "a08173bc88b7955d1b3145aa561539096c421ac8debde8cbc3612ec635fee29b" [[package]] name = "libflate" @@ -6948,6 +7408,17 @@ dependencies = [ "rle-decode-fast", ] +[[package]] +name = "libfuzzer-sys" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a96cfd5557eb82f2b83fed4955246c988d331975a002961b07c81584d107e7f7" +dependencies = [ + "arbitrary", + "cc", + "once_cell", +] + [[package]] name = "libloading" version = "0.7.4" @@ -7510,9 +7981,9 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "linux-raw-sys" -version = "0.4.5" +version = "0.4.10" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "57bcfdad1b858c2db7c38303a6d2ad4dfaf5eb53dfeb0910128b2c26d6158503" +checksum = "da2479e8c062e40bf0066ffa0bc823de0a9368974af99c9f6df941d2c231e03f" [[package]] name = "lioness" @@ -7628,9 +8099,9 @@ dependencies = [ [[package]] name = "macro_magic" -version = "0.4.2" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aee866bfee30d2d7e83835a4574aad5b45adba4cc807f2a3bbba974e5d4383c9" +checksum = "e03844fc635e92f3a0067e25fa4bf3e3dbf3f2927bf3aa01bb7bc8f1c428949d" dependencies = [ "macro_magic_core", "macro_magic_macros", @@ -7640,9 +8111,9 @@ dependencies = [ [[package]] name = "macro_magic_core" -version = "0.4.2" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e766a20fd9c72bab3e1e64ed63f36bd08410e75803813df210d1ce297d7ad00" +checksum = "468155613a44cfd825f1fb0ffa532b018253920d404e6fca1e8d43155198a46d" dependencies = [ "const-random", "derive-syn-parse", @@ -7654,9 +8125,9 @@ dependencies = [ [[package]] name = "macro_magic_core_macros" -version = "0.4.2" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c12469fc165526520dff2807c2975310ab47cf7190a45b99b49a7dc8befab17b" +checksum = "9ea73aa640dc01d62a590d48c0c3521ed739d53b27f919b25c3551e233481654" dependencies = [ "proc-macro2", "quote", @@ -7665,9 +8136,9 @@ dependencies = [ [[package]] name = "macro_magic_macros" -version = "0.4.2" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8fb85ec1620619edf2984a7693497d4ec88a9665d8b87e942856884c92dbf2a" +checksum = "ef9d79ae96aaba821963320eb2b6e34d17df1e5a83d8a1985c29cc5be59577b3" dependencies = [ "macro_magic_core", "quote", @@ -7722,9 +8193,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.5.0" +version = "2.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" +checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" [[package]] name = "memfd" @@ -7836,6 +8307,58 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" +[[package]] +name = "minimal-node" +version = "4.0.0-dev" +dependencies = [ + "clap 4.4.6", + "frame", + "futures", + "futures-timer", + "jsonrpsee", + "minimal-runtime", + "sc-basic-authorship", + "sc-cli", + "sc-client-api", + "sc-consensus", + "sc-consensus-manual-seal", + "sc-executor", + "sc-network", + "sc-offchain", + "sc-rpc-api", + "sc-service", + "sc-telemetry", + "sc-transaction-pool", + "sc-transaction-pool-api", + "serde_json", + "sp-api", + "sp-block-builder", + "sp-blockchain", + "sp-io", + "sp-keyring", + "sp-runtime", + "sp-timestamp", + "substrate-build-script-utils", + "substrate-frame-rpc-system", +] + +[[package]] +name = "minimal-runtime" +version = "0.1.0" +dependencies = [ + "frame", + "frame-support", + "pallet-balances", + "pallet-sudo", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "parity-scale-codec", + "scale-info", + "sp-genesis-builder", + "substrate-wasm-builder", +] + [[package]] name = "miniz_oxide" version = "0.7.1" @@ -7899,7 +8422,7 @@ dependencies = [ "sp-core", "sp-mmr-primitives", "sp-runtime", - "sp-tracing", + "sp-tracing 10.0.0", "substrate-test-runtime-client", "tokio", ] @@ -8143,220 +8666,86 @@ 
dependencies = [ "futures", "libc", "log", - "tokio", -] - -[[package]] -name = "nix" -version = "0.24.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa52e972a9a719cecb6864fb88568781eb706bac2cd1d4f04a648542dbf78069" -dependencies = [ - "bitflags 1.3.2", - "cfg-if", - "libc", - "memoffset 0.6.5", -] - -[[package]] -name = "nix" -version = "0.26.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfdda3d196821d6af13126e40375cdf7da646a96114af134d5f417a9a1dc8e1a" -dependencies = [ - "bitflags 1.3.2", - "cfg-if", - "libc", - "memoffset 0.7.1", - "pin-utils", - "static_assertions", -] - -[[package]] -name = "no-std-net" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43794a0ace135be66a25d3ae77d41b91615fb68ae937f904090203e81f755b65" - -[[package]] -name = "node-bench" -version = "0.9.0-dev" -dependencies = [ - "array-bytes 6.1.0", - "clap 4.4.6", - "derive_more", - "fs_extra", - "futures", - "hash-db", - "kitchensink-runtime", - "kvdb", - "kvdb-rocksdb", - "lazy_static", - "log", - "node-primitives", - "node-testing", - "parity-db", - "rand 0.8.5", - "sc-basic-authorship", - "sc-client-api", - "sc-transaction-pool", - "sc-transaction-pool-api", - "serde", - "serde_json", - "sp-consensus", - "sp-core", - "sp-inherents", - "sp-runtime", - "sp-state-machine", - "sp-timestamp", - "sp-tracing", - "sp-trie", - "tempfile", -] - -[[package]] -name = "node-cli" -version = "3.0.0-dev" -dependencies = [ - "array-bytes 6.1.0", - "assert_cmd", - "clap 4.4.6", - "clap_complete", - "criterion 0.4.0", - "frame-benchmarking-cli", - "frame-system", - "frame-system-rpc-runtime-api", - "futures", - "jsonrpsee", - "kitchensink-runtime", - "log", - "nix 0.26.2", - "node-executor", - "node-inspect", - "node-primitives", - "node-rpc", - "pallet-asset-conversion-tx-payment", - "pallet-asset-tx-payment", - "pallet-assets", - "pallet-balances", - "pallet-im-online", - "pallet-timestamp", - "parity-scale-codec", - "platforms", - "rand 0.8.5", - "regex", - "sc-authority-discovery", - "sc-basic-authorship", - "sc-block-builder", - "sc-chain-spec", - "sc-cli", - "sc-client-api", - "sc-client-db", - "sc-consensus", - "sc-consensus-babe", - "sc-consensus-epochs", - "sc-consensus-grandpa", - "sc-consensus-slots", - "sc-executor", - "sc-keystore", - "sc-mixnet", - "sc-network", - "sc-network-common", - "sc-network-statement", - "sc-network-sync", - "sc-offchain", - "sc-rpc", - "sc-service", - "sc-service-test", - "sc-statement-store", - "sc-storage-monitor", - "sc-sync-state-rpc", - "sc-sysinfo", - "sc-telemetry", - "sc-transaction-pool", - "sc-transaction-pool-api", - "serde", - "serde_json", - "soketto", - "sp-api", - "sp-authority-discovery", - "sp-blockchain", - "sp-consensus", - "sp-consensus-babe", - "sp-consensus-grandpa", - "sp-core", - "sp-inherents", - "sp-io", - "sp-keyring", - "sp-keystore", - "sp-mixnet", - "sp-runtime", - "sp-statement-store", - "sp-timestamp", - "sp-tracing", - "sp-transaction-storage-proof", - "substrate-build-script-utils", - "substrate-cli-test-utils", - "substrate-frame-cli", - "substrate-rpc-client", - "tempfile", - "tokio", - "tokio-util", - "try-runtime-cli", - "wait-timeout", + "tokio", ] [[package]] -name = "node-executor" -version = "3.0.0-dev" +name = "nix" +version = "0.24.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa52e972a9a719cecb6864fb88568781eb706bac2cd1d4f04a648542dbf78069" dependencies = [ - "criterion 0.4.0", - 
"frame-benchmarking", - "frame-support", - "frame-system", - "futures", - "kitchensink-runtime", - "node-primitives", - "node-testing", - "pallet-balances", - "pallet-contracts", - "pallet-glutton", - "pallet-im-online", - "pallet-root-testing", - "pallet-sudo", - "pallet-timestamp", - "pallet-transaction-payment", - "pallet-treasury", - "parity-scale-codec", - "sc-executor", - "scale-info", - "sp-application-crypto", - "sp-consensus-babe", - "sp-core", - "sp-externalities", - "sp-keyring", - "sp-keystore", - "sp-runtime", - "sp-state-machine", - "sp-statement-store", - "sp-tracing", - "sp-trie", - "wat", + "bitflags 1.3.2", + "cfg-if", + "libc", + "memoffset 0.6.5", +] + +[[package]] +name = "nix" +version = "0.26.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfdda3d196821d6af13126e40375cdf7da646a96114af134d5f417a9a1dc8e1a" +dependencies = [ + "bitflags 1.3.2", + "cfg-if", + "libc", + "memoffset 0.7.1", + "pin-utils", + "static_assertions", +] + +[[package]] +name = "nix" +version = "0.27.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" +dependencies = [ + "bitflags 2.4.0", + "cfg-if", + "libc", ] [[package]] -name = "node-inspect" +name = "no-std-net" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43794a0ace135be66a25d3ae77d41b91615fb68ae937f904090203e81f755b65" + +[[package]] +name = "node-bench" version = "0.9.0-dev" dependencies = [ + "array-bytes 6.1.0", "clap 4.4.6", - "parity-scale-codec", - "sc-cli", + "derive_more", + "fs_extra", + "futures", + "hash-db", + "kitchensink-runtime", + "kvdb", + "kvdb-rocksdb", + "lazy_static", + "log", + "node-primitives", + "node-testing", + "parity-db", + "rand 0.8.5", + "sc-basic-authorship", "sc-client-api", - "sc-service", - "sp-blockchain", + "sc-transaction-pool", + "sc-transaction-pool-api", + "serde", + "serde_json", + "sp-consensus", "sp-core", + "sp-inherents", "sp-runtime", - "thiserror", + "sp-state-machine", + "sp-timestamp", + "sp-tracing 10.0.0", + "sp-trie", + "tempfile", ] [[package]] @@ -8435,6 +8824,7 @@ dependencies = [ "sc-telemetry", "sc-transaction-pool", "sc-transaction-pool-api", + "serde_json", "sp-api", "sp-block-builder", "sp-blockchain", @@ -8486,6 +8876,7 @@ dependencies = [ "pallet-transaction-payment-rpc-runtime-api", "parity-scale-codec", "scale-info", + "serde_json", "sp-api", "sp-block-builder", "sp-consensus-aura", @@ -8496,8 +8887,8 @@ dependencies = [ "sp-offchain", "sp-runtime", "sp-session", - "sp-std", - "sp-storage", + "sp-std 8.0.0", + "sp-storage 13.0.0", "sp-transaction-pool", "sp-version", "substrate-wasm-builder", @@ -8512,12 +8903,12 @@ dependencies = [ "futures", "kitchensink-runtime", "log", - "node-executor", "node-primitives", "pallet-asset-conversion", "pallet-asset-conversion-tx-payment", "pallet-asset-tx-payment", "pallet-assets", + "pallet-skip-feeless-payment", "parity-scale-codec", "sc-block-builder", "sc-client-api", @@ -8535,6 +8926,7 @@ dependencies = [ "sp-keyring", "sp-runtime", "sp-timestamp", + "staging-node-executor", "substrate-test-client", "tempfile", ] @@ -8790,6 +9182,16 @@ dependencies = [ "num-traits", ] +[[package]] +name = "os_pipe" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ae859aa07428ca9a929b936690f8b12dc5f11dd8c6992a18ca93919f28bc177" +dependencies = [ + "libc", + "windows-sys 0.48.0", +] + [[package]] name = "os_str_bytes" version = 
"6.5.1" @@ -8842,7 +9244,7 @@ dependencies = [ "sp-core-hashing", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -8862,7 +9264,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -8880,8 +9282,8 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", - "sp-storage", + "sp-std 8.0.0", + "sp-storage 13.0.0", ] [[package]] @@ -8897,7 +9299,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -8918,8 +9320,8 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", - "sp-storage", + "sp-std 8.0.0", + "sp-storage 13.0.0", ] [[package]] @@ -8936,7 +9338,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -8951,7 +9353,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -8969,7 +9371,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -8986,7 +9388,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -9001,7 +9403,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -9029,7 +9431,7 @@ dependencies = [ "sp-runtime", "sp-session", "sp-staking", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -9049,8 +9451,8 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", - "sp-tracing", + "sp-std 8.0.0", + "sp-tracing 10.0.0", ] [[package]] @@ -9076,9 +9478,9 @@ dependencies = [ "pallet-staking", "sp-core", "sp-runtime", - "sp-std", - "sp-storage", - "sp-tracing", + "sp-std 8.0.0", + "sp-storage 13.0.0", + "sp-tracing 10.0.0", ] [[package]] @@ -9096,7 +9498,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -9124,7 +9526,7 @@ dependencies = [ "sp-session", "sp-staking", "sp-state-machine", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -9149,7 +9551,7 @@ dependencies = [ "sp-runtime", "sp-staking", "sp-state-machine", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -9167,7 +9569,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -9188,7 +9590,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", "sp-trie", ] @@ -9207,10 +9609,9 @@ dependencies = [ "pallet-balances", "parity-scale-codec", "scale-info", - "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -9232,7 +9633,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", "sp-trie", ] @@ -9252,10 +9653,9 @@ dependencies = [ "parity-scale-codec", "scale-info", "sp-arithmetic", - "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -9272,7 +9672,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -9291,7 +9691,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -9315,8 +9715,8 @@ dependencies = [ "sp-io", "sp-runtime", "sp-staking", - "sp-std", - "sp-tracing", + "sp-std 8.0.0", + "sp-tracing 10.0.0", ] [[package]] @@ -9332,7 +9732,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -9347,7 +9747,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -9364,10 +9764,13 @@ dependencies = [ "frame-system", "impl-trait-for-tuples", "log", + 
"pallet-assets", "pallet-balances", + "pallet-contracts-fixtures", "pallet-contracts-primitives", "pallet-contracts-proc-macro", "pallet-insecure-randomness-collective-flip", + "pallet-message-queue", "pallet-proxy", "pallet-timestamp", "pallet-utility", @@ -9383,12 +9786,62 @@ dependencies = [ "sp-io", "sp-keystore", "sp-runtime", - "sp-std", + "sp-std 8.0.0", + "sp-tracing 10.0.0", + "staging-xcm", + "staging-xcm-builder", "wasm-instrument 0.4.0", "wasmi", "wat", ] +[[package]] +name = "pallet-contracts-fixtures" +version = "1.0.0" +dependencies = [ + "frame-system", + "sp-runtime", + "wat", +] + +[[package]] +name = "pallet-contracts-mock-network" +version = "1.0.0" +dependencies = [ + "assert_matches", + "frame-support", + "frame-system", + "pallet-assets", + "pallet-balances", + "pallet-contracts", + "pallet-contracts-fixtures", + "pallet-contracts-primitives", + "pallet-contracts-proc-macro", + "pallet-insecure-randomness-collective-flip", + "pallet-message-queue", + "pallet-proxy", + "pallet-timestamp", + "pallet-utility", + "pallet-xcm", + "parity-scale-codec", + "polkadot-parachain-primitives", + "polkadot-primitives", + "polkadot-runtime-parachains", + "pretty_assertions", + "scale-info", + "sp-api", + "sp-core", + "sp-io", + "sp-keystore", + "sp-runtime", + "sp-std 8.0.0", + "sp-tracing 10.0.0", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "xcm-simulator", +] + [[package]] name = "pallet-contracts-primitives" version = "24.0.0" @@ -9397,7 +9850,7 @@ dependencies = [ "parity-scale-codec", "scale-info", "sp-runtime", - "sp-std", + "sp-std 8.0.0", "sp-weights", ] @@ -9426,7 +9879,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -9443,7 +9896,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -9457,7 +9910,7 @@ dependencies = [ "scale-info", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -9477,7 +9930,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -9493,7 +9946,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -9518,8 +9971,8 @@ dependencies = [ "sp-npos-elections", "sp-runtime", "sp-staking", - "sp-std", - "sp-tracing", + "sp-std 8.0.0", + "sp-tracing 10.0.0", ] [[package]] @@ -9542,8 +9995,8 @@ dependencies = [ "sp-io", "sp-npos-elections", "sp-runtime", - "sp-std", - "sp-tracing", + "sp-std 8.0.0", + "sp-tracing 10.0.0", "strum", ] @@ -9557,7 +10010,7 @@ dependencies = [ "parity-scale-codec", "sp-npos-elections", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -9576,8 +10029,8 @@ dependencies = [ "sp-npos-elections", "sp-runtime", "sp-staking", - "sp-std", - "sp-tracing", + "sp-std 8.0.0", + "sp-tracing 10.0.0", "substrate-test-utils", ] @@ -9595,7 +10048,16 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", +] + +[[package]] +name = "pallet-example-frame-crate" +version = "0.0.1" +dependencies = [ + "frame", + "parity-scale-codec", + "scale-info", ] [[package]] @@ -9612,7 +10074,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -9629,7 +10091,7 @@ dependencies = [ "sp-io", "sp-keystore", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -9644,7 +10106,7 @@ dependencies = [ "scale-info", "sp-core", "sp-io", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -9654,6 +10116,7 @@ dependencies = [ 
"pallet-default-config-example", "pallet-dev-mode", "pallet-example-basic", + "pallet-example-frame-crate", "pallet-example-kitchensink", "pallet-example-offchain-worker", "pallet-example-split", @@ -9679,8 +10142,8 @@ dependencies = [ "sp-io", "sp-runtime", "sp-staking", - "sp-std", - "sp-tracing", + "sp-std 8.0.0", + "sp-tracing 10.0.0", "substrate-test-utils", ] @@ -9699,7 +10162,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -9729,7 +10192,7 @@ dependencies = [ "sp-runtime", "sp-session", "sp-staking", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -9746,7 +10209,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -9766,7 +10229,7 @@ dependencies = [ "sp-io", "sp-runtime", "sp-staking", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -9783,7 +10246,7 @@ dependencies = [ "sp-io", "sp-keyring", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -9798,7 +10261,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -9815,7 +10278,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -9831,7 +10294,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -9851,8 +10314,8 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", - "sp-tracing", + "sp-std 8.0.0", + "sp-tracing 10.0.0", "sp-weights", ] @@ -9872,7 +10335,7 @@ dependencies = [ "sp-io", "sp-mixnet", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -9892,7 +10355,7 @@ dependencies = [ "sp-io", "sp-mmr-primitives", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -9908,7 +10371,7 @@ dependencies = [ "scale-info", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -9927,7 +10390,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -9946,7 +10409,7 @@ dependencies = [ "sp-io", "sp-keystore", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -9970,7 +10433,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -9987,7 +10450,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -10002,7 +10465,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -10019,8 +10482,8 @@ dependencies = [ "sp-io", "sp-runtime", "sp-staking", - "sp-std", - "sp-tracing", + "sp-std 8.0.0", + "sp-tracing 10.0.0", ] [[package]] @@ -10042,9 +10505,9 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-runtime-interface", + "sp-runtime-interface 17.0.0", "sp-staking", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -10059,7 +10522,7 @@ dependencies = [ "rand 0.8.5", "sp-io", "sp-runtime", - "sp-tracing", + "sp-tracing 10.0.0", ] [[package]] @@ -10069,7 +10532,7 @@ dependencies = [ "pallet-nomination-pools", "parity-scale-codec", "sp-api", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -10092,8 +10555,8 @@ dependencies = [ "sp-io", "sp-runtime", "sp-staking", - "sp-std", - "sp-tracing", + "sp-std 8.0.0", + "sp-tracing 10.0.0", ] [[package]] @@ -10111,7 +10574,7 @@ dependencies = [ "sp-io", "sp-runtime", "sp-staking", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -10138,7 +10601,7 @@ dependencies = [ "sp-io", "sp-runtime", "sp-staking", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -10155,7 +10618,7 @@ dependencies = [ "sp-io", "sp-metadata-ir", 
"sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -10198,7 +10661,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -10215,7 +10678,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -10232,7 +10695,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -10248,7 +10711,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -10270,7 +10733,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -10286,7 +10749,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -10307,7 +10770,7 @@ dependencies = [ "sp-io", "sp-runtime", "sp-staking", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -10321,7 +10784,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -10340,7 +10803,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -10357,7 +10820,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -10375,7 +10838,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", "sp-weights", "substrate-test-utils", ] @@ -10392,7 +10855,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -10412,7 +10875,7 @@ dependencies = [ "sp-session", "sp-staking", "sp-state-machine", - "sp-std", + "sp-std 8.0.0", "sp-trie", ] @@ -10436,7 +10899,19 @@ dependencies = [ "sp-io", "sp-runtime", "sp-session", - "sp-std", + "sp-std 8.0.0", +] + +[[package]] +name = "pallet-skip-feeless-payment" +version = "1.0.0-dev" +dependencies = [ + "frame-support", + "frame-system", + "parity-scale-codec", + "scale-info", + "sp-runtime", + "sp-std 8.0.0", ] [[package]] @@ -10456,7 +10931,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -10484,8 +10959,8 @@ dependencies = [ "sp-npos-elections", "sp-runtime", "sp-staking", - "sp-std", - "sp-tracing", + "sp-std 8.0.0", + "sp-tracing 10.0.0", "substrate-test-utils", ] @@ -10514,6 +10989,7 @@ version = "4.0.0-dev" dependencies = [ "parity-scale-codec", "sp-api", + "sp-staking", ] [[package]] @@ -10533,8 +11009,8 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", - "sp-tracing", + "sp-std 8.0.0", + "sp-tracing 10.0.0", "substrate-state-trie-migration-rpc", "thousands", "tokio", @@ -10556,7 +11032,7 @@ dependencies = [ "sp-io", "sp-runtime", "sp-statement-store", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -10572,7 +11048,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -10587,7 +11063,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -10605,8 +11081,8 @@ dependencies = [ "sp-inherents", "sp-io", "sp-runtime", - "sp-std", - "sp-storage", + "sp-std 8.0.0", + "sp-storage 13.0.0", "sp-timestamp", ] @@ -10626,8 +11102,8 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", - "sp-storage", + "sp-std 8.0.0", + "sp-storage 13.0.0", ] [[package]] @@ -10644,7 +11120,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -10690,7 +11166,7 @@ dependencies = [ "sp-inherents", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", 
"sp-transaction-storage-proof", ] @@ -10711,7 +11187,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -10729,7 +11205,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -10746,7 +11222,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -10765,7 +11241,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -10782,7 +11258,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -10800,7 +11276,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -10812,6 +11288,7 @@ dependencies = [ "frame-support", "frame-system", "log", + "pallet-assets", "pallet-balances", "parity-scale-codec", "polkadot-parachain-primitives", @@ -10821,7 +11298,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", @@ -10845,8 +11322,8 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", - "sp-tracing", + "sp-std 8.0.0", + "sp-tracing 10.0.0", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", @@ -10866,24 +11343,11 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", "staging-xcm", "staging-xcm-builder", ] -[[package]] -name = "parachain-info" -version = "0.1.0" -dependencies = [ - "cumulus-primitives-core", - "frame-support", - "frame-system", - "parity-scale-codec", - "scale-info", - "sp-runtime", - "sp-std", -] - [[package]] name = "parachain-template-node" version = "0.1.0" @@ -10926,6 +11390,7 @@ dependencies = [ "sc-transaction-pool", "sc-transaction-pool-api", "serde", + "serde_json", "sp-api", "sp-block-builder", "sp-blockchain", @@ -10966,6 +11431,7 @@ dependencies = [ "pallet-authorship", "pallet-balances", "pallet-collator-selection", + "pallet-message-queue", "pallet-parachain-template", "pallet-session", "pallet-sudo", @@ -10973,7 +11439,7 @@ dependencies = [ "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "pallet-xcm", - "parachain-info", + "parachains-common", "parity-scale-codec", "polkadot-parachain-primitives", "polkadot-runtime-common", @@ -10988,9 +11454,10 @@ dependencies = [ "sp-offchain", "sp-runtime", "sp-session", - "sp-std", + "sp-std 8.0.0", "sp-transaction-pool", "sp-version", + "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", @@ -11012,6 +11479,7 @@ dependencies = [ "pallet-authorship", "pallet-balances", "pallet-collator-selection", + "pallet-message-queue", "parity-scale-codec", "polkadot-core-primitives", "polkadot-primitives", @@ -11022,10 +11490,10 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", + "staging-parachain-info", "staging-xcm", "staging-xcm-builder", - "staging-xcm-executor", "substrate-wasm-builder", "westend-runtime-constants", ] @@ -11035,7 +11503,6 @@ name = "parachains-runtimes-test-utils" version = "1.0.0" dependencies = [ "assets-common", - "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-xcmp-queue", "cumulus-primitives-core", @@ -11049,7 +11516,6 @@ dependencies = [ "pallet-collator-selection", "pallet-session", "pallet-xcm", - "parachain-info", "parachains-common", "parity-scale-codec", "polkadot-parachain-primitives", @@ -11057,8 +11523,9 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - 
"sp-std", - "sp-tracing", + "sp-std 8.0.0", + "sp-tracing 10.0.0", + "staging-parachain-info", "staging-xcm", "staging-xcm-executor", "substrate-wasm-builder", @@ -11227,15 +11694,6 @@ dependencies = [ "crypto-mac 0.11.1", ] -[[package]] -name = "pbkdf2" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917" -dependencies = [ - "digest 0.10.7", -] - [[package]] name = "pbkdf2" version = "0.12.2" @@ -11269,6 +11727,21 @@ dependencies = [ "base64ct", ] +[[package]] +name = "penpal-emulated-chain" +version = "0.0.0" +dependencies = [ + "cumulus-primitives-core", + "emulated-integration-tests-common", + "frame-support", + "parachains-common", + "penpal-runtime", + "rococo-emulated-chain", + "serde_json", + "sp-core", + "sp-runtime", +] + [[package]] name = "penpal-runtime" version = "0.9.27" @@ -11296,13 +11769,13 @@ dependencies = [ "pallet-authorship", "pallet-balances", "pallet-collator-selection", + "pallet-message-queue", "pallet-session", "pallet-sudo", "pallet-timestamp", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "pallet-xcm", - "parachain-info", "parachains-common", "parity-scale-codec", "polkadot-parachain-primitives", @@ -11319,10 +11792,11 @@ dependencies = [ "sp-offchain", "sp-runtime", "sp-session", - "sp-std", - "sp-storage", + "sp-std 8.0.0", + "sp-storage 13.0.0", "sp-transaction-pool", "sp-version", + "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", @@ -11417,9 +11891,9 @@ checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777" [[package]] name = "pin-project-lite" -version = "0.2.13" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" +checksum = "12cc1b0bf1727a77a54b6654e7b5f1af8604923edc8b81885f8ec92f9e3f0a05" [[package]] name = "pin-utils" @@ -11513,9 +11987,11 @@ name = "polkadot-approval-distribution" version = "1.0.0" dependencies = [ "assert_matches", + "bitvec", "env_logger 0.9.3", "futures", "futures-timer", + "itertools 0.10.5", "log", "polkadot-node-jaeger", "polkadot-node-metrics", @@ -11586,7 +12062,7 @@ dependencies = [ "sp-core", "sp-keyring", "sp-keystore", - "sp-tracing", + "sp-tracing 10.0.0", "thiserror", "tracing-gum", ] @@ -11630,6 +12106,7 @@ dependencies = [ "futures", "log", "polkadot-node-metrics", + "polkadot-node-primitives", "polkadot-service", "pyroscope", "pyroscope_pprofrs", @@ -11686,7 +12163,7 @@ dependencies = [ "scale-info", "sp-core", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -11717,7 +12194,7 @@ dependencies = [ "sp-application-crypto", "sp-keyring", "sp-keystore", - "sp-tracing", + "sp-tracing 10.0.0", "thiserror", "tracing-gum", ] @@ -11760,7 +12237,7 @@ dependencies = [ "sp-core", "sp-keyring", "sp-keystore", - "sp-tracing", + "sp-tracing 10.0.0", "tracing-gum", ] @@ -11822,10 +12299,13 @@ dependencies = [ "async-trait", "bitvec", "derive_more", + "env_logger 0.9.3", "futures", "futures-timer", + "itertools 0.10.5", "kvdb", "kvdb-memorydb", + "log", "merlin 2.0.1", "parity-scale-codec", "parking_lot 0.12.1", @@ -11837,6 +12317,8 @@ dependencies = [ "polkadot-overseer", "polkadot-primitives", "polkadot-primitives-test-helpers", + "rand 0.8.5", + "rand_chacha 0.3.1", "rand_core 0.5.1", "sc-keystore", "schnellru", @@ -11904,7 +12386,7 @@ dependencies = [ "sp-core", "sp-keyring", "sp-keystore", - "sp-tracing", 
+ "sp-tracing 10.0.0", "thiserror", "tracing-gum", ] @@ -12013,7 +12495,7 @@ dependencies = [ "sp-core", "sp-keyring", "sp-keystore", - "sp-tracing", + "sp-tracing 10.0.0", "thiserror", "tracing-gum", ] @@ -12085,10 +12567,13 @@ version = "1.0.0" dependencies = [ "always-assert", "assert_matches", + "blake3", "cfg-if", + "criterion 0.4.0", "futures", "futures-timer", "hex-literal", + "is_executable", "libc", "parity-scale-codec", "pin-project", @@ -12099,14 +12584,18 @@ dependencies = [ "polkadot-node-core-pvf-prepare-worker", "polkadot-node-metrics", "polkadot-node-primitives", + "polkadot-node-subsystem", "polkadot-parachain-primitives", "polkadot-primitives", + "procfs", "rand 0.8.5", + "rococo-runtime", + "rusty-fork", + "sc-sysinfo", "slotmap", "sp-core", "sp-maybe-compressed-blob", - "sp-wasm-interface", - "substrate-build-script-utils", + "sp-wasm-interface 14.0.0", "tempfile", "test-parachain-adder", "test-parachain-halt", @@ -12153,12 +12642,14 @@ dependencies = [ "sc-executor", "sc-executor-common", "sc-executor-wasmtime", + "seccompiler", "sp-core", - "sp-externalities", + "sp-externalities 0.19.0", "sp-io", - "sp-tracing", + "sp-tracing 10.0.0", + "substrate-build-script-utils", "tempfile", - "tokio", + "thiserror", "tracing-gum", ] @@ -12167,16 +12658,13 @@ name = "polkadot-node-core-pvf-execute-worker" version = "1.0.0" dependencies = [ "cpu-time", - "futures", + "libc", + "nix 0.27.1", + "os_pipe", "parity-scale-codec", "polkadot-node-core-pvf-common", "polkadot-parachain-primitives", "polkadot-primitives", - "rayon", - "sp-core", - "sp-maybe-compressed-blob", - "sp-tracing", - "tokio", "tracing-gum", ] @@ -12184,22 +12672,23 @@ dependencies = [ name = "polkadot-node-core-pvf-prepare-worker" version = "1.0.0" dependencies = [ + "blake3", "cfg-if", - "futures", + "criterion 0.4.0", "libc", + "nix 0.27.1", + "os_pipe", "parity-scale-codec", "polkadot-node-core-pvf-common", - "polkadot-parachain-primitives", "polkadot-primitives", "rayon", - "sc-executor", + "rococo-runtime", "sc-executor-common", "sc-executor-wasmtime", - "sp-io", "sp-maybe-compressed-blob", - "sp-tracing", + "staging-tracking-allocator", "tikv-jemalloc-ctl", - "tokio", + "tikv-jemallocator", "tracing-gum", ] @@ -12295,6 +12784,7 @@ dependencies = [ name = "polkadot-node-primitives" version = "1.0.0" dependencies = [ + "bitvec", "bounded-vec", "futures", "parity-scale-codec", @@ -12346,6 +12836,7 @@ name = "polkadot-node-subsystem-types" version = "1.0.0" dependencies = [ "async-trait", + "bitvec", "derive_more", "futures", "orchestra", @@ -12428,7 +12919,6 @@ dependencies = [ "polkadot-primitives-test-helpers", "prioritized-metered-channel", "sc-client-api", - "schnellru", "sp-api", "sp-core", "tikv-jemalloc-ctl", @@ -12442,13 +12932,16 @@ dependencies = [ "assert_cmd", "asset-hub-kusama-runtime", "asset-hub-polkadot-runtime", + "asset-hub-rococo-runtime", "asset-hub-westend-runtime", "async-trait", "bridge-hub-kusama-runtime", "bridge-hub-polkadot-runtime", "bridge-hub-rococo-runtime", + "bridge-hub-westend-runtime", "clap 4.4.6", "collectives-polkadot-runtime", + "collectives-westend-runtime", "color-print", "contracts-rococo-runtime", "cumulus-client-cli", @@ -12466,6 +12959,7 @@ dependencies = [ "frame-benchmarking-cli", "futures", "glutton-runtime", + "glutton-westend-runtime", "hex-literal", "jsonrpsee", "log", @@ -12508,6 +13002,7 @@ dependencies = [ "sp-runtime", "sp-session", "sp-timestamp", + "sp-tracing 10.0.0", "sp-transaction-pool", "staging-xcm", "substrate-build-script-utils", @@ -12525,14 
+13020,14 @@ version = "1.0.0" dependencies = [ "bounded-collections", "derive_more", - "frame-support", "parity-scale-codec", "polkadot-core-primitives", "scale-info", "serde", "sp-core", "sp-runtime", - "sp-std", + "sp-std 8.0.0", + "sp-weights", ] [[package]] @@ -12557,7 +13052,7 @@ dependencies = [ "sp-keystore", "sp-runtime", "sp-staking", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -12623,6 +13118,7 @@ dependencies = [ "pallet-balances", "pallet-election-provider-multi-phase", "pallet-fast-unstake", + "pallet-identity", "pallet-session", "pallet-staking", "pallet-staking-reward-fn", @@ -12630,6 +13126,7 @@ dependencies = [ "pallet-transaction-payment", "pallet-treasury", "pallet-vesting", + "pallet-xcm-benchmarks", "parity-scale-codec", "polkadot-primitives", "polkadot-primitives-test-helpers", @@ -12650,9 +13147,10 @@ dependencies = [ "sp-runtime", "sp-session", "sp-staking", - "sp-std", + "sp-std 8.0.0", "staging-xcm", "staging-xcm-builder", + "staging-xcm-executor", "static_assertions", ] @@ -12664,8 +13162,8 @@ dependencies = [ "frame-benchmarking", "parity-scale-codec", "polkadot-primitives", - "sp-std", - "sp-tracing", + "sp-std 8.0.0", + "sp-tracing 10.0.0", ] [[package]] @@ -12716,8 +13214,8 @@ dependencies = [ "sp-runtime", "sp-session", "sp-staking", - "sp-std", - "sp-tracing", + "sp-std 8.0.0", + "sp-tracing 10.0.0", "staging-xcm", "staging-xcm-executor", "static_assertions", @@ -12783,6 +13281,7 @@ dependencies = [ "polkadot-overseer", "polkadot-parachain-primitives", "polkadot-primitives", + "polkadot-primitives-test-helpers", "polkadot-rpc", "polkadot-runtime-parachains", "polkadot-statement-distribution", @@ -12834,7 +13333,7 @@ dependencies = [ "sp-runtime", "sp-session", "sp-state-machine", - "sp-storage", + "sp-storage 13.0.0", "sp-timestamp", "sp-transaction-pool", "sp-version", @@ -12877,7 +13376,7 @@ dependencies = [ "sp-keyring", "sp-keystore", "sp-staking", - "sp-tracing", + "sp-tracing 10.0.0", "thiserror", "tracing-gum", ] @@ -13005,7 +13504,7 @@ dependencies = [ "sp-runtime", "sp-session", "sp-staking", - "sp-std", + "sp-std 8.0.0", "sp-transaction-pool", "sp-trie", "sp-version", @@ -13091,7 +13590,7 @@ dependencies = [ "concurrent-queue", "libc", "log", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.12", "windows-sys 0.48.0", ] @@ -13327,9 +13826,9 @@ checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro-warning" -version = "0.4.2" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d1eaa7fa0aa1929ffdf7eeb6eac234dde6268914a14ad44d23521ab6a9b258e" +checksum = "9b698b0b09d40e9b7c1a47b132d66a8b54bcd20583d9b6d06e4535e383b4405c" dependencies = [ "proc-macro2", "quote", @@ -13338,13 +13837,39 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.67" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d433d9f1a3e8c1263d9456598b16fec66f4acc9a74dacffd35c7bb09b3a1328" +checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da" dependencies = [ "unicode-ident", ] +[[package]] +name = "procfs" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "731e0d9356b0c25f16f33b5be79b1c57b562f141ebfcdb0ad8ac2c13a24293b4" +dependencies = [ + "bitflags 2.4.0", + "chrono", + "flate2", + "hex", + "lazy_static", + "procfs-core", + "rustix 0.38.21", +] + +[[package]] +name = "procfs-core" +version = "0.16.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d3554923a69f4ce04c4a754260c338f505ce22642d3830e049a399fc2059a29" +dependencies = [ + "bitflags 2.4.0", + "chrono", + "hex", +] + [[package]] name = "prometheus" version = "0.13.3" @@ -13694,7 +14219,7 @@ checksum = "6413f3de1edee53342e6138e75b56d32e7bc6e332b3bd62d497b1929d4cfbcdd" dependencies = [ "pem", "ring 0.16.20", - "time", + "time 0.3.27", "x509-parser 0.13.2", "yasna", ] @@ -13707,7 +14232,7 @@ checksum = "ffbe84efe2f38dea12e9bfc1f65377fdf03e53a18cb3b995faedf7934c7e785b" dependencies = [ "pem", "ring 0.16.20", - "time", + "time 0.3.27", "yasna", ] @@ -13787,14 +14312,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.3" +version = "1.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81bc1d4caf89fac26a70747fe603c130093b53c773888797a6329091246d651a" +checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.3.6", - "regex-syntax 0.7.4", + "regex-automata 0.4.3", + "regex-syntax 0.8.2", ] [[package]] @@ -13811,10 +14336,16 @@ name = "regex-automata" version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fed1ceff11a1dddaee50c9dc8e4938bd106e9d89ae372f192311e7da498e3b69" + +[[package]] +name = "regex-automata" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.7.4", + "regex-syntax 0.8.2", ] [[package]] @@ -13825,9 +14356,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.7.4" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2" +checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" [[package]] name = "remote-ext-tests-bags-list" @@ -13838,7 +14369,7 @@ dependencies = [ "log", "pallet-bags-list-remote-tests", "sp-core", - "sp-tracing", + "sp-tracing 10.0.0", "tokio", "westend-runtime", "westend-runtime-constants", @@ -13866,7 +14397,7 @@ dependencies = [ "mime", "once_cell", "percent-encoding", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.12", "rustls 0.21.6", "rustls-pemfile", "serde", @@ -13917,7 +14448,7 @@ dependencies = [ [[package]] name = "ring" version = "0.1.0" -source = "git+https://github.com/w3f/ring-proof?rev=8657210#86572101f4210647984ab4efedba6b3fcc890895" +source = "git+https://github.com/burdges/ring-proof?branch=patch-1#05a756076cb20f981a52afea3a620168de49f95f" dependencies = [ "ark-ec", "ark-ff", @@ -13971,6 +14502,25 @@ dependencies = [ "librocksdb-sys", ] +[[package]] +name = "rococo-emulated-chain" +version = "0.0.0" +dependencies = [ + "emulated-integration-tests-common", + "pallet-im-online", + "parachains-common", + "polkadot-primitives", + "rococo-runtime", + "rococo-runtime-constants", + "sc-consensus-grandpa", + "serde_json", + "sp-authority-discovery", + "sp-consensus-babe", + "sp-consensus-beefy", + "sp-core", + "sp-runtime", +] + [[package]] name = "rococo-parachain-runtime" version = "0.1.0" @@ -13981,6 +14531,7 @@ dependencies = [ "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", "cumulus-ping", + "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-primitives-utility", "frame-benchmarking", @@ -13991,15 +14542,16 @@ 
dependencies = [ "pallet-assets", "pallet-aura", "pallet-balances", + "pallet-message-queue", "pallet-sudo", "pallet-timestamp", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "pallet-xcm", - "parachain-info", "parachains-common", "parity-scale-codec", "polkadot-parachain-primitives", + "polkadot-runtime-common", "scale-info", "sp-api", "sp-block-builder", @@ -14010,9 +14562,10 @@ dependencies = [ "sp-offchain", "sp-runtime", "sp-session", - "sp-std", + "sp-std 8.0.0", "sp-transaction-pool", "sp-version", + "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", @@ -14062,6 +14615,7 @@ dependencies = [ "pallet-ranked-collective", "pallet-recovery", "pallet-referenda", + "pallet-root-testing", "pallet-scheduler", "pallet-session", "pallet-society", @@ -14106,9 +14660,9 @@ dependencies = [ "sp-runtime", "sp-session", "sp-staking", - "sp-std", - "sp-storage", - "sp-tracing", + "sp-std 8.0.0", + "sp-storage 13.0.0", + "sp-tracing 10.0.0", "sp-transaction-pool", "sp-trie", "sp-version", @@ -14132,6 +14686,31 @@ dependencies = [ "sp-core", "sp-runtime", "sp-weights", + "staging-xcm", +] + +[[package]] +name = "rococo-system-emulated-network" +version = "0.0.0" +dependencies = [ + "asset-hub-rococo-emulated-chain", + "bridge-hub-rococo-emulated-chain", + "emulated-integration-tests-common", + "penpal-emulated-chain", + "rococo-emulated-chain", +] + +[[package]] +name = "rococo-westend-system-emulated-network" +version = "0.0.0" +dependencies = [ + "asset-hub-rococo-emulated-chain", + "asset-hub-westend-emulated-chain", + "bridge-hub-rococo-emulated-chain", + "bridge-hub-westend-emulated-chain", + "emulated-integration-tests-common", + "rococo-emulated-chain", + "westend-emulated-chain", ] [[package]] @@ -14270,14 +14849,14 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.8" +version = "0.38.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19ed4fa021d81c8392ce04db050a3da9a60299050b7ae1cf482d862b54a7218f" +checksum = "2b426b0506e5d50a7d8dafcf2e81471400deb602392c7dd110815afb4eaf02a3" dependencies = [ "bitflags 2.4.0", "errno", "libc", - "linux-raw-sys 0.4.5", + "linux-raw-sys 0.4.10", "windows-sys 0.48.0", ] @@ -14374,6 +14953,7 @@ dependencies = [ "fnv", "quick-error", "tempfile", + "wait-timeout", ] [[package]] @@ -14437,7 +15017,7 @@ version = "4.1.0-dev" dependencies = [ "log", "sp-core", - "sp-wasm-interface", + "sp-wasm-interface 14.0.0", "thiserror", ] @@ -14465,7 +15045,7 @@ dependencies = [ "sp-core", "sp-keystore", "sp-runtime", - "sp-tracing", + "sp-tracing 10.0.0", "substrate-prometheus-endpoint", "substrate-test-runtime-client", "thiserror", @@ -14501,7 +15081,6 @@ name = "sc-block-builder" version = "0.10.0-dev" dependencies = [ "parity-scale-codec", - "sc-client-api", "sp-api", "sp-block-builder", "sp-blockchain", @@ -14516,7 +15095,11 @@ dependencies = [ name = "sc-chain-spec" version = "4.0.0-dev" dependencies = [ + "array-bytes 6.1.0", + "docify", + "log", "memmap2", + "parity-scale-codec", "sc-chain-spec-derive", "sc-client-api", "sc-executor", @@ -14524,10 +15107,16 @@ dependencies = [ "sc-telemetry", "serde", "serde_json", + "sp-application-crypto", "sp-blockchain", + "sp-consensus-babe", "sp-core", + "sp-genesis-builder", + "sp-io", + "sp-keyring", "sp-runtime", "sp-state-machine", + "substrate-test-runtime", ] [[package]] @@ -14545,11 +15134,13 @@ name = "sc-cli" version = "0.10.0-dev" dependencies = [ "array-bytes 6.1.0", + "bip39", "chrono", "clap 4.4.6", "fdlimit", 
"futures", "futures-timer", + "itertools 0.10.5", "libp2p-identity", "log", "names 0.13.0", @@ -14574,11 +15165,10 @@ dependencies = [ "sp-keystore", "sp-panic-handler", "sp-runtime", - "sp-tracing", + "sp-tracing 10.0.0", "sp-version", "tempfile", "thiserror", - "tiny-bip39", "tokio", ] @@ -14599,11 +15189,11 @@ dependencies = [ "sp-consensus", "sp-core", "sp-database", - "sp-externalities", + "sp-externalities 0.19.0", "sp-runtime", "sp-state-machine", "sp-statement-store", - "sp-storage", + "sp-storage 13.0.0", "sp-test-primitives", "sp-trie", "substrate-prometheus-endpoint", @@ -14638,7 +15228,7 @@ dependencies = [ "sp-database", "sp-runtime", "sp-state-machine", - "sp-tracing", + "sp-tracing 10.0.0", "sp-trie", "substrate-test-runtime-client", "tempfile", @@ -14699,7 +15289,7 @@ dependencies = [ "sp-keystore", "sp-runtime", "sp-timestamp", - "sp-tracing", + "sp-tracing 10.0.0", "substrate-prometheus-endpoint", "substrate-test-runtime-client", "tempfile", @@ -14741,7 +15331,7 @@ dependencies = [ "sp-keystore", "sp-runtime", "sp-timestamp", - "sp-tracing", + "sp-tracing 10.0.0", "substrate-prometheus-endpoint", "substrate-test-runtime-client", "thiserror", @@ -14809,7 +15399,7 @@ dependencies = [ "sp-keystore", "sp-mmr-primitives", "sp-runtime", - "sp-tracing", + "sp-tracing 10.0.0", "substrate-prometheus-endpoint", "substrate-test-runtime-client", "tempfile", @@ -14875,6 +15465,7 @@ dependencies = [ "sc-network", "sc-network-common", "sc-network-gossip", + "sc-network-sync", "sc-network-test", "sc-telemetry", "sc-transaction-pool-api", @@ -14891,7 +15482,7 @@ dependencies = [ "sp-keyring", "sp-keystore", "sp-runtime", - "sp-tracing", + "sp-tracing 10.0.0", "substrate-prometheus-endpoint", "substrate-test-runtime-client", "thiserror", @@ -15027,17 +15618,17 @@ dependencies = [ "schnellru", "sp-api", "sp-core", - "sp-externalities", + "sp-externalities 0.19.0", "sp-io", "sp-maybe-compressed-blob", "sp-panic-handler", "sp-runtime", - "sp-runtime-interface", + "sp-runtime-interface 17.0.0", "sp-state-machine", - "sp-tracing", + "sp-tracing 10.0.0", "sp-trie", "sp-version", - "sp-wasm-interface", + "sp-wasm-interface 14.0.0", "substrate-test-runtime", "tempfile", "tracing", @@ -15051,7 +15642,7 @@ version = "0.10.0-dev" dependencies = [ "sc-allocator", "sp-maybe-compressed-blob", - "sp-wasm-interface", + "sp-wasm-interface 14.0.0", "thiserror", "wasm-instrument 0.3.0", ] @@ -15066,14 +15657,15 @@ dependencies = [ "libc", "log", "parity-scale-codec", + "parking_lot 0.12.1", "paste", "rustix 0.36.15", "sc-allocator", "sc-executor-common", "sc-runtime-test", "sp-io", - "sp-runtime-interface", - "sp-wasm-interface", + "sp-runtime-interface 17.0.0", + "sp-wasm-interface 14.0.0", "tempfile", "wasmtime", "wat", @@ -15090,6 +15682,7 @@ dependencies = [ "sc-client-api", "sc-network", "sc-network-common", + "sc-network-sync", "sp-blockchain", "sp-runtime", ] @@ -15173,7 +15766,7 @@ dependencies = [ "sp-core", "sp-runtime", "sp-test-primitives", - "sp-tracing", + "sp-tracing 10.0.0", "substrate-prometheus-endpoint", "substrate-test-runtime", "substrate-test-runtime-client", @@ -15242,6 +15835,7 @@ dependencies = [ "quickcheck", "sc-network", "sc-network-common", + "sc-network-sync", "schnellru", "sp-runtime", "substrate-prometheus-endpoint", @@ -15282,6 +15876,7 @@ dependencies = [ "parity-scale-codec", "sc-network", "sc-network-common", + "sc-network-sync", "sp-consensus", "sp-statement-store", "substrate-prometheus-endpoint", @@ -15319,7 +15914,7 @@ dependencies = [ "sp-core", "sp-runtime", 
"sp-test-primitives", - "sp-tracing", + "sp-tracing 10.0.0", "substrate-prometheus-endpoint", "substrate-test-runtime-client", "thiserror", @@ -15351,7 +15946,7 @@ dependencies = [ "sp-consensus", "sp-core", "sp-runtime", - "sp-tracing", + "sp-tracing 10.0.0", "substrate-test-runtime", "substrate-test-runtime-client", "tokio", @@ -15368,6 +15963,7 @@ dependencies = [ "parity-scale-codec", "sc-network", "sc-network-common", + "sc-network-sync", "sc-utils", "sp-consensus", "sp-runtime", @@ -15404,11 +16000,11 @@ dependencies = [ "sp-api", "sp-consensus", "sp-core", - "sp-externalities", + "sp-externalities 0.19.0", "sp-keystore", "sp-offchain", "sp-runtime", - "sp-tracing", + "sp-tracing 10.0.0", "substrate-test-runtime-client", "threadpool", "tokio", @@ -15523,6 +16119,7 @@ dependencies = [ "sp-consensus", "sp-core", "sp-maybe-compressed-blob", + "sp-rpc", "sp-runtime", "sp-version", "substrate-test-runtime", @@ -15539,8 +16136,8 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-runtime-interface", - "sp-std", + "sp-runtime-interface 17.0.0", + "sp-std 8.0.0", "substrate-wasm-builder", ] @@ -15559,7 +16156,6 @@ dependencies = [ "parking_lot 0.12.1", "pin-project", "rand 0.8.5", - "sc-block-builder", "sc-chain-spec", "sc-client-api", "sc-client-db", @@ -15588,12 +16184,12 @@ dependencies = [ "sp-blockchain", "sp-consensus", "sp-core", - "sp-externalities", + "sp-externalities 0.19.0", "sp-keystore", "sp-runtime", "sp-session", "sp-state-machine", - "sp-storage", + "sp-storage 13.0.0", "sp-transaction-pool", "sp-transaction-storage-proof", "sp-trie", @@ -15636,8 +16232,8 @@ dependencies = [ "sp-io", "sp-runtime", "sp-state-machine", - "sp-storage", - "sp-tracing", + "sp-storage 13.0.0", + "sp-tracing 10.0.0", "sp-trie", "substrate-test-runtime", "substrate-test-runtime-client", @@ -15710,6 +16306,7 @@ dependencies = [ name = "sc-sysinfo" version = "6.0.0-dev" dependencies = [ + "derive_more", "futures", "libc", "log", @@ -15722,7 +16319,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -15765,7 +16362,7 @@ dependencies = [ "sp-core", "sp-rpc", "sp-runtime", - "sp-tracing", + "sp-tracing 10.0.0", "thiserror", "tracing", "tracing-log", @@ -15806,7 +16403,7 @@ dependencies = [ "sp-consensus", "sp-core", "sp-runtime", - "sp-tracing", + "sp-tracing 10.0.0", "sp-transaction-pool", "substrate-prometheus-endpoint", "substrate-test-runtime", @@ -15848,9 +16445,9 @@ dependencies = [ [[package]] name = "scale-info" -version = "2.9.0" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35c0a159d0c45c12b20c5a844feb1fe4bea86e28f17b92a5f0c42193634d3782" +checksum = "7f7d66a1128282b7ef025a8ead62a4a9fcf017382ec53b8ffbf4d7bf77bd3c60" dependencies = [ "bitvec", "cfg-if", @@ -15862,9 +16459,9 @@ dependencies = [ [[package]] name = "scale-info-derive" -version = "2.9.0" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "912e55f6d20e0e80d63733872b40e1227c0bce1e1ab81ba67d696339bfd7fd29" +checksum = "abf2c68b89cafb3b8d918dd07b42be0da66ff202cf1155c5739a4e0c1ea0dc19" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -15998,20 +16595,29 @@ dependencies = [ "zeroize", ] +[[package]] +name = "seccompiler" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "345a3e4dddf721a478089d4697b83c6c0a8f5bf16086f6c13397e4534eb6e2e5" +dependencies = [ + "libc", +] + [[package]] name = "secp256k1" -version = "0.24.3" +version = 
"0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b1629c9c557ef9b293568b338dddfc8208c98a18c59d722a9d53f859d9c9b62" +checksum = "2acea373acb8c21ecb5a23741452acd2593ed44ee3d343e72baaa143bc89d0d5" dependencies = [ "secp256k1-sys", ] [[package]] name = "secp256k1-sys" -version = "0.6.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83080e2c2fc1006e625be82e5d1eb6a43b7fd9578b617fcc55814daf286bba4b" +checksum = "09e67c467c38fd24bd5499dc9a18183b31575c12ee549197e3e20d57aa4fe3b7" dependencies = [ "cc", ] @@ -16064,7 +16670,6 @@ dependencies = [ "pallet-balances", "pallet-sudo", "pallet-timestamp", - "parachain-info", "parachains-common", "parity-scale-codec", "scale-info", @@ -16077,9 +16682,10 @@ dependencies = [ "sp-offchain", "sp-runtime", "sp-session", - "sp-std", + "sp-std 8.0.0", "sp-transaction-pool", "sp-version", + "staging-parachain-info", "substrate-wasm-builder", ] @@ -16153,9 +16759,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.107" +version = "1.0.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" +checksum = "3d1c7e3eac408d115102c4c24ad393e0821bb3a5df4d506a80f85f7a742a526b" dependencies = [ "itoa", "ryu", @@ -16311,8 +16917,8 @@ dependencies = [ "frame-system", "frame-try-runtime", "pallet-aura", + "pallet-message-queue", "pallet-timestamp", - "parachain-info", "parachains-common", "parity-scale-codec", "scale-info", @@ -16325,9 +16931,10 @@ dependencies = [ "sp-offchain", "sp-runtime", "sp-session", - "sp-std", + "sp-std 8.0.0", "sp-transaction-pool", "sp-version", + "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", @@ -16392,6 +16999,11 @@ dependencies = [ "wide", ] +[[package]] +name = "simple-mermaid" +version = "0.1.0" +source = "git+https://github.com/kianenigma/simple-mermaid.git?rev=e48b187bcfd5cc75111acd9d241f1bd36604344b#e48b187bcfd5cc75111acd9d241f1bd36604344b" + [[package]] name = "siphasher" version = "0.3.11" @@ -16421,7 +17033,7 @@ dependencies = [ "parity-scale-codec", "paste", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -16616,11 +17228,11 @@ dependencies = [ "scale-info", "sp-api-proc-macro", "sp-core", - "sp-externalities", + "sp-externalities 0.19.0", "sp-metadata-ir", "sp-runtime", "sp-state-machine", - "sp-std", + "sp-std 8.0.0", "sp-test-primitives", "sp-trie", "sp-version", @@ -16657,7 +17269,7 @@ dependencies = [ "sp-core", "sp-runtime", "sp-state-machine", - "sp-tracing", + "sp-tracing 10.0.0", "sp-version", "static_assertions", "substrate-test-runtime-client", @@ -16673,7 +17285,7 @@ dependencies = [ "serde", "sp-core", "sp-io", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -16700,7 +17312,7 @@ dependencies = [ "scale-info", "serde", "sp-core", - "sp-std", + "sp-std 8.0.0", "static_assertions", ] @@ -16715,98 +17327,22 @@ dependencies = [ "sp-arithmetic", ] -[[package]] -name = "sp-ark-bls12-377" -version = "0.4.1-beta" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9b60ba7d8fbb82e21f5be499b02438c9a79365acb441a4dc3993179f09c4cc9" -dependencies = [ - "ark-bls12-377", - "ark-ff", - "ark-r1cs-std", - "ark-scale", - "ark-std", - "parity-scale-codec", - "sp-ark-models", -] - [[package]] name = "sp-ark-bls12-381" -version = "0.4.1-beta" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c2cd101171d2e988a4e1b2320ad3f26f8746a263110c7153213fe86293e0552b" -dependencies = [ - "ark-bls12-381", - "ark-ff", - "ark-scale", - "ark-serialize", - "ark-std", - "parity-scale-codec", - "sp-ark-models", -] - -[[package]] -name = "sp-ark-bw6-761" -version = "0.4.1-beta" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d94d66ba98893cc42dfe81d5b5dee9142577176bdbdba80ec25a37d8cdffdbd5" -dependencies = [ - "ark-bw6-761", - "ark-ff", - "ark-scale", - "ark-std", - "parity-scale-codec", - "sp-ark-models", -] - -[[package]] -name = "sp-ark-ed-on-bls12-377" -version = "0.4.1-beta" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37f6ea96c9b1cd4cbd05d741225ff7f6328ab035bda16cf3fac105c87ad98959" +version = "0.4.2" +source = "git+https://github.com/paritytech/arkworks-substrate#caa2eed74beb885dd07c7db5f916f2281dad818f" dependencies = [ - "ark-ed-on-bls12-377", - "ark-ff", - "ark-r1cs-std", - "ark-scale", - "ark-serialize", - "ark-std", - "parity-scale-codec", - "sp-ark-models", + "ark-bls12-381-ext", + "sp-crypto-ec-utils 0.4.1 (git+https://github.com/paritytech/polkadot-sdk)", ] [[package]] name = "sp-ark-ed-on-bls12-381-bandersnatch" -version = "0.4.1-beta" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4db7a801260397cd58077befcee87acfdde8c189f48718bba1bc3783c799b67b" -dependencies = [ - "ark-ec", - "ark-ed-on-bls12-381-bandersnatch", - "ark-ff", - "ark-r1cs-std", - "ark-scale", - "ark-std", - "parity-scale-codec", - "sp-ark-bls12-381", - "sp-ark-models", -] - -[[package]] -name = "sp-ark-models" -version = "0.4.1-beta" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd77599e09f12893739e1ef822ae065f2f46c3be040ba1979bb786ae21059f44" +version = "0.4.2" +source = "git+https://github.com/paritytech/arkworks-substrate#caa2eed74beb885dd07c7db5f916f2281dad818f" dependencies = [ - "ark-ec", - "ark-ff", - "ark-serialize", - "ark-std", - "derivative", - "getrandom 0.2.10", - "itertools 0.10.5", - "num-traits", - "zeroize", + "ark-ed-on-bls12-381-bandersnatch-ext", + "sp-crypto-ec-utils 0.4.1 (git+https://github.com/paritytech/polkadot-sdk)", ] [[package]] @@ -16818,7 +17354,7 @@ dependencies = [ "sp-api", "sp-application-crypto", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -16828,7 +17364,7 @@ dependencies = [ "sp-api", "sp-inherents", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -16875,7 +17411,7 @@ dependencies = [ "sp-consensus-slots", "sp-inherents", "sp-runtime", - "sp-std", + "sp-std 8.0.0", "sp-timestamp", ] @@ -16893,7 +17429,7 @@ dependencies = [ "sp-core", "sp-inherents", "sp-runtime", - "sp-std", + "sp-std 8.0.0", "sp-timestamp", ] @@ -16912,7 +17448,7 @@ dependencies = [ "sp-io", "sp-mmr-primitives", "sp-runtime", - "sp-std", + "sp-std 8.0.0", "strum", "w3f-bls", ] @@ -16931,7 +17467,7 @@ dependencies = [ "sp-core", "sp-keystore", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -16942,7 +17478,7 @@ dependencies = [ "sp-api", "sp-core", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -16957,7 +17493,7 @@ dependencies = [ "sp-consensus-slots", "sp-core", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -16967,7 +17503,7 @@ dependencies = [ "parity-scale-codec", "scale-info", "serde", - "sp-std", + "sp-std 8.0.0", "sp-timestamp", ] @@ -16977,6 +17513,7 @@ version = "21.0.0" dependencies = [ "array-bytes 6.1.0", "bandersnatch_vrfs", + "bip39", "bitflags 1.3.2", "blake2 0.10.6", "bounded-collections", @@ 
-16988,6 +17525,7 @@ dependencies = [ "hash-db", "hash256-std-hasher", "impl-serde", + "itertools 0.10.5", "lazy_static", "libsecp256k1", "log", @@ -17006,20 +17544,29 @@ dependencies = [ "serde_json", "sp-core-hashing", "sp-core-hashing-proc-macro", - "sp-debug-derive", - "sp-externalities", - "sp-runtime-interface", - "sp-std", - "sp-storage", + "sp-debug-derive 8.0.0", + "sp-externalities 0.19.0", + "sp-runtime-interface 17.0.0", + "sp-std 8.0.0", + "sp-storage 13.0.0", "ss58-registry", "substrate-bip39", "thiserror", - "tiny-bip39", "tracing", "w3f-bls", "zeroize", ] +[[package]] +name = "sp-core-fuzz" +version = "0.0.0" +dependencies = [ + "lazy_static", + "libfuzzer-sys", + "regex", + "sp-core", +] + [[package]] name = "sp-core-hashing" version = "9.0.0" @@ -17043,29 +17590,43 @@ dependencies = [ [[package]] name = "sp-crypto-ec-utils" -version = "0.4.0" +version = "0.4.1" dependencies = [ - "ark-algebra-test-templates", "ark-bls12-377", + "ark-bls12-377-ext", "ark-bls12-381", + "ark-bls12-381-ext", "ark-bw6-761", + "ark-bw6-761-ext", "ark-ec", "ark-ed-on-bls12-377", + "ark-ed-on-bls12-377-ext", "ark-ed-on-bls12-381-bandersnatch", - "ark-ff", + "ark-ed-on-bls12-381-bandersnatch-ext", "ark-scale", - "ark-serialize", - "ark-std", - "parity-scale-codec", - "sp-ark-bls12-377", - "sp-ark-bls12-381", - "sp-ark-bw6-761", - "sp-ark-ed-on-bls12-377", - "sp-ark-ed-on-bls12-381-bandersnatch", - "sp-ark-models", - "sp-io", - "sp-runtime-interface", - "sp-std", + "sp-runtime-interface 17.0.0", + "sp-std 8.0.0", +] + +[[package]] +name = "sp-crypto-ec-utils" +version = "0.4.1" +source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" +dependencies = [ + "ark-bls12-377", + "ark-bls12-377-ext", + "ark-bls12-381", + "ark-bls12-381-ext", + "ark-bw6-761", + "ark-bw6-761-ext", + "ark-ec", + "ark-ed-on-bls12-377", + "ark-ed-on-bls12-377-ext", + "ark-ed-on-bls12-381-bandersnatch", + "ark-ed-on-bls12-381-bandersnatch-ext", + "ark-scale", + "sp-runtime-interface 17.0.0 (git+https://github.com/paritytech/polkadot-sdk)", + "sp-std 8.0.0 (git+https://github.com/paritytech/polkadot-sdk)", ] [[package]] @@ -17085,14 +17646,35 @@ dependencies = [ "syn 2.0.38", ] +[[package]] +name = "sp-debug-derive" +version = "8.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.38", +] + +[[package]] +name = "sp-externalities" +version = "0.19.0" +dependencies = [ + "environmental", + "parity-scale-codec", + "sp-std 8.0.0", + "sp-storage 13.0.0", +] + [[package]] name = "sp-externalities" version = "0.19.0" +source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" dependencies = [ "environmental", "parity-scale-codec", - "sp-std", - "sp-storage", + "sp-std 8.0.0 (git+https://github.com/paritytech/polkadot-sdk)", + "sp-storage 13.0.0 (git+https://github.com/paritytech/polkadot-sdk)", ] [[package]] @@ -17102,7 +17684,7 @@ dependencies = [ "serde_json", "sp-api", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -17115,7 +17697,7 @@ dependencies = [ "parity-scale-codec", "scale-info", "sp-runtime", - "sp-std", + "sp-std 8.0.0", "thiserror", ] @@ -17131,12 +17713,12 @@ dependencies = [ "rustversion", "secp256k1", "sp-core", - "sp-externalities", + "sp-externalities 0.19.0", "sp-keystore", - "sp-runtime-interface", + "sp-runtime-interface 17.0.0", "sp-state-machine", - "sp-std", - "sp-tracing", + "sp-std 8.0.0", + "sp-tracing 
10.0.0", "sp-trie", "tracing", "tracing-core", @@ -17161,7 +17743,7 @@ dependencies = [ "rand 0.7.3", "rand_chacha 0.2.2", "sp-core", - "sp-externalities", + "sp-externalities 0.19.0", "thiserror", ] @@ -17180,7 +17762,7 @@ dependencies = [ "frame-metadata", "parity-scale-codec", "scale-info", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -17191,7 +17773,7 @@ dependencies = [ "scale-info", "sp-api", "sp-application-crypto", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -17206,9 +17788,9 @@ dependencies = [ "serde", "sp-api", "sp-core", - "sp-debug-derive", + "sp-debug-derive 8.0.0", "sp-runtime", - "sp-std", + "sp-std 8.0.0", "thiserror", ] @@ -17223,7 +17805,7 @@ dependencies = [ "sp-arithmetic", "sp-core", "sp-runtime", - "sp-std", + "sp-std 8.0.0", "substrate-test-utils", ] @@ -17286,8 +17868,8 @@ dependencies = [ "sp-core", "sp-io", "sp-state-machine", - "sp-std", - "sp-tracing", + "sp-std 8.0.0", + "sp-tracing 10.0.0", "sp-weights", "substrate-test-runtime-client", "zstd 0.12.4", @@ -17303,19 +17885,37 @@ dependencies = [ "primitive-types", "rustversion", "sp-core", - "sp-externalities", + "sp-externalities 0.19.0", "sp-io", - "sp-runtime-interface-proc-macro", + "sp-runtime-interface-proc-macro 11.0.0", "sp-runtime-interface-test-wasm", "sp-state-machine", - "sp-std", - "sp-storage", - "sp-tracing", - "sp-wasm-interface", + "sp-std 8.0.0", + "sp-storage 13.0.0", + "sp-tracing 10.0.0", + "sp-wasm-interface 14.0.0", "static_assertions", "trybuild", ] +[[package]] +name = "sp-runtime-interface" +version = "17.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" +dependencies = [ + "bytes", + "impl-trait-for-tuples", + "parity-scale-codec", + "primitive-types", + "sp-externalities 0.19.0 (git+https://github.com/paritytech/polkadot-sdk)", + "sp-runtime-interface-proc-macro 11.0.0 (git+https://github.com/paritytech/polkadot-sdk)", + "sp-std 8.0.0 (git+https://github.com/paritytech/polkadot-sdk)", + "sp-storage 13.0.0 (git+https://github.com/paritytech/polkadot-sdk)", + "sp-tracing 10.0.0 (git+https://github.com/paritytech/polkadot-sdk)", + "sp-wasm-interface 14.0.0 (git+https://github.com/paritytech/polkadot-sdk)", + "static_assertions", +] + [[package]] name = "sp-runtime-interface-proc-macro" version = "11.0.0" @@ -17327,6 +17927,18 @@ dependencies = [ "syn 2.0.38", ] +[[package]] +name = "sp-runtime-interface-proc-macro" +version = "11.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" +dependencies = [ + "Inflector", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 2.0.38", +] + [[package]] name = "sp-runtime-interface-test" version = "2.0.0" @@ -17335,7 +17947,7 @@ dependencies = [ "sc-executor-common", "sp-io", "sp-runtime", - "sp-runtime-interface", + "sp-runtime-interface 17.0.0", "sp-runtime-interface-test-wasm", "sp-runtime-interface-test-wasm-deprecated", "sp-state-machine", @@ -17350,8 +17962,8 @@ dependencies = [ "bytes", "sp-core", "sp-io", - "sp-runtime-interface", - "sp-std", + "sp-runtime-interface 17.0.0", + "sp-std 8.0.0", "substrate-wasm-builder", ] @@ -17361,7 +17973,7 @@ version = "2.0.0" dependencies = [ "sp-core", "sp-io", - "sp-runtime-interface", + "sp-runtime-interface 17.0.0", "substrate-wasm-builder", ] @@ -17376,7 +17988,7 @@ dependencies = [ "sp-keystore", "sp-runtime", "sp-staking", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -17389,7 +18001,7 @@ dependencies = [ "serde", "sp-core", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] 
@@ -17406,10 +18018,10 @@ dependencies = [ "rand 0.8.5", "smallvec", "sp-core", - "sp-externalities", + "sp-externalities 0.19.0", "sp-panic-handler", "sp-runtime", - "sp-std", + "sp-std 8.0.0", "sp-trie", "thiserror", "tracing", @@ -17431,10 +18043,10 @@ dependencies = [ "sp-api", "sp-application-crypto", "sp-core", - "sp-externalities", + "sp-externalities 0.19.0", "sp-runtime", - "sp-runtime-interface", - "sp-std", + "sp-runtime-interface 17.0.0", + "sp-std 8.0.0", "thiserror", "x25519-dalek 2.0.0", ] @@ -17443,16 +18055,34 @@ dependencies = [ name = "sp-std" version = "8.0.0" +[[package]] +name = "sp-std" +version = "8.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" + +[[package]] +name = "sp-storage" +version = "13.0.0" +dependencies = [ + "impl-serde", + "parity-scale-codec", + "ref-cast", + "serde", + "sp-debug-derive 8.0.0", + "sp-std 8.0.0", +] + [[package]] name = "sp-storage" version = "13.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" dependencies = [ "impl-serde", "parity-scale-codec", "ref-cast", "serde", - "sp-debug-derive", - "sp-std", + "sp-debug-derive 8.0.0 (git+https://github.com/paritytech/polkadot-sdk)", + "sp-std 8.0.0 (git+https://github.com/paritytech/polkadot-sdk)", ] [[package]] @@ -17465,7 +18095,7 @@ dependencies = [ "sp-application-crypto", "sp-core", "sp-runtime", - "sp-std", + "sp-std 8.0.0", ] [[package]] @@ -17476,7 +18106,7 @@ dependencies = [ "parity-scale-codec", "sp-inherents", "sp-runtime", - "sp-std", + "sp-std 8.0.0", "thiserror", ] @@ -17485,7 +18115,19 @@ name = "sp-tracing" version = "10.0.0" dependencies = [ "parity-scale-codec", - "sp-std", + "sp-std 8.0.0", + "tracing", + "tracing-core", + "tracing-subscriber", +] + +[[package]] +name = "sp-tracing" +version = "10.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" +dependencies = [ + "parity-scale-codec", + "sp-std 8.0.0 (git+https://github.com/paritytech/polkadot-sdk)", "tracing", "tracing-core", "tracing-subscriber", @@ -17509,7 +18151,7 @@ dependencies = [ "sp-core", "sp-inherents", "sp-runtime", - "sp-std", + "sp-std 8.0.0", "sp-trie", ] @@ -17527,11 +18169,12 @@ dependencies = [ "nohash-hasher", "parity-scale-codec", "parking_lot 0.12.1", + "rand 0.8.5", "scale-info", "schnellru", "sp-core", "sp-runtime", - "sp-std", + "sp-std 8.0.0", "thiserror", "tracing", "trie-bench", @@ -17551,7 +18194,7 @@ dependencies = [ "serde", "sp-core-hashing-proc-macro", "sp-runtime", - "sp-std", + "sp-std 8.0.0", "sp-version-proc-macro", "thiserror", ] @@ -17575,7 +18218,20 @@ dependencies = [ "impl-trait-for-tuples", "log", "parity-scale-codec", - "sp-std", + "sp-std 8.0.0", + "wasmtime", +] + +[[package]] +name = "sp-wasm-interface" +version = "14.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" +dependencies = [ + "anyhow", + "impl-trait-for-tuples", + "log", + "parity-scale-codec", + "sp-std 8.0.0 (git+https://github.com/paritytech/polkadot-sdk)", "wasmtime", ] @@ -17589,8 +18245,8 @@ dependencies = [ "smallvec", "sp-arithmetic", "sp-core", - "sp-debug-derive", - "sp-std", + "sp-debug-derive 8.0.0", + "sp-std 8.0.0", ] [[package]] @@ -17657,6 +18313,181 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" +[[package]] +name = "staging-chain-spec-builder" +version = "2.0.0" 
+dependencies = [ + "clap 4.4.6", + "log", + "sc-chain-spec", + "serde_json", + "sp-tracing 10.0.0", +] + +[[package]] +name = "staging-node-cli" +version = "3.0.0-dev" +dependencies = [ + "array-bytes 6.1.0", + "assert_cmd", + "clap 4.4.6", + "clap_complete", + "criterion 0.4.0", + "frame-benchmarking-cli", + "frame-system", + "frame-system-rpc-runtime-api", + "futures", + "jsonrpsee", + "kitchensink-runtime", + "log", + "nix 0.26.2", + "node-primitives", + "node-rpc", + "pallet-asset-conversion-tx-payment", + "pallet-asset-tx-payment", + "pallet-assets", + "pallet-balances", + "pallet-im-online", + "pallet-skip-feeless-payment", + "pallet-timestamp", + "parity-scale-codec", + "platforms", + "rand 0.8.5", + "regex", + "sc-authority-discovery", + "sc-basic-authorship", + "sc-block-builder", + "sc-chain-spec", + "sc-cli", + "sc-client-api", + "sc-client-db", + "sc-consensus", + "sc-consensus-babe", + "sc-consensus-epochs", + "sc-consensus-grandpa", + "sc-consensus-slots", + "sc-executor", + "sc-keystore", + "sc-mixnet", + "sc-network", + "sc-network-common", + "sc-network-statement", + "sc-network-sync", + "sc-offchain", + "sc-rpc", + "sc-service", + "sc-service-test", + "sc-statement-store", + "sc-storage-monitor", + "sc-sync-state-rpc", + "sc-sysinfo", + "sc-telemetry", + "sc-transaction-pool", + "sc-transaction-pool-api", + "serde", + "serde_json", + "soketto", + "sp-api", + "sp-authority-discovery", + "sp-blockchain", + "sp-consensus", + "sp-consensus-babe", + "sp-consensus-grandpa", + "sp-core", + "sp-inherents", + "sp-io", + "sp-keyring", + "sp-keystore", + "sp-mixnet", + "sp-runtime", + "sp-statement-store", + "sp-timestamp", + "sp-tracing 10.0.0", + "sp-transaction-storage-proof", + "staging-node-executor", + "staging-node-inspect", + "substrate-build-script-utils", + "substrate-cli-test-utils", + "substrate-frame-cli", + "substrate-rpc-client", + "tempfile", + "tokio", + "tokio-util", + "try-runtime-cli", + "wait-timeout", +] + +[[package]] +name = "staging-node-executor" +version = "3.0.0-dev" +dependencies = [ + "criterion 0.4.0", + "frame-benchmarking", + "frame-support", + "frame-system", + "futures", + "kitchensink-runtime", + "node-primitives", + "node-testing", + "pallet-balances", + "pallet-contracts", + "pallet-glutton", + "pallet-im-online", + "pallet-root-testing", + "pallet-sudo", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-treasury", + "parity-scale-codec", + "sc-executor", + "scale-info", + "serde_json", + "sp-application-crypto", + "sp-consensus-babe", + "sp-core", + "sp-externalities 0.19.0", + "sp-keyring", + "sp-keystore", + "sp-runtime", + "sp-state-machine", + "sp-statement-store", + "sp-tracing 10.0.0", + "sp-trie", + "wat", +] + +[[package]] +name = "staging-node-inspect" +version = "0.9.0-dev" +dependencies = [ + "clap 4.4.6", + "parity-scale-codec", + "sc-cli", + "sc-client-api", + "sc-service", + "sp-blockchain", + "sp-core", + "sp-runtime", + "thiserror", +] + +[[package]] +name = "staging-parachain-info" +version = "0.1.0" +dependencies = [ + "cumulus-primitives-core", + "frame-support", + "frame-system", + "parity-scale-codec", + "scale-info", + "sp-runtime", + "sp-std 8.0.0", +] + +[[package]] +name = "staging-tracking-allocator" +version = "1.0.0" + [[package]] name = "staging-xcm" version = "1.0.0" @@ -17700,7 +18531,7 @@ dependencies = [ "sp-arithmetic", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", "sp-weights", "staging-xcm", "staging-xcm-executor", @@ -17716,11 +18547,12 @@ dependencies = [ "impl-trait-for-tuples", 
"log", "parity-scale-codec", + "scale-info", "sp-arithmetic", "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", "sp-weights", "staging-xcm", ] @@ -17843,9 +18675,8 @@ dependencies = [ name = "substrate" version = "1.0.0" dependencies = [ - "aquamarine", - "chain-spec-builder", "frame-support", + "sc-chain-spec", "sc-cli", "sc-consensus-aura", "sc-consensus-babe", @@ -17854,7 +18685,9 @@ dependencies = [ "sc-consensus-manual-seal", "sc-consensus-pow", "sc-service", + "simple-mermaid", "sp-runtime", + "staging-chain-spec-builder", "subkey", ] @@ -17882,12 +18715,12 @@ dependencies = [ "assert_cmd", "futures", "nix 0.26.2", - "node-cli", "node-primitives", "regex", "sc-cli", "sc-service", "sp-rpc", + "staging-node-cli", "substrate-rpc-client", "tokio", ] @@ -17917,7 +18750,7 @@ dependencies = [ "serde", "sp-core", "sp-runtime", - "sp-storage", + "sp-storage 13.0.0", "tokio", ] @@ -17939,7 +18772,7 @@ dependencies = [ "sp-blockchain", "sp-core", "sp-runtime", - "sp-tracing", + "sp-tracing 10.0.0", "substrate-test-runtime-client", "tokio", ] @@ -18042,7 +18875,7 @@ dependencies = [ "sp-consensus-babe", "sp-consensus-grandpa", "sp-core", - "sp-externalities", + "sp-externalities 0.19.0", "sp-genesis-builder", "sp-inherents", "sp-io", @@ -18051,8 +18884,8 @@ dependencies = [ "sp-runtime", "sp-session", "sp-state-machine", - "sp-std", - "sp-tracing", + "sp-std 8.0.0", + "sp-tracing 10.0.0", "sp-transaction-pool", "sp-trie", "sp-version", @@ -18325,7 +19158,7 @@ dependencies = [ "cfg-if", "fastrand 2.0.0", "redox_syscall 0.3.5", - "rustix 0.38.8", + "rustix 0.38.21", "windows-sys 0.48.0", ] @@ -18338,6 +19171,16 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "terminal_size" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7" +dependencies = [ + "rustix 0.38.21", + "windows-sys 0.48.0", +] + [[package]] name = "termtree" version = "0.4.1" @@ -18352,7 +19195,7 @@ dependencies = [ "parity-scale-codec", "polkadot-parachain-primitives", "sp-io", - "sp-std", + "sp-std 8.0.0", "substrate-wasm-builder", "tiny-keccak", ] @@ -18400,7 +19243,7 @@ dependencies = [ "parity-scale-codec", "polkadot-parachain-primitives", "sp-io", - "sp-std", + "sp-std 8.0.0", "substrate-wasm-builder", "tiny-keccak", ] @@ -18455,6 +19298,19 @@ dependencies = [ "sp-weights", ] +[[package]] +name = "testnets-common" +version = "1.0.0" +dependencies = [ + "frame-support", + "polkadot-core-primitives", + "rococo-runtime-constants", + "smallvec", + "sp-runtime", + "substrate-wasm-builder", + "westend-runtime-constants", +] + [[package]] name = "textwrap" version = "0.16.0" @@ -18463,9 +19319,9 @@ checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" [[package]] name = "thiserror" -version = "1.0.48" +version = "1.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d6d7a740b8a666a7e828dd00da9c0dc290dff53154ea77ac109281de90589b7" +checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2" dependencies = [ "thiserror-impl", ] @@ -18492,9 +19348,9 @@ dependencies = [ [[package]] name = "thiserror-impl" -version = "1.0.48" +version = "1.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" +checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" dependencies = [ "proc-macro2", "quote", @@ -18570,6 +19426,17 @@ 
dependencies = [ "tikv-jemalloc-sys", ] +[[package]] +name = "time" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b797afad3f312d1c66a56d11d0316f916356d11bd158fbc6ca6389ff6bf805a" +dependencies = [ + "libc", + "wasi 0.10.0+wasi-snapshot-preview1", + "winapi", +] + [[package]] name = "time" version = "0.3.27" @@ -18598,25 +19465,6 @@ dependencies = [ "time-core", ] -[[package]] -name = "tiny-bip39" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62cc94d358b5a1e84a5cb9109f559aa3c4d634d2b1b4de3d0fa4adc7c78e2861" -dependencies = [ - "anyhow", - "hmac 0.12.1", - "once_cell", - "pbkdf2 0.11.0", - "rand 0.8.5", - "rustc-hash", - "sha2 0.10.7", - "thiserror", - "unicode-normalization", - "wasm-bindgen", - "zeroize", -] - [[package]] name = "tiny-keccak" version = "2.0.2" @@ -18663,7 +19511,7 @@ dependencies = [ "mio", "num_cpus", "parking_lot 0.12.1", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.12", "signal-hook-registry", "socket2 0.5.3", "tokio-macros", @@ -18709,7 +19557,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" dependencies = [ "futures-core", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.12", "tokio", "tokio-util", ] @@ -18749,7 +19597,7 @@ dependencies = [ "futures-core", "futures-io", "futures-sink", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.12", "tokio", "tracing", ] @@ -18821,7 +19669,7 @@ dependencies = [ "http", "http-body", "http-range-header", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.12", "tower-layer", "tower-service", ] @@ -18846,7 +19694,7 @@ checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" dependencies = [ "cfg-if", "log", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.12", "tracing-attributes", "tracing-core", ] @@ -18887,7 +19735,6 @@ name = "tracing-gum" version = "1.0.0" dependencies = [ "coarsetime", - "polkadot-node-jaeger", "polkadot-primitives", "tracing", "tracing-gum-proc-macro", @@ -19071,8 +19918,8 @@ dependencies = [ "sp-consensus-aura", "sp-consensus-babe", "sp-core", - "sp-debug-derive", - "sp-externalities", + "sp-debug-derive 8.0.0", + "sp-externalities 0.19.0", "sp-inherents", "sp-io", "sp-keystore", @@ -19158,7 +20005,7 @@ checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ "cfg-if", "digest 0.10.7", - "rand 0.8.5", + "rand 0.7.3", "static_assertions", ] @@ -19422,6 +20269,12 @@ version = "0.9.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" +[[package]] +name = "wasi" +version = "0.10.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" + [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -19525,9 +20378,9 @@ dependencies = [ [[package]] name = "wasm-opt" -version = "0.114.1" +version = "0.116.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d005a95f934878a1fb446a816d51c3601a0120ff929005ba3bab3c749cfd1c7" +checksum = "fc942673e7684671f0c5708fc18993569d184265fd5223bb51fc8e5b9b6cfd52" dependencies = [ "anyhow", "libc", @@ -19541,9 +20394,9 @@ dependencies = [ [[package]] name = "wasm-opt-cxx-sys" -version = "0.114.1" +version = "0.116.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d04e240598162810fad3b2e96fa0dec6dba1eb65a03f3bd99a9248ab8b56caa" +checksum = "8c57b28207aa724318fcec6575fe74803c23f6f266fce10cbc9f3f116762f12e" dependencies = [ "anyhow", "cxx", @@ -19553,9 +20406,9 @@ dependencies = [ [[package]] name = "wasm-opt-sys" -version = "0.114.1" +version = "0.116.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2efd2aaca519d64098c4faefc8b7433a97ed511caf4c9e516384eb6aef1ff4f9" +checksum = "8a1cce564dc768dacbdb718fc29df2dba80bd21cb47d8f77ae7e3d95ceb98cbe" dependencies = [ "anyhow", "cc", @@ -19924,7 +20777,7 @@ dependencies = [ "sha2 0.10.7", "stun", "thiserror", - "time", + "time 0.3.27", "tokio", "turn", "url", @@ -20106,6 +20959,26 @@ dependencies = [ "winapi", ] +[[package]] +name = "westend-emulated-chain" +version = "0.0.0" +dependencies = [ + "emulated-integration-tests-common", + "pallet-im-online", + "pallet-staking", + "parachains-common", + "polkadot-primitives", + "sc-consensus-grandpa", + "serde_json", + "sp-authority-discovery", + "sp-consensus-babe", + "sp-consensus-beefy", + "sp-core", + "sp-runtime", + "westend-runtime", + "westend-runtime-constants", +] + [[package]] name = "westend-runtime" version = "1.0.0" @@ -20155,6 +21028,7 @@ dependencies = [ "pallet-proxy", "pallet-recovery", "pallet-referenda", + "pallet-root-testing", "pallet-scheduler", "pallet-session", "pallet-session-benchmarking", @@ -20202,9 +21076,9 @@ dependencies = [ "sp-runtime", "sp-session", "sp-staking", - "sp-std", - "sp-storage", - "sp-tracing", + "sp-std 8.0.0", + "sp-storage 13.0.0", + "sp-tracing 10.0.0", "sp-transaction-pool", "sp-version", "staging-xcm", @@ -20227,6 +21101,18 @@ dependencies = [ "sp-core", "sp-runtime", "sp-weights", + "staging-xcm", +] + +[[package]] +name = "westend-system-emulated-network" +version = "0.0.0" +dependencies = [ + "asset-hub-westend-emulated-chain", + "bridge-hub-westend-emulated-chain", + "emulated-integration-tests-common", + "penpal-emulated-chain", + "westend-emulated-chain", ] [[package]] @@ -20538,7 +21424,7 @@ dependencies = [ "ring 0.16.20", "rusticata-macros", "thiserror", - "time", + "time 0.3.27", ] [[package]] @@ -20556,7 +21442,7 @@ dependencies = [ "oid-registry 0.6.1", "rusticata-macros", "thiserror", - "time", + "time 0.3.27", ] [[package]] @@ -20573,6 +21459,7 @@ name = "xcm-emulator" version = "0.1.0" dependencies = [ "cumulus-pallet-parachain-system", + "cumulus-pallet-xcmp-queue", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", "cumulus-test-relay-sproof-builder", @@ -20593,8 +21480,8 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", - "sp-tracing", + "sp-std 8.0.0", + "sp-tracing 10.0.0", "staging-xcm", "staging-xcm-executor", ] @@ -20615,7 +21502,7 @@ dependencies = [ "sp-keyring", "sp-runtime", "sp-state-machine", - "sp-tracing", + "sp-tracing 10.0.0", "staging-xcm", "staging-xcm-executor", ] @@ -20627,7 +21514,9 @@ dependencies = [ "Inflector", "proc-macro2", "quote", + "staging-xcm", "syn 2.0.38", + "trybuild", ] [[package]] @@ -20641,7 +21530,7 @@ dependencies = [ "polkadot-parachain-primitives", "polkadot-runtime-parachains", "sp-io", - "sp-std", + "sp-std 8.0.0", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", @@ -20666,8 +21555,8 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", - "sp-tracing", + "sp-std 8.0.0", + "sp-tracing 10.0.0", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", @@ -20693,7 +21582,7 @@ dependencies = [ 
"sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 8.0.0", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", @@ -20726,7 +21615,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e17bb3549cc1321ae1296b9cdc2698e2b6cb1992adfa19a8c72e5b7a738f44cd" dependencies = [ - "time", + "time 0.3.27", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 75da6681465d23a45653b238c1325c4ffd480853..57079aa4d03dcc2584bff350a7ed00216a7ac6b2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,18 +14,18 @@ members = [ "bridges/modules/parachains", "bridges/modules/relayers", "bridges/modules/xcm-bridge-hub-router", - "bridges/primitives/chain-asset-hub-kusama", - "bridges/primitives/chain-asset-hub-polkadot", + "bridges/primitives/chain-asset-hub-rococo", + "bridges/primitives/chain-asset-hub-westend", "bridges/primitives/chain-bridge-hub-cumulus", "bridges/primitives/chain-bridge-hub-kusama", "bridges/primitives/chain-bridge-hub-polkadot", "bridges/primitives/chain-bridge-hub-rococo", - "bridges/primitives/chain-bridge-hub-wococo", + "bridges/primitives/chain-bridge-hub-westend", "bridges/primitives/chain-kusama", "bridges/primitives/chain-polkadot", "bridges/primitives/chain-polkadot-bulletin", "bridges/primitives/chain-rococo", - "bridges/primitives/chain-wococo", + "bridges/primitives/chain-westend", "bridges/primitives/header-chain", "bridges/primitives/messages", "bridges/primitives/parachains", @@ -60,23 +60,39 @@ members = [ "cumulus/parachain-template/pallets/template", "cumulus/parachain-template/runtime", "cumulus/parachains/common", - "cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend", - "cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo", + "cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo", + "cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend", + "cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo", + "cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend", "cumulus/parachains/integration-tests/emulated/common", + "cumulus/parachains/integration-tests/emulated/chains/relays/rococo", + "cumulus/parachains/integration-tests/emulated/chains/relays/westend", + "cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo", + "cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend", + "cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo", + "cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend", + "cumulus/parachains/integration-tests/emulated/networks/rococo-system", + "cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system", + "cumulus/parachains/integration-tests/emulated/networks/westend-system", + "cumulus/parachains/pallets/collective-content", "cumulus/parachains/pallets/parachain-info", "cumulus/parachains/pallets/ping", "cumulus/parachains/runtimes/assets/asset-hub-kusama", "cumulus/parachains/runtimes/assets/asset-hub-polkadot", + "cumulus/parachains/runtimes/assets/asset-hub-rococo", "cumulus/parachains/runtimes/assets/asset-hub-westend", "cumulus/parachains/runtimes/assets/common", "cumulus/parachains/runtimes/assets/test-utils", "cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama", "cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot", "cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo", + 
"cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend", "cumulus/parachains/runtimes/bridge-hubs/test-utils", "cumulus/parachains/runtimes/collectives/collectives-polkadot", + "cumulus/parachains/runtimes/collectives/collectives-westend", "cumulus/parachains/runtimes/contracts/contracts-rococo", "cumulus/parachains/runtimes/glutton/glutton-kusama", + "cumulus/parachains/runtimes/glutton/glutton-westend", "cumulus/parachains/runtimes/starters/seedling", "cumulus/parachains/runtimes/starters/shell", "cumulus/parachains/runtimes/test-utils", @@ -174,6 +190,8 @@ members = [ "polkadot/xcm/xcm-simulator", "polkadot/xcm/xcm-simulator/example", "polkadot/xcm/xcm-simulator/fuzzer", + "substrate/bin/minimal/node", + "substrate/bin/minimal/runtime", "substrate", "substrate/bin/node-template/node", "substrate/bin/node-template/pallets/template", @@ -246,6 +264,7 @@ members = [ "substrate/client/transaction-pool", "substrate/client/transaction-pool/api", "substrate/client/utils", + "substrate/frame", "substrate/frame/alliance", "substrate/frame/asset-conversion", "substrate/frame/asset-rate", @@ -268,8 +287,10 @@ members = [ "substrate/frame/child-bounties", "substrate/frame/collective", "substrate/frame/contracts", + "substrate/frame/contracts/fixtures", "substrate/frame/contracts/primitives", "substrate/frame/contracts/proc-macro", + "substrate/frame/contracts/mock-network", "substrate/frame/conviction-voting", "substrate/frame/core-fellowship", "substrate/frame/democracy", @@ -345,6 +366,7 @@ members = [ "substrate/frame/support/test", "substrate/frame/support/test/compile_pass", "substrate/frame/support/test/pallet", + "substrate/frame/support/test/stg_frame_crate", "substrate/frame/system", "substrate/frame/system/benchmarking", "substrate/frame/system/rpc/runtime-api", @@ -355,6 +377,7 @@ members = [ "substrate/frame/transaction-payment/asset-tx-payment", "substrate/frame/transaction-payment/rpc", "substrate/frame/transaction-payment/rpc/runtime-api", + "substrate/frame/transaction-payment/skip-feeless-payment", "substrate/frame/transaction-storage", "substrate/frame/treasury", "substrate/frame/try-runtime", @@ -382,6 +405,7 @@ members = [ "substrate/primitives/consensus/sassafras", "substrate/primitives/consensus/slots", "substrate/primitives/core", + "substrate/primitives/core/fuzz", "substrate/primitives/core/hashing", "substrate/primitives/core/hashing/proc-macro", "substrate/primitives/crypto/ec-utils", @@ -447,7 +471,7 @@ members = [ "substrate/utils/prometheus", "substrate/utils/wasm-builder", ] -default-members = [ "polkadot" ] +default-members = [ "polkadot", "substrate/bin/node/cli" ] [profile.release] # Polkadot runtime requires unwinding. diff --git a/bridges/README.md b/bridges/README.md index da46fe67d924acb2afffcf971bacb60b560f0cd5..a2ce213d2541c346361eb28125a06e3079e1c269 100644 --- a/bridges/README.md +++ b/bridges/README.md @@ -68,7 +68,7 @@ For example, consider the case below where we want to bridge two Substrate based ``` +---------------+ +---------------+ | | | | -| Rialto | | Millau | +| Rococo | | Westend | | | | | +-------+-------+ +-------+-------+ ^ ^ @@ -79,9 +79,9 @@ For example, consider the case below where we want to bridge two Substrate based +---------------+ ``` -The Millau chain must be able to accept Rialto headers and verify their integrity. It does this by using a runtime +The Rococo chain must be able to accept Westend headers and verify their integrity. It does this by using a runtime module designed to track GRANDPA finality. 
Since two blockchains can't interact directly they need an external service, -called a relayer, to communicate. The relayer will subscribe to new Rialto headers via RPC and submit them to the Millau +called a relayer, to communicate. The relayer will subscribe to new Westend headers via RPC and submit them to the Rococo chain for verification. Take a look at [Bridge High Level Documentation](./docs/high-level-overview.md) for more in-depth description of the @@ -94,164 +94,23 @@ Here's an overview of how the project is laid out. The main bits are the `bin`, messages between chains. ``` -├── bin // Node and Runtime for the various Substrate chains -│ └── ... -├── deployments // Useful tools for deploying test networks +├── modules // Substrate Runtime Modules (a.k.a Pallets) +│ ├── beefy // On-Chain BEEFY Light Client (in progress) +│ ├── grandpa // On-Chain GRANDPA Light Client +│ ├── messages // Cross Chain Message Passing +│ ├── parachains // On-Chain Parachains Light Client +│ ├── relayers // Relayer Rewards Registry +│ ├── xcm-bridge-hub // Multiple Dynamic Bridges Support +│ ├── xcm-bridge-hub-router // XCM Router that may be used to Connect to XCM Bridge Hub +├── primitives // Code shared between modules, runtimes, and relays │ └── ... -├── modules // Substrate Runtime Modules (a.k.a Pallets) -│ ├── beefy // On-Chain BEEFY Light Client (in progress) -│ ├── grandpa // On-Chain GRANDPA Light Client -│ ├── messages // Cross Chain Message Passing -│ ├── parachains // On-Chain Parachains Light Client -│ ├── relayers // Relayer rewards registry +├── relays // Application for sending finality proofs and messages between chains │ └── ... -├── primitives // Code shared between modules, runtimes, and relays -│ └── ... -├── relays // Application for sending finality proofs and messages between chains -│ └── ... -└── scripts // Useful development and maintenance scripts +└── scripts // Useful development and maintenance scripts ``` ## Running the Bridge -To run the Bridge you need to be able to connect the bridge relay node to the RPC interface of nodes on each side of the -bridge (source and target chain). - -There are 2 ways to run the bridge, described below: - -- building & running from source: with this option, you'll be able to run the bridge between two standalone chains that -are running GRANDPA finality gadget to achieve finality; - -- running a Docker Compose setup: this is a recommended option, where you'll see bridges with parachains, complex relays -and more. - -### Using the Source - -First you'll need to build the bridge nodes and relay. This can be done as follows: - -```bash -# In `parity-bridges-common` folder -cargo build -p rialto-bridge-node -cargo build -p millau-bridge-node -cargo build -p substrate-relay -``` - -### Running a Dev network - -We will launch a dev network to demonstrate how to relay a message between two Substrate based chains (named Rialto and -Millau). - -To do this we will need two nodes, two relayers which will relay headers, and two relayers which will relay messages. - -#### Running from local scripts - -To run a simple dev network you can use the scripts located in the [`deployments/local-scripts` -folder](./deployments/local-scripts). - -First, we must run the two Substrate nodes. - -```bash -# In `parity-bridges-common` folder -./deployments/local-scripts/run-rialto-node.sh -./deployments/local-scripts/run-millau-node.sh -``` - -After the nodes are up we can run the header relayers.
- -```bash -./deployments/local-scripts/relay-millau-to-rialto.sh -./deployments/local-scripts/relay-rialto-to-millau.sh -``` - -At this point you should see the relayer submitting headers from the Millau Substrate chain to the Rialto Substrate -chain. - -``` -# Header Relayer Logs -[Millau_to_Rialto_Sync] [date] DEBUG bridge Going to submit finality proof of Millau header #147 to Rialto -[...] [date] INFO bridge Synced 147 of 147 headers -[...] [date] DEBUG bridge Going to submit finality proof of Millau header #148 to Rialto -[...] [date] INFO bridge Synced 148 of 149 headers -``` - -Finally, we can run the message relayers. - -```bash -./deployments/local-scripts/relay-messages-millau-to-rialto.sh -./deployments/local-scripts/relay-messages-rialto-to-millau.sh -``` - -You will also see the message lane relayers listening for new messages. - -``` -# Message Relayer Logs -[Millau_to_Rialto_MessageLane_00000000] [date] DEBUG bridge Asking Millau::ReceivingConfirmationsDelivery about best message nonces -[...] [date] INFO bridge Synced Some(2) of Some(3) nonces in Millau::MessagesDelivery -> Rialto::MessagesDelivery race -[...] [date] DEBUG bridge Asking Millau::MessagesDelivery about message nonces -[...] [date] DEBUG bridge Received best nonces from Millau::ReceivingConfirmationsDelivery: TargetClientNonces { - latest_nonce: 0, nonces_data: () } -[...] [date] DEBUG bridge Asking Millau::ReceivingConfirmationsDelivery about finalized message nonces -[...] [date] DEBUG bridge Received finalized nonces from Millau::ReceivingConfirmationsDelivery: TargetClientNonces { - latest_nonce: 0, nonces_data: () } -[...] [date] DEBUG bridge Received nonces from Millau::MessagesDelivery: SourceClientNonces { new_nonces: {}, confirmed_nonce: Some(0) } -[...] [date] DEBUG bridge Asking Millau node about its state -[...] [date] DEBUG bridge Received state from Millau node: ClientState { best_self: HeaderId(1593, 0xacac***), best_finalized_self: - HeaderId(1590, 0x0be81d...), best_finalized_peer_at_best_self: HeaderId(0, 0xdcdd89...) } -``` - -To send a message see the ["How to send a message" section](#how-to-send-a-message). - -### How to send a message - -In this section we'll show you how to quickly send a bridge message. The message is just an encoded XCM `Trap(43)` -message. - -```bash -# In `parity-bridges-common` folder -./scripts/send-message-from-millau-rialto.sh -``` - -After sending a message you will see the following logs showing a message was successfully sent: - -``` -INFO bridge Sending message to Rialto. Size: 11. -TRACE bridge Sent transaction to Millau node: 0x5e68... -``` - -And at the Rialto node logs you'll something like this: - -``` -... runtime::bridge-messages: Received messages: total=1, valid=1. Weight used: Weight(ref_time: 1215065371, proof_size: - 48559)/Weight(ref_time: 1215065371, proof_size: 54703). -``` - -It means that the message has been delivered and dispatched. Message may be dispatched with an error, though - the goal -of our test bridge is to ensure that messages are successfully delivered and all involved components are working. - -## Full Network Docker Compose Setup - -For a more sophisticated deployment which includes bidirectional header sync, message passing, monitoring dashboards, -etc. see the [Deployments README](./deployments/README.md). - -You should note that you can find images for all the bridge components published on [Docker -Hub](https://hub.docker.com/u/paritytech). 
- -To run a Rialto node for example, you can use the following command: - -```bash -docker run -p 30333:30333 -p 9933:9933 -p 9944:9944 \ - -it paritytech/rialto-bridge-node --dev --tmp \ - --rpc-cors=all --unsafe-rpc-external -``` - -## Community - -Main hangout for the community is [Element](https://element.io/) (formerly Riot). Element is a chat server like, for -example, Discord. Most discussions around Polkadot and Substrate happen in various Element "rooms" (channels). So, -joining Element might be a good idea, anyway. - -If you are interested in information exchange and development of Polkadot related bridges please feel free to join the -[Polkadot Bridges](https://app.element.io/#/room/#bridges:web3.foundation) Element channel. - -The [Substrate Technical](https://app.element.io/#/room/#substrate-technical:matrix.org) Element channel is most suited -for discussions regarding Substrate itself. +Apart from the live Rococo <> Westend bridge, you may spin up local networks and see how it works locally. More +details may be found in +[this document](https://github.com/paritytech/polkadot-sdk/tree/master//cumulus/parachains/runtimes/bridge-hubs/README.md). diff --git a/bridges/bin/runtime-common/Cargo.toml b/bridges/bin/runtime-common/Cargo.toml index 5c3fefc69ce710b1af10721ad41d1e5144b00f5a..0ccf30987e822be4bcc739c2806246caafe28ef0 100644 --- a/bridges/bin/runtime-common/Cargo.toml +++ b/bridges/bin/runtime-common/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "bridge-runtime-common" version = "0.1.0" +description = "Common types and functions that may be used by substrate-based runtimes of all bridged chains" authors.workspace = true edition.workspace = true repository.workspace = true @@ -10,7 +11,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["derive"] } hash-db = { version = "0.16.0", default-features = false } log = { version = "0.4.20", default-features = false } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } static_assertions = { version = "1.1", optional = true } # Bridge dependencies diff --git a/bridges/bin/runtime-common/src/lib.rs b/bridges/bin/runtime-common/src/lib.rs index ae6f40b142145dc265a69e8c78ef313f5ce9340f..d3b3b21061d05ab1e120ca3c17f8e9d12aaefe39 100644 --- a/bridges/bin/runtime-common/src/lib.rs +++ b/bridges/bin/runtime-common/src/lib.rs @@ -22,7 +22,6 @@ use crate::messages_call_ext::MessagesCallSubType; use pallet_bridge_grandpa::CallSubType as GrandpaCallSubType; use pallet_bridge_parachains::CallSubType as ParachainsCallSubtype; use sp_runtime::transaction_validity::TransactionValidity; -use xcm::v3::NetworkId; pub mod messages; pub mod messages_api; @@ -92,8 +91,8 @@ where /// ```nocompile /// generate_bridge_reject_obsolete_headers_and_messages!{ /// Call, AccountId -/// BridgeRialtoGrandpa, BridgeWestendGrandpa, -/// BridgeRialtoParachains +/// BridgeRococoGrandpa, BridgeRococoMessages, +/// BridgeRococoParachains /// } /// ``` /// @@ -147,42 +146,6 @@ macro_rules! generate_bridge_reject_obsolete_headers_and_messages { }; } -/// A mapping over `NetworkId`. -/// Since `NetworkId` doesn't include `Millau`, `Rialto` and `RialtoParachain`, we create some -/// synthetic associations between these chains and `NetworkId` chains. -pub enum CustomNetworkId { - /// The Millau network ID, associated with Kusama.
- Millau, - /// The Rialto network ID, associated with Polkadot. - Rialto, - /// The RialtoParachain network ID, associated with Westend. - RialtoParachain, -} - -impl TryFrom for CustomNetworkId { - type Error = (); - - fn try_from(chain: bp_runtime::ChainId) -> Result { - Ok(match chain { - bp_runtime::MILLAU_CHAIN_ID => Self::Millau, - bp_runtime::RIALTO_CHAIN_ID => Self::Rialto, - bp_runtime::RIALTO_PARACHAIN_CHAIN_ID => Self::RialtoParachain, - _ => return Err(()), - }) - } -} - -impl CustomNetworkId { - /// Converts self to XCM' network id. - pub const fn as_network_id(&self) -> NetworkId { - match *self { - CustomNetworkId::Millau => NetworkId::Kusama, - CustomNetworkId::Rialto => NetworkId::Polkadot, - CustomNetworkId::RialtoParachain => NetworkId::Westend, - } - } -} - #[cfg(test)] mod tests { use crate::BridgeRuntimeFilterCall; diff --git a/bridges/bin/runtime-common/src/messages_benchmarking.rs b/bridges/bin/runtime-common/src/messages_benchmarking.rs index d80a88f1068c8975426105f881d963e1c3053ee5..e7e7891461b2160a3d51b7731b300af58b80b2d6 100644 --- a/bridges/bin/runtime-common/src/messages_benchmarking.rs +++ b/bridges/bin/runtime-common/src/messages_benchmarking.rs @@ -22,24 +22,22 @@ use crate::{ messages::{ source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, - AccountIdOf, BridgedChain, HashOf, HasherOf, MessageBridge, ThisChain, + AccountIdOf, BridgedChain, HashOf, MessageBridge, ThisChain, }, messages_generation::{ - encode_all_messages, encode_lane_data, grow_trie_leaf_value, prepare_messages_storage_proof, + encode_all_messages, encode_lane_data, prepare_message_delivery_storage_proof, + prepare_messages_storage_proof, }, }; -use bp_messages::{storage_keys, MessagePayload}; +use bp_messages::MessagePayload; use bp_polkadot_core::parachains::ParaHash; -use bp_runtime::{ - record_all_trie_keys, Chain, Parachain, RawStorageProof, StorageProofSize, UnderlyingChainOf, -}; +use bp_runtime::{Chain, Parachain, StorageProofSize, UnderlyingChainOf}; use codec::Encode; use frame_support::weights::Weight; use pallet_bridge_messages::benchmarking::{MessageDeliveryProofParams, MessageProofParams}; use sp_runtime::traits::{Header, Zero}; use sp_std::prelude::*; -use sp_trie::{trie_types::TrieDBMutBuilderV1, LayoutV1, MemoryDB, TrieMut}; use xcm::v3::prelude::*; /// Prepare inbound bridge message according to given message proof parameters. @@ -172,7 +170,11 @@ where { // prepare storage proof let lane = params.lane; - let (state_root, storage_proof) = prepare_message_delivery_proof::(params); + let (state_root, storage_proof) = prepare_message_delivery_storage_proof::( + params.lane, + params.inbound_lane_data, + params.size, + ); // update runtime storage let (_, bridged_header_hash) = insert_header_to_grandpa_pallet::(state_root); @@ -200,7 +202,11 @@ where { // prepare storage proof let lane = params.lane; - let (state_root, storage_proof) = prepare_message_delivery_proof::(params); + let (state_root, storage_proof) = prepare_message_delivery_storage_proof::( + params.lane, + params.inbound_lane_data, + params.size, + ); // update runtime storage let (_, bridged_header_hash) = @@ -213,36 +219,6 @@ where } } -/// Prepare in-memory message delivery proof, without inserting anything to the runtime storage. 
-fn prepare_message_delivery_proof( - params: MessageDeliveryProofParams>>, -) -> (HashOf>, RawStorageProof) -where - B: MessageBridge, -{ - // prepare Bridged chain storage with inbound lane state - let storage_key = - storage_keys::inbound_lane_data_key(B::BRIDGED_MESSAGES_PALLET_NAME, ¶ms.lane).0; - let mut root = Default::default(); - let mut mdb = MemoryDB::default(); - { - let mut trie = - TrieDBMutBuilderV1::>>::new(&mut mdb, &mut root).build(); - let inbound_lane_data = - grow_trie_leaf_value(params.inbound_lane_data.encode(), params.size); - trie.insert(&storage_key, &inbound_lane_data) - .map_err(|_| "TrieMut::insert has failed") - .expect("TrieMut::insert should not fail in benchmarks"); - } - - // generate storage proof to be delivered to This chain - let storage_proof = record_all_trie_keys::>>, _>(&mdb, &root) - .map_err(|_| "record_all_trie_keys has failed") - .expect("record_all_trie_keys should not fail in benchmarks"); - - (root, storage_proof) -} - /// Insert header to the bridge GRANDPA pallet. pub(crate) fn insert_header_to_grandpa_pallet( state_root: bp_runtime::HashOf, diff --git a/bridges/bin/runtime-common/src/messages_call_ext.rs b/bridges/bin/runtime-common/src/messages_call_ext.rs index 07a99d2c0a16c381799c02e58cb9c4b574e2ad1c..5303fcb7ba030fa3f00a74c817d97537243f0e24 100644 --- a/bridges/bin/runtime-common/src/messages_call_ext.rs +++ b/bridges/bin/runtime-common/src/messages_call_ext.rs @@ -18,6 +18,7 @@ use crate::messages::{ source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, }; use bp_messages::{target_chain::MessageDispatch, InboundLaneData, LaneId, MessageNonce}; +use bp_runtime::OwnedBridgeModule; use frame_support::{ dispatch::CallableCallFor, traits::{Get, IsSubType}, @@ -187,8 +188,22 @@ pub trait MessagesCallSubType, I: 'static>: /// or a `ReceiveMessagesDeliveryProof` call, if the call is for the provided lane. fn call_info_for(&self, lane_id: LaneId) -> Option; - /// Check that a `ReceiveMessagesProof` or a `ReceiveMessagesDeliveryProof` call is trying - /// to deliver/confirm at least some messages that are better than the ones we know of. + /// Ensures that a `ReceiveMessagesProof` or a `ReceiveMessagesDeliveryProof` call: + /// + /// - does not deliver already delivered messages. We require all messages in the + /// `ReceiveMessagesProof` call to be undelivered; + /// + /// - does not submit empty `ReceiveMessagesProof` call with zero messages, unless the lane + /// needs to be unblocked by providing relayer rewards proof; + /// + /// - brings no new delivery confirmations in a `ReceiveMessagesDeliveryProof` call. We require + /// at least one new delivery confirmation in the unrewarded relayers set; + /// + /// - does not violate some basic (easy verifiable) messages pallet rules obsolete (like + /// submitting a call when a pallet is halted or delivering messages when a dispatcher is + /// inactive). + /// + /// If one of above rules is violated, the transaction is treated as invalid. 
fn check_obsolete_call(&self) -> TransactionValidity; } @@ -278,7 +293,17 @@ impl< } fn check_obsolete_call(&self) -> TransactionValidity { + let is_pallet_halted = Pallet::::ensure_not_halted().is_err(); match self.call_info() { + Some(proof_info) if is_pallet_halted => { + log::trace!( + target: pallet_bridge_messages::LOG_TARGET, + "Rejecting messages transaction on halted pallet: {:?}", + proof_info + ); + + return sp_runtime::transaction_validity::InvalidTransaction::Call.into() + }, Some(CallInfo::ReceiveMessagesProof(proof_info)) if proof_info.is_obsolete(T::MessageDispatch::is_active()) => { diff --git a/bridges/bin/runtime-common/src/messages_generation.rs b/bridges/bin/runtime-common/src/messages_generation.rs index 3c550a9bd0fd546ba90753b8871e049e65a702e3..c37aaa5d4d5378a1b76507e017c73aec3c7aabbd 100644 --- a/bridges/bin/runtime-common/src/messages_generation.rs +++ b/bridges/bin/runtime-common/src/messages_generation.rs @@ -16,10 +16,11 @@ //! Helpers for generating message storage proofs, that are used by tests and by benchmarks. -use crate::messages::{BridgedChain, HashOf, HasherOf, MessageBridge}; +use crate::messages::{AccountIdOf, BridgedChain, HashOf, HasherOf, MessageBridge, ThisChain}; use bp_messages::{ - storage_keys, LaneId, MessageKey, MessageNonce, MessagePayload, OutboundLaneData, + storage_keys, InboundLaneData, LaneId, MessageKey, MessageNonce, MessagePayload, + OutboundLaneData, }; use bp_runtime::{record_all_trie_keys, RawStorageProof, StorageProofSize}; use codec::Encode; @@ -104,6 +105,38 @@ where (root, storage_proof) } +/// Prepare storage proof of given messages delivery. +/// +/// Returns state trie root and nodes with prepared messages. +pub fn prepare_message_delivery_storage_proof( + lane: LaneId, + inbound_lane_data: InboundLaneData>>, + size: StorageProofSize, +) -> (HashOf>, RawStorageProof) +where + B: MessageBridge, +{ + // prepare Bridged chain storage with inbound lane state + let storage_key = storage_keys::inbound_lane_data_key(B::BRIDGED_MESSAGES_PALLET_NAME, &lane).0; + let mut root = Default::default(); + let mut mdb = MemoryDB::default(); + { + let mut trie = + TrieDBMutBuilderV1::>>::new(&mut mdb, &mut root).build(); + let inbound_lane_data = grow_trie_leaf_value(inbound_lane_data.encode(), size); + trie.insert(&storage_key, &inbound_lane_data) + .map_err(|_| "TrieMut::insert has failed") + .expect("TrieMut::insert should not fail in benchmarks"); + } + + // generate storage proof to be delivered to This chain + let storage_proof = record_all_trie_keys::>>, _>(&mdb, &root) + .map_err(|_| "record_all_trie_keys has failed") + .expect("record_all_trie_keys should not fail in benchmarks"); + + (root, storage_proof) +} + /// Add extra data to the trie leaf value so that it'll be of given size. pub fn grow_trie_leaf_value(mut value: Vec, size: StorageProofSize) -> Vec { match size { diff --git a/bridges/bin/runtime-common/src/mock.rs b/bridges/bin/runtime-common/src/mock.rs index 45ef790d74482b01948a74cdb0a53c332aa41896..ffabf7f6e2f84bc43e8ac7e90ac90287c5ec39ca 100644 --- a/bridges/bin/runtime-common/src/mock.rs +++ b/bridges/bin/runtime-common/src/mock.rs @@ -14,12 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -//! A mock runtime for testing different stuff in the crate. We've been using Millau -//! runtime for that before, but it has two drawbacks: -//! -//! - circular dependencies between this crate and Millau runtime; -//! -//! - we can't use (e.g. 
as git subtree or by copying) this crate in repo without Millau. +//! A mock runtime for testing different stuff in the crate. #![cfg(test)] @@ -44,13 +39,13 @@ use bp_runtime::{ }; use codec::{Decode, Encode}; use frame_support::{ - parameter_types, + derive_impl, parameter_types, weights::{ConstantMultiplier, IdentityFee, RuntimeDbWeight, Weight}, }; use pallet_transaction_payment::Multiplier; use sp_runtime::{ testing::H256, - traits::{BlakeTwo256, ConstU32, ConstU64, ConstU8, IdentityLookup}, + traits::{BlakeTwo256, ConstU32, ConstU64, ConstU8}, FixedPointNumber, Perquintill, }; @@ -146,30 +141,14 @@ parameter_types! { pub const ReserveId: [u8; 8] = *b"brdgrlrs"; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for TestRuntime { - type RuntimeOrigin = RuntimeOrigin; - type Nonce = u64; - type RuntimeCall = RuntimeCall; type Hash = ThisChainHash; type Hashing = ThisChainHasher; type AccountId = ThisChainAccountId; - type Lookup = IdentityLookup; type Block = ThisChainBlock; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU32<250>; - type Version = (); - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type BaseCallFilter = frame_support::traits::Everything; - type SystemWeightInfo = (); - type BlockWeights = (); - type BlockLength = (); - type DbWeight = DbWeight; - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; + type BlockHashCount = ConstU32<250>; } impl pallet_utility::Config for TestRuntime { @@ -179,20 +158,10 @@ impl pallet_utility::Config for TestRuntime { type WeightInfo = (); } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig as pallet_balances::DefaultConfig)] impl pallet_balances::Config for TestRuntime { - type Balance = ThisChainBalance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - type WeightInfo = (); - type MaxLocks = ConstU32<50>; - type MaxReserves = ConstU32<50>; type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type FreezeIdentifier = (); - type MaxHolds = ConstU32<0>; - type MaxFreezes = ConstU32<0>; + type AccountStore = System; } impl pallet_transaction_payment::Config for TestRuntime { diff --git a/bridges/bin/runtime-common/src/priority_calculator.rs b/bridges/bin/runtime-common/src/priority_calculator.rs index fd10344812517bab17ad33e5d276a5627e01c0c0..a597fb9e2f49289360acfd7ee305b44eb7874a3e 100644 --- a/bridges/bin/runtime-common/src/priority_calculator.rs +++ b/bridges/bin/runtime-common/src/priority_calculator.rs @@ -27,6 +27,7 @@ use frame_support::traits::Get; use sp_runtime::transaction_validity::TransactionPriority; // reexport everything from `integrity_tests` module +#[allow(unused_imports)] pub use integrity_tests::*; /// Compute priority boost for message delivery transaction that delivers diff --git a/bridges/bin/runtime-common/src/refund_relayer_extension.rs b/bridges/bin/runtime-common/src/refund_relayer_extension.rs index 876c069dc0f77dfd23a6732936319ff189221049..6d8b2114808588a83571de6dc02a141cc146d2e3 100644 --- a/bridges/bin/runtime-common/src/refund_relayer_extension.rs +++ b/bridges/bin/runtime-common/src/refund_relayer_extension.rs @@ -838,21 +838,23 @@ mod tests { mock::*, }; use bp_messages::{ - DeliveredMessages, InboundLaneData, MessageNonce, OutboundLaneData, 
UnrewardedRelayer, - UnrewardedRelayersState, + DeliveredMessages, InboundLaneData, MessageNonce, MessagesOperatingMode, OutboundLaneData, + UnrewardedRelayer, UnrewardedRelayersState, }; use bp_parachains::{BestParaHeadHash, ParaInfo}; use bp_polkadot_core::parachains::{ParaHeadsProof, ParaId}; - use bp_runtime::HeaderId; + use bp_runtime::{BasicOperatingMode, HeaderId}; use bp_test_utils::{make_default_justification, test_keyring}; use frame_support::{ assert_storage_noop, parameter_types, traits::{fungible::Mutate, ReservableCurrency}, weights::Weight, }; - use pallet_bridge_grandpa::{Call as GrandpaCall, StoredAuthoritySet}; - use pallet_bridge_messages::Call as MessagesCall; - use pallet_bridge_parachains::{Call as ParachainsCall, RelayBlockHash}; + use pallet_bridge_grandpa::{Call as GrandpaCall, Pallet as GrandpaPallet, StoredAuthoritySet}; + use pallet_bridge_messages::{Call as MessagesCall, Pallet as MessagesPallet}; + use pallet_bridge_parachains::{ + Call as ParachainsCall, Pallet as ParachainsPallet, RelayBlockHash, + }; use sp_runtime::{ traits::{ConstU64, Header as HeaderT}, transaction_validity::{InvalidTransaction, ValidTransaction}, @@ -1592,6 +1594,99 @@ mod tests { }); } + #[test] + fn ext_rejects_batch_with_grandpa_finality_proof_when_grandpa_pallet_is_halted() { + run_test(|| { + initialize_environment(100, 100, 100); + + GrandpaPallet::::set_operating_mode( + RuntimeOrigin::root(), + BasicOperatingMode::Halted, + ) + .unwrap(); + + assert_eq!( + run_pre_dispatch(all_finality_and_delivery_batch_call(200, 200, 200)), + Err(TransactionValidityError::Invalid(InvalidTransaction::Call)), + ); + assert_eq!( + run_pre_dispatch(all_finality_and_confirmation_batch_call(200, 200, 200)), + Err(TransactionValidityError::Invalid(InvalidTransaction::Call)), + ); + }); + } + + #[test] + fn ext_rejects_batch_with_parachain_finality_proof_when_parachains_pallet_is_halted() { + run_test(|| { + initialize_environment(100, 100, 100); + + ParachainsPallet::::set_operating_mode( + RuntimeOrigin::root(), + BasicOperatingMode::Halted, + ) + .unwrap(); + + assert_eq!( + run_pre_dispatch(all_finality_and_delivery_batch_call(200, 200, 200)), + Err(TransactionValidityError::Invalid(InvalidTransaction::Call)), + ); + assert_eq!( + run_pre_dispatch(all_finality_and_confirmation_batch_call(200, 200, 200)), + Err(TransactionValidityError::Invalid(InvalidTransaction::Call)), + ); + + assert_eq!( + run_pre_dispatch(parachain_finality_and_delivery_batch_call(200, 200)), + Err(TransactionValidityError::Invalid(InvalidTransaction::Call)), + ); + assert_eq!( + run_pre_dispatch(parachain_finality_and_confirmation_batch_call(200, 200)), + Err(TransactionValidityError::Invalid(InvalidTransaction::Call)), + ); + }); + } + + #[test] + fn ext_rejects_transaction_when_messages_pallet_is_halted() { + run_test(|| { + initialize_environment(100, 100, 100); + + MessagesPallet::::set_operating_mode( + RuntimeOrigin::root(), + MessagesOperatingMode::Basic(BasicOperatingMode::Halted), + ) + .unwrap(); + + assert_eq!( + run_pre_dispatch(all_finality_and_delivery_batch_call(200, 200, 200)), + Err(TransactionValidityError::Invalid(InvalidTransaction::Call)), + ); + assert_eq!( + run_pre_dispatch(all_finality_and_confirmation_batch_call(200, 200, 200)), + Err(TransactionValidityError::Invalid(InvalidTransaction::Call)), + ); + + assert_eq!( + run_pre_dispatch(parachain_finality_and_delivery_batch_call(200, 200)), + Err(TransactionValidityError::Invalid(InvalidTransaction::Call)), + ); + assert_eq!( + 
run_pre_dispatch(parachain_finality_and_confirmation_batch_call(200, 200)), + Err(TransactionValidityError::Invalid(InvalidTransaction::Call)), + ); + + assert_eq!( + run_pre_dispatch(message_delivery_call(200)), + Err(TransactionValidityError::Invalid(InvalidTransaction::Call)), + ); + assert_eq!( + run_pre_dispatch(message_confirmation_call(200)), + Err(TransactionValidityError::Invalid(InvalidTransaction::Call)), + ); + }); + } + #[test] fn pre_dispatch_parses_batch_with_relay_chain_and_parachain_headers() { run_test(|| { diff --git a/bridges/docs/high-level-overview.md b/bridges/docs/high-level-overview.md index 42efc8100bd080763c22ea3e4e813f3c3c87db37..d6d6fb3f0996dd15d4fc2987deacf79e4ecd4e5f 100644 --- a/bridges/docs/high-level-overview.md +++ b/bridges/docs/high-level-overview.md @@ -1,7 +1,7 @@ # High-Level Bridge Documentation This document gives a brief, abstract description of main components that may be found in this repository. If you want -to see how we're using them to build Rococo <> Wococo (Kusama <> Polkadot) bridge, please refer to the [Polkadot <> +to see how we're using them to build Rococo <> Westend (Kusama <> Polkadot) bridge, please refer to the [Polkadot <> Kusama Bridge](./polkadot-kusama-bridge-overview.md). ## Purpose diff --git a/bridges/modules/grandpa/Cargo.toml b/bridges/modules/grandpa/Cargo.toml index 05d6a8e5c26b0d82f5dc80ce5be21d9a53b46d33..dbbe18febc618a0c8d16c6cd5edc5fd207a4a6ba 100644 --- a/bridges/modules/grandpa/Cargo.toml +++ b/bridges/modules/grandpa/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "pallet-bridge-grandpa" version = "0.1.0" +description = "Module implementing GRANDPA on-chain light client used for bridging consensus of substrate-based chains." authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -11,7 +12,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } finality-grandpa = { version = "0.16.2", default-features = false } log = { version = "0.4.20", default-features = false } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } # Bridge Dependencies diff --git a/bridges/modules/grandpa/src/call_ext.rs b/bridges/modules/grandpa/src/call_ext.rs index e0648d5dd0f1d9cd26b2d9cdcc6dc983ef082bb2..f238064f92bcacde89971479a501b60469f01691 100644 --- a/bridges/modules/grandpa/src/call_ext.rs +++ b/bridges/modules/grandpa/src/call_ext.rs @@ -16,7 +16,7 @@ use crate::{weights::WeightInfo, BridgedBlockNumber, BridgedHeader, Config, Error, Pallet}; use bp_header_chain::{justification::GrandpaJustification, ChainWithGrandpa}; -use bp_runtime::BlockNumberOf; +use bp_runtime::{BlockNumberOf, OwnedBridgeModule}; use codec::Encode; use frame_support::{dispatch::CallableCallFor, traits::IsSubType, weights::Weight}; use sp_runtime::{ @@ -126,6 +126,10 @@ pub trait CallSubType, I: 'static>: _ => return Ok(ValidTransaction::default()), }; + if Pallet::::ensure_not_halted().is_err() { + return InvalidTransaction::Call.into() + } + match SubmitFinalityProofHelper::::check_obsolete(finality_target.block_number) { Ok(_) => Ok(ValidTransaction::default()), Err(Error::::OldHeader) => InvalidTransaction::Stale.into(), @@ -192,10 +196,10 @@ mod tests { use crate::{ call_ext::CallSubType, mock::{run_test, test_header, RuntimeCall, TestBridgedChain, TestNumber, TestRuntime}, - BestFinalized, 
Config, WeightInfo, + BestFinalized, Config, PalletOperatingMode, WeightInfo, }; use bp_header_chain::ChainWithGrandpa; - use bp_runtime::HeaderId; + use bp_runtime::{BasicOperatingMode, HeaderId}; use bp_test_utils::{ make_default_justification, make_justification_for_header, JustificationGeneratorParams, }; @@ -238,6 +242,17 @@ mod tests { }); } + #[test] + fn extension_rejects_new_header_if_pallet_is_halted() { + run_test(|| { + // when pallet is halted => tx is rejected + sync_to_header_10(); + PalletOperatingMode::::put(BasicOperatingMode::Halted); + + assert!(!validate_block_submit(15)); + }); + } + #[test] fn extension_accepts_new_header() { run_test(|| { diff --git a/bridges/modules/grandpa/src/mock.rs b/bridges/modules/grandpa/src/mock.rs index f88a0a3e6a6ee187222a6fb8eb1628e22b37abbe..7efa84971fe7f319146a62fe5b07d2de0e75e48d 100644 --- a/bridges/modules/grandpa/src/mock.rs +++ b/bridges/modules/grandpa/src/mock.rs @@ -20,16 +20,9 @@ use bp_header_chain::ChainWithGrandpa; use bp_runtime::Chain; use frame_support::{ - construct_runtime, parameter_types, - traits::{ConstU32, ConstU64, Hooks}, - weights::Weight, + construct_runtime, derive_impl, parameter_types, traits::Hooks, weights::Weight, }; use sp_core::sr25519::Signature; -use sp_runtime::{ - testing::H256, - traits::{BlakeTwo256, IdentityLookup}, - Perbill, -}; pub type AccountId = u64; pub type TestHeader = sp_runtime::testing::Header; @@ -49,43 +42,14 @@ construct_runtime! { } } -parameter_types! { - pub const MaximumBlockWeight: Weight = Weight::from_parts(1024, 0); - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); -} - +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for TestRuntime { - type RuntimeOrigin = RuntimeOrigin; - type Nonce = u64; - type RuntimeCall = RuntimeCall; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type BaseCallFilter = frame_support::traits::Everything; - type SystemWeightInfo = (); - type DbWeight = (); - type BlockWeights = (); - type BlockLength = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = ConstU32<16>; } parameter_types! { pub const MaxFreeMandatoryHeadersPerBlock: u32 = 2; pub const HeadersToKeep: u32 = 5; - pub const SessionLength: u64 = 5; - pub const NumValidators: u32 = 5; } impl grandpa::Config for TestRuntime { diff --git a/bridges/modules/grandpa/src/weights.rs b/bridges/modules/grandpa/src/weights.rs index 89ed70d13ac3359f23fa6b756d3a8e07ff8b7092..a75e7b5a8e4ada8ce880a040492c904d8035642c 100644 --- a/bridges/modules/grandpa/src/weights.rs +++ b/bridges/modules/grandpa/src/weights.rs @@ -23,7 +23,7 @@ //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// target/release/millau-bridge-node +// target/release/unknown-bridge-node // benchmark // pallet // --chain=dev @@ -58,39 +58,39 @@ pub trait WeightInfo { /// Those weights are test only and must never be used in production. 
pub struct BridgeWeight(PhantomData); impl WeightInfo for BridgeWeight { - /// Storage: BridgeRialtoGrandpa PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownGrandpa PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa PalletOperatingMode (max_values: Some(1), max_size: Some(1), + /// Proof: BridgeUnknownGrandpa PalletOperatingMode (max_values: Some(1), max_size: Some(1), /// added: 496, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa RequestCount (r:1 w:1) + /// Storage: BridgeUnknownGrandpa RequestCount (r:1 w:1) /// - /// Proof: BridgeRialtoGrandpa RequestCount (max_values: Some(1), max_size: Some(4), added: 499, - /// mode: MaxEncodedLen) + /// Proof: BridgeUnknownGrandpa RequestCount (max_values: Some(1), max_size: Some(4), added: + /// 499, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa BestFinalized (r:1 w:1) + /// Storage: BridgeUnknownGrandpa BestFinalized (r:1 w:1) /// - /// Proof: BridgeRialtoGrandpa BestFinalized (max_values: Some(1), max_size: Some(36), added: + /// Proof: BridgeUnknownGrandpa BestFinalized (max_values: Some(1), max_size: Some(36), added: /// 531, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa CurrentAuthoritySet (r:1 w:0) + /// Storage: BridgeUnknownGrandpa CurrentAuthoritySet (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa CurrentAuthoritySet (max_values: Some(1), max_size: Some(209), + /// Proof: BridgeUnknownGrandpa CurrentAuthoritySet (max_values: Some(1), max_size: Some(209), /// added: 704, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHashesPointer (r:1 w:1) + /// Storage: BridgeUnknownGrandpa ImportedHashesPointer (r:1 w:1) /// - /// Proof: BridgeRialtoGrandpa ImportedHashesPointer (max_values: Some(1), max_size: Some(4), + /// Proof: BridgeUnknownGrandpa ImportedHashesPointer (max_values: Some(1), max_size: Some(4), /// added: 499, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHashes (r:1 w:1) + /// Storage: BridgeUnknownGrandpa ImportedHashes (r:1 w:1) /// - /// Proof: BridgeRialtoGrandpa ImportedHashes (max_values: Some(14400), max_size: Some(36), + /// Proof: BridgeUnknownGrandpa ImportedHashes (max_values: Some(14400), max_size: Some(36), /// added: 2016, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:0 w:2) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:0 w:2) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// /// The range of component `p` is `[1, 4]`. 
@@ -113,39 +113,39 @@ impl WeightInfo for BridgeWeight { // For backwards compatibility and tests impl WeightInfo for () { - /// Storage: BridgeRialtoGrandpa PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownGrandpa PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa PalletOperatingMode (max_values: Some(1), max_size: Some(1), + /// Proof: BridgeUnknownGrandpa PalletOperatingMode (max_values: Some(1), max_size: Some(1), /// added: 496, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa RequestCount (r:1 w:1) + /// Storage: BridgeUnknownGrandpa RequestCount (r:1 w:1) /// - /// Proof: BridgeRialtoGrandpa RequestCount (max_values: Some(1), max_size: Some(4), added: 499, - /// mode: MaxEncodedLen) + /// Proof: BridgeUnknownGrandpa RequestCount (max_values: Some(1), max_size: Some(4), added: + /// 499, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa BestFinalized (r:1 w:1) + /// Storage: BridgeUnknownGrandpa BestFinalized (r:1 w:1) /// - /// Proof: BridgeRialtoGrandpa BestFinalized (max_values: Some(1), max_size: Some(36), added: + /// Proof: BridgeUnknownGrandpa BestFinalized (max_values: Some(1), max_size: Some(36), added: /// 531, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa CurrentAuthoritySet (r:1 w:0) + /// Storage: BridgeUnknownGrandpa CurrentAuthoritySet (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa CurrentAuthoritySet (max_values: Some(1), max_size: Some(209), + /// Proof: BridgeUnknownGrandpa CurrentAuthoritySet (max_values: Some(1), max_size: Some(209), /// added: 704, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHashesPointer (r:1 w:1) + /// Storage: BridgeUnknownGrandpa ImportedHashesPointer (r:1 w:1) /// - /// Proof: BridgeRialtoGrandpa ImportedHashesPointer (max_values: Some(1), max_size: Some(4), + /// Proof: BridgeUnknownGrandpa ImportedHashesPointer (max_values: Some(1), max_size: Some(4), /// added: 499, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHashes (r:1 w:1) + /// Storage: BridgeUnknownGrandpa ImportedHashes (r:1 w:1) /// - /// Proof: BridgeRialtoGrandpa ImportedHashes (max_values: Some(14400), max_size: Some(36), + /// Proof: BridgeUnknownGrandpa ImportedHashes (max_values: Some(14400), max_size: Some(36), /// added: 2016, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:0 w:2) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:0 w:2) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// /// The range of component `p` is `[1, 4]`. 
diff --git a/bridges/modules/messages/Cargo.toml b/bridges/modules/messages/Cargo.toml index 7b7ea06198102ece6d93313413451a7c0294ef8b..a5c86693309041e0182d9b45a8f91f45f37381b9 100644 --- a/bridges/modules/messages/Cargo.toml +++ b/bridges/modules/messages/Cargo.toml @@ -10,7 +10,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } log = { version = "0.4.20", default-features = false } num-traits = { version = "0.2", default-features = false } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } # Bridge dependencies @@ -22,7 +22,6 @@ bp-runtime = { path = "../../primitives/runtime", default-features = false } frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } frame-support = { path = "../../../substrate/frame/support", default-features = false } frame-system = { path = "../../../substrate/frame/system", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } sp-std = { path = "../../../substrate/primitives/std", default-features = false } @@ -43,7 +42,6 @@ std = [ "log/std", "num-traits/std", "scale-info/std", - "sp-core/std", "sp-runtime/std", "sp-std/std", ] diff --git a/bridges/modules/messages/src/mock.rs b/bridges/modules/messages/src/mock.rs index aebb7eafa7871d13cd8140d5a352978ca36433dd..648acad772d7a04b2985f69d038d5c52634c4708 100644 --- a/bridges/modules/messages/src/mock.rs +++ b/bridges/modules/messages/src/mock.rs @@ -34,16 +34,11 @@ use bp_messages::{ use bp_runtime::{messages::MessageDispatchResult, Size}; use codec::{Decode, Encode}; use frame_support::{ - parameter_types, - traits::ConstU64, + derive_impl, parameter_types, weights::{constants::RocksDbWeight, Weight}, }; use scale_info::TypeInfo; -use sp_core::H256; -use sp_runtime::{ - traits::{BlakeTwo256, ConstU32, IdentityLookup}, - BuildStorage, Perbill, -}; +use sp_runtime::BuildStorage; use std::{ collections::{BTreeMap, VecDeque}, ops::RangeInclusive, @@ -84,55 +79,19 @@ frame_support::construct_runtime! { } } -parameter_types! 
{ - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = Weight::from_parts(1024, 0); - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); -} - pub type DbWeight = RocksDbWeight; +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for TestRuntime { - type RuntimeOrigin = RuntimeOrigin; - type Nonce = u64; - type RuntimeCall = RuntimeCall; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; - type Version = (); - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type BaseCallFilter = frame_support::traits::Everything; - type SystemWeightInfo = (); - type BlockWeights = (); - type BlockLength = (); type DbWeight = DbWeight; - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig as pallet_balances::DefaultConfig)] impl pallet_balances::Config for TestRuntime { - type MaxLocks = (); - type Balance = Balance; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ConstU64<1>; - type AccountStore = frame_system::Pallet; - type WeightInfo = (); - type MaxReserves = (); - type ReserveIdentifier = (); - type RuntimeHoldReason = RuntimeHoldReason; - type FreezeIdentifier = (); - type MaxHolds = ConstU32<0>; - type MaxFreezes = ConstU32<0>; + type ReserveIdentifier = [u8; 8]; + type AccountStore = System; } parameter_types! { diff --git a/bridges/modules/messages/src/weights.rs b/bridges/modules/messages/src/weights.rs index 5b6863984ec78c0e250c54a0ed3363d1278f196f..5bf7d56756079df8a5e469b9c50ba7607b65d983 100644 --- a/bridges/modules/messages/src/weights.rs +++ b/bridges/modules/messages/src/weights.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -//! Autogenerated weights for RialtoMessages +//! Autogenerated weights for pallet_bridge_messages //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev //! DATE: 2023-03-23, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -23,13 +23,13 @@ //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// target/release/millau-bridge-node +// target/release/unknown-bridge-node // benchmark // pallet // --chain=dev // --steps=50 // --repeat=20 -// --pallet=RialtoMessages +// --pallet=pallet_bridge_messages // --extrinsic=* // --execution=wasm // --wasm-execution=Compiled @@ -48,7 +48,7 @@ use frame_support::{ }; use sp_std::marker::PhantomData; -/// Weight functions needed for RialtoMessages. +/// Weight functions needed for pallet_bridge_messages. pub trait WeightInfo { fn receive_single_message_proof() -> Weight; fn receive_two_messages_proof() -> Weight; @@ -61,24 +61,24 @@ pub trait WeightInfo { fn receive_single_message_proof_with_dispatch(i: u32) -> Weight; } -/// Weights for `RialtoMessages` that are generated using one of the Bridge testnets. +/// Weights for `pallet_bridge_messages` that are generated using one of the Bridge testnets. /// /// Those weights are test only and must never be used in production. 
pub struct BridgeWeight(PhantomData); impl WeightInfo for BridgeWeight { - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) fn receive_single_message_proof() -> Weight { // Proof Size summary in bytes: @@ -89,19 +89,19 @@ impl WeightInfo for BridgeWeight { .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) fn receive_two_messages_proof() -> Weight { // Proof Size summary in bytes: @@ -112,19 +112,19 @@ impl WeightInfo for BridgeWeight { .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// 
Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) fn receive_single_message_proof_with_outbound_lane_state() -> Weight { // Proof Size summary in bytes: @@ -135,19 +135,19 @@ impl WeightInfo for BridgeWeight { .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) fn receive_single_message_proof_1_kb() -> Weight { // Proof Size summary in bytes: @@ -158,19 +158,19 @@ impl WeightInfo for BridgeWeight { .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) fn receive_single_message_proof_16_kb() -> Weight { // Proof Size summary in bytes: @@ -181,19 +181,19 @@ impl WeightInfo for BridgeWeight { .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// 
Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages OutboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages OutboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: + /// Proof: BridgeUnknownMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: /// 539, mode: MaxEncodedLen) /// /// Storage: BridgeRelayers RelayerRewards (r:1 w:1) @@ -209,19 +209,19 @@ impl WeightInfo for BridgeWeight { .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages OutboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages OutboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: + /// Proof: BridgeUnknownMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: /// 539, mode: MaxEncodedLen) /// /// Storage: BridgeRelayers RelayerRewards (r:1 w:1) @@ -237,19 +237,19 @@ impl WeightInfo for BridgeWeight { .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages OutboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages OutboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: + /// Proof: BridgeUnknownMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: /// 539, mode: MaxEncodedLen) /// /// Storage: BridgeRelayers RelayerRewards (r:2 w:2) @@ -265,19 +265,19 @@ impl WeightInfo for BridgeWeight { .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages 
PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) /// /// The range of component `i` is `[128, 2048]`. @@ -296,19 +296,19 @@ impl WeightInfo for BridgeWeight { // For backwards compatibility and tests impl WeightInfo for () { - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) fn receive_single_message_proof() -> Weight { // Proof Size summary in bytes: @@ -319,19 +319,19 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) fn 
receive_two_messages_proof() -> Weight { // Proof Size summary in bytes: @@ -342,19 +342,19 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) fn receive_single_message_proof_with_outbound_lane_state() -> Weight { // Proof Size summary in bytes: @@ -365,19 +365,19 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) fn receive_single_message_proof_1_kb() -> Weight { // Proof Size summary in bytes: @@ -388,19 +388,19 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: 
Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) fn receive_single_message_proof_16_kb() -> Weight { // Proof Size summary in bytes: @@ -411,19 +411,19 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages OutboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages OutboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: + /// Proof: BridgeUnknownMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: /// 539, mode: MaxEncodedLen) /// /// Storage: BridgeRelayers RelayerRewards (r:1 w:1) @@ -439,19 +439,19 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages OutboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages OutboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: + /// Proof: BridgeUnknownMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: /// 539, mode: MaxEncodedLen) /// /// Storage: BridgeRelayers RelayerRewards (r:1 w:1) @@ -467,19 +467,19 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 
497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages OutboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages OutboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: + /// Proof: BridgeUnknownMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: /// 539, mode: MaxEncodedLen) /// /// Storage: BridgeRelayers RelayerRewards (r:2 w:2) @@ -495,19 +495,19 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) /// /// The range of component `i` is `[128, 2048]`. diff --git a/bridges/modules/parachains/Cargo.toml b/bridges/modules/parachains/Cargo.toml index 4eb09ed78d17f3b4da620ac6b28645bdaab88c12..0d1b61ddea8022db5b9aaafee03624e3be3959d6 100644 --- a/bridges/modules/parachains/Cargo.toml +++ b/bridges/modules/parachains/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "pallet-bridge-parachains" version = "0.1.0" +description = "Module that allows bridged relay chains to exchange information on their parachains' heads." 
authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -8,7 +9,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } log = { version = "0.4.20", default-features = false } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } # Bridge Dependencies diff --git a/bridges/modules/parachains/src/call_ext.rs b/bridges/modules/parachains/src/call_ext.rs index 99640dadc61f4a422ec41db4fe196f752b206d06..198ff11be49512ef90ab38a093e6889a3d8f0702 100644 --- a/bridges/modules/parachains/src/call_ext.rs +++ b/bridges/modules/parachains/src/call_ext.rs @@ -17,6 +17,7 @@ use crate::{Config, Pallet, RelayBlockNumber}; use bp_parachains::BestParaHeadHash; use bp_polkadot_core::parachains::{ParaHash, ParaId}; +use bp_runtime::OwnedBridgeModule; use frame_support::{dispatch::CallableCallFor, traits::IsSubType}; use sp_runtime::{ transaction_validity::{InvalidTransaction, TransactionValidity, ValidTransaction}, @@ -141,6 +142,10 @@ pub trait CallSubType, I: 'static>: None => return Ok(ValidTransaction::default()), }; + if Pallet::::ensure_not_halted().is_err() { + return InvalidTransaction::Call.into() + } + if SubmitParachainHeadsHelper::::is_obsolete(&update) { return InvalidTransaction::Stale.into() } @@ -160,10 +165,11 @@ where mod tests { use crate::{ mock::{run_test, RuntimeCall, TestRuntime}, - CallSubType, ParaInfo, ParasInfo, RelayBlockNumber, + CallSubType, PalletOperatingMode, ParaInfo, ParasInfo, RelayBlockNumber, }; use bp_parachains::BestParaHeadHash; use bp_polkadot_core::parachains::{ParaHash, ParaHeadsProof, ParaId}; + use bp_runtime::BasicOperatingMode; fn validate_submit_parachain_heads( num: RelayBlockNumber, @@ -221,6 +227,17 @@ mod tests { }); } + #[test] + fn extension_rejects_header_if_pallet_is_halted() { + run_test(|| { + // when pallet is halted => tx is rejected + sync_to_relay_header_10(); + PalletOperatingMode::::put(BasicOperatingMode::Halted); + + assert!(!validate_submit_parachain_heads(15, vec![(ParaId(1), [2u8; 32].into())])); + }); + } + #[test] fn extension_accepts_new_header() { run_test(|| { diff --git a/bridges/modules/parachains/src/mock.rs b/bridges/modules/parachains/src/mock.rs index 14afe38417105a789f9eb13ca913c1be91e6f789..d95e76f31086251ac7ee8671e379d55e0afbb8f2 100644 --- a/bridges/modules/parachains/src/mock.rs +++ b/bridges/modules/parachains/src/mock.rs @@ -17,17 +17,18 @@ use bp_header_chain::ChainWithGrandpa; use bp_polkadot_core::parachains::ParaId; use bp_runtime::{Chain, Parachain}; -use frame_support::{construct_runtime, parameter_types, traits::ConstU32, weights::Weight}; +use frame_support::{ + construct_runtime, derive_impl, parameter_types, traits::ConstU32, weights::Weight, +}; use sp_runtime::{ testing::H256, - traits::{BlakeTwo256, Header as HeaderT, IdentityLookup}, - MultiSignature, Perbill, + traits::{BlakeTwo256, Header as HeaderT}, + MultiSignature, }; use crate as pallet_bridge_parachains; pub type AccountId = u64; -pub type TestNumber = u64; pub type RelayBlockHeader = sp_runtime::generic::Header; @@ -152,42 +153,12 @@ construct_runtime! { } } -parameter_types! 
{ - pub const BlockHashCount: TestNumber = 250; - pub const MaximumBlockWeight: Weight = Weight::from_parts(1024, 0); - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); -} - +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for TestRuntime { - type RuntimeOrigin = RuntimeOrigin; - type Nonce = u64; - type RuntimeCall = RuntimeCall; type Block = Block; - type Hash = H256; - type Hashing = RegularParachainHasher; - type AccountId = AccountId; - type Lookup = IdentityLookup; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type BaseCallFilter = frame_support::traits::Everything; - type SystemWeightInfo = (); - type DbWeight = (); - type BlockWeights = (); - type BlockLength = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = ConstU32<16>; } parameter_types! { - pub const SessionLength: u64 = 5; - pub const NumValidators: u32 = 5; pub const HeadersToKeep: u32 = 5; } diff --git a/bridges/modules/parachains/src/weights.rs b/bridges/modules/parachains/src/weights.rs index 9182ec466117b2c642910148a1240a10ae438b8a..abddc8768947006e574bf6bca4d2301c2047199a 100644 --- a/bridges/modules/parachains/src/weights.rs +++ b/bridges/modules/parachains/src/weights.rs @@ -23,7 +23,7 @@ //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// target/release/millau-bridge-node +// target/release/unknown-bridge-node // benchmark // pallet // --chain=dev @@ -60,29 +60,29 @@ pub trait WeightInfo { /// Those weights are test only and must never be used in production. 
pub struct BridgeWeight(PhantomData); impl WeightInfo for BridgeWeight { - /// Storage: BridgeRialtoParachains PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownParachains PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), + /// Proof: BridgeUnknownParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), /// added: 496, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ParasInfo (r:1 w:1) + /// Storage: BridgeUnknownParachains ParasInfo (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: + /// Proof: BridgeUnknownParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: /// 555, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHashes (r:1 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHashes (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHashes (max_values: Some(1024), max_size: + /// Proof: BridgeUnknownParachains ImportedParaHashes (max_values: Some(1024), max_size: /// Some(64), added: 1549, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHeads (r:0 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHeads (r:0 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHeads (max_values: Some(1024), max_size: + /// Proof: BridgeUnknownParachains ImportedParaHeads (max_values: Some(1024), max_size: /// Some(196), added: 1681, mode: MaxEncodedLen) /// /// The range of component `p` is `[1, 2]`. 
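// A minimal, self-contained sketch (not part of this diff; the numeric values are
// illustrative, not taken from the benchmark run described above) of how the generated
// weight functions in this file are composed: a base (ref_time, proof_size) pair from the
// benchmark CLI, extended by per-read/per-write database costs, roughly one read/write per
// storage item listed in the `Storage:`/`Proof:` doc comments.
use frame_support::weights::{constants::RocksDbWeight, Weight};

fn illustrative_submit_parachain_heads_weight() -> Weight {
	// Hypothetical base cost (ref_time in picoseconds, proof size in bytes).
	Weight::from_parts(36_000_000, 4_648)
		// Four storage items are read (PalletOperatingMode, ImportedHeaders, ParasInfo, ImportedParaHashes).
		.saturating_add(RocksDbWeight::get().reads(4_u64))
		// Three storage items are written (ParasInfo, ImportedParaHashes, ImportedParaHeads).
		.saturating_add(RocksDbWeight::get().writes(3_u64))
}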
@@ -97,29 +97,29 @@ impl WeightInfo for BridgeWeight { .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: BridgeRialtoParachains PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownParachains PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), + /// Proof: BridgeUnknownParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), /// added: 496, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ParasInfo (r:1 w:1) + /// Storage: BridgeUnknownParachains ParasInfo (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: + /// Proof: BridgeUnknownParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: /// 555, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHashes (r:1 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHashes (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHashes (max_values: Some(1024), max_size: + /// Proof: BridgeUnknownParachains ImportedParaHashes (max_values: Some(1024), max_size: /// Some(64), added: 1549, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHeads (r:0 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHeads (r:0 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHeads (max_values: Some(1024), max_size: + /// Proof: BridgeUnknownParachains ImportedParaHeads (max_values: Some(1024), max_size: /// Some(196), added: 1681, mode: MaxEncodedLen) fn submit_parachain_heads_with_1kb_proof() -> Weight { // Proof Size summary in bytes: @@ -130,29 +130,29 @@ impl WeightInfo for BridgeWeight { .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: BridgeRialtoParachains PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownParachains PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), + /// Proof: BridgeUnknownParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), /// added: 496, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ParasInfo (r:1 w:1) + /// Storage: BridgeUnknownParachains ParasInfo (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: + /// Proof: BridgeUnknownParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: /// 555, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHashes (r:1 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHashes (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHashes (max_values: Some(1024), max_size: + /// Proof: 
BridgeUnknownParachains ImportedParaHashes (max_values: Some(1024), max_size: /// Some(64), added: 1549, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHeads (r:0 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHeads (r:0 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHeads (max_values: Some(1024), max_size: + /// Proof: BridgeUnknownParachains ImportedParaHeads (max_values: Some(1024), max_size: /// Some(196), added: 1681, mode: MaxEncodedLen) fn submit_parachain_heads_with_16kb_proof() -> Weight { // Proof Size summary in bytes: @@ -167,29 +167,29 @@ impl WeightInfo for BridgeWeight { // For backwards compatibility and tests impl WeightInfo for () { - /// Storage: BridgeRialtoParachains PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownParachains PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), + /// Proof: BridgeUnknownParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), /// added: 496, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ParasInfo (r:1 w:1) + /// Storage: BridgeUnknownParachains ParasInfo (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: + /// Proof: BridgeUnknownParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: /// 555, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHashes (r:1 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHashes (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHashes (max_values: Some(1024), max_size: + /// Proof: BridgeUnknownParachains ImportedParaHashes (max_values: Some(1024), max_size: /// Some(64), added: 1549, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHeads (r:0 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHeads (r:0 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHeads (max_values: Some(1024), max_size: + /// Proof: BridgeUnknownParachains ImportedParaHeads (max_values: Some(1024), max_size: /// Some(196), added: 1681, mode: MaxEncodedLen) /// /// The range of component `p` is `[1, 2]`. 
@@ -204,29 +204,29 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: BridgeRialtoParachains PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownParachains PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), + /// Proof: BridgeUnknownParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), /// added: 496, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ParasInfo (r:1 w:1) + /// Storage: BridgeUnknownParachains ParasInfo (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: + /// Proof: BridgeUnknownParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: /// 555, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHashes (r:1 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHashes (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHashes (max_values: Some(1024), max_size: + /// Proof: BridgeUnknownParachains ImportedParaHashes (max_values: Some(1024), max_size: /// Some(64), added: 1549, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHeads (r:0 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHeads (r:0 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHeads (max_values: Some(1024), max_size: + /// Proof: BridgeUnknownParachains ImportedParaHeads (max_values: Some(1024), max_size: /// Some(196), added: 1681, mode: MaxEncodedLen) fn submit_parachain_heads_with_1kb_proof() -> Weight { // Proof Size summary in bytes: @@ -237,29 +237,29 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: BridgeRialtoParachains PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownParachains PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), + /// Proof: BridgeUnknownParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), /// added: 496, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ParasInfo (r:1 w:1) + /// Storage: BridgeUnknownParachains ParasInfo (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: + /// Proof: BridgeUnknownParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: /// 555, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHashes (r:1 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHashes (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHashes (max_values: Some(1024), max_size: + /// Proof: 
BridgeUnknownParachains ImportedParaHashes (max_values: Some(1024), max_size: /// Some(64), added: 1549, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHeads (r:0 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHeads (r:0 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHeads (max_values: Some(1024), max_size: + /// Proof: BridgeUnknownParachains ImportedParaHeads (max_values: Some(1024), max_size: /// Some(196), added: 1681, mode: MaxEncodedLen) fn submit_parachain_heads_with_16kb_proof() -> Weight { // Proof Size summary in bytes: diff --git a/bridges/modules/parachains/src/weights_ext.rs b/bridges/modules/parachains/src/weights_ext.rs index 13bc9ad2bbce0f26d5946cf8198e689597e5f968..393086a85690fcc2846c1708bc788e1d67a61d66 100644 --- a/bridges/modules/parachains/src/weights_ext.rs +++ b/bridges/modules/parachains/src/weights_ext.rs @@ -31,7 +31,7 @@ use frame_support::weights::{RuntimeDbWeight, Weight}; pub const DEFAULT_PARACHAIN_HEAD_SIZE: u32 = 384; /// Number of extra bytes (excluding size of storage value itself) of storage proof, built at -/// the Rialto chain. +/// some generic chain. pub const EXTRA_STORAGE_PROOF_SIZE: u32 = 1024; /// Extended weight info. diff --git a/bridges/modules/relayers/Cargo.toml b/bridges/modules/relayers/Cargo.toml index 46fc3bb43b1de23b5784179ccd2dbc31ae6c7230..6ec1971e3f6b13376cf4ace4490c8c946590342f 100644 --- a/bridges/modules/relayers/Cargo.toml +++ b/bridges/modules/relayers/Cargo.toml @@ -9,7 +9,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } log = { version = "0.4.20", default-features = false } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } # Bridge dependencies @@ -30,7 +30,6 @@ sp-std = { path = "../../../substrate/primitives/std", default-features = false [dev-dependencies] bp-runtime = { path = "../../primitives/runtime" } pallet-balances = { path = "../../../substrate/frame/balances" } -sp-core = { path = "../../../substrate/primitives/core" } sp-io = { path = "../../../substrate/primitives/io" } sp-runtime = { path = "../../../substrate/primitives/runtime" } diff --git a/bridges/modules/relayers/src/lib.rs b/bridges/modules/relayers/src/lib.rs index b9b98ca7e253fdb952125e2079752484e37241a2..ce66c9df48e01c829a55d03ffe4437c41f006a6b 100644 --- a/bridges/modules/relayers/src/lib.rs +++ b/bridges/modules/relayers/src/lib.rs @@ -325,6 +325,12 @@ pub mod pallet { rewards_account_params, new_reward, ); + + Self::deposit_event(Event::::RewardRegistered { + relayer: relayer.clone(), + rewards_account_params, + reward, + }); }, ); } @@ -369,6 +375,15 @@ pub mod pallet { #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { + /// Relayer reward has been registered and may be claimed later. + RewardRegistered { + /// Relayer account that can claim reward. + relayer: T::AccountId, + /// Relayer can claim reward from this account. + rewards_account_params: RewardsAccountParams, + /// Reward amount. + reward: T::Reward, + }, /// Reward has been paid to the relayer. RewardPaid { /// Relayer account that has been rewarded. 
@@ -455,7 +470,7 @@ mod tests { use super::*; use mock::{RuntimeEvent as TestEvent, *}; - use crate::Event::RewardPaid; + use crate::Event::{RewardPaid, RewardRegistered}; use bp_messages::LaneId; use bp_relayers::RewardsAccountOwner; use frame_support::{ @@ -470,6 +485,33 @@ mod tests { System::::reset_events(); } + #[test] + fn register_relayer_reward_emit_event() { + run_test(|| { + get_ready_for_events(); + + Pallet::::register_relayer_reward( + TEST_REWARDS_ACCOUNT_PARAMS, + ®ULAR_RELAYER, + 100, + ); + + // Check if the `RewardRegistered` event was emitted. + assert_eq!( + System::::events().last(), + Some(&EventRecord { + phase: Phase::Initialization, + event: TestEvent::Relayers(RewardRegistered { + relayer: REGULAR_RELAYER, + rewards_account_params: TEST_REWARDS_ACCOUNT_PARAMS, + reward: 100 + }), + topics: vec![], + }), + ); + }); + } + #[test] fn root_cant_claim_anything() { run_test(|| { diff --git a/bridges/modules/relayers/src/mock.rs b/bridges/modules/relayers/src/mock.rs index 4713ec91658afa9ad4e0c2d13055a0921cf1be88..667b10e5c125ed74bad2aa7796756f372578c2ce 100644 --- a/bridges/modules/relayers/src/mock.rs +++ b/bridges/modules/relayers/src/mock.rs @@ -22,12 +22,10 @@ use bp_messages::LaneId; use bp_relayers::{ PayRewardFromAccount, PaymentProcedure, RewardsAccountOwner, RewardsAccountParams, }; -use frame_support::{parameter_types, traits::fungible::Mutate, weights::RuntimeDbWeight}; -use sp_core::H256; -use sp_runtime::{ - traits::{BlakeTwo256, ConstU32, IdentityLookup}, - BuildStorage, +use frame_support::{ + derive_impl, parameter_types, traits::fungible::Mutate, weights::RuntimeDbWeight, }; +use sp_runtime::BuildStorage; pub type AccountId = u64; pub type Balance = u64; @@ -61,46 +59,17 @@ parameter_types! { pub const Lease: BlockNumber = 8; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for TestRuntime { - type RuntimeOrigin = RuntimeOrigin; - type Nonce = u64; - type RuntimeCall = RuntimeCall; type Block = Block; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = frame_support::traits::ConstU64<250>; - type Version = (); - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type BaseCallFilter = frame_support::traits::Everything; - type SystemWeightInfo = (); - type BlockWeights = (); - type BlockLength = (); type DbWeight = DbWeight; - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig as pallet_balances::DefaultConfig)] impl pallet_balances::Config for TestRuntime { - type MaxLocks = (); - type Balance = Balance; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = frame_system::Pallet; - type WeightInfo = (); - type MaxReserves = ConstU32<1>; type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type FreezeIdentifier = (); - type MaxHolds = ConstU32<0>; - type MaxFreezes = ConstU32<0>; + type AccountStore = System; } impl pallet_bridge_relayers::Config for TestRuntime { diff --git a/bridges/modules/relayers/src/weights.rs b/bridges/modules/relayers/src/weights.rs index 2e064a3936df3a0661fc08af48be87520c04dba0..c2c065b0c0a270a254a60dccb62465d6c2fa4aa6 100644 --- 
a/bridges/modules/relayers/src/weights.rs +++ b/bridges/modules/relayers/src/weights.rs @@ -23,7 +23,7 @@ //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// target/release/millau-bridge-node +// target/release/rip-bridge-node // benchmark // pallet // --chain=dev diff --git a/bridges/modules/xcm-bridge-hub-router/Cargo.toml b/bridges/modules/xcm-bridge-hub-router/Cargo.toml index c61cab291e142e5e1aa4680b162d288ee62284a5..56b9139d7d5f6771dd0dd048ff7a791a7e099c4c 100644 --- a/bridges/modules/xcm-bridge-hub-router/Cargo.toml +++ b/bridges/modules/xcm-bridge-hub-router/Cargo.toml @@ -9,7 +9,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } log = { version = "0.4.20", default-features = false } -scale-info = { version = "2.8.0", default-features = false, features = ["bit-vec", "derive", "serde"] } +scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive", "serde"] } # Bridge dependencies diff --git a/bridges/modules/xcm-bridge-hub-router/src/benchmarking.rs b/bridges/modules/xcm-bridge-hub-router/src/benchmarking.rs index 4bbe414f66355bfb2fc866cc74b89f7b97988837..c4d1e3971e74777668b8bfa4dbcfdb88fbac3779 100644 --- a/bridges/modules/xcm-bridge-hub-router/src/benchmarking.rs +++ b/bridges/modules/xcm-bridge-hub-router/src/benchmarking.rs @@ -60,6 +60,8 @@ benchmarks_instance_pallet! { is_congested: false, delivery_fee_factor: MINIMAL_DELIVERY_FEE_FACTOR + MINIMAL_DELIVERY_FEE_FACTOR, }); + + let _ = T::ensure_bridged_target_destination(); T::make_congested(); }: { crate::Pallet::::on_initialize(Zero::zero()) @@ -79,11 +81,11 @@ benchmarks_instance_pallet! 
{ } send_message { - // make local queue congested, because it means additional db write - T::make_congested(); - let dest = T::ensure_bridged_target_destination(); let xcm = sp_std::vec![].into(); + + // make local queue congested, because it means additional db write + T::make_congested(); }: { send_xcm::>(dest, xcm).expect("message is sent") } diff --git a/bridges/modules/xcm-bridge-hub-router/src/lib.rs b/bridges/modules/xcm-bridge-hub-router/src/lib.rs index 5cf94fc83fd9dffda2e1fa614608eb53ea13922e..cf51ef82412fbeb99ed9b1b77b693ebe3a288dd4 100644 --- a/bridges/modules/xcm-bridge-hub-router/src/lib.rs +++ b/bridges/modules/xcm-bridge-hub-router/src/lib.rs @@ -335,7 +335,7 @@ impl, I: 'static> SendXcm for Pallet { // just use exporter to validate destination and insert instructions to pay message fee // at the sibling/child bridge hub // - // the cost will include both cost of: (1) to-sibling bridg hub delivery (returned by + // the cost will include both cost of: (1) to-sibling bridge hub delivery (returned by // the `Config::ToBridgeHubSender`) and (2) to-bridged bridge hub delivery (returned by // `Self::exporter_for`) ViaBridgeHubExporter::::validate(dest, xcm) diff --git a/bridges/modules/xcm-bridge-hub-router/src/mock.rs b/bridges/modules/xcm-bridge-hub-router/src/mock.rs index 58df21a6d9016a70a8414b884ccedf33d7ae1524..2d173ebc0457a941c9f9038a9c1768c367cff362 100644 --- a/bridges/modules/xcm-bridge-hub-router/src/mock.rs +++ b/bridges/modules/xcm-bridge-hub-router/src/mock.rs @@ -19,15 +19,11 @@ use crate as pallet_xcm_bridge_hub_router; use bp_xcm_bridge_hub_router::XcmChannelStatusProvider; -use frame_support::{construct_runtime, parameter_types}; +use frame_support::{construct_runtime, derive_impl, parameter_types}; use frame_system::EnsureRoot; -use sp_core::H256; -use sp_runtime::{ - traits::{BlakeTwo256, ConstU128, IdentityLookup}, - BuildStorage, -}; +use sp_runtime::{traits::ConstU128, BuildStorage}; use xcm::prelude::*; -use xcm_builder::NetworkExportTable; +use xcm_builder::{NetworkExportTable, NetworkExportTableItem}; pub type AccountId = u64; type Block = frame_system::mocking::MockBlock; @@ -53,34 +49,20 @@ parameter_types! 
{ pub UniversalLocation: InteriorMultiLocation = X2(GlobalConsensus(ThisNetworkId::get()), Parachain(1000)); pub SiblingBridgeHubLocation: MultiLocation = ParentThen(X1(Parachain(1002))).into(); pub BridgeFeeAsset: AssetId = MultiLocation::parent().into(); - pub BridgeTable: Vec<(NetworkId, MultiLocation, Option)> - = vec![(BridgedNetworkId::get(), SiblingBridgeHubLocation::get(), Some((BridgeFeeAsset::get(), BASE_FEE).into()))]; + pub BridgeTable: Vec + = vec![ + NetworkExportTableItem::new( + BridgedNetworkId::get(), + None, + SiblingBridgeHubLocation::get(), + Some((BridgeFeeAsset::get(), BASE_FEE).into()) + ) + ]; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for TestRuntime { - type RuntimeOrigin = RuntimeOrigin; - type Nonce = u64; - type RuntimeCall = RuntimeCall; type Block = Block; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = frame_support::traits::ConstU64<250>; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type BaseCallFilter = frame_support::traits::Everything; - type SystemWeightInfo = (); - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; } impl pallet_xcm_bridge_hub_router::Config<()> for TestRuntime { diff --git a/bridges/modules/xcm-bridge-hub-router/src/weights.rs b/bridges/modules/xcm-bridge-hub-router/src/weights.rs index 62936e997f3cd0aaea17859b49d7c0edf6b65989..b0c8fc6252cd5e6eaa968cce06636a308e1c7e05 100644 --- a/bridges/modules/xcm-bridge-hub-router/src/weights.rs +++ b/bridges/modules/xcm-bridge-hub-router/src/weights.rs @@ -23,7 +23,7 @@ //! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// target/release/millau-bridge-node +// target/release/rip-bridge-node // benchmark // pallet // --chain=dev diff --git a/bridges/primitives/chain-asset-hub-kusama/Cargo.toml b/bridges/primitives/chain-asset-hub-rococo/Cargo.toml similarity index 77% rename from bridges/primitives/chain-asset-hub-kusama/Cargo.toml rename to bridges/primitives/chain-asset-hub-rococo/Cargo.toml index adb9a57bc134729ab753e3006093207e0de32e61..088510adcec63b6d30c76b965100823c0d466827 100644 --- a/bridges/primitives/chain-asset-hub-kusama/Cargo.toml +++ b/bridges/primitives/chain-asset-hub-rococo/Cargo.toml @@ -1,6 +1,6 @@ [package] -name = "bp-asset-hub-kusama" -description = "Primitives of AssetHubKusama parachain runtime." +name = "bp-asset-hub-rococo" +description = "Primitives of AssetHubRococo parachain runtime." 
version = "0.1.0" authors.workspace = true edition.workspace = true @@ -8,7 +8,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } # Substrate Dependencies frame-support = { path = "../../../substrate/frame/support", default-features = false } diff --git a/bridges/primitives/chain-asset-hub-kusama/src/lib.rs b/bridges/primitives/chain-asset-hub-rococo/src/lib.rs similarity index 75% rename from bridges/primitives/chain-asset-hub-kusama/src/lib.rs rename to bridges/primitives/chain-asset-hub-rococo/src/lib.rs index 94016c1da0cb1235dec6717bd6da79ff4dab74f4..de2e9ae856d1f8756f0a2a6b9cae3da3e265e76e 100644 --- a/bridges/primitives/chain-asset-hub-kusama/src/lib.rs +++ b/bridges/primitives/chain-asset-hub-rococo/src/lib.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -//! Module with configuration which reflects AssetHubKusama runtime setup. +//! Module with configuration which reflects AssetHubRococo runtime setup. #![cfg_attr(not(feature = "std"), no_std)] @@ -23,27 +23,26 @@ use scale_info::TypeInfo; pub use bp_xcm_bridge_hub_router::XcmBridgeHubRouterCall; -/// `AssetHubKusama` Runtime `Call` enum. +/// `AssetHubRococo` Runtime `Call` enum. /// -/// The enum represents a subset of possible `Call`s we can send to `AssetHubKusama` chain. +/// The enum represents a subset of possible `Call`s we can send to `AssetHubRococo` chain. /// Ideally this code would be auto-generated from metadata, because we want to /// avoid depending directly on the ENTIRE runtime just to get the encoding of `Dispatchable`s. /// /// All entries here (like pretty much in the entire file) must be kept in sync with -/// `AssetHubKusama` `construct_runtime`, so that we maintain SCALE-compatibility. +/// `AssetHubRococo` `construct_runtime`, so that we maintain SCALE-compatibility. #[allow(clippy::large_enum_variant)] #[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] pub enum Call { - /// `ToPolkadotXcmRouter` bridge pallet. - #[codec(index = 43)] - ToPolkadotXcmRouter(XcmBridgeHubRouterCall), + /// `ToWestendXcmRouter` bridge pallet. + #[codec(index = 45)] + ToWestendXcmRouter(XcmBridgeHubRouterCall), } frame_support::parameter_types! { /// Some sane weight to execute `xcm::Transact(pallet-xcm-bridge-hub-router::Call::report_bridge_status)`. pub const XcmBridgeHubRouterTransactCallMaxWeight: frame_support::weights::Weight = frame_support::weights::Weight::from_parts(200_000_000, 6144); - - /// Base delivery fee to `BridgeHubKusama`. - /// (initially was calculated `170733333` + `10%` by test `BridgeHubKusama::can_calculate_weight_for_paid_export_message_with_reserve_transfer`) - pub const BridgeHubKusamaBaseFeeInDots: u128 = 187806666; } + +/// Identifier of AssetHubRococo in the Rococo relay chain. 
+pub const ASSET_HUB_ROCOCO_PARACHAIN_ID: u32 = 1000; diff --git a/bridges/primitives/chain-asset-hub-polkadot/Cargo.toml b/bridges/primitives/chain-asset-hub-westend/Cargo.toml similarity index 67% rename from bridges/primitives/chain-asset-hub-polkadot/Cargo.toml rename to bridges/primitives/chain-asset-hub-westend/Cargo.toml index 857ead15b0ddf79b82fd7b8c01da767419e22e2b..c880f159ac1c1bca0f43c0beec4a06822f44b76f 100644 --- a/bridges/primitives/chain-asset-hub-polkadot/Cargo.toml +++ b/bridges/primitives/chain-asset-hub-westend/Cargo.toml @@ -1,6 +1,6 @@ [package] -name = "bp-asset-hub-polkadot" -description = "Primitives of AssetHubPolkadot parachain runtime." +name = "bp-asset-hub-westend" +description = "Primitives of AssetHubWestend parachain runtime." version = "0.1.0" authors.workspace = true edition.workspace = true @@ -8,11 +8,10 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } # Substrate Dependencies frame-support = { path = "../../../substrate/frame/support", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } # Bridge Dependencies bp-xcm-bridge-hub-router = { path = "../xcm-bridge-hub-router", default-features = false } @@ -24,5 +23,4 @@ std = [ "codec/std", "frame-support/std", "scale-info/std", - "sp-runtime/std", ] diff --git a/bridges/primitives/chain-asset-hub-polkadot/src/lib.rs b/bridges/primitives/chain-asset-hub-westend/src/lib.rs similarity index 73% rename from bridges/primitives/chain-asset-hub-polkadot/src/lib.rs rename to bridges/primitives/chain-asset-hub-westend/src/lib.rs index 486fba60e1f8836d0eec2feece7919205cf6c372..9de1c88098942cdf7bd0684462a95ac3de412490 100644 --- a/bridges/primitives/chain-asset-hub-polkadot/src/lib.rs +++ b/bridges/primitives/chain-asset-hub-westend/src/lib.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -//! Module with configuration which reflects AssetHubPolkadot runtime setup. +//! Module with configuration which reflects AssetHubWestend runtime setup. #![cfg_attr(not(feature = "std"), no_std)] @@ -23,27 +23,26 @@ use scale_info::TypeInfo; pub use bp_xcm_bridge_hub_router::XcmBridgeHubRouterCall; -/// `AssetHubPolkadot` Runtime `Call` enum. +/// `AssetHubWestend` Runtime `Call` enum. /// -/// The enum represents a subset of possible `Call`s we can send to `AssetHubPolkadot` chain. +/// The enum represents a subset of possible `Call`s we can send to `AssetHubWestend` chain. /// Ideally this code would be auto-generated from metadata, because we want to /// avoid depending directly on the ENTIRE runtime just to get the encoding of `Dispatchable`s. /// /// All entries here (like pretty much in the entire file) must be kept in sync with -/// `AssetHubPolkadot` `construct_runtime`, so that we maintain SCALE-compatibility. +/// `AssetHubWestend` `construct_runtime`, so that we maintain SCALE-compatibility. #[allow(clippy::large_enum_variant)] #[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] pub enum Call { - /// `ToKusamaXcmRouter` bridge pallet. - #[codec(index = 43)] - ToKusamaXcmRouter(XcmBridgeHubRouterCall), + /// `ToRococoXcmRouter` bridge pallet. 
+ #[codec(index = 34)] + ToRococoXcmRouter(XcmBridgeHubRouterCall), } frame_support::parameter_types! { /// Some sane weight to execute `xcm::Transact(pallet-xcm-bridge-hub-router::Call::report_bridge_status)`. pub const XcmBridgeHubRouterTransactCallMaxWeight: frame_support::weights::Weight = frame_support::weights::Weight::from_parts(200_000_000, 6144); - - /// Base delivery fee to `BridgeHubPolkadot`. - /// (initially was calculated `51220000` + `10%` by test `BridgeHubPolkadot::can_calculate_weight_for_paid_export_message_with_reserve_transfer`) - pub const BridgeHubPolkadotBaseFeeInDots: u128 = 56342000; } + +/// Identifier of AssetHubWestend in the Westend relay chain. +pub const ASSET_HUB_WESTEND_PARACHAIN_ID: u32 = 1000; diff --git a/bridges/primitives/chain-bridge-hub-cumulus/Cargo.toml b/bridges/primitives/chain-bridge-hub-cumulus/Cargo.toml index 24cf7236d45333254dd62073dfebcd85ec526362..466979136743eda6953ce780a007c0ff7dfbc537 100644 --- a/bridges/primitives/chain-bridge-hub-cumulus/Cargo.toml +++ b/bridges/primitives/chain-bridge-hub-cumulus/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "bp-bridge-hub-cumulus" -description = "Primitives of BridgeHubRococo parachain runtime." +description = "Primitives for BridgeHub parachain runtimes." version = "0.1.0" authors.workspace = true edition.workspace = true diff --git a/bridges/primitives/chain-bridge-hub-kusama/Cargo.toml b/bridges/primitives/chain-bridge-hub-kusama/Cargo.toml index 387f5e8ade6e7dc501159266a94168dd7055a8c1..c4cd229ef4362a62707f3e881c77d94ab3f25fc3 100644 --- a/bridges/primitives/chain-bridge-hub-kusama/Cargo.toml +++ b/bridges/primitives/chain-bridge-hub-kusama/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "bp-bridge-hub-kusama" -description = "Primitives of BridgeHubRococo parachain runtime." +description = "Primitives of BridgeHubKusama parachain runtime." version = "0.1.0" authors.workspace = true edition.workspace = true diff --git a/bridges/primitives/chain-bridge-hub-polkadot/Cargo.toml b/bridges/primitives/chain-bridge-hub-polkadot/Cargo.toml index 40b386e22d224da198043f44210a8c1de65af3ac..4913d87e5fb21d244a1c04ebabdb3df80de660d0 100644 --- a/bridges/primitives/chain-bridge-hub-polkadot/Cargo.toml +++ b/bridges/primitives/chain-bridge-hub-polkadot/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "bp-bridge-hub-polkadot" -description = "Primitives of BridgeHubWococo parachain runtime." +description = "Primitives of BridgeHubPolkadot parachain runtime." version = "0.1.0" authors.workspace = true edition.workspace = true diff --git a/bridges/primitives/chain-bridge-hub-rococo/src/lib.rs b/bridges/primitives/chain-bridge-hub-rococo/src/lib.rs index a50bda23ac8d358259d725e0c93be31983caba0d..59d293edf1c249a10394c1a0b6f0e7fb8fb0ebcf 100644 --- a/bridges/primitives/chain-bridge-hub-rococo/src/lib.rs +++ b/bridges/primitives/chain-bridge-hub-rococo/src/lib.rs @@ -74,5 +74,23 @@ pub const WITH_BRIDGE_HUB_ROCOCO_MESSAGES_PALLET_NAME: &str = "BridgeRococoMessa /// chains. pub const WITH_BRIDGE_HUB_ROCOCO_RELAYERS_PALLET_NAME: &str = "BridgeRelayers"; +/// Pallet index of `BridgeWestendMessages: pallet_bridge_messages::`. +pub const WITH_BRIDGE_ROCOCO_TO_WESTEND_MESSAGES_PALLET_INDEX: u8 = 51; + decl_bridge_finality_runtime_apis!(bridge_hub_rococo); decl_bridge_messages_runtime_apis!(bridge_hub_rococo); + +frame_support::parameter_types! { + /// The XCM fee that is paid for executing XCM program (with `ExportMessage` instruction) at the Rococo + /// BridgeHub. 
+ /// (initially was calculated by test `BridgeHubRococo::can_calculate_weight_for_paid_export_message_with_reserve_transfer` + `33%`) + pub const BridgeHubRococoBaseXcmFeeInRocs: u128 = 1628875538; + + /// Transaction fee that is paid at the Rococo BridgeHub for delivering single inbound message. + /// (initially was calculated by test `BridgeHubRococo::can_calculate_fee_for_complex_message_delivery_transaction` + `33%`) + pub const BridgeHubRococoBaseDeliveryFeeInRocs: u128 = 6417262881; + + /// Transaction fee that is paid at the Rococo BridgeHub for delivering single outbound message confirmation. + /// (initially was calculated by test `BridgeHubRococo::can_calculate_fee_for_complex_message_confirmation_transaction` + `33%`) + pub const BridgeHubRococoBaseConfirmationFeeInRocs: u128 = 6159996668; +} diff --git a/bridges/primitives/chain-bridge-hub-wococo/Cargo.toml b/bridges/primitives/chain-bridge-hub-westend/Cargo.toml similarity index 76% rename from bridges/primitives/chain-bridge-hub-wococo/Cargo.toml rename to bridges/primitives/chain-bridge-hub-westend/Cargo.toml index 17c134f4412f7d74a45e089c920bd260bcd95008..22daf280868de14d5555631add5f57e0f3bbe6ad 100644 --- a/bridges/primitives/chain-bridge-hub-wococo/Cargo.toml +++ b/bridges/primitives/chain-bridge-hub-westend/Cargo.toml @@ -1,6 +1,6 @@ [package] -name = "bp-bridge-hub-wococo" -description = "Primitives of BridgeHubWococo parachain runtime." +name = "bp-bridge-hub-westend" +description = "Primitives of BridgeHubWestend parachain runtime." version = "0.1.0" authors.workspace = true edition.workspace = true @@ -11,8 +11,8 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" # Bridge Dependencies bp-bridge-hub-cumulus = { path = "../chain-bridge-hub-cumulus", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } -bp-messages = { path = "../messages", default-features = false } +bp-runtime = { path = "../../primitives/runtime", default-features = false } +bp-messages = { path = "../../primitives/messages", default-features = false } # Substrate Based Dependencies diff --git a/bridges/primitives/chain-bridge-hub-westend/src/lib.rs b/bridges/primitives/chain-bridge-hub-westend/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..0124e05bf8871c35d68a64f00d9b34719b900c66 --- /dev/null +++ b/bridges/primitives/chain-bridge-hub-westend/src/lib.rs @@ -0,0 +1,90 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Module with configuration which reflects BridgeHubWestend runtime setup +//! (AccountId, Headers, Hashes...) 
+ +#![cfg_attr(not(feature = "std"), no_std)] + +pub use bp_bridge_hub_cumulus::*; +use bp_messages::*; +use bp_runtime::{ + decl_bridge_finality_runtime_apis, decl_bridge_messages_runtime_apis, Chain, Parachain, +}; +use frame_support::dispatch::DispatchClass; +use sp_runtime::RuntimeDebug; + +/// BridgeHubWestend parachain. +#[derive(RuntimeDebug)] +pub struct BridgeHubWestend; + +impl Chain for BridgeHubWestend { + type BlockNumber = BlockNumber; + type Hash = Hash; + type Hasher = Hasher; + type Header = Header; + + type AccountId = AccountId; + type Balance = Balance; + type Nonce = Nonce; + type Signature = Signature; + + fn max_extrinsic_size() -> u32 { + *BlockLength::get().max.get(DispatchClass::Normal) + } + + fn max_extrinsic_weight() -> Weight { + BlockWeights::get() + .get(DispatchClass::Normal) + .max_extrinsic + .unwrap_or(Weight::MAX) + } +} + +impl Parachain for BridgeHubWestend { + const PARACHAIN_ID: u32 = BRIDGE_HUB_WESTEND_PARACHAIN_ID; +} + +/// Identifier of BridgeHubWestend in the Westend relay chain. +pub const BRIDGE_HUB_WESTEND_PARACHAIN_ID: u32 = 1002; + +/// Name of the With-BridgeHubWestend messages pallet instance that is deployed at bridged chains. +pub const WITH_BRIDGE_HUB_WESTEND_MESSAGES_PALLET_NAME: &str = "BridgeWestendMessages"; + +/// Name of the With-BridgeHubWestend bridge-relayers pallet instance that is deployed at bridged +/// chains. +pub const WITH_BRIDGE_HUB_WESTEND_RELAYERS_PALLET_NAME: &str = "BridgeRelayers"; + +/// Pallet index of `BridgeRococoMessages: pallet_bridge_messages::`. +pub const WITH_BRIDGE_WESTEND_TO_ROCOCO_MESSAGES_PALLET_INDEX: u8 = 44; + +decl_bridge_finality_runtime_apis!(bridge_hub_westend); +decl_bridge_messages_runtime_apis!(bridge_hub_westend); + +frame_support::parameter_types! { + /// The XCM fee that is paid for executing XCM program (with `ExportMessage` instruction) at the Westend + /// BridgeHub. + /// (initially was calculated by test `BridgeHubWestend::can_calculate_weight_for_paid_export_message_with_reserve_transfer` + `33%`) + pub const BridgeHubWestendBaseXcmFeeInWnds: u128 = 488662666666; + + /// Transaction fee that is paid at the Westend BridgeHub for delivering single inbound message. + /// (initially was calculated by test `BridgeHubWestend::can_calculate_fee_for_complex_message_delivery_transaction` + `33%`) + pub const BridgeHubWestendBaseDeliveryFeeInWnds: u128 = 1925196628010; + + /// Transaction fee that is paid at the Westend BridgeHub for delivering single outbound message confirmation. + /// (initially was calculated by test `BridgeHubWestend::can_calculate_fee_for_complex_message_confirmation_transaction` + `33%`) + pub const BridgeHubWestendBaseConfirmationFeeInWnds: u128 = 1848016628010; +} diff --git a/bridges/primitives/chain-bridge-hub-wococo/src/lib.rs b/bridges/primitives/chain-bridge-hub-wococo/src/lib.rs deleted file mode 100644 index ce4600f5ff35d674afb1f6a6bdd24a044990fe78..0000000000000000000000000000000000000000 --- a/bridges/primitives/chain-bridge-hub-wococo/src/lib.rs +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Module with configuration which reflects BridgeHubWococo runtime setup -//! (AccountId, Headers, Hashes...) - -#![cfg_attr(not(feature = "std"), no_std)] - -pub use bp_bridge_hub_cumulus::*; -use bp_messages::*; -use bp_runtime::{ - decl_bridge_finality_runtime_apis, decl_bridge_messages_runtime_apis, Chain, Parachain, -}; -use frame_support::dispatch::DispatchClass; -use sp_runtime::RuntimeDebug; - -/// BridgeHubWococo parachain. -#[derive(RuntimeDebug)] -pub struct BridgeHubWococo; - -impl Chain for BridgeHubWococo { - type BlockNumber = BlockNumber; - type Hash = Hash; - type Hasher = Hasher; - type Header = Header; - - type AccountId = AccountId; - type Balance = Balance; - type Nonce = Nonce; - type Signature = Signature; - - fn max_extrinsic_size() -> u32 { - *BlockLength::get().max.get(DispatchClass::Normal) - } - - fn max_extrinsic_weight() -> Weight { - BlockWeights::get() - .get(DispatchClass::Normal) - .max_extrinsic - .unwrap_or(Weight::MAX) - } -} - -impl Parachain for BridgeHubWococo { - const PARACHAIN_ID: u32 = BRIDGE_HUB_WOCOCO_PARACHAIN_ID; -} - -/// Identifier of BridgeHubWococo in the Wococo relay chain. -pub const BRIDGE_HUB_WOCOCO_PARACHAIN_ID: u32 = 1014; - -/// Name of the With-BridgeHubWococo messages pallet instance that is deployed at bridged chains. -pub const WITH_BRIDGE_HUB_WOCOCO_MESSAGES_PALLET_NAME: &str = "BridgeWococoMessages"; - -/// Name of the With-BridgeHubWococo bridge-relayers pallet instance that is deployed at bridged -/// chains. -pub const WITH_BRIDGE_HUB_WOCOCO_RELAYERS_PALLET_NAME: &str = "BridgeRelayers"; - -decl_bridge_finality_runtime_apis!(bridge_hub_wococo); -decl_bridge_messages_runtime_apis!(bridge_hub_wococo); diff --git a/bridges/primitives/chain-polkadot-bulletin/Cargo.toml b/bridges/primitives/chain-polkadot-bulletin/Cargo.toml index 4311aec47276e5144ddc56074cda205a1ec454da..1dd45ba95fd84820fc44cb5392cdd24668c689ee 100644 --- a/bridges/primitives/chain-polkadot-bulletin/Cargo.toml +++ b/bridges/primitives/chain-polkadot-bulletin/Cargo.toml @@ -2,13 +2,13 @@ name = "bp-polkadot-bulletin" description = "Primitives of Polkadot Bulletin chain runtime." 
version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" +authors.workspace = true +edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["derive"] } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } # Bridge Dependencies @@ -35,6 +35,7 @@ std = [ "codec/std", "frame-support/std", "frame-system/std", + "scale-info/std", "sp-api/std", "sp-runtime/std", "sp-std/std", diff --git a/bridges/primitives/chain-rococo/Cargo.toml b/bridges/primitives/chain-rococo/Cargo.toml index 3c4d3917bc219bfd7dd65ec9e8a0c283fe834e49..469be1dbd336db0299df445989a117dafc4166f3 100644 --- a/bridges/primitives/chain-rococo/Cargo.toml +++ b/bridges/primitives/chain-rococo/Cargo.toml @@ -16,9 +16,9 @@ bp-runtime = { path = "../runtime", default-features = false } # Substrate Based Dependencies +frame-support = { path = "../../../substrate/frame/support", default-features = false } sp-api = { path = "../../../substrate/primitives/api", default-features = false } sp-std = { path = "../../../substrate/primitives/std", default-features = false } -frame-support = { path = "../../../substrate/frame/support", default-features = false } [features] default = [ "std" ] diff --git a/bridges/primitives/chain-wococo/Cargo.toml b/bridges/primitives/chain-westend/Cargo.toml similarity index 84% rename from bridges/primitives/chain-wococo/Cargo.toml rename to bridges/primitives/chain-westend/Cargo.toml index 05901821b366b71d40d9c28392150172be44a0b7..797621bbce2ea6a432082b148485140ea87865f5 100644 --- a/bridges/primitives/chain-wococo/Cargo.toml +++ b/bridges/primitives/chain-westend/Cargo.toml @@ -1,6 +1,6 @@ [package] -name = "bp-wococo" -description = "Primitives of Wococo runtime." +name = "bp-westend" +description = "Primitives of Westend runtime." 
version = "0.1.0" authors.workspace = true edition.workspace = true @@ -13,7 +13,6 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" bp-header-chain = { path = "../header-chain", default-features = false } bp-polkadot-core = { path = "../polkadot-core", default-features = false } bp-runtime = { path = "../runtime", default-features = false } -bp-rococo = { path = "../chain-rococo", default-features = false } # Substrate Based Dependencies @@ -26,7 +25,6 @@ default = [ "std" ] std = [ "bp-header-chain/std", "bp-polkadot-core/std", - "bp-rococo/std", "bp-runtime/std", "frame-support/std", "sp-api/std", diff --git a/bridges/primitives/chain-wococo/src/lib.rs b/bridges/primitives/chain-westend/src/lib.rs similarity index 65% rename from bridges/primitives/chain-wococo/src/lib.rs rename to bridges/primitives/chain-westend/src/lib.rs index b1df65630beffdaad80ce8f22ba357dbf766ea0f..45c13d600601fae14f48bcfb61dda225682f300a 100644 --- a/bridges/primitives/chain-wococo/src/lib.rs +++ b/bridges/primitives/chain-westend/src/lib.rs @@ -19,18 +19,15 @@ #![allow(clippy::too_many_arguments)] pub use bp_polkadot_core::*; -pub use bp_rococo::{ - SS58Prefix, MAX_AUTHORITIES_COUNT, MAX_NESTED_PARACHAIN_HEAD_DATA_SIZE, PARAS_PALLET_NAME, -}; use bp_header_chain::ChainWithGrandpa; use bp_runtime::{decl_bridge_finality_runtime_apis, Chain}; -use frame_support::weights::Weight; +use frame_support::{parameter_types, weights::Weight}; -/// Wococo Chain -pub struct Wococo; +/// Westend Chain +pub struct Westend; -impl Chain for Wococo { +impl Chain for Westend { type BlockNumber = ::BlockNumber; type Hash = ::Hash; type Hasher = ::Hasher; @@ -50,8 +47,8 @@ impl Chain for Wococo { } } -impl ChainWithGrandpa for Wococo { - const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = WITH_WOCOCO_GRANDPA_PALLET_NAME; +impl ChainWithGrandpa for Westend { + const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = WITH_WESTEND_GRANDPA_PALLET_NAME; const MAX_AUTHORITIES_COUNT: u32 = MAX_AUTHORITIES_COUNT; const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY; @@ -59,10 +56,24 @@ impl ChainWithGrandpa for Wococo { const AVERAGE_HEADER_SIZE_IN_JUSTIFICATION: u32 = AVERAGE_HEADER_SIZE_IN_JUSTIFICATION; } -// The SignedExtension used by Wococo. -pub use bp_rococo::CommonSignedExtension as SignedExtension; +parameter_types! { + pub const SS58Prefix: u8 = 42; +} + +// The SignedExtension used by Westend. +pub use bp_polkadot_core::CommonSignedExtension as SignedExtension; + +/// Name of the parachains pallet in the Rococo runtime. +pub const PARAS_PALLET_NAME: &str = "Paras"; + +/// Name of the With-Westend GRANDPA pallet instance that is deployed at bridged chains. +pub const WITH_WESTEND_GRANDPA_PALLET_NAME: &str = "BridgeWestendGrandpa"; -/// Name of the With-Wococo GRANDPA pallet instance that is deployed at bridged chains. -pub const WITH_WOCOCO_GRANDPA_PALLET_NAME: &str = "BridgeWococoGrandpa"; +/// Maximal size of encoded `bp_parachains::ParaStoredHeaderData` structure among all Westend +/// parachains. +/// +/// It includes the block number and state root, so it shall be near 40 bytes, but let's have some +/// reserve. 
+pub const MAX_NESTED_PARACHAIN_HEAD_DATA_SIZE: u32 = 128; -decl_bridge_finality_runtime_apis!(wococo, grandpa); +decl_bridge_finality_runtime_apis!(westend, grandpa); diff --git a/bridges/primitives/header-chain/Cargo.toml b/bridges/primitives/header-chain/Cargo.toml index e3e83235960a8d1565d5be040ce222b632e95876..19b2819bddce804f1db5e058871941db980c7bf5 100644 --- a/bridges/primitives/header-chain/Cargo.toml +++ b/bridges/primitives/header-chain/Cargo.toml @@ -9,7 +9,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } finality-grandpa = { version = "0.16.2", default-features = false } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0", default-features = false, features = ["alloc", "derive"] } # Bridge dependencies diff --git a/bridges/primitives/header-chain/src/justification/verification/equivocation.rs b/bridges/primitives/header-chain/src/justification/verification/equivocation.rs index e2d7a8e804c20bcaeef48e3ba96c29911a3c6012..fbad301281994ae9e0ed32444c3373e032e4355e 100644 --- a/bridges/primitives/header-chain/src/justification/verification/equivocation.rs +++ b/bridges/primitives/header-chain/src/justification/verification/equivocation.rs @@ -101,6 +101,13 @@ impl<'a, Header: HeaderT> EquivocationsCollector<'a, Header> { } impl<'a, Header: HeaderT> JustificationVerifier
for EquivocationsCollector<'a, Header> { + fn process_duplicate_votes_ancestries( + &mut self, + _duplicate_votes_ancestries: Vec<usize>, + ) -> Result<(), JustificationVerificationError> { + Ok(()) + } + fn process_redundant_vote( &mut self, _precommit_idx: usize, diff --git a/bridges/primitives/header-chain/src/justification/verification/mod.rs b/bridges/primitives/header-chain/src/justification/verification/mod.rs index a66fc1e0d91d1fe45ca747a0060abdb87f9d0e44..c71149bf9c28e350fb43429623ca47cd367b9091 100644 --- a/bridges/primitives/header-chain/src/justification/verification/mod.rs +++ b/bridges/primitives/header-chain/src/justification/verification/mod.rs @@ -27,7 +27,13 @@ use finality_grandpa::voter_set::VoterSet; use sp_consensus_grandpa::{AuthorityId, AuthoritySignature, SetId}; use sp_runtime::{traits::Header as HeaderT, RuntimeDebug}; use sp_std::{ - collections::{btree_map::BTreeMap, btree_set::BTreeSet}, + collections::{ + btree_map::{ + BTreeMap, + Entry::{Occupied, Vacant}, + }, + btree_set::BTreeSet, + }, prelude::*, }; @@ -44,23 +50,40 @@ pub struct AncestryChain<Header: HeaderT> { /// We expect all forks in the ancestry chain to be descendants of base. base: HeaderId<Header::Hash, Header::Number>, /// Header hash => parent header hash mapping. - pub parents: BTreeMap<Header::Hash, Header::Hash>, + parents: BTreeMap<Header::Hash, Header::Hash>, /// Hashes of headers that were not visited by `ancestry()`. - pub unvisited: BTreeSet<Header::Hash>, + unvisited: BTreeSet<Header::Hash>, } impl<Header: HeaderT> AncestryChain<Header> { - /// Create new ancestry chain. - pub fn new(justification: &GrandpaJustification<Header>) -> AncestryChain<Header> { + /// Creates a new instance of `AncestryChain` starting from a `GrandpaJustification`. + /// + /// Returns the `AncestryChain` and a `Vec` containing the `votes_ancestries` entries + /// that were ignored when creating it, because they are duplicates. + pub fn new( + justification: &GrandpaJustification<Header>, + ) -> (AncestryChain<Header>, Vec<usize>) { let mut parents = BTreeMap::new(); let mut unvisited = BTreeSet::new(); - for ancestor in &justification.votes_ancestries { + let mut ignored_idxs = Vec::new(); + for (idx, ancestor) in justification.votes_ancestries.iter().enumerate() { let hash = ancestor.hash(); - let parent_hash = *ancestor.parent_hash(); - parents.insert(hash, parent_hash); - unvisited.insert(hash); + match parents.entry(hash) { + Occupied(_) => { + ignored_idxs.push(idx); + }, + Vacant(entry) => { + entry.insert(*ancestor.parent_hash()); + unvisited.insert(hash); + }, + } } - AncestryChain { base: justification.commit_target_id(), parents, unvisited } + (AncestryChain { base: justification.commit_target_id(), parents, unvisited }, ignored_idxs) + } + + /// Returns the hash of a block's parent if the block is present in the ancestry. + pub fn parent_hash_of(&self, hash: &Header::Hash) -> Option<&Header::Hash> { + self.parents.get(hash) } /// Returns a route if the precommit target block is a descendant of the `base` block. @@ -80,7 +103,7 @@ impl<Header: HeaderT> AncestryChain<Header>
{ break } - current_hash = match self.parents.get(&current_hash) { + current_hash = match self.parent_hash_of(&current_hash) { Some(parent_hash) => { let is_visited_before = self.unvisited.get(&current_hash).is_none(); if is_visited_before { @@ -117,6 +140,8 @@ pub enum Error { InvalidAuthorityList, /// Justification is finalizing unexpected header. InvalidJustificationTarget, + /// The justification contains duplicate headers in its `votes_ancestries` field. + DuplicateVotesAncestries, /// Error validating a precommit Precommit(PrecommitError), /// The cumulative weight of all votes in the justification is not enough to justify commit @@ -168,6 +193,12 @@ enum IterationFlow { /// Verification callbacks. trait JustificationVerifier<Header: HeaderT> { + /// Called when there are duplicate headers in the votes ancestries. + fn process_duplicate_votes_ancestries( + &mut self, + duplicate_votes_ancestries: Vec<usize>, + ) -> Result<(), Error>; + fn process_redundant_vote( &mut self, precommit_idx: usize, @@ -216,10 +247,14 @@ trait JustificationVerifier<Header: HeaderT> { } let threshold = context.voter_set.threshold().get(); - let mut chain = AncestryChain::new(justification); + let (mut chain, ignored_idxs) = AncestryChain::new(justification); let mut signature_buffer = Vec::new(); let mut cumulative_weight = 0u64; + if !ignored_idxs.is_empty() { + self.process_duplicate_votes_ancestries(ignored_idxs)?; + } + for (precommit_idx, signed) in justification.commit.precommits.iter().enumerate() { if cumulative_weight >= threshold { let action = diff --git a/bridges/primitives/header-chain/src/justification/verification/optimizer.rs b/bridges/primitives/header-chain/src/justification/verification/optimizer.rs index 6552b359170a89d9b9816a8e933dacaf70d6d248..3f1e6ab670ca65283b1efcecfb1cb163c3a742d2 100644 --- a/bridges/primitives/header-chain/src/justification/verification/optimizer.rs +++ b/bridges/primitives/header-chain/src/justification/verification/optimizer.rs @@ -33,6 +33,7 @@ struct JustificationOptimizer<Header: HeaderT> { votes: BTreeSet<AuthorityId>, extra_precommits: Vec<usize>, + duplicate_votes_ancestries_idxs: Vec<usize>, redundant_votes_ancestries: BTreeSet<Header::Hash>, } @@ -41,6 +42,11 @@ impl<Header: HeaderT> JustificationOptimizer<Header> { for invalid_precommit_idx in self.extra_precommits.into_iter().rev() { justification.commit.precommits.remove(invalid_precommit_idx); } + if !self.duplicate_votes_ancestries_idxs.is_empty() { + for idx in self.duplicate_votes_ancestries_idxs.iter().rev() { + justification.votes_ancestries.swap_remove(*idx); + } + } if !self.redundant_votes_ancestries.is_empty() { justification .votes_ancestries @@ -50,6 +56,14 @@ } impl<Header: HeaderT> JustificationVerifier<Header> for JustificationOptimizer<Header> { + fn process_duplicate_votes_ancestries( + &mut self, + duplicate_votes_ancestries: Vec<usize>, + ) -> Result<(), Error> { + self.duplicate_votes_ancestries_idxs = duplicate_votes_ancestries.to_vec(); + Ok(()) + } + fn process_redundant_vote( &mut self, precommit_idx: usize, @@ -118,6 +132,7 @@ pub fn verify_and_optimize_justification<Header: HeaderT>( let mut optimizer = JustificationOptimizer { votes: BTreeSet::new(), extra_precommits: vec![], + duplicate_votes_ancestries_idxs: vec![], redundant_votes_ancestries: Default::default(), }; optimizer.verify_justification(finalized_target, context, justification)?; diff --git a/bridges/primitives/header-chain/src/justification/verification/strict.rs b/bridges/primitives/header-chain/src/justification/verification/strict.rs index f899c6c8efc06c6a1dfa38c38c4050993b09b5af..858cf517a431e0ce958e06465afeba59c2cc0867 100644 --- a/bridges/primitives/header-chain/src/justification/verification/strict.rs +++ b/bridges/primitives/header-chain/src/justification/verification/strict.rs @@ -26,7 +26,7 @@ use crate::justification::verification::{ }; use sp_consensus_grandpa::AuthorityId; use sp_runtime::traits::Header as HeaderT; -use sp_std::collections::btree_set::BTreeSet; +use sp_std::{collections::btree_set::BTreeSet, vec::Vec}; /// Verification callbacks that reject all unknown, duplicate or redundant votes. struct StrictJustificationVerifier { @@ -34,6 +34,13 @@ struct StrictJustificationVerifier { } impl<Header: HeaderT> JustificationVerifier<Header> for StrictJustificationVerifier { + fn process_duplicate_votes_ancestries( + &mut self, + _duplicate_votes_ancestries: Vec<usize>, + ) -> Result<(), Error> { + Err(Error::DuplicateVotesAncestries) + } + fn process_redundant_vote( &mut self, _precommit_idx: usize, diff --git a/bridges/primitives/header-chain/tests/implementation_match.rs b/bridges/primitives/header-chain/tests/implementation_match.rs index f664a419621f67e7153adf3d9195546cca80fd0f..1f61f91ff4bbfc26fdfc53210408174c7eb934eb 100644 --- a/bridges/primitives/header-chain/tests/implementation_match.rs +++ b/bridges/primitives/header-chain/tests/implementation_match.rs @@ -42,7 +42,7 @@ struct AncestryChain(bp_header_chain::justification::AncestryChain<TestHeader>); impl AncestryChain { fn new(justification: &GrandpaJustification<TestHeader>) -> Self { - Self(bp_header_chain::justification::AncestryChain::new(justification)) + Self(bp_header_chain::justification::AncestryChain::new(justification).0) } } @@ -58,7 +58,7 @@ impl finality_grandpa::Chain for AncestryChain { if current_hash == base { break } - match self.0.parents.get(&current_hash) { + match self.0.parent_hash_of(&current_hash) { Some(parent_hash) => { current_hash = *parent_hash; route.push(current_hash); diff --git a/bridges/primitives/header-chain/tests/justification/optimizer.rs b/bridges/primitives/header-chain/tests/justification/optimizer.rs index 21bcd7e86b51e2f898024999a86c8ac403286bba..8d7e2d6502568c75e592e9a87003a930272aa2ab 100644 --- a/bridges/primitives/header-chain/tests/justification/optimizer.rs +++ b/bridges/primitives/header-chain/tests/justification/optimizer.rs @@ -158,6 +158,26 @@ fn unrelated_ancestry_votes_are_removed_by_optimizer() { assert_eq!(num_precommits_before - 1, num_precommits_after); } +#[test] +fn duplicate_votes_ancestries_are_removed_by_optimizer() { + let mut justification = make_default_justification::<TestHeader>(&test_header(1)); + let optimized_votes_ancestries = justification.votes_ancestries.clone(); + justification.votes_ancestries = justification + .votes_ancestries + .into_iter() + .flat_map(|item| std::iter::repeat(item).take(3)) + .collect(); + + verify_and_optimize_justification::<TestHeader>( + header_id::<TestHeader>(1), + &verification_context(TEST_GRANDPA_SET_ID), + &mut justification, + ) + .unwrap(); + + assert_eq!(justification.votes_ancestries, optimized_votes_ancestries); +} + #[test] fn redundant_votes_ancestries_are_removed_by_optimizer() { let mut justification = make_default_justification::<TestHeader>(&test_header(1)); diff --git a/bridges/primitives/header-chain/tests/justification/strict.rs b/bridges/primitives/header-chain/tests/justification/strict.rs index 188c9f5baba26b10f50fd8ed875b0cc856c3ec8d..639a669572b217526e0a35929026136e93d24285 100644 --- a/bridges/primitives/header-chain/tests/justification/strict.rs +++ b/bridges/primitives/header-chain/tests/justification/strict.rs @@ -149,7 +149,21 @@ fn justification_with_invalid_authority_signature_rejected() { } #[test] -fn justification_with_invalid_precommit_ancestry() { +fn justification_with_duplicate_votes_ancestry() { + let mut justification = make_default_justification::<TestHeader>(&test_header(1)); + justification.votes_ancestries.push(justification.votes_ancestries[0].clone()); + + assert_eq!( + verify_justification::<TestHeader>( + header_id::<TestHeader>(1), + &verification_context(TEST_GRANDPA_SET_ID), + &justification, + ), + Err(JustificationVerificationError::DuplicateVotesAncestries), + ); +} +#[test] +fn justification_with_redundant_votes_ancestry() { let mut justification = make_default_justification::<TestHeader>(&test_header(1));
justification.votes_ancestries.push(test_header(10)); diff --git a/bridges/primitives/messages/Cargo.toml b/bridges/primitives/messages/Cargo.toml index b30d6d2559f7916e79f2a3aa2c83ff66298bb24c..7a61643a0bc5d38524d1177310613101909739ac 100644 --- a/bridges/primitives/messages/Cargo.toml +++ b/bridges/primitives/messages/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["derive", "bit-vec"] } -scale-info = { version = "2.9.0", default-features = false, features = ["bit-vec", "derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive"] } serde = { version = "1.0", default-features = false, features = ["alloc", "derive"] } # Bridge dependencies diff --git a/bridges/primitives/parachains/Cargo.toml b/bridges/primitives/parachains/Cargo.toml index ca69523dde37a68ea75dfbc23b5258d04c5510b9..11e9336f66af7dc8a5673fed37922a8aad35047f 100644 --- a/bridges/primitives/parachains/Cargo.toml +++ b/bridges/primitives/parachains/Cargo.toml @@ -9,7 +9,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2" -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } # Bridge dependencies diff --git a/bridges/primitives/polkadot-core/Cargo.toml b/bridges/primitives/polkadot-core/Cargo.toml index aa7eb8024fbf2c7613264af8daa5fefe0b24f7cc..e2bd4c295225d0346ff321f01543bd9ccc249fd3 100644 --- a/bridges/primitives/polkadot-core/Cargo.toml +++ b/bridges/primitives/polkadot-core/Cargo.toml @@ -9,7 +9,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["derive"] } parity-util-mem = { version = "0.12.0", optional = true } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0", optional = true, features = ["derive"] } # Bridge Dependencies diff --git a/bridges/primitives/relayers/Cargo.toml b/bridges/primitives/relayers/Cargo.toml index 99cd79c68417205f11d329bcce8637022aaa6e51..ffed2debbe68ec4b1354e90692e094d9adab038d 100644 --- a/bridges/primitives/relayers/Cargo.toml +++ b/bridges/primitives/relayers/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["derive", "bit-vec"] } -scale-info = { version = "2.9.0", default-features = false, features = ["bit-vec", "derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive"] } # Bridge Dependencies @@ -30,7 +30,9 @@ default = [ "std" ] std = [ "bp-messages/std", "bp-runtime/std", + "codec/std", "frame-support/std", + "scale-info/std", "sp-runtime/std", "sp-std/std", ] diff --git a/bridges/primitives/relayers/src/lib.rs b/bridges/primitives/relayers/src/lib.rs index c529eea536d73e91aa4293aa23813f9273f6f8b5..c808c437b54cbaaa5813067a6413fe7189336ee6 100644 --- a/bridges/primitives/relayers/src/lib.rs +++ b/bridges/primitives/relayers/src/lib.rs @@ -115,7 +115,7 @@ 
where impl PaymentProcedure for PayRewardFromAccount where T: frame_support::traits::fungible::Mutate, - Relayer: Decode + Encode, + Relayer: Decode + Encode + Eq, { type Error = sp_runtime::DispatchError; diff --git a/bridges/primitives/runtime/Cargo.toml b/bridges/primitives/runtime/Cargo.toml index 4454066b59faed0d57b1ab8d06f619949007b5d4..48f6722c982b71ae88e7958fcdebd9d70ec88bd3 100644 --- a/bridges/primitives/runtime/Cargo.toml +++ b/bridges/primitives/runtime/Cargo.toml @@ -12,7 +12,7 @@ hash-db = { version = "0.16.0", default-features = false } impl-trait-for-tuples = "0.2.2" log = { version = "0.4.19", default-features = false } num-traits = { version = "0.2", default-features = false } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0", default-features = false, features = ["alloc", "derive"] } # Substrate Dependencies diff --git a/bridges/primitives/runtime/src/chain.rs b/bridges/primitives/runtime/src/chain.rs index e1809e145248f1cb37022ed4c2bb29d1547b2b2a..b78023efb1b863780dff63f98ab13adf89dfa968 100644 --- a/bridges/primitives/runtime/src/chain.rs +++ b/bridges/primitives/runtime/src/chain.rs @@ -280,7 +280,7 @@ pub type TransactionEraOf = crate::TransactionEra, HashOf /// - constants that are stringified names of runtime API methods: /// - `BEST_FINALIZED__HEADER_METHOD` /// - `_ACCEPTED__FINALITY_PROOFS_METHOD` -/// The name of the chain has to be specified in snake case (e.g. `rialto_parachain`). +/// The name of the chain has to be specified in snake case (e.g. `bridge_hub_polkadot`). #[macro_export] macro_rules! decl_bridge_finality_runtime_apis { ($chain: ident $(, $consensus: ident => $justification_type: ty)?) => { @@ -332,7 +332,7 @@ macro_rules! decl_bridge_finality_runtime_apis { /// - `FromInboundLaneApi` /// - constants that are stringified names of runtime API methods: /// - `FROM__MESSAGE_DETAILS_METHOD`, -/// The name of the chain has to be specified in snake case (e.g. `rialto_parachain`). +/// The name of the chain has to be specified in snake case (e.g. `bridge_hub_polkadot`). #[macro_export] macro_rules! decl_bridge_messages_runtime_apis { ($chain: ident) => { @@ -390,7 +390,7 @@ macro_rules! decl_bridge_messages_runtime_apis { /// Convenience macro that declares bridge finality runtime apis, bridge messages runtime apis /// and related constants for a chain. -/// The name of the chain has to be specified in snake case (e.g. `rialto_parachain`). +/// The name of the chain has to be specified in snake case (e.g. `bridge_hub_polkadot`). #[macro_export] macro_rules! decl_bridge_runtime_apis { ($chain: ident $(, $consensus: ident)?) => { diff --git a/bridges/primitives/runtime/src/extensions.rs b/bridges/primitives/runtime/src/extensions.rs index 44eeaad93c91603b34713ca477b5623f4e07aad0..8a618721b23a6665a14da11684553803170676be 100644 --- a/bridges/primitives/runtime/src/extensions.rs +++ b/bridges/primitives/runtime/src/extensions.rs @@ -88,7 +88,7 @@ pub type BridgeRejectObsoleteHeadersAndMessages = GenericSignedExtensionSchema<( /// wildcard/placeholder, which relies on the scale encoding for `()` or `((), ())`, or `((), (), /// ())` is the same. 
So runtime can contains any kind of tuple: /// `(BridgeRefundBridgeHubRococoMessages)` -/// `(BridgeRefundBridgeHubRococoMessages, BridgeRefundBridgeHubWococoMessages)` +/// `(BridgeRefundBridgeHubRococoMessages, BridgeRefundBridgeHubWestendMessages)` /// `(BridgeRefundParachainMessages1, ..., BridgeRefundParachainMessagesN)` pub type RefundBridgedParachainMessagesSchema = GenericSignedExtensionSchema<(), ()>; diff --git a/bridges/primitives/runtime/src/lib.rs b/bridges/primitives/runtime/src/lib.rs index 7f4a1a030b14562e17e9938ddffa1293cfcda204..0513cfa2a6c75e2509edfe78b0a2c8043ea828bc 100644 --- a/bridges/primitives/runtime/src/lib.rs +++ b/bridges/primitives/runtime/src/lib.rs @@ -23,7 +23,6 @@ use frame_support::{ pallet_prelude::DispatchResult, weights::Weight, PalletError, StorageHasher, StorageValue, }; use frame_system::RawOrigin; -use log; use scale_info::TypeInfo; use serde::{Deserialize, Serialize}; use sp_core::storage::StorageKey; @@ -62,15 +61,6 @@ pub use sp_runtime::paste; /// Use this when something must be shared among all instances. pub const NO_INSTANCE_ID: ChainId = [0, 0, 0, 0]; -/// Rialto chain id. -pub const RIALTO_CHAIN_ID: ChainId = *b"rlto"; - -/// RialtoParachain chain id. -pub const RIALTO_PARACHAIN_CHAIN_ID: ChainId = *b"rlpa"; - -/// Millau chain id. -pub const MILLAU_CHAIN_ID: ChainId = *b"mlau"; - /// Polkadot chain id. pub const POLKADOT_CHAIN_ID: ChainId = *b"pdot"; @@ -89,14 +79,11 @@ pub const ASSET_HUB_WESTEND_CHAIN_ID: ChainId = *b"ahwe"; /// Rococo chain id. pub const ROCOCO_CHAIN_ID: ChainId = *b"roco"; -/// Wococo chain id. -pub const WOCOCO_CHAIN_ID: ChainId = *b"woco"; - /// BridgeHubRococo chain id. pub const BRIDGE_HUB_ROCOCO_CHAIN_ID: ChainId = *b"bhro"; -/// BridgeHubWococo chain id. -pub const BRIDGE_HUB_WOCOCO_CHAIN_ID: ChainId = *b"bhwo"; +/// BridgeHubWestend chain id. +pub const BRIDGE_HUB_WESTEND_CHAIN_ID: ChainId = *b"bhwd"; /// BridgeHubKusama chain id. pub const BRIDGE_HUB_KUSAMA_CHAIN_ID: ChainId = *b"bhks"; @@ -275,18 +262,6 @@ pub fn storage_map_final_key( StorageKey(final_key) } -/// This is how a storage key of storage parameter (`parameter_types! { storage Param: bool = false; -/// }`) is computed. -/// -/// Copied from `frame_support::parameter_types` macro. -pub fn storage_parameter_key(parameter_name: &str) -> StorageKey { - let mut buffer = Vec::with_capacity(1 + parameter_name.len() + 1); - buffer.push(b':'); - buffer.extend_from_slice(parameter_name.as_bytes()); - buffer.push(b':'); - StorageKey(sp_io::hashing::twox_128(&buffer).to_vec()) -} - /// This is how a storage key of storage value is computed. /// /// Copied from `frame_support::storage::storage_prefix`. 
@@ -572,14 +547,6 @@ where mod tests { use super::*; - #[test] - fn storage_parameter_key_works() { - assert_eq!( - storage_parameter_key("MillauToRialtoConversionRate"), - StorageKey(hex_literal::hex!("58942375551bb0af1682f72786b59d04").to_vec()), - ); - } - #[test] fn storage_value_key_works() { assert_eq!( diff --git a/bridges/primitives/test-utils/Cargo.toml b/bridges/primitives/test-utils/Cargo.toml index 7081ce90f97b90c3eb437d297fae5cc9f65cfe61..9836c1877f00239f72dd97c74b6b5fa8698b12e6 100644 --- a/bridges/primitives/test-utils/Cargo.toml +++ b/bridges/primitives/test-utils/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "bp-test-utils" version = "0.1.0" +description = "Utilities for testing substrate-based runtime bridge code" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" diff --git a/bridges/primitives/xcm-bridge-hub-router/Cargo.toml b/bridges/primitives/xcm-bridge-hub-router/Cargo.toml index 725a7d94564e226110453b35cdf4f0161c12c113..fb079b48e42a7aca9ae9846ef9eaac383cc06649 100644 --- a/bridges/primitives/xcm-bridge-hub-router/Cargo.toml +++ b/bridges/primitives/xcm-bridge-hub-router/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["derive", "bit-vec"] } -scale-info = { version = "2.9.0", default-features = false, features = ["bit-vec", "derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive"] } # Substrate Dependencies sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } diff --git a/bridges/scripts/verify-pallets-build.sh b/bridges/scripts/verify-pallets-build.sh index b8ac09e26b8bcd78e4a0c3022c098f2ebc93afd3..b96bbf1833b6b3ce2bb34d2dc34aa5b8f54eb528 100755 --- a/bridges/scripts/verify-pallets-build.sh +++ b/bridges/scripts/verify-pallets-build.sh @@ -61,20 +61,12 @@ trap revert_to_clean_state EXIT rm -rf $BRIDGES_FOLDER/.config rm -rf $BRIDGES_FOLDER/.github rm -rf $BRIDGES_FOLDER/.maintain -rm -rf $BRIDGES_FOLDER/bin/millau -rm -rf $BRIDGES_FOLDER/bin/rialto -rm -rf $BRIDGES_FOLDER/bin/rialto-parachain -rm -rf $BRIDGES_FOLDER/bin/.keep rm -rf $BRIDGES_FOLDER/deployments rm -f $BRIDGES_FOLDER/docs/dockerhub-* rm -rf $BRIDGES_FOLDER/fuzz rm -rf $BRIDGES_FOLDER/modules/beefy rm -rf $BRIDGES_FOLDER/modules/shift-session-manager rm -rf $BRIDGES_FOLDER/primitives/beefy -rm -rf $BRIDGES_FOLDER/primitives/chain-millau -rm -rf $BRIDGES_FOLDER/primitives/chain-rialto -rm -rf $BRIDGES_FOLDER/primitives/chain-rialto-parachain -rm -rf $BRIDGES_FOLDER/primitives/chain-westend rm -rf $BRIDGES_FOLDER/relays rm -rf $BRIDGES_FOLDER/scripts/add_license.sh rm -rf $BRIDGES_FOLDER/scripts/build-containers.sh @@ -82,8 +74,6 @@ rm -rf $BRIDGES_FOLDER/scripts/ci-cache.sh rm -rf $BRIDGES_FOLDER/scripts/dump-logs.sh rm -rf $BRIDGES_FOLDER/scripts/license_header rm -rf $BRIDGES_FOLDER/scripts/regenerate_runtimes.sh -rm -rf $BRIDGES_FOLDER/scripts/send-message-from-millau-rialto.sh -rm -rf $BRIDGES_FOLDER/scripts/send-message-from-rialto-millau.sh rm -rf $BRIDGES_FOLDER/scripts/update-weights.sh rm -rf $BRIDGES_FOLDER/scripts/update-weights-setup.sh rm -rf $BRIDGES_FOLDER/scripts/update_substrate.sh diff --git a/bridges/zombienet/README.md b/bridges/zombienet/README.md new file mode 100644 index 0000000000000000000000000000000000000000..b601154b624ce69ed921ea6c2453d17c4d37b6c8 --- /dev/null +++ 
b/bridges/zombienet/README.md @@ -0,0 +1,31 @@ +# Bridges Tests for Local Rococo <> Westend Bridge + +This folder contains [zombienet](https://github.com/paritytech/zombienet/) based integration tests for both +the onchain and offchain bridges code. Due to some +[technical difficulties](https://github.com/paritytech/parity-bridges-common/pull/2649#issue-1965339051), we +are using the native zombienet provider, which means that you need to build some binaries locally. + +To run those tests, you need to: + +- download the latest [zombienet release](https://github.com/paritytech/zombienet/releases); + +- build the Polkadot binary by running the `cargo build -p polkadot --release --features fast-runtime` command in a +[`polkadot-sdk`](https://github.com/paritytech/polkadot-sdk) repository clone; + +- build the Polkadot Parachain binary by running the `cargo build -p polkadot-parachain-bin --release` command in a +[`polkadot-sdk`](https://github.com/paritytech/polkadot-sdk) repository clone; + +- ensure that you have [`node`](https://nodejs.org/en) installed. Additionally, you'll need the globally installed +`@polkadot/api-cli` package (use `npm install -g @polkadot/api-cli@beta` to install it); + +- build the Substrate relay by running the `cargo build -p substrate-relay --release` command in a +[`parity-bridges-common`](https://github.com/paritytech/parity-bridges-common) repository clone; + +- copy the fresh `substrate-relay` binary built in the previous step to `~/local_bridge_testing/bin/substrate-relay`; + +- change the `POLKADOT_SDK_FOLDER` and `ZOMBIENET_BINARY_PATH` variables (and ensure that the nearby variables +have correct values) in `./run-tests.sh`. + +After that, you can run the tests with the `./run-tests.sh` command. If everything goes well, it prints the +"All tests have completed successfully" message at the end. Otherwise, it prints the paths to the zombienet +process logs, which can be used to locate all spawned relay and parachain nodes.
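For reference, the setup described in the README above boils down to a handful of shell commands. The snippet below is a condensed sketch rather than a script shipped with the repository: the cargo and npm invocations are the ones quoted in the README, while the copy destination and the final invocation assume the default `~/local_bridge_testing/bin` layout expected by `run-tests.sh`.

```bash
#!/bin/sh
# Condensed sketch of the zombienet bridge-test setup (paths are illustrative).

# 1. In a polkadot-sdk clone: build the relay chain and parachain binaries.
cargo build -p polkadot --release --features fast-runtime
cargo build -p polkadot-parachain-bin --release

# 2. Install the polkadot-js API CLI used by the JS helpers.
npm install -g @polkadot/api-cli@beta

# 3. In a parity-bridges-common clone: build the relayer and copy it to the
#    location expected by the test scripts.
cargo build -p substrate-relay --release
mkdir -p ~/local_bridge_testing/bin
cp target/release/substrate-relay ~/local_bridge_testing/bin/substrate-relay

# 4. Adjust POLKADOT_SDK_FOLDER / ZOMBIENET_BINARY_PATH in run-tests.sh, then:
./bridges/zombienet/run-tests.sh
```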
diff --git a/bridges/zombienet/helpers/best-finalized-header-at-bridged-chain.js b/bridges/zombienet/helpers/best-finalized-header-at-bridged-chain.js new file mode 100644 index 0000000000000000000000000000000000000000..f7e1eefc84b3fa3e799d7111608cfc39783f5e21 --- /dev/null +++ b/bridges/zombienet/helpers/best-finalized-header-at-bridged-chain.js @@ -0,0 +1,25 @@ +async function run(nodeName, networkInfo, args) { + const {wsUri, userDefinedTypes} = networkInfo.nodesByName[nodeName]; + const api = await zombie.connect(wsUri, userDefinedTypes); + + // TODO: could be replaced with https://github.com/polkadot-js/api/issues/4930 (depends on metadata v15) later + const bridgedChainName = args[0]; + const expectedBridgedChainHeaderNumber = Number(args[1]); + const runtimeApiMethod = bridgedChainName + "FinalityApi_best_finalized"; + + while (true) { + const encodedBestFinalizedHeaderId = await api.rpc.state.call(runtimeApiMethod, []); + const bestFinalizedHeaderId = api.createType("Option", encodedBestFinalizedHeaderId); + if (bestFinalizedHeaderId.isSome) { + const bestFinalizedHeaderNumber = Number(bestFinalizedHeaderId.unwrap().toHuman()[0]); + if (bestFinalizedHeaderNumber > expectedBridgedChainHeaderNumber) { + return bestFinalizedHeaderNumber; + } + } + + // else sleep and retry + await new Promise((resolve) => setTimeout(resolve, 12000)); + } +} + +module.exports = { run } diff --git a/bridges/zombienet/helpers/relayer-rewards.js b/bridges/zombienet/helpers/relayer-rewards.js new file mode 100644 index 0000000000000000000000000000000000000000..a5f567db797722e04d3bfae90745a728ff1abdff --- /dev/null +++ b/bridges/zombienet/helpers/relayer-rewards.js @@ -0,0 +1,28 @@ +async function run(nodeName, networkInfo, args) { + const {wsUri, userDefinedTypes} = networkInfo.nodesByName[nodeName]; + const api = await zombie.connect(wsUri, userDefinedTypes); + + // TODO: could be replaced with https://github.com/polkadot-js/api/issues/4930 (depends on metadata v15) later + const relayerAccountAddress = args[0]; + const laneId = args[1]; + const bridgedChainId = args[2]; + const relayerFundOwner = args[3]; + const expectedRelayerReward = BigInt(args[4]); + while (true) { + const relayerReward = await api.query.bridgeRelayers.relayerRewards( + relayerAccountAddress, + { laneId: laneId, bridgedChainId: bridgedChainId, owner: relayerFundOwner } + ); + if (relayerReward.isSome) { + const relayerRewardBalance = relayerReward.unwrap().toBigInt(); + if (relayerRewardBalance > expectedRelayerReward) { + return relayerRewardBalance; + } + } + + // else sleep and retry + await new Promise((resolve) => setTimeout(resolve, 12000)); + } +} + +module.exports = { run } diff --git a/bridges/zombienet/helpers/wait-hrmp-channel-opened.js b/bridges/zombienet/helpers/wait-hrmp-channel-opened.js new file mode 100644 index 0000000000000000000000000000000000000000..e700cab1d7481d77631e55492e4b0032f4382028 --- /dev/null +++ b/bridges/zombienet/helpers/wait-hrmp-channel-opened.js @@ -0,0 +1,22 @@ +async function run(nodeName, networkInfo, args) { + const {wsUri, userDefinedTypes} = networkInfo.nodesByName[nodeName]; + const api = await zombie.connect(wsUri, userDefinedTypes); + + const sibling = args[0]; + + while (true) { + const messagingStateAsObj = await api.query.parachainSystem.relevantMessagingState(); + const messagingState = api.createType("Option", messagingStateAsObj); + if (messagingState.isSome) { + const egressChannels = messagingState.unwrap().egressChannels; + if (egressChannels.find(x => x[0] == sibling)) { + 
return; + } + } + + // else sleep and retry + await new Promise((resolve) => setTimeout(resolve, 12000)); + } +} + +module.exports = { run } diff --git a/bridges/zombienet/helpers/wrapped-assets-balance.js b/bridges/zombienet/helpers/wrapped-assets-balance.js new file mode 100644 index 0000000000000000000000000000000000000000..bb3cea8858a850e551ba0380b1557ccad0761717 --- /dev/null +++ b/bridges/zombienet/helpers/wrapped-assets-balance.js @@ -0,0 +1,26 @@ +async function run(nodeName, networkInfo, args) { + const {wsUri, userDefinedTypes} = networkInfo.nodesByName[nodeName]; + const api = await zombie.connect(wsUri, userDefinedTypes); + + // TODO: could be replaced with https://github.com/polkadot-js/api/issues/4930 (depends on metadata v15) later + const accountAddress = args[0]; + const expectedForeignAssetBalance = BigInt(args[1]); + const bridgedNetworkName = args[2]; + while (true) { + const foreignAssetAccount = await api.query.foreignAssets.account( + { parents: 2, interior: { X1: { GlobalConsensus: bridgedNetworkName } } }, + accountAddress + ); + if (foreignAssetAccount.isSome) { + const foreignAssetAccountBalance = foreignAssetAccount.unwrap().balance.toBigInt(); + if (foreignAssetAccountBalance > expectedForeignAssetBalance) { + return foreignAssetAccountBalance; + } + } + + // else sleep and retry + await new Promise((resolve) => setTimeout(resolve, 12000)); + } +} + +module.exports = { run } diff --git a/bridges/zombienet/run-tests.sh b/bridges/zombienet/run-tests.sh new file mode 100755 index 0000000000000000000000000000000000000000..22fefd093602b3a72a6bf4c6687f0a295c59e19a --- /dev/null +++ b/bridges/zombienet/run-tests.sh @@ -0,0 +1,97 @@ +#!/bin/bash +#set -eu +shopt -s nullglob + +trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM EXIT + +# assuming that we'll be using native provide && all processes will be executing locally +# (we need absolute paths here, because they're used when scripts are called by zombienet from tmp folders) +export POLKADOT_SDK_FOLDER=`realpath $(dirname "$0")/../..` +export BRIDGE_TESTS_FOLDER=$POLKADOT_SDK_FOLDER/bridges/zombienet/tests +export POLKADOT_BINARY_PATH=$POLKADOT_SDK_FOLDER/target/release/polkadot +export POLKADOT_PARACHAIN_BINARY_PATH=$POLKADOT_SDK_FOLDER/target/release/polkadot-parachain +export POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_ROCOCO=$POLKADOT_PARACHAIN_BINARY_PATH +export POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_WESTEND=$POLKADOT_PARACHAIN_BINARY_PATH +export ZOMBIENET_BINARY_PATH=~/local_bridge_testing/bin/zombienet-linux + +# bridge configuration +export LANE_ID="00000002" + +# tests configuration +ALL_TESTS_FOLDER=`mktemp -d` + +function start_coproc() { + local command=$1 + local name=$2 + local coproc_log=`mktemp -p $TEST_FOLDER` + coproc COPROC { + $command >$coproc_log 2>&1 + } + TEST_COPROCS[$COPROC_PID, 0]=$name + TEST_COPROCS[$COPROC_PID, 1]=$coproc_log + echo "Spawned $name coprocess. 
StdOut + StdErr: $coproc_log" + + return $COPROC_PID +} + +# execute every test from tests folder +TEST_INDEX=1 +while true +do + declare -A TEST_COPROCS + TEST_COPROCS_COUNT=0 + TEST_PREFIX=$(printf "%04d" $TEST_INDEX) + + # it'll be used by the `sync-exit.sh` script + export TEST_FOLDER=`mktemp -d -p $ALL_TESTS_FOLDER` + + # check if there are no more tests + zndsl_files=($BRIDGE_TESTS_FOLDER/$TEST_PREFIX-*.zndsl) + if [ ${#zndsl_files[@]} -eq 0 ]; then + break + fi + + # start relay + if [ -f $BRIDGE_TESTS_FOLDER/$TEST_PREFIX-start-relay.sh ]; then + start_coproc "${BRIDGE_TESTS_FOLDER}/${TEST_PREFIX}-start-relay.sh" "relay" + RELAY_COPROC=$COPROC_PID + ((TEST_COPROCS_COUNT++)) + fi + # start tests + for zndsl_file in "${zndsl_files[@]}"; do + start_coproc "$ZOMBIENET_BINARY_PATH --provider native test $zndsl_file" "$zndsl_file" + echo -n "1">>$TEST_FOLDER/exit-sync + ((TEST_COPROCS_COUNT++)) + done + # wait until all tests are completed + relay_exited=0 + for n in `seq 1 $TEST_COPROCS_COUNT`; do + wait -n -p COPROC_PID + exit_code=$? + coproc_name=${TEST_COPROCS[$COPROC_PID, 0]} + coproc_log=${TEST_COPROCS[$COPROC_PID, 1]} + coproc_stdout=$(cat $coproc_log) + relay_exited=$(expr "${coproc_name}" == "relay") + echo "Process $coproc_name has finished with exit code: $exit_code" + + # if exit code is not zero, exit + if [ $exit_code -ne 0 ]; then + echo "=====================================================================" + echo "=== Shutting down. Log of failed process below ===" + echo "=====================================================================" + echo $coproc_stdout + exit 1 + fi + + # if last test has exited, exit relay too + if [ $n -eq $(($TEST_COPROCS_COUNT - 1)) ] && [ $relay_exited -eq 0 ]; then + kill $RELAY_COPROC + break + fi + done + ((TEST_INDEX++)) +done + +echo "=====================================================================" +echo "=== All tests have completed successfully ===" +echo "=====================================================================" diff --git a/bridges/zombienet/scripts/invoke-script.sh b/bridges/zombienet/scripts/invoke-script.sh new file mode 100755 index 0000000000000000000000000000000000000000..6a3754a8824017e18409cde031be9a09e9392a75 --- /dev/null +++ b/bridges/zombienet/scripts/invoke-script.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +pushd $POLKADOT_SDK_FOLDER/cumulus/scripts +./bridges_rococo_westend.sh $1 +popd diff --git a/bridges/zombienet/scripts/sync-exit.sh b/bridges/zombienet/scripts/sync-exit.sh new file mode 100755 index 0000000000000000000000000000000000000000..cc20b098e7830fc164f3a0a643840c1e8188b7f2 --- /dev/null +++ b/bridges/zombienet/scripts/sync-exit.sh @@ -0,0 +1,14 @@ +#!/bin/bash +set -e + +# every network adds a char to the file, let's remove ours +truncate -s -1 $TEST_FOLDER/exit-sync + +# when all chars are removed, then our test is done +while true +do + if [ `stat --printf="%s" $TEST_FOLDER/exit-sync` -eq 0 ]; then + exit + fi + sleep 100 +done diff --git a/bridges/zombienet/tests/0001-asset-transfer-works-rococo-to-westend.zndsl b/bridges/zombienet/tests/0001-asset-transfer-works-rococo-to-westend.zndsl new file mode 100644 index 0000000000000000000000000000000000000000..f68d658cdacb81b0542c4ff4d10b3db61760a880 --- /dev/null +++ b/bridges/zombienet/tests/0001-asset-transfer-works-rococo-to-westend.zndsl @@ -0,0 +1,26 @@ +Description: User is able to transfer ROC from Rococo Asset Hub to Westend Asset Hub +Network: ../../../cumulus/zombienet/bridge-hubs/bridge_hub_westend_local_network.toml +Creds: config + 
+# step 1: initialize Westend asset hub
+asset-hub-westend-collator1: run ../scripts/invoke-script.sh with "init-asset-hub-westend-local" within 240 seconds
+asset-hub-westend-collator1: js-script ../helpers/wait-hrmp-channel-opened.js with "1002" within 400 seconds
+
+# step 2: initialize Westend bridge hub
+bridge-hub-westend-collator1: run ../scripts/invoke-script.sh with "init-bridge-hub-westend-local" within 120 seconds
+
+# step 3: relay is started elsewhere - let's wait until with-Rococo GRANDPA pallet is initialized at Westend
+bridge-hub-westend-collator1: js-script ../helpers/best-finalized-header-at-bridged-chain.js with "Rococo,0" within 400 seconds
+
+# step 4: send WOC to Rococo
+asset-hub-westend-collator1: run ../scripts/invoke-script.sh with "reserve-transfer-assets-from-asset-hub-westend-local" within 60 seconds
+
+# step 5: elsewhere Rococo has sent ROC to //Alice - let's wait for it
+asset-hub-westend-collator1: js-script ../helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,0,Rococo" within 600 seconds
+
+# step 6: check that the relayer //Charlie is rewarded by both our AH and target AH
+bridge-hub-westend-collator1: js-script ../helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000002,0x6268726f,BridgedChain,0" within 300 seconds
+bridge-hub-westend-collator1: js-script ../helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000002,0x6268726F,ThisChain,0" within 300 seconds
+
+# wait until other network test has completed OR exit with an error too
+asset-hub-westend-collator1: run ../scripts/sync-exit.sh within 600 seconds
diff --git a/bridges/zombienet/tests/0001-asset-transfer-works-westend-to-rococo.zndsl b/bridges/zombienet/tests/0001-asset-transfer-works-westend-to-rococo.zndsl
new file mode 100644
index 0000000000000000000000000000000000000000..c862fa6d176055118a68b6a0970b50a0bf5d6b0a
--- /dev/null
+++ b/bridges/zombienet/tests/0001-asset-transfer-works-westend-to-rococo.zndsl
@@ -0,0 +1,26 @@
+Description: User is able to transfer WOC from Westend Asset Hub to Rococo Asset Hub
+Network: ../../../cumulus/zombienet/bridge-hubs/bridge_hub_rococo_local_network.toml
+Creds: config
+
+# step 1: initialize Rococo asset hub
+asset-hub-rococo-collator1: run ../scripts/invoke-script.sh with "init-asset-hub-rococo-local" within 240 seconds
+asset-hub-rococo-collator1: js-script ../helpers/wait-hrmp-channel-opened.js with "1013" within 400 seconds
+
+# step 2: initialize Rococo bridge hub
+bridge-hub-rococo-collator1: run ../scripts/invoke-script.sh with "init-bridge-hub-rococo-local" within 120 seconds
+
+# step 3: relay is started elsewhere - let's wait until with-Westend GRANDPA pallet is initialized at Rococo
+bridge-hub-rococo-collator1: js-script ../helpers/best-finalized-header-at-bridged-chain.js with "Westend,0" within 400 seconds
+
+# step 4: send ROC to Westend
+asset-hub-rococo-collator1: run ../scripts/invoke-script.sh with "reserve-transfer-assets-from-asset-hub-rococo-local" within 60 seconds
+
+# step 5: elsewhere Westend has sent WOC to //Alice - let's wait for it
+asset-hub-rococo-collator1: js-script ../helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,0,Westend" within 600 seconds
+
+# step 6: check that the relayer //Charlie is rewarded by both our AH and target AH
+bridge-hub-rococo-collator1: js-script ../helpers/relayer-rewards.js with
"5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000002,0x62687764,BridgedChain,0" within 300 seconds +bridge-hub-rococo-collator1: js-script ../helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000002,0x62687764,ThisChain,0" within 300 seconds + +# wait until other network test has completed OR exit with an error too +asset-hub-rococo-collator1: run ../scripts/sync-exit.sh within 600 seconds diff --git a/bridges/zombienet/tests/0001-start-relay.sh b/bridges/zombienet/tests/0001-start-relay.sh new file mode 100755 index 0000000000000000000000000000000000000000..7be2cf4d5938797b98b86e8abf08ae43a5cee449 --- /dev/null +++ b/bridges/zombienet/tests/0001-start-relay.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +pushd $POLKADOT_SDK_FOLDER/cumulus/scripts +./bridges_rococo_westend.sh run-relay +popd diff --git a/cumulus/client/cli/Cargo.toml b/cumulus/client/cli/Cargo.toml index 5dd18f0c156d1581c09d73e2ab9d6422dbe9ab4c..0f942feb59524d6557d71d58eb617ecf6b3fb52b 100644 --- a/cumulus/client/cli/Cargo.toml +++ b/cumulus/client/cli/Cargo.toml @@ -3,6 +3,8 @@ name = "cumulus-client-cli" version = "0.1.0" authors.workspace = true edition.workspace = true +description = "Parachain node CLI utilities." +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] clap = { version = "4.4.6", features = ["derive"] } diff --git a/cumulus/client/cli/src/lib.rs b/cumulus/client/cli/src/lib.rs index 1b18ed064373fdd58224b70da022a969ad641eb6..a2238b73b2b596ae133dfadec43c85b1a4a3e2ed 100644 --- a/cumulus/client/cli/src/lib.rs +++ b/cumulus/client/cli/src/lib.rs @@ -296,7 +296,14 @@ pub struct RunCmd { #[arg(long, conflicts_with = "validator")] pub collator: bool, - /// EXPERIMENTAL: Specify an URL to a relay chain full node to communicate with. + /// Creates a less resource-hungry node that retrieves relay chain data from an RPC endpoint. + /// + /// The provided URLs should point to RPC endpoints of the relay chain. + /// This node connects to the remote nodes following the order they were specified in. If the + /// connection fails, it attempts to connect to the next endpoint in the list. + /// + /// Note: This option doesn't stop the node from connecting to the relay chain network but + /// reduces bandwidth use. #[arg( long, value_parser = validate_relay_chain_url, diff --git a/cumulus/client/collator/Cargo.toml b/cumulus/client/collator/Cargo.toml index 1d87efa443ce38f6b82a78d56c0fd090294e63d0..ad9f01ed08395cb52830495718d80ee920bbc32f 100644 --- a/cumulus/client/collator/Cargo.toml +++ b/cumulus/client/collator/Cargo.toml @@ -3,6 +3,8 @@ name = "cumulus-client-collator" version = "0.1.0" authors.workspace = true edition.workspace = true +description = "Common node-side functionality and glue code to collate parachain blocks." 
+license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] parking_lot = "0.12.1" diff --git a/cumulus/client/consensus/aura/Cargo.toml b/cumulus/client/consensus/aura/Cargo.toml index 8239a498746e3cfd8bda6190b125c95a17ebe735..f440270c9822d1c5dc9a3c99c25687efc44ef034 100644 --- a/cumulus/client/consensus/aura/Cargo.toml +++ b/cumulus/client/consensus/aura/Cargo.toml @@ -4,6 +4,7 @@ description = "AURA consensus algorithm for parachains" version = "0.1.0" authors.workspace = true edition.workspace = true +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] async-trait = "0.1.73" diff --git a/cumulus/client/consensus/aura/src/collators/basic.rs b/cumulus/client/consensus/aura/src/collators/basic.rs index 3c904915ceafb9b97fa6493d4db21b8b1609585b..dc0078b0d6a9838534b5599c8838b261c470b7b3 100644 --- a/cumulus/client/consensus/aura/src/collators/basic.rs +++ b/cumulus/client/consensus/aura/src/collators/basic.rs @@ -23,7 +23,9 @@ //! For more information about AuRa, the Substrate crate should be checked. use codec::{Codec, Decode}; -use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; +use cumulus_client_collator::{ + relay_chain_driven::CollationRequest, service::ServiceInterface as CollatorServiceInterface, +}; use cumulus_client_consensus_common::ParachainBlockImportMarker; use cumulus_client_consensus_proposer::ProposerInterface; use cumulus_primitives_core::{relay_chain::BlockId as RBlockId, CollectCollationInfo}; @@ -33,7 +35,7 @@ use polkadot_node_primitives::CollationResult; use polkadot_overseer::Handle as OverseerHandle; use polkadot_primitives::{CollatorPair, Id as ParaId}; -use futures::prelude::*; +use futures::{channel::mpsc::Receiver, prelude::*}; use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf}; use sc_consensus::BlockImport; use sp_api::ProvideRuntimeApi; @@ -81,6 +83,10 @@ pub struct Params { pub collator_service: CS, /// The amount of time to spend authoring each block. pub authoring_duration: Duration, + /// Receiver for collation requests. If `None`, Aura consensus will establish a new receiver. + /// Should be used when a chain migrates from a different consensus algorithm and was already + /// processing collation requests before initializing Aura. + pub collation_request_receiver: Option>, } /// Run bare Aura consensus as a relay-chain-driven collator. 
@@ -110,12 +116,16 @@ where P::Signature: TryFrom> + Member + Codec, { async move { - let mut collation_requests = cumulus_client_collator::relay_chain_driven::init( - params.collator_key, - params.para_id, - params.overseer_handle, - ) - .await; + let mut collation_requests = match params.collation_request_receiver { + Some(receiver) => receiver, + None => + cumulus_client_collator::relay_chain_driven::init( + params.collator_key, + params.para_id, + params.overseer_handle, + ) + .await, + }; let mut collator = { let params = collator_util::Params { diff --git a/cumulus/client/consensus/common/Cargo.toml b/cumulus/client/consensus/common/Cargo.toml index 26d7ba1b142c07cd5a5e133d7e4594028755dccf..9dfd14b1cf5e58c593f7590ea70f47bc46151fd9 100644 --- a/cumulus/client/consensus/common/Cargo.toml +++ b/cumulus/client/consensus/common/Cargo.toml @@ -4,6 +4,7 @@ description = "Cumulus specific common consensus implementations" version = "0.1.0" authors.workspace = true edition.workspace = true +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] async-trait = "0.1.73" diff --git a/cumulus/client/consensus/common/src/level_monitor.rs b/cumulus/client/consensus/common/src/level_monitor.rs index 5f115ec2c4a3c885b60da5f5dd9f0fe3faefab16..270e3f57ae5a374aba2f22966c7376e0791020ca 100644 --- a/cumulus/client/consensus/common/src/level_monitor.rs +++ b/cumulus/client/consensus/common/src/level_monitor.rs @@ -98,7 +98,6 @@ where /// /// Level limits are not enforced during this phase. fn restore(&mut self) { - const ERR_MSG: &str = "route from finalized to leaf should be available; qed"; let info = self.backend.blockchain().info(); log::debug!( @@ -112,7 +111,14 @@ where self.import_counter = info.finalized_number; for leaf in self.backend.blockchain().leaves().unwrap_or_default() { - let mut meta = self.backend.blockchain().header_metadata(leaf).expect(ERR_MSG); + let Ok(mut meta) = self.backend.blockchain().header_metadata(leaf) else { + log::debug!( + target: LOG_TARGET, + "Could not fetch header metadata for leaf: {leaf:?}", + ); + + continue + }; self.import_counter = self.import_counter.max(meta.number); @@ -123,7 +129,19 @@ where if meta.number <= self.lowest_level { break } - meta = self.backend.blockchain().header_metadata(meta.parent).expect(ERR_MSG); + + meta = match self.backend.blockchain().header_metadata(meta.parent) { + Ok(m) => m, + Err(_) => { + // This can happen after we have warp synced a node. + log::debug!( + target: LOG_TARGET, + "Could not fetch header metadata for parent: {:?}", + meta.parent, + ); + break + }, + } } } diff --git a/cumulus/client/consensus/common/src/lib.rs b/cumulus/client/consensus/common/src/lib.rs index 08bceabb2bd4a49ac8917e82caf1b97cd9eac183..cebe34e7ea58828372a9261e3be94866e119546a 100644 --- a/cumulus/client/consensus/common/src/lib.rs +++ b/cumulus/client/consensus/common/src/lib.rs @@ -111,12 +111,15 @@ impl ParachainConsensus for Box + Send + /// Parachain specific block import. /// -/// This is used to set `block_import_params.fork_choice` to `false` as long as the block origin is -/// not `NetworkInitialSync`. The best block for parachains is determined by the relay chain. -/// Meaning we will update the best block, as it is included by the relay-chain. +/// Specialized block import for parachains. It supports to delay setting the best block until the +/// relay chain has included a candidate in its best block. By default the delayed best block +/// setting is disabled. 
The block import also monitors the imported blocks and prunes by default if +/// there are too many blocks at the same height. Too many blocks at the same height can for example +/// happen if the relay chain is rejecting the parachain blocks in the validation. pub struct ParachainBlockImport { inner: BI, monitor: Option>>, + delayed_best_block: bool, } impl> ParachainBlockImport { @@ -141,13 +144,27 @@ impl> ParachainBlockImport let monitor = level_limit.map(|level_limit| SharedData::new(LevelMonitor::new(level_limit, backend))); - Self { inner, monitor } + Self { inner, monitor, delayed_best_block: false } + } + + /// Create a new instance which delays setting the best block. + /// + /// The number of leaves per level limit is set to `LevelLimit::Default`. + pub fn new_with_delayed_best_block(inner: BI, backend: Arc) -> Self { + Self { + delayed_best_block: true, + ..Self::new_with_limit(inner, backend, LevelLimit::Default) + } } } impl Clone for ParachainBlockImport { fn clone(&self) -> Self { - ParachainBlockImport { inner: self.inner.clone(), monitor: self.monitor.clone() } + ParachainBlockImport { + inner: self.inner.clone(), + monitor: self.monitor.clone(), + delayed_best_block: self.delayed_best_block, + } } } @@ -182,11 +199,13 @@ where params.finalized = true; } - // Best block is determined by the relay chain, or if we are doing the initial sync - // we import all blocks as new best. - params.fork_choice = Some(sc_consensus::ForkChoiceStrategy::Custom( - params.origin == sp_consensus::BlockOrigin::NetworkInitialSync, - )); + if self.delayed_best_block { + // Best block is determined by the relay chain, or if we are doing the initial sync + // we import all blocks as new best. + params.fork_choice = Some(sc_consensus::ForkChoiceStrategy::Custom( + params.origin == sp_consensus::BlockOrigin::NetworkInitialSync, + )); + } let maybe_lock = self.monitor.as_ref().map(|monitor_lock| { let mut monitor = monitor_lock.shared_data_locked(); diff --git a/cumulus/client/consensus/common/src/tests.rs b/cumulus/client/consensus/common/src/tests.rs index 22d3dd3abd49dfbe4a0bb548ce4b2c35cf5ed839..597d1ab2acc2cff42d3230898c1129a7ba63b6f3 100644 --- a/cumulus/client/consensus/common/src/tests.rs +++ b/cumulus/client/consensus/common/src/tests.rs @@ -579,7 +579,7 @@ fn follow_new_best_sets_best_after_it_is_imported() { block_import_params.fork_choice = Some(ForkChoiceStrategy::Custom(false)); block_import_params.body = Some(body); - // Now import the unkown block to make it "known" + // Now import the unknown block to make it "known" client.import_block(block_import_params).await.unwrap(); loop { @@ -1124,7 +1124,8 @@ fn find_potential_parents_aligned_with_pending() { let backend = Arc::new(Backend::new_test(1000, 1)); let client = Arc::new(TestClientBuilder::with_backend(backend.clone()).build()); - let mut para_import = ParachainBlockImport::new(client.clone(), backend.clone()); + let mut para_import = + ParachainBlockImport::new_with_delayed_best_block(client.clone(), backend.clone()); let relay_parent = relay_hash_from_block_num(10); // Choose different relay parent for alternative chain to get new hashes. 
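A minimal sketch of how the two `ParachainBlockImport` constructors differ after this change, assuming `client` and `backend` are already built as in the usual parachain service setup (illustrative only, not part of this diff):

// Illustrative sketch; `client` and `backend` come from the normal parachain service setup.
// Opt in to deferring the best block to the relay chain (the previous default behaviour):
let para_import =
	ParachainBlockImport::new_with_delayed_best_block(client.clone(), backend.clone());
// `ParachainBlockImport::new(client, backend)` now leaves `fork_choice` untouched, so imported
// blocks are no longer forced to stay non-best until the relay chain includes them.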
@@ -1279,7 +1280,8 @@ fn find_potential_parents_aligned_no_pending() { let backend = Arc::new(Backend::new_test(1000, 1)); let client = Arc::new(TestClientBuilder::with_backend(backend.clone()).build()); - let mut para_import = ParachainBlockImport::new(client.clone(), backend.clone()); + let mut para_import = + ParachainBlockImport::new_with_delayed_best_block(client.clone(), backend.clone()); let relay_parent = relay_hash_from_block_num(10); // Choose different relay parent for alternative chain to get new hashes. diff --git a/cumulus/client/consensus/proposer/Cargo.toml b/cumulus/client/consensus/proposer/Cargo.toml index 29720a8f4791124536095ffba44189762de47f90..4cfba66cec371835f36afefc84eb8abbc50c1a91 100644 --- a/cumulus/client/consensus/proposer/Cargo.toml +++ b/cumulus/client/consensus/proposer/Cargo.toml @@ -4,6 +4,7 @@ description = "A Substrate `Proposer` for building parachain blocks" version = "0.1.0" authors.workspace = true edition.workspace = true +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] anyhow = "1.0" diff --git a/cumulus/client/consensus/relay-chain/Cargo.toml b/cumulus/client/consensus/relay-chain/Cargo.toml index ba077f624030b8838c23d3c46c42b3527dbe6b2c..de280e6e9a890fd7e4680b3ab8a1111d6a02b717 100644 --- a/cumulus/client/consensus/relay-chain/Cargo.toml +++ b/cumulus/client/consensus/relay-chain/Cargo.toml @@ -4,6 +4,7 @@ description = "The relay-chain provided consensus algorithm" version = "0.1.0" authors.workspace = true edition.workspace = true +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] async-trait = "0.1.73" diff --git a/cumulus/client/network/Cargo.toml b/cumulus/client/network/Cargo.toml index eaaf497ac3edcc74415ab0ba6a1c3caf6faff953..08956f9f6c641f7cf8ee4f2b21d5846fb8611c49 100644 --- a/cumulus/client/network/Cargo.toml +++ b/cumulus/client/network/Cargo.toml @@ -4,6 +4,7 @@ version = "0.1.0" authors.workspace = true description = "Cumulus-specific networking protocol" edition.workspace = true +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] async-trait = "0.1.73" diff --git a/cumulus/client/pov-recovery/Cargo.toml b/cumulus/client/pov-recovery/Cargo.toml index 2ce903fd35249a4bbf3863c24c6a34ef4907ef15..e407b33e0e2e73aa5f0b266c1d40516acfaa8d9c 100644 --- a/cumulus/client/pov-recovery/Cargo.toml +++ b/cumulus/client/pov-recovery/Cargo.toml @@ -4,6 +4,7 @@ version = "0.1.0" authors.workspace = true description = "Cumulus-specific networking protocol" edition.workspace = true +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive" ] } diff --git a/cumulus/client/pov-recovery/src/active_candidate_recovery.rs b/cumulus/client/pov-recovery/src/active_candidate_recovery.rs index 322b19c796a8c10191f3bfb261b293aba4348944..2c635320ff4ae6f68f33bb9da5ca545098851f65 100644 --- a/cumulus/client/pov-recovery/src/active_candidate_recovery.rs +++ b/cumulus/client/pov-recovery/src/active_candidate_recovery.rs @@ -16,12 +16,12 @@ use sp_runtime::traits::Block as BlockT; -use polkadot_node_primitives::AvailableData; +use polkadot_node_primitives::PoV; use polkadot_node_subsystem::messages::AvailabilityRecoveryMessage; use futures::{channel::oneshot, stream::FuturesUnordered, Future, FutureExt, StreamExt}; -use std::{collections::HashSet, pin::Pin}; +use std::{collections::HashSet, pin::Pin, sync::Arc}; use crate::RecoveryHandle; @@ -30,9 +30,8 @@ use crate::RecoveryHandle; /// This handles the 
candidate recovery and tracks the activate recoveries. pub(crate) struct ActiveCandidateRecovery { /// The recoveries that are currently being executed. - recoveries: FuturesUnordered< - Pin)> + Send>>, - >, + recoveries: + FuturesUnordered>)> + Send>>>, /// The block hashes of the candidates currently being recovered. candidates: HashSet, recovery_handle: Box, @@ -68,7 +67,7 @@ impl ActiveCandidateRecovery { self.recoveries.push( async move { match rx.await { - Ok(Ok(res)) => (block_hash, Some(res)), + Ok(Ok(res)) => (block_hash, Some(res.pov)), Ok(Err(error)) => { tracing::debug!( target: crate::LOG_TARGET, @@ -93,8 +92,8 @@ impl ActiveCandidateRecovery { /// Waits for the next recovery. /// - /// If the returned [`AvailableData`] is `None`, it means that the recovery failed. - pub async fn wait_for_recovery(&mut self) -> (Block::Hash, Option) { + /// If the returned [`PoV`] is `None`, it means that the recovery failed. + pub async fn wait_for_recovery(&mut self) -> (Block::Hash, Option>) { loop { if let Some(res) = self.recoveries.next().await { self.candidates.remove(&res.0); diff --git a/cumulus/client/pov-recovery/src/lib.rs b/cumulus/client/pov-recovery/src/lib.rs index b050bc66799c7aa84db67374dc801890e0e7ce25..b9a140f55c60fbbad18bc2f662e7040df6c7de04 100644 --- a/cumulus/client/pov-recovery/src/lib.rs +++ b/cumulus/client/pov-recovery/src/lib.rs @@ -51,7 +51,7 @@ use sc_consensus::import_queue::{ImportQueueService, IncomingBlock}; use sp_consensus::{BlockOrigin, BlockStatus, SyncOracle}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; -use polkadot_node_primitives::{AvailableData, POV_BOMB_LIMIT}; +use polkadot_node_primitives::{PoV, POV_BOMB_LIMIT}; use polkadot_node_subsystem::messages::AvailabilityRecoveryMessage; use polkadot_overseer::Handle as OverseerHandle; use polkadot_primitives::{ @@ -346,15 +346,11 @@ where } /// Handle a recovered candidate. 
- async fn handle_candidate_recovered( - &mut self, - block_hash: Block::Hash, - available_data: Option, - ) { - let available_data = match available_data { - Some(data) => { + async fn handle_candidate_recovered(&mut self, block_hash: Block::Hash, pov: Option<&PoV>) { + let pov = match pov { + Some(pov) => { self.candidates_in_retry.remove(&block_hash); - data + pov }, None => if self.candidates_in_retry.insert(block_hash) { @@ -373,18 +369,16 @@ where }, }; - let raw_block_data = match sp_maybe_compressed_blob::decompress( - &available_data.pov.block_data.0, - POV_BOMB_LIMIT, - ) { - Ok(r) => r, - Err(error) => { - tracing::debug!(target: LOG_TARGET, ?error, "Failed to decompress PoV"); + let raw_block_data = + match sp_maybe_compressed_blob::decompress(&pov.block_data.0, POV_BOMB_LIMIT) { + Ok(r) => r, + Err(error) => { + tracing::debug!(target: LOG_TARGET, ?error, "Failed to decompress PoV"); - self.reset_candidate(block_hash); - return - }, - }; + self.reset_candidate(block_hash); + return + }, + }; let block_data = match ParachainBlockData::::decode(&mut &raw_block_data[..]) { Ok(d) => d, @@ -595,10 +589,10 @@ where next_to_recover = self.candidate_recovery_queue.next_recovery().fuse() => { self.recover_candidate(next_to_recover).await; }, - (block_hash, available_data) = + (block_hash, pov) = self.active_candidate_recovery.wait_for_recovery().fuse() => { - self.handle_candidate_recovered(block_hash, available_data).await; + self.handle_candidate_recovered(block_hash, pov.as_deref()).await; }, } } diff --git a/cumulus/client/relay-chain-inprocess-interface/Cargo.toml b/cumulus/client/relay-chain-inprocess-interface/Cargo.toml index bc8d0d430c774bb3255f9494cd1fdced5a2d944d..87f0eabd9b5cdc4f13ecb3e39acff305982bee6b 100644 --- a/cumulus/client/relay-chain-inprocess-interface/Cargo.toml +++ b/cumulus/client/relay-chain-inprocess-interface/Cargo.toml @@ -3,6 +3,8 @@ authors.workspace = true name = "cumulus-relay-chain-inprocess-interface" version = "0.1.0" edition.workspace = true +description = "Implementation of the RelayChainInterface trait for Polkadot full-nodes." +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] async-trait = "0.1.73" diff --git a/cumulus/client/relay-chain-interface/Cargo.toml b/cumulus/client/relay-chain-interface/Cargo.toml index 3da7ab0b0e8212d7a05d972ca3fb7c82a0955bd3..c9d50afe8fa9d225ae9cd07396b065f0af0fda3a 100644 --- a/cumulus/client/relay-chain-interface/Cargo.toml +++ b/cumulus/client/relay-chain-interface/Cargo.toml @@ -3,6 +3,8 @@ authors.workspace = true name = "cumulus-relay-chain-interface" version = "0.1.0" edition.workspace = true +description = "Common interface for different relay chain datasources." 
+license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] polkadot-overseer = { path = "../../../polkadot/node/overseer" } diff --git a/cumulus/client/relay-chain-interface/src/lib.rs b/cumulus/client/relay-chain-interface/src/lib.rs index a6970732447c34f485a8a066d1bec27b762352a9..3dda61635804d47440689872271751e0bba27d87 100644 --- a/cumulus/client/relay-chain-interface/src/lib.rs +++ b/cumulus/client/relay-chain-interface/src/lib.rs @@ -41,7 +41,7 @@ pub type RelayChainResult = Result; #[derive(thiserror::Error, Debug)] pub enum RelayChainError { - #[error("Error occured while calling relay chain runtime: {0}")] + #[error("Error occurred while calling relay chain runtime: {0}")] ApiError(#[from] ApiError), #[error("Timeout while waiting for relay-chain block `{0}` to be imported.")] WaitTimeout(PHash), @@ -53,7 +53,7 @@ pub enum RelayChainError { WaitBlockchainError(PHash, sp_blockchain::Error), #[error("Blockchain returned an error: {0}")] BlockchainError(#[from] sp_blockchain::Error), - #[error("State machine error occured: {0}")] + #[error("State machine error occurred: {0}")] StateMachineError(Box), #[error("Unable to call RPC method '{0}'")] RpcCallError(String), @@ -67,7 +67,7 @@ pub enum RelayChainError { Application(#[from] Box), #[error("Prometheus error: {0}")] PrometheusError(#[from] PrometheusError), - #[error("Unspecified error occured: {0}")] + #[error("Unspecified error occurred: {0}")] GenericError(String), } diff --git a/cumulus/client/relay-chain-minimal-node/Cargo.toml b/cumulus/client/relay-chain-minimal-node/Cargo.toml index 226474d3d38cd1b34c6d1304855338bf3c6f4ff9..53173fb4118944cf4dddf0cbaac8d626b51dcd25 100644 --- a/cumulus/client/relay-chain-minimal-node/Cargo.toml +++ b/cumulus/client/relay-chain-minimal-node/Cargo.toml @@ -3,6 +3,8 @@ authors.workspace = true name = "cumulus-relay-chain-minimal-node" version = "0.1.0" edition.workspace = true +description = "Minimal node implementation to be used in tandem with RPC or light-client mode." 
+license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] # polkadot deps @@ -17,12 +19,14 @@ polkadot-collator-protocol = { path = "../../../polkadot/node/network/collator-p polkadot-network-bridge = { path = "../../../polkadot/node/network/bridge" } polkadot-node-collation-generation = { path = "../../../polkadot/node/collation-generation" } polkadot-node-core-runtime-api = { path = "../../../polkadot/node/core/runtime-api" } +polkadot-node-core-prospective-parachains = { path = "../../../polkadot/node/core/prospective-parachains" } # substrate deps sc-authority-discovery = { path = "../../../substrate/client/authority-discovery" } sc-network = { path = "../../../substrate/client/network" } sc-network-common = { path = "../../../substrate/client/network/common" } sc-service = { path = "../../../substrate/client/service" } +substrate-prometheus-endpoint = { path = "../../../substrate/utils/prometheus" } sc-tracing = { path = "../../../substrate/client/tracing" } sc-utils = { path = "../../../substrate/client/utils" } sp-api = { path = "../../../substrate/primitives/api" } @@ -36,7 +40,6 @@ cumulus-relay-chain-rpc-interface = { path = "../relay-chain-rpc-interface" } cumulus-primitives-core = { path = "../../primitives/core" } array-bytes = "6.1" -schnellru = "0.2.1" tracing = "0.1.37" async-trait = "0.1.73" futures = "0.3.28" diff --git a/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs b/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs index 3f4c08ecbb8341c0c758cb60c7eb3063d236e295..a473b3bced02b246c412659e5921b6a950b2086b 100644 --- a/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs +++ b/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs @@ -24,6 +24,7 @@ use polkadot_overseer::RuntimeApiSubsystemClient; use polkadot_primitives::{ async_backing::{AsyncBackingParams, BackingState}, slashing, + vstaging::NodeFeatures, }; use sc_authority_discovery::{AuthorityDiscovery, Error as AuthorityDiscoveryError}; use sp_api::{ApiError, RuntimeApiInfo}; @@ -346,6 +347,13 @@ impl RuntimeApiSubsystemClient for BlockChainRpcClient { Ok(self.rpc_client.parachain_host_minimum_backing_votes(at, session_index).await?) } + async fn disabled_validators( + &self, + at: Hash, + ) -> Result, ApiError> { + Ok(self.rpc_client.parachain_host_disabled_validators(at).await?) + } + async fn async_backing_params(&self, at: Hash) -> Result { Ok(self.rpc_client.parachain_host_async_backing_params(at).await?) } @@ -357,6 +365,10 @@ impl RuntimeApiSubsystemClient for BlockChainRpcClient { ) -> Result, ApiError> { Ok(self.rpc_client.parachain_host_para_backing_state(at, para_id).await?) } + + async fn node_features(&self, at: Hash) -> Result { + Ok(self.rpc_client.parachain_host_node_features(at).await?) + } } #[async_trait::async_trait] diff --git a/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs b/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs index a83a18f7cd965743cdb4490c3966b6ca8b6b7272..945344f85e975f54a1315ecf7dd48fc332dedc2e 100644 --- a/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs +++ b/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs @@ -15,7 +15,6 @@ // along with Polkadot. If not, see . 
use futures::{select, StreamExt}; -use schnellru::{ByLength, LruMap}; use std::sync::Arc; use polkadot_availability_recovery::AvailabilityRecoverySubsystem; @@ -25,6 +24,7 @@ use polkadot_network_bridge::{ NetworkBridgeTx as NetworkBridgeTxSubsystem, }; use polkadot_node_collation_generation::CollationGenerationSubsystem; +use polkadot_node_core_prospective_parachains::ProspectiveParachainsSubsystem; use polkadot_node_core_runtime_api::RuntimeApiSubsystem; use polkadot_node_network_protocol::{ peer_set::PeerSetProtocolNames, @@ -36,7 +36,7 @@ use polkadot_node_network_protocol::{ use polkadot_node_subsystem_util::metrics::{prometheus::Registry, Metrics}; use polkadot_overseer::{ BlockInfo, DummySubsystem, Handle, Overseer, OverseerConnector, OverseerHandle, SpawnGlue, - UnpinHandle, KNOWN_LEAVES_CACHE_SIZE, + UnpinHandle, }; use polkadot_primitives::CollatorPair; @@ -44,7 +44,6 @@ use sc_authority_discovery::Service as AuthorityDiscoveryService; use sc_network::NetworkStateInfo; use sc_service::TaskManager; use sc_utils::mpsc::tracing_unbounded; -use sp_runtime::traits::Block as BlockT; use cumulus_primitives_core::relay_chain::{Block, Hash as PHash}; use cumulus_relay_chain_interface::RelayChainError; @@ -103,7 +102,7 @@ fn build_overseer( let network_bridge_metrics: NetworkBridgeMetrics = Metrics::register(registry)?; let builder = Overseer::builder() .availability_distribution(DummySubsystem) - .availability_recovery(AvailabilityRecoverySubsystem::with_availability_store_skip( + .availability_recovery(AvailabilityRecoverySubsystem::for_collator( available_data_req_receiver, Metrics::register(registry)?, )) @@ -146,7 +145,7 @@ fn build_overseer( spawner.clone(), )) .statement_distribution(DummySubsystem) - .prospective_parachains(DummySubsystem) + .prospective_parachains(ProspectiveParachainsSubsystem::new(Metrics::register(registry)?)) .approval_distribution(DummySubsystem) .approval_voting(DummySubsystem) .gossip_support(DummySubsystem) @@ -157,7 +156,6 @@ fn build_overseer( .span_per_active_leaf(Default::default()) .active_leaves(Default::default()) .supports_parachains(runtime_client) - .known_leaves(LruMap::new(ByLength::new(KNOWN_LEAVES_CACHE_SIZE))) .metrics(Metrics::register(registry)?) 
.spawner(spawner); @@ -209,8 +207,6 @@ pub struct NewMinimalNode { pub task_manager: TaskManager, /// Overseer handle to interact with subsystems pub overseer_handle: Handle, - /// Network service - pub network: Arc::Hash>>, } /// Glues together the [`Overseer`] and `BlockchainEvents` by forwarding diff --git a/cumulus/client/relay-chain-minimal-node/src/lib.rs b/cumulus/client/relay-chain-minimal-node/src/lib.rs index 08e4e8e34aba4219382b4554bfb8ff97762c2009..8801f93640c181cbf27084656d9d7f28cabff1d7 100644 --- a/cumulus/client/relay-chain-minimal-node/src/lib.rs +++ b/cumulus/client/relay-chain-minimal-node/src/lib.rs @@ -32,10 +32,10 @@ use polkadot_primitives::CollatorPair; use sc_authority_discovery::Service as AuthorityDiscoveryService; use sc_network::{config::FullNetworkConfiguration, Event, NetworkEventStream, NetworkService}; -use sc_service::{Configuration, TaskManager}; +use sc_service::{config::PrometheusConfig, Configuration, TaskManager}; use sp_runtime::{app_crypto::Pair, traits::Block as BlockT}; -use futures::StreamExt; +use futures::{FutureExt, StreamExt}; use std::sync::Arc; mod blockchain_rpc_client; @@ -69,7 +69,7 @@ fn build_authority_discovery_service( ..Default::default() }, client, - network.clone(), + network, Box::pin(dht_event_stream), authority_discovery_role, prometheus_registry, @@ -160,19 +160,18 @@ async fn new_minimal_relay_chain( let role = config.role.clone(); let mut net_config = sc_network::config::FullNetworkConfiguration::new(&config.network); - let task_manager = { - let registry = config.prometheus_config.as_ref().map(|cfg| &cfg.registry); - TaskManager::new(config.tokio_handle.clone(), registry)? - }; - - let prometheus_registry = config.prometheus_registry().cloned(); + let prometheus_registry = config.prometheus_registry(); + let task_manager = TaskManager::new(config.tokio_handle.clone(), prometheus_registry)?; - let genesis_hash = relay_chain_rpc_client - .block_get_hash(Some(0)) - .await - .expect("Genesis block hash is always available; qed") - .unwrap_or_default(); + if let Some(PrometheusConfig { port, registry }) = config.prometheus_config.clone() { + task_manager.spawn_handle().spawn( + "prometheus-endpoint", + None, + substrate_prometheus_endpoint::init_prometheus(port, registry).map(drop), + ); + } + let genesis_hash = relay_chain_rpc_client.block_get_hash(Some(0)).await?.unwrap_or_default(); let peer_set_protocol_names = PeerSetProtocolNames::new(genesis_hash, config.chain_spec.fork_id()); let is_authority = if role.is_authority() { IsAuthority::Yes } else { IsAuthority::No }; @@ -203,18 +202,18 @@ async fn new_minimal_relay_chain( relay_chain_rpc_client.clone(), &config, network.clone(), - prometheus_registry.clone(), + prometheus_registry.cloned(), ); let overseer_args = CollatorOverseerGenArgs { runtime_client: relay_chain_rpc_client.clone(), - network_service: network.clone(), + network_service: network, sync_oracle, authority_discovery_service, collation_req_receiver_v1, collation_req_receiver_v2, available_data_req_receiver, - registry: prometheus_registry.as_ref(), + registry: prometheus_registry, spawner: task_manager.spawn_handle(), collator_pair, req_protocol_names: request_protocol_names, @@ -226,7 +225,7 @@ async fn new_minimal_relay_chain( network_starter.start_network(); - Ok(NewMinimalNode { task_manager, overseer_handle, network }) + Ok(NewMinimalNode { task_manager, overseer_handle }) } fn build_request_response_protocol_receivers( diff --git a/cumulus/client/relay-chain-minimal-node/src/network.rs 
b/cumulus/client/relay-chain-minimal-node/src/network.rs index f39d7a26dd8838d693d8c32d62d51fa147c26cda..813dca47a0398365f935424f79bcd95169d6f2a9 100644 --- a/cumulus/client/relay-chain-minimal-node/src/network.rs +++ b/cumulus/client/relay-chain-minimal-node/src/network.rs @@ -19,7 +19,8 @@ use sp_runtime::traits::{Block as BlockT, NumberFor}; use sc_network::{ config::{ - NonDefaultSetConfig, NonReservedPeerMode, NotificationHandshake, ProtocolId, SetConfig, + NetworkConfiguration, NonDefaultSetConfig, NonReservedPeerMode, NotificationHandshake, + ProtocolId, SetConfig, }, peer_store::PeerStore, NetworkService, @@ -35,7 +36,7 @@ use std::{iter, sync::Arc}; /// Build the network service, the network status sinks and an RPC sender. pub(crate) fn build_collator_network( config: &Configuration, - network_config: FullNetworkConfiguration, + mut full_network_config: FullNetworkConfiguration, spawn_handle: SpawnTaskHandle, genesis_hash: Hash, best_header: Header, @@ -53,8 +54,12 @@ pub(crate) fn build_collator_network( genesis_hash, ); + // Since this node has no syncing, we do not want light-clients to connect to it. + // Here we set any potential light-client slots to 0. + adjust_network_config_light_in_peers(&mut full_network_config.network_config); + let peer_store = PeerStore::new( - network_config + full_network_config .network_config .boot_nodes .iter() @@ -75,7 +80,7 @@ pub(crate) fn build_collator_network( }) }, fork_id: None, - network_config, + network_config: full_network_config, peer_store: peer_store_handle, genesis_hash, protocol_id, @@ -114,6 +119,18 @@ pub(crate) fn build_collator_network( Ok((network_service, network_starter, Box::new(SyncOracle {}))) } +fn adjust_network_config_light_in_peers(config: &mut NetworkConfiguration) { + let light_client_in_peers = (config.default_peers_set.in_peers + + config.default_peers_set.out_peers) + .saturating_sub(config.default_peers_set_num_full); + if light_client_in_peers > 0 { + tracing::debug!(target: crate::LOG_TARGET, "Detected {light_client_in_peers} peer slots for light clients. Since this minimal node does support\ + neither syncing nor light-client request/response, we are setting them to 0."); + } + config.default_peers_set.in_peers = + config.default_peers_set.in_peers.saturating_sub(light_client_in_peers); +} + struct SyncOracle; impl sp_consensus::SyncOracle for SyncOracle { diff --git a/cumulus/client/relay-chain-rpc-interface/Cargo.toml b/cumulus/client/relay-chain-rpc-interface/Cargo.toml index 0f09377e106ce4274ddf0d59028476af3127a51b..93fe57145b0c28e9f95a5b2cab1fad6105f61fad 100644 --- a/cumulus/client/relay-chain-rpc-interface/Cargo.toml +++ b/cumulus/client/relay-chain-rpc-interface/Cargo.toml @@ -3,6 +3,8 @@ authors.workspace = true name = "cumulus-relay-chain-rpc-interface" version = "0.1.0" edition.workspace = true +description = "Implementation of the RelayChainInterface trait that connects to a remote RPC-node." 
+license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] @@ -32,7 +34,7 @@ jsonrpsee = { version = "0.16.2", features = ["ws-client"] } tracing = "0.1.37" async-trait = "0.1.73" url = "2.4.0" -serde_json = "1.0.107" +serde_json = "1.0.108" serde = "1.0.188" schnellru = "0.2.1" smoldot = { version = "0.11.0", default_features = false, features = ["std"]} diff --git a/cumulus/client/relay-chain-rpc-interface/src/light_client_worker.rs b/cumulus/client/relay-chain-rpc-interface/src/light_client_worker.rs index 84e66f95571096c319ab1aa05a12a11cdc4c1035..6fd057e170b715a1586dbd2dbf3f608268c539bd 100644 --- a/cumulus/client/relay-chain-rpc-interface/src/light_client_worker.rs +++ b/cumulus/client/relay-chain-rpc-interface/src/light_client_worker.rs @@ -48,7 +48,7 @@ const MAX_SUBSCRIPTIONS: u32 = 64; #[derive(thiserror::Error, Debug)] enum LightClientError { - #[error("Error occured while executing smoldot request: {0}")] + #[error("Error occurred while executing smoldot request: {0}")] SmoldotError(String), #[error("Nothing returned from json_rpc_responses")] EmptyResult, diff --git a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs index b1fd7d1ab7d9cc4b9805c449d1b207d5126a8e5b..cc993c6ff9f6257b77a41e46e175c2fd7f752309 100644 --- a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs +++ b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs @@ -31,7 +31,9 @@ use parity_scale_codec::{Decode, Encode}; use cumulus_primitives_core::{ relay_chain::{ async_backing::{AsyncBackingParams, BackingState}, - slashing, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, + slashing, + vstaging::NodeFeatures, + BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash as RelayHash, Header as RelayHeader, InboundHrmpMessage, OccupiedCoreAssumption, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, @@ -201,7 +203,7 @@ impl RelayChainRpcClient { let value = rx.await.map_err(|err| { RelayChainError::WorkerCommunicationError(format!( - "Unexpected channel close on RPC worker side: {}", + "RPC worker channel closed. This can hint and connectivity issues with the supplied RPC endpoints. Message: {}", err )) })??; @@ -597,6 +599,22 @@ impl RelayChainRpcClient { .await } + pub async fn parachain_host_node_features( + &self, + at: RelayHash, + ) -> Result { + self.call_remote_runtime_function("ParachainHost_node_features", at, None::<()>) + .await + } + + pub async fn parachain_host_disabled_validators( + &self, + at: RelayHash, + ) -> Result, RelayChainError> { + self.call_remote_runtime_function("ParachainHost_disabled_validators", at, None::<()>) + .await + } + #[allow(missing_docs)] pub async fn parachain_host_async_backing_params( &self, diff --git a/cumulus/client/service/Cargo.toml b/cumulus/client/service/Cargo.toml index b7c274ceecdcbb524ee57d228ef475699402ea49..f80c65128d5263f4d7809383476557b9df4ab1b4 100644 --- a/cumulus/client/service/Cargo.toml +++ b/cumulus/client/service/Cargo.toml @@ -3,6 +3,8 @@ name = "cumulus-client-service" version = "0.1.0" authors.workspace = true edition.workspace = true +description = "Common functions used to assemble the components of a parachain node." 
+license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] futures = "0.3.28" diff --git a/cumulus/client/service/src/lib.rs b/cumulus/client/service/src/lib.rs index 82890666ebae3f197e291019d215ea9d446ef71f..f8ebca11c8c1fbc05c290b436e3e8abd99771eb3 100644 --- a/cumulus/client/service/src/lib.rs +++ b/cumulus/client/service/src/lib.rs @@ -49,7 +49,7 @@ use sc_utils::mpsc::TracingUnboundedSender; use sp_api::ProvideRuntimeApi; use sp_blockchain::{HeaderBackend, HeaderMetadata}; use sp_core::{traits::SpawnNamed, Decode}; -use sp_runtime::traits::{Block as BlockT, BlockIdTo}; +use sp_runtime::traits::{Block as BlockT, BlockIdTo, Header}; use std::{sync::Arc, time::Duration}; // Given the sporadic nature of the explicit recovery operation and the @@ -505,11 +505,11 @@ where None, async move { log::debug!( - target: "cumulus-network", + target: LOG_TARGET_SYNC, "waiting for announce block in a background task...", ); - let _ = wait_for_target_block::(sender, para_id, relay_chain_interface) + let _ = wait_for_finalized_para_head::(sender, para_id, relay_chain_interface) .await .map_err(|e| { log::error!( @@ -527,7 +527,7 @@ where /// Waits for the relay chain to have finished syncing and then gets the parachain header that /// corresponds to the last finalized relay chain block. -async fn wait_for_target_block( +async fn wait_for_finalized_para_head( sender: oneshot::Sender<::Header>, para_id: ParaId, relay_chain_interface: RCInterface, @@ -560,11 +560,15 @@ where .map_err(|e| format!("{e:?}"))? .ok_or("Could not find parachain head in relay chain")?; - let target_block = B::Header::decode(&mut &validation_data.parent_head.0[..]) + let finalized_header = B::Header::decode(&mut &validation_data.parent_head.0[..]) .map_err(|e| format!("Failed to decode parachain head: {e}"))?; - log::debug!(target: LOG_TARGET_SYNC, "Target block reached {:?}", target_block); - let _ = sender.send(target_block); + log::info!( + "🎉 Received target parachain header #{} ({}) from the relay chain.", + finalized_header.number(), + finalized_header.hash() + ); + let _ = sender.send(finalized_header); return Ok(()) } } diff --git a/cumulus/pallets/aura-ext/Cargo.toml b/cumulus/pallets/aura-ext/Cargo.toml index a804edb58b39cd81ac9b93fb876db087510d5dda..c9d82ead1ebcb9df30fc95ec2c257a8025dd5a44 100644 --- a/cumulus/pallets/aura-ext/Cargo.toml +++ b/cumulus/pallets/aura-ext/Cargo.toml @@ -4,10 +4,11 @@ version = "0.1.0" authors.workspace = true edition.workspace = true description = "AURA consensus extension pallet for parachains" +license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } # Substrate frame-support = { path = "../../../substrate/frame/support", default-features = false} diff --git a/cumulus/pallets/collator-selection/Cargo.toml b/cumulus/pallets/collator-selection/Cargo.toml index 1aba84aa29c7bde5ddedb2a16af7663d13f14b70..68e4a681c2b179fa5b2d7bf3fe0d90cdc9befd77 100644 --- a/cumulus/pallets/collator-selection/Cargo.toml +++ b/cumulus/pallets/collator-selection/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] log = { version = "0.4.20", default-features = false } codec = { default-features = false, features = ["derive"], package = "parity-scale-codec", version = "3.0.0" } rand = { version = "0.8.5", features = 
["std_rng"], default-features = false } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } sp-std = { path = "../../../substrate/primitives/std", default-features = false} sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false} diff --git a/cumulus/pallets/collator-selection/src/benchmarking.rs b/cumulus/pallets/collator-selection/src/benchmarking.rs index 49999dc114df7bdc94dca2f8c563d7c1d8bb6083..fa95303495dd1039f93f79baf93dc5301a31840d 100644 --- a/cumulus/pallets/collator-selection/src/benchmarking.rs +++ b/cumulus/pallets/collator-selection/src/benchmarking.rs @@ -25,14 +25,11 @@ use codec::Decode; use frame_benchmarking::{ account, impl_benchmark_test_suite, v2::*, whitelisted_caller, BenchmarkError, }; -use frame_support::{ - dispatch::DispatchResult, - traits::{Currency, EnsureOrigin, Get, ReservableCurrency}, -}; +use frame_support::traits::{Currency, EnsureOrigin, Get, ReservableCurrency}; use frame_system::{pallet_prelude::BlockNumberFor, EventRecord, RawOrigin}; use pallet_authorship::EventHandler; use pallet_session::{self as session, SessionManager}; -use sp_std::prelude::*; +use sp_std::{cmp, prelude::*}; pub type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; @@ -94,7 +91,7 @@ fn register_candidates(count: u32) { assert!(>::get() > 0u32.into(), "Bond cannot be zero!"); for who in candidates { - T::Currency::make_free_balance_be(&who, >::get() * 2u32.into()); + T::Currency::make_free_balance_be(&who, >::get() * 3u32.into()); >::register_as_candidate(RawOrigin::Signed(who).into()).unwrap(); } } @@ -107,8 +104,11 @@ fn min_candidates() -> u32 { fn min_invulnerables() -> u32 { let min_collators = T::MinEligibleCollators::get(); - let candidates_length = >::get().len(); - min_collators.saturating_sub(candidates_length.try_into().unwrap()) + let candidates_length = >::decode_len() + .unwrap_or_default() + .try_into() + .unwrap_or_default(); + min_collators.saturating_sub(candidates_length) } #[benchmarks(where T: pallet_authorship::Config + session::Config)] @@ -160,22 +160,19 @@ mod benchmarks { .unwrap(); } // ... and register them. 
- for (who, _) in candidates { + for (who, _) in candidates.iter() { let deposit = >::get(); - T::Currency::make_free_balance_be(&who, deposit * 1000_u32.into()); - let incoming = CandidateInfo { who: who.clone(), deposit }; - >::try_mutate(|candidates| -> DispatchResult { - if !candidates.iter().any(|candidate| candidate.who == who) { - T::Currency::reserve(&who, deposit)?; - candidates.try_push(incoming).expect("we've respected the bounded vec limit"); - >::insert( - who.clone(), - frame_system::Pallet::::block_number() + T::KickThreshold::get(), - ); - } - Ok(()) + T::Currency::make_free_balance_be(who, deposit * 1000_u32.into()); + >::try_mutate(|list| { + list.try_push(CandidateInfo { who: who.clone(), deposit }).unwrap(); + Ok::<(), BenchmarkError>(()) }) - .expect("only returns ok"); + .unwrap(); + T::Currency::reserve(who, deposit)?; + >::insert( + who.clone(), + frame_system::Pallet::::block_number() + T::KickThreshold::get(), + ); } // now we need to fill up invulnerables @@ -226,10 +223,27 @@ mod benchmarks { } #[benchmark] - fn set_candidacy_bond() -> Result<(), BenchmarkError> { - let bond_amount: BalanceOf = T::Currency::minimum_balance() * 10u32.into(); + fn set_candidacy_bond( + c: Linear<0, { T::MaxCandidates::get() }>, + k: Linear<0, { T::MaxCandidates::get() }>, + ) -> Result<(), BenchmarkError> { + let initial_bond_amount: BalanceOf = T::Currency::minimum_balance() * 2u32.into(); + >::put(initial_bond_amount); + register_validators::(c); + register_candidates::(c); + let kicked = cmp::min(k, c); let origin = T::UpdateOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + let bond_amount = if k > 0 { + >::mutate(|candidates| { + for info in candidates.iter_mut().skip(kicked as usize) { + info.deposit = T::Currency::minimum_balance() * 3u32.into(); + } + }); + T::Currency::minimum_balance() * 3u32.into() + } else { + T::Currency::minimum_balance() + }; #[extrinsic_call] _(origin as T::RuntimeOrigin, bond_amount); @@ -238,6 +252,35 @@ mod benchmarks { Ok(()) } + #[benchmark] + fn update_bond( + c: Linear<{ min_candidates::() + 1 }, { T::MaxCandidates::get() }>, + ) -> Result<(), BenchmarkError> { + >::put(T::Currency::minimum_balance()); + >::put(c); + + register_validators::(c); + register_candidates::(c); + + let caller = >::get()[0].who.clone(); + v2::whitelist!(caller); + + let bond_amount: BalanceOf = + T::Currency::minimum_balance() + T::Currency::minimum_balance(); + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), bond_amount); + + assert_last_event::( + Event::CandidateBondUpdated { account_id: caller, deposit: bond_amount }.into(), + ); + assert!( + >::get().iter().last().unwrap().deposit == + T::Currency::minimum_balance() * 2u32.into() + ); + Ok(()) + } + // worse case is when we have all the max-candidate slots filled except one, and we fill that // one. 
#[benchmark] @@ -267,6 +310,36 @@ mod benchmarks { ); } + #[benchmark] + fn take_candidate_slot(c: Linear<{ min_candidates::() + 1 }, { T::MaxCandidates::get() }>) { + >::put(T::Currency::minimum_balance()); + >::put(1); + + register_validators::(c); + register_candidates::(c); + + let caller: T::AccountId = whitelisted_caller(); + let bond: BalanceOf = T::Currency::minimum_balance() * 10u32.into(); + T::Currency::make_free_balance_be(&caller, bond); + + >::set_keys( + RawOrigin::Signed(caller.clone()).into(), + keys::(c + 1), + Vec::new(), + ) + .unwrap(); + + let target = >::get().iter().last().unwrap().who.clone(); + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), bond / 2u32.into(), target.clone()); + + assert_last_event::( + Event::CandidateReplaced { old: target, new: caller, deposit: bond / 2u32.into() } + .into(), + ); + } + // worse case is the last candidate leaving. #[benchmark] fn leave_intent(c: Linear<{ min_candidates::() + 1 }, { T::MaxCandidates::get() }>) { @@ -276,7 +349,7 @@ mod benchmarks { register_validators::(c); register_candidates::(c); - let leaving = >::get().last().unwrap().who.clone(); + let leaving = >::get().iter().last().unwrap().who.clone(); v2::whitelist!(leaving); #[extrinsic_call] @@ -323,31 +396,37 @@ mod benchmarks { let new_block: BlockNumberFor = 1800u32.into(); let zero_block: BlockNumberFor = 0u32.into(); - let candidates = >::get(); + let candidates: Vec = >::get() + .iter() + .map(|candidate_info| candidate_info.who.clone()) + .collect(); let non_removals = c.saturating_sub(r); for i in 0..c { - >::insert(candidates[i as usize].who.clone(), zero_block); + >::insert(candidates[i as usize].clone(), zero_block); } if non_removals > 0 { for i in 0..non_removals { - >::insert(candidates[i as usize].who.clone(), new_block); + >::insert(candidates[i as usize].clone(), new_block); } } else { for i in 0..c { - >::insert(candidates[i as usize].who.clone(), new_block); + >::insert(candidates[i as usize].clone(), new_block); } } let min_candidates = min_candidates::(); - let pre_length = >::get().len(); + let pre_length = >::decode_len().unwrap_or_default(); frame_system::Pallet::::set_block_number(new_block); - assert!(>::get().len() == c as usize); - + let current_length: u32 = >::decode_len() + .unwrap_or_default() + .try_into() + .unwrap_or_default(); + assert!(c == current_length); #[block] { as SessionManager<_>>::new_session(0); @@ -357,16 +436,20 @@ mod benchmarks { // candidates > removals and remaining candidates > min candidates // => remaining candidates should be shorter than before removal, i.e. some were // actually removed. - assert!(>::get().len() < pre_length); + assert!(>::decode_len().unwrap_or_default() < pre_length); } else if c > r && non_removals < min_candidates { // candidates > removals and remaining candidates would be less than min candidates // => remaining candidates should equal min candidates, i.e. some were removed up to // the minimum, but then any more were "forced" to stay in candidates. 
- assert!(>::get().len() == min_candidates as usize); + let current_length: u32 = >::decode_len() + .unwrap_or_default() + .try_into() + .unwrap_or_default(); + assert!(min_candidates == current_length); } else { // removals >= candidates, non removals must == 0 // can't remove more than exist - assert!(>::get().len() == pre_length); + assert!(>::decode_len().unwrap_or_default() == pre_length); } } diff --git a/cumulus/pallets/collator-selection/src/lib.rs b/cumulus/pallets/collator-selection/src/lib.rs index 24493ce9d9cdc7f451f2e997615962ee5f0662e0..7449f4d68c7eacc8b07fa45f2f991c13f3d5713b 100644 --- a/cumulus/pallets/collator-selection/src/lib.rs +++ b/cumulus/pallets/collator-selection/src/lib.rs @@ -35,16 +35,36 @@ //! //! 1. [`Invulnerables`]: a set of collators appointed by governance. These accounts will always be //! collators. -//! 2. [`Candidates`]: these are *candidates to the collation task* and may or may not be elected as -//! a final collator. +//! 2. [`CandidateList`]: these are *candidates to the collation task* and may or may not be elected +//! as a final collator. //! -//! The current implementation resolves congestion of [`Candidates`] in a first-come-first-serve -//! manner. +//! The current implementation resolves congestion of [`CandidateList`] through a simple auction +//! mechanism. Candidates bid for the collator slots and at the end of the session, the auction ends +//! and the top candidates are selected to become collators. The number of selected candidates is +//! determined by the value of `DesiredCandidates`. +//! +//! Before the list reaches full capacity, candidates can register by placing the minimum bond +//! through `register_as_candidate`. Then, if an account wants to participate in the collator slot +//! auction, they have to replace an existing candidate by placing a greater deposit through +//! `take_candidate_slot`. Existing candidates can increase their bids through `update_bond`. +//! +//! At any point, an account can take the place of another account in the candidate list if they put +//! up a greater deposit than the target. While new joiners would like to deposit as little as +//! possible to participate in the auction, the replacement threat incentivizes candidates to bid as +//! close to their budget as possible in order to avoid being replaced. +//! +//! Candidates which are not on "winning" slots in the list can also decrease their deposits through +//! `update_bond`, but candidates who are on top slots and try to decrease their deposits will fail +//! in order to enforce auction mechanics and have meaningful bids. //! //! Candidates will not be allowed to get kicked or `leave_intent` if the total number of collators //! would fall below `MinEligibleCollators`. This is to ensure that some collators will always //! exist, i.e. someone is eligible to produce a block. //! +//! When a new session starts, candidates with the highest deposits will be selected in order until +//! the desired number of collators is reached. Candidates can increase or decrease their deposits +//! between sessions in order to ensure they receive a slot in the collator list. +//! //! ### Rewards //! //! The Collator Selection pallet maintains an on-chain account (the "Pot"). In each block, the @@ -56,8 +76,8 @@ //! //! To initiate rewards, an ED needs to be transferred to the pot address. //! -//! Note: Eventually the Pot distribution may be modified as discussed in -//! [this issue](https://github.com/paritytech/statemint/issues/21#issuecomment-810481073). +//! 
Note: Eventually the Pot distribution may be modified as discussed in [this +//! issue](https://github.com/paritytech/statemint/issues/21#issuecomment-810481073). #![cfg_attr(not(feature = "std"), no_std)] @@ -182,9 +202,12 @@ pub mod pallet { /// The (community, limited) collation candidates. `Candidates` and `Invulnerables` should be /// mutually exclusive. + /// + /// This list is sorted in ascending order by deposit and when the deposits are equal, the least + /// recently updated is considered greater. #[pallet::storage] - #[pallet::getter(fn candidates)] - pub type Candidates = StorageValue< + #[pallet::getter(fn candidate_list)] + pub type CandidateList = StorageValue< _, BoundedVec>, T::MaxCandidates>, ValueQuery, @@ -261,8 +284,12 @@ pub mod pallet { NewCandidacyBond { bond_amount: BalanceOf }, /// A new candidate joined. CandidateAdded { account_id: T::AccountId, deposit: BalanceOf }, + /// Bond of a candidate updated. + CandidateBondUpdated { account_id: T::AccountId, deposit: BalanceOf }, /// A candidate was removed. CandidateRemoved { account_id: T::AccountId }, + /// An account was replaced in the candidate list by another one. + CandidateReplaced { old: T::AccountId, new: T::AccountId, deposit: BalanceOf }, /// An account was unable to be added to the Invulnerables because they did not have keys /// registered. Other Invulnerables may have been set. InvalidInvulnerableSkipped { account_id: T::AccountId }, @@ -288,12 +315,38 @@ pub mod pallet { NoAssociatedValidatorId, /// Validator ID is not yet registered. ValidatorNotRegistered, + /// Could not insert in the candidate list. + InsertToCandidateListFailed, + /// Could not remove from the candidate list. + RemoveFromCandidateListFailed, + /// New deposit amount would be below the minimum candidacy bond. + DepositTooLow, + /// Could not update the candidate list. + UpdateCandidateListFailed, + /// Deposit amount is too low to take the target's slot in the candidate list. + InsufficientBond, + /// The target account to be replaced in the candidate list is not a candidate. + TargetIsNotCandidate, + /// The updated deposit amount is equal to the amount already reserved. + IdenticalDeposit, + /// Cannot lower candidacy bond while occupying a future collator slot in the list. + InvalidUnreserve, } #[pallet::hooks] impl Hooks> for Pallet { fn integrity_test() { assert!(T::MinEligibleCollators::get() > 0, "chain must require at least one collator"); + assert!( + T::MaxInvulnerables::get().saturating_add(T::MaxCandidates::get()) >= + T::MinEligibleCollators::get(), + "invulnerables and candidates must be able to satisfy collator demand" + ); + } + + #[cfg(feature = "try-runtime")] + fn try_state(_: BlockNumberFor) -> Result<(), sp_runtime::TryRuntimeError> { + Self::do_try_state() } } @@ -307,8 +360,8 @@ pub mod pallet { /// acceptable Invulnerables, and is not proposing a _set_ of new Invulnerables. /// /// This call does not maintain mutual exclusivity of `Invulnerables` and `Candidates`. It - /// is recommended to use a batch of `add_invulnerable` and `remove_invulnerable` instead. - /// A `batch_all` can also be used to enforce atomicity. If any candidates are included in + /// is recommended to use a batch of `add_invulnerable` and `remove_invulnerable` instead. A + /// `batch_all` can also be used to enforce atomicity. If any candidates are included in /// `new`, they should be removed with `remove_invulnerable_candidate` after execution. /// /// Must be called by the `UpdateOrigin`. 
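The auction mechanics documented above can be summarised with a short sketch of the three candidate-facing calls. It assumes a mock runtime in which this pallet is instantiated as `CollatorSelection`, with `new_test_ext`, `RuntimeOrigin`, funded accounts and registered session keys as in typical pallet test scaffolding; the amounts are arbitrary and the snippet is not part of this diff:

// Illustrative sketch of the candidate auction flow (not part of the patch). Assumes a mock
// runtime with this pallet as `CollatorSelection` and funded accounts 3 and 4.
#[test]
fn candidate_auction_flow_sketch() {
	new_test_ext().execute_with(|| {
		// Join the bottom of `CandidateList` by reserving the minimum candidacy bond.
		assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3)));

		// Raise the bid; the list stays sorted in ascending order of deposit, so a higher
		// deposit moves the candidate towards the winning (top) slots.
		assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 20));

		// A newcomer can take an existing candidate's place by putting up a greater deposit
		// than the target; the pallet emits `CandidateReplaced` on success.
		assert_ok!(CollatorSelection::take_candidate_slot(RuntimeOrigin::signed(4), 25, 3));
	});
}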
@@ -319,8 +372,9 @@ pub mod pallet { // don't wipe out the collator set if new.is_empty() { + // Casting `u32` to `usize` should be safe on all machines running this. ensure!( - Candidates::::decode_len().unwrap_or_default() >= + CandidateList::::decode_len().unwrap_or_default() >= T::MinEligibleCollators::get() as usize, Error::::TooFewEligibleCollators ); @@ -401,17 +455,47 @@ pub mod pallet { /// Set the candidacy bond amount. /// + /// If the candidacy bond is increased by this call, all current candidates which have a + /// deposit lower than the new bond will be kicked from the list and get their deposits + /// back. + /// /// The origin for this call must be the `UpdateOrigin`. #[pallet::call_index(2)] - #[pallet::weight(T::WeightInfo::set_candidacy_bond())] + #[pallet::weight(T::WeightInfo::set_candidacy_bond( + T::MaxCandidates::get(), + T::MaxCandidates::get() + ))] pub fn set_candidacy_bond( origin: OriginFor, bond: BalanceOf, ) -> DispatchResultWithPostInfo { T::UpdateOrigin::ensure_origin(origin)?; - >::put(bond); + let bond_increased = >::mutate(|old_bond| -> bool { + let bond_increased = *old_bond < bond; + *old_bond = bond; + bond_increased + }); + let initial_len = >::decode_len().unwrap_or_default(); + let kicked = (bond_increased && initial_len > 0) + .then(|| { + // Closure below returns the number of candidates which were kicked because + // their deposits were lower than the new candidacy bond. + >::mutate(|candidates| -> usize { + let first_safe_candidate = candidates + .iter() + .position(|candidate| candidate.deposit >= bond) + .unwrap_or(initial_len); + let kicked_candidates = candidates.drain(..first_safe_candidate); + for candidate in kicked_candidates { + T::Currency::unreserve(&candidate.who, candidate.deposit); + >::remove(candidate.who); + } + first_safe_candidate + }) + }) + .unwrap_or_default(); Self::deposit_event(Event::NewCandidacyBond { bond_amount: bond }); - Ok(().into()) + Ok(Some(T::WeightInfo::set_candidacy_bond(initial_len as u32, kicked as u32)).into()) } /// Register this account as a collator candidate. The account must (a) already have @@ -424,8 +508,11 @@ pub mod pallet { let who = ensure_signed(origin)?; // ensure we are below limit. - let length = >::decode_len().unwrap_or_default(); - ensure!((length as u32) < Self::desired_candidates(), Error::::TooManyCandidates); + let length: u32 = >::decode_len() + .unwrap_or_default() + .try_into() + .unwrap_or_default(); + ensure!(length < T::MaxCandidates::get(), Error::::TooManyCandidates); ensure!(!Self::invulnerables().contains(&who), Error::::AlreadyInvulnerable); let validator_key = T::ValidatorIdOf::convert(who.clone()) @@ -437,25 +524,27 @@ pub mod pallet { let deposit = Self::candidacy_bond(); // First authored block is current block plus kick threshold to handle session delay - let incoming = CandidateInfo { who: who.clone(), deposit }; - - let current_count = - >::try_mutate(|candidates| -> Result { - if candidates.iter().any(|candidate| candidate.who == who) { - Err(Error::::AlreadyCandidate)? 
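The kick performed by `set_candidacy_bond` above relies on the same ordering: every candidate whose deposit is below a raised bond forms a prefix of the ascending list. A rough standalone sketch, with made-up numbers that mirror the scenarios exercised by the tests later in this diff (hypothetical `kicked_for_new_bond` helper, not pallet code):

    // Illustration only: deposits are kept ascending, so the candidates to kick
    // for a raised bond are exactly the prefix strictly below it.
    fn kicked_for_new_bond(deposits: &[u64], new_bond: u64) -> usize {
        deposits
            .iter()
            .position(|d| *d >= new_bond)
            .unwrap_or(deposits.len())
    }

    fn main() {
        // Deposits of candidates 3, 4 and 5 in the "different deposits" test.
        let deposits = [10, 20, 30];
        assert_eq!(kicked_for_new_bond(&deposits, 20), 1); // only the 10 bond is kicked
        assert_eq!(kicked_for_new_bond(&deposits, 40), 3); // everyone is kicked
        assert_eq!(kicked_for_new_bond(&deposits, 5), 0);  // lowering the bond kicks nobody
    }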
- } else { - T::Currency::reserve(&who, deposit)?; - candidates.try_push(incoming).map_err(|_| Error::::TooManyCandidates)?; - >::insert( - who.clone(), - frame_system::Pallet::::block_number() + T::KickThreshold::get(), - ); - Ok(candidates.len()) - } - })?; + >::try_mutate(|candidates| -> Result<(), DispatchError> { + ensure!( + !candidates.iter().any(|candidate_info| candidate_info.who == who), + Error::::AlreadyCandidate + ); + T::Currency::reserve(&who, deposit)?; + >::insert( + who.clone(), + frame_system::Pallet::::block_number() + T::KickThreshold::get(), + ); + candidates + .try_insert(0, CandidateInfo { who: who.clone(), deposit }) + .map_err(|_| Error::::InsertToCandidateListFailed)?; + Ok(()) + })?; Self::deposit_event(Event::CandidateAdded { account_id: who, deposit }); - Ok(Some(T::WeightInfo::register_as_candidate(current_count as u32)).into()) + // Safe to do unchecked add here because we ensure above that `length < + // T::MaxCandidates::get()`, and since `T::MaxCandidates` is `u32` it can be at most + // `u32::MAX`, therefore `length + 1` cannot overflow. + Ok(Some(T::WeightInfo::register_as_candidate(length + 1)).into()) } /// Deregister `origin` as a collator candidate. Note that the collator can only leave on @@ -468,13 +557,14 @@ pub mod pallet { pub fn leave_intent(origin: OriginFor) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; ensure!( - Self::eligible_collators() > T::MinEligibleCollators::get() as usize, + Self::eligible_collators() > T::MinEligibleCollators::get(), Error::::TooFewEligibleCollators ); + let length = >::decode_len().unwrap_or_default(); // Do remove their last authored block. - let current_count = Self::try_remove_candidate(&who, true)?; + Self::try_remove_candidate(&who, true)?; - Ok(Some(T::WeightInfo::leave_intent(current_count as u32)).into()) + Ok(Some(T::WeightInfo::leave_intent(length.saturating_sub(1) as u32)).into()) } /// Add a new account `who` to the list of `Invulnerables` collators. `who` must have @@ -521,7 +611,7 @@ pub mod pallet { .unwrap_or_default() .try_into() .unwrap_or(T::MaxInvulnerables::get().saturating_sub(1)), - Candidates::::decode_len() + >::decode_len() .unwrap_or_default() .try_into() .unwrap_or(T::MaxCandidates::get()), @@ -540,7 +630,7 @@ pub mod pallet { T::UpdateOrigin::ensure_origin(origin)?; ensure!( - Self::eligible_collators() > T::MinEligibleCollators::get() as usize, + Self::eligible_collators() > T::MinEligibleCollators::get(), Error::::TooFewEligibleCollators ); @@ -554,6 +644,154 @@ pub mod pallet { Self::deposit_event(Event::InvulnerableRemoved { account_id: who }); Ok(()) } + + /// Update the candidacy bond of collator candidate `origin` to a new amount `new_deposit`. + /// + /// Setting a `new_deposit` that is lower than the current deposit while `origin` is + /// occupying a top-`DesiredCandidates` slot is not allowed. + /// + /// This call will fail if `origin` is not a collator candidate, the updated bond is lower + /// than the minimum candidacy bond, and/or the amount cannot be reserved. + #[pallet::call_index(7)] + #[pallet::weight(T::WeightInfo::update_bond(T::MaxCandidates::get()))] + pub fn update_bond( + origin: OriginFor, + new_deposit: BalanceOf, + ) -> DispatchResultWithPostInfo { + let who = ensure_signed(origin)?; + ensure!(new_deposit >= >::get(), Error::::DepositTooLow); + // The function below will try to mutate the `CandidateList` entry for the caller to + // update their deposit to the new value of `new_deposit`. 
The return value is the + // position of the entry in the list, used for weight calculation. + let length = + >::try_mutate(|candidates| -> Result { + let idx = candidates + .iter() + .position(|candidate_info| candidate_info.who == who) + .ok_or_else(|| Error::::NotCandidate)?; + let candidate_count = candidates.len(); + // Remove the candidate from the list. + let mut info = candidates.remove(idx); + let old_deposit = info.deposit; + if new_deposit > old_deposit { + T::Currency::reserve(&who, new_deposit - old_deposit)?; + } else if new_deposit < old_deposit { + // Casting `u32` to `usize` should be safe on all machines running this. + ensure!( + idx.saturating_add(>::get() as usize) < + candidate_count, + Error::::InvalidUnreserve + ); + T::Currency::unreserve(&who, old_deposit - new_deposit); + } else { + return Err(Error::::IdenticalDeposit.into()) + } + + // Update the deposit and insert the candidate in the correct spot in the list. + info.deposit = new_deposit; + let new_pos = candidates + .iter() + .position(|candidate| candidate.deposit >= new_deposit) + .unwrap_or_else(|| candidates.len()); + candidates + .try_insert(new_pos, info) + .map_err(|_| Error::::InsertToCandidateListFailed)?; + + Ok(candidate_count) + })?; + + Self::deposit_event(Event::CandidateBondUpdated { + account_id: who, + deposit: new_deposit, + }); + Ok(Some(T::WeightInfo::update_bond(length as u32)).into()) + } + + /// The caller `origin` replaces a candidate `target` in the collator candidate list by + /// reserving `deposit`. The amount `deposit` reserved by the caller must be greater than + /// the existing bond of the target it is trying to replace. + /// + /// This call will fail if the caller is already a collator candidate or invulnerable, the + /// caller does not have registered session keys, the target is not a collator candidate, + /// and/or the `deposit` amount cannot be reserved. + #[pallet::call_index(8)] + #[pallet::weight(T::WeightInfo::take_candidate_slot(T::MaxCandidates::get()))] + pub fn take_candidate_slot( + origin: OriginFor, + deposit: BalanceOf, + target: T::AccountId, + ) -> DispatchResultWithPostInfo { + let who = ensure_signed(origin)?; + + ensure!(!Self::invulnerables().contains(&who), Error::::AlreadyInvulnerable); + ensure!(deposit >= Self::candidacy_bond(), Error::::InsufficientBond); + + let validator_key = T::ValidatorIdOf::convert(who.clone()) + .ok_or(Error::::NoAssociatedValidatorId)?; + ensure!( + T::ValidatorRegistration::is_registered(&validator_key), + Error::::ValidatorNotRegistered + ); + + let length = >::decode_len().unwrap_or_default(); + // The closure below iterates through all elements of the candidate list to ensure that + // the caller isn't already a candidate and to find the target it's trying to replace in + // the list. The return value is a tuple of the position of the candidate to be replaced + // in the list along with its candidate information. + let target_info = >::try_mutate( + |candidates| -> Result>, DispatchError> { + // Find the position in the list of the candidate that is being replaced. + let mut target_info_idx = None; + let mut new_info_idx = None; + for (idx, candidate_info) in candidates.iter().enumerate() { + // While iterating through the candidates trying to find the target, + // also ensure on the same pass that our caller isn't already a + // candidate. 
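The `InvalidUnreserve` guard in `update_bond` above reduces to a single comparison: with an ascending list, the last `DesiredCandidates` entries are the prospective winners, and only candidates outside that suffix may lower their deposit. A small sketch under that reading (hypothetical `may_decrease` helper; the value 2 for the desired slots is the one used by the mock runtime in the tests):

    // Illustration only: `idx` is the candidate's position in the ascending list
    // and `len` the list length, both taken before the entry is removed.
    fn may_decrease(idx: usize, desired_candidates: usize, len: usize) -> bool {
        idx + desired_candidates < len
    }

    fn main() {
        // Three candidates, two desired collator slots: only the candidate at
        // index 0 (lowest deposit) may lower its bid.
        assert!(may_decrease(0, 2, 3));
        assert!(!may_decrease(1, 2, 3));
        assert!(!may_decrease(2, 2, 3));
    }

In the pallet this is additionally gated by the `new_deposit >= CandidacyBond` check above, so a decrease can never go below the minimum bond.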
+ ensure!(candidate_info.who != who, Error::::AlreadyCandidate); + // If we find our target, update the position but do not stop the + // iteration since we're also checking that the caller isn't already a + // candidate. + if candidate_info.who == target { + target_info_idx = Some(idx); + } + // Find the spot where the new candidate would be inserted in the current + // version of the list. + if new_info_idx.is_none() && candidate_info.deposit >= deposit { + new_info_idx = Some(idx); + } + } + let target_info_idx = + target_info_idx.ok_or(Error::::TargetIsNotCandidate)?; + + // Remove the old candidate from the list. + let target_info = candidates.remove(target_info_idx); + ensure!(deposit > target_info.deposit, Error::::InsufficientBond); + + // We have removed one element before `new_info_idx`, so the position we have to + // insert to is reduced by 1. + let new_pos = new_info_idx + .map(|i| i.saturating_sub(1)) + .unwrap_or_else(|| candidates.len()); + let new_info = CandidateInfo { who: who.clone(), deposit }; + // Insert the new candidate in the correct spot in the list. + candidates + .try_insert(new_pos, new_info) + .expect("candidate count previously decremented; qed"); + + Ok(target_info) + }, + )?; + T::Currency::reserve(&who, deposit)?; + T::Currency::unreserve(&target_info.who, target_info.deposit); + >::remove(target_info.who.clone()); + >::insert( + who.clone(), + frame_system::Pallet::::block_number() + T::KickThreshold::get(), + ); + + Self::deposit_event(Event::CandidateReplaced { old: target, new: who, deposit }); + Ok(Some(T::WeightInfo::take_candidate_slot(length as u32)).into()) + } } impl Pallet { @@ -564,84 +802,122 @@ pub mod pallet { /// Return the total number of accounts that are eligible collators (candidates and /// invulnerables). - fn eligible_collators() -> usize { - Candidates::::decode_len() + fn eligible_collators() -> u32 { + >::decode_len() .unwrap_or_default() .saturating_add(Invulnerables::::decode_len().unwrap_or_default()) + .try_into() + .unwrap_or(u32::MAX) } /// Removes a candidate if they exist and sends them back their deposit. fn try_remove_candidate( who: &T::AccountId, remove_last_authored: bool, - ) -> Result { - let current_count = - >::try_mutate(|candidates| -> Result { - let index = candidates - .iter() - .position(|candidate| candidate.who == *who) - .ok_or(Error::::NotCandidate)?; - let candidate = candidates.remove(index); - T::Currency::unreserve(who, candidate.deposit); - if remove_last_authored { - >::remove(who.clone()) - }; - Ok(candidates.len()) - })?; + ) -> Result<(), DispatchError> { + >::try_mutate(|candidates| -> Result<(), DispatchError> { + let idx = candidates + .iter() + .position(|candidate_info| candidate_info.who == *who) + .ok_or(Error::::NotCandidate)?; + let deposit = candidates[idx].deposit; + T::Currency::unreserve(who, deposit); + candidates.remove(idx); + if remove_last_authored { + >::remove(who.clone()) + }; + Ok(()) + })?; Self::deposit_event(Event::CandidateRemoved { account_id: who.clone() }); - Ok(current_count) + Ok(()) } /// Assemble the current set of candidates and invulnerables into the next collator set. /// /// This is done on the fly, as frequent as we are told to do so, as the session manager. - pub fn assemble_collators( - candidates: BoundedVec, - ) -> Vec { + pub fn assemble_collators() -> Vec { + // Casting `u32` to `usize` should be safe on all machines running this. 
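A rough model of the `take_candidate_slot` path above, again with plain tuples rather than the pallet's types: the target is removed first, and because its deposit is strictly below the incoming one, recomputing the insertion point after removal lands on the same slot the pallet reaches by subtracting one from the precomputed index (hypothetical `replace_candidate` helper, not pallet code):

    // Illustration only: replace `target` with (`who`, `deposit`) in an
    // ascending-by-deposit list while keeping the order intact.
    fn replace_candidate(
        list: &mut Vec<(u64, u64)>,
        target: u64,
        who: u64,
        deposit: u64,
    ) -> Result<(), &'static str> {
        let target_idx = list
            .iter()
            .position(|(id, _)| *id == target)
            .ok_or("target is not a candidate")?;
        if deposit <= list[target_idx].1 {
            return Err("deposit must beat the target's bond");
        }
        list.remove(target_idx);
        let pos = list
            .iter()
            .position(|(_, d)| *d >= deposit)
            .unwrap_or(list.len());
        list.insert(pos, (who, deposit));
        Ok(())
    }

    fn main() {
        // Mirrors `take_candidate_slot_works`: 6 replaces 4 with a 50 deposit.
        let mut list = vec![(5, 10), (4, 10), (3, 10)];
        replace_candidate(&mut list, 4, 6, 50).unwrap();
        assert_eq!(list, vec![(5, 10), (3, 10), (6, 50)]);
    }

The pallet additionally reserves the caller's deposit and releases the target's only after the list mutation has succeeded.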
+ let desired_candidates = >::get() as usize; let mut collators = Self::invulnerables().to_vec(); - collators.extend(candidates); + collators.extend( + >::get() + .iter() + .rev() + .cloned() + .take(desired_candidates) + .map(|candidate_info| candidate_info.who), + ); collators } /// Kicks out candidates that did not produce a block in the kick threshold and refunds /// their deposits. - pub fn kick_stale_candidates( - candidates: BoundedVec>, T::MaxCandidates>, - ) -> BoundedVec { + /// + /// Return value is the number of candidates left in the list. + pub fn kick_stale_candidates(candidates: impl IntoIterator) -> u32 { let now = frame_system::Pallet::::block_number(); let kick_threshold = T::KickThreshold::get(); let min_collators = T::MinEligibleCollators::get(); candidates .into_iter() .filter_map(|c| { - let last_block = >::get(c.who.clone()); + let last_block = >::get(c.clone()); let since_last = now.saturating_sub(last_block); - let is_invulnerable = Self::invulnerables().contains(&c.who); + let is_invulnerable = Self::invulnerables().contains(&c); let is_lazy = since_last >= kick_threshold; if is_invulnerable { - // They are invulnerable. No reason for them to be in Candidates also. + // They are invulnerable. No reason for them to be in `CandidateList` also. // We don't even care about the min collators here, because an Account // should not be a collator twice. - let _ = Self::try_remove_candidate(&c.who, false); + let _ = Self::try_remove_candidate(&c, false); None } else { - if Self::eligible_collators() <= min_collators as usize || !is_lazy { + if Self::eligible_collators() <= min_collators || !is_lazy { // Either this is a good collator (not lazy) or we are at the minimum // that the system needs. They get to stay. - Some(c.who) + Some(c) } else { // This collator has not produced a block recently enough. Bye bye. - let _ = Self::try_remove_candidate(&c.who, true); + let _ = Self::try_remove_candidate(&c, true); None } } }) - .collect::>() + .count() .try_into() .expect("filter_map operation can't result in a bounded vec larger than its original; qed") } + + /// Ensure the correctness of the state of this pallet. + /// + /// This should be valid before or after each state transition of this pallet. + /// + /// # Invariants + /// + /// ## `DesiredCandidates` + /// + /// * The current desired candidate count should not exceed the candidate list capacity. + /// * The number of selected candidates together with the invulnerables must be greater than + /// or equal to the minimum number of eligible collators. + #[cfg(any(test, feature = "try-runtime"))] + pub fn do_try_state() -> Result<(), sp_runtime::TryRuntimeError> { + let desired_candidates = >::get(); + + frame_support::ensure!( + desired_candidates <= T::MaxCandidates::get(), + "Shouldn't demand more candidates than the pallet config allows." + ); + + frame_support::ensure!( + desired_candidates.saturating_add(T::MaxInvulnerables::get()) >= + T::MinEligibleCollators::get(), + "Invulnerable set together with desired candidates should be able to meet the collator quota." 
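Since the list is ascending, `assemble_collators` above reads the winners from its tail. A compact model of that selection (hypothetical `assemble` helper, not pallet code):

    // Illustration only: invulnerables first, then the highest-deposit
    // candidates (the tail of the ascending list), capped at `desired`.
    fn assemble(invulnerables: &[u64], candidates: &[(u64, u64)], desired: usize) -> Vec<u64> {
        let mut collators = invulnerables.to_vec();
        collators.extend(candidates.iter().rev().take(desired).map(|(who, _)| *who));
        collators
    }

    fn main() {
        // Mirrors `session_management_increase_bid_with_list_update`:
        // candidate 5 outbids with 60, so the session picks 5 and then 3.
        let candidates = vec![(4, 10), (3, 10), (5, 60)];
        assert_eq!(assemble(&[1, 2], &candidates, 2), vec![1, 2, 5, 3]);
    }

If fewer candidates exist than desired slots, `take(desired)` simply yields all of them, so no special casing is needed.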
+ ); + + Ok(()) + } } /// Keep track of number of authored blocks per authority, uncles are counted as well since @@ -677,14 +953,23 @@ pub mod pallet { >::block_number(), ); - let candidates = Self::candidates(); - let candidates_len_before = candidates.len(); - let active_candidates = Self::kick_stale_candidates(candidates); - let removed = candidates_len_before - active_candidates.len(); - let result = Self::assemble_collators(active_candidates); + // The `expect` below is safe because the list is a `BoundedVec` with a max size of + // `T::MaxCandidates`, which is a `u32`. When `decode_len` returns `Some(len)`, `len` + // must be valid and at most `u32::MAX`, which must always be able to convert to `u32`. + let candidates_len_before: u32 = >::decode_len() + .unwrap_or_default() + .try_into() + .expect("length is at most `T::MaxCandidates`, so it must fit in `u32`; qed"); + let active_candidates_count = Self::kick_stale_candidates( + >::get() + .iter() + .map(|candidate_info| candidate_info.who.clone()), + ); + let removed = candidates_len_before.saturating_sub(active_candidates_count); + let result = Self::assemble_collators(); frame_system::Pallet::::register_extra_weight_unchecked( - T::WeightInfo::new_session(candidates_len_before as u32, removed as u32), + T::WeightInfo::new_session(candidates_len_before, removed), DispatchClass::Mandatory, ); Some(result) diff --git a/cumulus/pallets/collator-selection/src/mock.rs b/cumulus/pallets/collator-selection/src/mock.rs index 44d531c971ee3bee08f572ac797a93d1759a5cfa..46143674bb39991f55394b1c730bae38d9da29e1 100644 --- a/cumulus/pallets/collator-selection/src/mock.rs +++ b/cumulus/pallets/collator-selection/src/mock.rs @@ -92,6 +92,7 @@ impl pallet_balances::Config for Test { type MaxReserves = MaxReserves; type ReserveIdentifier = [u8; 8]; type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); type MaxHolds = ConstU32<0>; type MaxFreezes = ConstU32<0>; diff --git a/cumulus/pallets/collator-selection/src/tests.rs b/cumulus/pallets/collator-selection/src/tests.rs index d4dae513df375145c35a83d7af274e581a94a60b..ed2044ccdfad7def46ab92180e9b177643ac8679 100644 --- a/cumulus/pallets/collator-selection/src/tests.rs +++ b/cumulus/pallets/collator-selection/src/tests.rs @@ -28,7 +28,7 @@ fn basic_setup_works() { assert_eq!(CollatorSelection::desired_candidates(), 2); assert_eq!(CollatorSelection::candidacy_bond(), 10); - assert!(CollatorSelection::candidates().is_empty()); + assert_eq!(>::get().iter().count(), 0); // genesis should sort input assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); }); @@ -202,7 +202,8 @@ fn candidate_to_invulnerable_works() { initialize_to_block(1); assert_eq!(CollatorSelection::desired_candidates(), 2); assert_eq!(CollatorSelection::candidacy_bond(), 10); - assert_eq!(CollatorSelection::candidates(), Vec::new()); + + assert_eq!(>::get().iter().count(), 0); assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); assert_eq!(Balances::free_balance(3), 100); @@ -226,7 +227,7 @@ fn candidate_to_invulnerable_works() { )); assert!(CollatorSelection::invulnerables().to_vec().contains(&3)); assert_eq!(Balances::free_balance(3), 100); - assert_eq!(CollatorSelection::candidates().len(), 1); + assert_eq!(>::get().iter().count(), 1); assert_ok!(CollatorSelection::add_invulnerable( RuntimeOrigin::signed(RootAccount::get()), @@ -240,7 +241,8 @@ fn candidate_to_invulnerable_works() { )); assert!(CollatorSelection::invulnerables().to_vec().contains(&4)); 
assert_eq!(Balances::free_balance(4), 100); - assert_eq!(CollatorSelection::candidates().len(), 0); + + assert_eq!(>::get().iter().count(), 0); }); } @@ -266,42 +268,230 @@ fn set_desired_candidates_works() { } #[test] -fn set_candidacy_bond() { +fn set_candidacy_bond_empty_candidate_list() { new_test_ext().execute_with(|| { // given assert_eq!(CollatorSelection::candidacy_bond(), 10); + assert!(>::get().is_empty()); - // can set + // can decrease without candidates assert_ok!(CollatorSelection::set_candidacy_bond( RuntimeOrigin::signed(RootAccount::get()), 7 )); assert_eq!(CollatorSelection::candidacy_bond(), 7); + assert!(>::get().is_empty()); // rejects bad origin. assert_noop!(CollatorSelection::set_candidacy_bond(RuntimeOrigin::signed(1), 8), BadOrigin); + + // can increase without candidates + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 20 + )); + assert!(>::get().is_empty()); + assert_eq!(CollatorSelection::candidacy_bond(), 20); }); } #[test] -fn cannot_register_candidate_if_too_many() { +fn set_candidacy_bond_with_one_candidate() { new_test_ext().execute_with(|| { - // reset desired candidates: - >::put(0); + // given + assert_eq!(CollatorSelection::candidacy_bond(), 10); + assert!(>::get().is_empty()); - // can't accept anyone anymore. - assert_noop!( - CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3)), - Error::::TooManyCandidates, + let candidate_3 = CandidateInfo { who: 3, deposit: 10 }; + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_eq!(>::get(), vec![candidate_3.clone()]); + + // can decrease with one candidate + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 7 + )); + assert_eq!(CollatorSelection::candidacy_bond(), 7); + assert_eq!(>::get(), vec![candidate_3.clone()]); + + // can increase up to initial deposit + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 10 + )); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + assert_eq!(>::get(), vec![candidate_3.clone()]); + + // can increase past initial deposit, should kick existing candidate + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 20 + )); + assert!(>::get().is_empty()); + }); +} + +#[test] +fn set_candidacy_bond_with_many_candidates_same_deposit() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::candidacy_bond(), 10); + assert!(>::get().is_empty()); + + let candidate_3 = CandidateInfo { who: 3, deposit: 10 }; + let candidate_4 = CandidateInfo { who: 4, deposit: 10 }; + let candidate_5 = CandidateInfo { who: 5, deposit: 10 }; + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + assert_eq!( + >::get(), + vec![candidate_5.clone(), candidate_4.clone(), candidate_3.clone()] ); - // reset desired candidates: - >::put(1); + // can decrease with multiple candidates + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 7 + )); + assert_eq!(CollatorSelection::candidacy_bond(), 7); + assert_eq!( + >::get(), + vec![candidate_5.clone(), candidate_4.clone(), candidate_3.clone()] + ); + + // can increase up to initial deposit + assert_ok!(CollatorSelection::set_candidacy_bond( + 
RuntimeOrigin::signed(RootAccount::get()), + 10 + )); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + assert_eq!( + >::get(), + vec![candidate_5.clone(), candidate_4.clone(), candidate_3.clone()] + ); + + // can increase past initial deposit, should kick existing candidates + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 20 + )); + assert!(>::get().is_empty()); + }); +} + +#[test] +fn set_candidacy_bond_with_many_candidates_different_deposits() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::candidacy_bond(), 10); + assert!(>::get().is_empty()); + + let candidate_3 = CandidateInfo { who: 3, deposit: 10 }; + let candidate_4 = CandidateInfo { who: 4, deposit: 20 }; + let candidate_5 = CandidateInfo { who: 5, deposit: 30 }; + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 30)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 20)); + assert_eq!( + >::get(), + vec![candidate_3.clone(), candidate_4.clone(), candidate_5.clone()] + ); + + // can decrease with multiple candidates + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 7 + )); + assert_eq!(CollatorSelection::candidacy_bond(), 7); + assert_eq!( + >::get(), + vec![candidate_3.clone(), candidate_4.clone(), candidate_5.clone()] + ); + // can increase up to initial deposit + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 10 + )); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + assert_eq!( + >::get(), + vec![candidate_3.clone(), candidate_4.clone(), candidate_5.clone()] + ); + + // can increase to 4's deposit, should kick 3 + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 20 + )); + assert_eq!(CollatorSelection::candidacy_bond(), 20); + assert_eq!( + >::get(), + vec![candidate_4.clone(), candidate_5.clone()] + ); + + // can increase past 4's deposit, should kick 4 + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 25 + )); + assert_eq!(CollatorSelection::candidacy_bond(), 25); + assert_eq!(>::get(), vec![candidate_5.clone()]); + + // lowering the minimum deposit should have no effect + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 5 + )); + assert_eq!(CollatorSelection::candidacy_bond(), 5); + assert_eq!(>::get(), vec![candidate_5.clone()]); + + // add 3 and 4 back but with higher deposits than minimum + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 10)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 20)); + assert_eq!( + >::get(), + vec![candidate_3.clone(), candidate_4.clone(), candidate_5.clone()] + ); + + // can increase the deposit above the current max in the list, all candidates should be + // kicked + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 40 + )); + assert_eq!(CollatorSelection::candidacy_bond(), 40); + assert!(>::get().is_empty()); + }); +} + 
+#[test] +fn cannot_register_candidate_if_too_many() { + new_test_ext().execute_with(|| { + >::put(1); + + // MaxCandidates: u32 = 20 + // Aside from 3, 4, and 5, create enough accounts to have 21 potential + // candidates. + for i in 6..=23 { + Balances::make_free_balance_be(&i, 100); + let key = MockSessionKeys { aura: UintAuthorityId(i) }; + Session::set_keys(RuntimeOrigin::signed(i).into(), key, Vec::new()).unwrap(); + } + + for c in 3..=22 { + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(c))); + } - // but no more assert_noop!( - CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5)), + CollatorSelection::register_as_candidate(RuntimeOrigin::signed(23)), Error::::TooManyCandidates, ); }) @@ -310,7 +500,7 @@ fn cannot_register_candidate_if_too_many() { #[test] fn cannot_unregister_candidate_if_too_few() { new_test_ext().execute_with(|| { - assert_eq!(CollatorSelection::candidates(), Vec::new()); + assert_eq!(>::get().iter().count(), 0); assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); assert_ok!(CollatorSelection::remove_invulnerable( RuntimeOrigin::signed(RootAccount::get()), @@ -368,8 +558,12 @@ fn cannot_register_dupe_candidate() { new_test_ext().execute_with(|| { // can add 3 as candidate assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + // tuple of (id, deposit). let addition = CandidateInfo { who: 3, deposit: 10 }; - assert_eq!(CollatorSelection::candidates(), vec![addition]); + assert_eq!( + >::get().iter().cloned().collect::>(), + vec![addition] + ); assert_eq!(CollatorSelection::last_authored_block(3), 10); assert_eq!(Balances::free_balance(3), 90); @@ -404,7 +598,8 @@ fn register_as_candidate_works() { // given assert_eq!(CollatorSelection::desired_candidates(), 2); assert_eq!(CollatorSelection::candidacy_bond(), 10); - assert_eq!(CollatorSelection::candidates(), Vec::new()); + + assert_eq!(>::get().iter().count(), 0); assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); // take two endowed, non-invulnerables accounts. @@ -417,140 +612,888 @@ fn register_as_candidate_works() { assert_eq!(Balances::free_balance(3), 90); assert_eq!(Balances::free_balance(4), 90); - assert_eq!(CollatorSelection::candidates().len(), 2); + assert_eq!(>::get().iter().count(), 2); }); } #[test] -fn leave_intent() { +fn cannot_take_candidate_slot_if_invulnerable() { new_test_ext().execute_with(|| { - // register a candidate. - assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); - assert_eq!(Balances::free_balance(3), 90); - - // register too so can leave above min candidates - assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); - assert_eq!(Balances::free_balance(5), 90); + assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); - // cannot leave if not candidate. + // can't 1 because it is invulnerable. 
assert_noop!( - CollatorSelection::leave_intent(RuntimeOrigin::signed(4)), - Error::::NotCandidate + CollatorSelection::take_candidate_slot(RuntimeOrigin::signed(1), 50u64.into(), 2), + Error::::AlreadyInvulnerable, ); - - // bond is returned - assert_ok!(CollatorSelection::leave_intent(RuntimeOrigin::signed(3))); - assert_eq!(Balances::free_balance(3), 100); - assert_eq!(CollatorSelection::last_authored_block(3), 0); - }); + }) } #[test] -fn authorship_event_handler() { +fn cannot_take_candidate_slot_if_keys_not_registered() { new_test_ext().execute_with(|| { - // put 100 in the pot + 5 for ED - Balances::make_free_balance_be(&CollatorSelection::account_id(), 105); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_noop!( + CollatorSelection::take_candidate_slot(RuntimeOrigin::signed(42), 50u64.into(), 3), + Error::::ValidatorNotRegistered + ); + }) +} - // 4 is the default author. - assert_eq!(Balances::free_balance(4), 100); +#[test] +fn cannot_take_candidate_slot_if_duplicate() { + new_test_ext().execute_with(|| { + // can add 3 as candidate + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); - // triggers `note_author` - Authorship::on_initialize(1); + // tuple of (id, deposit). + let candidate_3 = CandidateInfo { who: 3, deposit: 10 }; + let candidate_4 = CandidateInfo { who: 4, deposit: 10 }; + let actual_candidates = + >::get().iter().cloned().collect::>(); + assert_eq!(actual_candidates, vec![candidate_4, candidate_3]); + assert_eq!(CollatorSelection::last_authored_block(3), 10); + assert_eq!(CollatorSelection::last_authored_block(4), 10); + assert_eq!(Balances::free_balance(3), 90); - let collator = CandidateInfo { who: 4, deposit: 10 }; + // but no more + assert_noop!( + CollatorSelection::take_candidate_slot(RuntimeOrigin::signed(3), 50u64.into(), 4), + Error::::AlreadyCandidate, + ); + }) +} - assert_eq!(CollatorSelection::candidates(), vec![collator]); - assert_eq!(CollatorSelection::last_authored_block(4), 0); +#[test] +fn cannot_take_candidate_slot_if_target_invalid() { + new_test_ext().execute_with(|| { + // can add 3 as candidate + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + // tuple of (id, deposit). + let candidate_3 = CandidateInfo { who: 3, deposit: 10 }; + assert_eq!( + >::get().iter().cloned().collect::>(), + vec![candidate_3] + ); + assert_eq!(CollatorSelection::last_authored_block(3), 10); + assert_eq!(Balances::free_balance(3), 90); + assert_eq!(Balances::free_balance(4), 100); - // half of the pot goes to the collator who's the author (4 in tests). - assert_eq!(Balances::free_balance(4), 140); - // half + ED stays. - assert_eq!(Balances::free_balance(CollatorSelection::account_id()), 55); - }); + assert_noop!( + CollatorSelection::take_candidate_slot(RuntimeOrigin::signed(4), 50u64.into(), 5), + Error::::TargetIsNotCandidate, + ); + }) } #[test] -fn fees_edgecases() { +fn cannot_take_candidate_slot_if_poor() { new_test_ext().execute_with(|| { - // Nothing panics, no reward when no ED in balance - Authorship::on_initialize(1); - // put some money into the pot at ED - Balances::make_free_balance_be(&CollatorSelection::account_id(), 5); - // 4 is the default author. 
- assert_eq!(Balances::free_balance(4), 100); assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); - // triggers `note_author` - Authorship::on_initialize(1); + assert_eq!(Balances::free_balance(3), 100); + assert_eq!(Balances::free_balance(33), 0); - let collator = CandidateInfo { who: 4, deposit: 10 }; + // works + assert_ok!(CollatorSelection::take_candidate_slot( + RuntimeOrigin::signed(3), + 20u64.into(), + 4 + )); - assert_eq!(CollatorSelection::candidates(), vec![collator]); - assert_eq!(CollatorSelection::last_authored_block(4), 0); - // Nothing received - assert_eq!(Balances::free_balance(4), 90); - // all fee stays - assert_eq!(Balances::free_balance(CollatorSelection::account_id()), 5); + // poor + assert_noop!( + CollatorSelection::take_candidate_slot(RuntimeOrigin::signed(33), 30u64.into(), 3), + BalancesError::::InsufficientBalance, + ); }); } #[test] -fn session_management_works() { +fn cannot_take_candidate_slot_if_insufficient_deposit() { new_test_ext().execute_with(|| { - initialize_to_block(1); - - assert_eq!(SessionChangeBlock::get(), 0); - assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); - - initialize_to_block(4); - - assert_eq!(SessionChangeBlock::get(), 0); - assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); - - // add a new collator assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); - - // session won't see this. - assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); - // but we have a new candidate. - assert_eq!(CollatorSelection::candidates().len(), 1); - - initialize_to_block(10); - assert_eq!(SessionChangeBlock::get(), 10); - // pallet-session has 1 session delay; current validators are the same. - assert_eq!(Session::validators(), vec![1, 2]); - // queued ones are changed, and now we have 3. - assert_eq!(Session::queued_keys().len(), 3); - // session handlers (aura, et. al.) cannot see this yet. - assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); - - initialize_to_block(20); - assert_eq!(SessionChangeBlock::get(), 20); - // changed are now reflected to session handlers. 
- assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 3]); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 60u64.into())); + assert_eq!(Balances::free_balance(3), 40); + assert_eq!(Balances::free_balance(4), 100); + assert_noop!( + CollatorSelection::take_candidate_slot(RuntimeOrigin::signed(4), 5u64.into(), 3), + Error::::InsufficientBond, + ); + assert_eq!(Balances::free_balance(3), 40); + assert_eq!(Balances::free_balance(4), 100); }); } #[test] -fn kick_mechanism() { +fn cannot_take_candidate_slot_if_deposit_less_than_target() { new_test_ext().execute_with(|| { - // add a new collator assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); - assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); - initialize_to_block(10); - assert_eq!(CollatorSelection::candidates().len(), 2); - initialize_to_block(20); - assert_eq!(SessionChangeBlock::get(), 20); - // 4 authored this block, gets to stay 3 was kicked - assert_eq!(CollatorSelection::candidates().len(), 1); - // 3 will be kicked after 1 session delay - assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 3, 4]); - let collator = CandidateInfo { who: 4, deposit: 10 }; - assert_eq!(CollatorSelection::candidates(), vec![collator]); - assert_eq!(CollatorSelection::last_authored_block(4), 20); - initialize_to_block(30); - // 3 gets kicked after 1 session delay - assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 4]); - // kicked collator gets funds back + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 60u64.into())); + assert_eq!(Balances::free_balance(3), 40); + assert_eq!(Balances::free_balance(4), 100); + assert_noop!( + CollatorSelection::take_candidate_slot(RuntimeOrigin::signed(4), 20u64.into(), 3), + Error::::InsufficientBond, + ); + assert_eq!(Balances::free_balance(3), 40); + assert_eq!(Balances::free_balance(4), 100); + }); +} + +#[test] +fn take_candidate_slot_works() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::desired_candidates(), 2); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + + assert_eq!(>::get().iter().count(), 0); + assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); + + // take two endowed, non-invulnerables accounts. + assert_eq!(Balances::free_balance(3), 100); + assert_eq!(Balances::free_balance(4), 100); + assert_eq!(Balances::free_balance(5), 100); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + + assert_eq!(Balances::free_balance(3), 90); + assert_eq!(Balances::free_balance(4), 90); + assert_eq!(Balances::free_balance(5), 90); + + assert_eq!(>::get().iter().count(), 3); + + Balances::make_free_balance_be(&6, 100); + let key = MockSessionKeys { aura: UintAuthorityId(6) }; + Session::set_keys(RuntimeOrigin::signed(6).into(), key, Vec::new()).unwrap(); + + assert_ok!(CollatorSelection::take_candidate_slot( + RuntimeOrigin::signed(6), + 50u64.into(), + 4 + )); + + assert_eq!(Balances::free_balance(3), 90); + assert_eq!(Balances::free_balance(4), 100); + assert_eq!(Balances::free_balance(5), 90); + assert_eq!(Balances::free_balance(6), 50); + + // tuple of (id, deposit). 
+ let candidate_3 = CandidateInfo { who: 3, deposit: 10 }; + let candidate_6 = CandidateInfo { who: 6, deposit: 50 }; + let candidate_5 = CandidateInfo { who: 5, deposit: 10 }; + let mut actual_candidates = + >::get().iter().cloned().collect::>(); + actual_candidates.sort_by(|info_1, info_2| info_1.deposit.cmp(&info_2.deposit)); + assert_eq!( + >::get().iter().cloned().collect::>(), + vec![candidate_5, candidate_3, candidate_6] + ); + }); +} + +#[test] +fn increase_candidacy_bond_non_candidate_account() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::desired_candidates(), 2); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + + assert_eq!(>::get().iter().count(), 0); + assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + + assert_noop!( + CollatorSelection::update_bond(RuntimeOrigin::signed(5), 20), + Error::::NotCandidate + ); + }); +} + +#[test] +fn increase_candidacy_bond_insufficient_balance() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::desired_candidates(), 2); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + + assert_eq!(>::get().iter().count(), 0); + assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); + + // take two endowed, non-invulnerables accounts. + assert_eq!(Balances::free_balance(3), 100); + assert_eq!(Balances::free_balance(4), 100); + assert_eq!(Balances::free_balance(5), 100); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + + assert_eq!(Balances::free_balance(3), 90); + assert_eq!(Balances::free_balance(4), 90); + assert_eq!(Balances::free_balance(5), 90); + + assert_noop!( + CollatorSelection::update_bond(RuntimeOrigin::signed(3), 110), + BalancesError::::InsufficientBalance + ); + + assert_eq!(Balances::free_balance(3), 90); + }); +} + +#[test] +fn increase_candidacy_bond_works() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::desired_candidates(), 2); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + + assert_eq!(>::get().iter().count(), 0); + assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); + + // take three endowed, non-invulnerables accounts. 
+ assert_eq!(Balances::free_balance(3), 100); + assert_eq!(Balances::free_balance(4), 100); + assert_eq!(Balances::free_balance(5), 100); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + + assert_eq!(Balances::free_balance(3), 90); + assert_eq!(Balances::free_balance(4), 90); + assert_eq!(Balances::free_balance(5), 90); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 20)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 30)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 40)); + + assert_eq!(>::get().iter().count(), 3); + assert_eq!(Balances::free_balance(3), 80); + assert_eq!(Balances::free_balance(4), 70); + assert_eq!(Balances::free_balance(5), 60); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 40)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 60)); + + assert_eq!(>::get().iter().count(), 3); + assert_eq!(Balances::free_balance(3), 60); + assert_eq!(Balances::free_balance(4), 40); + assert_eq!(Balances::free_balance(5), 60); + }); +} + +#[test] +fn decrease_candidacy_bond_non_candidate_account() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::desired_candidates(), 2); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + + assert_eq!(>::get().iter().count(), 0); + assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + + assert_eq!(Balances::free_balance(5), 100); + assert_noop!( + CollatorSelection::update_bond(RuntimeOrigin::signed(5), 10), + Error::::NotCandidate + ); + assert_eq!(Balances::free_balance(5), 100); + }); +} + +#[test] +fn decrease_candidacy_bond_insufficient_funds() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::desired_candidates(), 2); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + + assert_eq!(>::get().iter().count(), 0); + assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); + + // take two endowed, non-invulnerables accounts. 
+ assert_eq!(Balances::free_balance(3), 100); + assert_eq!(Balances::free_balance(4), 100); + assert_eq!(Balances::free_balance(5), 100); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 60)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 60)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 60)); + + assert_eq!(Balances::free_balance(3), 40); + assert_eq!(Balances::free_balance(4), 40); + assert_eq!(Balances::free_balance(5), 40); + + assert_noop!( + CollatorSelection::update_bond(RuntimeOrigin::signed(3), 0), + Error::::DepositTooLow + ); + + assert_noop!( + CollatorSelection::update_bond(RuntimeOrigin::signed(4), 5), + Error::::DepositTooLow + ); + + assert_noop!( + CollatorSelection::update_bond(RuntimeOrigin::signed(5), 9), + Error::::DepositTooLow + ); + + assert_eq!(Balances::free_balance(3), 40); + assert_eq!(Balances::free_balance(4), 40); + assert_eq!(Balances::free_balance(5), 40); + }); +} + +#[test] +fn decrease_candidacy_bond_occupying_top_slot() { + new_test_ext().execute_with(|| { + assert_eq!(CollatorSelection::desired_candidates(), 2); + // Register 3 candidates. + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + // And update their bids. + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 30u64.into())); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 30u64.into())); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 60u64.into())); + + // tuple of (id, deposit). + let candidate_3 = CandidateInfo { who: 3, deposit: 30 }; + let candidate_4 = CandidateInfo { who: 4, deposit: 30 }; + let candidate_5 = CandidateInfo { who: 5, deposit: 60 }; + assert_eq!( + >::get().iter().cloned().collect::>(), + vec![candidate_4, candidate_3, candidate_5] + ); + + // Candidates 5 and 3 can't decrease their deposits because they are the 2 top candidates. + assert_noop!( + CollatorSelection::update_bond(RuntimeOrigin::signed(5), 29), + Error::::InvalidUnreserve, + ); + assert_noop!( + CollatorSelection::update_bond(RuntimeOrigin::signed(3), 29), + Error::::InvalidUnreserve, + ); + // But candidate 4 should have be able to decrease the deposit up to the minimum. + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 29u64.into())); + + // Make candidate 4 outbid candidate 3, taking their spot as the second highest bid. + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 35u64.into())); + + // tuple of (id, deposit). + let candidate_3 = CandidateInfo { who: 3, deposit: 30 }; + let candidate_4 = CandidateInfo { who: 4, deposit: 35 }; + let candidate_5 = CandidateInfo { who: 5, deposit: 60 }; + assert_eq!( + >::get().iter().cloned().collect::>(), + vec![candidate_3, candidate_4, candidate_5] + ); + + // Now candidates 5 and 4 are the 2 top candidates, so they can't decrease their deposits. 
+ assert_noop!( + CollatorSelection::update_bond(RuntimeOrigin::signed(5), 34), + Error::::InvalidUnreserve, + ); + assert_noop!( + CollatorSelection::update_bond(RuntimeOrigin::signed(4), 34), + Error::::InvalidUnreserve, + ); + // Candidate 3 should have be able to decrease the deposit up to the minimum now that + // they've fallen out of the top spots. + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 10u64.into())); + }); +} + +#[test] +fn decrease_candidacy_bond_works() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::desired_candidates(), 2); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + + assert_eq!(>::get().iter().count(), 0); + assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); + + // take three endowed, non-invulnerables accounts. + assert_eq!(Balances::free_balance(3), 100); + assert_eq!(Balances::free_balance(4), 100); + assert_eq!(Balances::free_balance(5), 100); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + + assert_eq!(Balances::free_balance(3), 90); + assert_eq!(Balances::free_balance(4), 90); + assert_eq!(Balances::free_balance(5), 90); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 20)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 30)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 40)); + + assert_eq!(>::get().iter().count(), 3); + assert_eq!(Balances::free_balance(3), 80); + assert_eq!(Balances::free_balance(4), 70); + assert_eq!(Balances::free_balance(5), 60); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 10)); + + assert_eq!(>::get().iter().count(), 3); + assert_eq!(Balances::free_balance(3), 90); + assert_eq!(Balances::free_balance(4), 70); + assert_eq!(Balances::free_balance(5), 60); + }); +} + +#[test] +fn update_candidacy_bond_with_identical_amount() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::desired_candidates(), 2); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + + assert_eq!(>::get().iter().count(), 0); + assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); + + // take three endowed, non-invulnerables accounts. 
+ assert_eq!(Balances::free_balance(3), 100); + assert_eq!(Balances::free_balance(4), 100); + assert_eq!(Balances::free_balance(5), 100); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + + assert_eq!(Balances::free_balance(3), 90); + assert_eq!(Balances::free_balance(4), 90); + assert_eq!(Balances::free_balance(5), 90); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 20)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 30)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 40)); + + assert_eq!(>::get().iter().count(), 3); + assert_eq!(Balances::free_balance(3), 80); + assert_eq!(Balances::free_balance(4), 70); + assert_eq!(Balances::free_balance(5), 60); + + assert_noop!( + CollatorSelection::update_bond(RuntimeOrigin::signed(3), 20), + Error::::IdenticalDeposit + ); + assert_eq!(Balances::free_balance(3), 80); + }); +} + +#[test] +fn candidate_list_works() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::desired_candidates(), 2); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + + assert_eq!(>::get().iter().count(), 0); + assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); + + // take three endowed, non-invulnerables accounts. + assert_eq!(Balances::free_balance(3), 100); + assert_eq!(Balances::free_balance(4), 100); + assert_eq!(Balances::free_balance(5), 100); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 20)); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 30)); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 25)); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 10)); + + let candidate_3 = CandidateInfo { who: 3, deposit: 30 }; + let candidate_4 = CandidateInfo { who: 4, deposit: 25 }; + let candidate_5 = CandidateInfo { who: 5, deposit: 10 }; + assert_eq!( + >::get().iter().cloned().collect::>(), + vec![candidate_5, candidate_4, candidate_3] + ); + }); +} + +#[test] +fn leave_intent() { + new_test_ext().execute_with(|| { + // register a candidate. + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_eq!(Balances::free_balance(3), 90); + + // register too so can leave above min candidates + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + assert_eq!(Balances::free_balance(5), 90); + + // cannot leave if not candidate. + assert_noop!( + CollatorSelection::leave_intent(RuntimeOrigin::signed(4)), + Error::::NotCandidate + ); + + // bond is returned + assert_ok!(CollatorSelection::leave_intent(RuntimeOrigin::signed(3))); + assert_eq!(Balances::free_balance(3), 100); + assert_eq!(CollatorSelection::last_authored_block(3), 0); + }); +} + +#[test] +fn authorship_event_handler() { + new_test_ext().execute_with(|| { + // put 100 in the pot + 5 for ED + Balances::make_free_balance_be(&CollatorSelection::account_id(), 105); + + // 4 is the default author. 
+ assert_eq!(Balances::free_balance(4), 100); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + // triggers `note_author` + Authorship::on_initialize(1); + + // tuple of (id, deposit). + let collator = CandidateInfo { who: 4, deposit: 10 }; + + assert_eq!( + >::get().iter().cloned().collect::>(), + vec![collator] + ); + assert_eq!(CollatorSelection::last_authored_block(4), 0); + + // half of the pot goes to the collator who's the author (4 in tests). + assert_eq!(Balances::free_balance(4), 140); + // half + ED stays. + assert_eq!(Balances::free_balance(CollatorSelection::account_id()), 55); + }); +} + +#[test] +fn fees_edgecases() { + new_test_ext().execute_with(|| { + // Nothing panics, no reward when no ED in balance + Authorship::on_initialize(1); + // put some money into the pot at ED + Balances::make_free_balance_be(&CollatorSelection::account_id(), 5); + // 4 is the default author. + assert_eq!(Balances::free_balance(4), 100); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + // triggers `note_author` + Authorship::on_initialize(1); + + // tuple of (id, deposit). + let collator = CandidateInfo { who: 4, deposit: 10 }; + + assert_eq!( + >::get().iter().cloned().collect::>(), + vec![collator] + ); + assert_eq!(CollatorSelection::last_authored_block(4), 0); + // Nothing received + assert_eq!(Balances::free_balance(4), 90); + // all fee stays + assert_eq!(Balances::free_balance(CollatorSelection::account_id()), 5); + }); +} + +#[test] +fn session_management_single_candidate() { + new_test_ext().execute_with(|| { + initialize_to_block(1); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(4); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + // add a new collator + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + + // session won't see this. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + // but we have a new candidate. + assert_eq!(>::get().iter().count(), 1); + + initialize_to_block(10); + assert_eq!(SessionChangeBlock::get(), 10); + // pallet-session has 1 session delay; current validators are the same. + assert_eq!(Session::validators(), vec![1, 2]); + // queued ones are changed, and now we have 3. + assert_eq!(Session::queued_keys().len(), 3); + // session handlers (aura, et. al.) cannot see this yet. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(20); + assert_eq!(SessionChangeBlock::get(), 20); + // changed are now reflected to session handlers. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 3]); + }); +} + +#[test] +fn session_management_max_candidates() { + new_test_ext().execute_with(|| { + initialize_to_block(1); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(4); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + + // session won't see this. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + // but we have a new candidate. 
+ assert_eq!(>::get().iter().count(), 3); + + initialize_to_block(10); + assert_eq!(SessionChangeBlock::get(), 10); + // pallet-session has 1 session delay; current validators are the same. + assert_eq!(Session::validators(), vec![1, 2]); + // queued ones are changed, and now we have 4. + assert_eq!(Session::queued_keys().len(), 4); + // session handlers (aura, et. al.) cannot see this yet. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(20); + assert_eq!(SessionChangeBlock::get(), 20); + // changed are now reflected to session handlers. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 3, 4]); + }); +} + +#[test] +fn session_management_increase_bid_with_list_update() { + new_test_ext().execute_with(|| { + initialize_to_block(1); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(4); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 60)); + + // session won't see this. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + // but we have a new candidate. + assert_eq!(>::get().iter().count(), 3); + + initialize_to_block(10); + assert_eq!(SessionChangeBlock::get(), 10); + // pallet-session has 1 session delay; current validators are the same. + assert_eq!(Session::validators(), vec![1, 2]); + // queued ones are changed, and now we have 4. + assert_eq!(Session::queued_keys().len(), 4); + // session handlers (aura, et. al.) cannot see this yet. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(20); + assert_eq!(SessionChangeBlock::get(), 20); + // changed are now reflected to session handlers. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 5, 3]); + }); +} + +#[test] +fn session_management_candidate_list_eager_sort() { + new_test_ext().execute_with(|| { + initialize_to_block(1); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(4); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 60)); + + // session won't see this. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + // but we have a new candidate. + assert_eq!(>::get().iter().count(), 3); + + initialize_to_block(10); + assert_eq!(SessionChangeBlock::get(), 10); + // pallet-session has 1 session delay; current validators are the same. + assert_eq!(Session::validators(), vec![1, 2]); + // queued ones are changed, and now we have 4. + assert_eq!(Session::queued_keys().len(), 4); + // session handlers (aura, et. al.) cannot see this yet. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(20); + assert_eq!(SessionChangeBlock::get(), 20); + // changed are now reflected to session handlers. 
+ assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 5, 3]); + }); +} + +#[test] +fn session_management_reciprocal_outbidding() { + new_test_ext().execute_with(|| { + initialize_to_block(1); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(4); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 60)); + + initialize_to_block(5); + + // candidates 3 and 4 saw they were outbid and preemptively bid more + // than 5 in the next block. + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 70)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 70)); + + // session won't see this. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + // but we have a new candidate. + assert_eq!(>::get().iter().count(), 3); + + initialize_to_block(10); + assert_eq!(SessionChangeBlock::get(), 10); + // pallet-session has 1 session delay; current validators are the same. + assert_eq!(Session::validators(), vec![1, 2]); + // queued ones are changed, and now we have 4. + assert_eq!(Session::queued_keys().len(), 4); + // session handlers (aura, et. al.) cannot see this yet. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(20); + assert_eq!(SessionChangeBlock::get(), 20); + // changed are now reflected to session handlers. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 4, 3]); + }); +} + +#[test] +fn session_management_decrease_bid_after_auction() { + new_test_ext().execute_with(|| { + initialize_to_block(1); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(4); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 60)); + + initialize_to_block(5); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 70)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 70)); + + initialize_to_block(5); + + // candidate 5 saw it was outbid and wants to take back its bid, but + // not entirely so they still keep their place in the candidate list + // in case there is an opportunity in the future. + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 10)); + + // session won't see this. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + // but we have a new candidate. + assert_eq!(>::get().iter().count(), 3); + + initialize_to_block(10); + assert_eq!(SessionChangeBlock::get(), 10); + // pallet-session has 1 session delay; current validators are the same. + assert_eq!(Session::validators(), vec![1, 2]); + // queued ones are changed, and now we have 4. + assert_eq!(Session::queued_keys().len(), 4); + // session handlers (aura, et. al.) cannot see this yet. 
+ assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(20); + assert_eq!(SessionChangeBlock::get(), 20); + // changed are now reflected to session handlers. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 4, 3]); + }); +} + +#[test] +fn kick_mechanism() { + new_test_ext().execute_with(|| { + // add a new collator + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + initialize_to_block(10); + assert_eq!(>::get().iter().count(), 2); + initialize_to_block(20); + assert_eq!(SessionChangeBlock::get(), 20); + // 4 authored this block, gets to stay 3 was kicked + assert_eq!(>::get().iter().count(), 1); + // 3 will be kicked after 1 session delay + assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 3, 4]); + // tuple of (id, deposit). + let collator = CandidateInfo { who: 4, deposit: 10 }; + assert_eq!( + >::get().iter().cloned().collect::>(), + vec![collator] + ); + assert_eq!(CollatorSelection::last_authored_block(4), 20); + initialize_to_block(30); + // 3 gets kicked after 1 session delay + assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 4]); + // kicked collator gets funds back assert_eq!(Balances::free_balance(3), 100); }); } @@ -559,7 +1502,8 @@ fn kick_mechanism() { fn should_not_kick_mechanism_too_few() { new_test_ext().execute_with(|| { // remove the invulnerables and add new collators 3 and 5 - assert_eq!(CollatorSelection::candidates(), Vec::new()); + + assert_eq!(>::get().iter().count(), 0); assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); assert_ok!(CollatorSelection::remove_invulnerable( RuntimeOrigin::signed(RootAccount::get()), @@ -573,30 +1517,34 @@ fn should_not_kick_mechanism_too_few() { )); initialize_to_block(10); - assert_eq!(CollatorSelection::candidates().len(), 2); + assert_eq!(>::get().iter().count(), 2); initialize_to_block(20); assert_eq!(SessionChangeBlock::get(), 20); // 4 authored this block, 3 is kicked, 5 stays because of too few collators - assert_eq!(CollatorSelection::candidates().len(), 1); + assert_eq!(>::get().iter().count(), 1); // 3 will be kicked after 1 session delay assert_eq!(SessionHandlerCollators::get(), vec![3, 5]); - let collator = CandidateInfo { who: 5, deposit: 10 }; - assert_eq!(CollatorSelection::candidates(), vec![collator]); + // tuple of (id, deposit). + let collator = CandidateInfo { who: 3, deposit: 10 }; + assert_eq!( + >::get().iter().cloned().collect::>(), + vec![collator] + ); assert_eq!(CollatorSelection::last_authored_block(4), 20); initialize_to_block(30); // 3 gets kicked after 1 session delay - assert_eq!(SessionHandlerCollators::get(), vec![5]); + assert_eq!(SessionHandlerCollators::get(), vec![3]); // kicked collator gets funds back - assert_eq!(Balances::free_balance(3), 100); + assert_eq!(Balances::free_balance(5), 100); }); } #[test] fn should_kick_invulnerables_from_candidates_on_session_change() { new_test_ext().execute_with(|| { - assert_eq!(CollatorSelection::candidates(), Vec::new()); + assert_eq!(>::get().iter().count(), 0); assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); assert_eq!(Balances::free_balance(3), 90); @@ -606,16 +1554,22 @@ fn should_kick_invulnerables_from_candidates_on_session_change() { vec![1, 2, 3] )); + // tuple of (id, deposit). 
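// Illustrative sketch (not part of the patch): the candidate reads used throughout the
// tests above. Spelled out with its generic parameters, the access is assumed to look
// roughly like this: `CandidateList` is a `StorageValue` holding a bounded list of
// `CandidateInfo { who, deposit }` entries kept sorted by deposit, and the tests collect it
// into a plain `Vec` so it can be compared against `vec![...]` literals.
fn candidates_sketch() -> Vec<CandidateInfo<u64, u64>> {
	<CandidateList<Test>>::get().iter().cloned().collect::<Vec<_>>()
}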
let collator_3 = CandidateInfo { who: 3, deposit: 10 }; let collator_4 = CandidateInfo { who: 4, deposit: 10 }; - assert_eq!(CollatorSelection::candidates(), vec![collator_3, collator_4.clone()]); + let actual_candidates = + >::get().iter().cloned().collect::>(); + assert_eq!(actual_candidates, vec![collator_4.clone(), collator_3]); assert_eq!(CollatorSelection::invulnerables(), vec![1, 2, 3]); // session change initialize_to_block(10); // 3 is removed from candidates - assert_eq!(CollatorSelection::candidates(), vec![collator_4]); + assert_eq!( + >::get().iter().cloned().collect::>(), + vec![collator_4] + ); // but not from invulnerables assert_eq!(CollatorSelection::invulnerables(), vec![1, 2, 3]); // and it got its deposit back diff --git a/cumulus/pallets/collator-selection/src/weights.rs b/cumulus/pallets/collator-selection/src/weights.rs index f8f86fb7dec2c08601eebe4d8b345a0f7c930b70..1c01ad6cd6fe8e8ed4bc02c3c2d6703eb2882df4 100644 --- a/cumulus/pallets/collator-selection/src/weights.rs +++ b/cumulus/pallets/collator-selection/src/weights.rs @@ -30,9 +30,11 @@ pub trait WeightInfo { fn add_invulnerable(_b: u32, _c: u32) -> Weight; fn remove_invulnerable(_b: u32) -> Weight; fn set_desired_candidates() -> Weight; - fn set_candidacy_bond() -> Weight; + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight; fn register_as_candidate(_c: u32) -> Weight; fn leave_intent(_c: u32) -> Weight; + fn update_bond(_c: u32) -> Weight; + fn take_candidate_slot(_c: u32) -> Weight; fn note_author() -> Weight; fn new_session(_c: u32, _r: u32) -> Weight; } @@ -49,7 +51,7 @@ impl WeightInfo for SubstrateWeight { fn set_desired_candidates() -> Weight { Weight::from_parts(16_363_000_u64, 0).saturating_add(T::DbWeight::get().writes(1_u64)) } - fn set_candidacy_bond() -> Weight { + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { Weight::from_parts(16_840_000_u64, 0).saturating_add(T::DbWeight::get().writes(1_u64)) } fn register_as_candidate(c: u32) -> Weight { @@ -66,6 +68,20 @@ impl WeightInfo for SubstrateWeight { .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } + fn update_bond(c: u32) -> Weight { + Weight::from_parts(55_336_000_u64, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(151_000_u64, 0).saturating_mul(c as u64)) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + fn take_candidate_slot(c: u32) -> Weight { + Weight::from_parts(71_196_000_u64, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(198_000_u64, 0).saturating_mul(c as u64)) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } fn note_author() -> Weight { Weight::from_parts(71_461_000_u64, 0) .saturating_add(T::DbWeight::get().reads(3_u64)) @@ -136,7 +152,7 @@ impl WeightInfo for () { fn set_desired_candidates() -> Weight { Weight::from_parts(16_363_000_u64, 0).saturating_add(RocksDbWeight::get().writes(1_u64)) } - fn set_candidacy_bond() -> Weight { + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { Weight::from_parts(16_840_000_u64, 0).saturating_add(RocksDbWeight::get().writes(1_u64)) } fn register_as_candidate(c: u32) -> Weight { @@ -158,6 +174,20 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } + fn update_bond(c: u32) -> Weight { + Weight::from_parts(55_336_000_u64, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(151_000_u64, 0).saturating_mul(c 
as u64)) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + } + fn take_candidate_slot(c: u32) -> Weight { + Weight::from_parts(71_196_000_u64, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(198_000_u64, 0).saturating_mul(c as u64)) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + } fn new_session(r: u32, c: u32) -> Weight { Weight::from_parts(0_u64, 0) // Standard Error: 1_010_000 diff --git a/cumulus/pallets/dmp-queue/Cargo.toml b/cumulus/pallets/dmp-queue/Cargo.toml index 9d61a2c99fd35af24ac0bdd51a4dfb5aa4b5fd65..43fb131aec2a7d3241b76190196b620c207f7b8a 100644 --- a/cumulus/pallets/dmp-queue/Cargo.toml +++ b/cumulus/pallets/dmp-queue/Cargo.toml @@ -3,18 +3,24 @@ name = "cumulus-pallet-dmp-queue" version = "0.1.0" authors.workspace = true edition.workspace = true +repository.workspace = true +description = "Migrates messages from the old DMP queue pallet." +license = "Apache-2.0" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive" ], default-features = false } +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } log = { version = "0.4.20", default-features = false } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -# Substrate -frame-support = { path = "../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../substrate/frame/system", default-features = false} -sp-io = { path = "../../../substrate/primitives/io", default-features = false} -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false} -sp-std = { path = "../../../substrate/primitives/std", default-features = false} +frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } +frame-support = { path = "../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../substrate/frame/system", default-features = false } +sp-std = { path = "../../../substrate/primitives/std", default-features = false } +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } +sp-io = { path = "../../../substrate/primitives/io", default-features = false } # Polkadot xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false} @@ -23,25 +29,34 @@ xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-feature cumulus-primitives-core = { path = "../../primitives/core", default-features = false } [dev-dependencies] -sp-core = { path = "../../../substrate/primitives/core", default-features = false} -sp-version = { path = "../../../substrate/primitives/version", default-features = false} +sp-core = { path = "../../../substrate/primitives/core" } +sp-tracing = { path = "../../../substrate/primitives/tracing" } [features] default = [ "std" ] + std = [ "codec/std", "cumulus-primitives-core/std", + "frame-benchmarking?/std", "frame-support/std", "frame-system/std", "log/std", "scale-info/std", - "sp-core/std", "sp-io/std", "sp-runtime/std", "sp-std/std", - "sp-version/std", "xcm/std", ] + +runtime-benchmarks = [ + "cumulus-primitives-core/runtime-benchmarks", + "frame-benchmarking/runtime-benchmarks", 
+ "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] + try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", diff --git a/cumulus/pallets/dmp-queue/src/benchmarking.rs b/cumulus/pallets/dmp-queue/src/benchmarking.rs new file mode 100644 index 0000000000000000000000000000000000000000..91d1e0eab7e406a214088d8c0e47d03e20cd0286 --- /dev/null +++ b/cumulus/pallets/dmp-queue/src/benchmarking.rs @@ -0,0 +1,108 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Benchmarking for the `cumulus-pallet-dmp-queue`. + +#![cfg(feature = "runtime-benchmarks")] + +use crate::*; + +use frame_benchmarking::v2::*; +use frame_support::{pallet_prelude::*, traits::Hooks}; +use sp_std::vec; + +#[benchmarks] +mod benchmarks { + use super::*; + + /// This benchmark uses the proper maximal message length. + #[benchmark] + fn on_idle_good_msg() { + let msg = vec![123; MaxDmpMessageLenOf::::get() as usize]; + + Pages::::insert(0, vec![(123, msg.clone())]); + PageIndex::::put(PageIndexData { begin_used: 0, end_used: 1, overweight_count: 0 }); + MigrationStatus::::set(MigrationState::StartedExport { next_begin_used: 0 }); + + #[block] + { + Pallet::::on_idle(0u32.into(), Weight::MAX); + } + + assert_last_event::(Event::Exported { page: 0 }.into()); + } + + /// This benchmark uses 64 KiB messages to emulate a large old message. 
+ #[benchmark] + fn on_idle_large_msg() { + let msg = vec![123; 1 << 16]; + + Pages::::insert(0, vec![(123, msg.clone())]); + PageIndex::::put(PageIndexData { begin_used: 0, end_used: 1, overweight_count: 0 }); + MigrationStatus::::set(MigrationState::StartedExport { next_begin_used: 0 }); + + #[block] + { + Pallet::::on_idle(0u32.into(), Weight::MAX); + } + + assert_last_event::(Event::Exported { page: 0 }.into()); + } + + #[benchmark] + fn on_idle_overweight_good_msg() { + let msg = vec![123; MaxDmpMessageLenOf::::get() as usize]; + + Overweight::::insert(0, (123, msg.clone())); + PageIndex::::put(PageIndexData { begin_used: 0, end_used: 1, overweight_count: 1 }); + MigrationStatus::::set(MigrationState::StartedOverweightExport { + next_overweight_index: 0, + }); + + #[block] + { + Pallet::::on_idle(0u32.into(), Weight::MAX); + } + + assert_last_event::(Event::ExportedOverweight { index: 0 }.into()); + } + + #[benchmark] + fn on_idle_overweight_large_msg() { + let msg = vec![123; 1 << 16]; + + Overweight::::insert(0, (123, msg.clone())); + PageIndex::::put(PageIndexData { begin_used: 0, end_used: 1, overweight_count: 1 }); + MigrationStatus::::set(MigrationState::StartedOverweightExport { + next_overweight_index: 0, + }); + + #[block] + { + Pallet::::on_idle(0u32.into(), Weight::MAX); + } + + assert_last_event::(Event::ExportOverweightFailed { index: 0 }.into()); + } + + impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Runtime); +} + +fn assert_last_event(generic_event: ::RuntimeEvent) { + let events = frame_system::Pallet::::events(); + let system_event: ::RuntimeEvent = generic_event.into(); + let frame_system::EventRecord { event, .. } = events.last().expect("Event expected"); + assert_eq!(event, &system_event.into()); +} diff --git a/cumulus/pallets/dmp-queue/src/lib.rs b/cumulus/pallets/dmp-queue/src/lib.rs index eff4a625ef1b6ff88ae23cee2cc252af78921a67..79cc4bc895ec29b87eb95e629e0a9624fd7f62a5 100644 --- a/cumulus/pallets/dmp-queue/src/lib.rs +++ b/cumulus/pallets/dmp-queue/src/lib.rs @@ -1,915 +1,284 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Pallet implementing a message queue for downward messages from the relay-chain. -//! Executes downward messages if there is enough weight available and schedules the rest for later -//! execution (by `on_idle` or another `handle_dmp_messages` call). Individual overweight messages -//! are scheduled into a separate queue that is only serviced by explicit extrinsic calls. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! This pallet used to implement a message queue for downward messages from the relay-chain. +//! +//! It is now deprecated and has been refactored to simply drain any remaining messages into +//! something implementing `HandleMessage`. It proceeds in the state of +//! [`MigrationState`] one by one by their listing in the source code. The pallet can be removed +//! from the runtime once `Completed` was emitted. #![cfg_attr(not(feature = "std"), no_std)] -pub mod migration; - -use codec::{Decode, DecodeLimit, Encode}; -use cumulus_primitives_core::{relay_chain::BlockNumber as RelayBlockNumber, DmpMessageHandler}; -use frame_support::{ - traits::EnsureOrigin, - weights::{constants::WEIGHT_REF_TIME_PER_MILLIS, Weight}, -}; +use migration::*; pub use pallet::*; -use scale_info::TypeInfo; -use sp_runtime::RuntimeDebug; -use sp_std::{convert::TryFrom, prelude::*}; -use xcm::{latest::prelude::*, VersionedXcm, MAX_XCM_DECODE_DEPTH}; - -const DEFAULT_POV_SIZE: u64 = 64 * 1024; // 64 KB - -// Maximum amount of messages to process per block. This is a temporary measure until we properly -// account for proof size weights. -const MAX_MESSAGES_PER_BLOCK: u8 = 10; -// Maximum amount of messages that can exist in the overweight queue at any given time. -const MAX_OVERWEIGHT_MESSAGES: u32 = 1000; - -#[derive(Copy, Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] -pub struct ConfigData { - /// The maximum amount of weight any individual message may consume. Messages above this weight - /// go into the overweight queue and may only be serviced explicitly by the - /// `ExecuteOverweightOrigin`. - max_individual: Weight, -} -impl Default for ConfigData { - fn default() -> Self { - Self { - max_individual: Weight::from_parts( - 10u64 * WEIGHT_REF_TIME_PER_MILLIS, // 10 ms of execution time maximum by default - DEFAULT_POV_SIZE, // 64 KB of proof size by default - ), - } - } -} +mod benchmarking; +mod migration; +mod mock; +mod tests; +pub mod weights; -/// Information concerning our message pages. -#[derive(Copy, Clone, Eq, PartialEq, Default, Encode, Decode, RuntimeDebug, TypeInfo)] -pub struct PageIndexData { - /// The lowest used page index. - begin_used: PageCounter, - /// The lowest unused page index. - end_used: PageCounter, - /// The number of overweight messages ever recorded (and thus the lowest free index). - overweight_count: OverweightIndex, -} - -/// Simple type used to identify messages for the purpose of reporting events. Secure if and only -/// if the message content is unique. -pub type MessageId = XcmHash; +pub use weights::WeightInfo; -/// Index used to identify overweight messages. -pub type OverweightIndex = u64; - -/// Index used to identify normal pages. -pub type PageCounter = u32; +/// The maximal length of a DMP message. 
+pub type MaxDmpMessageLenOf = + <::DmpSink as frame_support::traits::HandleMessage>::MaxMessageLen; #[frame_support::pallet] pub mod pallet { use super::*; - use frame_support::pallet_prelude::*; + use frame_support::{pallet_prelude::*, traits::HandleMessage, weights::WeightMeter}; use frame_system::pallet_prelude::*; + use sp_io::hashing::twox_128; + + const STORAGE_VERSION: StorageVersion = StorageVersion::new(2); #[pallet::pallet] - #[pallet::storage_version(migration::STORAGE_VERSION)] - #[pallet::without_storage_info] + #[pallet::storage_version(STORAGE_VERSION)] pub struct Pallet(_); - /// The module configuration trait. #[pallet::config] pub trait Config: frame_system::Config { - /// The overarching event type. + /// The overarching event type of the runtime. type RuntimeEvent: From> + IsType<::RuntimeEvent>; - type XcmExecutor: ExecuteXcm; + /// The sink for all DMP messages that the lazy migration will use. + type DmpSink: HandleMessage; - /// Origin which is allowed to execute overweight messages. - type ExecuteOverweightOrigin: EnsureOrigin; + /// Weight info for this pallet (only needed for the lazy migration). + type WeightInfo: WeightInfo; } - /// The configuration. - #[pallet::storage] - pub(super) type Configuration = StorageValue<_, ConfigData, ValueQuery>; - - /// The page index. - #[pallet::storage] - pub(super) type PageIndex = StorageValue<_, PageIndexData, ValueQuery>; - - /// The queue pages. + /// The migration state of this pallet. #[pallet::storage] - pub(super) type Pages = - StorageMap<_, Blake2_128Concat, PageCounter, Vec<(RelayBlockNumber, Vec)>, ValueQuery>; - - /// The overweight messages. - #[pallet::storage] - pub(super) type Overweight = CountedStorageMap< - _, - Blake2_128Concat, - OverweightIndex, - (RelayBlockNumber, Vec), - OptionQuery, - >; - - #[pallet::error] - pub enum Error { - /// The message index given is unknown. - Unknown, - /// The amount of weight given is possibly not enough for executing the message. - OverLimit, + pub type MigrationStatus = StorageValue<_, MigrationState, ValueQuery>; + + /// The lazy-migration state of the pallet. + #[derive( + codec::Encode, codec::Decode, Debug, PartialEq, Eq, Clone, MaxEncodedLen, TypeInfo, + )] + pub enum MigrationState { + /// Migration has not started yet. + NotStarted, + /// The export of pages started. + StartedExport { + /// The next page that should be exported. + next_begin_used: PageCounter, + }, + /// The page export completed. + CompletedExport, + /// The export of overweight messages started. + StartedOverweightExport { + /// The next overweight index that should be exported. + next_overweight_index: u64, + }, + /// The export of overweight messages completed. + CompletedOverweightExport, + /// The storage cleanup started. + StartedCleanup { cursor: Option>> }, + /// The migration finished. The pallet can now be removed from the runtime. + Completed, } - #[pallet::hooks] - impl Hooks> for Pallet { - fn on_idle(_now: BlockNumberFor, max_weight: Weight) -> Weight { - // on_idle processes additional messages with any remaining block weight. - Self::service_queue(max_weight) - } - } - - #[pallet::call] - impl Pallet { - /// Service a single overweight message. 
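// Illustrative sketch (not part of the patch): the lazy migration defined above is a small
// state machine driven from `on_idle`, taking one bounded step per block. The expected
// progression of `MigrationStatus`, in order (field values elided), is:
//
//   NotStarted
//     -> StartedExport { next_begin_used }                // one old page exported per block
//     -> CompletedExport
//     -> StartedOverweightExport { next_overweight_index }
//     -> CompletedOverweightExport
//     -> StartedCleanup { cursor }                        // old keys removed in small chunks
//     -> Completed                                        // pallet can be removed from the runtime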
- #[pallet::call_index(0)] - #[pallet::weight(weight_limit.saturating_add(Weight::from_parts(1_000_000, 0)))] - pub fn service_overweight( - origin: OriginFor, - index: OverweightIndex, - weight_limit: Weight, - ) -> DispatchResultWithPostInfo { - T::ExecuteOverweightOrigin::ensure_origin(origin)?; - - let (sent_at, data) = Overweight::::get(index).ok_or(Error::::Unknown)?; - let weight_used = Self::try_service_message(weight_limit, sent_at, &data[..]) - .map_err(|_| Error::::OverLimit)?; - Overweight::::remove(index); - Self::deposit_event(Event::OverweightServiced { overweight_index: index, weight_used }); - Ok(Some(weight_used.saturating_add(Weight::from_parts(1_000_000, 0))).into()) + impl Default for MigrationState { + fn default() -> Self { + Self::NotStarted } } #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { - /// Downward message is invalid XCM. - InvalidFormat { message_hash: XcmHash }, - /// Downward message is unsupported version of XCM. - UnsupportedVersion { message_hash: XcmHash }, - /// Downward message executed with the given outcome. - ExecutedDownward { message_hash: XcmHash, message_id: XcmHash, outcome: Outcome }, - /// The weight limit for handling downward messages was reached. - WeightExhausted { - message_hash: XcmHash, - message_id: XcmHash, - remaining_weight: Weight, - required_weight: Weight, - }, - /// Downward message is overweight and was placed in the overweight queue. - OverweightEnqueued { - message_hash: XcmHash, - message_id: XcmHash, - overweight_index: OverweightIndex, - required_weight: Weight, - }, - /// Downward message from the overweight queue was executed. - OverweightServiced { overweight_index: OverweightIndex, weight_used: Weight }, - /// The maximum number of downward messages was reached. - MaxMessagesExhausted { message_hash: XcmHash }, - } + /// The export of pages started. + StartedExport, - /// Error type when a message was failed to be serviced. - pub(crate) struct ServiceMessageError { - /// The message's hash. - message_hash: XcmHash, - /// The message's ID (which could also be its hash if nothing overrides it). - message_id: XcmHash, - /// Weight required for the message to be executed. - required_weight: Weight, - } + /// The export of a page completed. + Exported { page: PageCounter }, - impl Pallet { - /// Service the message queue up to some given weight `limit`. + /// The export of a page failed. /// - /// Returns the weight consumed by executing messages in the queue. - fn service_queue(limit: Weight) -> Weight { - let mut messages_processed = 0; - PageIndex::::mutate(|page_index| { - Self::do_service_queue(limit, page_index, &mut messages_processed) - }) - } + /// This should never be emitted. + ExportFailed { page: PageCounter }, - /// Exactly equivalent to `service_queue` but expects a mutable `page_index` to be passed - /// in and any changes stored. 
- fn do_service_queue( - limit: Weight, - page_index: &mut PageIndexData, - messages_processed: &mut u8, - ) -> Weight { - let mut used = Weight::zero(); - while page_index.begin_used < page_index.end_used { - let page = Pages::::take(page_index.begin_used); - for (i, &(sent_at, ref data)) in page.iter().enumerate() { - if *messages_processed >= MAX_MESSAGES_PER_BLOCK { - // Exceeded block message limit - put the remaining messages back and bail - Pages::::insert(page_index.begin_used, &page[i..]); - return used - } - *messages_processed += 1; - match Self::try_service_message(limit.saturating_sub(used), sent_at, &data[..]) - { - Ok(w) => used += w, - Err(..) => { - // Too much weight needed - put the remaining messages back and bail - Pages::::insert(page_index.begin_used, &page[i..]); - return used - }, - } - } - page_index.begin_used += 1; - } - if page_index.begin_used == page_index.end_used { - // Reset if there's no pages left. - page_index.begin_used = 0; - page_index.end_used = 0; - } - used - } + /// The export of pages completed. + CompletedExport, - /// Attempt to service an individual message. Will return `Ok` with the execution weight - /// consumed unless the message was found to need more weight than `limit`. - /// - /// NOTE: This will return `Ok` in the case of an error decoding, weighing or executing - /// the message. This is why it's called message "servicing" rather than "execution". - pub(crate) fn try_service_message( - limit: Weight, - _sent_at: RelayBlockNumber, - mut data: &[u8], - ) -> Result { - let message_hash = sp_io::hashing::blake2_256(data); - let mut message_id = message_hash; - let maybe_msg = VersionedXcm::::decode_all_with_depth_limit( - MAX_XCM_DECODE_DEPTH, - &mut data, - ) - .map(Xcm::::try_from); - match maybe_msg { - Err(_) => { - Self::deposit_event(Event::InvalidFormat { message_hash }); - Ok(Weight::zero()) - }, - Ok(Err(())) => { - Self::deposit_event(Event::UnsupportedVersion { message_hash }); - Ok(Weight::zero()) - }, - Ok(Ok(x)) => { - let outcome = T::XcmExecutor::prepare_and_execute( - Parent, - x, - &mut message_id, - limit, - Weight::zero(), - ); - match outcome { - Outcome::Error(XcmError::WeightLimitReached(required_weight)) => - Err(ServiceMessageError { message_hash, message_id, required_weight }), - outcome => { - let weight_used = outcome.weight_used(); - Self::deposit_event(Event::ExecutedDownward { - message_hash, - message_id, - outcome, - }); - Ok(weight_used) - }, - } - }, - } - } - } + /// The export of overweight messages started. + StartedOverweightExport, - /// For an incoming downward message, this just adapts an XCM executor and executes DMP messages - /// immediately up until some `MaxWeight` at which point it errors. Their origin is asserted to - /// be the `Parent` location. - impl DmpMessageHandler for Pallet { - fn handle_dmp_messages( - iter: impl Iterator)>, - limit: Weight, - ) -> Weight { - let mut messages_processed = 0; - let mut page_index = PageIndex::::get(); - let config = Configuration::::get(); - - // First try to use `max_weight` to service the current queue. - let mut used = Self::do_service_queue(limit, &mut page_index, &mut messages_processed); - - // Then if the queue is empty, use the weight remaining to service the incoming messages - // and once we run out of weight, place them in the queue. - let item_count = iter.size_hint().0; - let mut maybe_enqueue_page = if page_index.end_used > page_index.begin_used { - // queue is already non-empty - start a fresh page. 
- Some(Vec::with_capacity(item_count)) - } else { - None - }; - - for (i, (sent_at, data)) in iter.enumerate() { - if maybe_enqueue_page.is_none() { - if messages_processed >= MAX_MESSAGES_PER_BLOCK { - let item_count_left = item_count.saturating_sub(i); - maybe_enqueue_page = Some(Vec::with_capacity(item_count_left)); - - Self::deposit_event(Event::MaxMessagesExhausted { - message_hash: sp_io::hashing::blake2_256(&data), - }); - } else { - // We're not currently enqueuing - try to execute inline. - let remaining_weight = limit.saturating_sub(used); - messages_processed += 1; - match Self::try_service_message(remaining_weight, sent_at, &data[..]) { - Ok(consumed) => used += consumed, - Err(ServiceMessageError { - message_hash, - message_id, - required_weight, - }) => - // Too much weight required right now. - { - let is_under_limit = - Overweight::::count() < MAX_OVERWEIGHT_MESSAGES; - used.saturating_accrue(T::DbWeight::get().reads(1)); - if required_weight.any_gt(config.max_individual) && is_under_limit { - // overweight - add to overweight queue and continue with - // message execution. - let overweight_index = page_index.overweight_count; - Overweight::::insert(overweight_index, (sent_at, data)); - Self::deposit_event(Event::OverweightEnqueued { - message_hash, - message_id, - overweight_index, - required_weight, - }); - page_index.overweight_count += 1; - // Not needed for control flow, but only to ensure that the - // compiler understands that we won't attempt to re-use `data` - // later. - continue - } else { - // not overweight. stop executing inline and enqueue normally - // from here on. - let item_count_left = item_count.saturating_sub(i); - maybe_enqueue_page = Some(Vec::with_capacity(item_count_left)); - Self::deposit_event(Event::WeightExhausted { - message_hash, - message_id, - remaining_weight, - required_weight, - }); - } - }, - } - } - } - // Cannot be an `else` here since the `maybe_enqueue_page` may have changed. - if let Some(ref mut enqueue_page) = maybe_enqueue_page { - enqueue_page.push((sent_at, data)); - } - } - - // Deposit the enqueued page if any and save the index. - if let Some(enqueue_page) = maybe_enqueue_page { - Pages::::insert(page_index.end_used, enqueue_page); - page_index.end_used += 1; - } - PageIndex::::put(page_index); - - used - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate as dmp_queue; - - use codec::Encode; - use cumulus_primitives_core::ParaId; - use frame_support::{assert_noop, parameter_types, traits::OnIdle}; - use sp_core::H256; - use sp_runtime::{ - traits::{BlakeTwo256, IdentityLookup}, - BuildStorage, - DispatchError::BadOrigin, - }; - use sp_version::RuntimeVersion; - use std::cell::RefCell; - use xcm::latest::{MultiLocation, OriginKind}; - - type Block = frame_system::mocking::MockBlock; - type Xcm = xcm::latest::Xcm; - - frame_support::construct_runtime!( - pub enum Test - { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - DmpQueue: dmp_queue::{Pallet, Call, Storage, Event}, - } - ); - - parameter_types! 
{ - pub const BlockHashCount: u64 = 250; - pub Version: RuntimeVersion = RuntimeVersion { - spec_name: sp_version::create_runtime_str!("test"), - impl_name: sp_version::create_runtime_str!("system-test"), - authoring_version: 1, - spec_version: 1, - impl_version: 1, - apis: sp_version::create_apis_vec!([]), - transaction_version: 1, - state_version: 1, - }; - pub const ParachainId: ParaId = ParaId::new(200); - pub const ReservedXcmpWeight: Weight = Weight::zero(); - pub const ReservedDmpWeight: Weight = Weight::zero(); - } - - type AccountId = u64; - - impl frame_system::Config for Test { - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - type Nonce = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; - type Block = Block; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; - type BlockLength = (); - type BlockWeights = (); - type Version = Version; - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type DbWeight = (); - type BaseCallFilter = frame_support::traits::Everything; - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; - } + /// The export of an overweight message completed. + ExportedOverweight { index: OverweightIndex }, - thread_local! { - pub static TRACE: RefCell> = RefCell::new(Vec::new()); - } - pub fn take_trace() -> Vec<(Xcm, Outcome)> { - TRACE.with(|q| { - let q = &mut *q.borrow_mut(); - let r = q.clone(); - q.clear(); - r - }) - } - - pub struct MockPrepared(Xcm); - impl PreparedMessage for MockPrepared { - fn weight_of(&self) -> Weight { - match ((self.0).0.len(), &(self.0).0.first()) { - (1, Some(Transact { require_weight_at_most, .. })) => *require_weight_at_most, - _ => Weight::from_parts(1, 1), - } - } - } + /// The export of an overweight message failed. + /// + /// This should never be emitted. + ExportOverweightFailed { index: OverweightIndex }, - pub struct MockExec; - impl ExecuteXcm for MockExec { - type Prepared = MockPrepared; + /// The export of overweight messages completed. + CompletedOverweightExport, - fn prepare(message: Xcm) -> Result { - Ok(MockPrepared(message)) - } + /// The cleanup of remaining pallet storage started. + StartedCleanup, - fn execute( - _origin: impl Into, - prepared: MockPrepared, - _id: &mut XcmHash, - _weight_credit: Weight, - ) -> Outcome { - let message = prepared.0; - let o = match (message.0.len(), &message.0.first()) { - (1, Some(Transact { require_weight_at_most, .. })) => - Outcome::Complete(*require_weight_at_most), - // use 1000 to decide that it's not supported. - _ => Outcome::Incomplete(Weight::from_parts(1, 1), XcmError::Unimplemented), - }; - TRACE.with(|q| q.borrow_mut().push((message, o.clone()))); - o - } + /// Some debris was cleaned up. + CleanedSome { keys_removed: u32 }, - fn charge_fees(_location: impl Into, _fees: MultiAssets) -> XcmResult { - Err(XcmError::Unimplemented) - } + /// The cleanup of remaining pallet storage completed. 
+ Completed { error: bool }, } - impl Config for Test { - type RuntimeEvent = RuntimeEvent; - type XcmExecutor = MockExec; - type ExecuteOverweightOrigin = frame_system::EnsureRoot; - } - - pub(crate) fn new_test_ext() -> sp_io::TestExternalities { - frame_system::GenesisConfig::::default().build_storage().unwrap().into() - } + #[pallet::call] + impl Pallet {} - fn enqueue(enqueued: &[Xcm]) { - if !enqueued.is_empty() { - let mut index = PageIndex::::get(); - Pages::::insert( - index.end_used, - enqueued - .iter() - .map(|m| (0, VersionedXcm::::from(m.clone()).encode())) - .collect::>(), - ); - index.end_used += 1; - PageIndex::::put(index); + #[pallet::hooks] + impl Hooks> for Pallet { + fn integrity_test() { + let w = Self::on_idle_weight(); + assert!(w != Weight::zero()); + assert!(w.all_lte(T::BlockWeights::get().max_block)); } - } - fn handle_messages(incoming: &[Xcm], limit: Weight) -> Weight { - let iter = incoming - .iter() - .map(|m| (0, VersionedXcm::::from(m.clone()).encode())); - DmpQueue::handle_dmp_messages(iter, limit) - } + fn on_idle(now: BlockNumberFor, limit: Weight) -> Weight { + let mut meter = WeightMeter::with_limit(limit); - fn msg(weight: u64) -> Xcm { - Xcm(vec![Transact { - origin_kind: OriginKind::Native, - require_weight_at_most: Weight::from_parts(weight, weight), - call: Vec::new().into(), - }]) - } - - fn msg_complete(weight: u64) -> (Xcm, Outcome) { - (msg(weight), Outcome::Complete(Weight::from_parts(weight, weight))) - } - - fn pages_queued() -> PageCounter { - PageIndex::::get().end_used - PageIndex::::get().begin_used - } - - fn queue_is_empty() -> bool { - pages_queued() == 0 - } - - fn overweights() -> Vec { - (0..PageIndex::::get().overweight_count) - .filter(|i| Overweight::::contains_key(i)) - .collect::>() - } - - #[test] - fn basic_setup_works() { - new_test_ext().execute_with(|| { - let weight_used = handle_messages(&[], Weight::from_parts(1000, 1000)); - assert_eq!(weight_used, Weight::zero()); - assert_eq!(take_trace(), Vec::new()); - assert!(queue_is_empty()); - }); - } + if meter.try_consume(Self::on_idle_weight()).is_err() { + log::debug!(target: LOG, "Not enough weight for on_idle. 
{} < {}", Self::on_idle_weight(), limit); + return meter.consumed() + } - #[test] - fn service_inline_complete_works() { - new_test_ext().execute_with(|| { - let incoming = vec![msg(1000), msg(1001)]; - let weight_used = handle_messages(&incoming, Weight::from_parts(2500, 2500)); - assert_eq!(weight_used, Weight::from_parts(2001, 2001)); - assert_eq!(take_trace(), vec![msg_complete(1000), msg_complete(1001)]); - assert!(queue_is_empty()); - }); - } + let state = MigrationStatus::::get(); + let index = PageIndex::::get(); + log::debug!(target: LOG, "on_idle: block={:?}, state={:?}, index={:?}", now, state, index); - #[test] - fn service_enqueued_works() { - new_test_ext().execute_with(|| { - let enqueued = vec![msg(1000), msg(1001), msg(1002)]; - enqueue(&enqueued); - let weight_used = handle_messages(&[], Weight::from_parts(2500, 2500)); - assert_eq!(weight_used, Weight::from_parts(2001, 2001)); - assert_eq!(take_trace(), vec![msg_complete(1000), msg_complete(1001),]); - }); - } + match state { + MigrationState::NotStarted => { + log::debug!(target: LOG, "Init export at page {}", index.begin_used); - #[test] - fn enqueue_works() { - new_test_ext().execute_with(|| { - let incoming = vec![msg(1000), msg(1001), msg(1002)]; - let weight_used = handle_messages(&incoming, Weight::from_parts(999, 999)); - assert_eq!(weight_used, Weight::zero()); - assert_eq!( - PageIndex::::get(), - PageIndexData { begin_used: 0, end_used: 1, overweight_count: 0 } - ); - assert_eq!(Pages::::get(0).len(), 3); - assert_eq!(take_trace(), vec![]); - - let weight_used = handle_messages(&[], Weight::from_parts(2500, 2500)); - assert_eq!(weight_used, Weight::from_parts(2001, 2001)); - assert_eq!(take_trace(), vec![msg_complete(1000), msg_complete(1001)]); - - let weight_used = handle_messages(&[], Weight::from_parts(2500, 2500)); - assert_eq!(weight_used, Weight::from_parts(1002, 1002)); - assert_eq!(take_trace(), vec![msg_complete(1002)]); - assert!(queue_is_empty()); - }); - } + MigrationStatus::::put(MigrationState::StartedExport { + next_begin_used: index.begin_used, + }); + Self::deposit_event(Event::StartedExport); + }, + MigrationState::StartedExport { next_begin_used } => { + log::debug!(target: LOG, "Exporting page {}", next_begin_used); - #[test] - fn service_inline_then_enqueue_works() { - new_test_ext().execute_with(|| { - let incoming = vec![msg(1000), msg(1001), msg(1002)]; - let weight_used = handle_messages(&incoming, Weight::from_parts(1500, 1500)); - assert_eq!(weight_used, Weight::from_parts(1000, 1000)); - assert_eq!(pages_queued(), 1); - assert_eq!(Pages::::get(0).len(), 2); - assert_eq!(take_trace(), vec![msg_complete(1000)]); - - let weight_used = handle_messages(&[], Weight::from_parts(2500, 2500)); - assert_eq!(weight_used, Weight::from_parts(2003, 2003)); - assert_eq!(take_trace(), vec![msg_complete(1001), msg_complete(1002),]); - assert!(queue_is_empty()); - }); - } + if next_begin_used == index.end_used { + MigrationStatus::::put(MigrationState::CompletedExport); + log::debug!(target: LOG, "CompletedExport"); + Self::deposit_event(Event::CompletedExport); + } else { + let res = migration::migrate_page::(next_begin_used); - #[test] - fn service_enqueued_and_inline_works() { - new_test_ext().execute_with(|| { - let enqueued = vec![msg(1000), msg(1001)]; - let incoming = vec![msg(1002), msg(1003)]; - enqueue(&enqueued); - let weight_used = handle_messages(&incoming, Weight::from_parts(5000, 5000)); - assert_eq!(weight_used, Weight::from_parts(4006, 4006)); - assert_eq!( - take_trace(), - vec![ - 
msg_complete(1000), - msg_complete(1001), - msg_complete(1002), - msg_complete(1003), - ] - ); - assert!(queue_is_empty()); - }); - } + MigrationStatus::::put(MigrationState::StartedExport { + next_begin_used: next_begin_used.saturating_add(1), + }); - #[test] - fn service_enqueued_partially_and_then_enqueue_works() { - new_test_ext().execute_with(|| { - let enqueued = vec![msg(1000), msg(10001)]; - let incoming = vec![msg(1002), msg(1003)]; - enqueue(&enqueued); - let weight_used = handle_messages(&incoming, Weight::from_parts(5000, 5000)); - assert_eq!(weight_used, Weight::from_parts(1000, 1000)); - assert_eq!(take_trace(), vec![msg_complete(1000)]); - assert_eq!(pages_queued(), 2); - - // 5000 is not enough to process the 10001 blocker, so nothing happens. - let weight_used = handle_messages(&[], Weight::from_parts(5000, 5000)); - assert_eq!(weight_used, Weight::zero()); - assert_eq!(take_trace(), vec![]); - - // 20000 is now enough to process everything. - let weight_used = handle_messages(&[], Weight::from_parts(20000, 20000)); - assert_eq!(weight_used, Weight::from_parts(12006, 12006)); - assert_eq!( - take_trace(), - vec![msg_complete(10001), msg_complete(1002), msg_complete(1003),] - ); - assert!(queue_is_empty()); - }); - } + if let Ok(()) = res { + log::debug!(target: LOG, "Exported page {}", next_begin_used); + Self::deposit_event(Event::Exported { page: next_begin_used }); + } else { + Self::deposit_event(Event::ExportFailed { page: next_begin_used }); + } + } + }, + MigrationState::CompletedExport => { + log::debug!(target: LOG, "Init export overweight at index 0"); - #[test] - fn service_enqueued_completely_and_then_enqueue_works() { - new_test_ext().execute_with(|| { - let enqueued = vec![msg(1000), msg(1001)]; - let incoming = vec![msg(10002), msg(1003)]; - enqueue(&enqueued); - let weight_used = handle_messages(&incoming, Weight::from_parts(5000, 5000)); - assert_eq!(weight_used, Weight::from_parts(2001, 2001)); - assert_eq!(take_trace(), vec![msg_complete(1000), msg_complete(1001)]); - assert_eq!(pages_queued(), 1); - - // 20000 is now enough to process everything. - let weight_used = handle_messages(&[], Weight::from_parts(20000, 20000)); - assert_eq!(weight_used, Weight::from_parts(11005, 11005)); - assert_eq!(take_trace(), vec![msg_complete(10002), msg_complete(1003),]); - assert!(queue_is_empty()); - }); - } + MigrationStatus::::put(MigrationState::StartedOverweightExport { + next_overweight_index: 0, + }); + Self::deposit_event(Event::StartedOverweightExport); + }, + MigrationState::StartedOverweightExport { next_overweight_index } => { + log::debug!(target: LOG, "Exporting overweight index {}", next_overweight_index); - #[test] - fn service_enqueued_then_inline_then_enqueue_works() { - new_test_ext().execute_with(|| { - let enqueued = vec![msg(1000), msg(1001)]; - let incoming = vec![msg(1002), msg(10003)]; - enqueue(&enqueued); - let weight_used = handle_messages(&incoming, Weight::from_parts(5000, 5000)); - assert_eq!(weight_used, Weight::from_parts(3003, 3003)); - assert_eq!( - take_trace(), - vec![msg_complete(1000), msg_complete(1001), msg_complete(1002),] - ); - assert_eq!(pages_queued(), 1); - - // 20000 is now enough to process everything. 
- let weight_used = handle_messages(&[], Weight::from_parts(20000, 20000)); - assert_eq!(weight_used, Weight::from_parts(10003, 10003)); - assert_eq!(take_trace(), vec![msg_complete(10003),]); - assert!(queue_is_empty()); - }); - } + if next_overweight_index == index.overweight_count { + MigrationStatus::::put(MigrationState::CompletedOverweightExport); + log::debug!(target: LOG, "CompletedOverweightExport"); + Self::deposit_event(Event::CompletedOverweightExport); + } else { + let res = migration::migrate_overweight::(next_overweight_index); - #[test] - fn page_crawling_works() { - new_test_ext().execute_with(|| { - let enqueued = vec![msg(1000), msg(1001)]; - enqueue(&enqueued); - let weight_used = handle_messages(&[msg(1002)], Weight::from_parts(1500, 1500)); - assert_eq!(weight_used, Weight::from_parts(1000, 1000)); - assert_eq!(take_trace(), vec![msg_complete(1000)]); - assert_eq!(pages_queued(), 2); - assert_eq!(PageIndex::::get().begin_used, 0); - - let weight_used = handle_messages(&[msg(1003)], Weight::from_parts(1500, 1500)); - assert_eq!(weight_used, Weight::from_parts(1001, 1001)); - assert_eq!(take_trace(), vec![msg_complete(1001)]); - assert_eq!(pages_queued(), 2); - assert_eq!(PageIndex::::get().begin_used, 1); - - let weight_used = handle_messages(&[msg(1004)], Weight::from_parts(1500, 1500)); - assert_eq!(weight_used, Weight::from_parts(1002, 1002)); - assert_eq!(take_trace(), vec![msg_complete(1002)]); - assert_eq!(pages_queued(), 2); - assert_eq!(PageIndex::::get().begin_used, 2); - }); - } + MigrationStatus::::put(MigrationState::StartedOverweightExport { + next_overweight_index: next_overweight_index.saturating_add(1), + }); - #[test] - fn overweight_should_not_block_queue() { - new_test_ext().execute_with(|| { - // Set the overweight threshold to 9999. - Configuration::::put(ConfigData { - max_individual: Weight::from_parts(9999, 9999), - }); - - let incoming = vec![msg(1000), msg(10001), msg(1002)]; - let weight_used = handle_messages(&incoming, Weight::from_parts(2500, 2500)); - assert_eq!(weight_used, Weight::from_parts(2002, 2002)); - assert!(queue_is_empty()); - assert_eq!(take_trace(), vec![msg_complete(1000), msg_complete(1002),]); - - assert_eq!(overweights(), vec![0]); - }); - } + if let Ok(()) = res { + log::debug!(target: LOG, "Exported overweight index {next_overweight_index}"); + Self::deposit_event(Event::ExportedOverweight { + index: next_overweight_index, + }); + } else { + Self::deposit_event(Event::ExportOverweightFailed { + index: next_overweight_index, + }); + } + } + }, + MigrationState::CompletedOverweightExport => { + log::debug!(target: LOG, "Init cleanup"); - #[test] - fn overweights_should_be_manually_executable() { - new_test_ext().execute_with(|| { - // Set the overweight threshold to 9999. 
- Configuration::::put(ConfigData { - max_individual: Weight::from_parts(9999, 9999), - }); - - let incoming = vec![msg(10000)]; - let weight_used = handle_messages(&incoming, Weight::from_parts(2500, 2500)); - assert_eq!(weight_used, Weight::zero()); - assert_eq!(take_trace(), vec![]); - assert_eq!(overweights(), vec![0]); - - assert_noop!( - DmpQueue::service_overweight( - RuntimeOrigin::signed(1), - 0, - Weight::from_parts(20000, 20000) - ), - BadOrigin - ); - assert_noop!( - DmpQueue::service_overweight( - RuntimeOrigin::root(), - 1, - Weight::from_parts(20000, 20000) - ), - Error::::Unknown - ); - assert_noop!( - DmpQueue::service_overweight( - RuntimeOrigin::root(), - 0, - Weight::from_parts(9999, 9999) - ), - Error::::OverLimit - ); - assert_eq!(take_trace(), vec![]); - - let base_weight = - super::Call::::service_overweight { index: 0, weight_limit: Weight::zero() } - .get_dispatch_info() - .weight; - use frame_support::dispatch::GetDispatchInfo; - let info = DmpQueue::service_overweight( - RuntimeOrigin::root(), - 0, - Weight::from_parts(20000, 20000), - ) - .unwrap(); - let actual_weight = info.actual_weight.unwrap(); - assert_eq!(actual_weight, base_weight + Weight::from_parts(10000, 10000)); - assert_eq!(take_trace(), vec![msg_complete(10000)]); - assert!(overweights().is_empty()); - - assert_noop!( - DmpQueue::service_overweight( - RuntimeOrigin::root(), - 0, - Weight::from_parts(20000, 20000) - ), - Error::::Unknown - ); - }); - } + MigrationStatus::::put(MigrationState::StartedCleanup { cursor: None }); + Self::deposit_event(Event::StartedCleanup); + }, + MigrationState::StartedCleanup { cursor } => { + log::debug!(target: LOG, "Cleaning up"); + let hashed_prefix = + twox_128( as PalletInfoAccess>::name().as_bytes()); + + let result = frame_support::storage::unhashed::clear_prefix( + &hashed_prefix, + Some(2), // Somehow it does nothing when set to 1, so we set it to 2. + cursor.as_ref().map(|c| c.as_ref()), + ); + Self::deposit_event(Event::CleanedSome { keys_removed: result.backend }); + + // GOTCHA! We deleted *all* pallet storage; hence we also our own + // `MigrationState`. 
BUT we insert it back: + if let Some(unbound_cursor) = result.maybe_cursor { + if let Ok(cursor) = unbound_cursor.try_into() { + log::debug!(target: LOG, "Next cursor: {:?}", &cursor); + MigrationStatus::::put(MigrationState::StartedCleanup { + cursor: Some(cursor), + }); + } else { + MigrationStatus::::put(MigrationState::Completed); + log::error!(target: LOG, "Completed with error: could not bound cursor"); + Self::deposit_event(Event::Completed { error: true }); + } + } else { + MigrationStatus::::put(MigrationState::Completed); + log::debug!(target: LOG, "Completed"); + Self::deposit_event(Event::Completed { error: false }); + } + }, + MigrationState::Completed => { + log::debug!(target: LOG, "Idle; you can remove this pallet"); + }, + } - #[test] - fn on_idle_should_service_queue() { - new_test_ext().execute_with(|| { - enqueue(&[msg(1000), msg(1001)]); - enqueue(&[msg(1002), msg(1003)]); - enqueue(&[msg(1004), msg(1005)]); - - let weight_used = DmpQueue::on_idle(1, Weight::from_parts(6000, 6000)); - assert_eq!(weight_used, Weight::from_parts(5010, 5010)); - assert_eq!( - take_trace(), - vec![ - msg_complete(1000), - msg_complete(1001), - msg_complete(1002), - msg_complete(1003), - msg_complete(1004), - ] - ); - assert_eq!(pages_queued(), 1); - }); + meter.consumed() + } } - #[test] - fn handle_max_messages_per_block() { - new_test_ext().execute_with(|| { - enqueue(&[msg(1000), msg(1001)]); - enqueue(&[msg(1002), msg(1003)]); - enqueue(&[msg(1004), msg(1005)]); - - let incoming = - (0..MAX_MESSAGES_PER_BLOCK).map(|i| msg(1006 + i as u64)).collect::>(); - handle_messages(&incoming, Weight::from_parts(25000, 25000)); - - assert_eq!( - take_trace(), - (0..MAX_MESSAGES_PER_BLOCK) - .map(|i| msg_complete(1000 + i as u64)) - .collect::>(), - ); - assert_eq!(pages_queued(), 1); - - handle_messages(&[], Weight::from_parts(25000, 25000)); - assert_eq!( - take_trace(), - (MAX_MESSAGES_PER_BLOCK..MAX_MESSAGES_PER_BLOCK + 6) - .map(|i| msg_complete(1000 + i as u64)) - .collect::>(), - ); - }); + impl Pallet { + /// The worst-case weight of [`Self::on_idle`]. + pub fn on_idle_weight() -> Weight { + ::WeightInfo::on_idle_good_msg() + .max(::WeightInfo::on_idle_large_msg()) + } } } diff --git a/cumulus/pallets/dmp-queue/src/migration.rs b/cumulus/pallets/dmp-queue/src/migration.rs index 63457ee493697cc7e0b6cb32433cd0d55b9e2244..349635cce547d81c1be527cb366f67679a471945 100644 --- a/cumulus/pallets/dmp-queue/src/migration.rs +++ b/cumulus/pallets/dmp-queue/src/migration.rs @@ -14,110 +14,100 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! A module that is responsible for migration of storage. - -use crate::{Config, Configuration, Overweight, Pallet, DEFAULT_POV_SIZE}; -use frame_support::{ - pallet_prelude::*, - traits::{OnRuntimeUpgrade, StorageVersion}, - weights::{constants::WEIGHT_REF_TIME_PER_MILLIS, Weight}, -}; - -/// The current storage version. -pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(2); - -/// Migrates the pallet storage to the most recent version. 
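// Illustrative sketch (not part of the patch): the `StartedCleanup` step of `on_idle` above
// is weight-guarded by a `WeightMeter` (one `on_idle_weight()` step per block) and removes
// the old storage under this pallet's prefix in small chunks, resuming from the cursor that
// `clear_prefix` returns. A minimal sketch of that cursor pattern, assuming the same limit
// of 2 keys per call that the pallet uses:
fn cleanup_step_sketch(hashed_prefix: &[u8], cursor: Option<&[u8]>) -> Option<Vec<u8>> {
	let result = frame_support::storage::unhashed::clear_prefix(hashed_prefix, Some(2), cursor);
	// `result.backend` is the number of keys removed in this step; `result.maybe_cursor` is
	// `Some` while keys remain and `None` once the prefix is empty. Because the prefix also
	// contains `MigrationStatus` itself, the pallet re-inserts its own state after each step.
	result.maybe_cursor
}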
-pub struct Migration(PhantomData); - -impl OnRuntimeUpgrade for Migration { - fn on_runtime_upgrade() -> Weight { - let mut weight = T::DbWeight::get().reads(1); - - if StorageVersion::get::>() == 0 { - weight.saturating_accrue(migrate_to_v1::()); - StorageVersion::new(1).put::>(); - weight.saturating_accrue(T::DbWeight::get().writes(1)); - } - - if StorageVersion::get::>() == 1 { - weight.saturating_accrue(migrate_to_v2::()); - StorageVersion::new(2).put::>(); - weight.saturating_accrue(T::DbWeight::get().writes(1)); - } - - weight - } +//! Migrates the storage from the previously deleted DMP pallet. + +use crate::*; +use cumulus_primitives_core::relay_chain::BlockNumber as RelayBlockNumber; +use frame_support::{pallet_prelude::*, storage_alias, traits::HandleMessage}; +use sp_std::vec::Vec; + +pub(crate) const LOG: &str = "runtime::dmp-queue-export-xcms"; + +/// The old `PageIndexData` struct. +#[derive(Copy, Clone, Eq, PartialEq, Default, Encode, Decode, RuntimeDebug, TypeInfo)] +pub struct PageIndexData { + /// The lowest used page index. + pub begin_used: PageCounter, + /// The lowest unused page index. + pub end_used: PageCounter, + /// The number of overweight messages ever recorded (and thus the lowest free index). + pub overweight_count: OverweightIndex, } -mod v0 { +/// The old `MigrationState` type. +pub type OverweightIndex = u64; +/// The old `MigrationState` type. +pub type PageCounter = u32; + +/// The old `PageIndex` storage item. +#[storage_alias] +pub type PageIndex = StorageValue, PageIndexData, ValueQuery>; + +/// The old `Pages` storage item. +#[storage_alias] +pub type Pages = StorageMap< + Pallet, + Blake2_128Concat, + PageCounter, + Vec<(RelayBlockNumber, Vec)>, + ValueQuery, +>; + +/// The old `Overweight` storage item. +#[storage_alias] +pub type Overweight = CountedStorageMap< + Pallet, + Blake2_128Concat, + OverweightIndex, + (RelayBlockNumber, Vec), + OptionQuery, +>; + +pub(crate) mod testing_only { use super::*; - use codec::{Decode, Encode}; - - #[derive(Decode, Encode, Debug)] - pub struct ConfigData { - pub max_individual: u64, - } - impl Default for ConfigData { - fn default() -> Self { - ConfigData { max_individual: 10u64 * WEIGHT_REF_TIME_PER_MILLIS } - } - } + /// This alias is not used by the migration but only for testing. + /// + /// Note that the alias type is wrong on purpose. + #[storage_alias] + pub type Configuration = StorageValue, u32>; } -/// Migrates `QueueConfigData` from v1 (using only reference time weights) to v2 (with -/// 2D weights). -/// -/// NOTE: Only use this function if you know what you're doing. Default to using -/// `migrate_to_latest`. -pub fn migrate_to_v1() -> Weight { - let translate = |pre: v0::ConfigData| -> super::ConfigData { - super::ConfigData { - max_individual: Weight::from_parts(pre.max_individual, DEFAULT_POV_SIZE), - } - }; - - if Configuration::::translate(|pre| pre.map(translate)).is_err() { - log::error!( - target: "dmp_queue", - "unexpected error when performing translation of the QueueConfig type during storage upgrade to v2" - ); +/// Migrates a single page to the `DmpSink`. 
+pub(crate) fn migrate_page(p: PageCounter) -> Result<(), ()> { + let page = Pages::::take(p); + log::debug!(target: LOG, "Migrating page #{p} with {} messages ...", page.len()); + if page.is_empty() { + log::error!(target: LOG, "Page #{p}: EMPTY - storage corrupted?"); + return Err(()) } - T::DbWeight::get().reads_writes(1, 1) -} + for (m, (block, msg)) in page.iter().enumerate() { + let Ok(bound) = BoundedVec::::try_from(msg.clone()) else { + log::error!(target: LOG, "[Page {p}] Message #{m}: TOO LONG - dropping"); + continue + }; -/// Migrates `Overweight` so that it initializes the storage map's counter. -/// -/// NOTE: Only use this function if you know what you're doing. Default to using -/// `migrate_to_latest`. -pub fn migrate_to_v2() -> Weight { - let overweight_messages = Overweight::::initialize_counter() as u64; + T::DmpSink::handle_message(bound.as_bounded_slice()); + log::debug!(target: LOG, "[Page {p}] Migrated message #{m} from block {block}"); + } - T::DbWeight::get().reads_writes(overweight_messages, 1) + Ok(()) } -#[cfg(test)] -mod tests { - use super::*; - use crate::tests::{new_test_ext, Test}; - - #[test] - fn test_migration_to_v1() { - let v0 = v0::ConfigData { max_individual: 30_000_000_000 }; - - new_test_ext().execute_with(|| { - frame_support::storage::unhashed::put_raw( - &crate::Configuration::::hashed_key(), - &v0.encode(), - ); - - migrate_to_v1::(); +/// Migrates a single overweight message to the `DmpSink`. +pub(crate) fn migrate_overweight(i: OverweightIndex) -> Result<(), ()> { + let Some((block, msg)) = Overweight::::take(i) else { + log::error!(target: LOG, "[Overweight {i}] Message: EMPTY - storage corrupted?"); + return Err(()) + }; + let Ok(bound) = BoundedVec::::try_from(msg) else { + log::error!(target: LOG, "[Overweight {i}] Message: TOO LONG - dropping"); + return Err(()) + }; - let v1 = crate::Configuration::::get(); + T::DmpSink::handle_message(bound.as_bounded_slice()); + log::debug!(target: LOG, "[Overweight {i}] Migrated message from block {block}"); - assert_eq!(v0.max_individual, v1.max_individual.ref_time()); - assert_eq!(v1.max_individual.proof_size(), DEFAULT_POV_SIZE); - }); - } + Ok(()) } diff --git a/cumulus/pallets/dmp-queue/src/mock.rs b/cumulus/pallets/dmp-queue/src/mock.rs new file mode 100644 index 0000000000000000000000000000000000000000..6bd74a047eb7459b309705d5f4d5a7d2e1e775f4 --- /dev/null +++ b/cumulus/pallets/dmp-queue/src/mock.rs @@ -0,0 +1,83 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![cfg(test)] + +use frame_support::{ + derive_impl, parameter_types, + traits::{HandleMessage, QueueFootprint}, +}; +use sp_core::{bounded_vec::BoundedSlice, ConstU32}; +use sp_runtime::traits::IdentityLookup; + +type Block = frame_system::mocking::MockBlock; + +// Configure a mock runtime to test the pallet. 
+frame_support::construct_runtime!( + pub enum Runtime + { + System: frame_system, + DmpQueue: crate, + } +); + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for Runtime { + type Block = Block; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type Lookup = IdentityLookup; + type RuntimeEvent = RuntimeEvent; + type PalletInfo = PalletInfo; +} + +impl crate::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type DmpSink = RecordingDmpSink; + type WeightInfo = (); +} + +parameter_types! { + /// All messages that came into the `DmpSink`. + pub static RecordedMessages: Vec> = vec![]; +} + +/// Can be used as [`Config::DmpSink`] to record all messages that came in. +pub struct RecordingDmpSink; +impl HandleMessage for RecordingDmpSink { + type MaxMessageLen = ConstU32<16>; + + fn handle_message(msg: BoundedSlice) { + RecordedMessages::mutate(|n| n.push(msg.to_vec())); + } + + fn handle_messages<'a>(_: impl Iterator>) { + unimplemented!() + } + + fn sweep_queue() { + unimplemented!() + } + + fn footprint() -> QueueFootprint { + unimplemented!() + } +} + +pub fn new_test_ext() -> sp_io::TestExternalities { + sp_io::TestExternalities::new(Default::default()) +} diff --git a/cumulus/pallets/dmp-queue/src/tests.rs b/cumulus/pallets/dmp-queue/src/tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..a157d0584f2550bb9cacac730a0e942e3c91503e --- /dev/null +++ b/cumulus/pallets/dmp-queue/src/tests.rs @@ -0,0 +1,234 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Test the lazy migration. + +#![cfg(test)] + +use super::{migration::*, mock::*}; +use crate::*; + +use frame_support::{ + pallet_prelude::*, + traits::{OnFinalize, OnIdle, OnInitialize}, + StorageNoopGuard, +}; + +#[test] +fn migration_works() { + let mut ext = new_test_ext(); + ext.execute_with(|| { + sp_tracing::try_init_simple(); + // Insert some storage: + PageIndex::::set(PageIndexData { + begin_used: 10, + end_used: 20, + overweight_count: 5, + }); + for p in 10..20 { + let msgs = (0..16).map(|i| (p, vec![i as u8; 1])).collect::>(); + Pages::::insert(p, msgs); + } + for i in 0..5 { + Overweight::::insert(i, (0, vec![i as u8; 1])); + } + testing_only::Configuration::::put(123); + }); + // We need to commit, otherwise the keys are removed from the overlay; not the backend. 
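A short aside on the `commit_all` comment above (standalone sketch, not patch code): inside a single `execute_with`, every write lives in the externalities overlay, so a later deletion in the same overlay just cancels the write instead of removing a key from the backend. Committing between the setup block and the migration blocks is what makes the later removal checks exercise real backend keys.

use sp_io::TestExternalities;

fn overlay_vs_backend() {
    let mut ext = TestExternalities::new(Default::default());

    ext.execute_with(|| sp_io::storage::set(b"key", b"value"));
    // Push the overlay changes into the backend; without this, the write above
    // would never become a backend key that the next block has to delete.
    ext.commit_all().unwrap();

    ext.execute_with(|| {
        assert_eq!(sp_io::storage::get(b"key").as_deref(), Some(&b"value"[..]));
        sp_io::storage::clear(b"key");
    });
}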
+ ext.commit_all().unwrap(); + ext.execute_with(|| { + // Run one step of the migration: + pre_upgrade_checks::(); + run_to_block(1); + // First we expect a StartedExport event: + assert_only_event(Event::StartedExport); + + // Then we expect 10 Exported events: + for page in 0..10 { + run_to_block(2 + page); + assert_only_event(Event::Exported { page: page as u32 + 10 }); + assert!(!Pages::::contains_key(page as u32), "Page is gone"); + assert_eq!( + MigrationStatus::::get(), + MigrationState::StartedExport { next_begin_used: page as u32 + 11 } + ); + } + + // Then we expect a CompletedExport event: + run_to_block(12); + assert_only_event(Event::CompletedExport); + assert_eq!(MigrationStatus::::get(), MigrationState::CompletedExport); + + // Then we expect a StartedOverweightExport event: + run_to_block(13); + assert_only_event(Event::StartedOverweightExport); + assert_eq!( + MigrationStatus::::get(), + MigrationState::StartedOverweightExport { next_overweight_index: 0 } + ); + + // Then we expect 5 ExportedOverweight events: + for index in 0..5 { + run_to_block(14 + index); + assert_only_event(Event::ExportedOverweight { index }); + assert!(!Overweight::::contains_key(index), "Overweight msg is gone"); + assert_eq!( + MigrationStatus::::get(), + MigrationState::StartedOverweightExport { next_overweight_index: index + 1 } + ); + } + + // Then we expect a CompletedOverweightExport event: + run_to_block(19); + assert_only_event(Event::CompletedOverweightExport); + assert_eq!(MigrationStatus::::get(), MigrationState::CompletedOverweightExport); + + // Then we expect a StartedCleanup event: + run_to_block(20); + assert_only_event(Event::StartedCleanup); + assert_eq!( + MigrationStatus::::get(), + MigrationState::StartedCleanup { cursor: None } + ); + }); + ext.commit_all().unwrap(); + // Then it cleans up the remaining storage items: + ext.execute_with(|| { + run_to_block(21); + assert_only_event(Event::CleanedSome { keys_removed: 2 }); + }); + ext.commit_all().unwrap(); + ext.execute_with(|| { + run_to_block(22); + assert_only_event(Event::CleanedSome { keys_removed: 2 }); + }); + ext.commit_all().unwrap(); + ext.execute_with(|| { + run_to_block(24); + assert_eq!( + System::events().into_iter().map(|e| e.event).collect::>(), + vec![ + Event::CleanedSome { keys_removed: 2 }.into(), + Event::Completed { error: false }.into() + ] + ); + System::reset_events(); + assert_eq!(MigrationStatus::::get(), MigrationState::Completed); + + post_upgrade_checks::(); + assert_eq!(RecordedMessages::take().len(), 10 * 16 + 5); + + // Test the storage removal: + assert!(!PageIndex::::exists()); + assert!(!testing_only::Configuration::::exists()); + assert_eq!(Pages::::iter_keys().count(), 0); + assert_eq!(Overweight::::iter_keys().count(), 0); + + // The `MigrationStatus` never disappears and there are no more storage changes: + { + let _g = StorageNoopGuard::default(); + + run_to_block(100); + assert_eq!(MigrationStatus::::get(), MigrationState::Completed); + assert!(System::events().is_empty()); + // ... besides the block number + System::set_block_number(24); + } + }); +} + +/// Too long messages are dropped by the migration. 
+#[test] +fn migration_too_long_ignored() { + new_test_ext().execute_with(|| { + // Setup the storage: + PageIndex::::set(PageIndexData { + begin_used: 10, + end_used: 11, + overweight_count: 2, + }); + + let short = vec![1; 16]; + let long = vec![0; 17]; + Pages::::insert(10, vec![(10, short.clone()), (10, long.clone())]); + // Insert one good and one bad overweight msg: + Overweight::::insert(0, (0, short.clone())); + Overweight::::insert(1, (0, long.clone())); + + // Run the migration: + pre_upgrade_checks::(); + run_to_block(100); + post_upgrade_checks::(); + + assert_eq!(RecordedMessages::take(), vec![short.clone(), short]); + + // Test the storage removal: + assert!(!PageIndex::::exists()); + assert_eq!(Pages::::iter_keys().count(), 0); + assert_eq!(Overweight::::iter_keys().count(), 0); + }); +} + +fn run_to_block(n: u64) { + assert!(n > System::block_number(), "Cannot go back in time"); + + while System::block_number() < n { + AllPalletsWithSystem::on_finalize(System::block_number()); + System::set_block_number(System::block_number() + 1); + AllPalletsWithSystem::on_initialize(System::block_number()); + AllPalletsWithSystem::on_idle(System::block_number(), Weight::MAX); + } +} + +fn assert_only_event(e: Event) { + assert_eq!(System::events().pop().expect("Event expected").event, e.clone().into()); + assert_eq!(System::events().len(), 1, "Got events: {:?} but wanted {:?}", System::events(), e); + System::reset_events(); +} + +/// TESTING ONLY +fn pre_upgrade_checks() { + let index = PageIndex::::get(); + + // Check that all pages are present. + assert!(index.begin_used <= index.end_used, "Invalid page index"); + for p in index.begin_used..index.end_used { + assert!(Pages::::contains_key(p), "Missing page"); + assert!(Pages::::get(p).len() > 0, "Empty page"); + } + + // Check that all overweight messages are present. + for i in 0..index.overweight_count { + assert!(Overweight::::contains_key(i), "Missing overweight message"); + } +} + +/// TESTING ONLY +fn post_upgrade_checks() { + let index = PageIndex::::get(); + + // Check that all pages are removed. + for p in index.begin_used..index.end_used { + assert!(!Pages::::contains_key(p), "Page should be gone"); + } + assert!(Pages::::iter_keys().next().is_none(), "Un-indexed pages"); + + // Check that all overweight messages are removed. + for i in 0..index.overweight_count { + assert!(!Overweight::::contains_key(i), "Overweight message should be gone"); + } + assert!(Overweight::::iter_keys().next().is_none(), "Un-indexed overweight messages"); +} diff --git a/cumulus/pallets/dmp-queue/src/weights.rs b/cumulus/pallets/dmp-queue/src/weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..e095d46a0739dd251a6bafef77c37374e6dc4f64 --- /dev/null +++ b/cumulus/pallets/dmp-queue/src/weights.rs @@ -0,0 +1,222 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `cumulus_pallet_dmp_queue` +//! +//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-09-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `Olivers-MacBook-Pro.local`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: `1024` + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --pallet +// cumulus-pallet-dmp-queue +// --chain +// asset-hub-kusama-dev +// --output +// cumulus/pallets/dmp-queue/src/weights.rs +// --template +// substrate/.maintain/frame-weight-template.hbs +// --extrinsic +// + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `cumulus_pallet_dmp_queue`. +pub trait WeightInfo { + fn on_idle_good_msg() -> Weight; + fn on_idle_large_msg() -> Weight; + fn on_idle_overweight_good_msg() -> Weight; + fn on_idle_overweight_large_msg() -> Weight; +} + +/// Weights for `cumulus_pallet_dmp_queue` using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn on_idle_good_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65696` + // Estimated: `69161` + // Minimum execution time: 124_651_000 picoseconds. + Weight::from_parts(127_857_000, 0) + .saturating_add(Weight::from_parts(0, 69161)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + fn on_idle_large_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65659` + // Estimated: `69124` + // Minimum execution time: 65_684_000 picoseconds. 
+ Weight::from_parts(68_039_000, 0) + .saturating_add(Weight::from_parts(0, 69124)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn on_idle_overweight_good_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65726` + // Estimated: `69191` + // Minimum execution time: 117_657_000 picoseconds. + Weight::from_parts(122_035_000, 0) + .saturating_add(Weight::from_parts(0, 69191)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(6)) + } + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + fn on_idle_overweight_large_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65689` + // Estimated: `69154` + // Minimum execution time: 59_799_000 picoseconds. + Weight::from_parts(61_354_000, 0) + .saturating_add(Weight::from_parts(0, 69154)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } +} + +// For backwards compatibility and tests. 
+impl WeightInfo for () { + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn on_idle_good_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65696` + // Estimated: `69161` + // Minimum execution time: 124_651_000 picoseconds. + Weight::from_parts(127_857_000, 0) + .saturating_add(Weight::from_parts(0, 69161)) + .saturating_add(RocksDbWeight::get().reads(5)) + .saturating_add(RocksDbWeight::get().writes(5)) + } + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + fn on_idle_large_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65659` + // Estimated: `69124` + // Minimum execution time: 65_684_000 picoseconds. 
+ Weight::from_parts(68_039_000, 0) + .saturating_add(Weight::from_parts(0, 69124)) + .saturating_add(RocksDbWeight::get().reads(3)) + .saturating_add(RocksDbWeight::get().writes(2)) + } + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn on_idle_overweight_good_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65726` + // Estimated: `69191` + // Minimum execution time: 117_657_000 picoseconds. + Weight::from_parts(122_035_000, 0) + .saturating_add(Weight::from_parts(0, 69191)) + .saturating_add(RocksDbWeight::get().reads(6)) + .saturating_add(RocksDbWeight::get().writes(6)) + } + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + fn on_idle_overweight_large_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65689` + // Estimated: `69154` + // Minimum execution time: 59_799_000 picoseconds. 
+ Weight::from_parts(61_354_000, 0) + .saturating_add(Weight::from_parts(0, 69154)) + .saturating_add(RocksDbWeight::get().reads(4)) + .saturating_add(RocksDbWeight::get().writes(3)) + } +} diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml index 5470dce47480e3d5f3287ec078f886eb0302aaa9..5600c95a2a6058dba008a9a28cff7c53b02ed4cd 100644 --- a/cumulus/pallets/parachain-system/Cargo.toml +++ b/cumulus/pallets/parachain-system/Cargo.toml @@ -4,6 +4,7 @@ version = "0.1.0" authors.workspace = true edition.workspace = true description = "Base pallet for cumulus-based parachains" +license = "Apache-2.0" [dependencies] bytes = { version = "1.4.0", default-features = false } @@ -12,11 +13,13 @@ environmental = { version = "1.1.4", default-features = false } impl-trait-for-tuples = "0.2.1" log = { version = "0.4.20", default-features = false } trie-db = { version = "0.28.0", default-features = false } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } # Substrate +frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true} frame-support = { path = "../../../substrate/frame/support", default-features = false} frame-system = { path = "../../../substrate/frame/system", default-features = false} +pallet-message-queue = { path = "../../../substrate/frame/message-queue", default-features = false } sp-core = { path = "../../../substrate/primitives/core", default-features = false} sp-externalities = { path = "../../../substrate/primitives/externalities", default-features = false} sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false} @@ -29,6 +32,7 @@ sp-version = { path = "../../../substrate/primitives/version", default-features # Polkadot polkadot-parachain-primitives = { path = "../../../polkadot/parachain", default-features = false, features = [ "wasm-api" ]} +polkadot-runtime-parachains = { path = "../../../polkadot/runtime/parachains", default-features = false } xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false} # Cumulus @@ -40,6 +44,8 @@ cumulus-primitives-parachain-inherent = { path = "../../primitives/parachain-inh assert_matches = "1.5" hex-literal = "0.4.1" lazy_static = "1.4" +rand = "0.8.5" +futures = "0.3.28" # Substrate sc-client-api = { path = "../../../substrate/client/api" } @@ -54,15 +60,19 @@ cumulus-test-relay-sproof-builder = { path = "../../test/relay-sproof-builder" } [features] default = [ "std" ] std = [ + "bytes/std", "codec/std", "cumulus-pallet-parachain-system-proc-macro/std", "cumulus-primitives-core/std", "cumulus-primitives-parachain-inherent/std", "environmental/std", + "frame-benchmarking/std", "frame-support/std", "frame-system/std", "log/std", + "pallet-message-queue/std", "polkadot-parachain-primitives/std", + "polkadot-runtime-parachains/std", "scale-info/std", "sp-core/std", "sp-externalities/std", @@ -71,21 +81,29 @@ std = [ "sp-runtime/std", "sp-state-machine/std", "sp-std/std", + "sp-tracing/std", "sp-trie/std", "trie-db/std", "xcm/std", ] runtime-benchmarks = [ + "cumulus-primitives-core/runtime-benchmarks", + "cumulus-test-client/runtime-benchmarks", + "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", + "pallet-message-queue/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", + 
"polkadot-runtime-parachains/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", + "pallet-message-queue/try-runtime", + "polkadot-runtime-parachains/try-runtime", "sp-runtime/try-runtime", ] diff --git a/cumulus/pallets/parachain-system/proc-macro/Cargo.toml b/cumulus/pallets/parachain-system/proc-macro/Cargo.toml index cb5d9904c7cf3c37e4a0d8fe13fc175888cad20d..6accfa92c572c57193cbe975d731664b26d623aa 100644 --- a/cumulus/pallets/parachain-system/proc-macro/Cargo.toml +++ b/cumulus/pallets/parachain-system/proc-macro/Cargo.toml @@ -4,6 +4,7 @@ version = "0.1.0" authors.workspace = true edition.workspace = true description = "Proc macros provided by the parachain-system pallet" +license = "Apache-2.0" [lib] proc-macro = true diff --git a/cumulus/pallets/parachain-system/src/benchmarking.rs b/cumulus/pallets/parachain-system/src/benchmarking.rs new file mode 100644 index 0000000000000000000000000000000000000000..5cde8eb5b78857618d7e6a44f777fe7f19305929 --- /dev/null +++ b/cumulus/pallets/parachain-system/src/benchmarking.rs @@ -0,0 +1,68 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Benchmarking for the parachain-system pallet. + +#![cfg(feature = "runtime-benchmarks")] + +use super::*; +use cumulus_primitives_core::relay_chain::Hash as RelayHash; +use frame_benchmarking::v2::*; +use sp_runtime::traits::BlakeTwo256; + +#[benchmarks] +mod benchmarks { + use super::*; + + /// Enqueue `n` messages via `enqueue_inbound_downward_messages`. + /// + /// The limit is set to `1000` for benchmarking purposes as the actual limit is only known at + /// runtime. However, the limit (and default) for Dotsama are magnitudes smaller. + #[benchmark] + fn enqueue_inbound_downward_messages(n: Linear<0, 1000>) { + let msg = InboundDownwardMessage { + sent_at: n, // The block number does not matter. + msg: vec![0u8; MaxDmpMessageLenOf::::get() as usize], + }; + let msgs = vec![msg; n as usize]; + let head = mqp_head(&msgs); + + #[block] + { + Pallet::::enqueue_inbound_downward_messages(head, msgs); + } + + assert_eq!(ProcessedDownwardMessages::::get(), n); + assert_eq!(LastDmqMqcHead::::get().head(), head); + } + + /// Re-implements an easy version of the `MessageQueueChain` for testing purposes. + fn mqp_head(msgs: &Vec) -> RelayHash { + let mut head = Default::default(); + for msg in msgs.iter() { + let msg_hash = BlakeTwo256::hash_of(&msg.msg); + head = BlakeTwo256::hash_of(&(head, msg.sent_at, msg_hash)); + } + head + } + + impl_benchmark_test_suite! 
{ + Pallet, + crate::mock::new_test_ext(), + crate::mock::Test + } +} diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index eaf15768e290de18e7d7c22cb9ca8abec9afa53d..bac1ee28a7ca3abe17bea66ad3bac4830d62bdc1 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -29,22 +29,23 @@ use codec::{Decode, Encode, MaxEncodedLen}; use cumulus_primitives_core::{ - relay_chain, AbridgedHostConfiguration, ChannelStatus, CollationInfo, DmpMessageHandler, + relay_chain, AbridgedHostConfiguration, ChannelInfo, ChannelStatus, CollationInfo, GetChannelInfo, InboundDownwardMessage, InboundHrmpMessage, MessageSendError, OutboundHrmpMessage, ParaId, PersistedValidationData, UpwardMessage, UpwardMessageSender, XcmpMessageHandler, XcmpMessageSource, }; use cumulus_primitives_parachain_inherent::{MessageQueueChain, ParachainInherentData}; use frame_support::{ + defensive, dispatch::{DispatchResult, Pays, PostDispatchInfo}, ensure, inherent::{InherentData, InherentIdentifier, ProvideInherent}, - storage, - traits::Get, + traits::{Get, HandleMessage}, weights::Weight, }; use frame_system::{ensure_none, ensure_root, pallet_prelude::HeaderFor}; use polkadot_parachain_primitives::primitives::RelayChainBlockNumber; +use polkadot_runtime_parachains::FeeTracker; use scale_info::TypeInfo; use sp_runtime::{ traits::{Block as BlockT, BlockNumberProvider, Hash}, @@ -52,15 +53,20 @@ use sp_runtime::{ InvalidTransaction, TransactionLongevity, TransactionSource, TransactionValidity, ValidTransaction, }, - DispatchError, RuntimeDebug, + BoundedSlice, DispatchError, FixedU128, RuntimeDebug, Saturating, }; use sp_std::{cmp, collections::btree_map::BTreeMap, prelude::*}; use xcm::latest::XcmHash; +mod benchmarking; pub mod migration; - +mod mock; #[cfg(test)] mod tests; +pub mod weights; + +pub use weights::WeightInfo; + mod unincluded_segment; pub mod consensus_hook; @@ -177,6 +183,23 @@ where check_version: bool, } +/// The max length of a DMP message. +pub type MaxDmpMessageLenOf = <::DmpQueue as HandleMessage>::MaxMessageLen; + +pub mod ump_constants { + use super::FixedU128; + + /// `host_config.max_upward_queue_size / THRESHOLD_FACTOR` is the threshold after which delivery + /// starts getting exponentially more expensive. + /// `2` means the price starts to increase when queue is half full. + pub const THRESHOLD_FACTOR: u32 = 2; + /// The base number the delivery fee factor gets multiplied by every time it is increased. + /// Also the number it gets divided by when decreased. + pub const EXPONENTIAL_FEE_BASE: FixedU128 = FixedU128::from_rational(105, 100); // 1.05 + /// The base number message size in KB is multiplied by before increasing the fee factor. + pub const MESSAGE_SIZE_FEE_BASE: FixedU128 = FixedU128::from_rational(1, 1000); // 0.001 +} + #[frame_support::pallet] pub mod pallet { use super::*; @@ -202,17 +225,18 @@ pub mod pallet { /// The place where outbound XCMP messages come from. This is queried in `finalize_block`. type OutboundXcmpMessageSource: XcmpMessageSource; - /// The message handler that will be invoked when messages are received via DMP. - type DmpMessageHandler: DmpMessageHandler; + /// Queues inbound downward messages for delayed processing. + /// + /// All inbound DMP messages from the relay are pushed into this. The handler is expected to + /// eventually process all the messages that are pushed to it. 
+ type DmpQueue: HandleMessage; /// The weight we reserve at the beginning of the block for processing DMP messages. type ReservedDmpWeight: Get; /// The message handler that will be invoked when messages are received via XCMP. /// - /// The messages are dispatched in the order they were relayed by the relay chain. If - /// multiple messages were relayed at one block, these will be dispatched in ascending - /// order of the sender's para ID. + /// This should normally link to the XCMP Queue pallet. type XcmpMessageHandler: XcmpMessageHandler; /// The weight we reserve at the beginning of the block for processing XCMP messages. @@ -221,6 +245,9 @@ pub mod pallet { /// Something that can check the associated relay parent block number. type CheckAssociatedRelayNumber: CheckAssociatedRelayNumber; + /// Weight info for functions and calls. + type WeightInfo: WeightInfo; + /// An entry-point for higher-level logic to manage the backlog of unincluded parachain /// blocks and authorship rights for those blocks. /// @@ -240,6 +267,10 @@ pub mod pallet { #[pallet::hooks] impl Hooks> for Pallet { + /// Handles actually sending upward messages by moving them from `PendingUpwardMessages` to + /// `UpwardMessages`. Decreases the delivery fee factor if after sending messages, the queue + /// total size is less than the threshold (see [`ump_constants::THRESHOLD_FACTOR`]). + /// Also does the sending for HRMP messages it takes from `OutboundXcmpMessageSource`. fn on_finalize(_: BlockNumberFor) { >::kill(); >::kill(); @@ -326,6 +357,17 @@ pub mod pallet { UpwardMessages::::put(&up[..num as usize]); *up = up.split_off(num as usize); + // If the total size of the pending messages is less than the threshold, + // we decrease the fee factor, since the queue is less congested. + // This makes delivery of new messages cheaper. + let threshold = host_config + .max_upward_queue_size + .saturating_div(ump_constants::THRESHOLD_FACTOR); + let remaining_total_size: usize = up.iter().map(UpwardMessage::len).sum(); + if remaining_total_size <= threshold as usize { + Self::decrease_fee_factor(()); + } + (num, total_size) }); @@ -568,7 +610,7 @@ pub mod pallet { ); let validation_code = >::take(); - Self::put_parachain_code(&validation_code); + frame_system::Pallet::::update_code_in_storage(&validation_code); ::on_validation_code_applied(); Self::deposit_event(Event::ValidationFunctionApplied { relay_chain_block_num: vfp.relay_parent_number, @@ -602,15 +644,15 @@ pub mod pallet { ::on_validation_data(&vfp); - total_weight += Self::process_inbound_downward_messages( + total_weight.saturating_accrue(Self::enqueue_inbound_downward_messages( relevant_messaging_state.dmq_mqc_head, downward_messages, - ); - total_weight += Self::process_inbound_horizontal_messages( + )); + total_weight.saturating_accrue(Self::enqueue_inbound_horizontal_messages( &relevant_messaging_state.ingress_channels, horizontal_messages, vfp.relay_parent_number, - ); + )); Ok(PostDispatchInfo { actual_weight: Some(total_weight), pays_fee: Pays::No }) } @@ -721,7 +763,7 @@ pub mod pallet { StorageValue<_, Vec>, ValueQuery>; /// Storage field that keeps track of bandwidth used by the unincluded segment along with the - /// latest the latest HRMP watermark. Used for limiting the acceptance of new blocks with + /// latest HRMP watermark. Used for limiting the acceptance of new blocks with /// respect to relay chain constraints. 
#[pallet::storage] pub(super) type AggregatedUnincludedSegment = @@ -860,6 +902,17 @@ pub mod pallet { pub(super) type PendingUpwardMessages = StorageValue<_, Vec, ValueQuery>; + /// Initialization value for the delivery fee factor for UMP. + #[pallet::type_value] + pub fn UpwardInitialDeliveryFeeFactor() -> FixedU128 { + FixedU128::from_u32(1) + } + + /// The factor to multiply the base delivery fee by for UMP. + #[pallet::storage] + pub(super) type UpwardDeliveryFeeFactor = + StorageValue<_, FixedU128, ValueQuery, UpwardInitialDeliveryFeeFactor>; + /// The number of HRMP messages we observed in `on_initialize` and thus used that number for /// announcing the weight of `on_initialize` and `on_finalize`. #[pallet::storage] @@ -976,6 +1029,31 @@ impl Pallet { } } +impl FeeTracker for Pallet { + type Id = (); + + fn get_fee_factor(_: Self::Id) -> FixedU128 { + UpwardDeliveryFeeFactor::::get() + } + + fn increase_fee_factor(_: Self::Id, message_size_factor: FixedU128) -> FixedU128 { + >::mutate(|f| { + *f = f.saturating_mul( + ump_constants::EXPONENTIAL_FEE_BASE.saturating_add(message_size_factor), + ); + *f + }) + } + + fn decrease_fee_factor(_: Self::Id) -> FixedU128 { + >::mutate(|f| { + *f = + UpwardInitialDeliveryFeeFactor::get().max(*f / ump_constants::EXPONENTIAL_FEE_BASE); + *f + }) + } +} + impl GetChannelInfo for Pallet { fn get_channel_status(id: ParaId) -> ChannelStatus { // Note, that we are using `relevant_messaging_state` which may be from the previous @@ -1019,10 +1097,17 @@ impl GetChannelInfo for Pallet { ChannelStatus::Ready(max_size_now as usize, max_size_ever as usize) } - fn get_channel_max(id: ParaId) -> Option { + fn get_channel_info(id: ParaId) -> Option { let channels = Self::relevant_messaging_state()?.egress_channels; let index = channels.binary_search_by_key(&id, |item| item.0).ok()?; - Some(channels[index].1.max_message_size as usize) + let info = ChannelInfo { + max_capacity: channels[index].1.max_capacity, + max_total_size: channels[index].1.max_total_size, + max_message_size: channels[index].1.max_message_size, + msg_count: channels[index].1.msg_count, + total_size: channels[index].1.total_size, + }; + Some(info) } } @@ -1058,7 +1143,7 @@ impl Pallet { // inherent. } - /// Process all inbound downward messages relayed by the collator. + /// Enqueue all inbound downward messages relayed by the collator into the MQ pallet. /// /// Checks if the sequence of the messages is valid, dispatches them and communicates the /// number of processed messages to the collator via a storage update. @@ -1067,26 +1152,33 @@ impl Pallet { /// /// If it turns out that after processing all messages the Message Queue Chain /// hash doesn't match the expected. 
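For orientation, a standalone numeric sketch of the delivery-fee mechanics wired up above and used by `send_upward_message` further down. The free function and local constants only mirror `ump_constants` and `FeeTracker::increase_fee_factor`; they are illustrative, not pallet code. Once the pending UMP queue exceeds `max_upward_queue_size / THRESHOLD_FACTOR`, every new message multiplies the factor by `EXPONENTIAL_FEE_BASE` plus a per-KiB term, and `on_finalize` divides by the same base (never below the initial 1.0) once the queue drains.

use sp_runtime::{FixedU128, Saturating};

// Mirrors `ump_constants::EXPONENTIAL_FEE_BASE` and `MESSAGE_SIZE_FEE_BASE`.
const EXPONENTIAL_FEE_BASE: FixedU128 = FixedU128::from_rational(105, 100); // 1.05
const MESSAGE_SIZE_FEE_BASE: FixedU128 = FixedU128::from_rational(1, 1000); // 0.001

/// One increase step for a message of `message_len` bytes while congested.
/// E.g. a 10 KiB message adds 10 * 0.001 = 0.01, so the factor grows by 1.06x.
fn bump_fee_factor(factor: FixedU128, message_len: usize) -> FixedU128 {
    let message_size_factor =
        FixedU128::from((message_len / 1024) as u128).saturating_mul(MESSAGE_SIZE_FEE_BASE);
    factor.saturating_mul(EXPONENTIAL_FEE_BASE.saturating_add(message_size_factor))
}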
- fn process_inbound_downward_messages( + fn enqueue_inbound_downward_messages( expected_dmq_mqc_head: relay_chain::Hash, downward_messages: Vec, ) -> Weight { let dm_count = downward_messages.len() as u32; let mut dmq_head = >::get(); - let mut weight_used = Weight::zero(); + let weight_used = T::WeightInfo::enqueue_inbound_downward_messages(dm_count); if dm_count != 0 { Self::deposit_event(Event::DownwardMessagesReceived { count: dm_count }); - let max_weight = - >::get().unwrap_or_else(T::ReservedDmpWeight::get); - - let message_iter = downward_messages - .into_iter() - .inspect(|m| { - dmq_head.extend_downward(m); - }) - .map(|m| (m.sent_at, m.msg)); - weight_used += T::DmpMessageHandler::handle_dmp_messages(message_iter, max_weight); + + // Eagerly update the MQC head hash: + for m in &downward_messages { + dmq_head.extend_downward(m); + } + let bounded = downward_messages + .iter() + // Note: we are not using `.defensive()` here since that prints the whole value to + // console. In case that the message is too long, this clogs up the log quite badly. + .filter_map(|m| match BoundedSlice::try_from(&m.msg[..]) { + Ok(bounded) => Some(bounded), + Err(_) => { + defensive!("Inbound Downward message was too long; dropping"); + None + }, + }); + T::DmpQueue::handle_messages(bounded); >::put(&dmq_head); Self::deposit_event(Event::DownwardMessagesProcessed { @@ -1109,14 +1201,15 @@ impl Pallet { /// Process all inbound horizontal messages relayed by the collator. /// - /// This is similar to `Pallet::process_inbound_downward_messages`, but works on multiple - /// inbound channels. + /// This is similar to [`enqueue_inbound_downward_messages`], but works with multiple inbound + /// channels. It immediately dispatches signals and queues all other XCMs. Blob messages are + /// ignored. /// /// **Panics** if either any of horizontal messages submitted by the collator was sent from /// a para which has no open channel to this parachain or if after processing /// messages across all inbound channels MQCs were obtained which do not /// correspond to the ones found on the relay-chain. - fn process_inbound_horizontal_messages( + fn enqueue_inbound_horizontal_messages( ingress_channels: &[(ParaId, cumulus_primitives_core::AbridgedHrmpChannel)], horizontal_messages: BTreeMap>, relay_parent_number: relay_chain::BlockNumber, @@ -1326,12 +1419,6 @@ impl Pallet { >::put(true); } - /// Put a new validation function into a particular location where this - /// parachain will execute it on subsequent blocks. - fn put_parachain_code(code: &[u8]) { - storage::unhashed::put_raw(sp_core::storage::well_known_keys::CODE, code); - } - /// The maximum code size permitted, in bytes. /// /// Returns `None` if the relay chain parachain host configuration hasn't been submitted yet. @@ -1427,6 +1514,23 @@ impl Pallet { }) } + /// Open HRMP channel for using it in benchmarks or tests. + /// + /// The caller assumes that the pallet will accept regular outbound message to the sibling + /// `target_parachain` after this call. No other assumptions are made. 
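To make the eager `extend_downward` loop above easier to follow, here is the chaining rule it advances, written out the same way as the `mqp_head` helper in the benchmarking module (standalone sketch with std, not patch code): the head is an order-dependent running hash, so matching the relay-provided expected head pins down both the content and the order of the whole batch.

use cumulus_primitives_core::InboundDownwardMessage;
use sp_core::H256;
use sp_runtime::traits::{BlakeTwo256, Hash};

/// Fold a batch of downward messages into an MQC head:
/// `head = H(head, sent_at, H(msg))`, applied message by message in order.
fn mqc_head(msgs: &[InboundDownwardMessage]) -> H256 {
    let mut head = H256::default();
    for m in msgs {
        let msg_hash = BlakeTwo256::hash_of(&m.msg);
        head = BlakeTwo256::hash_of(&(head, m.sent_at, msg_hash));
    }
    head
}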
+ #[cfg(any(feature = "runtime-benchmarks", feature = "std"))] + pub fn open_custom_outbound_hrmp_channel_for_benchmarks_or_tests( + target_parachain: ParaId, + channel: cumulus_primitives_core::AbridgedHrmpChannel, + ) { + RelevantMessagingState::::put(MessagingStateSnapshot { + dmq_mqc_head: Default::default(), + relay_dispatch_queue_remaining_capacity: Default::default(), + ingress_channels: Default::default(), + egress_channels: vec![(target_parachain, channel)], + }) + } + /// Prepare/insert relevant data for `schedule_code_upgrade` for benchmarks. #[cfg(feature = "runtime-benchmarks")] pub fn initialize_for_set_code_benchmark(max_code_size: u32) { @@ -1468,7 +1572,13 @@ impl frame_system::SetCode for ParachainSetCode { } impl Pallet { + /// Puts a message in the `PendingUpwardMessages` storage item. + /// The message will be later sent in `on_finalize`. + /// Checks host configuration to see if message is too big. + /// Increases the delivery fee factor if the queue is sufficiently (see + /// [`ump_constants::THRESHOLD_FACTOR`]) congested. pub fn send_upward_message(message: UpwardMessage) -> Result<(u32, XcmHash), MessageSendError> { + let message_len = message.len(); // Check if the message fits into the relay-chain constraints. // // Note, that we are using `host_configuration` here which may be from the previous @@ -1482,9 +1592,22 @@ impl Pallet { // // However, changing this setting is expected to be rare. if let Some(cfg) = Self::host_configuration() { - if message.len() > cfg.max_upward_message_size as usize { + if message_len > cfg.max_upward_message_size as usize { return Err(MessageSendError::TooBig) } + let threshold = + cfg.max_upward_queue_size.saturating_div(ump_constants::THRESHOLD_FACTOR); + // We check the threshold against total size and not number of messages since messages + // could be big or small. + >::append(message.clone()); + let pending_messages = PendingUpwardMessages::::get(); + let total_size: usize = pending_messages.iter().map(UpwardMessage::len).sum(); + if total_size > threshold as usize { + // We increase the fee factor by a factor based on the new message's size in KB + let message_size_factor = FixedU128::from((message_len / 1024) as u128) + .saturating_mul(ump_constants::MESSAGE_SIZE_FEE_BASE); + Self::increase_fee_factor((), message_size_factor); + } } else { // This storage field should carry over from the previous block. So if it's None // then it must be that this is an edge-case where a message is attempted to be @@ -1495,8 +1618,8 @@ impl Pallet { // returned back to the sender. // // Thus fall through here. + >::append(message.clone()); }; - >::append(message.clone()); // The relay ump does not use using_encoded // We apply the same this to use the same hash diff --git a/cumulus/pallets/parachain-system/src/mock.rs b/cumulus/pallets/parachain-system/src/mock.rs new file mode 100644 index 0000000000000000000000000000000000000000..b76553a53841dcb131e7f1a7f4017987250c739c --- /dev/null +++ b/cumulus/pallets/parachain-system/src/mock.rs @@ -0,0 +1,485 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Test setup and helpers. + +#![cfg(test)] + +use super::*; + +use codec::Encode; +use cumulus_primitives_core::{ + relay_chain::BlockNumber as RelayBlockNumber, AggregateMessageOrigin, InboundDownwardMessage, + InboundHrmpMessage, PersistedValidationData, +}; +use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder; +use frame_support::{ + derive_impl, + inherent::{InherentData, ProvideInherent}, + parameter_types, + traits::{ + OnFinalize, OnInitialize, ProcessMessage, ProcessMessageError, UnfilteredDispatchable, + }, + weights::{Weight, WeightMeter}, +}; +use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; +use sp_runtime::{traits::BlakeTwo256, BuildStorage}; +use sp_std::{collections::vec_deque::VecDeque, num::NonZeroU32}; +use sp_version::RuntimeVersion; +use std::cell::RefCell; + +use crate as parachain_system; +use crate::consensus_hook::UnincludedSegmentCapacity; + +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + ParachainSystem: parachain_system::{Pallet, Call, Config, Storage, Inherent, Event, ValidateUnsigned}, + MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event}, + } +); + +parameter_types! { + pub const BlockHashCount: u64 = 250; + pub Version: RuntimeVersion = RuntimeVersion { + spec_name: sp_version::create_runtime_str!("test"), + impl_name: sp_version::create_runtime_str!("system-test"), + authoring_version: 1, + spec_version: 1, + impl_version: 1, + apis: sp_version::create_apis_vec!([]), + transaction_version: 1, + state_version: 1, + }; + pub const ParachainId: ParaId = ParaId::new(200); + pub const ReservedXcmpWeight: Weight = Weight::zero(); + pub const ReservedDmpWeight: Weight = Weight::zero(); +} + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for Test { + type Block = Block; + type BlockHashCount = BlockHashCount; + type Version = Version; + type OnSetCode = ParachainSetCode; +} + +parameter_types! { + pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; +} + +impl Config for Test { + type RuntimeEvent = RuntimeEvent; + type OnSystemEvent = (); + type SelfParaId = ParachainId; + type OutboundXcmpMessageSource = FromThreadLocal; + type DmpQueue = frame_support::traits::EnqueueWithOrigin; + type ReservedDmpWeight = ReservedDmpWeight; + type XcmpMessageHandler = SaveIntoThreadLocal; + type ReservedXcmpWeight = ReservedXcmpWeight; + type CheckAssociatedRelayNumber = AnyRelayNumber; + type ConsensusHook = TestConsensusHook; + type WeightInfo = (); +} + +std::thread_local! { + pub static CONSENSUS_HOOK: RefCell (Weight, UnincludedSegmentCapacity)>> + = RefCell::new(Box::new(|_| (Weight::zero(), NonZeroU32::new(1).unwrap().into()))); +} + +pub struct TestConsensusHook; + +impl ConsensusHook for TestConsensusHook { + fn on_state_proof(s: &RelayChainStateProof) -> (Weight, UnincludedSegmentCapacity) { + CONSENSUS_HOOK.with(|f| f.borrow_mut()(s)) + } +} + +parameter_types! 
{ + pub const MaxWeight: Weight = Weight::MAX; +} + +impl pallet_message_queue::Config for Test { + type RuntimeEvent = RuntimeEvent; + // NOTE that normally for benchmarking we should use the No-OP message processor, but in this + // case its a mocked runtime and will only be used to generate insecure default weights. + type MessageProcessor = SaveIntoThreadLocal; + type Size = u32; + type QueueChangeHandler = (); + type QueuePausedQuery = (); + type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type MaxStale = sp_core::ConstU32<8>; + type ServiceWeight = MaxWeight; + type WeightInfo = (); +} + +/// A `XcmpMessageSource` that takes messages from thread-local. +pub struct FromThreadLocal; + +/// A `MessageProcessor` that stores all messages in thread-local. +pub struct SaveIntoThreadLocal; + +std::thread_local! { + pub static HANDLED_DMP_MESSAGES: RefCell>> = RefCell::new(Vec::new()); + pub static HANDLED_XCMP_MESSAGES: RefCell)>> = RefCell::new(Vec::new()); + pub static SENT_MESSAGES: RefCell)>> = RefCell::new(Vec::new()); +} + +pub fn send_message(dest: ParaId, message: Vec) { + SENT_MESSAGES.with(|m| m.borrow_mut().push((dest, message))); +} + +impl XcmpMessageSource for FromThreadLocal { + fn take_outbound_messages(maximum_channels: usize) -> Vec<(ParaId, Vec)> { + let mut ids = std::collections::BTreeSet::::new(); + let mut taken_messages = 0; + let mut taken_bytes = 0; + let mut result = Vec::new(); + SENT_MESSAGES.with(|ms| { + ms.borrow_mut().retain(|m| { + let status = as GetChannelInfo>::get_channel_status(m.0); + let (max_size_now, max_size_ever) = match status { + ChannelStatus::Ready(now, ever) => (now, ever), + ChannelStatus::Closed => return false, // drop message + ChannelStatus::Full => return true, // keep message queued. + }; + + let msg_len = m.1.len(); + + if !ids.contains(&m.0) && + taken_messages < maximum_channels && + msg_len <= max_size_ever && + taken_bytes + msg_len <= max_size_now + { + ids.insert(m.0); + taken_messages += 1; + taken_bytes += msg_len; + result.push(m.clone()); + false + } else { + true + } + }) + }); + result + } +} + +impl ProcessMessage for SaveIntoThreadLocal { + type Origin = AggregateMessageOrigin; + + fn process_message( + message: &[u8], + origin: Self::Origin, + _meter: &mut WeightMeter, + _id: &mut [u8; 32], + ) -> Result { + assert_eq!(origin, Self::Origin::Parent); + + HANDLED_DMP_MESSAGES.with(|m| { + m.borrow_mut().push(message.to_vec()); + Weight::zero() + }); + Ok(true) + } +} + +impl XcmpMessageHandler for SaveIntoThreadLocal { + fn handle_xcmp_messages<'a, I: Iterator>( + iter: I, + _max_weight: Weight, + ) -> Weight { + HANDLED_XCMP_MESSAGES.with(|m| { + for (sender, sent_at, message) in iter { + m.borrow_mut().push((sender, sent_at, message.to_vec())); + } + Weight::zero() + }) + } +} + +// This function basically just builds a genesis storage key/value store according to +// our desired mockup. 
+pub fn new_test_ext() -> sp_io::TestExternalities { + HANDLED_DMP_MESSAGES.with(|m| m.borrow_mut().clear()); + HANDLED_XCMP_MESSAGES.with(|m| m.borrow_mut().clear()); + + frame_system::GenesisConfig::::default().build_storage().unwrap().into() +} + +#[allow(dead_code)] +pub fn mk_dmp(sent_at: u32) -> InboundDownwardMessage { + InboundDownwardMessage { sent_at, msg: format!("down{}", sent_at).into_bytes() } +} + +pub fn mk_hrmp(sent_at: u32) -> InboundHrmpMessage { + InboundHrmpMessage { sent_at, data: format!("{}", sent_at).into_bytes() } +} + +pub struct ReadRuntimeVersion(pub Vec); + +impl sp_core::traits::ReadRuntimeVersion for ReadRuntimeVersion { + fn read_runtime_version( + &self, + _wasm_code: &[u8], + _ext: &mut dyn sp_externalities::Externalities, + ) -> Result, String> { + Ok(self.0.clone()) + } +} + +pub fn wasm_ext() -> sp_io::TestExternalities { + let version = RuntimeVersion { + spec_name: "test".into(), + spec_version: 2, + impl_version: 1, + ..Default::default() + }; + + let mut ext = new_test_ext(); + ext.register_extension(sp_core::traits::ReadRuntimeVersionExt::new(ReadRuntimeVersion( + version.encode(), + ))); + ext +} + +pub struct BlockTest { + n: BlockNumberFor, + within_block: Box, + after_block: Option>, +} + +/// BlockTests exist to test blocks with some setup: we have to assume that +/// `validate_block` will mutate and check storage in certain predictable +/// ways, for example, and we want to always ensure that tests are executed +/// in the context of some particular block number. +#[derive(Default)] +pub struct BlockTests { + tests: Vec, + without_externalities: bool, + pending_upgrade: Option, + ran: bool, + relay_sproof_builder_hook: + Option>, + inherent_data_hook: + Option>, + inclusion_delay: Option, + relay_block_number: Option) -> RelayChainBlockNumber>>, + + included_para_head: Option, + pending_blocks: VecDeque, +} + +impl BlockTests { + pub fn new() -> BlockTests { + Default::default() + } + + pub fn new_without_externalities() -> BlockTests { + let mut tests = BlockTests::new(); + tests.without_externalities = true; + tests + } + + pub fn add_raw(mut self, test: BlockTest) -> Self { + self.tests.push(test); + self + } + + pub fn add(self, n: BlockNumberFor, within_block: F) -> Self + where + F: 'static + Fn(), + { + self.add_raw(BlockTest { n, within_block: Box::new(within_block), after_block: None }) + } + + pub fn add_with_post_test( + self, + n: BlockNumberFor, + within_block: F1, + after_block: F2, + ) -> Self + where + F1: 'static + Fn(), + F2: 'static + Fn(), + { + self.add_raw(BlockTest { + n, + within_block: Box::new(within_block), + after_block: Some(Box::new(after_block)), + }) + } + + pub fn with_relay_sproof_builder(mut self, f: F) -> Self + where + F: 'static + Fn(&BlockTests, RelayChainBlockNumber, &mut RelayStateSproofBuilder), + { + self.relay_sproof_builder_hook = Some(Box::new(f)); + self + } + + pub fn with_relay_block_number(mut self, f: F) -> Self + where + F: 'static + Fn(&BlockNumberFor) -> RelayChainBlockNumber, + { + self.relay_block_number = Some(Box::new(f)); + self + } + + pub fn with_inherent_data(mut self, f: F) -> Self + where + F: 'static + Fn(&BlockTests, RelayChainBlockNumber, &mut ParachainInherentData), + { + self.inherent_data_hook = Some(Box::new(f)); + self + } + + pub fn with_inclusion_delay(mut self, inclusion_delay: usize) -> Self { + self.inclusion_delay.replace(inclusion_delay); + self + } + + pub fn run(&mut self) { + wasm_ext().execute_with(|| { + self.run_without_ext(); + }); + } + + pub fn 
run_without_ext(&mut self) { + self.ran = true; + + let mut parent_head_data = { + let header = HeaderFor::::new_from_number(0); + relay_chain::HeadData(header.encode()) + }; + + self.included_para_head = Some(parent_head_data.clone()); + + for BlockTest { n, within_block, after_block } in self.tests.iter() { + let relay_parent_number = self + .relay_block_number + .as_ref() + .map(|f| f(n)) + .unwrap_or(*n as RelayChainBlockNumber); + // clear pending updates, as applicable + if let Some(upgrade_block) = self.pending_upgrade { + if n >= &upgrade_block.into() { + self.pending_upgrade = None; + } + } + + // begin initialization + let parent_hash = BlakeTwo256::hash(&parent_head_data.0); + System::reset_events(); + System::initialize(n, &parent_hash, &Default::default()); + + // now mess with the storage the way validate_block does + let mut sproof_builder = RelayStateSproofBuilder::default(); + sproof_builder.included_para_head = self + .included_para_head + .clone() + .unwrap_or_else(|| parent_head_data.clone()) + .into(); + if let Some(ref hook) = self.relay_sproof_builder_hook { + hook(self, relay_parent_number, &mut sproof_builder); + } + let (relay_parent_storage_root, relay_chain_state) = + sproof_builder.into_state_root_and_proof(); + let vfp = PersistedValidationData { + relay_parent_number, + relay_parent_storage_root, + ..Default::default() + }; + + >::put(&vfp); + NewValidationCode::::kill(); + + // It is insufficient to push the validation function params + // to storage; they must also be included in the inherent data. + let inherent_data = { + let mut inherent_data = InherentData::default(); + let mut system_inherent_data = ParachainInherentData { + validation_data: vfp.clone(), + relay_chain_state, + downward_messages: Default::default(), + horizontal_messages: Default::default(), + }; + if let Some(ref hook) = self.inherent_data_hook { + hook(self, relay_parent_number, &mut system_inherent_data); + } + inherent_data + .put_data( + cumulus_primitives_parachain_inherent::INHERENT_IDENTIFIER, + &system_inherent_data, + ) + .expect("failed to put VFP inherent"); + inherent_data + }; + + // execute the block + ParachainSystem::on_initialize(*n); + ParachainSystem::create_inherent(&inherent_data) + .expect("got an inherent") + .dispatch_bypass_filter(RawOrigin::None.into()) + .expect("dispatch succeeded"); + MessageQueue::on_initialize(*n); + within_block(); + MessageQueue::on_finalize(*n); + ParachainSystem::on_finalize(*n); + + // did block execution set new validation code? 
+ if NewValidationCode::::exists() && self.pending_upgrade.is_some() { + panic!("attempted to set validation code while upgrade was pending"); + } + + // clean up + let header = System::finalize(); + let head_data = relay_chain::HeadData(header.encode()); + parent_head_data = head_data.clone(); + match self.inclusion_delay { + Some(delay) if delay > 0 => { + self.pending_blocks.push_back(head_data); + if self.pending_blocks.len() > delay { + let included = self.pending_blocks.pop_front().unwrap(); + + self.included_para_head.replace(included); + } + }, + _ => { + self.included_para_head.replace(head_data); + }, + } + + if let Some(after_block) = after_block { + after_block(); + } + } + } +} + +impl Drop for BlockTests { + fn drop(&mut self) { + if !self.ran { + if self.without_externalities { + self.run_without_ext(); + } else { + self.run(); + } + } + } +} diff --git a/cumulus/pallets/parachain-system/src/tests.rs b/cumulus/pallets/parachain-system/src/tests.rs index 626196790bc99f611c099a815019e336b4dd7df0..ab1775b40a84451e150ad979cbf8b3f7888e8ccf 100755 --- a/cumulus/pallets/parachain-system/src/tests.rs +++ b/cumulus/pallets/parachain-system/src/tests.rs @@ -13,433 +13,20 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . + +#![cfg(test)] + use super::*; +use crate::mock::*; -use codec::Encode; -use cumulus_primitives_core::{ - relay_chain::BlockNumber as RelayBlockNumber, AbridgedHrmpChannel, InboundDownwardMessage, - InboundHrmpMessage, PersistedValidationData, -}; -use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder; -use frame_support::{ - assert_ok, - inherent::{InherentData, ProvideInherent}, - parameter_types, - traits::{OnFinalize, OnInitialize, UnfilteredDispatchable}, - weights::Weight, -}; -use frame_system::{ - pallet_prelude::{BlockNumberFor, HeaderFor}, - RawOrigin, -}; +use cumulus_primitives_core::{AbridgedHrmpChannel, InboundDownwardMessage, InboundHrmpMessage}; +use frame_support::{assert_ok, parameter_types, weights::Weight}; +use frame_system::RawOrigin; use hex_literal::hex; +use rand::Rng; use relay_chain::HrmpChannelId; -use sp_core::{blake2_256, H256}; -use sp_runtime::{ - traits::{BlakeTwo256, IdentityLookup}, - BuildStorage, DispatchErrorWithPostInfo, -}; -use sp_std::{collections::vec_deque::VecDeque, num::NonZeroU32}; -use sp_version::RuntimeVersion; -use std::cell::RefCell; - -use crate as parachain_system; -use crate::consensus_hook::UnincludedSegmentCapacity; - -type Block = frame_system::mocking::MockBlock; - -frame_support::construct_runtime!( - pub enum Test - { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - ParachainSystem: parachain_system::{Pallet, Call, Config, Storage, Inherent, Event, ValidateUnsigned}, - } -); - -parameter_types! 
{ - pub const BlockHashCount: u64 = 250; - pub Version: RuntimeVersion = RuntimeVersion { - spec_name: sp_version::create_runtime_str!("test"), - impl_name: sp_version::create_runtime_str!("system-test"), - authoring_version: 1, - spec_version: 1, - impl_version: 1, - apis: sp_version::create_apis_vec!([]), - transaction_version: 1, - state_version: 1, - }; - pub const ParachainId: ParaId = ParaId::new(200); - pub const ReservedXcmpWeight: Weight = Weight::zero(); - pub const ReservedDmpWeight: Weight = Weight::zero(); -} -impl frame_system::Config for Test { - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - type Nonce = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Block = Block; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; - type BlockLength = (); - type BlockWeights = (); - type Version = Version; - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type DbWeight = (); - type BaseCallFilter = frame_support::traits::Everything; - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = ParachainSetCode; - type MaxConsumers = frame_support::traits::ConstU32<16>; -} -impl Config for Test { - type RuntimeEvent = RuntimeEvent; - type OnSystemEvent = (); - type SelfParaId = ParachainId; - type OutboundXcmpMessageSource = FromThreadLocal; - type DmpMessageHandler = SaveIntoThreadLocal; - type ReservedDmpWeight = ReservedDmpWeight; - type XcmpMessageHandler = SaveIntoThreadLocal; - type ReservedXcmpWeight = ReservedXcmpWeight; - type CheckAssociatedRelayNumber = AnyRelayNumber; - type ConsensusHook = TestConsensusHook; -} - -pub struct FromThreadLocal; -pub struct SaveIntoThreadLocal; - -std::thread_local! { - static HANDLED_DMP_MESSAGES: RefCell)>> = RefCell::new(Vec::new()); - static HANDLED_XCMP_MESSAGES: RefCell)>> = RefCell::new(Vec::new()); - static SENT_MESSAGES: RefCell)>> = RefCell::new(Vec::new()); - static CONSENSUS_HOOK: RefCell (Weight, UnincludedSegmentCapacity)>> - = RefCell::new(Box::new(|_| (Weight::zero(), NonZeroU32::new(1).unwrap().into()))); -} - -pub struct TestConsensusHook; - -impl ConsensusHook for TestConsensusHook { - fn on_state_proof(s: &RelayChainStateProof) -> (Weight, UnincludedSegmentCapacity) { - CONSENSUS_HOOK.with(|f| f.borrow_mut()(s)) - } -} - -fn send_message(dest: ParaId, message: Vec) { - SENT_MESSAGES.with(|m| m.borrow_mut().push((dest, message))); -} - -impl XcmpMessageSource for FromThreadLocal { - fn take_outbound_messages(maximum_channels: usize) -> Vec<(ParaId, Vec)> { - let mut ids = std::collections::BTreeSet::::new(); - let mut taken_messages = 0; - let mut taken_bytes = 0; - let mut result = Vec::new(); - SENT_MESSAGES.with(|ms| { - ms.borrow_mut().retain(|m| { - let status = as GetChannelInfo>::get_channel_status(m.0); - let (max_size_now, max_size_ever) = match status { - ChannelStatus::Ready(now, ever) => (now, ever), - ChannelStatus::Closed => return false, // drop message - ChannelStatus::Full => return true, // keep message queued. 
- }; - - let msg_len = m.1.len(); - - if !ids.contains(&m.0) && - taken_messages < maximum_channels && - msg_len <= max_size_ever && - taken_bytes + msg_len <= max_size_now - { - ids.insert(m.0); - taken_messages += 1; - taken_bytes += msg_len; - result.push(m.clone()); - false - } else { - true - } - }) - }); - result - } -} - -impl DmpMessageHandler for SaveIntoThreadLocal { - fn handle_dmp_messages( - iter: impl Iterator)>, - _max_weight: Weight, - ) -> Weight { - HANDLED_DMP_MESSAGES.with(|m| { - for i in iter { - m.borrow_mut().push(i); - } - Weight::zero() - }) - } -} - -impl XcmpMessageHandler for SaveIntoThreadLocal { - fn handle_xcmp_messages<'a, I: Iterator>( - iter: I, - _max_weight: Weight, - ) -> Weight { - HANDLED_XCMP_MESSAGES.with(|m| { - for (sender, sent_at, message) in iter { - m.borrow_mut().push((sender, sent_at, message.to_vec())); - } - Weight::zero() - }) - } -} - -// This function basically just builds a genesis storage key/value store according to -// our desired mockup. -fn new_test_ext() -> sp_io::TestExternalities { - HANDLED_DMP_MESSAGES.with(|m| m.borrow_mut().clear()); - HANDLED_XCMP_MESSAGES.with(|m| m.borrow_mut().clear()); - - frame_system::GenesisConfig::::default().build_storage().unwrap().into() -} - -struct ReadRuntimeVersion(Vec); - -impl sp_core::traits::ReadRuntimeVersion for ReadRuntimeVersion { - fn read_runtime_version( - &self, - _wasm_code: &[u8], - _ext: &mut dyn sp_externalities::Externalities, - ) -> Result, String> { - Ok(self.0.clone()) - } -} - -fn wasm_ext() -> sp_io::TestExternalities { - let version = RuntimeVersion { - spec_name: "test".into(), - spec_version: 2, - impl_version: 1, - ..Default::default() - }; - - let mut ext = new_test_ext(); - ext.register_extension(sp_core::traits::ReadRuntimeVersionExt::new(ReadRuntimeVersion( - version.encode(), - ))); - ext -} - -struct BlockTest { - n: BlockNumberFor, - within_block: Box, - after_block: Option>, -} - -/// BlockTests exist to test blocks with some setup: we have to assume that -/// `validate_block` will mutate and check storage in certain predictable -/// ways, for example, and we want to always ensure that tests are executed -/// in the context of some particular block number. 
-#[derive(Default)] -struct BlockTests { - tests: Vec, - pending_upgrade: Option, - ran: bool, - relay_sproof_builder_hook: - Option>, - inherent_data_hook: - Option>, - inclusion_delay: Option, - relay_block_number: Option) -> RelayChainBlockNumber>>, - - included_para_head: Option, - pending_blocks: VecDeque, -} - -impl BlockTests { - fn new() -> BlockTests { - Default::default() - } - - fn add_raw(mut self, test: BlockTest) -> Self { - self.tests.push(test); - self - } - - fn add(self, n: BlockNumberFor, within_block: F) -> Self - where - F: 'static + Fn(), - { - self.add_raw(BlockTest { n, within_block: Box::new(within_block), after_block: None }) - } - - fn add_with_post_test( - self, - n: BlockNumberFor, - within_block: F1, - after_block: F2, - ) -> Self - where - F1: 'static + Fn(), - F2: 'static + Fn(), - { - self.add_raw(BlockTest { - n, - within_block: Box::new(within_block), - after_block: Some(Box::new(after_block)), - }) - } - - fn with_relay_sproof_builder(mut self, f: F) -> Self - where - F: 'static + Fn(&BlockTests, RelayChainBlockNumber, &mut RelayStateSproofBuilder), - { - self.relay_sproof_builder_hook = Some(Box::new(f)); - self - } - - fn with_relay_block_number(mut self, f: F) -> Self - where - F: 'static + Fn(&BlockNumberFor) -> RelayChainBlockNumber, - { - self.relay_block_number = Some(Box::new(f)); - self - } - - fn with_inherent_data(mut self, f: F) -> Self - where - F: 'static + Fn(&BlockTests, RelayChainBlockNumber, &mut ParachainInherentData), - { - self.inherent_data_hook = Some(Box::new(f)); - self - } - - fn with_inclusion_delay(mut self, inclusion_delay: usize) -> Self { - self.inclusion_delay.replace(inclusion_delay); - self - } - - fn run(&mut self) { - self.ran = true; - wasm_ext().execute_with(|| { - let mut parent_head_data = { - let header = HeaderFor::::new_from_number(0); - relay_chain::HeadData(header.encode()) - }; - - self.included_para_head = Some(parent_head_data.clone()); - - for BlockTest { n, within_block, after_block } in self.tests.iter() { - let relay_parent_number = self - .relay_block_number - .as_ref() - .map(|f| f(n)) - .unwrap_or(*n as RelayChainBlockNumber); - // clear pending updates, as applicable - if let Some(upgrade_block) = self.pending_upgrade { - if n >= &upgrade_block.into() { - self.pending_upgrade = None; - } - } - - // begin initialization - let parent_hash = BlakeTwo256::hash(&parent_head_data.0); - System::reset_events(); - System::initialize(n, &parent_hash, &Default::default()); - - // now mess with the storage the way validate_block does - let mut sproof_builder = RelayStateSproofBuilder::default(); - sproof_builder.included_para_head = self - .included_para_head - .clone() - .unwrap_or_else(|| parent_head_data.clone()) - .into(); - if let Some(ref hook) = self.relay_sproof_builder_hook { - hook(self, relay_parent_number, &mut sproof_builder); - } - let (relay_parent_storage_root, relay_chain_state) = - sproof_builder.into_state_root_and_proof(); - let vfp = PersistedValidationData { - relay_parent_number, - relay_parent_storage_root, - ..Default::default() - }; - - >::put(&vfp); - NewValidationCode::::kill(); - - // It is insufficient to push the validation function params - // to storage; they must also be included in the inherent data. 
- let inherent_data = { - let mut inherent_data = InherentData::default(); - let mut system_inherent_data = ParachainInherentData { - validation_data: vfp.clone(), - relay_chain_state, - downward_messages: Default::default(), - horizontal_messages: Default::default(), - }; - if let Some(ref hook) = self.inherent_data_hook { - hook(self, relay_parent_number, &mut system_inherent_data); - } - inherent_data - .put_data( - cumulus_primitives_parachain_inherent::INHERENT_IDENTIFIER, - &system_inherent_data, - ) - .expect("failed to put VFP inherent"); - inherent_data - }; - - // execute the block - ParachainSystem::on_initialize(*n); - ParachainSystem::create_inherent(&inherent_data) - .expect("got an inherent") - .dispatch_bypass_filter(RawOrigin::None.into()) - .expect("dispatch succeeded"); - within_block(); - ParachainSystem::on_finalize(*n); - - // did block execution set new validation code? - if NewValidationCode::::exists() && self.pending_upgrade.is_some() { - panic!("attempted to set validation code while upgrade was pending"); - } - - // clean up - let header = System::finalize(); - let head_data = relay_chain::HeadData(header.encode()); - parent_head_data = head_data.clone(); - match self.inclusion_delay { - Some(delay) if delay > 0 => { - self.pending_blocks.push_back(head_data); - if self.pending_blocks.len() > delay { - let included = self.pending_blocks.pop_front().unwrap(); - - self.included_para_head.replace(included); - } - }, - _ => { - self.included_para_head.replace(head_data); - }, - } - - if let Some(after_block) = after_block { - after_block(); - } - } - }); - } -} - -impl Drop for BlockTests { - fn drop(&mut self) { - if !self.ran { - self.run(); - } - } -} +use sp_core::H256; +use sp_std::num::NonZeroU32; #[test] #[should_panic] @@ -659,30 +246,6 @@ fn inherent_processed_messages_are_ignored() { CONSENSUS_HOOK.with(|c| { *c.borrow_mut() = Box::new(|_| (Weight::zero(), NonZeroU32::new(2).unwrap().into())) }); - lazy_static::lazy_static! 
{ - static ref DMQ_MSG: InboundDownwardMessage = InboundDownwardMessage { - sent_at: 3, - msg: b"down".to_vec(), - }; - - static ref XCMP_MSG_1: InboundHrmpMessage = InboundHrmpMessage { - sent_at: 2, - data: b"h1".to_vec(), - }; - - static ref XCMP_MSG_2: InboundHrmpMessage = InboundHrmpMessage { - sent_at: 3, - data: b"h2".to_vec(), - }; - - static ref EXPECTED_PROCESSED_DMQ: Vec<(RelayChainBlockNumber, Vec)> = vec![ - (DMQ_MSG.sent_at, DMQ_MSG.msg.clone()) - ]; - static ref EXPECTED_PROCESSED_XCMP: Vec<(ParaId, RelayChainBlockNumber, Vec)> = vec![ - (ParaId::from(200), XCMP_MSG_1.sent_at, XCMP_MSG_1.data.clone()), - (ParaId::from(200), XCMP_MSG_2.sent_at, XCMP_MSG_2.data.clone()), - ]; - } BlockTests::new() .with_inclusion_delay(1) @@ -690,11 +253,11 @@ fn inherent_processed_messages_are_ignored() { .with_relay_sproof_builder(|_, relay_block_num, sproof| match relay_block_num { 3 => { sproof.dmq_mqc_head = - Some(MessageQueueChain::default().extend_downward(&DMQ_MSG).head()); + Some(MessageQueueChain::default().extend_downward(&mk_dmp(3)).head()); sproof.upsert_inbound_channel(ParaId::from(200)).mqc_head = Some( MessageQueueChain::default() - .extend_hrmp(&XCMP_MSG_1) - .extend_hrmp(&XCMP_MSG_2) + .extend_hrmp(&mk_hrmp(2)) + .extend_hrmp(&mk_hrmp(3)) .head(), ); }, @@ -702,9 +265,8 @@ fn inherent_processed_messages_are_ignored() { }) .with_inherent_data(|_, relay_block_num, data| match relay_block_num { 3 => { - data.downward_messages.push(DMQ_MSG.clone()); - data.horizontal_messages - .insert(ParaId::from(200), vec![XCMP_MSG_1.clone(), XCMP_MSG_2.clone()]); + data.downward_messages.push(mk_dmp(3)); + data.horizontal_messages.insert(ParaId::from(200), vec![mk_hrmp(2), mk_hrmp(3)]); }, _ => unreachable!(), }) @@ -712,22 +274,29 @@ fn inherent_processed_messages_are_ignored() { // Don't drop processed messages for this test. HANDLED_DMP_MESSAGES.with(|m| { let m = m.borrow(); - assert_eq!(&*m, EXPECTED_PROCESSED_DMQ.as_slice()); + // NOTE: if this fails, then run the test without benchmark features. + assert_eq!(&*m, &[mk_dmp(3).msg]); }); HANDLED_XCMP_MESSAGES.with(|m| { let m = m.borrow_mut(); - assert_eq!(&*m, EXPECTED_PROCESSED_XCMP.as_slice()); + assert_eq!( + &*m, + &[(ParaId::from(200), 2, b"2".to_vec()), (ParaId::from(200), 3, b"3".to_vec()),] + ); }); }) .add(2, || {}) .add(3, || { HANDLED_DMP_MESSAGES.with(|m| { let m = m.borrow(); - assert_eq!(&*m, EXPECTED_PROCESSED_DMQ.as_slice()); + assert_eq!(&*m, &[mk_dmp(3).msg]); }); HANDLED_XCMP_MESSAGES.with(|m| { let m = m.borrow_mut(); - assert_eq!(&*m, EXPECTED_PROCESSED_XCMP.as_slice()); + assert_eq!( + &*m, + &[(ParaId::from(200), 2, b"2".to_vec()), (ParaId::from(200), 3, b"3".to_vec()),] + ); }); }); } @@ -869,7 +438,7 @@ fn hrmp_outbound_respects_used_bandwidth() { } #[test] -fn events() { +fn runtime_upgrade_events() { BlockTests::new() .with_relay_sproof_builder(|_, block_number, builder| { if block_number > 123 { @@ -894,12 +463,20 @@ fn events() { || {}, || { let events = System::events(); + + assert_eq!(events[0].event, RuntimeEvent::System(frame_system::Event::CodeUpdated)); + assert_eq!( - events[0].event, + events[1].event, RuntimeEvent::ParachainSystem(crate::Event::ValidationFunctionApplied { relay_chain_block_num: 1234 }) ); + + assert!(System::digest() + .logs() + .iter() + .any(|d| *d == sp_runtime::generic::DigestItem::RuntimeEnvironmentUpdated)); }, ); } @@ -1175,6 +752,7 @@ fn message_queue_chain() { } #[test] +#[cfg(not(feature = "runtime-benchmarks"))] fn receive_dmp() { lazy_static::lazy_static! 
{ static ref MSG: InboundDownwardMessage = InboundDownwardMessage { @@ -1200,41 +778,31 @@ fn receive_dmp() { .add(1, || { HANDLED_DMP_MESSAGES.with(|m| { let mut m = m.borrow_mut(); - assert_eq!(&*m, &[(MSG.sent_at, MSG.msg.clone())]); + assert_eq!(&*m, &[(MSG.msg.clone())]); m.clear(); }); }); } #[test] +#[cfg(not(feature = "runtime-benchmarks"))] fn receive_dmp_after_pause() { - lazy_static::lazy_static! { - static ref MSG_1: InboundDownwardMessage = InboundDownwardMessage { - sent_at: 1, - msg: b"down1".to_vec(), - }; - static ref MSG_2: InboundDownwardMessage = InboundDownwardMessage { - sent_at: 3, - msg: b"down2".to_vec(), - }; - } - BlockTests::new() .with_relay_sproof_builder(|_, relay_block_num, sproof| match relay_block_num { 1 => { sproof.dmq_mqc_head = - Some(MessageQueueChain::default().extend_downward(&MSG_1).head()); + Some(MessageQueueChain::default().extend_downward(&mk_dmp(1)).head()); }, 2 => { // no new messages, mqc stayed the same. sproof.dmq_mqc_head = - Some(MessageQueueChain::default().extend_downward(&MSG_1).head()); + Some(MessageQueueChain::default().extend_downward(&mk_dmp(1)).head()); }, 3 => { sproof.dmq_mqc_head = Some( MessageQueueChain::default() - .extend_downward(&MSG_1) - .extend_downward(&MSG_2) + .extend_downward(&mk_dmp(1)) + .extend_downward(&mk_dmp(3)) .head(), ); }, @@ -1242,20 +810,20 @@ fn receive_dmp_after_pause() { }) .with_inherent_data(|_, relay_block_num, data| match relay_block_num { 1 => { - data.downward_messages.push(MSG_1.clone()); + data.downward_messages.push(mk_dmp(1)); }, 2 => { // no new messages }, 3 => { - data.downward_messages.push(MSG_2.clone()); + data.downward_messages.push(mk_dmp(3)); }, _ => unreachable!(), }) .add(1, || { HANDLED_DMP_MESSAGES.with(|m| { let mut m = m.borrow_mut(); - assert_eq!(&*m, &[(MSG_1.sent_at, MSG_1.msg.clone())]); + assert_eq!(&*m, &[(mk_dmp(1).msg.clone())]); m.clear(); }); }) @@ -1263,54 +831,88 @@ fn receive_dmp_after_pause() { .add(3, || { HANDLED_DMP_MESSAGES.with(|m| { let mut m = m.borrow_mut(); - assert_eq!(&*m, &[(MSG_2.sent_at, MSG_2.msg.clone())]); + assert_eq!(&*m, &[(mk_dmp(3).msg.clone())]); m.clear(); }); }); } +// Sent up to 100 DMP messages per block over a period of 100 blocks. #[test] -fn receive_hrmp() { - lazy_static::lazy_static! { - static ref MSG_1: InboundHrmpMessage = InboundHrmpMessage { - sent_at: 1, - data: b"1".to_vec(), - }; +#[cfg(not(feature = "runtime-benchmarks"))] +fn receive_dmp_many() { + wasm_ext().execute_with(|| { + parameter_types! { + pub storage MqcHead: MessageQueueChain = Default::default(); + pub storage SentInBlock: Vec> = Default::default(); + } - static ref MSG_2: InboundHrmpMessage = InboundHrmpMessage { - sent_at: 2, - data: b"2".to_vec(), - }; + let mut sent_in_block = vec![vec![]]; + let mut rng = rand::thread_rng(); - static ref MSG_3: InboundHrmpMessage = InboundHrmpMessage { - sent_at: 2, - data: b"3".to_vec(), - }; + for block in 1..100 { + let mut msgs = vec![]; + for _ in 1..=rng.gen_range(1..=100) { + // Just use the same message multiple times per block. 
+ msgs.push(mk_dmp(block)); + } + sent_in_block.push(msgs); + } + SentInBlock::set(&sent_in_block); - static ref MSG_4: InboundHrmpMessage = InboundHrmpMessage { - sent_at: 2, - data: b"4".to_vec(), - }; - } + let mut tester = BlockTests::new_without_externalities() + .with_relay_sproof_builder(|_, relay_block_num, sproof| { + let mut new_hash = MqcHead::get(); + + for msg in SentInBlock::get()[relay_block_num as usize].iter() { + new_hash.extend_downward(&msg); + } + sproof.dmq_mqc_head = Some(new_hash.head()); + MqcHead::set(&new_hash); + }) + .with_inherent_data(|_, relay_block_num, data| { + for msg in SentInBlock::get()[relay_block_num as usize].iter() { + data.downward_messages.push(msg.clone()); + } + }); + + for block in 1..100 { + tester = tester.add(block, move || { + HANDLED_DMP_MESSAGES.with(|m| { + let mut m = m.borrow_mut(); + let msgs = SentInBlock::get()[block as usize] + .iter() + .map(|m| m.msg.clone()) + .collect::>(); + assert_eq!(&*m, &msgs); + m.clear(); + }); + }); + } + }); +} + +#[test] +fn receive_hrmp() { BlockTests::new() .with_relay_sproof_builder(|_, relay_block_num, sproof| match relay_block_num { 1 => { // 200 - doesn't exist yet // 300 - one new message sproof.upsert_inbound_channel(ParaId::from(300)).mqc_head = - Some(MessageQueueChain::default().extend_hrmp(&MSG_1).head()); + Some(MessageQueueChain::default().extend_hrmp(&mk_hrmp(1)).head()); }, 2 => { // 200 - now present with one message // 300 - two new messages sproof.upsert_inbound_channel(ParaId::from(200)).mqc_head = - Some(MessageQueueChain::default().extend_hrmp(&MSG_4).head()); + Some(MessageQueueChain::default().extend_hrmp(&mk_hrmp(4)).head()); sproof.upsert_inbound_channel(ParaId::from(300)).mqc_head = Some( MessageQueueChain::default() - .extend_hrmp(&MSG_1) - .extend_hrmp(&MSG_2) - .extend_hrmp(&MSG_3) + .extend_hrmp(&mk_hrmp(1)) + .extend_hrmp(&mk_hrmp(2)) + .extend_hrmp(&mk_hrmp(3)) .head(), ); }, @@ -1318,13 +920,13 @@ fn receive_hrmp() { // 200 - no new messages // 300 - is gone sproof.upsert_inbound_channel(ParaId::from(200)).mqc_head = - Some(MessageQueueChain::default().extend_hrmp(&MSG_4).head()); + Some(MessageQueueChain::default().extend_hrmp(&mk_hrmp(4)).head()); }, _ => unreachable!(), }) .with_inherent_data(|_, relay_block_num, data| match relay_block_num { 1 => { - data.horizontal_messages.insert(ParaId::from(300), vec![MSG_1.clone()]); + data.horizontal_messages.insert(ParaId::from(300), vec![mk_hrmp(1)]); }, 2 => { data.horizontal_messages.insert( @@ -1333,11 +935,11 @@ fn receive_hrmp() { // can't be sent at the block 1 actually. However, we cheat here // because we want to test the case where there are multiple messages // but the harness at the moment doesn't support block skipping. - MSG_2.clone(), - MSG_3.clone(), + mk_hrmp(2).clone(), + mk_hrmp(3).clone(), ], ); - data.horizontal_messages.insert(ParaId::from(200), vec![MSG_4.clone()]); + data.horizontal_messages.insert(ParaId::from(200), vec![mk_hrmp(4)]); }, 3 => {}, _ => unreachable!(), @@ -1355,9 +957,9 @@ fn receive_hrmp() { assert_eq!( &*m, &[ - (ParaId::from(200), 2, b"4".to_vec()), (ParaId::from(300), 2, b"2".to_vec()), - (ParaId::from(300), 2, b"3".to_vec()), + (ParaId::from(300), 3, b"3".to_vec()), + (ParaId::from(200), 4, b"4".to_vec()), ] ); m.clear(); @@ -1386,55 +988,46 @@ fn receive_hrmp_empty_channel() { #[test] fn receive_hrmp_after_pause() { - lazy_static::lazy_static! 
{ - static ref MSG_1: InboundHrmpMessage = InboundHrmpMessage { - sent_at: 1, - data: b"mikhailinvanovich".to_vec(), - }; - - static ref MSG_2: InboundHrmpMessage = InboundHrmpMessage { - sent_at: 3, - data: b"1000000000".to_vec(), - }; - } - const ALICE: ParaId = ParaId::new(300); BlockTests::new() .with_relay_sproof_builder(|_, relay_block_num, sproof| match relay_block_num { 1 => { sproof.upsert_inbound_channel(ALICE).mqc_head = - Some(MessageQueueChain::default().extend_hrmp(&MSG_1).head()); + Some(MessageQueueChain::default().extend_hrmp(&mk_hrmp(1)).head()); }, 2 => { // 300 - no new messages, mqc stayed the same. sproof.upsert_inbound_channel(ALICE).mqc_head = - Some(MessageQueueChain::default().extend_hrmp(&MSG_1).head()); + Some(MessageQueueChain::default().extend_hrmp(&mk_hrmp(1)).head()); }, 3 => { // 300 - new message. sproof.upsert_inbound_channel(ALICE).mqc_head = Some( - MessageQueueChain::default().extend_hrmp(&MSG_1).extend_hrmp(&MSG_2).head(), + MessageQueueChain::default() + .extend_hrmp(&mk_hrmp(1)) + .extend_hrmp(&mk_hrmp(3)) + .head(), ); }, _ => unreachable!(), }) .with_inherent_data(|_, relay_block_num, data| match relay_block_num { 1 => { - data.horizontal_messages.insert(ALICE, vec![MSG_1.clone()]); + data.horizontal_messages.insert(ALICE, vec![mk_hrmp(1)]); }, 2 => { // no new messages }, 3 => { - data.horizontal_messages.insert(ALICE, vec![MSG_2.clone()]); + data.horizontal_messages.insert(ALICE, vec![mk_hrmp(3)]); }, _ => unreachable!(), }) .add(1, || { HANDLED_XCMP_MESSAGES.with(|m| { let mut m = m.borrow_mut(); - assert_eq!(&*m, &[(ALICE, 1, b"mikhailinvanovich".to_vec())]); + assert_eq!(&*m, &[(ALICE, 1, b"1".to_vec())]); m.clear(); }); }) @@ -1442,14 +1035,75 @@ fn receive_hrmp_after_pause() { .add(3, || { HANDLED_XCMP_MESSAGES.with(|m| { let mut m = m.borrow_mut(); - assert_eq!(&*m, &[(ALICE, 3, b"1000000000".to_vec())]); + assert_eq!(&*m, &[(ALICE, 3, b"3".to_vec())]); m.clear(); }); }); } +// Sent up to 100 HRMP messages per block over a period of 100 blocks. +#[test] +fn receive_hrmp_many() { + const ALICE: ParaId = ParaId::new(300); + + wasm_ext().execute_with(|| { + parameter_types! { + pub storage MqcHead: MessageQueueChain = Default::default(); + pub storage SentInBlock: Vec> = Default::default(); + } + + let mut sent_in_block = vec![vec![]]; + let mut rng = rand::thread_rng(); + + for block in 1..100 { + let mut msgs = vec![]; + for _ in 1..=rng.gen_range(1..=100) { + // Just use the same message multiple times per block. 
+ msgs.push(mk_hrmp(block)); + } + sent_in_block.push(msgs); + } + SentInBlock::set(&sent_in_block); + + let mut tester = BlockTests::new_without_externalities() + .with_relay_sproof_builder(|_, relay_block_num, sproof| { + let mut new_hash = MqcHead::get(); + + for msg in SentInBlock::get()[relay_block_num as usize].iter() { + new_hash.extend_hrmp(&msg); + } + + sproof.upsert_inbound_channel(ALICE).mqc_head = Some(new_hash.head()); + MqcHead::set(&new_hash); + }) + .with_inherent_data(|_, relay_block_num, data| { + // TODO use vector for dmp as well + data.horizontal_messages + .insert(ALICE, SentInBlock::get()[relay_block_num as usize].clone()); + }); + + for block in 1..100 { + tester = tester.add(block, move || { + HANDLED_XCMP_MESSAGES.with(|m| { + let mut m = m.borrow_mut(); + let msgs = SentInBlock::get()[block as usize] + .iter() + .map(|m| (ALICE, m.sent_at, m.data.clone())) + .collect::>(); + assert_eq!(&*m, &msgs); + m.clear(); + }); + }); + } + }); +} + #[test] fn upgrade_version_checks_should_work() { + use codec::Encode; + use sp_runtime::DispatchErrorWithPostInfo; + use sp_version::RuntimeVersion; + let test_data = vec![ ("test", 0, 1, Err(frame_system::Error::::SpecVersionNeedsToIncrease)), ("test", 1, 0, Err(frame_system::Error::::SpecVersionNeedsToIncrease)), @@ -1471,7 +1125,7 @@ fn upgrade_version_checks_should_work() { ext.register_extension(sp_core::traits::ReadRuntimeVersionExt::new(read_runtime_version)); ext.execute_with(|| { let new_code = vec![1, 2, 3, 4]; - let new_code_hash = sp_core::H256(blake2_256(&new_code)); + let new_code_hash = H256(sp_core::blake2_256(&new_code)); let _authorize = ParachainSystem::authorize_upgrade(RawOrigin::Root.into(), new_code_hash, true); @@ -1496,3 +1150,53 @@ fn deposits_relay_parent_storage_root() { }, ); } + +#[test] +fn ump_fee_factor_increases_and_decreases() { + BlockTests::new() + .with_relay_sproof_builder(|_, _, sproof| { + sproof.host_config.max_upward_queue_size = 100; + sproof.host_config.max_upward_message_num_per_candidate = 1; + }) + .add_with_post_test( + 1, + || { + // Fee factor increases in `send_upward_message` + ParachainSystem::send_upward_message(b"Test".to_vec()).unwrap(); + assert_eq!(UpwardDeliveryFeeFactor::::get(), FixedU128::from_u32(1)); + + ParachainSystem::send_upward_message( + b"This message will be enough to increase the fee factor".to_vec(), + ) + .unwrap(); + assert_eq!( + UpwardDeliveryFeeFactor::::get(), + FixedU128::from_rational(105, 100) + ); + }, + || { + // Factor decreases in `on_finalize`, but only if we are below the threshold + let messages = UpwardMessages::::get(); + assert_eq!(messages, vec![b"Test".to_vec()]); + assert_eq!( + UpwardDeliveryFeeFactor::::get(), + FixedU128::from_rational(105, 100) + ); + }, + ) + .add_with_post_test( + 2, + || { + // We do nothing here + }, + || { + let messages = UpwardMessages::::get(); + assert_eq!( + messages, + vec![b"This message will be enough to increase the fee factor".to_vec(),] + ); + // Now the delivery fee factor is decreased, since we are below the threshold + assert_eq!(UpwardDeliveryFeeFactor::::get(), FixedU128::from_u32(1)); + }, + ); +} diff --git a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs index 0cf68f25cc34b7be302b5983d5f7f27fcfcf13f1..f17ac6007a09bf02011473f95a537855eb43f3b0 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs @@ -21,12 +21,13 @@ use 
cumulus_test_client::{ runtime::{ self as test_runtime, Block, Hash, Header, TestPalletCall, UncheckedExtrinsic, WASM_BINARY, }, - transfer, BlockData, BuildParachainBlockData, Client, DefaultTestClientBuilderExt, HeadData, - InitBlockBuilder, TestClientBuilder, TestClientBuilderExt, ValidationParams, + transfer, BlockData, BlockOrigin, BuildParachainBlockData, Client, ClientBlockImportExt, + DefaultTestClientBuilderExt, HeadData, InitBlockBuilder, TestClientBuilder, + TestClientBuilderExt, ValidationParams, }; use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder; use sp_keyring::AccountKeyring::*; -use sp_runtime::traits::Header as HeaderT; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; use std::{env, process::Command}; use crate::validate_block::MemoryOptimizedValidationParams; @@ -100,7 +101,7 @@ fn build_block_with_witness( } #[test] -fn validate_block_no_extra_extrinsics() { +fn validate_block_works() { sp_tracing::try_init_simple(); let (client, parent_head) = create_test_client(); @@ -290,3 +291,42 @@ fn validation_params_and_memory_optimized_validation_params_encode_and_decode() let decoded = ValidationParams::decode_all(&mut &encoded[..]).unwrap(); assert_eq!(decoded, validation_params); } + +/// Test for ensuring that we are differentiating in the `validation::trie_cache` between different +/// child tries. +/// +/// This is achieved by first building a block using `read_and_write_child_tries` that should set +/// the values in the child tries. In the second step we are building a second block with the same +/// extrinsic that reads the values from the child tries and it asserts that we read the correct +/// data from the state. +#[test] +fn validate_block_works_with_child_tries() { + sp_tracing::try_init_simple(); + + let (mut client, parent_head) = create_test_client(); + let TestBlockData { block, .. } = build_block_with_witness( + &client, + vec![generate_extrinsic(&client, Charlie, TestPalletCall::read_and_write_child_tries {})], + parent_head.clone(), + Default::default(), + ); + let block = block.into_block(); + + futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); + + let parent_head = block.header().clone(); + + let TestBlockData { block, validation_data } = build_block_with_witness( + &client, + vec![generate_extrinsic(&client, Alice, TestPalletCall::read_and_write_child_tries {})], + parent_head.clone(), + Default::default(), + ); + + let header = block.header().clone(); + + let res_header = + call_validate_block(parent_head, block, validation_data.relay_parent_storage_root) + .expect("Calls `validate_block`"); + assert_eq!(header, res_header); +} diff --git a/cumulus/pallets/parachain-system/src/validate_block/trie_cache.rs b/cumulus/pallets/parachain-system/src/validate_block/trie_cache.rs index 57876b87876feedad9f0ac8a7ccf3d5c99f6e704..5d785910fbe026fba487980d1244262146f86a5b 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/trie_cache.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/trie_cache.rs @@ -67,7 +67,13 @@ impl<'a, H: Hasher> trie_db::TrieCache> for TrieCache<'a, H> { /// Provider of [`TrieCache`] instances. pub(crate) struct CacheProvider { node_cache: RefCell>>, - value_cache: RefCell, trie_db::CachedValue>>, + /// Cache: `storage_root` => `storage_key` => `value`. + /// + /// One `block` can for example use multiple tries (child tries) and we need to distinguish the + /// cached (`storage_key`, `value`) between them. 
For this we are using the `storage_root` to + /// distinguish them (even if the storage root is the same for two child tries, it just means + /// that both are exactly the same trie and there would happen no collision). + value_cache: RefCell, trie_db::CachedValue>>>, } impl CacheProvider { @@ -81,22 +87,26 @@ impl CacheProvider { impl TrieCacheProvider for CacheProvider { type Cache<'a> = TrieCache<'a, H> where H: 'a; - fn as_trie_db_cache(&self, _storage_root: ::Out) -> Self::Cache<'_> { + fn as_trie_db_cache(&self, storage_root: ::Out) -> Self::Cache<'_> { TrieCache { - value_cache: Some(self.value_cache.borrow_mut()), + value_cache: Some(RefMut::map(self.value_cache.borrow_mut(), |c| { + c.entry(storage_root).or_default() + })), node_cache: self.node_cache.borrow_mut(), } } fn as_trie_db_mut_cache(&self) -> Self::Cache<'_> { // This method is called when we calculate the storage root. - // Since we are using a simplified cache architecture, - // we do not have separate key spaces for different storage roots. - // The value cache is therefore disabled here. + // We are not interested in caching new values (as we would throw them away directly after a + // block is validated) and thus, we don't pass any `value_cache`. TrieCache { value_cache: None, node_cache: self.node_cache.borrow_mut() } } - fn merge<'a>(&'a self, _other: Self::Cache<'a>, _new_root: ::Out) {} + fn merge<'a>(&'a self, _other: Self::Cache<'a>, _new_root: ::Out) { + // This is called to merge the `value_cache` from `as_trie_db_mut_cache`, which is not + // activated, so we don't need to do anything here. + } } // This is safe here since we are single-threaded in WASM diff --git a/cumulus/pallets/parachain-system/src/weights.rs b/cumulus/pallets/parachain-system/src/weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..da7f64237e9b6a8b39096b761f6ff923f903529a --- /dev/null +++ b/cumulus/pallets/parachain-system/src/weights.rs @@ -0,0 +1,115 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for cumulus_pallet_parachain_system +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-03-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` +//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westmint-dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --chain +// westmint-dev +// --pallet +// cumulus_pallet_parachain_system +// --extrinsic +// * +// --execution +// wasm +// --wasm-execution +// compiled +// --output +// pallets/parachain-system/src/weights.rs +// --steps +// 50 +// --repeat +// 20 +// --template +// ../substrate/.maintain/frame-weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for cumulus_pallet_parachain_system. +pub trait WeightInfo { + fn enqueue_inbound_downward_messages(n: u32, ) -> Weight; +} + +/// Weights for cumulus_pallet_parachain_system using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + /// Storage: ParachainSystem LastDmqMqcHead (r:1 w:1) + /// Proof Skipped: ParachainSystem LastDmqMqcHead (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: ParachainSystem ReservedDmpWeightOverride (r:1 w:0) + /// Proof Skipped: ParachainSystem ReservedDmpWeightOverride (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: ParachainSystem ProcessedDownwardMessages (r:0 w:1) + /// Proof Skipped: ParachainSystem ProcessedDownwardMessages (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: MessageQueue Pages (r:0 w:16) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// The range of component `n` is `[0, 1000]`. + fn enqueue_inbound_downward_messages(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `12` + // Estimated: `8013` + // Minimum execution time: 1_625_000 picoseconds. 
+ Weight::from_parts(1_735_000, 8013) + // Standard Error: 14_563 + .saturating_add(Weight::from_parts(25_300_108, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + /// Storage: ParachainSystem LastDmqMqcHead (r:1 w:1) + /// Proof Skipped: ParachainSystem LastDmqMqcHead (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: ParachainSystem ReservedDmpWeightOverride (r:1 w:0) + /// Proof Skipped: ParachainSystem ReservedDmpWeightOverride (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: ParachainSystem ProcessedDownwardMessages (r:0 w:1) + /// Proof Skipped: ParachainSystem ProcessedDownwardMessages (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: MessageQueue Pages (r:0 w:16) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// The range of component `n` is `[0, 1000]`. + fn enqueue_inbound_downward_messages(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `12` + // Estimated: `8013` + // Minimum execution time: 1_625_000 picoseconds. + Weight::from_parts(1_735_000, 8013) + // Standard Error: 14_563 + .saturating_add(Weight::from_parts(25_300_108, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + } +} diff --git a/cumulus/pallets/solo-to-para/Cargo.toml b/cumulus/pallets/solo-to-para/Cargo.toml index 6a3fe59b4020e44c53bd0d4f0a5696f63b602282..e4ef72965c732135176a3c46df8481a65335f977 100644 --- a/cumulus/pallets/solo-to-para/Cargo.toml +++ b/cumulus/pallets/solo-to-para/Cargo.toml @@ -4,10 +4,11 @@ version = "0.1.0" authors.workspace = true edition.workspace = true description = "Adds functionality to migrate from a Solo to a Parachain" +license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } # Substrate frame-support = { path = "../../../substrate/frame/support", default-features = false} diff --git a/cumulus/pallets/xcm/Cargo.toml b/cumulus/pallets/xcm/Cargo.toml index 229edaaab4c881c7399ff6036dafbb2f0d7ba1e2..c8e819979bdb1f9841632227e2ad2f5e7d3cbd63 100644 --- a/cumulus/pallets/xcm/Cargo.toml +++ b/cumulus/pallets/xcm/Cargo.toml @@ -3,10 +3,12 @@ authors.workspace = true edition.workspace = true name = "cumulus-pallet-xcm" version = "0.1.0" +license = "Apache-2.0" +description = "Pallet for stuff specific to parachains' usage of XCM" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } sp-std = { path = "../../../substrate/primitives/std", default-features = false} sp-io = { path = 
"../../../substrate/primitives/io", default-features = false} diff --git a/cumulus/pallets/xcm/src/lib.rs b/cumulus/pallets/xcm/src/lib.rs index 69b4f43754003495b4e6cec9b8e9e29e1ce73a08..90a0ec76defe2b0df8d99faacfb611c0f7834a3b 100644 --- a/cumulus/pallets/xcm/src/lib.rs +++ b/cumulus/pallets/xcm/src/lib.rs @@ -20,25 +20,18 @@ #![cfg_attr(not(feature = "std"), no_std)] -use codec::{Decode, DecodeLimit, Encode}; -use cumulus_primitives_core::{ - relay_chain::BlockNumber as RelayBlockNumber, DmpMessageHandler, ParaId, -}; -use frame_support::weights::Weight; +use codec::{Decode, Encode}; +use cumulus_primitives_core::ParaId; pub use pallet::*; use scale_info::TypeInfo; use sp_runtime::{traits::BadOrigin, RuntimeDebug}; -use sp_std::{convert::TryFrom, prelude::*}; -use xcm::{ - latest::{ExecuteXcm, Outcome, Parent, Xcm}, - VersionedXcm, MAX_XCM_DECODE_DEPTH, -}; +use sp_std::prelude::*; +use xcm::latest::{ExecuteXcm, Outcome}; #[frame_support::pallet] pub mod pallet { use super::*; use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; #[pallet::pallet] pub struct Pallet(_); @@ -52,17 +45,7 @@ pub mod pallet { type XcmExecutor: ExecuteXcm; } - #[pallet::error] - pub enum Error {} - - #[pallet::hooks] - impl Hooks> for Pallet {} - - #[pallet::call] - impl Pallet {} - #[pallet::event] - #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { /// Downward message is invalid XCM. /// \[ id \] @@ -85,6 +68,9 @@ pub mod pallet { SiblingParachain(ParaId), } + #[pallet::call] + impl Pallet {} + impl From for Origin { fn from(id: ParaId) -> Origin { Origin::SiblingParachain(id) @@ -97,75 +83,6 @@ pub mod pallet { } } -/// For an incoming downward message, this just adapts an XCM executor and executes DMP messages -/// immediately. Their origin is asserted to be the Parent location. -/// -/// The weight `limit` is only respected as the maximum for an individual message. -/// -/// Because this largely ignores the given weight limit, it probably isn't good for most production -/// uses. Use DmpQueue pallet for a more robust design. -pub struct UnlimitedDmpExecution(sp_std::marker::PhantomData); -impl DmpMessageHandler for UnlimitedDmpExecution { - fn handle_dmp_messages( - iter: impl Iterator)>, - limit: Weight, - ) -> Weight { - let mut used = Weight::zero(); - for (_sent_at, data) in iter { - let id = sp_io::hashing::blake2_256(&data[..]); - let msg = VersionedXcm::::decode_all_with_depth_limit( - MAX_XCM_DECODE_DEPTH, - &mut data.as_slice(), - ) - .map(Xcm::::try_from); - match msg { - Err(_) => Pallet::::deposit_event(Event::InvalidFormat(id)), - Ok(Err(())) => Pallet::::deposit_event(Event::UnsupportedVersion(id)), - Ok(Ok(x)) => { - let outcome = T::XcmExecutor::execute_xcm(Parent, x, id, limit); - used = used.saturating_add(outcome.weight_used()); - Pallet::::deposit_event(Event::ExecutedDownward(id, outcome)); - }, - } - } - used - } -} - -/// For an incoming downward message, this just adapts an XCM executor and executes DMP messages -/// immediately. Their origin is asserted to be the Parent location. -/// -/// This respects the given weight limit and silently drops messages if they would break it. It -/// probably isn't good for most production uses. Use DmpQueue pallet for a more robust design. 
-pub struct LimitAndDropDmpExecution(sp_std::marker::PhantomData); -impl DmpMessageHandler for LimitAndDropDmpExecution { - fn handle_dmp_messages( - iter: impl Iterator)>, - limit: Weight, - ) -> Weight { - let mut used = Weight::zero(); - for (_sent_at, data) in iter { - let id = sp_io::hashing::blake2_256(&data[..]); - let msg = VersionedXcm::::decode_all_with_depth_limit( - MAX_XCM_DECODE_DEPTH, - &mut data.as_slice(), - ) - .map(Xcm::::try_from); - match msg { - Err(_) => Pallet::::deposit_event(Event::InvalidFormat(id)), - Ok(Err(())) => Pallet::::deposit_event(Event::UnsupportedVersion(id)), - Ok(Ok(x)) => { - let weight_limit = limit.saturating_sub(used); - let outcome = T::XcmExecutor::execute_xcm(Parent, x, id, weight_limit); - used = used.saturating_add(outcome.weight_used()); - Pallet::::deposit_event(Event::ExecutedDownward(id, outcome)); - }, - } - } - used - } -} - /// Ensure that the origin `o` represents a sibling parachain. /// Returns `Ok` with the parachain ID of the sibling or an `Err` otherwise. pub fn ensure_sibling_para(o: OuterOrigin) -> Result diff --git a/cumulus/pallets/xcmp-queue/Cargo.toml b/cumulus/pallets/xcmp-queue/Cargo.toml index 5aab57da91b4f731567bd81d384cab646beb936d..fba006e7986f990d08d70d5cc23d266b0e642dc4 100644 --- a/cumulus/pallets/xcmp-queue/Cargo.toml +++ b/cumulus/pallets/xcmp-queue/Cargo.toml @@ -3,36 +3,45 @@ name = "cumulus-pallet-xcmp-queue" version = "0.1.0" authors.workspace = true edition.workspace = true +description = "Pallet to queue outbound and inbound XCMP messages." +license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive" ], default-features = false } log = { version = "0.4.20", default-features = false } -rand_chacha = { version = "0.3.0", default-features = false } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } # Substrate frame-support = { path = "../../../substrate/frame/support", default-features = false} frame-system = { path = "../../../substrate/frame/system", default-features = false} sp-io = { path = "../../../substrate/primitives/io", default-features = false} +sp-core = { path = "../../../substrate/primitives/core", default-features = false } sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false} sp-std = { path = "../../../substrate/primitives/std", default-features = false} +pallet-message-queue = { path = "../../../substrate/frame/message-queue", default-features = false } # Polkadot -polkadot-runtime-common = { path = "../../../polkadot/runtime/common", default-features = false} -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../polkadot/xcm/xcm-executor", default-features = false} +polkadot-runtime-common = { path = "../../../polkadot/runtime/common", default-features = false } +polkadot-runtime-parachains = { path = "../../../polkadot/runtime/parachains", default-features = false } +xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../polkadot/xcm/xcm-executor", default-features = false } # Cumulus cumulus-primitives-core = { path = "../../primitives/core", default-features = false } # Optional import for benchmarking frame-benchmarking = { path = 
"../../../substrate/frame/benchmarking", default-features = false, optional = true} +bounded-collections = { version = "0.1.4", default-features = false } + +# Bridges +bp-xcm-bridge-hub-router = { path = "../../../bridges/primitives/xcm-bridge-hub-router", default-features = false, optional = true } [dev-dependencies] # Substrate sp-core = { path = "../../../substrate/primitives/core" } pallet-balances = { path = "../../../substrate/frame/balances" } +frame-support = { path = "../../../substrate/frame/support", features = ["experimental"] } # Polkadot xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder" } @@ -43,14 +52,19 @@ cumulus-pallet-parachain-system = { path = "../parachain-system", features = ["p [features] default = [ "std" ] std = [ + "bounded-collections/std", + "bp-xcm-bridge-hub-router?/std", "codec/std", "cumulus-primitives-core/std", "frame-benchmarking?/std", "frame-support/std", "frame-system/std", "log/std", + "pallet-message-queue/std", "polkadot-runtime-common/std", + "polkadot-runtime-parachains/std", "scale-info/std", + "sp-core/std", "sp-io/std", "sp-runtime/std", "sp-std/std", @@ -60,11 +74,14 @@ std = [ runtime-benchmarks = [ "cumulus-pallet-parachain-system/runtime-benchmarks", + "cumulus-primitives-core/runtime-benchmarks", "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", "pallet-balances/runtime-benchmarks", + "pallet-message-queue/runtime-benchmarks", "polkadot-runtime-common/runtime-benchmarks", + "polkadot-runtime-parachains/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", @@ -74,6 +91,9 @@ try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", "pallet-balances/try-runtime", + "pallet-message-queue/try-runtime", "polkadot-runtime-common/try-runtime", + "polkadot-runtime-parachains/try-runtime", "sp-runtime/try-runtime", ] +bridging = [ "bp-xcm-bridge-hub-router" ] diff --git a/cumulus/pallets/xcmp-queue/src/benchmarking.rs b/cumulus/pallets/xcmp-queue/src/benchmarking.rs index 17ec60a2f3fafe9d257074fc803c6567b4422c31..81dfbc2bb71ce7196fb7adfc95732d7c56a23d7b 100644 --- a/cumulus/pallets/xcmp-queue/src/benchmarking.rs +++ b/cumulus/pallets/xcmp-queue/src/benchmarking.rs @@ -17,12 +17,158 @@ use crate::*; -use frame_benchmarking::{benchmarks, impl_benchmark_test_suite}; +use codec::DecodeAll; +use frame_benchmarking::v2::*; +use frame_support::traits::Hooks; use frame_system::RawOrigin; +use xcm::v3::MAX_INSTRUCTIONS_TO_DECODE; -benchmarks! { - set_config_with_u32 {}: update_resume_threshold(RawOrigin::Root, 100) - set_config_with_weight {}: update_weight_restrict_decay(RawOrigin::Root, Weight::from_parts(3_000_000, 0)) -} +#[benchmarks] +mod benchmarks { + use super::*; + + /// Modify any of the `QueueConfig` fields with a new `u32` value. 
+ /// + /// Used as weight for: + /// - update_suspend_threshold + /// - update_drop_threshold + /// - update_resume_threshold + #[benchmark] + fn set_config_with_u32() { + #[extrinsic_call] + Pallet::::update_resume_threshold(RawOrigin::Root, 1); + } + + #[benchmark] + fn enqueue_xcmp_message() { + assert!(QueueConfig::::get().drop_threshold * MaxXcmpMessageLenOf::::get() > 1000); + let msg = BoundedVec::>::default(); + + #[block] + { + Pallet::::enqueue_xcmp_message(0.into(), msg, &mut WeightMeter::new()).unwrap(); + } + } + + #[benchmark] + fn suspend_channel() { + let para = 123.into(); + let data = ChannelSignal::Suspend.encode(); + + #[block] + { + ChannelSignal::decode_all(&mut &data[..]).unwrap(); + Pallet::::suspend_channel(para); + } + + assert_eq!( + OutboundXcmpStatus::::get() + .iter() + .find(|p| p.recipient == para) + .unwrap() + .state, + OutboundState::Suspended + ); + } + + #[benchmark] + fn resume_channel() { + let para = 123.into(); + let data = ChannelSignal::Resume.encode(); + + Pallet::::suspend_channel(para); + + #[block] + { + ChannelSignal::decode_all(&mut &data[..]).unwrap(); + Pallet::::resume_channel(para); + } + + assert!( + OutboundXcmpStatus::::get().iter().find(|p| p.recipient == para).is_none(), + "No messages in the channel; therefore removed." + ); + } + + /// Split a singular XCM. + #[benchmark] + fn take_first_concatenated_xcm() { + let max_downward_message_size = MaxXcmpMessageLenOf::::get() as usize; + + assert!(MAX_INSTRUCTIONS_TO_DECODE as u32 > MAX_XCM_DECODE_DEPTH, "Preconditon failed"); + let max_instrs = MAX_INSTRUCTIONS_TO_DECODE as u32 - MAX_XCM_DECODE_DEPTH; + let mut xcm = Xcm::(vec![ClearOrigin; max_instrs as usize]); -impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test); + for _ in 0..MAX_XCM_DECODE_DEPTH - 1 { + xcm = Xcm::(vec![Instruction::SetAppendix(xcm)]); + } + + let data = VersionedXcm::::from(xcm).encode(); + assert!(data.len() < max_downward_message_size, "Page size is too small"); + // Verify that decoding works with the exact recursion limit: + VersionedXcm::::decode_with_depth_limit( + MAX_XCM_DECODE_DEPTH, + &mut &data[..], + ) + .unwrap(); + VersionedXcm::::decode_with_depth_limit( + MAX_XCM_DECODE_DEPTH - 1, + &mut &data[..], + ) + .unwrap_err(); + + #[block] + { + Pallet::::take_first_concatenated_xcm(&mut &data[..], &mut WeightMeter::new()) + .unwrap(); + } + } + + /// Benchmark the migration for a maximal sized message. + #[benchmark] + fn on_idle_good_msg() { + use migration::v3; + + let block = 5; + let para = ParaId::from(4); + let message = vec![123u8; MaxXcmpMessageLenOf::::get() as usize]; + let message_metadata = vec![(block, XcmpMessageFormat::ConcatenatedVersionedXcm)]; + + v3::InboundXcmpMessages::::insert(para, block, message); + v3::InboundXcmpStatus::::set(Some(vec![v3::InboundChannelDetails { + sender: para, + state: v3::InboundState::Ok, + message_metadata, + }])); + + #[block] + { + Pallet::::on_idle(0u32.into(), Weight::MAX); + } + } + + /// Benchmark the migration with a 64 KiB message that will not be possible to enqueue. 
+ #[benchmark] + fn on_idle_large_msg() { + use migration::v3; + + let block = 5; + let para = ParaId::from(4); + let message = vec![123u8; 1 << 16]; // 64 KiB message + let message_metadata = vec![(block, XcmpMessageFormat::ConcatenatedVersionedXcm)]; + + v3::InboundXcmpMessages::::insert(para, block, message); + v3::InboundXcmpStatus::::set(Some(vec![v3::InboundChannelDetails { + sender: para, + state: v3::InboundState::Ok, + message_metadata, + }])); + + #[block] + { + Pallet::::on_idle(0u32.into(), Weight::MAX); + } + } + + impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test); +} diff --git a/cumulus/pallets/xcmp-queue/src/bridging.rs b/cumulus/pallets/xcmp-queue/src/bridging.rs new file mode 100644 index 0000000000000000000000000000000000000000..9db4b6e74c3987ae08a6d38eb75ffcacb9e5e713 --- /dev/null +++ b/cumulus/pallets/xcmp-queue/src/bridging.rs @@ -0,0 +1,90 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{pallet, OutboundState}; +use cumulus_primitives_core::ParaId; +use frame_support::pallet_prelude::Get; + +/// Adapter implementation for `bp_xcm_bridge_hub_router::XcmChannelStatusProvider` which checks +/// both `OutboundXcmpStatus` and `InboundXcmpStatus` for defined `ParaId` if any of those is +/// suspended. +pub struct InAndOutXcmpChannelStatusProvider( + sp_std::marker::PhantomData<(SiblingBridgeHubParaId, Runtime)>, +); +impl, Runtime: crate::Config> + bp_xcm_bridge_hub_router::XcmChannelStatusProvider + for InAndOutXcmpChannelStatusProvider +{ + fn is_congested() -> bool { + // if the inbound channel with recipient is suspended, it means that we are unable to + // receive congestion reports from the bridge hub. So we assume the bridge pipeline is + // congested too + if pallet::Pallet::::is_inbound_channel_suspended(SiblingBridgeHubParaId::get()) { + return true + } + + // if the outbound channel with recipient is suspended, it means that one of further + // bridge queues (e.g. bridge queue between two bridge hubs) is overloaded, so we shall + // take larger fee for our outbound messages + OutXcmpChannelStatusProvider::::is_congested() + } +} + +/// Adapter implementation for `bp_xcm_bridge_hub_router::XcmChannelStatusProvider` which checks +/// only `OutboundXcmpStatus` for defined `SiblingParaId` if is suspended. 
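+///
+/// Like `InAndOutXcmpChannelStatusProvider`, this adapter is meant to be plugged in wherever a
+/// `bp_xcm_bridge_hub_router::XcmChannelStatusProvider` implementation is expected, with a
+/// `Get<ParaId>` returning the sibling bridge hub's para id as the first type parameter.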
+pub struct OutXcmpChannelStatusProvider( + sp_std::marker::PhantomData<(SiblingBridgeHubParaId, Runtime)>, +); +impl, Runtime: crate::Config> + bp_xcm_bridge_hub_router::XcmChannelStatusProvider + for OutXcmpChannelStatusProvider +{ + fn is_congested() -> bool { + let sibling_bridge_hub_id: ParaId = SiblingBridgeHubParaId::get(); + + // let's find the channel's state with the sibling parachain, + let Some((outbound_state, queued_pages)) = + pallet::Pallet::::outbound_channel_state(sibling_bridge_hub_id) + else { + return false + }; + // suspended channel => it is congested + if outbound_state == OutboundState::Suspended { + return true + } + + // It takes some time for target parachain to suspend inbound channel with the target BH and + // during that we will keep accepting new message delivery transactions. Let's also reject + // new deliveries if there are too many "pages" (concatenated XCM messages) in the target BH + // -> target parachain queue. + + // If the outbound channel has at least `N` pages enqueued, let's assume it is congested. + // Normally, the chain with a few opened HRMP channels, will "send" pages at every block. + // Having `N` pages means that for last `N` blocks we either have not sent any messages, + // or have sent signals. + + const MAX_QUEUED_PAGES_BEFORE_DEACTIVATION: u16 = 4; + if queued_pages > MAX_QUEUED_PAGES_BEFORE_DEACTIVATION { + return true + } + + false + } +} + +#[cfg(feature = "runtime-benchmarks")] +pub fn suspend_channel_for_benchmarks(target: ParaId) { + pallet::Pallet::::suspend_channel(target) +} diff --git a/cumulus/pallets/xcmp-queue/src/lib.rs b/cumulus/pallets/xcmp-queue/src/lib.rs index 1cb92f59518697d184ca9a6eac407ccdef86f8f2..d687f83d8b3e36bb673bd8540117429880012cd7 100644 --- a/cumulus/pallets/xcmp-queue/src/lib.rs +++ b/cumulus/pallets/xcmp-queue/src/lib.rs @@ -22,6 +22,16 @@ //! Also provides an implementation of `SendXcm` which can be placed in a router tuple for relaying //! XCM over XCMP if the destination is `Parent/Parachain`. It requires an implementation of //! `XcmExecutor` for dispatching incoming XCM messages. +//! +//! To prevent out of memory errors on the `OutboundXcmpMessages` queue, an exponential fee factor +//! (`DeliveryFeeFactor`) is set, much like the one used in DMP. +//! The fee factor increases whenever the total size of messages in a particular channel passes a +//! threshold. This threshold is defined as a percentage of the maximum total size the channel can +//! have. More concretely, the threshold is `max_total_size` / `THRESHOLD_FACTOR`, where: +//! - `max_total_size` is the maximum size, in bytes, of the channel, not number of messages. +//! It is defined in the channel configuration. +//! - `THRESHOLD_FACTOR` just declares which percentage of the max size is the actual threshold. +//! If it's 2, then the threshold is half of the max size, if it's 4, it's a quarter, and so on. 
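+//!
+//! As a rough illustration (the exact numbers come from `delivery_fee_constants`): with
+//! `EXPONENTIAL_FEE_BASE = 1.05` and `MESSAGE_SIZE_FEE_BASE = 0.001`, sending a 3 KiB fragment
+//! into a channel that is above its threshold multiplies that channel's `DeliveryFeeFactor` by
+//! `1.05 + 3 * 0.001 = 1.053`. Once the queued size drops back below the threshold, the factor
+//! is divided by `1.05` again whenever outbound messages are taken, until it returns to its
+//! initial value of 1.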
#![cfg_attr(not(feature = "std"), no_std)] @@ -35,26 +45,31 @@ mod tests; #[cfg(feature = "runtime-benchmarks")] mod benchmarking; +#[cfg(feature = "bridging")] +pub mod bridging; pub mod weights; pub use weights::WeightInfo; +use bounded_collections::BoundedBTreeSet; use codec::{Decode, DecodeLimit, Encode}; use cumulus_primitives_core::{ relay_chain::BlockNumber as RelayBlockNumber, ChannelStatus, GetChannelInfo, MessageSendError, ParaId, XcmpMessageFormat, XcmpMessageHandler, XcmpMessageSource, }; + use frame_support::{ - traits::{EnsureOrigin, Get}, - weights::{constants::WEIGHT_REF_TIME_PER_MILLIS, Weight}, -}; -use polkadot_runtime_common::xcm_sender::PriceForParachainDelivery; -use rand_chacha::{ - rand_core::{RngCore, SeedableRng}, - ChaChaRng, + defensive, defensive_assert, + traits::{EnqueueMessage, EnsureOrigin, Get, QueueFootprint, QueuePausedQuery}, + weights::{constants::WEIGHT_REF_TIME_PER_MILLIS, Weight, WeightMeter}, + BoundedVec, }; +use pallet_message_queue::OnQueueChanged; +use polkadot_runtime_common::xcm_sender::PriceForMessageDelivery; +use polkadot_runtime_parachains::FeeTracker; use scale_info::TypeInfo; -use sp_runtime::RuntimeDebug; -use sp_std::{convert::TryFrom, prelude::*}; +use sp_core::MAX_POSSIBLE_ALLOCATION; +use sp_runtime::{FixedU128, RuntimeDebug, Saturating}; +use sp_std::prelude::*; use xcm::{latest::prelude::*, VersionedXcm, WrapVersion, MAX_XCM_DECODE_DEPTH}; use xcm_executor::traits::ConvertOrigin; @@ -62,20 +77,30 @@ pub use pallet::*; /// Index used to identify overweight XCMs. pub type OverweightIndex = u64; +/// The max length of an XCMP message. +pub type MaxXcmpMessageLenOf = + <::XcmpQueue as EnqueueMessage>::MaxMessageLen; const LOG_TARGET: &str = "xcmp_queue"; const DEFAULT_POV_SIZE: u64 = 64 * 1024; // 64 KB -// Maximum amount of messages to process per block. This is a temporary measure until we properly -// account for proof size weights. -const MAX_MESSAGES_PER_BLOCK: u8 = 10; -// Maximum amount of messages that can exist in the overweight queue at any given time. -const MAX_OVERWEIGHT_MESSAGES: u32 = 1000; +/// Constants related to delivery fee calculation +pub mod delivery_fee_constants { + use super::FixedU128; + + /// Fees will start increasing when queue is half full + pub const THRESHOLD_FACTOR: u32 = 2; + /// The base number the delivery fee factor gets multiplied by every time it is increased. + /// Also, the number it gets divided by when decreased. + pub const EXPONENTIAL_FEE_BASE: FixedU128 = FixedU128::from_rational(105, 100); // 1.05 + /// The contribution of each KB to a fee factor increase + pub const MESSAGE_SIZE_FEE_BASE: FixedU128 = FixedU128::from_rational(1, 1000); // 0.001 +} #[frame_support::pallet] pub mod pallet { use super::*; - use frame_support::pallet_prelude::*; + use frame_support::{pallet_prelude::*, Twox64Concat}; use frame_system::pallet_prelude::*; #[pallet::pallet] @@ -87,17 +112,25 @@ pub mod pallet { pub trait Config: frame_system::Config { type RuntimeEvent: From> + IsType<::RuntimeEvent>; - /// Something to execute an XCM message. We need this to service the XCMoXCMP queue. - type XcmExecutor: ExecuteXcm; - - /// Information on the avaialble XCMP channels. + /// Information on the available XCMP channels. type ChannelInfo: GetChannelInfo; /// Means of converting an `Xcm` into a `VersionedXcm`. type VersionWrapper: WrapVersion; - /// The origin that is allowed to execute overweight messages. 
- type ExecuteOverweightOrigin: EnsureOrigin; + /// Enqueue an inbound horizontal message for later processing. + /// + /// This defines the maximal message length via [`crate::MaxXcmpMessageLenOf`]. The pallet + /// assumes that this hook will eventually process all the pushed messages. + type XcmpQueue: EnqueueMessage; + + /// The maximum number of inbound XCMP channels that can be suspended simultaneously. + /// + /// Any further channel suspensions will fail and messages may get dropped without further + /// notice. Choosing a high value (1000) is okay; the trade-off that is described in + /// [`InboundXcmpSuspended`] still applies at that scale. + #[pallet::constant] + type MaxInboundSuspended: Get; /// The origin that is allowed to resume or suspend the XCMP queue. type ControllerOrigin: EnsureOrigin; @@ -107,58 +140,14 @@ pub mod pallet { type ControllerOriginConverter: ConvertOrigin; /// The price for delivering an XCM to a sibling parachain destination. - type PriceForSiblingDelivery: PriceForParachainDelivery; + type PriceForSiblingDelivery: PriceForMessageDelivery; /// The weight information of this pallet. type WeightInfo: WeightInfo; } - #[pallet::hooks] - impl Hooks> for Pallet { - fn on_idle(_now: BlockNumberFor, max_weight: Weight) -> Weight { - // on_idle processes additional messages with any remaining block weight. - Self::service_xcmp_queue(max_weight) - } - } - #[pallet::call] impl Pallet { - /// Services a single overweight XCM. - /// - /// - `origin`: Must pass `ExecuteOverweightOrigin`. - /// - `index`: The index of the overweight XCM to service - /// - `weight_limit`: The amount of weight that XCM execution may take. - /// - /// Errors: - /// - `BadOverweightIndex`: XCM under `index` is not found in the `Overweight` storage map. - /// - `BadXcm`: XCM under `index` cannot be properly decoded into a valid XCM format. - /// - `WeightOverLimit`: XCM execution may use greater `weight_limit`. - /// - /// Events: - /// - `OverweightServiced`: On success. - #[pallet::call_index(0)] - #[pallet::weight((weight_limit.saturating_add(Weight::from_parts(1_000_000, 0)), DispatchClass::Operational))] - pub fn service_overweight( - origin: OriginFor, - index: OverweightIndex, - weight_limit: Weight, - ) -> DispatchResultWithPostInfo { - T::ExecuteOverweightOrigin::ensure_origin(origin)?; - - let (sender, sent_at, data) = - Overweight::::get(index).ok_or(Error::::BadOverweightIndex)?; - let xcm = VersionedXcm::::decode_all_with_depth_limit( - MAX_XCM_DECODE_DEPTH, - &mut data.as_slice(), - ) - .map_err(|_| Error::::BadXcm)?; - let used = Self::handle_xcm_message(sender, sent_at, xcm, weight_limit) - .map_err(|_| Error::::WeightOverLimit)?; - Overweight::::remove(index); - Self::deposit_event(Event::OverweightServiced { index, used }); - Ok(Some(used.saturating_add(Weight::from_parts(1_000_000, 0))).into()) - } - /// Suspends all XCM executions for the XCMP queue, regardless of the sender's origin. /// /// - `origin`: Must pass `ControllerOrigin`. @@ -167,9 +156,14 @@ pub mod pallet { pub fn suspend_xcm_execution(origin: OriginFor) -> DispatchResult { T::ControllerOrigin::ensure_origin(origin)?; - QueueSuspended::::put(true); - - Ok(()) + QueueSuspended::::try_mutate(|suspended| { + if *suspended { + Err(Error::::AlreadySuspended.into()) + } else { + *suspended = true; + Ok(()) + } + }) } /// Resumes all XCM executions for the XCMP queue. 
@@ -182,13 +176,18 @@ pub mod pallet { pub fn resume_xcm_execution(origin: OriginFor) -> DispatchResult { T::ControllerOrigin::ensure_origin(origin)?; - QueueSuspended::::put(false); - - Ok(()) + QueueSuspended::::try_mutate(|suspended| { + if !*suspended { + Err(Error::::AlreadyResumed.into()) + } else { + *suspended = false; + Ok(()) + } + }) } - /// Overwrites the number of pages of messages which must be in the queue for the other side - /// to be told to suspend their sending. + /// Overwrites the number of pages which must be in the queue for the other side to be + /// told to suspend their sending. /// /// - `origin`: Must pass `Root`. /// - `new`: Desired value for `QueueConfigData.suspend_value` @@ -196,13 +195,15 @@ pub mod pallet { #[pallet::weight((T::WeightInfo::set_config_with_u32(), DispatchClass::Operational,))] pub fn update_suspend_threshold(origin: OriginFor, new: u32) -> DispatchResult { ensure_root(origin)?; - QueueConfig::::mutate(|data| data.suspend_threshold = new); - Ok(()) + QueueConfig::::try_mutate(|data| { + data.suspend_threshold = new; + data.validate::() + }) } - /// Overwrites the number of pages of messages which must be in the queue after which we - /// drop any further messages from the channel. + /// Overwrites the number of pages which must be in the queue after which we drop any + /// further messages from the channel. /// /// - `origin`: Must pass `Root`. /// - `new`: Desired value for `QueueConfigData.drop_threshold` @@ -210,13 +211,15 @@ pub mod pallet { #[pallet::weight((T::WeightInfo::set_config_with_u32(),DispatchClass::Operational,))] pub fn update_drop_threshold(origin: OriginFor, new: u32) -> DispatchResult { ensure_root(origin)?; - QueueConfig::::mutate(|data| data.drop_threshold = new); - Ok(()) + QueueConfig::::try_mutate(|data| { + data.drop_threshold = new; + data.validate::() + }) } - /// Overwrites the number of pages of messages which the queue must be reduced to before it - /// signals that message sending may recommence after it has been suspended. + /// Overwrites the number of pages which the queue must be reduced to before it signals + /// that message sending may recommence after it has been suspended. /// /// - `origin`: Must pass `Root`. /// - `new`: Desired value for `QueueConfigData.resume_threshold` @@ -224,112 +227,68 @@ pub mod pallet { #[pallet::weight((T::WeightInfo::set_config_with_u32(), DispatchClass::Operational,))] pub fn update_resume_threshold(origin: OriginFor, new: u32) -> DispatchResult { ensure_root(origin)?; - QueueConfig::::mutate(|data| data.resume_threshold = new); - Ok(()) + QueueConfig::::try_mutate(|data| { + data.resume_threshold = new; + data.validate::() + }) } + } - /// Overwrites the amount of remaining weight under which we stop processing messages. - /// - /// - `origin`: Must pass `Root`. - /// - `new`: Desired value for `QueueConfigData.threshold_weight` - #[pallet::call_index(6)] - #[pallet::weight((T::WeightInfo::set_config_with_weight(), DispatchClass::Operational,))] - pub fn update_threshold_weight(origin: OriginFor, new: Weight) -> DispatchResult { - ensure_root(origin)?; - QueueConfig::::mutate(|data| data.threshold_weight = new); - - Ok(()) + #[pallet::hooks] + impl Hooks> for Pallet { + fn integrity_test() { + let w = Self::on_idle_weight(); + assert!(w != Weight::zero()); + assert!(w.all_lte(T::BlockWeights::get().max_block)); } - /// Overwrites the speed to which the available weight approaches the maximum weight. - /// A lower number results in a faster progression. 
A value of 1 makes the entire weight - /// available initially. - /// - /// - `origin`: Must pass `Root`. - /// - `new`: Desired value for `QueueConfigData.weight_restrict_decay`. - #[pallet::call_index(7)] - #[pallet::weight((T::WeightInfo::set_config_with_weight(), DispatchClass::Operational,))] - pub fn update_weight_restrict_decay(origin: OriginFor, new: Weight) -> DispatchResult { - ensure_root(origin)?; - QueueConfig::::mutate(|data| data.weight_restrict_decay = new); + fn on_idle(_block: BlockNumberFor, limit: Weight) -> Weight { + let mut meter = WeightMeter::with_limit(limit); - Ok(()) - } + if meter.try_consume(Self::on_idle_weight()).is_err() { + log::debug!( + "Not enough weight for on_idle. {} < {}", + Self::on_idle_weight(), + limit + ); + return meter.consumed() + } - /// Overwrite the maximum amount of weight any individual message may consume. - /// Messages above this weight go into the overweight queue and may only be serviced - /// explicitly. - /// - /// - `origin`: Must pass `Root`. - /// - `new`: Desired value for `QueueConfigData.xcmp_max_individual_weight`. - #[pallet::call_index(8)] - #[pallet::weight((T::WeightInfo::set_config_with_weight(), DispatchClass::Operational,))] - pub fn update_xcmp_max_individual_weight( - origin: OriginFor, - new: Weight, - ) -> DispatchResult { - ensure_root(origin)?; - QueueConfig::::mutate(|data| data.xcmp_max_individual_weight = new); + migration::lazy_migrate_inbound_queue::(); - Ok(()) + meter.consumed() } } #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { - /// Some XCM was executed ok. - Success { message_hash: XcmHash, message_id: XcmHash, weight: Weight }, - /// Some XCM failed. - Fail { message_hash: XcmHash, message_id: XcmHash, error: XcmError, weight: Weight }, - /// Bad XCM version used. - BadVersion { message_hash: XcmHash }, - /// Bad XCM format used. - BadFormat { message_hash: XcmHash }, /// An HRMP message was sent to a sibling parachain. XcmpMessageSent { message_hash: XcmHash }, - /// An XCM exceeded the individual message weight budget. - OverweightEnqueued { - sender: ParaId, - sent_at: RelayBlockNumber, - index: OverweightIndex, - required: Weight, - }, - /// An XCM from the overweight queue was executed with the given actual weight used. - OverweightServiced { index: OverweightIndex, used: Weight }, } #[pallet::error] pub enum Error { - /// Failed to send XCM message. - FailedToSend, - /// Bad XCM origin. - BadXcmOrigin, - /// Bad XCM data. - BadXcm, - /// Bad overweight index. - BadOverweightIndex, - /// Provided weight is possibly not enough to execute the message. - WeightOverLimit, + /// Setting the queue config failed since one of its values was invalid. + BadQueueConfig, + /// The execution is already suspended. + AlreadySuspended, + /// The execution is already resumed. + AlreadyResumed, } - /// Status of the inbound XCMP channels. - #[pallet::storage] - pub(super) type InboundXcmpStatus = - StorageValue<_, Vec, ValueQuery>; - - /// Inbound aggregate XCMP messages. It can only be one per ParaId/block. + /// The suspended inbound XCMP channels. All others are not suspended. + /// + /// This is a `StorageValue` instead of a `StorageMap` since we expect multiple reads per block + /// to different keys with a one byte payload. The access to `BoundedBTreeSet` will be cached + /// within the block and therefore only included once in the proof size. 
+ /// + /// NOTE: The PoV benchmarking cannot know this and will over-estimate, but the actual proof + /// will be smaller. #[pallet::storage] - pub(super) type InboundXcmpMessages = StorageDoubleMap< - _, - Blake2_128Concat, - ParaId, - Twox64Concat, - RelayBlockNumber, - Vec, - ValueQuery, - >; + pub type InboundXcmpSuspended = + StorageValue<_, BoundedBTreeSet, ValueQuery>; /// The non-empty XCMP channels in order of becoming non-empty, and the index of the first /// and last outbound message. If the two indices are equal, then it indicates an empty @@ -356,28 +315,20 @@ pub mod pallet { #[pallet::storage] pub(super) type QueueConfig = StorageValue<_, QueueConfigData, ValueQuery>; - /// The messages that exceeded max individual message weight budget. - /// - /// These message stay in this storage map until they are manually dispatched via - /// `service_overweight`. - #[pallet::storage] - pub(super) type Overweight = - CountedStorageMap<_, Twox64Concat, OverweightIndex, (ParaId, RelayBlockNumber, Vec)>; - - /// The number of overweight messages ever recorded in `Overweight`. Also doubles as the next - /// available free overweight index. - #[pallet::storage] - pub(super) type OverweightCount = StorageValue<_, OverweightIndex, ValueQuery>; - /// Whether or not the XCMP queue is suspended from executing incoming XCMs or not. #[pallet::storage] pub(super) type QueueSuspended = StorageValue<_, bool, ValueQuery>; -} -#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, RuntimeDebug, TypeInfo)] -pub enum InboundState { - Ok, - Suspended, + /// Initialization value for the DeliveryFee factor. + #[pallet::type_value] + pub fn InitialFactor() -> FixedU128 { + FixedU128::from_u32(1) + } + + /// The factor to multiply the base delivery fee by. + #[pallet::storage] + pub(super) type DeliveryFeeFactor = + StorageMap<_, Twox64Concat, ParaId, FixedU128, ValueQuery, InitialFactor>; } #[derive(Copy, Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] @@ -386,22 +337,9 @@ pub enum OutboundState { Suspended, } -/// Struct containing detailed information about the inbound channel. -#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, TypeInfo)] -pub struct InboundChannelDetails { - /// The `ParaId` of the parachain that this channel is connected with. - sender: ParaId, - /// The state of the channel. - state: InboundState, - /// The ordered metadata of each inbound message. - /// - /// Contains info about the relay block number that the message was sent at, and the format - /// of the incoming message. - message_metadata: Vec<(RelayBlockNumber, XcmpMessageFormat)>, -} - /// Struct containing detailed information about the outbound channel. #[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo)] +#[cfg_attr(feature = "std", derive(Debug))] pub struct OutboundChannelDetails { /// The `ParaId` of the parachain that this channel is connected with. recipient: ParaId, @@ -439,31 +377,40 @@ impl OutboundChannelDetails { #[derive(Copy, Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] pub struct QueueConfigData { - /// The number of pages of messages which must be in the queue for the other side to be told to - /// suspend their sending. + /// The number of pages which must be in the queue for the other side to be told to suspend + /// their sending. suspend_threshold: u32, - /// The number of pages of messages which must be in the queue after which we drop any further - /// messages from the channel. 
+ /// The number of pages which must be in the queue after which we drop any further messages + /// from the channel. This should normally not happen since the `suspend_threshold` can be used + /// to suspend the channel. drop_threshold: u32, - /// The number of pages of messages which the queue must be reduced to before it signals that + /// The number of pages which the queue must be reduced to before it signals that /// message sending may recommence after it has been suspended. resume_threshold: u32, - /// The amount of remaining weight under which we stop processing messages. + /// UNUSED - The amount of remaining weight under which we stop processing messages. + #[deprecated(note = "Will be removed")] threshold_weight: Weight, - /// The speed to which the available weight approaches the maximum weight. A lower number - /// results in a faster progression. A value of 1 makes the entire weight available initially. + /// UNUSED - The speed to which the available weight approaches the maximum weight. A lower + /// number results in a faster progression. A value of 1 makes the entire weight available + /// initially. + #[deprecated(note = "Will be removed")] weight_restrict_decay: Weight, - /// The maximum amount of weight any individual message may consume. Messages above this weight - /// go into the overweight queue and may only be serviced explicitly. + /// UNUSED - The maximum amount of weight any individual message may consume. Messages above + /// this weight go into the overweight queue and may only be serviced explicitly. + #[deprecated(note = "Will be removed")] xcmp_max_individual_weight: Weight, } impl Default for QueueConfigData { fn default() -> Self { + // NOTE that these default values are only used on genesis. They should give a rough idea of + // what to set these values to, but is in no way a requirement. + #![allow(deprecated)] Self { - suspend_threshold: 2, - drop_threshold: 5, - resume_threshold: 1, + drop_threshold: 48, // 64KiB * 48 = 3MiB + suspend_threshold: 32, // 64KiB * 32 = 2MiB + resume_threshold: 8, // 64KiB * 8 = 512KiB + // unused: threshold_weight: Weight::from_parts(100_000, 0), weight_restrict_decay: Weight::from_parts(2, 0), xcmp_max_individual_weight: Weight::from_parts( @@ -474,6 +421,22 @@ impl Default for QueueConfigData { } } +impl QueueConfigData { + /// Validate all assumptions about `Self`. + /// + /// Should be called prior to accepting this as new config. + pub fn validate(&self) -> sp_runtime::DispatchResult { + if self.resume_threshold < self.suspend_threshold && + self.suspend_threshold <= self.drop_threshold && + self.resume_threshold > 0 + { + Ok(()) + } else { + Err(Error::::BadQueueConfig.into()) + } + } +} + #[derive(PartialEq, Eq, Copy, Clone, Encode, Decode, TypeInfo)] pub enum ChannelSignal { Suspend, @@ -501,61 +464,96 @@ impl Pallet { /// length prefixed and can thus decode each fragment from the aggregate stream. With this, /// we can concatenate them into a single aggregate blob without needing to be concerned /// about encoding fragment boundaries. + /// + /// If successful, returns the number of pages in the outbound queue after enqueuing the new + /// fragment. fn send_fragment( recipient: ParaId, format: XcmpMessageFormat, fragment: Fragment, ) -> Result { - let data = fragment.encode(); - - // Optimization note: `max_message_size` could potentially be stored in - // `OutboundXcmpMessages` once known; that way it's only accessed when a new page is needed. 
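+ // Encode the fragment once up front: the bytes are appended to an outbound page below and
+ // their length also feeds into the delivery fee size factor.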
+ let encoded_fragment = fragment.encode(); - let max_message_size = - T::ChannelInfo::get_channel_max(recipient).ok_or(MessageSendError::NoChannel)?; - if data.len() > max_message_size { + let channel_info = + T::ChannelInfo::get_channel_info(recipient).ok_or(MessageSendError::NoChannel)?; + let max_message_size = channel_info.max_message_size as usize; + // Max message size refers to aggregates, or pages. Not to individual fragments. + if encoded_fragment.len() > max_message_size { return Err(MessageSendError::TooBig) } - let mut s = >::get(); - let details = if let Some(details) = s.iter_mut().find(|item| item.recipient == recipient) { + let mut all_channels = >::get(); + let channel_details = if let Some(details) = + all_channels.iter_mut().find(|channel| channel.recipient == recipient) + { details } else { - s.push(OutboundChannelDetails::new(recipient)); - s.last_mut().expect("can't be empty; a new element was just pushed; qed") + all_channels.push(OutboundChannelDetails::new(recipient)); + all_channels + .last_mut() + .expect("can't be empty; a new element was just pushed; qed") }; - let have_active = details.last_index > details.first_index; - let appended = have_active && - >::mutate(recipient, details.last_index - 1, |s| { - if XcmpMessageFormat::decode_with_depth_limit(MAX_XCM_DECODE_DEPTH, &mut &s[..]) != - Ok(format) - { - return false - } - if s.len() + data.len() > max_message_size { - return false - } - s.extend_from_slice(&data[..]); - true - }); - if appended { - Ok((details.last_index - details.first_index - 1) as u32) + let have_active = channel_details.last_index > channel_details.first_index; + // Try to append fragment to the last page, if there is enough space. + // We return the size of the last page inside of the option, to not calculate it again. + let appended_to_last_page = have_active + .then(|| { + >::mutate( + recipient, + channel_details.last_index - 1, + |page| { + if XcmpMessageFormat::decode_with_depth_limit( + MAX_XCM_DECODE_DEPTH, + &mut &page[..], + ) != Ok(format) + { + defensive!("Bad format in outbound queue; dropping message"); + return None + } + if page.len() + encoded_fragment.len() > max_message_size { + return None + } + page.extend_from_slice(&encoded_fragment[..]); + Some(page.len()) + }, + ) + }) + .flatten(); + + let (number_of_pages, last_page_size) = if let Some(size) = appended_to_last_page { + let number_of_pages = (channel_details.last_index - channel_details.first_index) as u32; + (number_of_pages, size) } else { // Need to add a new page. - let page_index = details.last_index; - details.last_index += 1; + let page_index = channel_details.last_index; + channel_details.last_index += 1; let mut new_page = format.encode(); - new_page.extend_from_slice(&data[..]); + new_page.extend_from_slice(&encoded_fragment[..]); + let last_page_size = new_page.len(); + let number_of_pages = (channel_details.last_index - channel_details.first_index) as u32; >::insert(recipient, page_index, new_page); - let r = (details.last_index - details.first_index - 1) as u32; - >::put(s); - Ok(r) + >::put(all_channels); + (number_of_pages, last_page_size) + }; + + // We have to count the total size here since `channel_info.total_size` is not updated at + // this point in time. We assume all previous pages are filled, which, in practice, is not + // always the case. 
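+ // If this estimate crosses `max_total_size / THRESHOLD_FACTOR`, the delivery fee factor for
+ // this channel is increased by an amount that grows with the size of the fragment just sent.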
+ let total_size = + number_of_pages.saturating_sub(1) * max_message_size as u32 + last_page_size as u32; + let threshold = channel_info.max_total_size / delivery_fee_constants::THRESHOLD_FACTOR; + if total_size > threshold { + let message_size_factor = FixedU128::from((encoded_fragment.len() / 1024) as u128) + .saturating_mul(delivery_fee_constants::MESSAGE_SIZE_FEE_BASE); + Self::increase_fee_factor(recipient, message_size_factor); } + + Ok(number_of_pages) } /// Sends a signal to the `dest` chain over XCMP. This is guaranteed to be dispatched on this /// block. - fn send_signal(dest: ParaId, signal: ChannelSignal) -> Result<(), ()> { + fn send_signal(dest: ParaId, signal: ChannelSignal) { let mut s = >::get(); if let Some(details) = s.iter_mut().find(|item| item.recipient == dest) { details.signals_exist = true; @@ -563,365 +561,16 @@ impl Pallet { s.push(OutboundChannelDetails::new(dest).with_signals()); } >::mutate(dest, |page| { - if page.is_empty() { - *page = (XcmpMessageFormat::Signals, signal).encode(); - } else { - signal.using_encoded(|s| page.extend_from_slice(s)); - } + *page = (XcmpMessageFormat::Signals, signal).encode(); }); >::put(s); - - Ok(()) - } - - pub fn send_blob_message(recipient: ParaId, blob: Vec) -> Result { - Self::send_fragment(recipient, XcmpMessageFormat::ConcatenatedEncodedBlob, blob) - } - - pub fn send_xcm_message( - recipient: ParaId, - xcm: VersionedXcm<()>, - ) -> Result { - Self::send_fragment(recipient, XcmpMessageFormat::ConcatenatedVersionedXcm, xcm) - } - - fn create_shuffle(len: usize) -> Vec { - // Create a shuffled order for use to iterate through. - // Not a great random seed, but good enough for our purposes. - let seed = frame_system::Pallet::::parent_hash(); - let seed = - <[u8; 32]>::decode(&mut sp_runtime::traits::TrailingZeroInput::new(seed.as_ref())) - .expect("input is padded with zeroes; qed"); - let mut rng = ChaChaRng::from_seed(seed); - let mut shuffled = (0..len).collect::>(); - for i in 0..len { - let j = (rng.next_u32() as usize) % len; - shuffled.as_mut_slice().swap(i, j); - } - shuffled - } - - fn handle_blob_message( - _sender: ParaId, - _sent_at: RelayBlockNumber, - _blob: Vec, - _weight_limit: Weight, - ) -> Result { - debug_assert!(false, "Blob messages not handled."); - Err(false) - } - - fn handle_xcm_message( - sender: ParaId, - _sent_at: RelayBlockNumber, - xcm: VersionedXcm, - max_weight: Weight, - ) -> Result { - let message_hash = xcm.using_encoded(sp_io::hashing::blake2_256); - log::debug!("Processing XCMP-XCM: {:?}", &message_hash); - let (result, event) = match Xcm::::try_from(xcm) { - Ok(xcm) => { - let location = (Parent, Parachain(sender.into())); - let mut message_id = message_hash; - - match T::XcmExecutor::prepare_and_execute( - location, - xcm, - &mut message_id, - max_weight, - Weight::zero(), - ) { - Outcome::Error(error) => ( - Err(error), - Event::Fail { message_hash, message_id, error, weight: Weight::zero() }, - ), - Outcome::Complete(weight) => - (Ok(weight), Event::Success { message_hash, message_id, weight }), - // As far as the caller is concerned, this was dispatched without error, so - // we just report the weight used. 
- Outcome::Incomplete(weight, error) => - (Ok(weight), Event::Fail { message_hash, message_id, error, weight }), - } - }, - Err(()) => (Err(XcmError::UnhandledXcmVersion), Event::BadVersion { message_hash }), - }; - Self::deposit_event(event); - result - } - - fn process_xcmp_message( - sender: ParaId, - (sent_at, format): (RelayBlockNumber, XcmpMessageFormat), - messages_processed: &mut u8, - max_weight: Weight, - max_individual_weight: Weight, - ) -> (Weight, bool) { - let data = >::get(sender, sent_at); - let mut last_remaining_fragments; - let mut remaining_fragments = &data[..]; - let mut weight_used = Weight::zero(); - match format { - XcmpMessageFormat::ConcatenatedVersionedXcm => { - while !remaining_fragments.is_empty() && - *messages_processed < MAX_MESSAGES_PER_BLOCK - { - last_remaining_fragments = remaining_fragments; - if let Ok(xcm) = VersionedXcm::::decode_with_depth_limit( - MAX_XCM_DECODE_DEPTH, - &mut remaining_fragments, - ) { - let weight = max_weight - weight_used; - *messages_processed += 1; - match Self::handle_xcm_message(sender, sent_at, xcm, weight) { - Ok(used) => weight_used = weight_used.saturating_add(used), - Err(XcmError::WeightLimitReached(required)) - if required.any_gt(max_individual_weight) => - { - let is_under_limit = - Overweight::::count() < MAX_OVERWEIGHT_MESSAGES; - weight_used.saturating_accrue(T::DbWeight::get().reads(1)); - if is_under_limit { - // overweight - add to overweight queue and continue with - // message execution consuming the message. - let msg_len = last_remaining_fragments - .len() - .saturating_sub(remaining_fragments.len()); - let overweight_xcm = - last_remaining_fragments[..msg_len].to_vec(); - let index = - Self::stash_overweight(sender, sent_at, overweight_xcm); - let e = Event::OverweightEnqueued { - sender, - sent_at, - index, - required, - }; - Self::deposit_event(e); - } - }, - Err(XcmError::WeightLimitReached(required)) - if required.all_lte(max_weight) => - { - // That message didn't get processed this time because of being - // too heavy. We leave it around for next time and bail. - remaining_fragments = last_remaining_fragments; - break - }, - Err(error) => { - log::error!( - "Failed to process XCMP-XCM message, caused by {:?}", - error - ); - // Message looks invalid; don't attempt to retry - }, - } - } else { - debug_assert!(false, "Invalid incoming XCMP message data"); - remaining_fragments = &b""[..]; - } - } - }, - XcmpMessageFormat::ConcatenatedEncodedBlob => { - while !remaining_fragments.is_empty() { - last_remaining_fragments = remaining_fragments; - - if let Ok(blob) = >::decode(&mut remaining_fragments) { - let weight = max_weight - weight_used; - *messages_processed += 1; - match Self::handle_blob_message(sender, sent_at, blob, weight) { - Ok(used) => weight_used = weight_used.saturating_add(used), - Err(true) => { - // That message didn't get processed this time because of being - // too heavy. We leave it around for next time and bail. 
- remaining_fragments = last_remaining_fragments; - break - }, - Err(false) => { - // Message invalid; don't attempt to retry - }, - } - } else { - debug_assert!(false, "Invalid incoming blob message data"); - remaining_fragments = &b""[..]; - } - } - }, - XcmpMessageFormat::Signals => { - debug_assert!(false, "All signals are handled immediately; qed"); - remaining_fragments = &b""[..]; - }, - } - let is_empty = remaining_fragments.is_empty(); - if is_empty { - >::remove(sender, sent_at); - } else { - >::insert(sender, sent_at, remaining_fragments); - } - (weight_used, is_empty) - } - - /// Puts a given XCM into the list of overweight messages, allowing it to be executed later. - fn stash_overweight( - sender: ParaId, - sent_at: RelayBlockNumber, - xcm: Vec, - ) -> OverweightIndex { - let index = OverweightCount::::mutate(|count| { - let index = *count; - *count += 1; - index - }); - - Overweight::::insert(index, (sender, sent_at, xcm)); - index - } - - /// Service the incoming XCMP message queue attempting to execute up to `max_weight` execution - /// weight of messages. - /// - /// Channels are first shuffled and then processed in this random one page at a time, order over - /// and over until either `max_weight` is exhausted or no channel has messages that can be - /// processed any more. - /// - /// There are two obvious "modes" that we could apportion `max_weight`: one would be to attempt - /// to spend it all on the first channel's first page, then use the leftover (if any) for the - /// second channel's first page and so on until finally we cycle back and the process messages - /// on the first channel's second page &c. The other mode would be to apportion only `1/N` of - /// `max_weight` for the first page (where `N` could be, perhaps, the number of channels to - /// service, using the remainder plus the next `1/N` for the next channel's page &c. - /// - /// Both modes have good qualities, the first ensures that a channel with a large message (over - /// `1/N` does not get indefinitely blocked if other channels have continuous, light traffic. - /// The second is fairer, and ensures that channels with continuous light messages don't suffer - /// high latency. - /// - /// The following code is a hybrid solution; we have a concept of `weight_available` which - /// incrementally approaches `max_weight` as more channels are attempted to be processed. We use - /// the parameter `weight_restrict_decay` to control the speed with which `weight_available` - /// approaches `max_weight`, with `0` being strictly equivalent to the first aforementioned - /// mode, and `N` approximating the second. A reasonable parameter may be `1`, which makes - /// half of the `max_weight` available for the first page, then a quarter plus the remainder - /// for the second &c. though empirical and or practical factors may give rise to adjusting it - /// further. - fn service_xcmp_queue(max_weight: Weight) -> Weight { - let suspended = QueueSuspended::::get(); - let mut messages_processed = 0; - - let mut status = >::get(); // <- sorted. - if status.is_empty() { - return Weight::zero() - } - - let QueueConfigData { - resume_threshold, - threshold_weight, - weight_restrict_decay, - xcmp_max_individual_weight, - .. 
- } = >::get(); - - let mut shuffled = Self::create_shuffle(status.len()); - let mut weight_used = Weight::zero(); - let mut weight_available = Weight::zero(); - - // We don't want the possibility of a chain sending a series of really heavy messages and - // tying up the block's execution time from other chains. Therefore we execute any remaining - // messages in a random order. - // Order within a single channel will always be preserved, however this does mean that - // relative order between channels may not. The result is that chains which tend to send - // fewer, lighter messages will generally have a lower latency than chains which tend to - // send more, heavier messages. - - let mut shuffle_index = 0; - while shuffle_index < shuffled.len() && - max_weight.saturating_sub(weight_used).all_gte(threshold_weight) && - messages_processed < MAX_MESSAGES_PER_BLOCK - { - let index = shuffled[shuffle_index]; - let sender = status[index].sender; - let sender_origin = T::ControllerOriginConverter::convert_origin( - (Parent, Parachain(sender.into())), - OriginKind::Superuser, - ); - let is_controller = sender_origin - .map_or(false, |origin| T::ControllerOrigin::try_origin(origin).is_ok()); - - if suspended && !is_controller { - shuffle_index += 1; - continue - } - - if weight_available != max_weight { - // Get incrementally closer to freeing up max_weight for message execution over the - // first round. For the second round we unlock all weight. If we come close enough - // on the first round to unlocking everything, then we do so. - if shuffle_index < status.len() { - weight_available += - (max_weight - weight_available) / (weight_restrict_decay.ref_time() + 1); - if (weight_available + threshold_weight).any_gt(max_weight) { - weight_available = max_weight; - } - } else { - weight_available = max_weight; - } - } - - let weight_processed = if status[index].message_metadata.is_empty() { - debug_assert!(false, "channel exists in status; there must be messages; qed"); - Weight::zero() - } else { - // Process up to one block's worth for now. - let weight_remaining = weight_available.saturating_sub(weight_used); - let (weight_processed, is_empty) = Self::process_xcmp_message( - sender, - status[index].message_metadata[0], - &mut messages_processed, - weight_remaining, - xcmp_max_individual_weight, - ); - if is_empty { - status[index].message_metadata.remove(0); - } - weight_processed - }; - weight_used += weight_processed; - - if status[index].message_metadata.len() as u32 <= resume_threshold && - status[index].state == InboundState::Suspended - { - // Resume - let r = Self::send_signal(sender, ChannelSignal::Resume); - debug_assert!(r.is_ok(), "WARNING: Failed sending resume into suspended channel"); - status[index].state = InboundState::Ok; - } - - // If there are more and we're making progress, we process them after we've given the - // other channels a look in. If we've still not unlocked all weight, then we set them - // up for processing a second time anyway. - if !status[index].message_metadata.is_empty() && - (weight_processed.any_gt(Weight::zero()) || weight_available != max_weight) - { - if shuffle_index + 1 == shuffled.len() { - // Only this queue left. Just run around this loop once more. - continue - } - shuffled.push(index); - } - shuffle_index += 1; - } - - // Only retain the senders that have non-empty queues. 
- status.retain(|item| !item.message_metadata.is_empty()); - - >::put(status); - weight_used } fn suspend_channel(target: ParaId) { >::mutate(|s| { if let Some(details) = s.iter_mut().find(|item| item.recipient == target) { let ok = details.state == OutboundState::Ok; - debug_assert!(ok, "WARNING: Attempt to suspend channel that was not Ok."); + defensive_assert!(ok, "WARNING: Attempt to suspend channel that was not Ok."); details.state = OutboundState::Suspended; } else { s.push(OutboundChannelDetails::new(target).with_suspended_state()); @@ -933,7 +582,7 @@ impl Pallet { >::mutate(|s| { if let Some(index) = s.iter().position(|item| item.recipient == target) { let suspended = s[index].state == OutboundState::Suspended; - debug_assert!( + defensive_assert!( suspended, "WARNING: Attempt to resume channel that was not suspended." ); @@ -943,10 +592,120 @@ impl Pallet { s[index].state = OutboundState::Ok; } } else { - debug_assert!(false, "WARNING: Attempt to resume channel that was not suspended."); + defensive!("WARNING: Attempt to resume channel that was not suspended."); } }); } + + fn enqueue_xcmp_message( + sender: ParaId, + xcm: BoundedVec>, + meter: &mut WeightMeter, + ) -> Result<(), ()> { + if meter.try_consume(T::WeightInfo::enqueue_xcmp_message()).is_err() { + defensive!("Out of weight: cannot enqueue XCMP messages; dropping msg"); + return Err(()) + } + + let QueueConfigData { drop_threshold, .. } = >::get(); + let fp = T::XcmpQueue::footprint(sender); + // Assume that it will not fit into the current page: + let new_pages = fp.pages.saturating_add(1); + if new_pages > drop_threshold { + // This should not happen since the channel should have been suspended in + // [`on_queue_changed`]. + log::error!("XCMP queue for sibling {:?} is full; dropping messages.", sender); + return Err(()) + } + + T::XcmpQueue::enqueue_message(xcm.as_bounded_slice(), sender); + Ok(()) + } + + /// Split concatenated encoded `VersionedXcm`s or `MaybeDoubleEncodedVersionedXcm`s into + /// individual items. + /// + /// We directly encode them again since that is needed later on. + pub(crate) fn take_first_concatenated_xcm( + data: &mut &[u8], + meter: &mut WeightMeter, + ) -> Result>, ()> { + if data.is_empty() { + return Err(()) + } + + if meter.try_consume(T::WeightInfo::take_first_concatenated_xcm()).is_err() { + defensive!("Out of weight; could not decode all; dropping"); + return Err(()) + } + + let xcm = VersionedXcm::<()>::decode_with_depth_limit(MAX_XCM_DECODE_DEPTH, data) + .map_err(|_| ())?; + xcm.encode().try_into().map_err(|_| ()) + } + + /// The worst-case weight of `on_idle`. + pub fn on_idle_weight() -> Weight { + ::WeightInfo::on_idle_good_msg() + .max(::WeightInfo::on_idle_large_msg()) + } + + #[cfg(feature = "bridging")] + fn is_inbound_channel_suspended(sender: ParaId) -> bool { + >::get().iter().any(|c| c == &sender) + } + + #[cfg(feature = "bridging")] + /// Returns tuple of `OutboundState` and number of queued pages. + fn outbound_channel_state(target: ParaId) -> Option<(OutboundState, u16)> { + >::get().iter().find(|c| c.recipient == target).map(|c| { + let queued_pages = c.last_index.saturating_sub(c.first_index); + (c.state, queued_pages) + }) + } +} + +impl OnQueueChanged for Pallet { + // Suspends/Resumes the queue when certain thresholds are reached. + fn on_queue_changed(para: ParaId, fp: QueueFootprint) { + let QueueConfigData { resume_threshold, suspend_threshold, .. 
} = >::get(); + + let mut suspended_channels = >::get(); + let suspended = suspended_channels.contains(¶); + + if suspended && fp.pages <= resume_threshold { + Self::send_signal(para, ChannelSignal::Resume); + + suspended_channels.remove(¶); + >::put(suspended_channels); + } else if !suspended && fp.pages >= suspend_threshold { + log::warn!("XCMP queue for sibling {:?} is full; suspending channel.", para); + Self::send_signal(para, ChannelSignal::Suspend); + + if let Err(err) = suspended_channels.try_insert(para) { + log::error!("Too many channels suspended; cannot suspend sibling {:?}: {:?}; further messages may be dropped.", para, err); + } + >::put(suspended_channels); + } + } +} + +impl QueuePausedQuery for Pallet { + fn is_paused(para: &ParaId) -> bool { + if !QueueSuspended::::get() { + return false + } + + // Make an exception for the superuser queue: + let sender_origin = T::ControllerOriginConverter::convert_origin( + (Parent, Parachain((*para).into())), + OriginKind::Superuser, + ); + let is_controller = + sender_origin.map_or(false, |origin| T::ControllerOrigin::try_origin(origin).is_ok()); + + !is_controller + } } impl XcmpMessageHandler for Pallet { @@ -954,73 +713,64 @@ impl XcmpMessageHandler for Pallet { iter: I, max_weight: Weight, ) -> Weight { - let mut status = >::get(); + let mut meter = WeightMeter::with_limit(max_weight); - let QueueConfigData { suspend_threshold, drop_threshold, .. } = >::get(); - - for (sender, sent_at, data) in iter { - // Figure out the message format. - let mut data_ref = data; - let format = match XcmpMessageFormat::decode_with_depth_limit( - MAX_XCM_DECODE_DEPTH, - &mut data_ref, - ) { + for (sender, _sent_at, mut data) in iter { + let format = match XcmpMessageFormat::decode(&mut data) { Ok(f) => f, Err(_) => { - debug_assert!(false, "Unknown XCMP message format. Silently dropping message"); + defensive!("Unknown XCMP message format - dropping"); continue }, }; - if format == XcmpMessageFormat::Signals { - while !data_ref.is_empty() { - use ChannelSignal::*; - match ChannelSignal::decode(&mut data_ref) { - Ok(Suspend) => Self::suspend_channel(sender), - Ok(Resume) => Self::resume_channel(sender), - Err(_) => break, - } - } - } else { - // Record the fact we received it. - match status.binary_search_by_key(&sender, |item| item.sender) { - Ok(i) => { - let count = status[i].message_metadata.len(); - if count as u32 >= suspend_threshold && status[i].state == InboundState::Ok + + match format { + XcmpMessageFormat::Signals => + while !data.is_empty() { + if meter + .try_consume( + T::WeightInfo::suspend_channel() + .max(T::WeightInfo::resume_channel()), + ) + .is_err() { - status[i].state = InboundState::Suspended; - let r = Self::send_signal(sender, ChannelSignal::Suspend); - if r.is_err() { - log::warn!( - "Attempt to suspend channel failed. Messages may be dropped." - ); - } + defensive!("Not enough weight to process signals - dropping"); + break + } + + match ChannelSignal::decode(&mut data) { + Ok(ChannelSignal::Suspend) => Self::suspend_channel(sender), + Ok(ChannelSignal::Resume) => Self::resume_channel(sender), + Err(_) => { + defensive!("Undecodable channel signal - dropping"); + break + }, } - if (count as u32) < drop_threshold { - status[i].message_metadata.push((sent_at, format)); - } else { - debug_assert!( - false, - "XCMP channel queue full. 
Silently dropping message" + }, + XcmpMessageFormat::ConcatenatedVersionedXcm => + while !data.is_empty() { + let Ok(xcm) = Self::take_first_concatenated_xcm(&mut data, &mut meter) + else { + defensive!("HRMP inbound decode stream broke; page will be dropped.",); + break + }; + + if let Err(()) = Self::enqueue_xcmp_message(sender, xcm, &mut meter) { + defensive!( + "Could not enqueue XCMP messages. Used weight: ", + meter.consumed_ratio() ); + break } }, - Err(_) => status.push(InboundChannelDetails { - sender, - state: InboundState::Ok, - message_metadata: vec![(sent_at, format)], - }), - } - // Queue the payload for later execution. - >::insert(sender, sent_at, data_ref); + XcmpMessageFormat::ConcatenatedEncodedBlob => { + defensive!("Blob messages are unhandled - dropping"); + continue + }, } - - // Optimization note; it would make sense to execute messages immediately if - // `status.is_empty()` here. } - status.sort(); - >::put(status); - Self::service_xcmp_queue(max_weight) + meter.consumed() } } @@ -1040,14 +790,6 @@ impl XcmpMessageSource for Pallet { mut last_index, } = *status; - if result.len() == max_message_count { - // We check this condition in the beginning of the loop so that we don't include - // a message where the limit is 0. - break - } - if outbound_state == OutboundState::Suspended { - continue - } let (max_size_now, max_size_ever) = match T::ChannelInfo::get_channel_status(para_id) { ChannelStatus::Closed => { // This means that there is no such channel anymore. Nothing to be done but @@ -1065,15 +807,28 @@ impl XcmpMessageSource for Pallet { ChannelStatus::Ready(n, e) => (n, e), }; + // This is a hard limit from the host config; not even signals can bypass it. + if result.len() == max_message_count { + // We check this condition in the beginning of the loop so that we don't include + // a message where the limit is 0. + break + } + let page = if signals_exist { let page = >::get(para_id); + defensive_assert!(!page.is_empty(), "Signals must exist"); + if page.len() < max_size_now { >::remove(para_id); signals_exist = false; page } else { + defensive!("Signals should fit into a single page"); continue } + } else if outbound_state == OutboundState::Suspended { + // Signals are exempt from suspension. + continue } else if last_index > first_index { let page = >::get(para_id, first_index); if page.len() < max_size_now { @@ -1093,13 +848,28 @@ impl XcmpMessageSource for Pallet { if page.len() > max_size_ever { // TODO: #274 This means that the channel's max message size has changed since - // the message was sent. We should parse it and split into smaller mesasges but + // the message was sent. We should parse it and split into smaller messages but // since it's so unlikely then for now we just drop it. - log::warn!("WARNING: oversize message in queue. 
silently dropping."); + defensive!("WARNING: oversize message in queue - dropping"); } else { result.push((para_id, page)); } + let max_total_size = match T::ChannelInfo::get_channel_info(para_id) { + Some(channel_info) => channel_info.max_total_size, + None => { + log::warn!("calling `get_channel_info` with no RelevantMessagingState?!"); + MAX_POSSIBLE_ALLOCATION // We use this as a fallback in case the messaging state is not present + }, + }; + let threshold = max_total_size.saturating_div(delivery_fee_constants::THRESHOLD_FACTOR); + let remaining_total_size: usize = (first_index..last_index) + .map(|index| OutboundXcmpMessages::::decode_len(para_id, index).unwrap()) + .sum(); + if remaining_total_size <= threshold as usize { + Self::decrease_fee_factor(para_id); + } + *status = OutboundChannelDetails { recipient: para_id, state: outbound_state, @@ -1108,6 +878,7 @@ impl XcmpMessageSource for Pallet { last_index, }; } + debug_assert!(!statuses.iter().any(|s| s.signals_exist), "Signals should be handled"); // Sort the outbound messages by ascending recipient para id to satisfy the acceptance // criteria requirement. @@ -1129,7 +900,7 @@ impl XcmpMessageSource for Pallet { let pruned = old_statuses_len - statuses.len(); // removing an item from status implies a message being sent, so the result messages must // be no less than the pruned channels. - statuses.rotate_left(result.len() - pruned); + statuses.rotate_left(result.len().saturating_sub(pruned)); >::put(statuses); @@ -1152,9 +923,12 @@ impl SendXcm for Pallet { MultiLocation { parents: 1, interior: X1(Parachain(id)) } => { let xcm = msg.take().ok_or(SendError::MissingArgument)?; let id = ParaId::from(*id); - let price = T::PriceForSiblingDelivery::price_for_parachain_delivery(id, &xcm); + let price = T::PriceForSiblingDelivery::price_for_delivery(id, &xcm); let versioned_xcm = T::VersionWrapper::wrap_version(&d, xcm) .map_err(|()| SendError::DestinationUnsupported)?; + validate_xcm_nesting(&versioned_xcm) + .map_err(|()| SendError::ExceedsMaxMessageSize)?; + Ok(((id, versioned_xcm), price)) }, _ => { @@ -1168,13 +942,51 @@ impl SendXcm for Pallet { fn deliver((id, xcm): (ParaId, VersionedXcm<()>)) -> Result { let hash = xcm.using_encoded(sp_io::hashing::blake2_256); + defensive_assert!( + validate_xcm_nesting(&xcm).is_ok(), + "Tickets are valid prior to delivery by trait XCM; qed" + ); match Self::send_fragment(id, XcmpMessageFormat::ConcatenatedVersionedXcm, xcm) { Ok(_) => { Self::deposit_event(Event::XcmpMessageSent { message_hash: hash }); Ok(hash) }, - Err(e) => Err(SendError::Transport(<&'static str>::from(e))), + Err(e) => Err(SendError::Transport(e.into())), } } } + +/// Checks that the XCM is decodable with `MAX_XCM_DECODE_DEPTH`. +/// +/// Note that this uses the limit of the sender - not the receiver. It it best effort. 
+pub(crate) fn validate_xcm_nesting(xcm: &VersionedXcm<()>) -> Result<(), ()> { + xcm.using_encoded(|mut enc| { + VersionedXcm::<()>::decode_all_with_depth_limit(MAX_XCM_DECODE_DEPTH, &mut enc).map(|_| ()) + }) + .map_err(|_| ()) +} + +impl FeeTracker for Pallet { + type Id = ParaId; + + fn get_fee_factor(id: Self::Id) -> FixedU128 { + >::get(id) + } + + fn increase_fee_factor(id: Self::Id, message_size_factor: FixedU128) -> FixedU128 { + >::mutate(id, |f| { + *f = f.saturating_mul( + delivery_fee_constants::EXPONENTIAL_FEE_BASE.saturating_add(message_size_factor), + ); + *f + }) + } + + fn decrease_fee_factor(id: Self::Id) -> FixedU128 { + >::mutate(id, |f| { + *f = InitialFactor::get().max(*f / delivery_fee_constants::EXPONENTIAL_FEE_BASE); + *f + }) + } +} diff --git a/cumulus/pallets/xcmp-queue/src/migration.rs b/cumulus/pallets/xcmp-queue/src/migration.rs index a54ddfb9cecd0888bb0b54ab19a28d5796e41fab..6d7f434b041a5d613efb7d6978f1e15643bbbd0c 100644 --- a/cumulus/pallets/xcmp-queue/src/migration.rs +++ b/cumulus/pallets/xcmp-queue/src/migration.rs @@ -16,20 +16,23 @@ //! A module that is responsible for migration of storage. -use crate::{Config, Overweight, Pallet, QueueConfig, DEFAULT_POV_SIZE}; +use crate::{Config, OverweightIndex, Pallet, ParaId, QueueConfig, DEFAULT_POV_SIZE}; +use cumulus_primitives_core::XcmpMessageFormat; use frame_support::{ pallet_prelude::*, - traits::{OnRuntimeUpgrade, StorageVersion}, + traits::{EnqueueMessage, OnRuntimeUpgrade, StorageVersion}, weights::{constants::WEIGHT_REF_TIME_PER_MILLIS, Weight}, }; /// The current storage version. pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(3); +pub const LOG: &str = "runtime::xcmp-queue-migration"; + /// Migrates the pallet storage to the most recent version. -pub struct Migration(PhantomData); +pub struct MigrationToV3(PhantomData); -impl OnRuntimeUpgrade for Migration { +impl OnRuntimeUpgrade for MigrationToV3 { fn on_runtime_upgrade() -> Weight { let mut weight = T::DbWeight::get().reads(1); @@ -77,11 +80,55 @@ mod v1 { } } +pub mod v3 { + use super::*; + use crate::*; + + /// Status of the inbound XCMP channels. + #[frame_support::storage_alias] + pub(crate) type InboundXcmpStatus = + StorageValue, Vec, OptionQuery>; + + /// Inbound aggregate XCMP messages. It can only be one per ParaId/block. + #[frame_support::storage_alias] + pub(crate) type InboundXcmpMessages = StorageDoubleMap< + Pallet, + Blake2_128Concat, + ParaId, + Twox64Concat, + RelayBlockNumber, + Vec, + OptionQuery, + >; + + #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, TypeInfo)] + pub struct InboundChannelDetails { + /// The `ParaId` of the parachain that this channel is connected with. + pub sender: ParaId, + /// The state of the channel. + pub state: InboundState, + /// The ordered metadata of each inbound message. + /// + /// Contains info about the relay block number that the message was sent at, and the format + /// of the incoming message. + pub message_metadata: Vec<(RelayBlockNumber, XcmpMessageFormat)>, + } + + #[derive( + Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, RuntimeDebug, TypeInfo, + )] + pub enum InboundState { + Ok, + Suspended, + } +} + /// Migrates `QueueConfigData` from v1 (using only reference time weights) to v2 (with /// 2D weights). /// /// NOTE: Only use this function if you know what you're doing. Default to using /// `migrate_to_latest`. 
+#[allow(deprecated)] pub fn migrate_to_v2() -> Weight { let translate = |pre: v1::QueueConfigData| -> super::QueueConfigData { super::QueueConfigData { @@ -108,17 +155,70 @@ pub fn migrate_to_v2() -> Weight { } pub fn migrate_to_v3() -> Weight { + #[frame_support::storage_alias] + type Overweight = + CountedStorageMap, Twox64Concat, OverweightIndex, ParaId>; let overweight_messages = Overweight::::initialize_counter() as u64; T::DbWeight::get().reads_writes(overweight_messages, 1) } +pub fn lazy_migrate_inbound_queue() { + let Some(mut states) = v3::InboundXcmpStatus::::get() else { + log::debug!(target: LOG, "Lazy migration finished: item gone"); + return + }; + let Some(ref mut next) = states.first_mut() else { + log::debug!(target: LOG, "Lazy migration finished: item empty"); + v3::InboundXcmpStatus::::kill(); + return + }; + log::debug!( + "Migrating inbound HRMP channel with sibling {:?}, msgs left {}.", + next.sender, + next.message_metadata.len() + ); + // We take the last element since the MQ is a FIFO and we want to keep the order. + let Some((block_number, format)) = next.message_metadata.pop() else { + states.remove(0); + v3::InboundXcmpStatus::::put(states); + return + }; + if format != XcmpMessageFormat::ConcatenatedVersionedXcm { + log::warn!(target: LOG, + "Dropping message with format {:?} (not ConcatenatedVersionedXcm)", + format + ); + v3::InboundXcmpMessages::::remove(&next.sender, &block_number); + v3::InboundXcmpStatus::::put(states); + return + } + + let Some(msg) = v3::InboundXcmpMessages::::take(&next.sender, &block_number) else { + defensive!("Storage corrupted: HRMP message missing:", (next.sender, block_number)); + v3::InboundXcmpStatus::::put(states); + return + }; + + let Ok(msg): Result, _> = msg.try_into() else { + log::error!(target: LOG, "Message dropped: too big"); + v3::InboundXcmpStatus::::put(states); + return + }; + + // Finally! We have a proper message. + T::XcmpQueue::enqueue_message(msg.as_bounded_slice(), next.sender); + log::debug!(target: LOG, "Migrated HRMP message to MQ: {:?}", (next.sender, block_number)); + v3::InboundXcmpStatus::::put(states); +} + #[cfg(test)] mod tests { use super::*; use crate::mock::{new_test_ext, Test}; #[test] + #[allow(deprecated)] fn test_migration_to_v2() { let v1 = v1::QueueConfigData { suspend_threshold: 5, diff --git a/cumulus/pallets/xcmp-queue/src/mock.rs b/cumulus/pallets/xcmp-queue/src/mock.rs index a3f10fa5428ce2cc67ada4359cd81f8b5c180fa5..7c3a3bd1bd02c7b897c2cafbb28ba93279fb69bd 100644 --- a/cumulus/pallets/xcmp-queue/src/mock.rs +++ b/cumulus/pallets/xcmp-queue/src/mock.rs @@ -17,10 +17,11 @@ use super::*; use crate as xcmp_queue; use core::marker::PhantomData; use cumulus_pallet_parachain_system::AnyRelayNumber; -use cumulus_primitives_core::{IsSystem, ParaId}; +use cumulus_primitives_core::{ChannelInfo, IsSystem, ParaId}; use frame_support::{ parameter_types, traits::{ConstU32, Everything, Nothing, OriginTrait}, + BoundedSlice, }; use frame_system::EnsureRoot; use sp_core::H256; @@ -85,8 +86,10 @@ parameter_types! 
{ pub const MaxReserves: u32 = 50; } +pub type Balance = u64; + impl pallet_balances::Config for Test { - type Balance = u64; + type Balance = Balance; type RuntimeEvent = RuntimeEvent; type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; @@ -96,17 +99,20 @@ impl pallet_balances::Config for Test { type MaxReserves = MaxReserves; type ReserveIdentifier = [u8; 8]; type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); type MaxHolds = ConstU32<0>; type MaxFreezes = ConstU32<0>; } impl cumulus_pallet_parachain_system::Config for Test { + type WeightInfo = (); type RuntimeEvent = RuntimeEvent; type OnSystemEvent = (); type SelfParaId = (); type OutboundXcmpMessageSource = XcmpQueue; - type DmpMessageHandler = (); + // Ignore all DMP messages by enqueueing them into `()`: + type DmpQueue = frame_support::traits::EnqueueWithOrigin<(), sp_core::ConstU8<0>>; type ReservedDmpWeight = (); type XcmpMessageHandler = XcmpQueue; type ReservedXcmpWeight = (); @@ -196,19 +202,137 @@ impl ConvertOrigin } } +parameter_types! { + pub static EnqueuedMessages: Vec<(ParaId, Vec)> = Default::default(); +} + +/// An `EnqueueMessage` implementation that puts all messages in thread-local storage. +pub struct EnqueueToLocalStorage(PhantomData); + +impl> EnqueueMessage for EnqueueToLocalStorage { + type MaxMessageLen = sp_core::ConstU32<65_536>; + + fn enqueue_message(message: BoundedSlice, origin: ParaId) { + let mut msgs = EnqueuedMessages::get(); + msgs.push((origin, message.to_vec())); + EnqueuedMessages::set(msgs); + T::on_queue_changed(origin, Self::footprint(origin)); + } + + fn enqueue_messages<'a>( + iter: impl Iterator>, + origin: ParaId, + ) { + let mut msgs = EnqueuedMessages::get(); + msgs.extend(iter.map(|m| (origin, m.to_vec()))); + EnqueuedMessages::set(msgs); + T::on_queue_changed(origin, Self::footprint(origin)); + } + + fn sweep_queue(origin: ParaId) { + let mut msgs = EnqueuedMessages::get(); + msgs.retain(|(o, _)| o != &origin); + EnqueuedMessages::set(msgs); + T::on_queue_changed(origin, Self::footprint(origin)); + } + + fn footprint(origin: ParaId) -> QueueFootprint { + let msgs = EnqueuedMessages::get(); + let mut footprint = QueueFootprint::default(); + for (o, m) in msgs { + if o == origin { + footprint.storage.count += 1; + footprint.storage.size += m.len() as u64; + } + } + footprint.pages = footprint.storage.size as u32 / 16; // Number does not matter + footprint + } +} + +parameter_types! { + /// The asset ID for the asset that we use to pay for message delivery fees. + pub FeeAssetId: AssetId = Concrete(RelayChain::get()); + /// The base fee for the message delivery fees. 
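/// Combined with `ByteFee` below, this gives the linear part of the delivery price before it
/// is scaled by the channel's delivery fee factor.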
+ pub const BaseDeliveryFee: Balance = 300_000_000; + /// The fee per byte + pub const ByteFee: Balance = 1_000_000; +} + +pub type PriceForSiblingParachainDelivery = polkadot_runtime_common::xcm_sender::ExponentialPrice< + FeeAssetId, + BaseDeliveryFee, + ByteFee, + XcmpQueue, +>; + impl Config for Test { type RuntimeEvent = RuntimeEvent; - type XcmExecutor = xcm_executor::XcmExecutor; - type ChannelInfo = ParachainSystem; + type ChannelInfo = MockedChannelInfo; type VersionWrapper = (); - type ExecuteOverweightOrigin = EnsureRoot; + type XcmpQueue = EnqueueToLocalStorage>; + type MaxInboundSuspended = sp_core::ConstU32<1_000>; type ControllerOrigin = EnsureRoot; type ControllerOriginConverter = SystemParachainAsSuperuser; type WeightInfo = (); - type PriceForSiblingDelivery = (); + type PriceForSiblingDelivery = PriceForSiblingParachainDelivery; } pub fn new_test_ext() -> sp_io::TestExternalities { - let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); - t.into() + frame_system::GenesisConfig::::default().build_storage().unwrap().into() +} + +/// A para that we have an HRMP channel with. +pub const HRMP_PARA_ID: u32 = 7777; + +pub struct MockedChannelInfo; +impl GetChannelInfo for MockedChannelInfo { + fn get_channel_status(id: ParaId) -> ChannelStatus { + if id == HRMP_PARA_ID.into() { + return ChannelStatus::Ready(usize::MAX, usize::MAX) + } + + ParachainSystem::get_channel_status(id) + } + + fn get_channel_info(id: ParaId) -> Option { + if id == HRMP_PARA_ID.into() { + return Some(ChannelInfo { + max_capacity: u32::MAX, + max_total_size: u32::MAX, + max_message_size: u32::MAX, + msg_count: 0, + total_size: 0, + }) + } + + ParachainSystem::get_channel_info(id) + } +} + +pub(crate) fn mk_page() -> Vec { + let mut page = Vec::::new(); + + for i in 0..100 { + page.extend(match i % 2 { + 0 => v2_xcm().encode(), + 1 => v3_xcm().encode(), + // We cannot push an undecodable XCM here since it would break the decode stream. + // This is expected and the whole reason to introduce `MaybeDoubleEncodedVersionedXcm` + // instead. + _ => unreachable!(), + }); + } + + page +} + +pub(crate) fn v2_xcm() -> VersionedXcm<()> { + let instr = xcm::v2::Instruction::<()>::ClearOrigin; + VersionedXcm::V2(xcm::v2::Xcm::<()>(vec![instr; 3])) +} + +pub(crate) fn v3_xcm() -> VersionedXcm<()> { + let instr = xcm::v3::Instruction::<()>::Trap(1); + VersionedXcm::V3(xcm::v3::Xcm::<()>(vec![instr; 3])) } diff --git a/cumulus/pallets/xcmp-queue/src/tests.rs b/cumulus/pallets/xcmp-queue/src/tests.rs index ba005a5badfeb69472b09cddd1f93f6dc0ac42dd..30dba6ead3407cc0064d4eb61cd64003cb60bf1b 100644 --- a/cumulus/pallets/xcmp-queue/src/tests.rs +++ b/cumulus/pallets/xcmp-queue/src/tests.rs @@ -13,247 +13,325 @@ // See the License for the specific language governing permissions and // limitations under the License. 
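// Illustration only (not part of the pallet): a minimal sketch of how the delivery fee
// configured in mock.rs above is expected to behave, assuming the usual `ExponentialPrice`
// shape of a flat base plus a per-byte charge, both scaled by the channel's current fee
// factor. The function name and signature are hypothetical; the constants mirror
// `BaseDeliveryFee` and `ByteFee` from the mock.
#[allow(dead_code)]
fn sketch_delivery_fee(encoded_len: u128, fee_factor: sp_runtime::FixedU128) -> u128 {
	use sp_runtime::FixedPointNumber;
	let base: u128 = 300_000_000; // `BaseDeliveryFee` in the mock
	let per_byte: u128 = 1_000_000; // `ByteFee` in the mock
	// factor * (base + per_byte * len); with the initial factor of 1 this matches the
	// 402_000_000 asserted in `verify_fee_factor_increase_and_decrease` below.
	fee_factor.saturating_mul_int(base.saturating_add(per_byte.saturating_mul(encoded_len)))
}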
-use super::*; +use super::{ + mock::{mk_page, v2_xcm, v3_xcm, EnqueuedMessages, HRMP_PARA_ID}, + *, +}; +use XcmpMessageFormat::*; + +use codec::Input; use cumulus_primitives_core::{ParaId, XcmpMessageHandler}; -use frame_support::{assert_noop, assert_ok}; -use mock::{new_test_ext, ParachainSystem, RuntimeCall, RuntimeOrigin, Test, XcmpQueue}; -use sp_runtime::traits::BadOrigin; +use frame_support::{ + assert_err, assert_noop, assert_ok, assert_storage_noop, hypothetically, + traits::{Footprint, Hooks}, + StorageNoopGuard, +}; +use mock::{new_test_ext, ParachainSystem, RuntimeOrigin as Origin, Test, XcmpQueue}; +use sp_runtime::traits::{BadOrigin, Zero}; +use std::iter::{once, repeat}; #[test] -fn one_message_does_not_panic() { +fn empty_concatenated_works() { new_test_ext().execute_with(|| { - let message_format = XcmpMessageFormat::ConcatenatedVersionedXcm.encode(); - let messages = vec![(Default::default(), 1u32, message_format.as_slice())]; + let data = ConcatenatedVersionedXcm.encode(); - // This shouldn't cause a panic - XcmpQueue::handle_xcmp_messages(messages.into_iter(), Weight::MAX); + XcmpQueue::handle_xcmp_messages(once((1000.into(), 1, data.as_slice())), Weight::MAX); }) } #[test] -#[should_panic = "Invalid incoming blob message data"] -#[cfg(debug_assertions)] -fn bad_message_is_handled() { +fn xcm_enqueueing_basic_works() { new_test_ext().execute_with(|| { - let bad_data = vec![ - 1, 1, 3, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 64, 239, 139, 0, - 0, 0, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 0, 0, 0, 0, 0, 0, 0, 37, 0, - 0, 0, 0, 0, 0, 0, 16, 0, 127, 147, - ]; - InboundXcmpMessages::::insert(ParaId::from(1000), 1, bad_data); - let format = XcmpMessageFormat::ConcatenatedEncodedBlob; - // This should exit with an error. - XcmpQueue::process_xcmp_message( - 1000.into(), - (1, format), - &mut 0, - Weight::from_parts(10_000_000_000, 0), - Weight::from_parts(10_000_000_000, 0), - ); - }); + let xcm = VersionedXcm::::from(Xcm::(vec![ClearOrigin])).encode(); + let data = [ConcatenatedVersionedXcm.encode(), xcm.clone()].concat(); + + XcmpQueue::handle_xcmp_messages(once((1000.into(), 1, data.as_slice())), Weight::MAX); + + assert_eq!(EnqueuedMessages::get(), vec![(1000.into(), xcm)]); + }) } -/// Tests that a blob message is handled. Currently this isn't implemented and panics when debug -/// assertions are enabled. When this feature is enabled, this test should be rewritten properly. 
#[test] -#[should_panic = "Blob messages not handled."] -#[cfg(debug_assertions)] -fn handle_blob_message() { +fn xcm_enqueueing_many_works() { new_test_ext().execute_with(|| { - let bad_data = vec![ - 1, 1, 1, 1, 3, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 64, 239, - 139, 0, 0, 0, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 0, 0, 0, 0, 0, 0, 0, - 37, 0, 0, 0, 0, 0, 0, 0, 16, 0, 127, 147, - ]; - InboundXcmpMessages::::insert(ParaId::from(1000), 1, bad_data); - let format = XcmpMessageFormat::ConcatenatedEncodedBlob; - XcmpQueue::process_xcmp_message( - 1000.into(), - (1, format), - &mut 0, - Weight::from_parts(10_000_000_000, 0), - Weight::from_parts(10_000_000_000, 0), + let mut encoded_xcms = vec![]; + for i in 0..10 { + let xcm = VersionedXcm::::from(Xcm::(vec![ClearOrigin; i as usize])); + encoded_xcms.push(xcm.encode()); + } + let mut data = ConcatenatedVersionedXcm.encode(); + data.extend(encoded_xcms.iter().flatten()); + + XcmpQueue::handle_xcmp_messages(once((1000.into(), 1, data.as_slice())), Weight::MAX); + + assert_eq!( + EnqueuedMessages::get(), + encoded_xcms.into_iter().map(|xcm| (1000.into(), xcm)).collect::>(), ); - }); + }) } #[test] -#[should_panic = "Invalid incoming XCMP message data"] -#[cfg(debug_assertions)] -fn handle_invalid_data() { +fn xcm_enqueueing_multiple_times_works() { new_test_ext().execute_with(|| { - let data = Xcm::(vec![]).encode(); - InboundXcmpMessages::::insert(ParaId::from(1000), 1, data); - let format = XcmpMessageFormat::ConcatenatedVersionedXcm; - XcmpQueue::process_xcmp_message( - 1000.into(), - (1, format), - &mut 0, - Weight::from_parts(10_000_000_000, 0), - Weight::from_parts(10_000_000_000, 0), + let mut encoded_xcms = vec![]; + for _ in 0..10 { + let xcm = VersionedXcm::::from(Xcm::(vec![ClearOrigin])); + encoded_xcms.push(xcm.encode()); + } + let mut data = ConcatenatedVersionedXcm.encode(); + data.extend(encoded_xcms.iter().flatten()); + + for i in 0..10 { + XcmpQueue::handle_xcmp_messages(once((1000.into(), 1, data.as_slice())), Weight::MAX); + assert_eq!((i + 1) * 10, EnqueuedMessages::get().len()); + } + + assert_eq!( + EnqueuedMessages::get(), + encoded_xcms + .into_iter() + .map(|xcm| (1000.into(), xcm)) + .cycle() + .take(100) + .collect::>(), ); - }); + }) } #[test] -fn service_overweight_unknown() { +#[cfg_attr(debug_assertions, should_panic = "Defensive failure")] +fn xcm_enqueueing_starts_dropping_on_overflow() { new_test_ext().execute_with(|| { - assert_noop!( - XcmpQueue::service_overweight(RuntimeOrigin::root(), 0, Weight::from_parts(1000, 1000)), - Error::::BadOverweightIndex, + let xcm = VersionedXcm::::from(Xcm::(vec![ClearOrigin])); + let data = (ConcatenatedVersionedXcm, xcm).encode(); + // It's possible to enqueue 256 messages at most: + let limit = 256; + + XcmpQueue::handle_xcmp_messages( + repeat((1000.into(), 1, data.as_slice())).take(limit * 2), + Weight::MAX, ); - }); + assert_eq!(EnqueuedMessages::get().len(), limit); + // The drop threshold for pages is 48, the other numbers don't really matter: + assert_eq!( + ::XcmpQueue::footprint(1000.into()), + QueueFootprint { storage: Footprint { count: 256, size: 768 }, pages: 48 } + ); + }) } +/// First enqueue 10 good, 1 bad and then 10 good XCMs. + /// + /// It should only process the first 10 good though.
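/// (A page whose decode stream breaks is dropped from the point of failure onwards; other
/// pages are unaffected.)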
#[test] -fn service_overweight_bad_xcm_format() { +#[cfg(not(debug_assertions))] +fn xcm_enqueueing_broken_xcm_works() { new_test_ext().execute_with(|| { - let bad_xcm = vec![255]; - Overweight::::insert(0, (ParaId::from(1000), 0, bad_xcm)); + let mut encoded_xcms = vec![]; + for _ in 0..10 { + let xcm = VersionedXcm::::from(Xcm::(vec![ClearOrigin])); + encoded_xcms.push(xcm.encode()); + } + let mut good = ConcatenatedVersionedXcm.encode(); + good.extend(encoded_xcms.iter().flatten()); + + let mut bad = ConcatenatedVersionedXcm.encode(); + bad.extend(vec![0u8].into_iter()); + + // If we enqueue them in multiple pages, then it's fine. + XcmpQueue::handle_xcmp_messages(once((1000.into(), 1, good.as_slice())), Weight::MAX); + XcmpQueue::handle_xcmp_messages(once((1000.into(), 1, bad.as_slice())), Weight::MAX); + XcmpQueue::handle_xcmp_messages(once((1000.into(), 1, good.as_slice())), Weight::MAX); + assert_eq!(20, EnqueuedMessages::get().len()); - assert_noop!( - XcmpQueue::service_overweight(RuntimeOrigin::root(), 0, Weight::from_parts(1000, 1000)), - Error::::BadXcm + assert_eq!( + EnqueuedMessages::get(), + encoded_xcms + .clone() + .into_iter() + .map(|xcm| (1000.into(), xcm)) + .cycle() + .take(20) + .collect::>(), ); - }); + EnqueuedMessages::set(&vec![]); + + // But if we do it all in one page, then it only uses the first 10: + XcmpQueue::handle_xcmp_messages( + vec![(1000.into(), 1, good.as_slice()), (1000.into(), 1, bad.as_slice())].into_iter(), + Weight::MAX, + ); + assert_eq!(10, EnqueuedMessages::get().len()); + assert_eq!( + EnqueuedMessages::get(), + encoded_xcms + .into_iter() + .map(|xcm| (1000.into(), xcm)) + .cycle() + .take(10) + .collect::>(), + ); + }) } +/// Message blobs are not supported and panic in debug mode. #[test] -fn suspend_xcm_execution_works() { +#[should_panic = "Blob messages are unhandled"] +#[cfg(debug_assertions)] +fn bad_blob_message_panics() { new_test_ext().execute_with(|| { - QueueSuspended::::put(true); - - let xcm = - VersionedXcm::from(Xcm::(vec![Instruction::::ClearOrigin])) - .encode(); - let mut message_format = XcmpMessageFormat::ConcatenatedVersionedXcm.encode(); - message_format.extend(xcm.clone()); - let messages = vec![(ParaId::from(999), 1u32, message_format.as_slice())]; - - // This should have executed the incoming XCM, because it came from a system parachain - XcmpQueue::handle_xcmp_messages(messages.into_iter(), Weight::MAX); - - let queued_xcm = InboundXcmpMessages::::get(ParaId::from(999), 1u32); - assert!(queued_xcm.is_empty()); + let data = [ConcatenatedEncodedBlob.encode(), vec![1].encode()].concat(); - let messages = vec![(ParaId::from(2000), 1u32, message_format.as_slice())]; + XcmpQueue::handle_xcmp_messages(once((1000.into(), 1, data.as_slice())), Weight::MAX); + }); +} - // This shouldn't have executed the incoming XCM - XcmpQueue::handle_xcmp_messages(messages.into_iter(), Weight::MAX); +/// Message blobs do not panic in release mode but are just a no-op. +#[test] +#[cfg(not(debug_assertions))] +fn bad_blob_message_no_panic() { + new_test_ext().execute_with(|| { + let data = [ConcatenatedEncodedBlob.encode(), vec![1].encode()].concat(); - let queued_xcm = InboundXcmpMessages::::get(ParaId::from(2000), 1u32); - assert_eq!(queued_xcm, xcm); + frame_support::assert_storage_noop!(XcmpQueue::handle_xcmp_messages( + once((1000.into(), 1, data.as_slice())), + Weight::MAX, + )); }); } +/// Invalid concatenated XCMs panic in debug mode.
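/// In release builds the same input is simply ignored as a storage no-op, see
/// `handle_invalid_data_no_panic` below.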
#[test] -fn update_suspend_threshold_works() { +#[should_panic = "HRMP inbound decode stream broke; page will be dropped."] +#[cfg(debug_assertions)] +fn handle_invalid_data_panics() { new_test_ext().execute_with(|| { - let data: QueueConfigData = >::get(); - assert_eq!(data.suspend_threshold, 2); - assert_ok!(XcmpQueue::update_suspend_threshold(RuntimeOrigin::root(), 3)); - assert_noop!(XcmpQueue::update_suspend_threshold(RuntimeOrigin::signed(2), 5), BadOrigin); - let data: QueueConfigData = >::get(); + let data = [ConcatenatedVersionedXcm.encode(), Xcm::(vec![]).encode()].concat(); - assert_eq!(data.suspend_threshold, 3); + XcmpQueue::handle_xcmp_messages(once((1000.into(), 1, data.as_slice())), Weight::MAX); }); } +/// Invalid concatenated XCMs do not panic in release mode but are just a No-OP. #[test] -fn update_drop_threshold_works() { +#[cfg(not(debug_assertions))] +fn handle_invalid_data_no_panic() { new_test_ext().execute_with(|| { - let data: QueueConfigData = >::get(); - assert_eq!(data.drop_threshold, 5); - assert_ok!(XcmpQueue::update_drop_threshold(RuntimeOrigin::root(), 6)); - assert_noop!(XcmpQueue::update_drop_threshold(RuntimeOrigin::signed(2), 7), BadOrigin); - let data: QueueConfigData = >::get(); + let data = [ConcatenatedVersionedXcm.encode(), Xcm::(vec![]).encode()].concat(); - assert_eq!(data.drop_threshold, 6); + frame_support::assert_storage_noop!(XcmpQueue::handle_xcmp_messages( + once((1000.into(), 1, data.as_slice())), + Weight::MAX, + )); }); } #[test] -fn update_resume_threshold_works() { +fn suspend_xcm_execution_works() { new_test_ext().execute_with(|| { - let data: QueueConfigData = >::get(); - assert_eq!(data.resume_threshold, 1); - assert_ok!(XcmpQueue::update_resume_threshold(RuntimeOrigin::root(), 2)); - assert_noop!(XcmpQueue::update_resume_threshold(RuntimeOrigin::signed(7), 3), BadOrigin); - let data: QueueConfigData = >::get(); - - assert_eq!(data.resume_threshold, 2); + assert!(!XcmpQueue::is_paused(&2000.into())); + QueueSuspended::::put(true); + assert!(XcmpQueue::is_paused(&2000.into())); + // System parachains can bypass suspension: + assert!(!XcmpQueue::is_paused(&999.into())); }); } #[test] -fn update_threshold_weight_works() { +fn suspend_and_resume_xcm_execution_work() { new_test_ext().execute_with(|| { - let data: QueueConfigData = >::get(); - assert_eq!(data.threshold_weight, Weight::from_parts(100_000, 0)); - assert_ok!(XcmpQueue::update_threshold_weight( - RuntimeOrigin::root(), - Weight::from_parts(10_000, 0) - )); + assert_noop!(XcmpQueue::suspend_xcm_execution(Origin::signed(1)), BadOrigin); + assert_ok!(XcmpQueue::suspend_xcm_execution(Origin::root())); assert_noop!( - XcmpQueue::update_threshold_weight( - RuntimeOrigin::signed(5), - Weight::from_parts(10_000_000, 0), - ), - BadOrigin + XcmpQueue::suspend_xcm_execution(Origin::root()), + Error::::AlreadySuspended ); - let data: QueueConfigData = >::get(); + assert!(QueueSuspended::::get()); - assert_eq!(data.threshold_weight, Weight::from_parts(10_000, 0)); + assert_noop!(XcmpQueue::resume_xcm_execution(Origin::signed(1)), BadOrigin); + assert_ok!(XcmpQueue::resume_xcm_execution(Origin::root())); + assert_noop!( + XcmpQueue::resume_xcm_execution(Origin::root()), + Error::::AlreadyResumed + ); + assert!(!QueueSuspended::::get()); }); } #[test] -fn update_weight_restrict_decay_works() { +#[cfg(not(debug_assertions))] +fn xcm_enqueueing_backpressure_works() { + let para: ParaId = 1000.into(); new_test_ext().execute_with(|| { - let data: QueueConfigData = >::get(); - 
assert_eq!(data.weight_restrict_decay, Weight::from_parts(2, 0)); - assert_ok!(XcmpQueue::update_weight_restrict_decay( - RuntimeOrigin::root(), - Weight::from_parts(5, 0) - )); - assert_noop!( - XcmpQueue::update_weight_restrict_decay( - RuntimeOrigin::signed(6), - Weight::from_parts(4, 0), - ), - BadOrigin - ); - let data: QueueConfigData = >::get(); + let xcm = VersionedXcm::::from(Xcm::(vec![ClearOrigin])); + let data = (ConcatenatedVersionedXcm, xcm).encode(); + + XcmpQueue::handle_xcmp_messages(repeat((para, 1, data.as_slice())).take(170), Weight::MAX); + + assert_eq!(EnqueuedMessages::get().len(), 170,); + // Not yet suspended: + assert!(InboundXcmpSuspended::::get().is_empty()); + // Enqueueing one more will suspend it: + let xcm = VersionedXcm::::from(Xcm::(vec![ClearOrigin])).encode(); + let small = [ConcatenatedVersionedXcm.encode(), xcm].concat(); + + XcmpQueue::handle_xcmp_messages(once((para, 1, small.as_slice())), Weight::MAX); + // Suspended: + assert_eq!(InboundXcmpSuspended::::get().iter().collect::>(), vec![¶]); + + // Now enqueueing many more will only work until the drop threshold: + XcmpQueue::handle_xcmp_messages(repeat((para, 1, data.as_slice())).take(100), Weight::MAX); + assert_eq!(mock::EnqueuedMessages::get().len(), 256); + + crate::mock::EnqueueToLocalStorage::>::sweep_queue(para); + XcmpQueue::handle_xcmp_messages(once((para, 1, small.as_slice())), Weight::MAX); + // Got resumed: + assert!(InboundXcmpSuspended::::get().is_empty()); + // Still resumed: + XcmpQueue::handle_xcmp_messages(once((para, 1, small.as_slice())), Weight::MAX); + assert!(InboundXcmpSuspended::::get().is_empty()); + }); +} - assert_eq!(data.weight_restrict_decay, Weight::from_parts(5, 0)); +#[test] +fn update_suspend_threshold_works() { + new_test_ext().execute_with(|| { + assert_eq!(>::get().suspend_threshold, 32); + assert_noop!(XcmpQueue::update_suspend_threshold(Origin::signed(2), 49), BadOrigin); + + assert_ok!(XcmpQueue::update_suspend_threshold(Origin::root(), 33)); + assert_eq!(>::get().suspend_threshold, 33); }); } #[test] -fn update_xcmp_max_individual_weight() { +fn update_drop_threshold_works() { new_test_ext().execute_with(|| { - let data: QueueConfigData = >::get(); - assert_eq!( - data.xcmp_max_individual_weight, - Weight::from_parts(20u64 * WEIGHT_REF_TIME_PER_MILLIS, DEFAULT_POV_SIZE), + assert_eq!(>::get().drop_threshold, 48); + assert_ok!(XcmpQueue::update_drop_threshold(Origin::root(), 4000)); + assert_noop!(XcmpQueue::update_drop_threshold(Origin::signed(2), 7), BadOrigin); + + assert_eq!(>::get().drop_threshold, 4000); + }); +} + +#[test] +fn update_resume_threshold_works() { + new_test_ext().execute_with(|| { + assert_eq!(>::get().resume_threshold, 8); + assert_noop!( + XcmpQueue::update_resume_threshold(Origin::root(), 0), + Error::::BadQueueConfig ); - assert_ok!(XcmpQueue::update_xcmp_max_individual_weight( - RuntimeOrigin::root(), - Weight::from_parts(30u64 * WEIGHT_REF_TIME_PER_MILLIS, 0) - )); assert_noop!( - XcmpQueue::update_xcmp_max_individual_weight( - RuntimeOrigin::signed(3), - Weight::from_parts(10u64 * WEIGHT_REF_TIME_PER_MILLIS, 0) - ), - BadOrigin + XcmpQueue::update_resume_threshold(Origin::root(), 33), + Error::::BadQueueConfig ); - let data: QueueConfigData = >::get(); + assert_ok!(XcmpQueue::update_resume_threshold(Origin::root(), 16)); + assert_noop!(XcmpQueue::update_resume_threshold(Origin::signed(7), 3), BadOrigin); - assert_eq!( - data.xcmp_max_individual_weight, - Weight::from_parts(30u64 * WEIGHT_REF_TIME_PER_MILLIS, 0) - ); + 
assert_eq!(>::get().resume_threshold, 16); }); } @@ -324,13 +402,14 @@ fn xcmp_queue_consumes_dest_and_msg_on_ok_validate() { let dest = (Parent, X1(Parachain(5555))); let mut dest_wrapper = Some(dest.into()); let mut msg_wrapper = Some(message.clone()); - assert!(::validate(&mut dest_wrapper, &mut msg_wrapper).is_ok()); - - // check wrapper were consumed - assert_eq!(None, dest_wrapper.take()); - assert_eq!(None, msg_wrapper.take()); new_test_ext().execute_with(|| { + assert!(::validate(&mut dest_wrapper, &mut msg_wrapper).is_ok()); + + // check wrapper were consumed + assert_eq!(None, dest_wrapper.take()); + assert_eq!(None, msg_wrapper.take()); + // another try with router chain with asserting sender assert_eq!( Err(SendError::Transport("NoChannel")), @@ -342,6 +421,287 @@ fn xcmp_queue_consumes_dest_and_msg_on_ok_validate() { }); } +#[test] +fn xcmp_queue_validate_nested_xcm_works() { + let dest = (Parent, X1(Parachain(5555))); + // Message that is not too deeply nested: + let mut good = Xcm(vec![ClearOrigin]); + for _ in 0..MAX_XCM_DECODE_DEPTH - 1 { + good = Xcm(vec![SetAppendix(good)]); + } + + new_test_ext().execute_with(|| { + // Check that the good message is validated: + assert_ok!(::validate( + &mut Some(dest.into()), + &mut Some(good.clone()) + )); + + // Nesting the message one more time should reject it: + let bad = Xcm(vec![SetAppendix(good)]); + assert_eq!( + Err(SendError::ExceedsMaxMessageSize), + ::validate(&mut Some(dest.into()), &mut Some(bad)) + ); + }); +} + +#[test] +fn send_xcm_nested_works() { + let dest = (Parent, X1(Parachain(HRMP_PARA_ID))); + // Message that is not too deeply nested: + let mut good = Xcm(vec![ClearOrigin]); + for _ in 0..MAX_XCM_DECODE_DEPTH - 1 { + good = Xcm(vec![SetAppendix(good)]); + } + + // Check that the good message is sent: + new_test_ext().execute_with(|| { + assert_ok!(send_xcm::(dest.into(), good.clone())); + assert_eq!( + XcmpQueue::take_outbound_messages(usize::MAX), + vec![( + HRMP_PARA_ID.into(), + (XcmpMessageFormat::ConcatenatedVersionedXcm, VersionedXcm::V3(good.clone())) + .encode(), + )] + ); + }); + + // Nesting the message one more time should not send it: + let bad = Xcm(vec![SetAppendix(good)]); + new_test_ext().execute_with(|| { + assert_err!(send_xcm::(dest.into(), bad), SendError::ExceedsMaxMessageSize); + assert!(XcmpQueue::take_outbound_messages(usize::MAX).is_empty()); + }); +} + +#[test] +fn hrmp_signals_are_prioritized() { + let message = Xcm(vec![Trap(5)]); + + let sibling_para_id = ParaId::from(12345); + let dest = (Parent, X1(Parachain(sibling_para_id.into()))); + let mut dest_wrapper = Some(dest.into()); + let mut msg_wrapper = Some(message.clone()); + + new_test_ext().execute_with(|| { + frame_system::Pallet::::set_block_number(1); + ::validate(&mut dest_wrapper, &mut msg_wrapper).unwrap(); + + // check wrapper were consumed + assert_eq!(None, dest_wrapper.take()); + assert_eq!(None, msg_wrapper.take()); + + ParachainSystem::open_custom_outbound_hrmp_channel_for_benchmarks_or_tests( + sibling_para_id, + cumulus_primitives_core::AbridgedHrmpChannel { + max_capacity: 128, + max_total_size: 1 << 16, + max_message_size: 128, + msg_count: 0, + total_size: 0, + mqc_head: None, + }, + ); + + let taken = XcmpQueue::take_outbound_messages(130); + assert_eq!(taken, vec![]); + + // Enqueue some messages + let num_events = frame_system::Pallet::::events().len(); + for _ in 0..256 { + assert_ok!(send_xcm::(dest.into(), message.clone())); + } + assert_eq!(num_events + 256, frame_system::Pallet::::events().len()); + + 
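// Pages are capped by the channel's `max_message_size` (128 bytes here), so the first page
// assembled below only contains the first 31 of the 256 queued XCMs.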
// Without a signal we get the messages in order: + let mut expected_msg = XcmpMessageFormat::ConcatenatedVersionedXcm.encode(); + for _ in 0..31 { + expected_msg.extend(VersionedXcm::V3(message.clone()).encode()); + } + + hypothetically!({ + let taken = XcmpQueue::take_outbound_messages(usize::MAX); + assert_eq!(taken, vec![(sibling_para_id.into(), expected_msg,)]); + }); + + // But a signal gets prioritized instead of the messages: + XcmpQueue::send_signal(sibling_para_id.into(), ChannelSignal::Suspend); + + let taken = XcmpQueue::take_outbound_messages(130); + assert_eq!( + taken, + vec![( + sibling_para_id.into(), + (XcmpMessageFormat::Signals, ChannelSignal::Suspend).encode() + )] + ); + }); +} + +#[test] +fn maybe_double_encoded_versioned_xcm_works() { + // pre conditions + assert_eq!(VersionedXcm::<()>::V2(Default::default()).encode(), &[2, 0]); + assert_eq!(VersionedXcm::<()>::V3(Default::default()).encode(), &[3, 0]); +} + +// Now also testing a page instead of just concat messages. +#[test] +fn maybe_double_encoded_versioned_xcm_decode_page_works() { + let page = mk_page(); + + // Now try to decode the page. + let input = &mut &page[..]; + for i in 0..100 { + match (i % 2, VersionedXcm::<()>::decode(input)) { + (0, Ok(xcm)) => { + assert_eq!(xcm, v2_xcm()); + }, + (1, Ok(xcm)) => { + assert_eq!(xcm, v3_xcm()); + }, + unexpected => unreachable!("{:?}", unexpected), + } + } + + assert_eq!(input.remaining_len(), Ok(Some(0)), "All data consumed"); +} + +/// Check that `take_first_concatenated_xcm` correctly splits a page into its XCMs. +#[test] +fn take_first_concatenated_xcm_works() { + let page = mk_page(); + let input = &mut &page[..]; + + for i in 0..100 { + let xcm = XcmpQueue::take_first_concatenated_xcm(input, &mut WeightMeter::new()).unwrap(); + match (i % 2, xcm) { + (0, data) | (2, data) => { + assert_eq!(data, v2_xcm().encode()); + }, + (1, data) | (3, data) => { + assert_eq!(data, v3_xcm().encode()); + }, + unexpected => unreachable!("{:?}", unexpected), + } + } + assert_eq!(input.remaining_len(), Ok(Some(0)), "All data consumed"); +} + +/// A message that is not too deeply nested will be accepted by `take_first_concatenated_xcm`. +#[test] +fn take_first_concatenated_xcm_good_recursion_depth_works() { + let mut good = Xcm::<()>(vec![ClearOrigin]); + for _ in 0..MAX_XCM_DECODE_DEPTH - 1 { + good = Xcm(vec![SetAppendix(good)]); + } + let good = VersionedXcm::V3(good); + + let page = good.encode(); + assert_ok!(XcmpQueue::take_first_concatenated_xcm(&mut &page[..], &mut WeightMeter::new())); +} + +/// A message that is too deeply nested will be rejected by `take_first_concatenated_xcm`. +#[test] +fn take_first_concatenated_xcm_good_bad_depth_errors() { + let mut bad = Xcm::<()>(vec![ClearOrigin]); + for _ in 0..MAX_XCM_DECODE_DEPTH { + bad = Xcm(vec![SetAppendix(bad)]); + } + let bad = VersionedXcm::V3(bad); + + let page = bad.encode(); + assert_err!( + XcmpQueue::take_first_concatenated_xcm(&mut &page[..], &mut WeightMeter::new()), + () + ); +} + +#[test] +fn lazy_migration_works() { + use crate::migration::v3::*; + + new_test_ext().execute_with(|| { + EnqueuedMessages::set(vec![]); + let _g = StorageNoopGuard::default(); // No storage is leaked. 
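// Populate the legacy `InboundXcmpStatus`/`InboundXcmpMessages` storage with 20 sibling
// channels of 30 messages each. Every `on_idle` call below migrates exactly one message
// (newest relay block first) into the new message queue; one extra call per channel removes
// the drained channel entry and a final call kills `InboundXcmpStatus` entirely.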
+ + let mut channels = vec![]; + for i in 0..20 { + let para = ParaId::from(i); + let mut message_metadata = vec![]; + for block in 0..30 { + message_metadata.push((block, XcmpMessageFormat::ConcatenatedVersionedXcm)); + InboundXcmpMessages::::insert(para, block, vec![(i + block) as u8]); + } + + channels.push(InboundChannelDetails { + sender: para, + state: InboundState::Ok, + message_metadata, + }); + } + InboundXcmpStatus::::set(Some(channels)); + + for para in 0..20u32 { + assert_eq!(InboundXcmpStatus::::get().unwrap().len() as u32, 20 - para); + assert_eq!(InboundXcmpMessages::::iter_prefix(ParaId::from(para)).count(), 30); + + for block in 0..30 { + XcmpQueue::on_idle(0u32.into(), Weight::MAX); + assert_eq!( + EnqueuedMessages::get(), + vec![(para.into(), vec![(29 - block + para) as u8])] + ); + EnqueuedMessages::set(vec![]); + } + // One more to jump to the next channel: + XcmpQueue::on_idle(0u32.into(), Weight::MAX); + + assert_eq!(InboundXcmpStatus::::get().unwrap().len() as u32, 20 - para - 1); + assert_eq!(InboundXcmpMessages::::iter_prefix(ParaId::from(para)).count(), 0); + } + // One more for the cleanup: + XcmpQueue::on_idle(0u32.into(), Weight::MAX); + + assert!(!InboundXcmpStatus::::exists()); + assert_eq!(InboundXcmpMessages::::iter().count(), 0); + EnqueuedMessages::set(vec![]); + }); +} + +#[test] +fn lazy_migration_noop_when_out_of_weight() { + use crate::migration::v3::*; + assert!(!XcmpQueue::on_idle_weight().is_zero(), "pre condition"); + + new_test_ext().execute_with(|| { + let _g = StorageNoopGuard::default(); // No storage is leaked. + let block = 5; + let para = ParaId::from(4); + let message_metadata = vec![(block, XcmpMessageFormat::ConcatenatedVersionedXcm)]; + + InboundXcmpMessages::::insert(para, block, vec![123u8]); + InboundXcmpStatus::::set(Some(vec![InboundChannelDetails { + sender: para, + state: InboundState::Ok, + message_metadata, + }])); + + // Hypothetically, it would do something with enough weight limit: + hypothetically!({ + XcmpQueue::on_idle(0u32.into(), Weight::MAX); + assert_eq!(EnqueuedMessages::get(), vec![(para, vec![123u8])]); + }); + // But does not, since the limit is zero: + assert_storage_noop!({ XcmpQueue::on_idle(0u32.into(), Weight::zero()) }); + + InboundXcmpMessages::::remove(para, block); + InboundXcmpStatus::::kill(); + }); +} + #[test] fn xcmp_queue_send_xcm_works() { new_test_ext().execute_with(|| { @@ -370,3 +730,80 @@ fn xcmp_queue_send_xcm_works() { .any(|(para_id, _)| para_id == &sibling_para_id)); }) } + +#[test] +fn verify_fee_factor_increase_and_decrease() { + use cumulus_primitives_core::AbridgedHrmpChannel; + use sp_runtime::FixedU128; + + let sibling_para_id = ParaId::from(12345); + let destination = (Parent, Parachain(sibling_para_id.into())).into(); + let xcm = Xcm(vec![ClearOrigin; 100]); + let versioned_xcm = VersionedXcm::from(xcm.clone()); + let mut xcmp_message = XcmpMessageFormat::ConcatenatedVersionedXcm.encode(); + xcmp_message.extend(versioned_xcm.encode()); + + new_test_ext().execute_with(|| { + let initial = InitialFactor::get(); + assert_eq!(DeliveryFeeFactor::::get(sibling_para_id), initial); + + // Open channel so messages can actually be sent + ParachainSystem::open_custom_outbound_hrmp_channel_for_benchmarks_or_tests( + sibling_para_id, + AbridgedHrmpChannel { + max_capacity: 10, + max_total_size: 1000, + max_message_size: 104, + msg_count: 0, + total_size: 0, + mqc_head: None, + }, + ); + + // Fee factor is only increased in `send_fragment`, which is called by `send_xcm`. 
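// The uncongested fee asserted below is `BaseDeliveryFee + ByteFee * encoded_size`
// (402_000_000 for this XCM). Once the queued bytes exceed `max_total_size / THRESHOLD_FACTOR`
// (1000 / 2 = 500 here), every further send multiplies the fee factor by
// `EXPONENTIAL_FEE_BASE` plus a message-size dependent term, and draining the channel via
// `take_outbound_messages` divides it back down.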
+ // When queue is not congested, fee factor doesn't change. + assert_ok!(send_xcm::(destination, xcm.clone())); // Size 104 + assert_ok!(send_xcm::(destination, xcm.clone())); // Size 208 + assert_ok!(send_xcm::(destination, xcm.clone())); // Size 312 + assert_ok!(send_xcm::(destination, xcm.clone())); // Size 416 + assert_eq!(DeliveryFeeFactor::::get(sibling_para_id), initial); + + // Sending the message right now is cheap + let (_, delivery_fees) = + validate_send::(destination, xcm.clone()).expect("message can be sent; qed"); + let Fungible(delivery_fee_amount) = delivery_fees.inner()[0].fun else { + unreachable!("asset is fungible; qed"); + }; + assert_eq!(delivery_fee_amount, 402_000_000); + + let smaller_xcm = Xcm(vec![ClearOrigin; 30]); + + // When we get to half of `max_total_size`, because `THRESHOLD_FACTOR` is 2, + // then the fee factor starts to increase. + assert_ok!(send_xcm::(destination, xcm.clone())); // Size 520 + assert_eq!(DeliveryFeeFactor::::get(sibling_para_id), FixedU128::from_float(1.05)); + + for _ in 0..12 { + // We finish at size 929 + assert_ok!(send_xcm::(destination, smaller_xcm.clone())); + } + assert!(DeliveryFeeFactor::::get(sibling_para_id) > FixedU128::from_float(1.88)); + + // Sending the message right now is expensive + let (_, delivery_fees) = + validate_send::(destination, xcm.clone()).expect("message can be sent; qed"); + let Fungible(delivery_fee_amount) = delivery_fees.inner()[0].fun else { + unreachable!("asset is fungible; qed"); + }; + assert_eq!(delivery_fee_amount, 758_030_955); + + // Fee factor only decreases in `take_outbound_messages` + for _ in 0..5 { + // We take 5 100 byte pages + XcmpQueue::take_outbound_messages(1); + } + assert!(DeliveryFeeFactor::::get(sibling_para_id) < FixedU128::from_float(1.72)); + XcmpQueue::take_outbound_messages(1); + assert!(DeliveryFeeFactor::::get(sibling_para_id) < FixedU128::from_float(1.63)); + }); +} diff --git a/cumulus/pallets/xcmp-queue/src/weights.rs b/cumulus/pallets/xcmp-queue/src/weights.rs index 2d20bba6f8f6b18571ea4d241d57f08998d13d87..27daa791e19a334fe1a6189feb52a9ef6f206312 100644 --- a/cumulus/pallets/xcmp-queue/src/weights.rs +++ b/cumulus/pallets/xcmp-queue/src/weights.rs @@ -13,51 +13,239 @@ // See the License for the specific language governing permissions and // limitations under the License. +//! Autogenerated weights for `cumulus_pallet_xcmp_queue` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-09-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `Olivers-MacBook-Pro.local`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: `1024` + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --pallet +// cumulus-pallet-xcmp-queue +// --chain +// asset-hub-kusama-dev +// --output +// cumulus/pallets/xcmp-queue/src/weights.rs +// --template +// substrate/.maintain/frame-weight-template.hbs +// --extrinsic +// + +#![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] -use frame_support::{ - traits::Get, - weights::{constants::RocksDbWeight, Weight}, -}; -use sp_std::marker::PhantomData; +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; -// Implemented by autogenerated benchmarking code. +/// Weight functions needed for `cumulus_pallet_xcmp_queue`. 
pub trait WeightInfo { fn set_config_with_u32() -> Weight; - fn set_config_with_weight() -> Weight; + fn enqueue_xcmp_message() -> Weight; + fn suspend_channel() -> Weight; + fn resume_channel() -> Weight; + fn take_first_concatenated_xcm() -> Weight; + fn on_idle_good_msg() -> Weight; + fn on_idle_large_msg() -> Weight; } +/// Weights for `cumulus_pallet_xcmp_queue` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); - impl WeightInfo for SubstrateWeight { - // Storage: XcmpQueue QueueConfig (r:1 w:1) + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:1) + /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_u32() -> Weight { - Weight::from_parts(2_717_000_u64, 0) + // Proof Size summary in bytes: + // Measured: `76` + // Estimated: `1561` + // Minimum execution time: 5_000_000 picoseconds. + Weight::from_parts(6_000_000, 1561) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - - // Storage: XcmpQueue QueueConfig (r:1 w:1) - fn set_config_with_weight() -> Weight { - Weight::from_parts(2_717_000_u64, 0) + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) + /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn enqueue_xcmp_message() -> Weight { + // Proof Size summary in bytes: + // Measured: `118` + // Estimated: `3517` + // Minimum execution time: 15_000_000 picoseconds. + Weight::from_parts(16_000_000, 3517) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) + } + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn suspend_channel() -> Weight { + // Proof Size summary in bytes: + // Measured: `76` + // Estimated: `1561` + // Minimum execution time: 3_000_000 picoseconds. + Weight::from_parts(4_000_000, 1561) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn resume_channel() -> Weight { + // Proof Size summary in bytes: + // Measured: `111` + // Estimated: `1596` + // Minimum execution time: 4_000_000 picoseconds. + Weight::from_parts(4_000_000, 1596) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + fn take_first_concatenated_xcm() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 44_000_000 picoseconds. 
+ Weight::from_parts(45_000_000, 0) + } + /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Storage: `XcmpQueue::InboundXcmpMessages` (r:1 w:1) + /// Proof: `XcmpQueue::InboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) + /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn on_idle_good_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65747` + // Estimated: `69212` + // Minimum execution time: 63_000_000 picoseconds. + Weight::from_parts(66_000_000, 69212) + .saturating_add(T::DbWeight::get().reads(6_u64)) + .saturating_add(T::DbWeight::get().writes(5_u64)) + } + /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + fn on_idle_large_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65710` + // Estimated: `69175` + // Minimum execution time: 42_000_000 picoseconds. + Weight::from_parts(52_000_000, 69175) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } } +// For backwards compatibility and tests. impl WeightInfo for () { - // Storage: XcmpQueue QueueConfig (r:1 w:1) + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:1) + /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_u32() -> Weight { - Weight::from_parts(2_717_000_u64, 0) + // Proof Size summary in bytes: + // Measured: `76` + // Estimated: `1561` + // Minimum execution time: 5_000_000 picoseconds. 
+ Weight::from_parts(6_000_000, 1561) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - - // Storage: XcmpQueue QueueConfig (r:1 w:1) - fn set_config_with_weight() -> Weight { - Weight::from_parts(2_717_000_u64, 0) + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) + /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn enqueue_xcmp_message() -> Weight { + // Proof Size summary in bytes: + // Measured: `118` + // Estimated: `3517` + // Minimum execution time: 15_000_000 picoseconds. + Weight::from_parts(16_000_000, 3517) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + } + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn suspend_channel() -> Weight { + // Proof Size summary in bytes: + // Measured: `76` + // Estimated: `1561` + // Minimum execution time: 3_000_000 picoseconds. + Weight::from_parts(4_000_000, 1561) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn resume_channel() -> Weight { + // Proof Size summary in bytes: + // Measured: `111` + // Estimated: `1596` + // Minimum execution time: 4_000_000 picoseconds. + Weight::from_parts(4_000_000, 1596) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + fn take_first_concatenated_xcm() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 44_000_000 picoseconds. 
+ Weight::from_parts(45_000_000, 0) + } + /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Storage: `XcmpQueue::InboundXcmpMessages` (r:1 w:1) + /// Proof: `XcmpQueue::InboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) + /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn on_idle_good_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65747` + // Estimated: `69212` + // Minimum execution time: 63_000_000 picoseconds. + Weight::from_parts(66_000_000, 69212) + .saturating_add(RocksDbWeight::get().reads(6_u64)) + .saturating_add(RocksDbWeight::get().writes(5_u64)) + } + /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + fn on_idle_large_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65710` + // Estimated: `69175` + // Minimum execution time: 42_000_000 picoseconds. + Weight::from_parts(52_000_000, 69175) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } } diff --git a/cumulus/parachain-template/node/Cargo.toml b/cumulus/parachain-template/node/Cargo.toml index e73c7b507262eca43c2e77293d0ecadd606bdc57..73bbbb6d77146aee2f0d64dcc8d5963af5d4776c 100644 --- a/cumulus/parachain-template/node/Cargo.toml +++ b/cumulus/parachain-template/node/Cargo.toml @@ -17,6 +17,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0" } serde = { version = "1.0.188", features = ["derive"] } jsonrpsee = { version = "0.16.2", features = ["server"] } futures = "0.3.28" +serde_json = "1.0.108" # Local parachain-template-runtime = { path = "../runtime" } @@ -76,6 +77,7 @@ substrate-build-script-utils = { path = "../../../substrate/utils/build-script-u [features] default = [] runtime-benchmarks = [ + "cumulus-primitives-core/runtime-benchmarks", "frame-benchmarking-cli/runtime-benchmarks", "frame-benchmarking/runtime-benchmarks", "parachain-template-runtime/runtime-benchmarks", diff --git a/cumulus/parachain-template/node/src/chain_spec.rs b/cumulus/parachain-template/node/src/chain_spec.rs index 0ca3c51900f20673b8b44452da3fdce9a117d932..a79c78699c07102aca27ed06c73f93ccf86ed3ce 100644 --- a/cumulus/parachain-template/node/src/chain_spec.rs +++ b/cumulus/parachain-template/node/src/chain_spec.rs @@ -7,8 +7,7 @@ use sp_core::{sr25519, Pair, Public}; use sp_runtime::traits::{IdentifyAccount, Verify}; /// Specialized `ChainSpec` for the normal parachain runtime. 
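/// Genesis is now supplied as a JSON patch through the `ChainSpec::builder` API, so the spec
/// no longer carries the runtime's genesis config type parameter.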
-pub type ChainSpec = - sc_service::GenericChainSpec; +pub type ChainSpec = sc_service::GenericChainSpec<(), Extensions>; /// The default XCM version to set in genesis config. const SAFE_XCM_VERSION: u32 = xcm::prelude::XCM_VERSION; @@ -68,53 +67,48 @@ pub fn development_config() -> ChainSpec { properties.insert("tokenDecimals".into(), 12.into()); properties.insert("ss58Format".into(), 42.into()); - ChainSpec::from_genesis( - // Name - "Development", - // ID - "dev", - ChainType::Development, - move || { - testnet_genesis( - // initial collators. - vec![ - ( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed("Alice"), - ), - ( - get_account_id_from_seed::("Bob"), - get_collator_keys_from_seed("Bob"), - ), - ], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - ], - get_account_id_from_seed::("Alice"), - 1000.into(), - ) - }, - Vec::new(), - None, - None, - None, - None, + ChainSpec::builder( + parachain_template_runtime::WASM_BINARY + .expect("WASM binary was not built, please build it!"), Extensions { - relay_chain: "rococo-local".into(), // You MUST set this to the correct network! + relay_chain: "rococo-local".into(), + // You MUST set this to the correct network! para_id: 1000, }, ) + .with_name("Development") + .with_id("dev") + .with_chain_type(ChainType::Development) + .with_genesis_config_patch(testnet_genesis( + // initial collators. + vec![ + ( + get_account_id_from_seed::("Alice"), + get_collator_keys_from_seed("Alice"), + ), + ( + get_account_id_from_seed::("Bob"), + get_collator_keys_from_seed("Bob"), + ), + ], + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Charlie"), + get_account_id_from_seed::("Dave"), + get_account_id_from_seed::("Eve"), + get_account_id_from_seed::("Ferdie"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + get_account_id_from_seed::("Charlie//stash"), + get_account_id_from_seed::("Dave//stash"), + get_account_id_from_seed::("Eve//stash"), + get_account_id_from_seed::("Ferdie//stash"), + ], + get_account_id_from_seed::("Alice"), + 1000.into(), + )) + .build() } pub fn local_testnet_config() -> ChainSpec { @@ -124,59 +118,51 @@ pub fn local_testnet_config() -> ChainSpec { properties.insert("tokenDecimals".into(), 12.into()); properties.insert("ss58Format".into(), 42.into()); - ChainSpec::from_genesis( - // Name - "Local Testnet", - // ID - "local_testnet", - ChainType::Local, - move || { - testnet_genesis( - // initial collators. 
- vec![ - ( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed("Alice"), - ), - ( - get_account_id_from_seed::("Bob"), - get_collator_keys_from_seed("Bob"), - ), - ], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - ], - get_account_id_from_seed::("Alice"), - 1000.into(), - ) - }, - // Bootnodes - Vec::new(), - // Telemetry - None, - // Protocol ID - Some("template-local"), - // Fork ID - None, - // Properties - Some(properties), - // Extensions + #[allow(deprecated)] + ChainSpec::builder( + parachain_template_runtime::WASM_BINARY + .expect("WASM binary was not built, please build it!"), Extensions { - relay_chain: "rococo-local".into(), // You MUST set this to the correct network! + relay_chain: "rococo-local".into(), + // You MUST set this to the correct network! para_id: 1000, }, ) + .with_name("Local Testnet") + .with_id("local_testnet") + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(testnet_genesis( + // initial collators. + vec![ + ( + get_account_id_from_seed::("Alice"), + get_collator_keys_from_seed("Alice"), + ), + ( + get_account_id_from_seed::("Bob"), + get_collator_keys_from_seed("Bob"), + ), + ], + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Charlie"), + get_account_id_from_seed::("Dave"), + get_account_id_from_seed::("Eve"), + get_account_id_from_seed::("Ferdie"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + get_account_id_from_seed::("Charlie//stash"), + get_account_id_from_seed::("Dave//stash"), + get_account_id_from_seed::("Eve//stash"), + get_account_id_from_seed::("Ferdie//stash"), + ], + get_account_id_from_seed::("Alice"), + 1000.into(), + )) + .with_protocol_id("template-local") + .with_properties(properties) + .build() } fn testnet_genesis( @@ -184,28 +170,20 @@ fn testnet_genesis( endowed_accounts: Vec, root: AccountId, id: ParaId, -) -> parachain_template_runtime::RuntimeGenesisConfig { - parachain_template_runtime::RuntimeGenesisConfig { - system: parachain_template_runtime::SystemConfig { - code: parachain_template_runtime::WASM_BINARY - .expect("WASM binary was not build, please build it!") - .to_vec(), - ..Default::default() - }, - balances: parachain_template_runtime::BalancesConfig { - balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 60)).collect(), +) -> serde_json::Value { + serde_json::json!({ + "balances": { + "balances": endowed_accounts.iter().cloned().map(|k| (k, 1u64 << 60)).collect::>(), }, - parachain_info: parachain_template_runtime::ParachainInfoConfig { - parachain_id: id, - ..Default::default() + "parachainInfo": { + "parachainId": id, }, - collator_selection: parachain_template_runtime::CollatorSelectionConfig { - invulnerables: invulnerables.iter().cloned().map(|(acc, _)| acc).collect(), - candidacy_bond: EXISTENTIAL_DEPOSIT * 16, - ..Default::default() + "collatorSelection": { + "invulnerables": invulnerables.iter().cloned().map(|(acc, _)| acc).collect::>(), + "candidacyBond": EXISTENTIAL_DEPOSIT * 16, }, - session: 
@@ -214,18 +192,11 @@ fn testnet_genesis(
 						template_session_keys(aura), // session keys
 					)
 				})
-				.collect(),
+				.collect::<Vec<_>>(),
 		},
-		// no need to pass anything to aura, in fact it will panic if we do. Session will take care
-		// of this.
-		aura: Default::default(),
-		aura_ext: Default::default(),
-		parachain_system: Default::default(),
-		polkadot_xcm: parachain_template_runtime::PolkadotXcmConfig {
-			safe_xcm_version: Some(SAFE_XCM_VERSION),
-			..Default::default()
+		"polkadotXcm": {
+			"safeXcmVersion": Some(SAFE_XCM_VERSION),
 		},
-		transaction_payment: Default::default(),
-		sudo: parachain_template_runtime::SudoConfig { key: Some(root) },
-	}
+		"sudo": { "key": Some(root) }
+	})
 }
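Side note on the chain_spec.rs change above: both template configs are now assembled with the `ChainSpec::builder(..)` API, and `testnet_genesis` returns a plain `serde_json::Value` patch instead of a typed `RuntimeGenesisConfig`. The patch is applied on top of the runtime's default genesis config, which is why the old `aura`, `parachain_system` and `transaction_payment` entries could simply be dropped (the `system.code` field moved into the `ChainSpec::builder` call itself), and why the keys use the camelCase serde names such as `parachainId` and `candidacyBond`. A minimal stand-alone sketch of what such a patch looks like; the account string is a made-up placeholder and only the `serde_json` crate is assumed:

// Sketch only: shows the shape of a genesis-config patch as passed to
// `.with_genesis_config_patch(..)`; it is not the template's actual genesis.
fn example_patch(sudo_key: &str) -> serde_json::Value {
	serde_json::json!({
		// Endow one account and make it the sudo key; every section that is
		// not mentioned here keeps the runtime's default genesis value.
		"balances": {
			"balances": [(sudo_key, 1u64 << 60)],
		},
		"sudo": { "key": sudo_key },
	})
}

fn main() {
	// "5Grwva..." stands in for a real SS58 address here.
	println!("{}", example_patch("5Grwva..."));
}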
diff --git a/cumulus/parachain-template/node/src/service.rs b/cumulus/parachain-template/node/src/service.rs
index 9fa6d60c2e74a9c6118b166cb7ee141a47b9595b..43d16ee0d5b7b326e5cf56f8b964f84fc84d16f3 100644
--- a/cumulus/parachain-template/node/src/service.rs
+++ b/cumulus/parachain-template/node/src/service.rs
@@ -255,10 +255,14 @@ async fn start_node_impl(
 		// Here you can check whether the hardware meets your chains' requirements. Putting a link
 		// in there and swapping out the requirements for your own are probably a good idea. The
 		// requirements for a para-chain are dictated by its relay-chain.
-		if !SUBSTRATE_REFERENCE_HARDWARE.check_hardware(&hwbench) && validator {
-			log::warn!(
-				"⚠️ The hardware does not meet the minimal requirements for role 'Authority'."
+		match SUBSTRATE_REFERENCE_HARDWARE.check_hardware(&hwbench) {
+			Err(err) if validator => {
+				log::warn!(
+					"⚠️ The hardware does not meet the minimal requirements {} for role 'Authority'.",
+					err
 				);
+			},
+			_ => {},
 		}
 
 		if let Some(ref mut telemetry) = telemetry {
@@ -411,6 +415,7 @@ fn start_consensus(
 		collator_service,
 		// Very limited proposal time.
 		authoring_duration: Duration::from_millis(500),
+		collation_request_receiver: None,
 	};
 
 	let fut =
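On the service.rs hunk above: the hardware benchmark check now reports why it failed instead of returning a bare boolean, and the node still only escalates that to a warning when it runs in the 'Authority' (collator) role. A plain-Rust sketch of that control flow, with a `String` standing in for the real failure type and `hardware_check` standing in for `SUBSTRATE_REFERENCE_HARDWARE.check_hardware(&hwbench)`:

// Sketch only: illustrates the match-on-Result pattern used in the diff.
fn warn_if_slow(hardware_check: Result<(), String>, validator: bool) {
	match hardware_check {
		Err(err) if validator => {
			eprintln!("⚠️ The hardware does not meet the minimal requirements {err} for role 'Authority'.");
		},
		_ => {},
	}
}

fn main() {
	warn_if_slow(Err("(memory bandwidth too low)".into()), true); // warns
	warn_if_slow(Err("(memory bandwidth too low)".into()), false); // stays silent
}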
"polkadot-parachain-primitives/std", "polkadot-runtime-common/std", "scale-info/std", @@ -127,9 +131,11 @@ std = [ ] runtime-benchmarks = [ + "cumulus-pallet-dmp-queue/runtime-benchmarks", "cumulus-pallet-parachain-system/runtime-benchmarks", "cumulus-pallet-session-benchmarking/runtime-benchmarks", "cumulus-pallet-xcmp-queue/runtime-benchmarks", + "cumulus-primitives-core/runtime-benchmarks", "cumulus-primitives-utility/runtime-benchmarks", "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", @@ -138,10 +144,12 @@ runtime-benchmarks = [ "hex-literal", "pallet-balances/runtime-benchmarks", "pallet-collator-selection/runtime-benchmarks", + "pallet-message-queue/runtime-benchmarks", "pallet-parachain-template/runtime-benchmarks", "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", + "parachains-common/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", "polkadot-runtime-common/runtime-benchmarks", "sp-runtime/runtime-benchmarks", @@ -163,6 +171,7 @@ try-runtime = [ "pallet-authorship/try-runtime", "pallet-balances/try-runtime", "pallet-collator-selection/try-runtime", + "pallet-message-queue/try-runtime", "pallet-parachain-template/try-runtime", "pallet-session/try-runtime", "pallet-sudo/try-runtime", diff --git a/cumulus/parachain-template/runtime/src/lib.rs b/cumulus/parachain-template/runtime/src/lib.rs index b9bf97d7786f6c27736929ad5b408a57f9f64a37..7a064e227d4cf1e0471758cdac5303db6d716a73 100644 --- a/cumulus/parachain-template/runtime/src/lib.rs +++ b/cumulus/parachain-template/runtime/src/lib.rs @@ -10,6 +10,7 @@ mod weights; pub mod xcm_config; use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; +use polkadot_runtime_common::xcm_sender::NoPriceForMessageDelivery; use smallvec::smallvec; use sp_api::impl_runtime_apis; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; @@ -25,12 +26,15 @@ use sp_std::prelude::*; use sp_version::NativeVersion; use sp_version::RuntimeVersion; +use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; use frame_support::{ construct_runtime, dispatch::DispatchClass, genesis_builder_helper::{build_config, create_default_config}, parameter_types, - traits::{ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, Everything}, + traits::{ + ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, Everything, TransformOrigin, + }, weights::{ constants::WEIGHT_REF_TIME_PER_SECOND, ConstantMultiplier, Weight, WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial, @@ -42,9 +46,10 @@ use frame_system::{ EnsureRoot, }; use pallet_xcm::{EnsureXcm, IsVoiceOfBody}; +use parachains_common::message_queue::{NarrowOriginToSibling, ParaIdToSibling}; pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; pub use sp_runtime::{MultiAddress, Perbill, Permill}; -use xcm_config::{RelayLocation, XcmConfig, XcmOriginToTransactDispatchOrigin}; +use xcm_config::{RelayLocation, XcmOriginToTransactDispatchOrigin}; #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; @@ -56,7 +61,6 @@ use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; // XCM Imports use xcm::latest::prelude::BodyId; -use xcm_executor::XcmExecutor; /// Import the template pallet. 
diff --git a/cumulus/parachain-template/runtime/src/lib.rs b/cumulus/parachain-template/runtime/src/lib.rs
index b9bf97d7786f6c27736929ad5b408a57f9f64a37..7a064e227d4cf1e0471758cdac5303db6d716a73 100644
--- a/cumulus/parachain-template/runtime/src/lib.rs
+++ b/cumulus/parachain-template/runtime/src/lib.rs
@@ -10,6 +10,7 @@ mod weights;
 pub mod xcm_config;
 
 use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases;
+use polkadot_runtime_common::xcm_sender::NoPriceForMessageDelivery;
 use smallvec::smallvec;
 use sp_api::impl_runtime_apis;
 use sp_core::{crypto::KeyTypeId, OpaqueMetadata};
@@ -25,12 +26,15 @@ use sp_std::prelude::*;
 use sp_version::NativeVersion;
 use sp_version::RuntimeVersion;
 
+use cumulus_primitives_core::{AggregateMessageOrigin, ParaId};
 use frame_support::{
 	construct_runtime,
 	dispatch::DispatchClass,
 	genesis_builder_helper::{build_config, create_default_config},
 	parameter_types,
-	traits::{ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, Everything},
+	traits::{
+		ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, Everything, TransformOrigin,
+	},
 	weights::{
 		constants::WEIGHT_REF_TIME_PER_SECOND, ConstantMultiplier, Weight, WeightToFeeCoefficient,
 		WeightToFeeCoefficients, WeightToFeePolynomial,
@@ -42,9 +46,10 @@ use frame_system::{
 	EnsureRoot,
 };
 use pallet_xcm::{EnsureXcm, IsVoiceOfBody};
+use parachains_common::message_queue::{NarrowOriginToSibling, ParaIdToSibling};
 pub use sp_consensus_aura::sr25519::AuthorityId as AuraId;
 pub use sp_runtime::{MultiAddress, Perbill, Permill};
-use xcm_config::{RelayLocation, XcmConfig, XcmOriginToTransactDispatchOrigin};
+use xcm_config::{RelayLocation, XcmOriginToTransactDispatchOrigin};
 
 #[cfg(any(feature = "std", test))]
 pub use sp_runtime::BuildStorage;
@@ -56,7 +61,6 @@ use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight};
 
 // XCM Imports
 use xcm::latest::prelude::BodyId;
-use xcm_executor::XcmExecutor;
 
 /// Import the template pallet.
 pub use pallet_parachain_template;
@@ -351,6 +355,7 @@ impl pallet_balances::Config for Runtime {
 	type MaxReserves = ConstU32<50>;
 	type ReserveIdentifier = [u8; 8];
 	type RuntimeHoldReason = RuntimeHoldReason;
+	type RuntimeFreezeReason = RuntimeFreezeReason;
 	type FreezeIdentifier = ();
 	type MaxHolds = ConstU32<0>;
 	type MaxFreezes = ConstU32<0>;
@@ -379,14 +384,16 @@ impl pallet_sudo::Config for Runtime {
 parameter_types! {
 	pub const ReservedXcmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4);
 	pub const ReservedDmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4);
+	pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent;
 }
 
 impl cumulus_pallet_parachain_system::Config for Runtime {
+	type WeightInfo = ();
 	type RuntimeEvent = RuntimeEvent;
 	type OnSystemEvent = ();
 	type SelfParaId = parachain_info::Pallet<Runtime>;
 	type OutboundXcmpMessageSource = XcmpQueue;
-	type DmpMessageHandler = DmpQueue;
+	type DmpQueue = frame_support::traits::EnqueueWithOrigin<MessageQueue, RelayOrigin>;
 	type ReservedDmpWeight = ReservedDmpWeight;
 	type XcmpMessageHandler = XcmpQueue;
 	type ReservedXcmpWeight = ReservedXcmpWeight;
@@ -401,24 +408,45 @@ impl cumulus_pallet_parachain_system::Config for Runtime {
 
 impl parachain_info::Config for Runtime {}
 
+parameter_types! {
+	pub MessageQueueServiceWeight: Weight = Perbill::from_percent(35) * RuntimeBlockWeights::get().max_block;
+}
+
+impl pallet_message_queue::Config for Runtime {
+	type RuntimeEvent = RuntimeEvent;
+	type WeightInfo = ();
+	#[cfg(feature = "runtime-benchmarks")]
+	type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor<
+		cumulus_primitives_core::AggregateMessageOrigin,
+	>;
+	#[cfg(not(feature = "runtime-benchmarks"))]
+	type MessageProcessor = xcm_builder::ProcessXcmMessage<
+		AggregateMessageOrigin,
+		xcm_executor::XcmExecutor<xcm_config::XcmConfig>,
+		RuntimeCall,
+	>;
+	type Size = u32;
+	// The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin:
+	type QueueChangeHandler = NarrowOriginToSibling<XcmpQueue>;
+	type QueuePausedQuery = NarrowOriginToSibling<XcmpQueue>;
+	type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>;
+	type MaxStale = sp_core::ConstU32<8>;
+	type ServiceWeight = MessageQueueServiceWeight;
+}
+
 impl cumulus_pallet_aura_ext::Config for Runtime {}
 
 impl cumulus_pallet_xcmp_queue::Config for Runtime {
 	type RuntimeEvent = RuntimeEvent;
-	type XcmExecutor = XcmExecutor<XcmConfig>;
 	type ChannelInfo = ParachainSystem;
 	type VersionWrapper = ();
-	type ExecuteOverweightOrigin = EnsureRoot<AccountId>;
+	// Enqueue XCMP messages from siblings for later processing.
+	type XcmpQueue = TransformOrigin<MessageQueue, AggregateMessageOrigin, ParaId, ParaIdToSibling>;
+	type MaxInboundSuspended = sp_core::ConstU32<1_000>;
 	type ControllerOrigin = EnsureRoot<AccountId>;
 	type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin;
 	type WeightInfo = ();
-	type PriceForSiblingDelivery = ();
-}
-
-impl cumulus_pallet_dmp_queue::Config for Runtime {
-	type RuntimeEvent = RuntimeEvent;
-	type XcmExecutor = XcmExecutor<XcmConfig>;
-	type ExecuteOverweightOrigin = EnsureRoot<AccountId>;
+	type PriceForSiblingDelivery = NoPriceForMessageDelivery<ParaId>;
 }
 
 parameter_types! {
@@ -511,7 +539,7 @@ construct_runtime!(
 		XcmpQueue: cumulus_pallet_xcmp_queue = 30,
 		PolkadotXcm: pallet_xcm = 31,
 		CumulusXcm: cumulus_pallet_xcm = 32,
-		DmpQueue: cumulus_pallet_dmp_queue = 33,
+		MessageQueue: pallet_message_queue = 33,
 
 		// Template
 		TemplatePallet: pallet_parachain_template = 50,
@@ -525,8 +553,10 @@ mod benches {
 		[pallet_balances, Balances]
 		[pallet_session, SessionBench::<Runtime>]
 		[pallet_timestamp, Timestamp]
+		[pallet_message_queue, MessageQueue]
 		[pallet_sudo, Sudo]
 		[pallet_collator_selection, CollatorSelection]
+		[cumulus_pallet_parachain_system, ParachainSystem]
 		[cumulus_pallet_xcmp_queue, XcmpQueue]
 	);
 }
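For orientation on the runtime changes above: downward messages from the relay chain are now enqueued straight into `pallet_message_queue` via `EnqueueWithOrigin<MessageQueue, RelayOrigin>` (that is, under `AggregateMessageOrigin::Parent`), XCMP traffic from sibling parachains reaches the same pallet through `TransformOrigin<MessageQueue, AggregateMessageOrigin, ParaId, ParaIdToSibling>`, and `MessageQueueServiceWeight` caps queue servicing at 35% of the block weight. The glue between the ParaId-keyed XCMP side and the origin-keyed message queue is just an origin conversion; roughly, as a simplified sketch (not the actual `parachains_common` code; it assumes only the `cumulus-primitives-core` and `sp-runtime` crates):

use cumulus_primitives_core::{AggregateMessageOrigin, ParaId};
use sp_runtime::traits::Convert;

/// Sketch of the idea behind `parachains_common::message_queue::ParaIdToSibling`:
/// map a sibling parachain's id onto the message-queue origin for that sibling.
pub struct ParaIdToSiblingSketch;

impl Convert<ParaId, AggregateMessageOrigin> for ParaIdToSiblingSketch {
	fn convert(para_id: ParaId) -> AggregateMessageOrigin {
		AggregateMessageOrigin::Sibling(para_id)
	}
}

`NarrowOriginToSibling<XcmpQueue>` is, loosely, the opposite direction: queue-change and pause queries carrying a `Sibling(..)` origin are forwarded to the XCMP-queue pallet with the plain `ParaId`, and other origins are ignored.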
diff --git a/cumulus/parachain-template/runtime/src/xcm_config.rs b/cumulus/parachain-template/runtime/src/xcm_config.rs
index 353f68d22e35085a4b6fb803171a7b5d3c680fc6..752137c96f184ec731d2ce5edb4860ff94c0e1f4 100644
--- a/cumulus/parachain-template/runtime/src/xcm_config.rs
+++ b/cumulus/parachain-template/runtime/src/xcm_config.rs
@@ -150,11 +150,6 @@ pub type XcmRouter = WithUniqueTopic<(
 	XcmpQueue,
 )>;
 
-#[cfg(feature = "runtime-benchmarks")]
-parameter_types! {
-	pub ReachableDest: Option<MultiLocation> = Some(Parent.into());
-}
-
 impl pallet_xcm::Config for Runtime {
 	type RuntimeEvent = RuntimeEvent;
 	type SendXcmOrigin = EnsureXcmOrigin<RuntimeOrigin, LocalOriginToLocation>;
@@ -180,8 +175,6 @@ impl pallet_xcm::Config for Runtime {
 	type SovereignAccountOf = LocationToAccountId;
 	type MaxLockers = ConstU32<8>;
 	type WeightInfo = pallet_xcm::TestWeightInfo;
-	#[cfg(feature = "runtime-benchmarks")]
-	type ReachableDest = ReachableDest;
 	type AdminOrigin = EnsureRoot<RuntimeOrigin>;
 	type MaxRemoteLockConsumers = ConstU32<0>;
 	type RemoteLockConsumerIdentifier = ();
diff --git a/cumulus/parachains/chain-specs/asset-hub-kusama.json b/cumulus/parachains/chain-specs/asset-hub-kusama.json
index e508587956237249c6e672cd256ae9b7ac5a8208..946bfc5983df9bced1750c827370e48fb8b25d12 100644
--- a/cumulus/parachains/chain-specs/asset-hub-kusama.json
+++ b/cumulus/parachains/chain-specs/asset-hub-kusama.json
@@ -3,16 +3,10 @@
   "id": "asset-hub-kusama",
   "chainType": "Live",
   "bootNodes": [
-    "/ip4/34.77.217.152/tcp/30334/p2p/12D3KooWF63ZxKtZMYs5247WQA8fcTiGJb2osXykc31cmjwNLwem",
-    "/ip4/34.77.119.77/tcp/30334/p2p/12D3KooWGowDwrXAh9cxkbPHPHuwMouFHrMcJhCVXcFS2B8vc5Ry",
     "/dns/kusama-asset-hub-connect-0.polkadot.io/tcp/30334/p2p/12D3KooWMzvdGcUXxacLdMQzRVrsP1mJrZHcrz8LtGbhLzve84Qx",
     "/dns/kusama-asset-hub-connect-0.polkadot.io/tcp/443/wss/p2p/12D3KooWMzvdGcUXxacLdMQzRVrsP1mJrZHcrz8LtGbhLzve84Qx",
     "/dns/kusama-asset-hub-connect-1.polkadot.io/tcp/30334/p2p/12D3KooWQmGf5z3DU1kKcZoLzMNgdbP31ybjuwxS1VGLKMUjq5ez",
     "/dns/kusama-asset-hub-connect-1.polkadot.io/tcp/443/wss/p2p/12D3KooWQmGf5z3DU1kKcZoLzMNgdbP31ybjuwxS1VGLKMUjq5ez",
-    "/dns/kusama-asset-hub-connect-2.polkadot.io/tcp/30334/p2p/12D3KooWLm6iHcmA3YD4xn2zfbm4KLF5KSUqJJAnmt2UGr9o2PgB",
-    "/dns/kusama-asset-hub-connect-2.polkadot.io/tcp/443/wss/p2p/12D3KooWLm6iHcmA3YD4xn2zfbm4KLF5KSUqJJAnmt2UGr9o2PgB",
-    "/dns/kusama-asset-hub-connect-3.polkadot.io/tcp/30334/p2p/12D3KooWD8Bma5qPbq7N5qdED3Xy6GXHfvfk86TL8aVTQKxmWkHG",
-    "/dns/kusama-asset-hub-connect-3.polkadot.io/tcp/443/wss/p2p/12D3KooWD8Bma5qPbq7N5qdED3Xy6GXHfvfk86TL8aVTQKxmWkHG",
     "/dns/boot.stake.plus/tcp/34333/p2p/12D3KooWAzSSZ7jLqMw1WPomYEKCYANQaKemXQ8BKoFvNEvfmdqR",
     "/dns/boot.stake.plus/tcp/34334/wss/p2p/12D3KooWAzSSZ7jLqMw1WPomYEKCYANQaKemXQ8BKoFvNEvfmdqR",
     "/dns/boot.metaspan.io/tcp/26052/p2p/12D3KooW9z9hKqe3mqYAp5UJMhZiCqhkTHyiR43fegnGmTJ3JAba",
@@ -26,7 +20,9 @@
"/dns/statemine.bootnode.amforc.com/tcp/30336/p2p/12D3KooWHmSyrBWsc6fdpq8HtCFWasmLVLYGKWA2a78m4xAHKyBq", "/dns/statemine.bootnode.amforc.com/tcp/30333/wss/p2p/12D3KooWHmSyrBWsc6fdpq8HtCFWasmLVLYGKWA2a78m4xAHKyBq", "/dns/statemine-boot-ng.dwellir.com/tcp/30343/p2p/12D3KooWQNJKBaNfW6Nn7HZDi5pSSEFmHL2Qz7chr9RksQUDR1Wk", - "/dns/statemine-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWQNJKBaNfW6Nn7HZDi5pSSEFmHL2Qz7chr9RksQUDR1Wk" + "/dns/statemine-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWQNJKBaNfW6Nn7HZDi5pSSEFmHL2Qz7chr9RksQUDR1Wk", + "/dns/statemine-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWCKUrE5uaXQ288ko3Ex3zCyozyJLG47KEYTopinnXNtYL", + "/dns/statemine-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWCKUrE5uaXQ288ko3Ex3zCyozyJLG47KEYTopinnXNtYL" ], "telemetryEndpoints": null, "protocolId": null, @@ -57,4 +53,4 @@ "childrenDefault": {} } } -} \ No newline at end of file +} diff --git a/cumulus/parachains/chain-specs/asset-hub-polkadot.json b/cumulus/parachains/chain-specs/asset-hub-polkadot.json index 46f3e0e4a957c7020d7f1c2bb04fb559d06c4605..c26506eb995aaa3c1fc4e60aa9228e6882755d96 100644 --- a/cumulus/parachains/chain-specs/asset-hub-polkadot.json +++ b/cumulus/parachains/chain-specs/asset-hub-polkadot.json @@ -3,16 +3,10 @@ "id": "asset-hub-polkadot", "chainType": "Live", "bootNodes": [ - "/ip4/34.65.251.121/tcp/30334/p2p/12D3KooWG3GrM6XKMM4gp3cvemdwUvu96ziYoJmqmetLZBXE8bSa", - "/ip4/34.65.35.228/tcp/30334/p2p/12D3KooWMRyTLrCEPcAQD6c4EnudL3vVzg9zji3whvsMYPUYevpq", "/dns/polkadot-asset-hub-connect-0.polkadot.io/tcp/30334/p2p/12D3KooWLHqbcQtoBygf7GJgVjVa3TaeLuf7VbicNdooaCmQM2JZ", "/dns/polkadot-asset-hub-connect-0.polkadot.io/tcp/443/wss/p2p/12D3KooWLHqbcQtoBygf7GJgVjVa3TaeLuf7VbicNdooaCmQM2JZ", "/dns/polkadot-asset-hub-connect-1.polkadot.io/tcp/30334/p2p/12D3KooWNDrKSayoZXGGE2dRSFW2g1iGPq3fTZE2U39ma9yZGKd3", "/dns/polkadot-asset-hub-connect-1.polkadot.io/tcp/443/wss/p2p/12D3KooWNDrKSayoZXGGE2dRSFW2g1iGPq3fTZE2U39ma9yZGKd3", - "/dns/polkadot-asset-hub-connect-2.polkadot.io/tcp/30334/p2p/12D3KooWApa2JW4rbLtgzuK7fjLMupLS9HZheX9cdkQKyu6AnGrP", - "/dns/polkadot-asset-hub-connect-2.polkadot.io/tcp/443/wss/p2p/12D3KooWApa2JW4rbLtgzuK7fjLMupLS9HZheX9cdkQKyu6AnGrP", - "/dns/polkadot-asset-hub-connect-3.polkadot.io/tcp/30334/p2p/12D3KooWRsVeHqRs2iKmjLiguxp8myL4G2mDAWhtX2jHwyWujseV", - "/dns/polkadot-asset-hub-connect-3.polkadot.io/tcp/443/wss/p2p/12D3KooWRsVeHqRs2iKmjLiguxp8myL4G2mDAWhtX2jHwyWujseV", "/dns/boot.stake.plus/tcp/35333/p2p/12D3KooWFrQjYaPZSSLLxEVmoaHFcrF6VoY4awG4KRSLaqy3JCdQ", "/dns/boot.stake.plus/tcp/35334/wss/p2p/12D3KooWFrQjYaPZSSLLxEVmoaHFcrF6VoY4awG4KRSLaqy3JCdQ", "/dns/boot.metaspan.io/tcp/16052/p2p/12D3KooWLwiJuvqQUB4kYaSjLenFKH9dWZhGZ4qi7pSb3sUYU651", @@ -26,7 +20,9 @@ "/dns/statemint.bootnode.amforc.com/tcp/30341/p2p/12D3KooWByohP9FXn7ao8syS167qJsbFdpa7fY2Y24xbKtt3r7Ls", "/dns/statemint.bootnode.amforc.com/tcp/30333/wss/p2p/12D3KooWByohP9FXn7ao8syS167qJsbFdpa7fY2Y24xbKtt3r7Ls", "/dns/statemint-boot-ng.dwellir.com/tcp/30344/p2p/12D3KooWEFrNuNk8fPdQS2hf34Gmqi6dGSvrETshGJUrqrvfRDZr", - "/dns/statemint-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWEFrNuNk8fPdQS2hf34Gmqi6dGSvrETshGJUrqrvfRDZr" + "/dns/statemint-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWEFrNuNk8fPdQS2hf34Gmqi6dGSvrETshGJUrqrvfRDZr", + "/dns/statemint-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWLKxHom7f3XawRJqrF8RwiKK5Sj3qZqz5c7hF6eJeXhTx", + "/dns/statemint-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWLKxHom7f3XawRJqrF8RwiKK5Sj3qZqz5c7hF6eJeXhTx" ], "telemetryEndpoints": null, "protocolId": null, @@ 
-59,4 +55,4 @@ "childrenDefault": {} } } -} \ No newline at end of file +} diff --git a/cumulus/parachains/chain-specs/asset-hub-rococo.json b/cumulus/parachains/chain-specs/asset-hub-rococo.json index 064a2dfc0db8446340d05722b4df93cb44a18e7e..900d9f0ffb2c35645d59e16649ee49b7713a01f1 100644 --- a/cumulus/parachains/chain-specs/asset-hub-rococo.json +++ b/cumulus/parachains/chain-specs/asset-hub-rococo.json @@ -4,9 +4,7 @@ "chainType": "Live", "bootNodes": [ "/dns/rococo-asset-hub-bootnode-0.polkadot.io/tcp/30333/p2p/12D3KooWRrZMndHAopzao34uGsN7srjS3gh9nAjTGKLSyJeU31Lg", - "/dns/rococo-asset-hub-bootnode-1.polkadot.io/tcp/30333/p2p/12D3KooWAewimoNJqMaiiV5pYiowA5hLuh5JS5QiRJCCyWVrrSTS", - "/dns/rococo-asset-hub-bootnode-2.polkadot.io/tcp/30333/p2p/12D3KooWA3cVSDJFrN5HEYbt11cK2W7zJbiPHxR2joJXcgqzVt8K", - "/dns/rococo-asset-hub-bootnode-3.polkadot.io/tcp/30333/p2p/12D3KooWPf3MtBZKJ3G6wYyvCTxFCi9vgzxDdHbjJJRCrFu3FgJb" + "/dns/rococo-asset-hub-bootnode-1.polkadot.io/tcp/30333/p2p/12D3KooWAewimoNJqMaiiV5pYiowA5hLuh5JS5QiRJCCyWVrrSTS" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/asset-hub-westend.json b/cumulus/parachains/chain-specs/asset-hub-westend.json index d168803e1c3fdec079ad58399a41c2a1aaa32947..f0e71981e7a1cbeae8bcdd4511f54036544ac0f7 100644 --- a/cumulus/parachains/chain-specs/asset-hub-westend.json +++ b/cumulus/parachains/chain-specs/asset-hub-westend.json @@ -5,8 +5,6 @@ "bootNodes": [ "/dns/westend-asset-hub-bootnode-0.polkadot.io/tcp/30333/p2p/12D3KooWJaAfPyiye7ZQBuHengTJJoMrcaz7Jj1UzHiKdNxA1Nkd", "/dns/westend-asset-hub-bootnode-1.polkadot.io/tcp/30333/p2p/12D3KooWGL3hpWycWyeqyL9gHNnmmsL474WkPZdqraBHu4L6fQrW", - "/dns/westend-asset-hub-bootnode-2.polkadot.io/tcp/30333/p2p/12D3KooWBkKDWhHzu6Hhe492adEpVV7wzuaWGxUfEnr6g5JCr7Gr", - "/dns/westend-asset-hub-bootnode-3.polkadot.io/tcp/30333/p2p/12D3KooWMGpzCmhD6np6eKqxL7AAunKn1dN86Dr7a9E2xgZ2rt6G", "/dns/boot.stake.plus/tcp/33333/p2p/12D3KooWNiB27rpXX7EYongoWWUeRKzLQxWGms6MQU2B9LX7Ztzo", "/dns/boot.stake.plus/tcp/33334/wss/p2p/12D3KooWNiB27rpXX7EYongoWWUeRKzLQxWGms6MQU2B9LX7Ztzo", "/dns/boot.metaspan.io/tcp/36052/p2p/12D3KooWBCqfNb6Y39DXTr4UBWXyjuS3hcZM1qTbHhDXxF6HkAJJ", @@ -20,7 +18,9 @@ "/dns/westmint.bootnode.amforc.com/tcp/30339/p2p/12D3KooWNjKeaANaeZxBAPctmx8jugSYzuw4vnSCJmEDPB5mtRd6", "/dns/westmint.bootnode.amforc.com/tcp/30333/wss/p2p/12D3KooWNjKeaANaeZxBAPctmx8jugSYzuw4vnSCJmEDPB5mtRd6", "/dns/westmint-boot-ng.dwellir.com/tcp/30345/p2p/12D3KooWFZ9xqApB1wnFYkbe1qJ5Jqwxe2f3i8W25F3tKNXy59ux", - "/dns/westmint-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWFZ9xqApB1wnFYkbe1qJ5Jqwxe2f3i8W25F3tKNXy59ux" + "/dns/westmint-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWFZ9xqApB1wnFYkbe1qJ5Jqwxe2f3i8W25F3tKNXy59ux", + "/dns/westmint-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWDoq4PVdWm5nzRSvEz3DSSKjVgRhWVUaKyi5JMKwJKYbk", + "/dns/westmint-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWDoq4PVdWm5nzRSvEz3DSSKjVgRhWVUaKyi5JMKwJKYbk" ], "telemetryEndpoints": null, "protocolId": null, @@ -50,4 +50,4 @@ "childrenDefault": {} } } -} \ No newline at end of file +} diff --git a/cumulus/parachains/chain-specs/asset-hub-wococo.json b/cumulus/parachains/chain-specs/asset-hub-wococo.json new file mode 100644 index 0000000000000000000000000000000000000000..6afb4f1743debef4b9ac05a4fae0626bad5057b0 --- /dev/null +++ b/cumulus/parachains/chain-specs/asset-hub-wococo.json @@ -0,0 +1,83 @@ +{ + "name": "Wococo Asset Hub", + "id": "asset-hub-wococo", + "chainType": "Live", + "bootNodes": [ + 
"/dns/wococo-wockmint-collator-node-0.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWJHWGqJW76brGnS4VXW9AFREKCW8L1mYmtTgChU1xrTCL", + "/dns/wococo-wockmint-collator-node-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWMFRVSLCGDgV9NR7whE1nYyRZvQPzPtwPEkatPi7N9Bpg", + "/dns/wococo-wockmint-collator-node-2.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWQXxjpGQn8MoYeGe5kfg7WvUcGtKXUihCfXaWooZc4TD5", + "/dns/wococo-wockmint-collator-node-3.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWMzMAbKH6o6yQpGCpNFqQRJCivCUDszxN31KCF1QCGj8w" + ], + "telemetryEndpoints": null, + "protocolId": null, + "properties": { + "tokenDecimals": 12, + "tokenSymbol": "WND" + }, + "relay_chain": "wococo", + "para_id": 1000, + "codeSubstitutes": {}, + "genesis": { + "raw": { + "top": { + "0x0d715f2646c8f85767b5d2764bb2782604a74d81251e398fd8a0a4d55023bb3f": "0xe8030000", + "0x0d715f2646c8f85767b5d2764bb278264e7b9012096b41c4eb3aaf947f6ea429": "0x0000", + "0x15464cac3378d46f113cd5b7a4d71c84476f594316a7dfe49c1f352d95abdaf1": "0x00000000", + "0x15464cac3378d46f113cd5b7a4d71c844e7b9012096b41c4eb3aaf947f6ea429": "0x0000", + "0x15464cac3378d46f113cd5b7a4d71c845579297f4dfb9609e7e4c2ebab9ce40a": "0x1002d526f43cf27e94f478f9db785dc86052a77c695e7c855211839d3fde3ce534caa1f623ca183296c4521b56cc29c484ca017830f8cb538f30f2d4664d6318141ac112d635db2bd34e79ae2b99486cf7c0b71a928668e4feb3dc4633d368f9657ac9d11be07334cd27e9eb849f5fc7677a10ad36b6ab38b377d3c8b2c0b08b66", + "0x15464cac3378d46f113cd5b7a4d71c84579f5a43435b04a98d64da0cefe18505": "0x00a0acb9030000000000000000000000", + "0x1809d78346727a0ef58c0fa03bafa3234e7b9012096b41c4eb3aaf947f6ea429": "0x0000", + "0x267ada16405529c2f7ef2727d71edbde4e7b9012096b41c4eb3aaf947f6ea429": "0x0000", + "0x26aa394eea5630e07c48ae0c9558cef734abf5cb34d6244378cddbf18e849d96": "0x0000000002136c670a700600", + "0x26aa394eea5630e07c48ae0c9558cef74e7b9012096b41c4eb3aaf947f6ea429": "0x0000", + "0x26aa394eea5630e07c48ae0c9558cef75684a022a34dd8bfa2baaf44f172b710": "0x01", + "0x26aa394eea5630e07c48ae0c9558cef78a42f33323cb5ced3b44dd825fda9fcc": "0x4545454545454545454545454545454545454545454545454545454545454545", + "0x26aa394eea5630e07c48ae0c9558cef7a44704b568d21667356a5a050c118746b4def25cfda6ef3a00000000": "0x4545454545454545454545454545454545454545454545454545454545454545", + "0x26aa394eea5630e07c48ae0c9558cef7a7fd6c28836b9a28522dc924110cf439": "0x01", + "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da92d548e67179c2d8ec20a201550b5233b02d526f43cf27e94f478f9db785dc86052a77c695e7c855211839d3fde3ce534": "0x0000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080", + "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da931e06c9b9fb6579408dd7c7efc6971d21ac112d635db2bd34e79ae2b99486cf7c0b71a928668e4feb3dc4633d368f965": "0x0000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080", + "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da9b46e8b924e2d9b5085de1837f6346bf1caa1f623ca183296c4521b56cc29c484ca017830f8cb538f30f2d4664d631814": "0x0000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080", + "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da9ca6e2d154871bec94e6469892e75acee08d401e08c1173a01aa54ac8f8e901370077df2f8ddfa3cbf275c496cd17fd16": 
"0x00000000000000000100000000000000000064a7b3b6e00d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080", + "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da9d6da2c551fad8f5e3026ed1418ffcf4c7ac9d11be07334cd27e9eb849f5fc7677a10ad36b6ab38b377d3c8b2c0b08b66": "0x0000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080", + "0x26aa394eea5630e07c48ae0c9558cef7f9cce9c888469bb1a0dceaa129672ef8": "0x959220776573746d696e74", + "0x30e64a56026f4b5e3c2d196283a9a17d4e7b9012096b41c4eb3aaf947f6ea429": "0x0100", + "0x3a63": "0x", + "0x3a636f6465": "0x52bc537646db8e0528b52ffd005804e9047ed30530125110582a271d6243fb8525139496cd603b47cb23211a000582c82f9cd0f269f878309c9b48a767dd4259df66ab796efb233f2eff50842e693b8abe55048f05e4ecc33eeb840f6bef46f6de64cbbda54c29a5ac13fc102111eb29abe20759a5357413f5eced701f75e6d97b186885b455c5ccb3b3003d14ab66cf2fdc4886a616e8b45545ccc7d796106b5e75ca2c8449da77adcae93dd229b700d643f8c96f57015a2420499930ddaaecbca9334703eb8bf43b92f2ea93bd5b2e8428f3da12628a9ff2a107e0b545e5cd43edf2b48d1c0dac8746f62f9facbf3ec99e0a61a26b901b8acdd1c0fa56ee729eeb2bb27ff9f54956adebbea6e057d46dd19ed753418b5d2da2e991ee8acdce5f3b8c9711f6f620565ef114616fef6b462f6a877e5d2e79c57fb5343dd2c1df4fb20a3a742d478623290f157e1ad0943b47f2aa9d1dbaf20a6acc0dc1e56860416f5fb9be80a6bce2ded4d3b4850efef65a8e42b9f12a3af435bf1ed23e3b7b7bd0951b3adae8698bca1bafedb69e3e7bede9478f835bf4b425049a575e31f475c9abfd75e686767be58ac0af3fc91e0bee86a1698acad2f63fbc9a3138afa62a065e416f678157d8b777bcd21695d6b7ef30100b75eadb9557d8cf17be5ddb4d19a86b1f85bc2a625fdfe1157bbb3290b64f431d65d5c6aa75e81a6febdbba32900aa96fad575f8e5db922ecd08fb0c38d57ac2d364353d6d4353557e95957a9d39611657e95807a536f7645edfa7395f8b565049adf5f5fae869df2facd5582ae1e14f8ead04392f3eaeda91299d0798e981e7e4739afae69fccf4f8df3b39df5d55356a9b757baf57de502a82f77e447ddf7b74a4c46106082d852818e7dcaeff66eefee36dfdddda93602a28cf38a9d5dc5ab099fd9532d20ac006a2166817ff63cbbb23bf9d140a7ad29635e5b53b4fc543d3b015e5b4a6b5eb9a11abad90571d76dbc52e7e8afae454fbfbd7d4d5be8f6afdea277f972437db97234b0fed2963b12fdfa66f4e81a7347ba8fae1c6415740d6eeded6bbd5d5ef4e5945573e7d7354db9eb9bacbfbcbd29a3777b13fee541acb53715f0ed9a72d8377f78ccdb9bdafaf6e84d65d50f3df0a004876f9fcaaa69c3b7cf38c3b70775f4f431efa8e3c489f3e6a3b7376378e8da6eed418f9ebe7235607f7947d8c76feef777b471e2c489f393f5edd05356ed0759b5fef39a02eb5a3845d3588549d3152ad0694b69e9a59957df186885d451bcd296d29657ef79754da90c9abad0adab6ba669ca2bdd5f57d77294570a79c5aeaead0637e595fe94afaa4383196c58a5dfd0dbd99b1be24f57870633de6095fe7a7b0abfe58624abda959b41b921f6205e32347d665fe62f8859bb3468ebebd5d0b594947ec26797eb526a0329fbcceed767f7703787dee66d0fe2edeeeef5ee6576f9fcf1206187ce7b649dbfc9debcbba4696afab36bfad369da42a7deaedc90fa72457e5052529af2fa29aba67c768d37e5d53aaf9455ec41dc1e6415bb00bad93db36b286db78e57ec3bbc6a6747f16a9f5db9a1861e1414fe094bfebafcfd7890ac435f875fd27e7fca0ded37f7d783388835c55f91bfa6c0ea5d33347d4dcdeed935c9abf9438530ddfadc9e7db62babd4b5f624abd403d0adb7ab2b57c37a517f925513be7a106bebf5739b1241da350f653b69bbad077f532248ebe68fb087a2e3d7d9392582b44a7f24c343d97e9dfda4449056e98f647828d7afb3e72811a455fa23197e9d5db9235049e9d78358e34d95c8fa6c5756a94b5ecdfd7675ad3dc92a750074edeaca1981ac52ef4fb24a3d8835ded6e3df9408d2b61ecaf5ebec9c12415aa53f92e1d7d997aba153faf58d57faecca1d59a55f0fe2200ee220564f59a5ae29fe8abad71458bcdc86a6ace900ba7575cd546862798002294c2a7c514104418610c44d10230449820a4a418410e40841c208
6226489620ad540c524da4d804f912a44baa8dd40e8234915a23d583d49a5412a92ba93641b0a4e2a45084940c5246a454904a416a05a92b525f82780942458a072914a48a4889204544ea88d496204ea49648654929118404292d41944831911a2265454a8814961486908211524042ca4a8a4a0a895495149210844a0a882050048141902f825c0962444a8a2052a4a808628220270812454a291544ca4b6a8e149690029c2060a4102788174196489d2088529029a9292931824c11a48814185f0f3e345f199f19df19dfd237836f071f0d3e361f1a9f0dbe1aec8862870952a8a291e181068f15de133b86d09474aa6865ec58e3616142e3a1d1bde1d4f87043c7053ddaf8aa300a5070506f744a3d92f0369a4ca7e1c3ca8e3578aaf04ac0e30b8bf121448f568a0b4e8d1e6cec68a2eb02cee151d1834c8e10261ab415de193d4b34989c22728ce01ff06871d2628ad3a34cd7dac4e0a1060b2105371e1a54125234e0c1c647123d6210a44a902372a8f408634715531a1d21b81ce820913325c74a8e928f367c48d18dc92122c525058649d0c34cea891d3bd8c1048f3382201144053ee0d0d122d5da2144cf14ed8c46c5cba2cfe420a1871841c8f070c3f34110227280c8b9d221a179d0030829287ac0c0c7154182d8c2c839420a4208522665028d083d28f871a647053d637ab6e84941cf173dad1e2f7ac8f474d123464f183d55b418f00dd412504340c1817a831202aa08a82304b1b20da15be0c30427256c32d862b099e9ced8c630b9d12df134919a022507cf123e48e04389078914e2f80105ca08419010248e206f82c0e1238814ac74657286f041c50714bd829c207c74f1c144f7850f25fa8a931c283676b6e8f185ef00d503d41cfc65478cbb01e33825c1d9745b745c9cb0742fd8c8f81af6927a415442e98047991d627690f154e0ade0e404d7859606cf1a3e84a073042a0b141729334180904ac8a8f01c3da5ada08220af7c40ec113d72a053e69bd263049f528e12761061471c3972f458a207971e5db81ff4d8d243043949e8f1c40f267e28d143891e4dfcd0e283cdc9049a117a90a047143b38d8c9c1ce0d76d4eca4d95963478d1e26e8712515450f2374ce48559113a7c7102825f800228545902a78d460871a3b64ecb0b1c1009503c9821d6f768cd829620712506eb604cb65a7053b2e40b9e1a3042928a1a9f041066a8d1e259e389a0c3e3578e0fca0a28de1a3891d2d76c8c06403f7018f123c47482c3a303a2f3a325b1c3da6f0e2f460a287961e59bc397a60e901c58e0976aeec28b19365a7cb4e133b5f76bcec50b113c5ce143b27d8916247cbce969d2476b8ec78c18e133b47ec9060078a1d26764ab0b3c48e2e7670b163cc8e2d76b0ec886007899d11ec3ca193041d3974defc3043478cb8844e1850c953e3a30a8a085b0b9a20a70a8f333bc6ca60cd6c0c568c0d63cb2c180b8325b3adfd62bdd82e76cc72b12fd82d56ccba60b5d8166c181d255071a0e2a0d490436c607440682ebdc4f64593e0b4c422d14df1408f979e2f3d9cf89125070e0a093e86f031a577c0549a081e5e4e4c9c94f8d6f010e3e30828858f2d7a80c1a3c50712a730a72c4e22383571d2c1c9cbe906a7149c707052c10905a72f272e4e579c5820e750c10b4cc20f32b83824159e25f0284133e34305a836bc07a81df07041bb00c5031f59b418ecd0018a4d2786b7f1c1c579e03f3865e92c38317614811bc309c105c14dd9e6d8e0702fd8caa43087b3c15ba0d49ce6f8318686841f65687058881e2e7a5ea02981c7153cacd0deeca4c133070a08a7383c49a07ed085715a024f16940f78b09ce0c038a835df18720a0f131d0cb42bb42a3414685e7498a1a30c9d317a62806ad36c78e4f8b1448f12ec70c143c8469035910121df4838520e9904f98614827423cd9065c81948154036d20b76042c0e06072b023607c606d686549256303798103038b020603bc0d86040c0dcc0da603dc07c80f1205e81418844c8206415094434024302468448453c41f4227611a788524415c42ba215714cd422b620868959c428a209629708457c229620728924884ec42de20be2185106d14c8c4114238611cb4430220c2299d88a5f442e220b2216114c5c0104226689494425229688443c220211a7442a31885804dc017403ae81338063c035200b6016b00bccd26d1a8e1642fba0d74875d169601852881ea3b5e830cd82c6a255d062fa0441ac48a901cbf4117da58de822b01fb01cadc47158094d8593d042f01bec86dbb00f98070c07b7c141805358058c055f21e1e02cb80590090ec32be017c0167301c9c02f980a36019f008a80a78056ac1c7bc52a61d37011f00a076c12568dd5c1b6b16a768dadc17ed9142c0ad6060fd8334b03366271b02a5834bb0218056c028201612097b04eec96c5b2599c34e18051510e36d0b04534400a039628a1c004248084b546cc60a4c
82e3164480109100a42c001628001689360c147880a074c49dbd7e5d6c8b8899e2cd140921e9610f1902489b6ad4313a224494c3c574e3a4670b243932346464e12908000e8c4a14f887268b2c46889069e10196980c9069af85024059d22104188ce107e74dc18192dc9c1c868490284c043d2069a1019c1a144443b247192942489870d3021ea808f4e10f48911932549443a242122da61059d37d4c90e4d9824f10012910f474998f4f0494d8838e0812536b80e107a747e4094f4c4034f7cc00127494f7c68a344493e6880095252d2121d9d1e10191939c901a5c30345226ad28127493b3480d3d98112e5d0c44993244b9230e9e1897268e2a401494d7c7092430e0f44878d0e4e7220d201051d37f48907888a923c79e20122244d9c3079f28408029e4e1bda01263d3c125111929309d4e0e9b0a144494b746062b4044a12221f887c70c00d3a106d60034b6e0880ce1a7d529403110f39aca14f929a102111f1d08489eba8d184c90e3c3a3ad01c983c69c20429e907a2241e30eaa18725499088743062e28350510e4e8c847a74d4e8931f3a39d025497a7470a01c48b22469890e4b928818a0a373830f30315ac224894913263c74d268510e4e8c34908428c9871c887e20d2a19386223949c0a663037d42c424890992131e927c58b243a706dac40993a425483ee89024c989d193244c7af825483e3c0f5d22230d2c496ae2c30e499272204262a24313264992f808927dc1c09c99c1975796a62d25a55f56fbb8b029d894ed613118249e104f38e18418642320bb550783988e62dcaaac18c6183386618ca92aa6ac0d9b59bb19630c638dac1cb9b556d6c13adcac3730b309dbc62b4015042075e88e54a8cc18738c10fef8f1e3c78e1d58373304ba5b4a4d15728eb67563acf024a1840da1005a0733840f7000d41ada00610d1a94cc3b4ed287b2895b636ec654595b99b7ad6b40b72d037837d86dcbbcaa18c6baa96edaad3d54bba15e18ebd5da596fdce5c5306c555987064c0712c5305535811baa9432c2085b428ca5aaa658e7b09a3055b8ab92554a0e9e7c4028197664960d777084124ad6b61f526e10c2eeae1322840516babb75e860eeeed6341d3ab8000440051580f0c05831e514987b98997bb8879bb91976c3de95b29b6163dadccd0d2137774a47c70486404c600844a70340e7a30304009dcf0258f9c80c9baab2911e3d984dba8c754b29314c37d9cdcc41308c21434c8b70f70eb6dcade5685a8f5e55e62db58a61d8ea5eca97aa5e7da95e3c545587326666056012424d6b5695524ad912c2d3099e4e10b2a6490865b7d692a1d45825b166d5b4d3c907c60cb7146facdcaa2d318961ccd82ac6184bd6d61edddd2db16eaca564960c5bb1865ace2a2f6b6429a584cdcdddcc2a99b937664e00f3a6da0d25ef76a79042967177376498a3dc7c623e0935e7aa0fb995394618e12e841884b065c36ed8aa1cc618646eb88bc55db9bbca61905b42c5b09c886152ca66868d410ce3d61377ab6a3343d81242c8cc0c5b99595519c39495316dd588618ac1c817e44bebc62ec9bdda98623dcd7573ddad90399390a5067748c99d20638a61580761da43310cc354b9b1968d2994cc58378432608a6dddccac90bb2133c78861cc52aa6a430821e466c80a95592a63aa8ab162caacaa1062aaaa182b3702989b9931d8aacacaaaccdacd0c15723343664c95a12aa6513562ca504ac57461c4b0c5604308176bacbba3d458752933fbc0585b310c63e686aacd50c21327a1b6435ed7d52d99b1861a94522a86298c5019c266e66ea8ca98546e86d80130ec93b2314cee462c0618887c9f01620e866111c3561802d43107468961a76e6ec6300cc3ba31cc8b18a6187635841a6c08b915e36666be1846ac195356a10915b6f48eb207909c7080880317d292a4157c783f8a7258a2012323270058fd40e104147ca07aaea41c963c69b20429294912130e70c0898ae7e36942d4430f4b7a9603272811d20e49909c242dc9c18991d1929e02e8408444a403929324ab020c80880952120006f082ba90b40489a809d1069082987400444f9e1021f5c001273924412282e2240722a4244b9ee890035112178a560518400f39103d01f21d98201141496200170a508001e84094c449520f1e40ca21851756407e70d281a41356051880a3860cc0852403202a5a62a481241b68c28487274b7840415d20e20093a4263e207d437648f20108110f49929a3041ea21490e4f9870a027531788a0101925f96109075030415dd88013a3241b68c264031b7062e463c779fc40a4431326499a3059c2430e3bece0e42380911193a21f023c5961003a8024274e5cd061091192ca03444549927e206ac2024f12d10656f851179e243d61f20311005ad001fcc0640722a41e7ea03421ea1d75214913274c9e2011414982b484c8288724441b5882e424e949121d8c98f090439224231f96ec40a4
4392a41f887ca034216ad5b60366b158502a8ba552a4b05851ca2a4bca25056349694e096b73a44881dd4a582b85c54a54cab25852a448814a582b458a9456a22c2952a45c523425ac85ac84b55296b54a58cb92c26a2552a448912245a5b05889949522859528cba444599912d64a59252a450a4b9522458a14d6b23025ac9522e5522265951595b096c56225aa4a58cb624125ca62b15a89b258ac84b5ac55a2ac552281988355d5e5c093251a0ad7d5552f30901452d714029db6be74799dd7969730af9c083ae8d1d7513d73a3b7d3402c2a2aa0c28b11507421860884a04b5e09e0a177601efa0eaf4e0ffd8b170fbd8781b4a5a4a45445087a4ad5402a04bfd99fd14fc744a75d0349a17514afd8eb76d8671d96d1a15e5b5544f1a6d756155b7e76fb15f90c5230050ac4f4ec083a6d5561e55320a60a2514b496bb81958430d706ba9c5d530e6b875eb0eff2a0fee1e117a35794c40f3f28fdea3b5e39242bbc7e5e3c0057b0b9e3c46118f8d5f78b90f6354e1ca14b9508f44944b7afbe2ea197c4efe572e5242083d0f569fbf4e73007394040cdd5b053bcc9faa1e89767450a5d1ebd1b584948ca2b14f8d1798e6c978fb0bf29e5864e4889cc18bfa39c879f974b57e92f4fdb4721371b4d9374d0a79cf0a36bcbcb14edb1e3ec9e074d59c35c5dfba18216ed35f9f9a2f68d55ab984be7cba14fa39f979eb9263dd94487b93e679e7c6d2029c42e650d50f6f7c3be263f7f7d45d0e307572e1bd1ddddebddcbcc0ed5582dd1a1d1cfcba4fe233c5250832a7e1d7a463ff0d7e1aa19dad8dc50b787b12fb7ce5cf3ee11bc6cd9dddddd650fe2f53615c25eac7801e2893aa6628ebf5e5b54bcf94582d6804e535f57f1ca0a95166214ab76d4e341ddf3ff5922beb6ba50691f753da90b107ab32b82bec2cf75f6c88a521efacc18a06e7cfcbc480f62574f7af4a0171b0b3742f2535233b0409fd9b70735f456786d95abc00a2f655991190dd59ef2fdcd557ae590e43cf4f6a0c087edaaadaa2a447db968da802d005ac0fce6d1d97bb2cfcedc0cfad957b49e31d2aa90e849f28f34ce6bce5c91cd4d26edf301f549a8df4f554874ce3c0502faa6a5cb6f9eb9a6c58d4d9eb13759cfce1c127824f3cd83d808e69aafa779e66dde1c12e9bb93bf5521eb996bdee4df0c8afca39e43296959d7721056a107e0350d4107fdf225dafce5980d3828a1a585785ac52b9ecb3da552c2c312534421144e78e3230bbc9a97ab943faaf49e61c34109afa29aa18157fbd17f9458fae82f44f8f351d198f9e8ca2b9e9fcae62357430f8f39ab275d295e45245e0e4992c75c62ce8a7daa2ce8a76597e972f5b998b3273fccbbbcc9faabe8e28e609863df0cc1b3eb5fec75917646b2d787649da3f76859def8cb8b3be5a7746d0db1d4693bbcba3c3ae48c489f21887364fdfa264f79e99ab292c4bc79b97a505f537eb9dd481e68338657a01c57ef78c5b9fa0eaf4caeea3d9ee295747515af2e67bdc9e35c7a2c4dd33c88378f477a06c49587ce51b95ce3a2e902d1692b8b150f5d83ae69acdd3ccb34d756f3a37e4d5f2be241827984d80943829d30cda56b99c9f533d357447ae6999a876ef2330f8ba267dfdcc77c73e9ed67cd1531994ca6af28fa84fe8455c313d66f5e7439ac013af6cdcbb72ff3a0fe4c6a71536efb6694dffcf226fccb73bce62aaa2eac10951cafa2eac2e52b14d49ba7c5edf2da733c15921e92ec2be21c3ae7ca0da900a6e3bc2fcdaf6f72ae7d3c9a631e9140e73e1eed4322fd72a95d9ec9b122293124129347a067dfbc5c734df3e6e63c99eb6b7e793cd95724ba76b97e96310f5d738db722936bdfdc9f976faef566f2369ffb1b7345b423daa72a646ade9e7d2a6466ded0b56f9a7cf3b4fd6b7fffdaf9420cafae2db78581a7bcfa4989e8abff0c91b22c136f496041d358a808b11444126076caa6adafe728562d52a7eb9aaa7227f80a45c7abaa6ac7aa95ac62301a3b3b4f8cab97c7137d1f734c07fa0adf8e397bd1972fbef8c2d653e74b2aaf5e1ed497175f8b1ebbb6513af68dbd45b8c508828e5d8b5c11557361aa1347efe757bf8e552b7cb3b0ed1968ba5688e87805af2d11ace0198ad71613585ef5dad2d23a9214849b9fac31d74f1fb288f1ea394b44d730786d91408edf32af2d27b478165e5b2350633923a632d6fc919277f3c41f4939fd4c6a430d14143f93be40a1b5f133c98836e2cf27376841005eb0f4f38998a557a9a4f4aa715efdd59909ed689fa160f3de7befbdc61b7b91bfb9af18e8b445459b8f69fcba16b7fd66cb04587e1daec78324a651d4aecedcd084c2631dbad6de91c68913a706efa3c76fb25e5f831e141e58fc39a6f1eb253112fff63466cec8c6f9a38d133f9e23ebed46d6fb4bd2b4d5e5ca27f1c76fb243d96f677d7bdf9ef79ca4bd3a2b6e5719f39bf3ed6bcbaede5f40f6160b41a72d24ac78e99891cbe4d29b2
62fca5c7a74ccdb35f636d74e1ecfc9f527eb4f8e7993f93bca792849fc27df723c9e93ef77aec18d5d638fe70854527acde1f76bce1e3f73464e9ee3758e792767afb71c0fbe0637cc53237c8ed7afc50df354099fe3f16b90572306afce37c73c5dc2e7685c8627cefbb9cdd5fce6b1775e8e07758e17b50873e94d03eb0be92aa51659c7963b82b9e439d2bf79fff64dd61825a5674f736db9289de6274ff329aff176f234d71a8ed72011348ed7e276f29e600f857f0348fb9a436e08049de61ad45cd3d8bb90650a27cdcb2801f127efe7359337e167dac7b9aa1013e79bc99bfd996fd966ca5cf390368e63df347693a71c0dca42a6ae1bbf8e55f0fbbe3f14ab4e8bb1d1903af8da42a20b52175f5b484ca12a24058edb368ef3f576e0388ee35c53dc1b4d339934cdd7db41d3344d734d6944c83229b34c3ae65996652e9548e69acabea26edff0ac00a12dc7533c8a552a0463660fb20a932a04f3f576d8017398299121a94478a069063a0c732dc7887e4abbbaafeefeb4fbc2fcf23615824149d4690b092c1f1d831ec4d8e518e69767e4c2780acd887e8e6beae9138f629542f1aa42d6d5db5f4e85e46c2a24c7d923ea4ecef9e69a137526cf5c3ae644dde571cacfc9f14d89b0e738a744f4739c73d4d0dab5f6728e60469ab9ec7296de994bbfbae5956998f4f6a0ec4be9d82af7a9dbfbf3975f9174cc956380ccbec9fa6cb328b2bba507f55194e775c947fd2cd983b8878e7db51c55f778dad7dbd9e3696f6f86a0576561628e797bfa9863471be72552d2d6c34b57ee8892972ed508e64de945ed986bd2fb908ed48d92d227e9789377276d6f72e9cad5d0c3671ec4d2433a92e15985b4275d2ff9db330f497ad2f526570e4992cf1c85cf5c7a4732fcaa90acb34f958576ae487429bd756ceeab476fb2af2b6764bfdbeb58a5ce928b91a169d4e9e85a6cd7966317d230fe26eb7979bf19e5d7d5c354551dd35fef28ca6347fdcaad07b196a37ed4a3ae499e0a59a59eeaa79763dec41c4af7465069ca4ff6230cce94380f45ceac08ba74fd66f7eaec6dec50b0232428d83f61fd7e43ea41addee59cc46ff2cb4ddfd1c279987949fcd03f9e23dbfa76e686da2f4fe7fae5ed41e18be7e5df3c8af2d027ebb1c5e437a3bc74f6969959fa3e61bd3afb51175167bf8efae1c3afa87d5d392410c97a7f45eaacd95e705895c312b41c3ae68e38e34aba1c94c8fab71ecac5359703953aa5df557a3d82b6aee5e86faeae3f9fa9c995c86e9eab909f57bf8e48cf932e3b88d7d35fcea4998a6439fb9a72438ae974ac3279e69a2f37b4df64fd669ec9931e94fdb99ff97a50f6b3cca5a70fb5e6ed3eed77d4cf53a47f7d3f5721eafbd2cbbe22e99a43ee48fee5daa79ea3516773e8992f778479f6a9af26bd293d337d93f5a61d3269df8cf29aaf9779cdab2caccf05b0be785516d6330feaa328df599bbca3fe76cdbc1ebaeedb83983d7dcc13f297273d6d37e9cde8eb98c7b3dfd1fef5691925a50fc0637b51a1359687dedeec8aa2aff0337e7b8555fada1a6249db2bbcd2874ea444907ede8b3a4b79e0ebaadff551a92244e8722b3b84aef06ae7a1ff50818b6e1afd5e6115f49f1dd840e5f2f520e6a19b7be5a14fddc1438fa2deb58f11b8b8000a178059a3a4247439145a4f09568184144044d18510952a4284d6adec1052a752858530b78200a1f5cbb38200a12857dcb01ec586f681be4b0ffdf22857dc80f9f545096a1fe8d817e58a1b2edf2f8ab70ff4ebfbd1566b7dea921e6108197809020e52c0061afed055c0a25321161367ba78e20932695850042115529f3f0f5db91b5c494968aff06a8587be5898e1139a2a91f9c3ca5999a04708a57c47371e7e715e1874ec6b727df372a3c6c2cdc7eff2d9df8cf2b091a81949fcf57991d1a794376271f9abc901a8b43efa84dffce1bb899a81e5a3cf0f3cbcbe26cae6e3b7f4c58b16d280013c62ccf7bcb68e78e235dea2e7258aba949c3871e27011ca36ce7b51eaa1061641f84009fa1e759c38ef4509c81127d0d2052948971598514aea41d9bc17a516d280811294035069fda0a4666011529f1ff8e8aa30a8b630b565840d3e69df0b10ca4628fb94948d907e51d4a53479c204379802474949487e4a54949484d46714ef6ff6a0c14d7aead06b674e022da441268e90ba7212d061430d2ae608224e1c21f59d6fbebad01a422ffa2e2663f476e975315ebc2d37c423682efabeb68c38c1a358151d7afb5a6fd0af08a1b70f9975a02e95486f4befccfbf2bebc3dbb5c5eed7d7963dedddae5e9c721364110b35fbe97f340dfe7e781525c1eb16b1f7a50aa10e520681a1d2a91cbd5a312b9bc17581545f03fac8aae5c4f9983f4063279745503691ebd87579055d2a30f5122ae42a447d7bc8e7da49b3cc93ed204d94028f6911e1dc5abcca3eb9b9f2f7c741394c87ef44b7a6bb93cc5a4b717c1e67b58259d7d
7d39e93c98f7f3f360dfd1c679ccb56504095e5b468ce051ac927e1dc1cf3e65958c2a445d39a842d483f8f2f6a5302b7263a129afb076f8cd7db861de7e34121d3a47ef68e31469878ec41bca31a01dba74f84df910f3e58e7ea002c414a53f9a1d50c212e2fce54f587f7d4fa22babb06f42e7e819492a404cf9a38d337f600e89fce60f9a7246a243875c91cb5585cc8879fb9777b4717e3d0ec18fe7483bf476f815b1c3ef28cacf7e8ecb6068986bcb0d69be7974e971ae4ae4e4976b27af08ebe7e69a2b57449e8aa4ab6fde94beaeed6bcb082c1f55c8e541ccec9c27dde4ed6b9a720c60cf32e7bccc4d9eae2b37947d3373cdbc7dcd8358536f73cdd3d79433a2dfdc57df5ce34d33627a5521535dd376d3360f62dd344f7a1babda75e836d7dcf4531e91bf7d4599b37b33f3f5a0560e097bb6dcd1c679930775109b3cfdccd397fa97173de84156e9b6bd45d30a74d027eaa31cc2abf8f3d1a3bf101dd5e6a3f730100bc5183eb2f0d13b5eb1c718a36b2a7e45da92a2a9aeeaeaa029a6b3b10aeed8c0abe95d80fc54340f3d3a26a33f76f9d4348ff9105e4d5410afa69c8157b3e3815798c7c02b9ec71c075e69ab88328ff90518080ab5a6d1328fb9f2aae7e7cecf218f450fe21e8d3d2ccdaef1986bbcb1c78f62e331d79457fb74b4715e31dfe115740c73cc35857d45eaa6bf689aa4d316124a3f650b8a38be5d9fbd67d75ea4e2959538781dd5a34bcf3bef461a85ea810cb442ed5defbc114d630abab9b55689a4b8d3f4a421759055fab9f2904cb4764d4d6d59e96e9e67f7ed67f7a98d553f1ea0714491459a23b460892c4010524f07aade871001068885d863686f7ff3f1ecdb7edb6b8b882e0f3f14ab565b72687a818e25afb6209aded0b16ba72cb4d796952f8fead158263841133e60c3043188020742de6a0b59b5906b80ac624d53cec8f54dffcba5623a9055df4c6a93a378a5797b0faf300dcbb26f666ef20cdb58d5f10acbbc5dc3bc7d2d63d5e6cb1941fdf6cdcd4d8e79fa9b97d49fb9b6acdb27ccd3d73c7d5386fa75d94044b890b28faa90d9f3fbcdcba3b76bbd7b79fa32ac4b2f29fe6e1a6f3ffce5ea4dd99c623aeb0a398d3a1bab52ac6ac7dc59857d41ad1ef4e6befae525f5abafd7b18a7db921fde22759c5ddbb6c20a8a4a4a4a424d4ae5222fd9d7ad3c612b4b7d7806e5d469756ba3cfb861a62e9d97b22e41ad0adebbef7a0296b9a6eacfea1adaf67267e93f5d1d7d37a5b6f5fdb343bc498e686d6b009cfdd1bcfdee3713c0b1dafa6f6dce388aef1471be7a792e78aa89a57d7e206b75c5b9c02dbae21c0687d2984715338a96b4a443f55211b0e63dceb8a314208638c70ee4479ec9b50cab8d7eeeec28d0b312d767b19ea1bcc0c61733333ef2e332f431823f7f2a2d1925b8707569934e69598668a30f3a4b775579a5e0385f0eeeeeeeeeeeeee4a1e4023fa0ca1b2a0ce0558255d51d8e5ec3b17ccbc6eefbeaede306f7b87b57bee185ba1f636981d4b505e4e74c4302c0e613b3e74d03c94bd506e52afbda48851ea60cebc9b2eb7db3697315ddea03777777d779b2bb2bfc6183863def042e9b5e505989fa86f2fba7c176eb49ce8f3fa26bc2ee831865756b124ab262bba4efe8d57907feebc7a81e5b5e5459a47bdb6bc68fd7247b0387120906c9f9e3b51348839f64dd66b92c782aaa3ac520c7ed0d3c0351f5d536ea8c7b5ec82f04b03311828a6817090f08b0acaeb877b52dcb52da373ef0a200e3ffd8038f47520aedc0201e2da6e403c7d209eb9a69a6983bcca5c5ddb366e0a795dc22f2b1d7af0cbee57645dba84d0cb3ccb56582850c75bc8d60f88d735100403cd7c74ec9ba808bf44f825c22f53df446155f41ffcd2344a441f68d50788666380f8dc612060b0cc817844c1198b133cf21a5eb1cb2d3a321f4ff0c93278361f39cd03f1c96a50b8b8a12324fe40540fc431cf3c4db9ccd7839e7946d6a1af43879ff42b3bc19bfb40e4976215100f622dd3cdfaa42a0408dc80b01806c3aae899c761dce84c7ef9d437569603f269bca24ec7aa8882a360e50aafb63410fce14df7e6cdd43753f553dfcc1f93b727df93af5f9bea42bdc9d73b5e20bebef14a2e8f2f37d4e3382c0c9747af8012811edd02ba3279749f92880e5705c2d4675d879bbc566d615d7aaa42260abfee99f0eb9eb3403c15c274d2a39bbcad4ba72f3dbb34b92d6744fa3a9c991526305fbe4c06c3667889d3f0183ec368b8f5b1ccdb6003abb4222bae2ff0cb97cbdb77c39d7250701478c7b4ca30981320ab4c2e3dc92a935f5e110a8eb40fc46d905bac79e9d0b5131756751becf2714bfb44d72dacca41d7c351ee63fb09bf7c815f78753990e7d161cb0caf7c7874b8c42bcda3c330bce2ca3cca273cf3118af908c77cbc723af8057e815640306dfaa6d106dd049f0c463afc8ae0273f1a5885c2a7ac3239902f691f5ef9b221ba66c32b143
c7ab76920f71d0e79253d3ab4d240dc05889b3e2b1fddf3a052fb44d738c815b93c1ba29b6ac648910dac8a9eb44f03aba25f5f11e4b2a581b84bfb44b8e5a36ba74d37cc64312956c586abf1c47304bec9e19bbe1982bf50f0da0daba2bbd771b04ffcf298f4b80cb7784c97066230ed134ff8a67c20dce58737f77ff8c515c1fefa1a0e19dd3a8ff3f01ece60d0701a5ef1787456a3b2f3d9f944fd8eabeeb872433d3c887bbc28de2ab12a5e1c8369201cda277a940c067e3cdf0fac8a8e721f3ef5cd167bf2f5e6909c9cc1f08af3e81c86c52811fde89143723a82dfdc3cd3ffe1e9fbf05026785edc7b78ca2acd79a43efa8e87b43bbcb9a17e32988faec343c270c85ce79bb2fb669793e6a3cfc03ed04f9cb745cebee9aa5fbf3ad95d0ed7f0f00a6c689a9a0ce62f1d9ee3b0fd724d537ea6a2654e74ea41acdda7ddf97a2b86a7bf22ed9d775f63de1e14c32b6cccb2a1405dfb5c8ef2810fe1afcbe726e12f577d0cf31c9deeea7ebaae35cc83aceaa18178f80edf5c1b68c7351fdf6e721edeaa4ffb8eceb7eff0547d66f66354878e0e9ffc05e51d4a87a6f3c29621daeaa266bee81298bfdca4f3c20fab303799ae2c96e641d3b4c53246e9db3bd730cc14211b3367201aa806b2c93153a6753a6dd77d50166e8b65b1b0eaf20bfbe67ec7859b6fd7e19a94daa6c3db31acbafca781b6d53e973b4abbbbb821ec5b316c74db642b6c05afba6f76dba681760b5b69a01d7e396b612e3cc574ce734cbe6b78d5b90ebf7c3296bf78cbe42e7f4dfef297eff06c689fcbb1ddd240bba67d2ebfb46f8be9e3be2d399fceb7e52fd754a56b209cc79c4a9b2b6e3e6a79cce7bef9cbbbeebc731dde9befe2782c8739233c7f7e1aa59ee7685fc7a1c06f3fcaf9d6e14a7cb91ada757c50f675beb9deb97245ba8f0556615eb41f0facba5c0756715c664487f9aee9b4d306fff265c32be9972baf329fdbe6af53c3a8ed106b9dcb990aaf30bf5c5595f3f6fabb408e37f7739c39ccdba4cec9b5f502a587be62d48c71d3e2d5d4a53258ccf06aae965dd3403db4cfe59acf0ea58557397eb9a77ff2380f697fee9abf7cb1f04afff21e1a08a53eed976bb7ea3b93baf9cb95f894975f20f3a467c3857d0ce6350529cde2f8042eb4ddba897a45f973e63328117ecd8d7ee055bbe6471408d53e9a6bae8a3a437583d77e5ef3a96cbc06e73557e953ddbc16c77a5b2cbdbef1da12c3e6b5e524c7f1ebcd0d79139679d862d57af3e7d7233724bf9f0bf25e7a5d279e23d74b872fbf29e5b10fbad1365669ae52678755976b4bccd2cf21af798c81579acfa066d48c4fd4a31a08aacffaf6cd4d62dfdc91dfec79d46b657453cdbcf63b03ab349f28ff950a24d5675d25e6d8279d065e5d1ff41978255dc35c5ba58b0b2ed63c17695e657875819a576540af79c6c099319f39743272a8f9cc07e65387a71d7e8c911bc2fcf229678389994f5583691f1e6a8169a04dd33eec7c351894fca6ab7e36988b1bc29cb5742a84e936cdcf4d837d4f414627e7a6815d9a6614f6cdaee19706469f9b66d72891a37583f95c36be86cc611a5e61cebe6a78059d7dd7281124d9d166f09b9255d82715f69a6b0a6dd5e095dad35f4237bcc21e4a2f23418762d50eab78b98e55dc1edd216c1ffcbca65ba5d39698d69bdc4deae633d75cb921d6dce4ca2bd3c50d996860a92e7de6d3c7af9af9cca7ceafb63ef399fdfa2ad090f6c986f6e78734e7da4326ea35de4c12f3214a843ff30bf0c0abf6cc0da040b27d32efc8a064f0997fe653cff84cdfd07cea9acf7c2a9bcf586055e6ebfd689f8a5519d4d9587579f4a9facc8764dc37e5cf219fb9d47cfba6347db3d3959f797346fc4d26d77c7d4d269349ddc43234e8d0cb7f1ac45af4a00ae9de40a7ad15b079a84494882e15bfedc92e0653c8065219b0198384166e70792384807740ecf8cdfdf664145d34025555d715e5215428b1e566d08f5f1287e4ba3c7e50f8615cde3a3ea253d75437906a0d78f90c4de1eeaa105eec5b554510c06f0c5efdd1575bd828cfdecc45ec72edba30ec3beabf1442d6b5fcc551c781f3aba42a040aef878479927ca4a48e5bd86f42bf8e40f9a9aa26f4cb9bab210a8cd0973b1282c82189dedf84df648690a63923ac87ad611bf363aec9adb923b079fb245d7e47fdd8c58022782e183755222c375d18788a69d38561376e83afbede2667d0ad4be736552226ee503082c2cbef08853f757fb46e5e1eed9a971ad77cba62cea64a24be46377967a6ec9bcb7135446fe8d293e12567448697df910c2f5debb45ec80d4f4ff8a15c2fdbb12f7ef093d95e37203384367f390f922b7a43065c3d355bc798abe1f28d2ebd9cbf3eb81f3f69248785ed6ea1051c175c796d6981e5bbd796164a986bb66c3a19a48019fcd24ccb396ac8fcf9a9fa9d3ff9ec79e89ac93b719c07a5e66d1e6495941ec45acc32e78ce7fa7a58b5ce
7acc97536e48f25cdfb6f49a024bdb26a34ded591deae75ca4486a462c7b8657d82b176d4ff10a097abb8a851f5e9dbc670cdffe020399bc5d93de3e12a8b9d4fce4ed33c0f4cd9ee650a86733cb21814bbd0f7ff3bce4b8c9f362c585721c7a4752941e6bfd64a5efeee507bdb9c5cbdb88bfc937bebe69fa784c1ebf8b39d327a11ce5bc74ce7b494d9b8e43e9ca9629c08c318366cd1bf04da4c22bf6a85cb1791517ba025e1c92cbe137bfd74ede3eb7c0cd6f9e7148ba0cab7658d5be7d274f3fe33a56b5ef19cc1de18f0eb9212423fa263f79fc1937c47daa2d4ccdd79743c24842f09ab7195e6ddebe7d93915c5f97f9a6f4b6056bfeda9663400ee6ebd2e335797d860a612efb8eb6cdb0aa5d8bbaba319e2074cc837e798dc69b21f8f8f0eb2515d24bddecd6b7cf5efaf66e75195e99e195462c1bcb2bd80d84107afb405f247433e7a177b0bb7394c80f76611786f9f6a09bfe18d6b19052753ba82b5e3f49200219dd0eab227345f8f9d32174dd0e0a4208d52c843ee866f710b643873146e8c55e925517abb8a18da39ba8bf1cc5abcb7b78c57eb943bfaecbb78e579e0a61306c17db6b061293d0e4d8857d451886993ee81183df0eaba49452ba94524a99659964e616f0d00efb44ef58157d79d04dff1823744f8530ddc597b73f6da47900bcb6c2585184305cb270f3abbc7d2ad2af6fba22da1bdd099aa6a0c211012074537ea330103dc8aa08db74b3fb181dd5edc4285ad7b1ea07ddcc79ce51229b8376844f33a2bf46f8f7db41b7ae3f3bdf1bfbb4ab64550721d538689a526e8dd0c120860e1bc002309f7a6d6161e6f282fd50214cc77304c6af287e3cfc79b1e242d87775d4154277e60c98eb2bea9637053a9255d12f9f7259d0692b8b2e3fe5840fb3d802a6d3160bd8340bd42c169db6b258f39f459a87100b330fbd87f59b3baf46d6374e05305d74fd9455fda93a05b9cb14536801a3050c9515c4b1808e612320e795cc1071ac5ad1b16bccdd0c61bf7e33ca0759d5dd2755218a85065df7ade8d6b55dde20ab34e5ed863c4762f4fef8cd28af2a04fa4235033a8a57ebda72019787dec30203a950eae10b9f825adcdbe786146e3ac44874db71ab6fb752da2a29294d11d2874090976324baae5500d3f93ac3dd4253065aa59f21af03f889fa07722525252084d455410c048574a4f84118379070c40da4107241d7f00111729439c34d9b2e8456bebf72eb3f82152c7d005e5b2a08f35a8e4ea3eddba3a641ef08f692bf23ecaf29e519ee15cc2347ef4fb785cba117ad3c841ed4456c0bd8b20b34d574bb9c7dbd9ddb6b1fcf0cfad28de8cb4f05309dff5c312fbd9df59837fd31e94dcc19f3e8ac8f5e527cc81ed4acb89b46cd6fdfe36c4605537c015e5b57bcf1578cb942cb5f71e6b5d71c6ed9a587ae99e0f3c7e453e5a82131f00a7375f63457ffe13e1ce52637a1b32ccb8a389965dd7bd47267ec71f94d7ece7bbec9df136e716e469ff18bd957d4ce79cc5c831be495ae725cbd7f62cedeec9e2f5694fd786708fe79c2297e5e9ec445994bf9e8cdfefe66c726e41287e67ef385f1b7cffd26dcf29727f1c77509bfbde136dc86dbebb3e736e1af940ab4d13367af178bc9e4f2874fe9c367876a1fc8006e9d3db667df7e3cdc6291de9de03a50f09dcd79a87a1c618f35b0475fdf300b43f455214c97c41f6712fffae5136e79cc353f71467a3c7e45d133e77abcb9cf982f16203ee56fae0cb4a1f09df0cd4e7e53465e3f817df66c9c11f6a295ae7135b44e00f2fdac9f74f6ebd9c1d1a5c9cc6b1dafe73cfd8e1efcae2172996e0b73c3fcfac51961d7fcba9cc7c33ca8d9376f7afc1ae8a77da09ffcc737a58efbf866b7c351df4445377dd3d94df8a6eaa1ff349012ceb31edf54f137553cbee9f19bbef34dd48e6fa2747cb3d3f966f7f3e7bbeff4e5642699699aa671f2d36261b83ce89b70fbf85dd1d9f9870a5a7c45d1315fd75cdbdd6500e6d9a7f12665d74dd46bae0aa4eac3aec5235c3657cc671f8f74cc2f57d240eed0170bf4f6996b25059fee3e519ecfeed335297cdb6a1fe8d33f29bd4f1b2e16ffb47d26eae762c9fc82f0393ff9848f25c5d77c26c5b9621ef38b33a27911f7993c72356caff9b63887f0333f7d13feb5ade75ccad93d549f897aee9b49f133dfbeb962fef2897ae8abc5c2a0656e72cc157243d807b7fc7a1aaf971ef8e89a2ef36982e0e391f8f0e511edb16fb5d816ae6fe935052990d5c33c6f3cf1f34412c3c84a4a4a4a49ac832762803134d7aef5ba7e3bb73a689083db0f15b8e89ab92186be5cbb724774ac9491e2b73fa3185ec74a99287e1dbe72fbe507d5798a57edeb2a16e2e7acdaf652ac1252f3c40dba94e08a3748f0c346378424e067f4f331c618bd5de30da4bba7823efd53ac5a6fa0ae81b0cfdb675d0a0de02083861a3669d84028b6f0b1cc47d347333e1a211463fca6d1cfc3cf5d3f4f7
464bf4fd7f052aff2268e777a68faea7d3662673b1efe67e33ab34a883c37cf34f53bba6705cd42fbd9b8b0a87ba01194b6e8735d0d6ad502c9e75ef33237f673eda8b0959616976b33a9c3c5713f38f8f235096146ddc3366aad151a5d7f2fa3e372ff09d171eba3fe73ddc9fdd7a5bc15e7804f3b12b807aaf68393f380e1969dea5df842745ced51f673716c755e063a80dbac97c994aec9cb401032eb2eb4826f84e57237fba5cb80cbb127b01b8b3909739f4cb9db855f7437d476ba71b97b6e9e9f5615f426b4f5662e1454e4c7f4452eccb6ffc9be61ddac4fcc5040db9bba2f5ed356ad9b3edfdef424f4fddcd21ffff11fff712940fd45aefb74ff1f4aa193217d81fc3b14bec89107f51f2ca2db7f5a51b6d9cc7ed31289d76d96c1ac33da3e0193fdcc7ed0ad8ee9e94bb7df0fff9fc843ddbf54d109c8bf33ea295b69e6d5a13ffd81dd6d93d7e4f1cdeee6b9dfcd7337b5cf979e7ff46ebef3389497fd4068a2f329a4260be4bf3d9d2aeee21f7cb85920a790c2ee895bf7126e6feaacf63744fa721ec796c97edce5caeda7c8ddddbd08143ac50a83d8f2a1e363000b112e229a103181d20212e82006145ccc20091d62b0a48913242c405d8ee1d0c4f5cb31264ee05f683f3f0395c00e85faad1e7a10872270c0b26a23eefe6cafbc62fb8eff98daee2ebf050984a83f3e34038729f7ab4546b75f5584453e67f64a0b42f795114909087deff44b8f9b6beff4b730a6eef4576f774a4759209144d1d0d08b5f4740fe2db82d81002e84c5232e445df5b3dea56347e4f9be69757f0e7a5e6babbd7eb2314b29a59452caa710ca876c81e4bb7f0b42574a6dce39e79c734ea6f4e79c43e6d52cd0fc6cfedc3408e9fb440ae16681e86bdab72074a9a59e7955b56a1394406c5bf450e8d5d65ed71f3d9d1ff2a3058a2fad4e5b17bab0ebe19ece8ff8f0210a11a81fc8bf877fc34e15d38072039beb830548961baf70a3fa715207da5ff63cec5350b3e7ac1eb35d80e3c6c295331195c59bdd882dcaed506f782cc09bfd06335b05ba0761191bb37a883832a09ba73797524aeb0f5f4e914929a515c2dd1891b01139a7121dad22035a95438d4f99d8200895738c105bfd21c456ff8ed8ba41fd7694c05dab6ac5ced560c1e6035e55edaf81c3a2f44607af56a9209bad55477777e3d8d1ea68371e54048fdd8daa686ca7ce5e8e5949e2320036669a3ae10667628bad1471e19543549746f57bce15792015f8acd8035f3a2e8435c4c8d37101f5a3d7dd5b273ec7ac5cb97c5da8a106268473ca19b7861c4f71072797632b7072755c8e59e1e20ea90c816d8b5919326f0b383693564f1f65cb7f116857d58da69656a2a3f9da536dfa4f6d121dc13833abd3b26374629b95e8c8e5db8da308fedc03a3dbac368cc0c62c8f2d2ab35459e7131dc9ec5954056796bc11b9ebb18aad4e511cb125bf155b3ad4afd5922f694b84ece5e37051f99c7c55579f088b336c3ee08d3a57beb81cbb72c545c0e5d8152a746aef1e07a5a5d2c9fc8d47fdaa08a69fcf6a51b973c6ae2c4958898eb46724e6c95eabcc437a268fe8a875a2151b57a2a3d84f74c4f07915e3cbe8c13b3de6563501059f048a59f6838478853208c11c51ba9f0b07100df8c0b123487ae2cb872b378ecbec8a155bf11b50fb617f8540fef287bfc41179dcfa0c153ec74c763da4f5c14952a09815e5931a4585ffd1937f8d3c6dab6bc110c2673845e50f7a28b850ba8c3540f51439f2fac7070448a2c667f931faf3abaad48fde8f0265b9abd8825e09ea105ca0b8f02bdb18637b6c8830b121963ec6311bc7addb6726bd23082ac78630ba2e042f81455ce900861152186152050b2aa0d8e0743936c40d3dd420cd10479e644963a4c464cc0f323c7183105474a00249bd1c33da6224867feeee325466668fc2e5428352178bc5c69de58f8158c70c3ce9c10a1bae4c09c24a961c60335471819ce189519635555470b7cbb12a501a1a195549e27277a476cfcbb12a4baa3881b033ce609d11132216841a5f7221ce18491313628d106a9a5083060d0721850a0b7460c21a1a9e10c2488c8a127412c4951bcc28c0e55810484140d18519259792cbe51a8352178b25c69d554605676c10834a0c48c4a092840631bacb312a405cd4e5189526b12958623e34e1654c81118589a960e86a97632ab8329f764ffcaddfb431eaa317741484dbb6f8db6b36c8e9db06c18fb6d6de993f9f0046f89b6cffb6451b9f53e36d7c8d0f3a0aa282f0fd83c0777ff89a0db2fab641fd2b0bc4061d05c97968835cbd333fe76dd89cf9d0ceaf6183e6abacc9db1ee59dbef3381b74a45301d36f1fc4f49b0d3ad23e887bd36b3608ea4f360816f5ce7cd4c38796b3412bf4cefcf8db66a30d8243bd337fb345f1a10de2de89366828e82808f7d00675a077e6731ff4d5957a7e7112cef36bde3cbfe07c9f3679fc4279fca2de47e7e3784147
41b6bfb141f0b53dea53de0bdc9bbe7a2f1c05e12c9cd23bf34dffc21402b400652dc4d23bf3096004675f083a4a815f2f937d413ebf3e88e5be4e1ebf3a8f5f5c7cf84311d783cbb11f90eec691e3a98ae417a7c0afec55802ff962d64409759c43b09c1586cbc3804c2a7ce67ca0925d8ef920855e8ef9e0e4ae78eae5980f45dc556ca954dc3d91d33c550caaf61f77551fb897ea7f23f2401190fc2b8a10bfbfb626e51639ceeaa810bffb14e27756e86ade4737ef83577b395fe89a3ce642659b622c636bc5166c4fb6ec9e340e81408c9db58c2fbd28a394d2e37b73941354f85e21ec87d2ec933c7ffc7f8aa8c77acda3dd8aad222afccfe5ea6f0777d140e4694fc54b5cee5f8e3591729d68b9aecb31273a5c1f887a58baf2a9f65473d8dddd6e3f95db4fb3707624ccef67befe4ad43ba1847a71700e20d588fa71988fb9505aeb6af5df0af3d33dce71f95c492d2e5a5cde700a17ce5ce9ca29fc19fb97aaa4a454f453e444165d08933f59d7c3852bb3222933e945ee45b248baa7a103951c90dc8f48949e8e9b7dbfbb5377ff22b1fb33cb1004ffe835991b63190842fcec63d643987b80cb31295de00b3e7cce05578aad289dfbf491e75f30270ff44a7f0533f86c03223eedc75dea2d82f6df3ddd2fe1778fa655e10fe1ac41f5f9124678d7be1f47f46a5cc96f4b1b5ace3d6700d21cba7f9194a80b91e70bbaf37d898f9588c0db1e77334f155b120ed51588c49e1adfdbbbb18aad88458d485ce667ef9efc2ca34f23fc0d72974d9841977186c8e3d33b9c5742e4690b2184d095a82bc4567ccea77bc7e57a404602b2421aa184cd7e9456cd7eabd57fabe5ba2e38fc2864c7bda1f20413f842175a874e9ea8aa1cd46fb56a4aeb675ba44871fdb31fcc03e68023f8300fc83ce03781268ffbf80df9d96f385e777323f264b02b5fc704fad9ab5ef2edeee633efa3ff657f9ad4a35f43fe473fe321e9ca9f3c20ddede9cd53affe0dd4670fe737954fb78e63759d0fa19bfa1b91c76977f33854f535bc2ffb56fdf62aab73f33a26e07cf738df7d47bf3fe771bcd4df783e846ecae6540f0865af0ece66d973cca305b865f6a394c24be9a42b5c3a69072ea573da2ffbfe4deb4af0819d8fecfdfd9d7340d9e307e72802855471b639c9f767aa924e4deef6747aee9db539eab93ba1e449ca93e41cf0396af22bbb1edac3ce07fdd94f85b06e6730f3bc6357025fb7425826dcc86286177d821d467d14e554b7d97ef3e5a33c02145151af7d7dee50b504be284ba44d70d489729388940240c1e6428a12fefdeef9c6713fbdeb411f763edc3f227ea394ceb0a1a1a1eb345aa10dc62628a594d2497f52da9a169d6ea8948c97753d2b95aa1908000000f315000020100a88442291581c282a3f14800b819a44684294cc22a124466110434110031886100490210619800c5310950db8b9487decbb16b870bb779ab33b89eecd4a8736ee4e54b10a74597fd539dd3c0962ae8d94a0b1330dfefa2323acc8938b83c455d7f86edb66ff51907e74422133d08a4ad7300e99c72e07814335cfe86ca3a006d9734a71854a6b826f0c5db1304fc6b2d620fac81763cc28f36e067bc69415b3ad4ab3028bed93f7b979d2e5a812716d5d392472d9586bf546d884cc0c732e5928c989cf89625f56a243df0d1316b46f48df0618d0857824e96e94b9594f0c4307d0b54e336cc0e38ea1326160a069f2e3296ec59eb1d41affcf7112dbbc941e1f4b63a980115710f101ae39d11874ea4d80c0f82028c09dcbc7743a6ed5e8a38d256a3c1651ee822c30b056b596b27f5947acdd0cfb364e6c296a2b16924099aeec05bc53c675c1a74e8681c32e548f3c0236092f8ef9006a017a6c2621c001554b7534b16ffde79d7ba54e31097bee1cd9c32047e3edf741789181dd0fd16bf769ad2be274e143432ea3407aaec67c858d1d85993126b5c3aa42bb3173eb70935716d341e26363e602df3ec3f27d4a9b27da55a3364e4398825148020ebf0468f31d5d19a0c87c201c35ee498df889ca976b531921bae8b6c80f43728d8821cf433593f60ce394331e811f74346342a82c0ea2c6d7fa66e06a350536fa15b0e3c4cf1848538b03fba583abf762dbbf5f45c9f08740b0b0f87f844687a81b796da2bb314271576bfc5d8216e8704f2ca98a7dc53c7112d5a7b5d6b6cd57452d7a79bdb5ce7f2305542be036ad9a2a803bc091874075fe3dd87b84b9d10eb7329a0d4ab3e91a3171cf0b7afd53b34136314abb0731c7a88ba0388d2127477d9cfa8523e0d5312a8b2e8cd47f1608863599087f0571239a352f61bfece7320464a6c3c4a8aa792e1bce66e4361497d114c4779c27aaa826144e6988815d79d8390db00de99a02d6fcffce31b6d51be49e6a8bc1439ca2a6e70512c805b9b3193f5fed83387c4a2845fcb6289590de2562bb7a341a0ac7e10499c1e8886687076c7998f2c7fec71aab4479a
7c674c087513d4009e4047cc0d8939218a39390e05018255475db8b34a728e05b490f0c76f4fa9e5074619f7efc3d7ed6c9421948c1cc795f1808677a882ba372c3741fab95bd364dd0002fc6171d719d664c2c7daa63a3ee56e445f36969bd6400643c365ff4bb8f42e4b3c91a35deb352ef7230ba2de2402ca190508478dd43515a067a616c03b4f2fb7d860796ef4e0f9fce28626e50f5717c3a3680da6ee64db7114deecc45f9cdeb08a7ab284565c0fdc1808b46ee8a2440d39f4d6bb67639fe41187d809afbf3f8b043ab4cc7c705f0bb34f27f301837570d1fe8adefc3c25e124a1c0baf9d95480ba510bfbed4fac13a93205b4b031226033be1d3475919e59c75fe09bc6d7aa3fec84fcde876b61543d6d61e3b5aaca99690d15a292f149d3609f94f56a046e42cc88d6227650e63c7e18433253ac1ec7667b8dc138dd4d8b2c84af624f26a7279dd3badb908c0b3aafd8d541211c0b54513d810e5dc8d0502a8ec2479924d5f5a1edcef96e161a83e8fb1947ec8883d49fe8103aff1b8bec7cc613775a7789043995124ffdad24b5646e5967bd841da040ac1132d0cb55e01c4d3c261346848786eb2daa08e77b14ef13836bd086d6ea5c22f1104d80bb463c7b48e8d8dce2382befe04bc0cfd980fbf44cbdfcf5e394aa4afb93a338855b0112368970740c275a3279508546f2a34b2b7dc24b76f7e65c7a35969a574c913d5ad60306ed916aed0f559b92232ad5b1ff2add0a210299a5642e93f518218e5ca1baf7cefe9330dcd7486f6e355988d5c4dfceae45db0af3d2d4c587d664680377e9bdd29230d1fc4db7d2180e062baee918b725d68933710f6874e5cfc41c515e897fe1b688fde9a96de822d5d7cede8b38b54a177b9cb040efdcec84bdf56a4582cd55295bccc1807b1500ebddc7c28b8da64c5bc8fe78af41ae0f1bbf48208edda6873cdcb5d77180bd3d038d47edd4703f5096dd03e5b9222fc90c6c28282974ba37fd8d973220824759d07750e240b91518543256221d55c328195f8b5a01083c69a90068f89ede62f0bc40601ff289d58f04445a6a34fb0a9ec00a2a647cc5e9ed3ebe39ccf64fa04a31e4e72e5cfc495ec2d35a194874765498a3400272cd1ec68591f90fe8c7176c4eb6961b01c5309602261ce577b12a4b0d32f514ff0acc1584fea77d1b11d3246faaf0169be8adb51eec3ff63e8312a24270a5d9a7593940605d4df10ed39a900265c1afe1f9e4b2dbaeb4401ec90b406201ca8f4558ee5e2522b2e89d0a35f1537ea907e6616b5cbc97ed074083794b280fbadc5d714fa10f39913684f6b6927cb1a6265924b5368f31b18f090911857a281306310447addf598e307c6036a3db7f42e2ed14e6fa85dcc021aea87c864d22b34aef201bffab1e24d36e457d92363e27959b628f0f0d797e33807c9082b4b50971c27b3e1522bb5ba9f8e787187e138d06b2539e5ea0d961ad06dac6325fbb99a238fba34ae8012be6b85d4f52615e9836f31a71f5b20eac2bb463267e6b51d227ef3746a84b6e7b4b35d125f40eccd4d5368ecce02026f0c64680f4d7fed8656b304943dc954e89393fd4be6feb13713f8d7998671f58f9beb62079e440f0254ca6c64ee414cf58d1eb072a2fecd70d0a6ade34b5ac4c5b41b67fcaf6fc09edf332bc71510efeb1af6136700c503a602c29ae2ea59a6dd709c6c06a3ed10a50d7b9de4cef4d66fb94e9dd6b5ef22740997b2d1b9c45024f392af3e89ffda40d0609606b6e7616edd4d614960b078fe5a23900549c6f060917d6e6e46d59a2bb9b33b2abb73c0930b2ee773d2208aa2e2362a046dd49093cf92944bee1c173c2adc0bec65b925a8d5533714cfe058c07b24b6f98a312736ea4d0a42ffb011c69ff4ce544a00f7446d48d49c7bc47c99f7b76d47a8f1652d263d29954b249c6713f7e3a304e8cdf03398cf55a77e5e1e6f15ca0967a5dc8d425665c0687f1284737aed8c1690cd41086267a71e2aa31c2d8042d0c2fc5948e18c99641d3e23fd7f4462ef690f89db443499ccd987304da9e70d87a373b5c40a131eff79891b11dd3de640e6c22ada37a7653c2bb75470d2a5e2e2caa2e4884e87972f283f8ed3f920f0274dc8b1efc3362a9b1b45d0fd03293d787508e785e9ba01e42cc26d9a6b9ab08b98feb471961be1efae4a4df4e973d91eb1f294098e48a4f0ce6d3a6fb0cf64583571ee60d6e69caf361de4f8f0e82a02386aa11ef6831a1d012ca84093dceca2fc40011a23e8c5e83dd60004e50e0302c709a87fcab13e4259c8ea84b1fecaa1e583e9dbaaac2d04ef6edb744cedd7fee52d7fafd3296ab6dc2dc591372dce6b29c939440b1183c6e0731b868fb7e7573c0b44b68d95325e38314d4d793c888fd799dab845e4c6632a5ce4b9aff7a715f60e5ff14f074a84fe71ddb953fe7c25c29c2e581074313cc3d31932876ddb4610aa8696206d56dd7e746ea00b45c788e38b96111e94d7d87a1fc2
1d4795624fd6840bcbb1213682968b06a4ac6e861f6c1158eef1c4e7d07b774a7fccb78ef705830c51b516750c2fdaf09685b7bba85303d3ff87f144a1182ec91a31d4b94ed7a8541a2b08fed867e22c70392c45e0e62799905b1af6ea7a2f8efd0205bd1b14d1898b6e28f2104a27759f642f9773c8343af13c66e2228857e79eddf3c62ee595c2d1bca7f36b5b5753903a597c8fd7b57df548db65ca90b965541996619ab58bb1bb3ad07834560a358c30a0617b3685261ad0166c2002aab63fe35b22d825a8b41eccc3116552507a7f1f17b5d4d9d16f9071041e746c94ff5b839310279c5f46159702d971c14e2df44420461e0a510adeb8a22212ea2667045358899340b78610a7ba14e5412ec69be891c8f5cdb21363b86a31a879dca993350d88a9531d1b266e8780646ae2669689341f97edd51b5118cd491979b5070eb2cab717f6490f36fb9901d34c1cd4f533a9436a4c320c58776b97b4d564f57057e59817a9ef1d262b4189816b94435dfa6bb2eb5d22438d0f717d699c013243ad78eba644d3f43910e116ebfb823d1d662ac444adb5182c40ff484fe3517597a783ffe08fa15d69710d149a93b40b7fe07c9a5cd639bef74c44d72fc88361e378ee0c656559918d69a9bfb51a7384823ba7bc63f9ea8ec1cef9c7f2d73ac70a683fa949536eb29e70e5ad58a9a97b1fd85e72b6c51c0eff1de7893d5ff6fffe39c484800e1d093f186495e34a65a5da04eedc4cb02fb768da16d0394cd0acc8b8d8f9173c24fd5c0d47c0838c04a42dc4b1fe328c4e14dc310fb4403a8697815b5508847f41421387bc8af3f47716fc5bd01be1a6352f93807b989261d63fe7ee0b84c71401b0201b5b9da8fe78f353013c7d6edf3742183a04e1a43fe5fb9f8fde412fee769b58b2723a59b1955ffcceca60143fb2e53d8308b587ab5cef6be365cae962f9d0b65178845ec43dd1eb40fd02f324b739d23a57d066aa2c8735695c768643061e99f143bc1e4b654419ab5ac74817143beec154700e8002c166b5375ca98ef663fb5f76a912cac0f5482bd1411091054a651928cff3a8641622a6af8635de343c35c1a8cceb78a3fee0922a1cd02b2561fa14ff7fad17392a8545a06eda029c0096b1028678b54cb318d9019a85784f0d5f2e658eca799dd549802c2e46a6d41f405dafb8be0551a4cf2e3d82ee2f367f454ebbb579a3ffbc66e80e7f3a133a7f5cb5d5caaf1798c0086db9765d0caf8559d7929cef8111cec84a186d8891eb2a2e8f9c129c4d38414fb5a4e85e1b3ed8893ed3a1d3c258749cb5bbe3db2888c06835d1a64484292c6e1617259f3aec67f794aac4490ba86f5a29d83633e030b1b21a6d2d5b4331ac7c2cd8c68a26c25fcf45777907ce75afed7414955eb50ea640c39f20033b0518d5efca057d259d9395154536c2b0f6f097756e0d4a37cdc647620e06391e597dc347de0449486b9c8cfb29ef231fd306c85b850a03086c67e6b0355989d161192651c9a9b92fc7b6ad534274c6cf743a80842cb26049b02a4ff37b5751edcc4279b18544a86cadb0d8b3fe5a5a65ac822b31a020001f0ae5618942296673da4a9411b8099590c4c812729762c4655e04b5d2886fc27b348b53f9d2ca39da2284d6c69c7e98a21af99a74e2eee6db01fad56f91f11933d7c2bc8753785b29a654123101be02d03e0f3206e3369d63ed00ecfd299e87a821b2d56b3395e6714f5364cba0b4723ae672cbd89d4a73869127575ad5a4b9e0e4f4a704bf99c404d1700c0803ac83ced0127b4aae3e56b7099a2375580e6fde3a9a6c781c033c7d309a0dad60d51e480096ab9d9038c5a04d3f1940f82356dfed575ef67186f2c7df97911e2922367658f1e86f7bd9f0a63285e4a67320cb0a43660647c9b8b1391eaf1c8f13b8ac490258dd9cecbe900baa477648680ac5ae59ced62e56cba955bd4752de1babfe75a4c2174922cd028aeb4fc36c8b44e95e410e6f192cdc0fb130bca33df78213df77fd00f2ea8699de55cb9f8d0a595585073e2600530404e2f7084851421a7d0748488a438a9e6b72da854a7282ba6a00745584b44923d0b4ac52fba92109d980a1911516b3edaf98d61580b4870c914a7beaf955999fbdd7c801b7bafeb0b3821c00623f7d2438ec0173e80f11fb535f50ab3ea49bf3521fdedd4338b322e6c0d5ca34f1834b7134b1fa43d2c52af31fd8878ff5fd50e14a10a314798be0d3153629d44d23224ced63a61044a2aec12350cf5221ea100e42b4a124cadd584f416c9a9f2b0d8a85c20c62bfb42005db7cf47d3dc2be9d6ee0d164b19d32124eb436030fb091f97a78b84a871efabc849eb455884bb70797262418ac2de5bddfad7bca2107e129f7ec268d8a9ec3d2c3d35a229558a3664590a6c2ca801b8064983c3146ca2398a49adc1429b7c32452da5e499ec1ce2e0a90740e5585c67a4aec6cb2344dcb8cb438fb8557b3a82550934e860ae36a32bf77bc9b7e5
079eade869abe2af8f2dde1730d772be56d5fc6d2023c01476a9a1e3d9302dbc177723e2c777784709427e10665357aaf45dc339a5666fe89d12814298694857be9761b3cbb5e8c3be3a07309f086eb7e98bac93b4d5fbc158f082bac2d6467e934d90b35bd5901f8bced8e2fe727a290cf828470d60fd0e24824d76caefecad41cafddca4681fd53878a9d27a40a6f942ec94f635149ca394af4dfa1c2c591533aceefb97876573d11fbb6c921aec099b8269c530e0bc825a33f7ef40015cd423ce40dc2a053a9346365653c48868d939346d7f524b5c26f27d14e8e952b06c6062193255555d057c7eecd2d4f8a77acffce9088e43dec79fa84169eb2bbb4b234d39aef8340279a62544927f5ff96a38a3d127f40e38325965208328fcf0796cb72981ff1f102068bb170af5b0a0bad31281876559be5b84ab8a68b59cb6c04955729a9d4183e54cd1d7622d4aa7b6277a60de2e41baad3665a339798c59eac62e9d41f2531426c1a17c6f77e247408cd1f114eeef94733732a4c6907eb634f7982cab9a8001826550a84c02336dcc746d713c8a27e3ebf2183e38e732d194b5cc3c4f9c6e49609c931ac405cf550f3fb73081b5b38d2646448163da41e384fdb592ab434195ccba2becd05cc02392acfc361aff70cb22f8a161c4b33536a5a674f00f9c1d36f331b3c7c51386ae9363fc2f2331c2ceb3ee1ee9ce5cd7ae6949050555bb61bd1ed61374b74927a7287d01104bf872c00bf3cbd892ec7010133f5b22eba18fb5dc7addc06ce2971eb40539ee47952e672aba117abbd4535e38cbaa58a280a2d7d75525b00658c1049480955afc4a053f876860df136a589a49639e500accb05cb093027c85fd56415d2be7ed6623a6ac5547975801a15d1dcaa60a34bd242d377aa36d7149c8cfde3a027519af65ea79f89cf76aee485903e93a74fb231d0faaa6632c5fba8884323ba77188a968b8dffd8e2866469d18c165c3ae791d33f66f8db8c2e935f6b167d39528c97db2dbbe57a1ff3d35b8112d9560d6c2aef6a95f36dbca3b6351ba7aa1fa478473837a09a7475c930a6a96a2496da2b370f9ac70ecc03c05485d0c1a67503fa765a6ecbce8ca7f075761a6c797b6d098ad2ac07090313f5914ffd488c2c889e4db679ab53796a71116291d6c60d6dc33e9bcb429bfb611d52c2b981998fe1aa37cb392f75a0a7283bb990be9ca34c3e8441c8eae40df0a7368f1d3d0ab60575359016cb5225b507d4844d9e05e9be665dab6d23fe76ffeef260a900f49e82d4cb1418525cc684b49cc3e8db6b7d49aacf1f765204ee4955cdc0750c454e758d297f5057833d27fcf2d2a9d427554147ab0920d001d723720cb85f7e843f46244ad72aa78f645972518a27dc43114d4aedfef5c7d5f148f103b99669952880e65138c95eab664c10c34da16dd381a69812437dc9156106439e7a9fb369e1d14df1e8597bec0c68407048e936d5cece053592cd92e96312924d6e2a6117878587f7924dade00945045fdbab82824c93418fa500f46b19cb063e5bfd022180dfa39f21802c267fe9f01b01b309117eb69af7d1af750304ae688e64f29baa554f1572251fad8118f60c5f0cd2e0f0eb295082e46706da5b012ec4fcd5063babd2b5c647ae08a1dd3362fff943bf59e883850e7b4a3577fb0124df9588f2ecefcd117a505fa111da07946ae088bd31f9c5d6d38a7f804c2f6deb2a870f9ade1b6db8359ff539faf478671429a982d10704e158fd3d64f9a42b90610da7959d381945a1f5ae5d4cb7119a61747224bcc8cff3406d66b1dfa6a66491c476c45d80ea4ef6370933f87c703d35fe6de2114619bc2a92000ef7e0c8cc2fbe286cbb0e1a425ea21a8074aa488e3e77f5825e2f0009edcdd71cdc5b7a3625dc56855d5de79d35c3c0d60bd5f69ee0c684c5466b494a036783b3ccaa573a2bddcc6b27c48823546a8d749ca5b98da42af9d053d207b1c1e27686bfba4cca16a023abb002ffbf244dee271fa9090b4203b3a12335d5ad6940a4dc0e4a627d1c4d49d2207c5f328e6d0720fca9cfea414da7cdd17702766f89259b605e1bad68291400f8a588b6dea4e96e9a6f5aa7d377cab6b51283c79d7c5ec9744757e28b76774d3b1d623a4d80af039cc5231ad6936f3135882636286e7014818f48658b329341c2fa7ccd17cf5eb470dc9c84ce3461f5a54726204fe48edec4a252c28de8365dec5cce59a89bad6619cf9393785c68804d4ac01a99604e794d7a58fbe30d8fe69aa2854e57b685b1645d7e0146a76823bbc1bf492089500f20694a5b316a37aec1c3689fc80874dc34e864009918c34e54173c79082f344c03126c969100e4e02be20d952fe2693306bcf9c0782cba95434023bc7a4544ef949e11a689f9b75ba1b3a22a5ac42efc204add8b4e5e9f6edb3b0d9856baf4aca772b6509478d4f519552319ead2c22e2ac292a10e8b9f01174b7218810706e168c907fd9585fede152bbc
54f70351dd0cb7a5a95b29b98c29f342c1e39c0b877e7673fd99eb365b046f3576bb992fe2a2fa8e2365c1a910082dcb1a24205c001ef6ecbcf132c7a7f2e992f8fa49b3d15a6885047f311493fd94a22e7cf2fb2000cbf7037134b6421554104862286af50ad02e8e13d797e8be1681191c2473ba0969a0093c9e25a3d3ddbf8be77d6a0947525a1de6da7c8e9b7b76349b949f89c3e245fda1431a7bc28399d8c9a370619824738a9786332260db3a684c28158fe4f2e3641cb9cf44f6fbe96420cdd6a0ddd6c2ce4e2685a89921f92713675d77527fc86cafb4a87b9a4aab5cb60b1e41976e241c9b0aaa654d5f3ef2830026001d15130508154eda19d8bcb8a66cd9354765d5458dbdf82075211a7af0225734d14442ef83eb60beb50e5c26514eee8087107191e817d74ab2466934c451a1259124b933c9ff92e87ec358388004ae971ff00a9a9af7924bf646144e1e5e3a6204374532bd829a214cae9ade28b3115a7be393938aaca27f85ef0648c137c6bb38256ad7e790bd513375069db6ccc594da745f97480ac1b09a59eeeda7ff32789d48ebb25ff9f75733fdee77c1d88b5b5c14a81a7947950e2532b0432e32b79002f266aa557f9a5c3c8cd4cf7b4ace54089d2617ab2c0d190bb391af53a9ab14dca93775bb612275e8908fd44dd7398fd333fb747bd9872063c2b5da235bc80ab1103ab1257c97e689cca3bc323c295bde0d8aaab0718c040f4feb759f3ec6f3856a1e01f0cbfa54b38fd51d11203a4e76ded5eff7ca605a4c88f9d81a8508cf2c7a15eefda10ad358f09ae2f8af4ed2d36308e2f7b2b4c9b6c074f3cef23b6055c66b0c85a0aeda674c922d8f5ad4d2d1f79ec1c1b85f152485ad2948e224512af3442daabfb9ba6a1fb3ac56eb49f14922a03f0f84ef49bdc7fbbb6e0e811b45b89d006710016e893f67852d5949e1591042fd44fc42873b6d01ef4f4d02b0d384435bdea148785606349262631986605f9384319135e845f96002ee7b71689d45010272f35dcac969ba312cbbbb787df33a5f28cfd731a6fc75a51b35bba175feb16b1ee2a030a8044c909e18d0c73f84c79cb9d3c9267923b6d83680644a10c17ba83c058ea8eab6c3c84a942cd629d637959fe56e6e2b0121fd36523abee45f8001504f75f671e5eca372f24437a5122ab714c777e4a801d36c869f052c43b447ac993fc5abae9c319c9ac985e7a7d6339016a477249a004cd2e327e09db219b1a3c07f6975ae1ee117a6fb321acb84ab7832c92d904a243a5500d5fd20218fd63da0aa9293903b8459eeb081e0255ab2ac67dc1ede5753141898b711060c9380a4e21ed9f4b08a699e5385926f858485514cf89c60d37eb0b2d35c0c0efa3913c1d63ec30a8cacf0d8b94c607a4211eef0066a1f16243269ed4e8842f075a4e5f22e332bf403916f5a79f0bc9874d6f1ec3349a81207a085115c3b82391c584d8c29d3175488bab3ef948aa3529533d41a053fd8abe4b1d0e6735305412f3811a3d79c3ad2e8ad074fd9530dc282d18c4836fac87960cd38089cf05044e0938254d846408909c40aef514b229dc2b4baa8aab5d154a0427f92609af96a8c9146851875c6f47db3824a43bdf4a052ac72c16d00a27fc26e26e9cbb3a222fd13233458c54496d9212fd5f09a505149179905a8274b9ceadde06238d0888150b30e2e210fe224bb19f005924c5221eb0c6c7c4853c1c24c6b0604cd9d39d9f9beefce2985d46675f9648450897535984c6b527ea3c8ccae5886087f3bea0528b6709304f2945cc28f046cbacaa08c8d19e125f843351fecc822b301fe6cbbee267821148ae0ec3b6749427a059114b267fe66b17f2d1d7d9a2cd49b18b99ef1e632f5ce4904e950663dd55e3da0de4551b5cbc5064c6b04754454e1807019e4a88d1ee053bcc61d34e45e5c9fe0c8e472ca14612347f2c84bb81099dd904f4b66c6bd5e8089facdbcca5a515dc0046d7853f1553bae35a39df80dc632762212158890be7de1366b22968e1995ed9fc44cdd29b3dc9d9224851d7396577528d203bcdb1036a5d30a6f66daf58bb4ae8a32b38117009e08627ec2635f90a6b0bf25d01bfdae576409297a6896e8430269c85151b495c113309f99046a99687ea3f7d8655ca1b023151be89b3c7dcdd71e671bbe3b8fdd785f62ca523b76b26e213b9f4a815ac73e3b623573d8a679e30dcf8c474c2b0fd899e5e9413ebae93b8cf5884e7c3b25d760a4d4e6c53a7e1ed1ec4c918c9327bdae0a8b7da141a84b9d538a924f8922e32420581414b934d1e80f1b61a9780430bdce3f5cab0a02bd9bd1f5dcfcab883fd9e86cf855437ab84e66b67235f03f477aac0fe90f77a891efa49875520bdd6063f1cfc76c1dfc9f761ac51034c07411f287621848b5aaa2f6ae92034d8e229df569284d7f2d00bc1288511fcd845c4972d642beec2a47d7064cc0578ebed4f788476fd7c5044ff7856216e24d6793f50935a7dba
16608dafc7d83ec48fa872458e81acb505dbfc321310e458f68f4ff5f918b0ac1c1825d3c993088c64a195e0b155d6119de777d599abed7ff5a23e79d88cc00228602e88e5e1560bbcd958c32352d02dd2b73e0cfe146747186f1a681f00975a5e65cf045846c0182827835c4e7a1316007f96a981a1d0882cb982e2c8a36e954facb1a4a7ee06c48def682c91aee9e2995919aa1590da68494a87cd02c21500def56778225a8f07e6e38ce48e054814dd4a2c9f7a71bcdb208f097cd2e6894606ecb3e37d4fd6d18591c9455d62978bbaae1b821d437f3d771a624f26b04df3c2441fdf579541f5d6aa2633c7182c721a0b5197a7dfe0767b00b34d8f04636e1c6bfecb7750dc7364c8d1c67ebe7900c61dc44c43c5a74d886d7279c985d20b2b0483a449276887f41f8161bb05e6d4e751d61172841dce59efcd3684e9bd7be371d16d958c7be6a79fbaf2b150f87c8fa3ab2935ecd623b5be9b0b3b9d31493f75b0ab44a7e9a099fc4ac18a777e8be0e450c280ff1ad88cfad712088b877ae506d8afe2728cb27fdcee15163ce06f12e82e91d91f98178f1d2bc8759ff208aa359b0883a32ae70cd22b7ad0927b3e476474733d899ac2e79fc8385d90581558d3074a09a3bfbf04c26285aede37caf835a61579a9ab23cc1f40edc89de0568038c39f91a00e12ff10e3db4949d5d7ec06559566fd2ca9e27d6e1336527915c77fd6298f387197205f0b2cc3e645f11ee8e942f230b4efc0d83e10537fc69f573a59a10767c254d74f232a6771c2724217b850916b9ada09fbb5d424b9847280f3edfc104fef8d8670621b2370e369cd68cbd418e74c18b825c415777d09faa021125bcd283a794e86f3f724051ddd1acd0a30b1f8a564746ab5ab265ddc4ebdb8bdaa57f6e097390ce6bc424bbb0e87f0040c8678ccc0fb460c9ff604d741c21ef28030f9fcd1f161f91caf79d3dd1c9836acc994c01e6b04a3571eafdd96541925dfd66481a9962771c1015202cd1eca436900d82aca8ac2c2ca6b451ac11ad0662386d23b31c33f6982b1e7e0a6f636df85379dc78c058cc84d5545545515fbe1c3606a7a30702d223c62de54671b7082e0918a1d781e4f37468eb301e01ff03c29104a251833a2a4f8724a23b0650c7107063f2617774e151bff7b7db3d7eba90580a771ca1d931a7b82a70b796100840485d00d8818383bc8265ef44911b3a207bf05b36c8076c0b4f0bc91e25da8a96be76dc12047a3751b192c70172c43121a10ea50af10d8da1cae133eacf219dc9d8e8d92be1b9414fc2c0ca47bd6721d540f92db5da30491304d3dea28f1527da36d3493768e7c1968ef8487d0e08db5e28cab7475c0fa8a3dfba8499888b7ff171604c825e4c1787e9cc960a8f8858ed6ce4dc015bc12cd59933466647ae906e06e36ca8379c273c4f8a7aca8e35102abbb0c01bde36ae04ead0370bdc3b5427f4a033a4cef92f1daa026808ee65679c504c86664e9010660615349b458efebe62e57ef06d97db4f14eea0f14101d6b6920acfa14f37405224c583d80c2c0b5060f54ded923993f59053d549b01e1770f29595b5c9f240bd2871ffe2a36655f5226812d28a8c80cfdf815ea340efe713312dd8b0514d4b421cb09eccddf8e17b4d5b43f1b10cc9f6214cb3ec3511a39cde9aed1310a4d083ae3924e5fd23eda69e3c40ba06b3a3abd6e0d32c6583624a91e8a49c88b68c44c6fc87471f574aa4ef32e540c84907f0a2ccc099f78a77236c1c08df3acbe469ab359110eb2129b21a57ec81417ce1129ea579eb94e0c284235d6e52f0bfa38ccd7f08324e1b3d374280034dd5705322c022e3b895082ca715699363f9131175431aba4912e364e7559d89147d99d65a5761ae54950748c830a86d20fbb472d390466a72f1d0d1d02df2a09b5ea1b7fb9a939b0b49f904fc7fa90e4e1f4656149d39e87b68b9cfad8b8a544a0ec6a34a11cba620f315051aeb110f4878b314f7d3ad9bf0b84f04e0bd35519f967eac13e9b947edb83e3e877040691cc25d6b2b05f283d56bf898e8d7edfbe23bb900e2d2e73f067ba197c8ed56fae2e344238af3b5a7db379bd1205deb399b25e524e7e0bfe21f0c93b26c4fa6930887bc1bb42989c3266fb3eb540e4abc477283e9870d1773ca01f3501076a00e382243c5f61c5316e301c852bcd4274653e420171638ebf81a81920d598223cf4acccf62434c398a433a76a0545775253f32fba4274fa8871a21da06b38f50563a39468ad34b38f449e0f31315afca9ffce5a8ca59c0ffa79dd640660ab0a77a4ed2047a65047f8c8f65bf420f4a640beb3a3d0c30906d758452461b6fbd2e631921f5846066badff5059f0bcfaf657b157eadd4172a7489aaea04b47f18de38ae0ca5159f2f9aaea2712ec7c51c0d982ae69300ffc86039724651080d23f54fbf8b766248364b455970e5f917e4055629075ef5027e286706894d814c8924cf91fb989d5df962ba3f0c31
b3712390acd4a36592ef0753d4b6f40cb5d31303c5a3d658c0fbb84f79924f576e78ddd96fbad03a0436d123f3d4eb73923f735e501eaf33b059a721eb7513f70370ead11b8d2f286a57f7f4304b097419ab560c009d82462de52da8725676e42a3de20ae6b405af9a1a5801fbc813afc487a928869e5892e24f1829e49ab82492307b7c5c9edf959ae2fee1e10d8293ba8822a2dd62fa3a66b56eb14592f4ae4ec840954a628291bc0f34d02f65b13bd5ff9cec78394e2c348d690e6a85030da89c08d06c96a55230c18675b05828e9df2530e4cad139a86f5b9eecf8f0ac386789a7b8115c76f6e746ec320efe1924780ed635e6beda5b8fb37bd72da64e1e5e5bcf565c8fa27d77836259a79bd4de05091fda4753aab91ecdc39a0e2d297e21b9fdfbd645bf2f51f508440ab7d0e699ef06a2526ca5883e3456595ab740ae758d8d6a5167174fe03b6d42fb2a5582716177294efb71e7b4f5b1c7d9ea1f241e02fd099ec2c4153733e60f557c21021fe0af14159397f4e4bb6b89be9455707269dc5386c1775f564254dadad5d74f96da078ece2e9281b99c1eb67e5059de3152c86bbdc07cfbabbcac93b1bb20a24cad543bf20974112fded5c1665e1f104f78579c085f0ccba22d346a9b62073cc8475153ac00c39a4bda38776a6f5e6b789e4a1ccec1991330aa50a05fc8112636d8cc3f7ecca1a5457e8e7b9a2c9788407c0bb49ae04e6ecc95c5e4d134dcd205a0268040abbcd2cdba9452b768a63e28fef1f09fbfe4ec8649fe922c0adb5270f94ae26f6d228ae9e705f20cbbec26c59df20b6f0b043f1073d52a85a4bdfcf27b53bd66bf8b27922e8c7b4c44eef2434629e2be5a983b1654a87987a9bc716cba4a409990a7d3dbf6594cf425a1ceab02b77cdf15a84ab39c0b428c4e10e38ba81b6cf9c12de399dc8bcb8e476ceeed16cdf9d4e883c9c9e75758be1406ddb95d3d14b0592981cd75d5942a128afc8260484925e28c68d6c161baec2c21173667f7ee895f33be9414d7f9c94e0ed0bdbe2cb5d929d3f479a5f2fa7ab21ade01200364b408b005965918cba90630d894ef04a1a46882850ec755cf8f1aae688faaa0c87b67093c211ff5f1dc78b9067eff494e4f6652495f0e7c26cd7e54ea31f74ae8a97bfb497c1982a3f16caac75f36c8a502c87ca982dbafe2140a9ed03cf1783b9bf7aeb81410ef0e0a90a4519e8f310ea43da4cd4af02efe95cede715b9a6e795878c4428569ee77109ae7502c81a8174c8e327e6e18b106f1f3e63bb2e7dfd5986577df20db447ef10e8a8183685337a7b02f3fa78852ad928486f227eb9e1ba0eba95cf2144a055de7b45bb5701ac4f8b234d70b1f3bfd031b847e9affef977fd8d817fe64f357051fe71a9ba0943075d1f7800ff1f9cbe8e02bfd3e75fc7356a5088b567e0f9a330a6fc8676f62f3d4786bdf53a8b204fa35dffe960bdac31c5e902df61ef559d8d286cbe82dc6f34709246c4f1fe75317f6be97877a27bd45b26293e3477091d93aba6283a42902258d3138e1a2a02b626f138442d391d18540690047105e596ac303012c5c2f8a3c1731f0fe948c7ceb8f21b7e1033b36ac5b202bea8a830150fd0925ba509e17831de5573db26cb4ffca4464066073b41ee3e0150b94a90c4954334cb5e181558294961434e9b78f3c0249e3386d6ea4bc6ac4142264220b897dab1d615f6f18950bc5c2f0268a4aa935bf9fc96190f2af3a75bfa08983653d20129f3ea123301c16f4dfbf1619e93e0bdfc70c7302796a3471266ebcc25b08c9eb7e99560ed179a276391e0498677215252363e6e5df1f99d3f2bd011c6c332c010a54a88fdfe32a4dc73dca00c87d96499fe55d9cbbb997146303462f48c8fa803e7674608f36e38fd146a5b3b250ea8973e544181a18d12ad1c540f27c72c48c0357feaaea7f1084bb669109b2d7a3b14e7369f143a3c2863cdcc0afb5ef2c71beca4a50b79552dfc6cf3be2d0d45fcd332cfbe81653e69da6d2f89634644d96a44b31e009f74e02d8978e91f9bb0a7b183a167d607e0476b2d1734334cd3b440b5e2f9875ea3621e6a76c2b926012072377c027fa7dd57428635e0f4c708010a15e1e9980da1f141b180fc896baa034bdd34a005a20355073870a0b3e9e1dd5338aa27025b4460414bf5972b8d99b995e73cc4827588f2e0c884fd38a243fd22ee0f5c56b263ee4acc6818e193a997a19d6dab0fdc84f04929351eafe02943d4c84dd64bc2868c7e19cc980976b4ca6526033853119b92a91ea141931b227b532199d19cb56d9a629d780010826c358fb0a2b974c2fb5ffb85261adfe1580eef178b850a29150019504273078634fdc3bf8117a4867639d84d34d524db60ba419d9bc406306f2a712656bd64458ac349d3890eef07303f273f309be11c27a2c93ac2ee51256585ac63eeb4d9517fab9918dfd2c690d27813249c12ec914586057a8df5cadb40e100259960a4064cfa844a1f
2b1f075cef85308a0439ed680688464842bbe71f2561f2cf36edf7c279852205666c9782545055450b0471172bd6795ee1b447f37011f2b60506743e533df3b9cec9aad2e1b81ee89bcb815f1a9ceb955003559610e86fb7915321cfa7bec975d5c48a07a42119dc6042ea73263ade5617dbf3b88f8bbb536f4124ebb98aace766d12f66b5a40bfa6a5590f0b223c8e229404682313731e40c1d09e1b8f3d95ae190680d591077bd9004ccdf0a2b7ae7a9a61e1cce37ceb6bd0b1d6b9f4552822f249107e29df26492fb6938d3c94a4cda6a543eac138e05b89adac66be15748f4315e570e1df80311d574ac628b32ee87f39f31d9a4a7fc92492e38f201dbff072f95f81632129cd08366a47c8e1b20838dfe82a565c8868aed0a6ab3a70749b0f489649d6c6a6e7f494ba6b6686f6fca82a5e0117786da85c741e284a48c2991c57d637cf69b8cd0cbcd3778478cd9b7b07ad5265a38694432482eaf962516516d10aaf088016a9fb8edeb671c09574409d071951c844083cc127601d8069c6ebe8740d8d778fc76ac1b3dcb1f3994627f9d4c0fa20ad223a265d6637ef822e2acfa60fa1b84e5a5bc8c4130d41175c2d8a33083afab62f6bd0c06dd2bb4c6879f6a5d6676196cd1724f83d69e8df7f44ce4e95745f92454140554746b7a5f4bfd812dac211ca2f252b33f83889a5ea8b28b709b83b602b15d40dde01cbdd7da6a44920993e29f91d2826b2b3206785dc3f6f67c9e3d72fdba9a10bc143cc4d0d70abad5a4c3178876e17f8007590f821d68973c1a7e79cc6ded77806a47bf1468a097dbbb4123488bfa4ff066831b8d999a41e9a661500b4588b79bae5e90b97839d2158863c5f308cff6a83c1cfc3326c880fe98dec68fd5ee48eba4c5b5b1f2015fc2f08992f0106ab06ee8580b5d6c7897b55f07185dabcebff9b2365667973b70ab0e4ef2b0009e1defa78decca39be1f0579692e1df7b9f731510c9969d02aac610f178740e7eba302785c8ebaab62573e1c7fc25a360b2fafc0a6b57435809ae30bf5b29119c0a529dc0b280aa259f64095380246dd97b18bcc6dd5c4f19c02ddce3001dd344cf577d48cbd1424ee7dc089a3bdf6b326455e2dc38ba73d669b364b5845c38ba7fded13064bdc45c119a7fded732c8aa84dc315a77d6d7b2c99a4b2862beb42f1cc624824b9a53a87f26bc7df33e5b1160619201860e00cf08a5df4579c279e9ec3c07010f3eb17aa1e30864b0fe1beec384e64db0265821d0ff95577c3e7e0d64b9df228ea765790f171e8143ac8efd3da6702bbb05725d16b0543f8466cc59ed5647316315dc1fc73766ccfef09496080371ec9cbeb75e433fb552f6261edcf731df608713c89bc032df3243f62e0a47fadee91e5379662881582309b6f7780a102bfc4126ef058403c594a4a2e3568b976de7aa80297dbff9de38e404d10b386fd67d8347a37bce3188edee21109b3a84891639c5bc66d984dcdaef028b03837a5990f65f68c58670f058f4ea5771431ced58cac96311b5d5969680feafdcaf924c7084fb5a3b63039cc3385abb62bfec3a91f8fedde73228ee399f6b9295533e0332f10bea5ccf969c38e1fd74744cb9c4151a1f548c2ab2c2b75615dae928c4dc41aecb974c9b6000b669cf4674101790101729520cbbb961439ffec4cb518ad11405ddbbc5e7e5f74d8501fd41d81fc3d8a0b56bedf9494b80e90f92a9ee769320efe09dca0ad21f20c250f8bab1c27a19806cc2a0e30a6c336327a36454e74384294516b21337331123e0283958cd60b915f6e4b5d56cbeca3f165db05e6c098b4965eeb213efc1401f3e8958e946c0616fd48a010bca7713178a7ecee00ad14a874d53901c57c24462a0a20b09c481f319d14f045856f37f5bd2ccc6809a9484b4b03862bf40bcc00d7c3a8ad971de3b9f23cecdb891958297d0105f49bc23dd0e91cefc9c464925aa41a8c195debba4b4c1efa68d184ef28a6f27e3f96a25386d2c2057819bf71a17b1709ab11e5cd3d6789c0c86a03d611e577481efd7f9c777fc77726a8e4866768f5742638b8058434c7be31385a6249e729ce85baec8bf9ee2a2730648db6c84cc53d43b9988da93b23b3857ef12bbddbb27bc3bbd35b71e09a2d9a9081ba0d42b199020725b09ec35b0905ab8a0a982f656c2d65b4a814493ee6c19f34dd38a03ec0021a61fa0ea86c1068868c80cec4889836675d2467b39c41aaa436080596cd8263fb7735cc519cc135795155692f7b67c093aaf045dabc40f02c644f24907024611d3d5a0383f8731fe4dce40fb30c6fc8856b838c508f256cb5706de7afd8bb5267e67d8f2b60f41fa652d13dd356ce9ed7bb7497f474b42b93e7d48071c3d182bce81f45c19f619ad4358288a0d705a3dc122f9247967a817c0d73dd4f7e734a1ba055ac365fd843eac66ab8b168845904d8f417925844dc0cb416965828df841671bc66be8a44f8ddeec0c8cf35df36b5a7417460c0c
11f7fa700333261dab2bdaa23a69ec3aa823a8122d82107a9b4a3c801895997f079b051e098cabaea0b792e3cd9df5187ea2e1f8355b17d3fa872ebf83a919e4a5fefdd3d4dc65eb46c3a5cfd6327e45a07eef17a1c395ca0e91a76c52545209bd27c160db8d304d38f7434cffaae8406057421250b4fe6cf67b0263ab548d57d1ac75411f6d06f4c36ca62e3c194e64cb8caba3e43b0403403fb2eed25a8bf98f2cd2adc55e433f1cabae5f28393f9b48926dbe92c62b838df6cf268a4460c90478c3ba5bad35192042edc056a4a53c4acb0b3206ee36f50d7ec7029734ce87806342a622c534e5329a0b7eb1f4d703e2a8df9f8f1a22612ab4f924be7fe4388404a083e2429feba6752b025d490f5c567888c520ab9a15847f260d878b5d0521492da5b583e845ac5e7e73b3281040f7b5a26ba38a33516344fb482acb93436c2414758cb148f026f24477c72720bc33682a456f0247da050a5734829e96ab83fdadb085b151fd82636fc11478b1cc1adf47b06eebb72b21d4ca44438ff35bec63d04bb8e7f78efb2a97abd6c50ad410fdae7d55ba23e3dc2f052bce11d5adbd21e2e216e2035f7e91f55a205dc71775f21239742f331593b3e8a94ee2bf6106e8693c704d7b63719819d4f621a6c7143767fcfbfef8466fdd1764fdc33b30f1bed29d281188603db9960a8693bcca8ca36772d8717a0e79c461f78837fe9d8f7348db3a784cbb915eb56ef0e4c8db10202ce49ff8ceb62e5628e1a645d86d0f14a85e4090aacf01c34eb41809b8147193021a182789bb272fab02d28a1e79d75b29df7f3e4d69a03d49b0eeffcc92544701b3375b6187eacc2b5dbfbb06e76e38693bf162b82ba7ca1d722d57648b73dab36c176fa4f176eaf5a4dd7ecea5fe30c08b189815c6e94669999d708556233033756d8f1867d579a12e347cb8fef58c9c514432776823415d31422555738a0e8341e141c99d600d7287f46e8dd6062809d2e6c7b8774843b92015cf8acd29084abd686c249ea5459d09c0ed238c04e58ecc4b6c7b7085208f45fb26f5b1ecaace9c34328defec703c4808808e003d6c0865857f1a1f3481aa3fe3ee2dcb20523bac3ebcf4d0de6a9caebafe506b7f299b1305ac0e9f96998d814c4d1d1d2b52ae8d5d848a922da1bb97bb8812954da07f076f91e031013d5ab634e8ff228f5a992d89d9534711ae3ce24b9dc734ec7d48633e80f3a85cf8bcb9629af263833f6a8ae05b72af015ad063f63157818d8dc2b48692987452f7037f5319fe86c5dc1b26e42a334fe56d286adc89059e2d6b9be48ec9d6dc7288d396bd267f7770abb5be0ff734184ac5945b39a737c2f2bdb935839380224c15d19cd358737b30dd6aed65f6fd8f5c4785993caa0055f7d4ce3a51d9df477637bd67969053298fb9e2365f562af11066043d4470369463909ad6b290fb1a725bc9579067ddfcd188cb86dabc9d86882e0dd3e5b8e112220730aeca3da047a1cb0ca5fe1ab6e3dea0a682713ea374e8f036eb6e61ef00f1ab0d457bc25daff57cfe1616c3ff39df08ab784574a044d9a92bb4e75b7eca9b316a8164a140cc296cffefa455541c3199970865cd34950a18d348d6ca6575625b9828a91026b77ed5b762c5cc76cc2bee91d8e0e96528649258370e9eae60ff9e81509c61a63193099bcf717025b40a64781e69ac384dd00d53db06dcf1937d1d21b610d545f6899bbe635497151aa58a2b4409d26b08746eb7107defbcc1816ecacfe67672f4cc715414f1b3b92347e1bc8a81211f10a87bccaecf88c15edee4c96f1430170459d0d40727becbabadf19b50971673092753ae6bef0c7398b0beba046d4650f79a8e185d0e891df4292c7923b21b7b129dcdf2d3deab3d347c41b72b6a6734bd5cd65a759c4c7220bfcd56da0da0c19eaea17ce3ca8b2be300ff2304d6ff3491bd968967a03ac6ee3a00b7b323ddc845a51758fb2fc5d4423929c692974a697292449473a2bb6cf54e54e7b06b08c93cfef9bd5456df53213551fdb27bcbcecdf39010ced95eb9b94c8f9931823cc52ad318169fabc421a5eea33f54d319205d37e7782e54d4cd87affdc1495e69a6972c0ad5ea496e4f544f3f4ba9b1a928594230b3a7261990953f56d3004ede33563baeef9c737a58cc5e002a179ac7f4667dbf06d46e638850b1ebbb785cd260ec7b8e5f5abf0d30c2114eb80ac260b8a1bcfcf9c17a7b0990067ba3b867b2e6671bbc74a0b0d26fde09fe36596024247406e9af2f0f91d07f423bf5cd424306c7f3804f6cfa2620bec1efac49de38c93ab8d6fc54d02447e3bc45e5cd9ad5a116271272c10337e7dae9e4fb2bb5a6b99fd87adf3f7f437d4e20dc0094a80d98e30705247df6f41ca6349054219a9f5b53d025175956c36c5c15b4746a5b417ff2c11ab49015fcde18676074b7f3e262a1fdf6e43aa10f917afc9e0dccb1bf7e707d42e95e2af6bdd6a07de57c1eba5aec36e0440c553d1ef0390
5901c0208553c5b7a3ab666fed918559b82449852ae2bcbc856abbb94d3076cb59bacce2a075d903135f6c863c5d0df1c08d92de5e45a6d797b4afbc0480def944c7cf49d2098e276cb1bb84d29cea20d3f2fd766c1f5609cf6d0639b1c91268cfa6804e76e32d0af3ed3a71de2b0bcee4282d64eac5c8369fec71d8f8d1e183ae0e5ccefdb7f80843ce85284f8934d26aaefabb2f5e87f7bd23b0465e65c390b3966c261abdf9af332d8d5b050ff0cad66b572e2cfcde4e90572d8a4abeaf70b3ba30221d53a29c044f981fcf06fef7219229e2430067a19812a8ed3801541aa4d20d23dff5d192216f15fe3279571b568abba97681cae8cf248e54c1b205bcd8c6beab7b99e3ca5cf5854c1a2089e5f015f9fee771d796f91cf4244997f6e9100f4d2230e3b5d9d76ecbef6e0ecb8ade5bfa0583fe143515905881fff3a3dd6063e200259347404bb6421affafa6e71248b2014e29cbdbc65365a17370a49f8f74727be19922277d085c3a3b4f007433abcb38770b87ff77c0f220e9b10d57c11d65d9d47d0418e6adacc05764d181705ccc810120f84fa4973f53c598eb7e1e7e2bda9be0b69d4d6b6ad3d67166da0c6554d3493b80b6f9af9fac4ec833331aad6e0b9ca0f4736fa73aaab133865a028047ed950aec2d413848bd55a9e3b43e165119ad29101200d014924bdca1a915c2ef7c31301780a9192c2a1139c1027d261b5abbabfad54c22823a18bc5a24ab577b3583189226671d6157dc29d25f91b6f04ddc8fe93a3853a9721da17f98a578f4b650898c47b451cb6a91d1504c5d7502db3e1180aa4ce9a8541a5d36a949a3dbd76ef180d9eadf756b81496b9cc69502d5de0c1c397557317f07d1829938d5f5aae9a0feba2471f1e0b41250e409fb5cd15c53cc1e7968d18f75618be3ff2de8c86824d36220e9f259ce0191db8cd76804885147dc06a7da99a78a756fd04268f7f2e5653f8a58dd1122fd4c314d2befedca15a1e5af8f46cf0f5f728a1b0a5b6c187e1d74edbbb0d8fd28b09b1aae5c5c2d101ca435ca39c392f8e9e65521ba37215945b9700459a55d55c574d19d71dfaabb59073697ed77dc3eff32dcb5df2eac5c06aa2b67151d8501da482cd1b0e1e465e8a72d453468ab4d7ea88580fb8763afb805a9801a680eb8939527c0992b4945a151d7c4b8618ddeb290a1cee3cd81210a688f1c46d1ccbe74fa8381138fae65331c4706ae49527f95e8a98b90b29349cffbc47b84e6c65e7ec1b28ab186b49331f6bdb7a1e404148977f07a5791fda942f3e43f0abc42059bb0e58cbb44c92540f9b796f4c72d6870a7701d5572da1cb290614d520e85b4a06a55c7577359183693cf9074e0fe9249c6dbfba39ee517774f8be997f1375b238e0367342b595d07433a9278f10e1a780f4e9e214fd0e07603e4dbe147f1c841dfa4d81d5427f6d3c16d5e8fb19fa38d6eb6bb8819886a8b752a7b68d3c951a16b6193ef55411dc369bc10b31c1371f43fadcfd315c0fb7f0c96d18a3b5a0b93154954a1a36de31d34137bbc48dfbf6e9c565c6eca88a0c274ebb6707ffc6963b885ef6a2d7702088a440178c4b5541f92bf5dda5d3acba75a0140fd3a765d3172f159985b57bb09bc1687e03ac02653cf152fc30ca18248d32c36e8bc0bcad0c069178df309bf415f016812d70596e30a34287d1f134d12cd64b5248465443ec1e2e36067a1221eaf5b8f63cb252cfc8bc27082c9d37c98e464590b24d8bb41e1f329ce7ad10efe2cb035ea4ba77991453f9b7b49493f67df5359201e5c54f5904b03aa334f60a9c1856c9e62cb72b1f1b91d93f35c8a3cb1fa2782446047c7e626068c4146e0ac02ddcd8adad43c95a138ad184851d10aa419b712ef67ccee478a6ac320c98a94a72351b89e65b1f8d9906c678205445b5eceabfa347e04e749ced433b413e59dfcfcbd59133cc3d7794a1881224106ea5a33c580800177a3c3df66835720bd098b95aacd64d284647be02079049cdbb7d387d6898acf6104c185ee10231297aeba00eaffa52ab2d6a59160c9f9be076e35a6ca29f8dc65969e0bad5b423c2fd73c06a49b73519cf5222e3f673e9075608b2639a2de9750dfc6693b17f6a59515d45b5bc4ee40de38843d99dfc0f32109e042fe0e24fd630c472c6632fe332761a1a4bc0de6deca9dd8cedf4c0caf69d958886b16a725d3122492d99303b1e986c078660e66aacf4b2ec202d4aaa22c98af9babf64c728ae1080f676d2e5e4a2f2d173db7145cac31381da05f17f4a4c4a9a1242664477c0514f24cd24c02f06ada9e1c3536960899f0a192a627de1eacb9759a1116ad6ff572c2814e33fbee04c020a2784715e6db9d70a3534f2a369fcff610762392e787c3feb6c423b042be20c65c7178ff284285745927b79ccb654aeb12e55e6bf57fbde4b0c25656ec7850a482bac79b2ec1fcaa39edcda25343d216073a761ccf5f3c70813627ab3eb43d918d50bca626118bd1b
e56957c2be9a6cb813f29c38a3cd92d9866e500b9c551b01caaf6c207336dd274934344e0ab255693ffbb84bf663f91a99ef93d18dc3ed1726e998009c69cbd6c7eb0b57ef3a196c778eb27618d6eda44b18d1a232f0645ce594931c99573aa1c89773228299f4f24f18d38dc81d52c411ec747de7d3e5cbf66e90e8d5786386e0db0cc8154561488475532e3cd488faa892bdf34d42557727924d6f518054906d7d70354a374876389ca3c8126644aff1892f7eb7d09188ed26de5b7b46d3c9f12b583ed1372ab797198aa8eefa1867f65115f40433af8c7468e52d8409cc0060a957501964b402109f5fcf3136d53499f135f051a7f0ec680da0762680bd0658a7b43fd30e61f881e04e3578041688e010a7cd7400c0c95939146b8aa81f8f96f3881b0896ec4bb8e148ab6de245df1e37af156de9c5aacd85a6b51490cc4886f90184b03f304ca323acdb7b054f81707b04c92aa05749b79813a5f1530388cfdac5794227a0dcbeb602492d9032f734143551ab04b2a9155928babb891084de5121b144818b350a2a36d11088fd7d7c0cfaf497b2dfb9c24d70c5caa1145c8ded7c4d40598249c1b3a8acc78212eb47f7d9cf7f869ed030921e7452674313f6a56603286d102c0fea8cd9cf77541a8173599ad0e9172e9521942aec678af80c8e9791d046535960a947a1e484162be717ff021f2cc1d14439651a24ad2f24f73b885436508a007e0d568e14cde40703fe595f90c47f334e148d82b80348d44f6975cd415506b4ec011a78b14d5c40ce514a1b69e56acfbeb65ed934fba59b440113890842f49440304c237a3d1c634772ac96a537a3471bdbf406284309951dcf1c53fb8ed877803d064adadb3b409de266002cb3295822112634528b7008e8db6e4d526865777777ef5705fa044704978990dbc4876db3c316e5b861beeb44c98c0a46898b1ae74d929a21331685321726a6a2d162ab4cb164967459ba2f8440f161a3ecc092e3cef0bd5e336a98978b4bc39b2b44cd153356479915c418d1627b9872a52cb964962c1042ac0c3e2e8e1d974a8e223ecba669864da1c9c575bdb932a8a945989152e6ba10636568a9c1f194aa03c74b7860b22c10b14c8034c97073b8ae0d1cf64dab7b3f2fc40c35fc76f8c2e145f77ee68f4c32d5e7d3397f7ea8ec605302a698a46860a6218a2e12132c92221815490a4414c90b44130910222447903829ca3a7c21e9921d8931632f19c9d04b2cc4c8ae0e7e7bf03b832fb529b24333ea778302c1e373a61892a90951c784c504adc3d70d4ffa044d2dce9662f46d47ce6869ce49eb104d86bbadb5f662a78c49b673ce39d34b524b30be23b7d486a58772581fdfabd56ac57992e5b1939922c7d4a46fd267473bb1b3b373c105da09da317d61c2a2b3ac692f5127bc8e9821c2428fb0796c2174436b0d416cece03b9c57203e2a10a5126a16ec46d71f687ebcb845dc31dc46ed61a85f249e75f8ba814d5775f8baa187eefd0fd209387c1dd9d22f15a6eee1211dbe8ef0781d69e18851861d0af491a8236632a0b50e5f4c524c4368136c510a2f0079213982c4894b5f38378d939e9eba85d29eda65db2a178cb74d48191ad8c4f0f4f110c9d02541d674d1af1d6b7894f0e221830454720c42e09e4f4f97db8bc851ec91401efe305d3e4fa2238f07f66baf752c38e0f180f6aad75ef5b0a76f50fa400a640e05ec3e9001d20732206a3dd91dd1d313e9cfccc1be49a0e8a9d85a7827642bc47262d150ea58829ad0a9138fd3fbe9b027f6c75282f00ac2244e5c86c79f93888692045cee9a7f05897a7a058981012d017491d0498b2f71e2a4a901a64b9638452e402969be0d5a2df86600f94898894e5cc8e13b402b09cb97c3d774011106337c0838fa2010d492402b3211f219c0b6a8c4d1b1c3c702d8038e6f44eb4c8b143e1ebe6f417d5ccb480be29223d08d960d9660c05185060a683e2c546cb87a5a1668edd8014a479714e0380111b383b7e248f996be0bb45edfd0c76a4df930d08a4264aee5a98822578ec068ba12a48696abb6c07c0068f9b026478c6205881ebe305fd4475b302f6aee8b196374a0c10508a3272b4a64b850c3f7e4d3a055e3bb52e5f369692d182512a1e4e3f24da0f5e48b7d73a755c407a4d33a01062f5022d2d307ad347d5beb06269f980f56e18965720b42c53e1746aea886cd9780561025710534f00748442b4e552844d40106c1e633e173f215a005e653402bcc9b4f95410bce07a3a018d8e6c670f3e32bd1e2624374fa20992803ea30e5b3c013dc5224c298fa220a3dbe29374ed8c1e3fb7e7624c1e1ab40005a43c6c42f54a0191648f8983edcbaf111a0a5e51bc0103f7c6bbe9c56035ab08c8f0fc9d7420480f8742b886fcbd764b7a20a4a4d5ef8e098183e1a86804b8af81cd022b2c317b3c02cab16ec12afd068e3838db225c3b780160fb304473e24a5d9f14ae251a5c80c574cf3024a7d8922f
851a40c9a3466a468500172a6e58a2550e095b5209637369ca4be335f4453f441a9f7defb3bf7de7be9941b27e47ca5f4fb5c02c1179016379474ad07b4200c4ec5626b96ef67ab577ae69c73ce08b9f4e0e26393f9e9a957762080830a0f3b12c617b404caa2731d6e0260e1dce1c79757fb40eb8045a77dca37212508f2e757ed830f74fa93f3bace7521fab2be7cab9d90f346a552a964c021d507e09b8c31caae87c709680370482db5db08ef78dd0037e7e4d2678305f81c71cf47023e90ee67a33b88bd002c4d000ee90046441efee24784f11c4019810a680c04c0e1434be5d6219490f3b60853facc6ddb38eeb649a208af0131fb8c144bc4cfb252bd527eccdede0b42fd95ecabbf9c9793ba4d01844530fd3e8414d8d76908c23278b3fb5ef63aded5cd4c9002187473de934e35f7b81d8f169d66908c5e60908cf17e46ba52e6a4a49e710dc55dfdea66aca9a4aa3b2ad979f3c9fd0e4012f7ee4ca5fbf46fa7a3dada892c3f759d73ce5caddaaacb3d3252d58fb9ec73386fb3b859e585419283600512a8beec22c4a619349ba67c0283e693f89c93ef45a92e3b4d5b69efed9e7b601087baa702c2202cfbd8c998b34cc5d6b44ebb5deea171d9dba7d93483f89c4f6050002009fa34c2ba749a41d16992a02fefea55efc5a19ead3ed31eaf3a2fde00c02008b50ea83a2fd28f4ef777ee114bc4dc23cbb29cd4f3bdf7e61e51033b5971ebe14bdf7df5928b6b8b54df62644bd116285df7ec65d17cfdda431c638cf531278463ac3216d9af992cea34621c238e594f9f8fe17ec9ed87197d4c1f4b222c75649cd0fd113a554999bd248a25e467d228fba8ea843296ac114be08f19cd79cacae93c298b68975d2a3fb8347a30837e74a6f4299544116359d4bd15bf394d43b17ada6d69599ccaf85c671e4cdc792a362a5727ac6cea3abb5996a5d01d4ba22e7f47aebe8ab36f624a59c327da8f1f71ece0f4f805ddcf24317fbeac91ed747c934fb72e443ffbfbded62f0d21d3bad6f1dc9f19e4c12431bbbdf56cbf7eaca1c85e6877397b0f76e7a9d83aedb2462c91755eee648da8417d2b8b7a9435603960e01a1a5690c8c482c878c243a7cf8f3a05a2c30696231a638c31c61863a46f4232799cefc06d44dcd76a135f4e66e8133f421f28fbe4668792b5012b4ca40fec3b1dbeb0bce83ad34e64a0c4e7670d45a69a9d551383cb87401b4739379e1d840cf8a2358658d64927e7cd8f31c608bdd8d92775d239e99c4e66fa9c73ce39a70de2b483edc269872fa7581503a6afe1573a4f872fa7214e3d7068d8f39344fcdb867b3ffd470253fe0612884e6ffe6cf0726af5d9e1cbc946f77e627082d1bd1f3070aa97355ecc4c89593760943855229e6449420291129254d8617358e1b2e0ca8d7ae6ca50858a63691061e34c91282b864b967b14a5d5042b728700a97060d8a2e3167d56ca8a0c3b6585846ba54d8d2090ec706b1803c3161ba6985142e5062b4a96870c6b58b85028581caa1059a22a2e7c1451a7d2dc2b5296894c6dd3c5a8ca3581090e4bd6480d178619507059352adc18546a6ca162829d317597a801e2831d93c37de1cbad726506273d90d8d610db828fcbe3851f38ce4c29ba4a534eb0435a906488da821f6c0c65ae92305706161750ec100e503678f9f1e2719d72d8afa52425c68d22a585106c6a1a205ee830c413102dd6871418afabc5482c88bd11837581054b4617499419294429c13eb9b937a079c2430ca84b840b0d54ca2cf971c47ea1c1b200a98e692196c20950685436505edc296fee1721ec911e96c0ae0b5e88b0628534c1c2f48390a81e55ca05eb3ab2e1898d9be4098c1b048e85b2a60633b54cac05989be4d4e4092c0918221647920a3b8856e0e2e4c615e364c88a8963b51071bf33954c940d23c6be90658828f748533553a40601e432c17003cf3a9ef0fc59a32633ac0e4d5cdc2c6fea17354b6676286382189b468b7532e5de58b2c2920f422e0d1fd5cd8e38392c91cfe66032e30d131735ea4dcd41cd8561064d19eb8318bb434b0b5396dc1296ea9310383e9c769c90c3fef0591296ccb82d96b8a83bbcb141a8b92c98b93bca14116391b4344da93c2cb12d580afc592cfe946bbfa2d9df57697b4b1fddb3ceb39f9f473ebe9dad6e5c95d7e73bc639ff0de39c6f18e77c4749a29744de1c9244bfadb4adadf888aefdeaf14a7b55277736de776cc7766cc756f18a01d3d413d0e10b8c51d7ab6c3921ef3efd117aac5a57ebaafe4abbd8a33df6fc26a486cae67e6db5d2562bed4d4c2dfb159c907ed8a3b6ea346d6bbfb35527c2ec2b6f842e1443d0afbd09b9daaf75bbd3b8b6d2b44ea5f24ec0c2e37d0665f2e76b0e775e2fba364c296152ca28b794dc36c9925376acadd2f6de5b4a2959acbf40b23695d4f68e6d3ce330a79df1f59cf9cebff7de873d725a07f4c678f3083dc2f8f9be8979e7534ea83eecf1de4befd319
41a0f9b3f572d41fd0e1ab4b941758f6e2e40529e7fcf9578f41c62083219ef6544321743b2f665bbbdaef7befd5b4577150fa689db759af62751eef563a2a6d5f9d751e8b8d63364afdeea80ddb513b6a85b9aac50c8a479fbbd8e24afef2dedc795c074a12b9f3767ac624f20ea7c36d2ab6cebbee72abdc799248b56d9268db766cc7766cc794b8bc5134ecf1a6ab4b38df462a37a65848841574fc7376ac61dd5b213dda57482fcee712d521d0e18b8b52972d5d92ba9e1afe2e3a3a023a7c7159d3b5dc76efdccf865efb3c24f1abba216fefbd72e5bcc4f839e7edce1fca20d66f0e9360759e4a753b9dbdd6a9d8fd6a141e0b217de967f9e3c49d50fcfb56f73abbe9e2f292a08a4c445daf464820edaf8410c2232109372cdaf289a6a56f3dcb792ab66ea1eab59652caa30db5942be45ed915743fef3cd979184bfcfb3dbaa9ed78f6e7cf28b5526a1a0ad5d36eab5674a53f5b451939e7c8d435de9b831d63bc6517655c1d6540bc37cd97f513b3ba9f2464c4121abeefed7ea3f6dadcaff1be5bcf1c89ba0d22939a91de51b5850e09345d12e8f2309ad288de30cd09693867adb5ce764c6a99e91dd5a356bb5af9a6556c1b8ddeb02da5a3ee5bbda3e2c7ce8b51b362bcd249568e77ac6a6dc376d48eeaf7e7cce4e5546a92a05fdf7a3a1ecd2495a23954aa53d5a6529a51d75a6bbd631b36b17c16c7b949a34b67493487b2a6f14d6fdb8ef190445a4a29a5de22afaff9b66351839cafd174729a41d8494ba27ef57b2ab6ab336da9a66d3bb663f7de1dd5e38ed5160000b3779186291545230994fd6ae84e9973ce4258e72ce5cdfb76aace931ad6ef3cfa3c55e563fd4386fa2996889fe9a82b354c43c9ced3b06efffed4bdac2c598f3b0d25f39cfa29670dd3b09eedaf3be33f8f661075cd7a5f27675f9df9d43573a8abcfd7501a2adb455d4b2ca5ccaf9f34cd65f7591ce7269545790acb1753386ba8782481e6d4d454ee34541c92457b1e79f3284ff5f8daabac56a7baa7024ef5fc2ab935542c113514d5b07e73ce5943450d2c55da2173637e829293260856a8400265cf730832377ffee5ac53f41261936b9da5949c90065752decff9ca55279457df2396907f71cc71af76e7c97e6bad55aaa8f5d62a3ba1fcdad7ccd6bfb6fecd4656f26326a6e4702c91659ff9ab86227bdcf1a48859769d02cbae5f87a0b3da698f5a8ad8a245c74d3d4b811f771406f7f0be1c84190f0d257ec2efc3fe3d3ed25a6933ff7b317e3db51338c79efb82dcfafc4d3b4e8419e7679290ef83ffcca07f1c84cbaf1dec389cb3098eae2fed7b527642d262250e218467b66ddbb6a6268d2b842bfa7c9c84cb1e3756b1bbec324a1f28a412485ea8a10e5f63da740574f81a33a5e3fdd4bfc3171453d773d78753ab1fdc0bc28d1327ce54975bc289f761d55258bce989979c0c5937c2e6e0077eab7580535889dfe2d87952c82e1f0523f058c01e82b55fbff6da41977602cf6d6246a8e956b131775f73573fa4da09ad3b1efdd9531efd90476f5d9fdb0f95d4591dbea08eba9e1b27e13e12a85b1fe12da0cb13f9a38a7268d3a376225aad038c84536d37d450e80e73deee18630ef0c6633f7b1ac2ee78ecd78e1f7ec67994c7761fd48e3b082db431708867783d45bd9e64c849392923e51e4a34dfc9a2e630ec396317380edaf2334e7ed434dc91c3f425fecaf70b7cfe7dfa3d9bcbb60bdcfb4ebf4702e5a7ef2381f0533146c4286d400373536d03b0d7c7f5df462855754aa9ba1fe6871e26aad3dff4379a8112c76f515c19a56ac67e1f7674ffc0f49cf3e78ab288de8a6eedf5846aa762773acfe82d6f0b0ee8ecf0294f7e2198dfee15175f8e45b71ce5f0eeb4e326170d95a9de8454bdd62a0e05ecaafd50e300a5afea549da75f7e55a954aab7aab7af82cfa29b9051e3368b9e39155bc7dd8e4dc8b1d051bac94918e3d873c7e301ecf951c09e3b9eec85ba0c94384f8afcf473473fab9cb76fc6d987da65d12fa762776b6374033b8d54d26e47d51d5525d1e7bf99c3dcedbc5d3b6fa7d34e6fd88e4da63e87489ff93807973b96c3e5e281d424c53524816097c1101e87a00a344cc3a4cfbd1ae6a274ba7aa57148eba8c14bced34f9dbea59342450da8d151af2f8ba44fedfa5c416762d658ed0a5cbe1787e21091042ac2d4656fc08c8e74f4d86dfc35318142c569cb170a5568e24dbc89a3d0c4040a15a72d5f24909e5fe697f99484a48494a48463671bf13d3e4a5fae3b0a07c70ff9ce36e27b7c50e071671bf13d3e3bb6a5b81312dff9a42626bc69c764136f9a7fbf297badb3a66d4444caf0b663302243b1a8c72ec66e87f3d67e1dffc66e733885a83449ccc7986f1bec465c287fd551290e8946229304b24c221409a4a7916964325d1d3f6a1fd09e230fe9e3923e481149027911b7728df68f387ed8bdcd65c4e388eff15940fcddbdd83d0d1535883aaadbd74225b
71bd5b5ce03f662c5b05989bb3ab144dd4d8600f53a8f0239f4de13e835cfad604aaa2a9ef6effc08a3270e7116d36b39ff2e51d360690c0612e318ba065ab95c6f0d0f3bd2f98fb4eb663f68d055518a69a9bf73c459cf491b40b087a16c0fa78451e37ae937bb940f82b9df0cbbe1c568d1195f5731cdd972953b4484eee6e197065c95fde7209e3e3e5aef75193332a88c581b3ac5d7c3d0c3aa2792f5e2449f4668e32b2b220ff965261e1632bc89e3a5eaee41b594bece66df098b626bc00cfbf49fe2b0e81af755c5a7fcc9212dba4881a31767ea2c43049c33723ddd1b0bc5d58b35e4190d2d9ab3f8693a1e1a6074cb533eb5ab35c96f16c944775feb2ce144754c6d98506b2bc301e890a83758168e708258a2c2686f6bc66bf67b26d83eae92745d08d88056830214a11f88c569d35ed6a660e9a4c2d44edd3f2872f21ec4ff8d8f0de0c4fa6e5cced8f9d4f962f7fec38080c28399581ab84fbf712ec7dfc1963b1d8a246eb5ba4b6f0ae0eb6963a4a66270e18797603eab844eb94cb37ef837123b59aaea67bf09688f61f44e14fb56833a4b2259a5af914ca61f7cbaaab66fe810fcb97bbf092e41448661857b0a992dd930263ca23caeb82aa9d05e4565bd51f10e5b2824fa7e622a9c034c89bc6af1c0acb16eb7f783b9b06372dd715d9634267fdaa05bfbc9833bb984ed074d84e306ab5cbc9fa4aec792c7bab69f13761977ea90f7b31f7a386f6eaebc9883ad86faba4f3d915bf324affc391b0e1f67e213594a4ca259d1ee92a96e4d984fe4a6aed46e31085c327f663a150b456069d899357f8efd76750cc3775ebb4daab503ef1645f6e6f150f025280da97eccf58c4b4287a13925b86f37f4fc391957e42d04789efe224645b6c1fad34f0046eb0f64a03e9b097821094699a380ec956445867bc25b74f498d729cf66fe707900f5de70151cc31d38f62fc1fed42e1c021f418b3cb3eb836940a2479cc305a2fe001560530764c469a23ed6a89d89c6b0268c706b9147aa01cd330c84fda197d322cf938bae83d3769755ab3144039f59cc8a1567b33fef5f7fc020f14ff48e6484b9e972e4c523d778c714e1f5dd9e030d72e1ec22afe58b04b7f33a28bd39e45438bb63a9b3d050312a34c09eef51d228066e40317226518a91452d3a90b63a0f0e60ff49e5a08b54fb2a2423117f35c948f721faefa0040897570273c730ffcc365baae658c991de76b2f2a1d27f362c8f6f52c26d8d8c61219bc9eebaf643d9fffc589ea7f1305861b0720d355a432af30957f9d36523b1221cd2fb426e94d14f82073259f36ad424fbd424579ec20a38eeba68f8313d72e4a11ec813bc1cdfbe18725fbb71545bb4802e7c6a2f62700e1109f878ff87a6d96283dccdf615a0f1e6267b1a3ced268b840faa0435a4baa7a2b2dfcefe91a9c668d587b5e0828c81fc2fc9285bda90bc6441a6a45bc0dce3288b2c1e6c4854ed7b89fd8607a9ab089bb1d84a35915203cd56535dfdf425a2e1f64a9e8bcaf32630b5f216ca136656444dc3eb5df7b248d63a70fdf73bedd0dc475112e343813a8d049eb28772c1702986581e6527e429ce67591d6ea5cd0baaad702815448b8240a54b59ca49d8b69d54053783b7053008d074df7970b6a30032e09b20ca2264fda9cbad1be159c878218addcd697a048a2b3ab6a0766a8c971d7b636bc736525ff8586d4bd9057dd01f775dda29e4a8bfdc4f34181870df81994153b03e23c310cf4cb7d41aa32d5861343d8ab32a584f8e134f882141050a505cb786fc9f3e2fe56ac2b776ae453028dcfeb8c4f3c392131f599e0edb46a0dd069144a3ec031323931bac37c8c8bed84f649248d9fa4bb695001eee2b4dcdffb9041ffc40a9b61198ab830088e0e89ab026c0bda19a7cabcb8fa9bebb3dc496b97dc1bc805150e56a4a229785444b3412817b36e461518711d9778c599145abf3d6d9d7bc100c76079e263d55e26460deb9eeebcbd4d523710d925aaa8f98b8594e1a8f6d67b75deda36ecbf5818766e2ad99b4ef3f1d391b62c5389373abf06a7b9d12270c5a64cab4928149629c4d82e55edebe2da33720633024d971351ea0dc7f63facd3db4256569cbf89f83b1f1038f9e7c1a713451822fd52c25e6cd5a96f16ecc6fbc28caa60946ba209d9172a5b439284b3d957707b04dc748fe1ea519a8f6707732fefacdc55e97679a3a021861a177afc0d249fa4d2bc198bb54e17da150fa946e7d98ec55ac728f1eafe5073cf1b5621560eaed6d8b958e3980b335a1d176bc5614a470b6cf87e6e53de57a71be766a9ec119562c1f0458b76760bd29c50c50c28ac6ec4767e31b585fc19e48b807ddf325037ad1ecc375437a057806b58ab40a41b2c3bec17320f3a3d4be2363589924d6e2280c068e77b60c6c7d25c727fd9651e2bf413b87204a846586d56f3481454b7e3a132703d1517765445292f8f2238eca7b5d123d73afeac362aba791a7bb45
e441e336d9170fc6ccaa087fd1d27c226983f873861b68836e5fbe115088f81f3d27109a014c4e13df9b9505a609b1a1937daa1861250d40286458d1d9cf4d5ea01d30d2497d46d27cc52d30ca3052ef6562977172da932fe3759611671c25387d2f568d2b02687c31b665abe4a0a6982a2c06fac6e361bc5a45427c84ef0e36a8c15bf2bad721ca0afa9f32a23363a8b2f133f914eae4bbd2c2c8432066c74a2a1c0052f799b516ba9e09d4f1e0f376be37427eeaafc6178172b4dbfce202a61db63fb4efbc264223d1d22c66309faea7212447fb4a36897c7a46134dbf2bf843a782eab96a37a2b56bbdb2044eb3bc0dd5e2dc2adce6cfde9f166275707d74ac40d378773a2bda0604578b98487a3dfffbc92d206121a4fa8ec7dc3733f534720ed897ef11aaf7731e4eb842ffd4f9830e8cfa21f51a9efd2a8040b6c7d2e362addb6437a74069a528ae6ea8cd43ec4a62de2c0b6917cc343439eb40dcb0e8320277fda44ce1897578212aa4196313719f990fe29d426936c52cbd8f5f5c8dd009dd7c530e237ef78216e8162d0dba75a2dd26866064ccec7911b308d00c283318192e46e449657af1d8bd0f08439b3671ac46badc731e4f086f3cb0c6a56609765b8d9da8ad00cc3e68bf1af5636ad6941f9190ac8ff1cc96a0000eb8abd5ad63949602b9e6bf490c8e84c971488b1b2fb6a07beb639cb1e648e5fa7d889f50e5ae0a6c440b871e1e60aaba596a2f055a00c775f779bc89878e9eb29f4a9a91c191a426154fe25780a06d1458845bd0c16b44e6434051119821abee230340ef5fb93fd9d9b9ac3064b40a69164499ae8da3d5b7982d6986b044ce6d29478d75b67b59432c2ee6fddbf9b82418df34026920dcd4c73beba72e26d16c887ac3369815e6e810f8a275424a107ec4f6bca411013c7f38681bb380da6e5857b80d1d41a88dc678a2d63a95aa5aeed55d5b2f3c6cf32549c53d0762ec993c1316b7eac9739055d7c360c18d4474c2c06a98f273bd95d5909324293f07369f30312f51443e34d11e30912dddbe516512bb77a84a08d8f3bb2a452e44456cab60d50fd069006a8cebc92efbed3c4200c0c2736686d6459a283f389ccaaefc0202d7b36b0aaea4457aa3ea19c0ab98201008f7b0cc277f34ad8735cf0ff9d0793b8421787b7bc76339a9fedb63edd002db08e8ee1f2a3636ab569a97f9565e8c4b35765b87934619d6ee749b4743b108c5241aa82642b4250b7386941cda118a34ac5e08ea80182842519adcd29b0407651ee8c3a2f2911880fa32dc915ea9472ed1716293a94d0e672ed8309edab30da3654e0928f19e3effed85766a890d358647be4646f40e19d2c56ea98aafd5a77faaa4155a68988fc37c1a8ba733344a18509d389cdcfaeda4a69083e9393cabb7cfd2925f3e3e3d1a177830f951ab0f1a8ba2e7ab4ce2b030df515f0919cf29a935c42362f8428cd82d68c6f73d97b3ebec2347cd3f66e6a41f814804f89bb1f81e282c3a6497f1f0244ced22383aab588528c827ca7d74fe7ced6591142e68006456e08680afaef6af8d1fabf8a5aa29580c04289ece3767258c2aca7b5470ab2af859712fece38d68bc59187e854f2b703bff3f1723ad00ed17ba74a7252126a8f062fe985f19239f8fab986a1f6bd0c957ac2da66c5c60a0209a0c16a4007a3aa300b714a1d1c6b8f12c2cabbaf3d5706f5aa1148711bf0e8055e0dc174146d372e1bfb6a229ce9ab0903ad05d0b9baa1103a5613c689718f18a46b81aa3653b726ed511711dde9f51dfa1073012e75442ae1144df4f43572d282b791af54ed1f7b58285f270d20a2f9d134ec749ac9f30e1abd5e5382580d82c974d7797fcd2b408743aa3d6003b0e5f05e681a2c9eea183a55523440307e112635e606c06134e8dc769baaa48b32eb73b4bdc914b6613b47d80826b477ebe5fceb340d772295b93f08b46fb94f354e2298a66823d56c5a72df8323343e7b96b41be075cbae8b6f2b8a402c26d1f618a32f0dc9b654d5bcadeac519a3ac4de23b7e89a9cf92e7e5a1e7eb625e19baf0eef1ff7c9cb5b8d137930bc4de9b773d5f2de2f3b92d819457c7ca1accc79fc2d0396826061549b3ffdb5bf2ddc93cb497e1b0370527e62b0e3d9ccea4014a362d74d2571d8a6d094123ff7b04459b796c1b1548654c02792b88f3ae50c0c2df5c226584e640a6a0df1b4b9bd1c4aca937b600267d62b5b255264850cc966bbe1e6213fafd47a9767c476de260b9115ae57de82b921e72a1a914529a5b1944831dd6ddc8bcd83db00354af0e0ef61b32e73875b3e7cfdf1db0bd4db7b609b842720d7274e2f772a17ce44a358bf7c02d2799d3aac462028734bb9c83ed44614d1d3d61f9b36c25ac6d121d85511442bebc458a9f781992bf4b52e35dbcae93b06b6d2cd506d284a5fc8f2e23d9aa553020c8764f2418cf9362a55ef9e8297c9d2c60f67557ae286ca2cead33dc1aada41852ca2b61650e9535
ea29c4e25c4e1422901b12842bea25d730c683d0204feb9828cab7d5b6891c96096170f6fc9664502135456b7492f1b2154d10190014d5fd3ff8087b441b5c061b7370d40c495e35feb317971b8663f8373cc85de120d820414050ba7ceeab26735a8042c50a3abee020163675c1713922b5bf824fa5c4cc762ef0477a49b7ab2ccaa2f82ae68de12082153687bd5e3995851e1e95b297c1aa059e39e60c6531e0aa9f12bac35796ac90b56189a72119c1327ad4595695c30875e346fb5206193f6324d1b097d408e069519aeb4266f2026a036b437f3ad3937b3cf1d6617759288b0bcaf31d1f57303e5238ab079b9187a440f4687b70c3ba505df30308f9e9f4412d027180766458092d03e8c48ef9a8f4ea5623f7074ef2baaa6e4b829d2cf3931e9345decfb6a07e7cbccb7c769d16b5c18d15d97b0946c4bdc41e6491c698348683e3ab6c84b5aeebf51067b35aa67acd3deccf6b2688a1dbf1bed15c9418e559ed4057248fcf1099bb11f333808f9bbf88aec97a32474ac17c7573ac9dc1b8dde113e2c68239d5627d55abb511c63b4e4002e41d0246b2b56a74dbedb2c4870d68f1a83ab05a3762b8b0361261ede88e256f0d03f733848bbb3646ff443a4da7422813898b1db10fa73ff3a972a37f921660dea2e228cd4a645491bd90c9ad9b8d669fb804c6c1e3a94bd258a8b571624165dfc44bd87a6622d8e5eca56bcaea71dda525f0cf007beb78b1c698583d154c8f495eda7c7095ee0b751bc91b2792ae8d59712770377e04790ce791747425d1bc4b8e60b9104339f50575a2ae5118511c0e6011aa050bc5d1731ab849f9ab0d2616388a90999fa4b1f14b7d47263ece9fd25911d9e31140a9abb23b3db4baa83628699e83d79996ee29fc5b40773835d475c298e6050b82aaf5cdc045b38d44c48e29a24d84ed4b96d9f55a4692a71410e89d3854474879a2bfc20d2344fa47a1dd47295f24698a4f7db465950d096562ae73f82a1f34b285063e121ca27c1e63c83f41f3ae7b2aaf1211a98083d117dd42e2c5ba7b34f6de809fa3e79abb35452f8252cd83f2889fb3330505bf22939d40360a0c5ac8cf0f6602a8df576b55ed3c4ffe60ee54b5cb8969e3cf8b9f7a8ceedc91bc632fccf19fb339b42c974bbddd6782e24f5ffa72fb9d29bde0b5dc3bf56af8896782df8ee99877b6d7ea0e9fbafa8181db35b3dc42d6be2fdc681983698ff10c4ff46d895e29ef000397e823b4f0a005f9fa242a49bbd0aae74532de36b1256ed519dd58c046d09c8d37b4c78120677427ef594734b454657ad410e038bf67ea4b5726e2785c28f4ce5c258b5b289f9ab9eb58df0a905588ec868fe49faa0b41817c04dca81f9278237c218802136f842f0c416540c104def7a619b508331d6024a75ca279a7322a18dd2964d866295be75601ee8710590e9763b469d87fd0991631bfdb9c191c3dc5939306bda24208647fa384dd284ccb3634c8f8ab66dededbf3fdc5de98b701e5bcea1f97128666d866c78a2cada14a1ee0cb528306bba57a840102f0d9766e79c1253a80aa1e59716cb26152c68fd5b1846a537bb2063438adfe2aeb66b6648b80f77a4d14f1b68e33a451de1bf67d97a47144b90552380480a2c5b4c682b460d286c4fea80b6b00b0e12706d9bb8a3513b511e9be71f4634b0d21a3ecc90df9f1767aa495481a71ef8fae908fce2ecc43a1456ab8461bff14214143707b3de2e79dc28b5f355063a5ce68fe034ddaedb3c7eefe6e2a328f8571599145324e0222e66220e62cd174485a7aedd3db5e931acd7c24e21cd168b3ffc64ce9289f75726c37ffffbba00f896bd3a644f45f3ca09aa9492855987461cb3dfaf7fc97c3cb92f11c807e2e985f96972ac1563aa9220de9e0c6fed1bbc3dd27abdf9c6c6b34889244afbb93cba11e12c3890cda75e0fc3aa52bd4131900973fa0d8d96fbc7b8b5e21dce918857c8c9f9db35265431ce1b04f694c68da7a224af94d5c35e6a5c1349570eb8532a902da7fbb99f34d9e9ac980ce765aa1cd52917f8d5dd8a4f3ada09badfe433b13ef6788703abd68481ef5a6351a853e56ecdcb0d7f7d2741dd50e3f4ab88293a0e8b5598d0fdd63ac45fbdfcd75591f163aac212287582fd470843cec4ec8ea8b0f97259698ea9ede799151d56d2c129440871899d09f25749cb3340189d0d655fbb6b659377e0486a5e00c8f2636bdca1201d624ab11b009e3a89133ccbd99d7142c73e266c3e89724d6c2a0743cad396c948c42f1eef48086277f130a7d72019f76bc40237aab0f4b7e6854f6b5d0d4aba3c6f97ec1e099de84a0ef803bc4e2534e9586242efe3a4a6d24fd93096c9e7a844963a1d1404d6ee6b09a20883fc79a991db9b493e97e410e2492709de656273b3bcbb3bf7601e1a7e363fe288540e0b3bffe52c0b39ba58293c810d932bba503ecbfa3e3c98943b7c65f9b0891909fcb5ce292b23f41cb26b66d1d4fbc9b6aa8753f631d4
8a72cac221257d6a08bf403aea5b09df43e06f39a5139a43082531c873dcf77038d744416f636ce3aa2442eac1844d2ffe10b545d2b77b4b0d906c58b4c4cdd24e5fceea5ee26d143032eef562d3e16eb778117fcf24c0f3e5b42362af5938be381d4ce05ce86b8dd20e861ff8578e8864d2f471484f5cebf37329d49f0295ff3b02cd17e2e7f21bfe89d4588d52f47fd7975eee5a8f41735bc3242a039867879ffb818f03365c4d9fa9e8357db553df3a8688b7642e1f0f7d673e90a2859944c59a66d904110ad7c19d4fa19a27385c00baede75a6973ad20931d90f611208324c13e213f16f97c8693aa34b68d4a49f6be4ef7f11d7764ce4554f9f5abe24309e3bf1e2928c6548b573a1eb4483cc44d2b92771d74cf3ab906a5d3d90a395893de3bce8e8a33c93d4e0c2a654630ef28d121c77d6de5d04082f767e02ab0bbefb7899cab93d5b3ac8117009a971c62ca238fc08d0bd6f337eaa495eae4afc5b36814f94da3883dcb6630449d73e951060be8d796fe838e297f7469cce5352cb6a04effcffe2e324e326f187118698bdaa11a034fc5387655804e7b4fc50133e67318bf190786019af2494e02cffce085652fa99e18ccf3b927c2fbf2fdd4850b463e3b0aa0c439f89544165f371e89adfe927112bffaa7d01f4625d221fc07525833ec401e22d06f5a33227392ba1e7307c329808a00d467e15c95970469f11a212c6cd8a36e30c0232e692409d3cdc85860469e46305f7566eccc7b92d44c1f6b4456be6202b24089abfdf2272f75b0f4268cdb18c03a51a22481cd829c9189eb420a4549f1a855c085d0525eb011858e4a372b242ef8e5f39387e7dc529b0572233cbfd1528a08cf60d9759ce7f20cbe778a60e2b2a367606592d1f54e4195615c94bfd4429faddec68e0077f825b6a23f25d081171401c47d59f9ba1aea34320fa93b798f0085265279a73ab7d3394da5952b9ab1bddbde9cf35aef5579ef22d6030a493ffcb84dc7a93694edf862fa703a982767f702c361ace7e466b76ad3cc5cd8610848875551e9261b12dc99d02d63e826e68f39456a713ab5e8bb7ffdd3bcef8f358a56fc6a5ca4ceabc81fa52871c93769d31e633058d96e96d1acad8378ec83ef1743941c7d814e21c753458cbe4172b63f86c27fb0de6a41098bf919a4adc9aa2ce3f21ab7fef26339fe9347daee1dcc3b7d47a9790299023e487aab10d8e02cb4a8c53ef529d9934eb8967e6d77a33bf6efac2130cc62d1e532bfc1b41d5f2005be5ffe8e1036c664bd73c104b40d57d172ead4b6520deaf2bdb6ff5dba5d006a559abc422070ad4737f4f970580237634b68ebb903debeaabcb3397c67c4754671a99c3f3579145b81b506a4baa761a781597ed110b97e78eb3a9987cd93403ab4fed0c4fee7146e058d50524284d15453d5d5f1e6152968b7730c45c32f690b011b5fc409a1b817617af0981794e9171c93e293f3005db3dd72dbfd74f770395927bd9828a10bba26a88980034db73335cda3b302d8e3c5e75407c67c7c43df80a809ef51ee07db7711ad568bb32e5b374a375020f03ba5461d041baac20f31483837d05a4edbd5c5ffaaa26321b86dd2f6692797f7cad43aa1b18d38301680a0c36174a1a980f41d8871325ffbbb0541df7e8c61550ddf35d81c3d1684992a3473a5005df8c50db0ac9ef4fd0cae1939ed1d75708930bb098b518ab64cb8befe6faac889d19764abdb651cfa8938215b9e02e768120f35259c9cbdaea5b4f3656e662d1e202902b38629f02ab8bf472266ad73320b9f6f2a5eac226cc7f1f5ec0118f939f1b3acf9b4278452cb42b5ba8b0ab4350237233b40cb0074513c8e930cf1f1d7bc22a7b052f86d8617d2f35d54124cd6eacd5889f119540342feed2d09f62ea1013826968b6fdf46ba1c92a80eff6f0f1818a20370d02f62b662915a6adb9850389b150b22ce97b1e22768ab4d7db3268f930fe3ccfb1d34b3129963f300be678cad901ae21fa7cb99f9f1b91a92325fc18f91bd944137e4fc62214de3f8377e16e5150ba2f56ab1205be90102428a5386e8887355f3aa4a7e2864d288c056b336cb87e629c2c4f19cdec063e15d987f4b97b4864941aba20cfb64a6f03e379c713efde57cdbf6cc7caad6ce11966087621e682e22c6d5b8df369287d55b3900522368e7db70889b0b18b8cc4f7a87b560259e69cf969c509890f2be1879030bd1c2c925cbdf2f24cd1bc0c35c552da3b24bbe02bac1329881abc1cd7135ce200b4c01667b6017281d78496274bd9b4042c7069a34b27f4971d6779d9f610c65b4211ce832f7d4947def6715daf1b899cd8911b59303224eababd61788964b60f962705a638d26c83e73b26a90fd35395b95c4f9eb6def690a6f22e2a865a03e04e65164a58dbf8ecbf4ed962b1b045c73451b020fc7823dc4268d1ed861270cc7e8d0ce22058ecaccd96b13a127116a570af520008a1c66855f547ecbf6da1d6ac4ba327
ff305961d41cc02b82340dcc6d2f198dfeb4ed998e1cc2914177adbe73f9d995fb96aeb0e03f2e86fac84ded4355642c83c45cf9af28557a1b274127f17dec6f42627e9fba8f44e621918de94aa76d6252312d636e2f585c765ace029efe0f8e3668189a74d8b6f10a8dbab4d9a62d81eddb111af568371cc3b3f3bb4b44aacbb4ddb0a3e9b6e77545726eb98cf5778681cf9c242edfec9eed37e66c58c6b885c6679af578e3069345727d8ac9b71bca6419990fe4e8af8896b3252a86dd3e474ed1bdf0668f73b54c85db13886edfa36cd14f2f57bce937b368d09375a24577d3760167a6cc1fb19bcfd18820213ce0e0bd89a6f2183d51839bb9dd1b7deae3e01c8e5d84ac6a7c7bceeb67d8bbbc67a425eb72cae6291e7f2d563d2b4e0d318887a71e67f93329f073ef82172c2dd24b1ab7100608633a3fa0573ad2d32974cd133c95d841a9fad651bf08b75e0c0699fad9a2767400365b77f1f1b39a420f07e8089710dfbaa14ea0f5ef08a112f841f80ac87f4b4be079a661e862b2630ef25e3868440f3c27019ade6ca46b5f8f36c131eff630d967ba84014916619a1ed5cadae250f9ede4202de4933799216c12fca17f77aed5186427731bd5589f75f49814d429c7db4dd4033c07ac977de8ce5c837f16e2869dacbda14a00aa2a96ab89c43792f3e255471ab030d98d7f2ad5aec5e765b9e35d6f1c1c109f52d2fd2864c61c89da2724f414519458125c1bca07ab929ea6a5cafbc4dfcc87bb1c659651a5a2909aa0a3970243fb58a461bdd82c8543978d9bcff132008968433c1508baa8affc05d0134f061966e0206f4d2c06eaaa595f8af87d445ac37128904338afe48bb8fc7716c1d5476b12d43d9206852a251a68b53a81baa28b56115352f25c1e7a7fcab9f9f72ddacece40f669a8b4c0ebdec05782ace22cc076125d07e581049f7b153b8dab3281105b283cdad25dbd6b27dddf02c92fe0577166e218ea6880638cc75a8c5882a139fc164bbc9ad212a0d5ba8d71246b9a9f62d678c858cdb525cb206ae7cf5603fd687c5aad19344ef181ca07f9ca6ba13ef91a132311823228d1861b477c0bf703e8d811248a210f45aa1a00944c56c7e407bd15609121881d2ff559ee19be6569e23c4d333849767fff36e8521ec7b8a275d89fa19d5bbedab98ad7d5601890a433dd101a10f3018321340c39afb85762932251c30f0f20baadb7c0d545f19d58aea3609921f60c12cff1cbc98b8044e4d34412a8e949e384de32996ea3cc2832fafa79e96fedfbf70b2c08130867f11380bb47f794c9f305eec75a5486168757c929f9c4e964ffe3c6b12cfe99dd53f5dcb35a84ee4110b2c2ef69b76943a8f18ce3fad2dd4e454a92bffe1d419f1887433105a49ef91040bb6a0397ca16d23d05347c58e9716135bd393cd990b8014b05e7703ac091d900a5db7f78869a5f481a1c52ef688d18ad8829134fe89cf46c4740e9199667995cae0867799b3ddce1644f0297439c59082a2fce0d091787f2e6cc3f275d732a31d4d9df213e65c2aca3557a9b6a56457a378c8b13b35203ccc583d263037519f566d053fdc159a52058b2e40863f2a9111d6092c0c465534b52a0a5b6cd9c4ac855230b0f26b7c78846bee87c00881f8b54cb1726dc08beec0bf82ea0f4a4bf398be3024b40299ca3da5706f6132e4f3a760e951f03c809ca3c0f87178770f122dbb57eb13ea0b53552bfdd76ff22c61720403cb9c7c45687f0daeb0422737f4a1bda05a498efddee2c47642d1e28a41d56e8a9e7a027c81f2caa2166bf217f5c8c20d4319b353b350a92aaa9c1b425815608d6906652175d1e89fa88ad934f0d4856bbda8645a174286a01f6457eedd415481a95af58cdada3bb01652a585d4e0d2e6be4b18a30006399aa641e212be93651f9db35b2bc7f4999fa8343cb45c21df2e6c43351d950f3cb16c034550ef895b498581630765fab9a8a458024721b1b74df6f4a55f051c1fc01ce19e394618623fee461a5dbf48789573308421452593b639b0d1f0200cc13e7461c31cb68bb1451e2de11cf5648b1e1440aa96990add281d1830ceaac377b64fa720a184af1b383fe1f77bb0e9fc4689111d508031524a6f50cde660bd36e92746c2af5b4e74f5864d4ad061056522d2e1055b23b96f0befdce189caa1d2afc1d5755cfa9d95f18a628592665af6744cc931d75e41fabb4a8643dbb15a9cd069bf49ebb7ae64b2c6354708c9962d93729ac9f8dd3696dd7281fcf4f832dd3021bd219631318d33a91b171744e26b30e2ca1df459483f4e8f299e39691a091a771ea4b35e66ef06cf5222bf790fa0f8a533926c094065d789bc1622c78a60e3b7b4bbeeb9ad577e671cbbe0474cac8c1e3b37cf511ad616942895754249c24099453cc805b0d5bec06696e082e1cc4f91b4341235899d5550bb9ea6d19a16ea0ade3a465a1d86162362270cc89f15a3144f8e1892e95bc2153800ace6f54ae8889c3c
6c0ba231b3edb1e65d1839e89cc16f25a65d5f63ff28a789e9e623755ebef1e10c6d4658d4501b388b60293f687549fa4bd864a5f6ca42276de88e144b51e5569a95848e81ebda69e26b50390898a4c169562a8331687d9cc318b209ed58c16011930674411074d3866640cbabe8b2ed979da4c0f18e776ff054620acb1cf70b890ab483834b92b730be04a7a5a51697b4c0711722b35dfc6ca5d0a75021516da1281f9584fe0561dd0ea61b4ce2f124f6e8d7eb72fc51222289ac095fc0d8831b66a6456728d66fac2e39f29acc3b27aa214744a2eba89a0c49346a24577b740f34a0d296e4c5b177ff38eb742ef8ed76f47bb0877e1069a37e14ca73ae65f259b3609462914605251e19020f4072fbd81ff3d1329bc3c81d7345a02c9cf8e9689e0089b8c3a74b0e94c98f92c0c3c25ba8c5d423ae482dcbb0e6363c75843a4b8e6090130f2fb1abb075da3ce327c91b6f68d021726c83ce78561129612b57fd2aa4d4c278e66084a55a23e06d070b5d65283eb133dedbf1c71e186bfe5b08b944405a501a4d5958a7eeb27d11906f0db2f25575d725a0ec06b39bbf85d8ae8538b6cf563072134ea9200288409562970ac36ba1d3454f5f980d731f8d087d9b6e67e6884519b436217cbdc5972cb8af133c3846d98647d14fdfc4e6ed456d9256cd7143e07887b166dd538b20ad4806d751eeffd7da7e277f31e0e0223474aad525f2efadc9ab8909104872edcc575406543e111f96a9e8cd3202e59ca031373e787639a0d03c511d2a123e77c24e1c7ae1440205a472a524582200874a17ec3d5428f3a8ee4f04ef23072bc6e635a89b8d2715b69f482d9e53d608f5ec1b86a05600ea740d240ceb085850ed2992383d83f4432491c82c92aa465418d19cbb4ad7a13e67c299a072129f470b4118925ed5e1cd915862b9c22a25147bab781243608fc804507e42321b70f906db8b4d2df42b7e84ad796905478360a32d5685abf177fb3b75fedd93ae97f8f3bd3fd7907ece0821d68ba187427bb61e8ab54e2f9992e7b91f0852566341c27bf01b73052ef5b7d71fa14ca703907b6f353bd073fc9aaa73f652a6aa570187479a96e8c9b1ceeb99b08ea55a90697bb5ab1cd2270c2e9645a4846cc97f26cd9ca6544d9ef4349c4b8890b36b091ceb3f4c7f99428b0390f5e2d376501a1ebf234deeb5c1d60ccb79d531e45e76c5be11a91bc1ee85fd2cca54f1572a9353343422b34774213bf22f916502943e4042e119d2c2ef0dcb92f34ceb239c1ab79231964744b7798e7426f754af0d319a3f4e7e184466ec101275754065343c97e0b0a4cf76b0cf4513f7ffb9b2565468e0c94211025c0109ea7411825fe9e002c88d72c0231173fe6723e279df1de976214418d3c2fe14f9db51fa29830d670906ab40c30d642b226ee9f60fa069961b14059a050200def4c0dcdb3577ac48bfb00092f9264b0ba1ca71946a901beb7a7a7bcc0545453bc4940896c7ec34c020287096fa42f5d7e39431d908bfa8955f55e12d3a72b64fecb68ebd6197c7b63a751f228bdbb72a2648bfc4722545354d9c0460d77210340167941474d82b5932264217055b6b73b881ff7b1c11446644090f264a6b4a96c243db161d283cce45b9e600cc7ba8c05db4353d9c7b64396bf2e071ecc84b0786538f985dfa85fa78623027559ba6a3372c9c1868c28d6aa17fe2c1d0ffb39acfd566cbd202421c9b154cc55a2ac963fb53897f9d333903b7249476e838a500bf6d6aec65ca4c15fa3f9be688d609daac251a62c7a1316425e2c6bbb483cc83d246e1613361f4e5881d8d1110ae054a4a4a5792c40f82d5cb20a9e04e130a8d88ab1970a903f1ee0ecbaea469aa66e81e584aba9b67be7a40db1dbc42ccdd2314d93f7f2583ff3fed118a610edd23d3640c983e771a0fba5366c50ad051a26cf2132e4e4960808cdc5efe1a21fef16c266fb00bbf24e272c7cbef8c42349f3972dc85833e2a746111669bd8c8e5b5170b358f7daf8710bf13a81b3d2bf1ed5d0e3a053e9760497292577a19535ba174c861ee88b7392b59cfb6baf29198c0122b1f02977d655642f689009b74934b0b9830d292edbfd0f99a341f3bb52c0927933911beed2e64b5a5796f721ef99008883119e30bb52d608560417bd88fefc6cb586f2440a7ba6880f494f7c2ee1a841a08f4849ce66d27c114861ba430e4d47e8cc230f6fb34037a0b8d2e015d439f9e64de2901f9345d2f7a6b810590779b449eba70fa8fa888949a552289fe67c1bfdc6ca2fd0fba9b07b0d25a322f97bcaec729237c01f79d5657b8ec8568d34b8cce917d1217bf87af10896f1861451953c7949db2c4880a84c84afab0f61968ea42bfaffae30d09f4e1c387965a21370cd74d3f3dba26ee9e76037dfbf12dcfe92715eff808603e2479d963a0cf2c0ae1d3b8a4de1d289c23800a46748cad90b87db5d25ae5e45c9f41922c6e4dd025d8e59790e46575b31501891f1ba9d
52a8bb0c5ff62e910040344104860f2ccc7023505c91c2f7b6e0c02f649b6e49abe99f768125d9c008528dd06256d7463c2139340c13cc33331b1f974bed10b06bd4231ca335b14108c96b02557d4952b7967699892e30ee4f06de485dda2f84e9e31e6d5d0bb124cb69bf3c5888f1400e45984d8c19966f611169f1d1916ab02e20c11e497eeb3b3e82abbc6f7145a345b1bf53c07a9da070a9e6e18286497557866e5956ba6263a6b15d9e6ac0669efcee4c52be8514e889e4dddf32c71e0f469f3ac0e8998ae0f4aa9c9014ae0cb2226e9c4b678897af5a0946674917541c042bded71004dcf034e965d3e03c2e7cd1dbb2cf988173d1ad814b806d31f5c1a40ff00c28786796ed2e2d0356331a884bba3f01b83347684b7eb483113426c0976c84d179839f47163b9a4b003d10d64e44f4eb15c34dde2e69f9553fa39d1d655e25d33082cb9983fa91c6dee441bc8182de776925cb6257fbc560c79f1fe8f6ea5f75e0e54e6c19a0173c61d4d564ae11ec229fc420cf1270bd5823be97f272c31ee9f65506b1a03eb655ba4e83254c648c3d53e00db9f963732c09104d5886e89bd3fc5f01f797730aac405adeffbd14a1fc14ba6040480e30f433409582d3b047476a47fe76c1fc20b92efd06a150187c711c9c6bf29b0687f78ad662b3156b74cfe648ca279de65bd9870bf5f35771a354499b3cb51385639e3d4c44d4a69231703cffa9bb1e19d889cc0718ecd52be503a6d5d5c948d23f6280909d56eef9a59178d2aa36abd093660c7964e5463911e8bb363975f54421d911b7a7276ca0f6238ae955712eb65ff003f6556f129d6146967ac25a9768761bd31526ed8f2cf435219458fc54cb2e3376ab14e33b719e70f7faa7518de98bd294da529be1775fc7ee8e57d0834353a30fd052ae36f91c3beaa7a03a4c42a883841ddea93c85de5bd14b72af5a089468e1da8aecde70fc97df0835462d1d9251c60122995437d613459722601d6cfd2609edacf6086f98ff7513ba940f213ac2e3f3c09041b9ccfc182bab075845b68e25c7c4f9ebab2eaadaa3988e1793b5af9379847e5a7f6526efaea48ecb609459a770ac3418c60ebb463ef06c510110efefe92a291d927b05499899f70b01a9efdb2eaa2b0d08cbde47af0b3c863a2ce7a4b5c7fedbd1017ad30ca2c42bd09770d5228a145d298198711f68801f7d8cd7ec8afcac83c6d223b8513cea754ec1df19497d2227a1d9184c4989a114a2394663ab4c78b24bd97cbde9cf912b3ccb168e07eb0dfcbc83805f8b3fccf627b459cd4878908167ef9b288b1cba52cf60300bc4a234ab776b978ef5d7163851f5ee7baf9f697ee5e03189ea3107df87c2b8c821c4b3264dbdf3b29a0d836f165b6e5767aa628109a30dc04409cb1e798cd4c7aec8b82059e8d903b5d062536525fa6156d23bcead481f190a4931a0b6672afa80db1936539111e773cc0b7f0ef5253a009aab9e524748c3ec2b4f98fd012b7690312ef493a4b8529dc3e2566169333c9b535aa5d202ee07e20dfcd7a296c573cbb33e17ffb8e409786f58ee8aa24b113dda5e11eb1b3319de065204629730c6db0bb12b5372d9287ce8452062f7912a1e78584d6e890c97ec520d11b549cd711a0949a0a56b84e8de3e03b90dc5b56a35e0dad3d22408b27420bbf9085a0627b02cdf6f0092c9658f25f814b0d1e1f3f9355245c7a7f7847d23c449346e62faf024cc5a3ab0313f982abfab15a146124e6a67d6dbbb688506271cc19033a5dc6785b30d8554d0775e59ea49176ccd1929b22d2040a364b4ffc3b47b2078c780f64f54edcaa792213bba4b0078440f061351e367dfe3f3ce007ca93d7fc850259fc570c26d4a89519e16eb71924b6b9166da295ddf6dedde49652ca24533c075f078f075c24729f8b0841c4d3f9d16b5ebff2d16db040b35086a2bfd02b165241cfc718bd5d523c176bb55a2d1d26584805b20f39c9479d763a630b1f3d7a0b1186f8f1c9474f4518a4973e167d08f343d73f54999ea37031f906439c8f493e547dfc2325e062c5c92f627461c18de036c58dde793bcc36b1063d5723cfde83ab9b9543f31e5c470399985ea78cd46b774f01081af4c46771236a99d28e43e5e8bc07d7a11d58bb8adae6c8bb8b1ef5eacde0c24fe71d1bbfebdaa3e1e6a94f6f5aeaa518837a875a6117a87328167681baa52dbcee74b0a3411d55c10eb4af43dfd07a5ec1715c6cae6568bd9d3929ad55d36cdb518f82ecbc9e7df368cca73f1bc7759de791fab20d4754d8edccb7df458931364a8ad7dddd6c43f5dd2a5d44954237f5ca4a5ce98f60e8009552c9b4c26262616129a9a4b0b0a05c1616968f85e4b1b0b0c02045bfe18ee0b8187bb24b297b66dcd1e8ecd9b7282e9dd5cecef5ec5aa93474adedec0f0cccec247d9ebd8be276e792146640d67d3ca7eb461b75fa16c5fdbc1d9721dc087e1e0d98bf285f8a8c91d218636c1bf2153995141594abf291
3c15151595ca2da4e831c0acc038ee7399621bb20d7199c2658aedd571caa0efa1b631093444b93381ec71e3d09266c521298786603616ca2124bdd2bedd4a964592e4e94bd0c678b8118cd14ae7647b04ce3aa79eb3b6566badb5761bb2bdaeb5d65a6baf70fd7d4d5ae7969b10ab55bf0f044be349337db4c5ed5a756dc7e346d0a30f379b95c3824c50ea5ad3063b9034831b9d70d1de126e0e1ee4175fe8905801adc343fc42870432d179f476463e472846795ce7d26b8742e1d07f830eadef9c9b9d871a7987c2216ece71ddfacd47a830150375eb3ab43e07fb231f81d42f4b8d1ff0129cbd44a314d4c04b9f5ad4a128bef286ecf823c6178e7f19d57f43460f4816f5ca341e78c93a141575d84b80951d81508960cee608438441c60825095efa8c09e8cf806c1e272c257b9204df0df1db67d886d993898179716939b1ac984a2a2928f723791db78dac56e994cd366c9384f4191d639420b70ef146772f91524e29254b2925bb94523ee19861e88e31c68d1869d4d00322ade29f3ce79c737673ce39e33ee61cda3766ec40113a2762115440278c939469adb5564989b61df0b43c6d50659d13cca1299db3d64a6faf426a3d1a34afae7933506badc68308d7129756a7dd12397ab8d1b14c00ea3946de83eb6c5b650250ab43bf05696d8f86486901e80866a55ad5344a2b9550a8b55a5a6bad5cd8eef2122aa5d1867cd1dab4883ac46e1d67e3e608e7547fe37e63027205901bdb728b5a6d9cbe32adb5d227ccd36ce245d4d3a384a88709252ea2180d5cae9e2994c043cf6f523481ce968c2e3d82b30695c87660285f52c74fd9f3f306eda3e421e29a9e5b55a5bd37496522f9fa396770e7a41e969868e52b587b5a251dc6c76d241fca20db5df96a214f08215f752e999146a74d298db463bcd4da189d523a279db376482082f7ea9912beabf994de0cd56775eab63abf56a96b5aad14ccc009ecbd9152526b6fdd541f9efeb400d6a92169a4b7fcdce8d3e24c22a804bed27ba21af7f03aa6be39679d86083c1d596da46916ec00fd0a668082d209b5d6da94d25825d35aaba4295075c1234a212f2484ec8043e081d6711b65cea1be669b507195ea26bc21c770ba941a92a502a69c4e28f7f2c88eec68e496d62fbef8a2fe04a9125a9b3ea92e212f24847c32273cf15cf84983fa695bf8c4399acbf0719ae181e1cc87a7e78e39de4656036df0e0ce39277351e794f13d4c9854ce392538a30f1428e06959c004e8081c75c8860f581baa9ff65e3fb14e9bea074eaba49fec4906136ef41087d63aa7ea6713d6d246425b333277553f38148a55d4efbd2c3c9df52be86c43514f552d4792e0846afac0559fa85a678ce0bd41d67ae7c8e79cd3d96b756d6a5d022657a7cb59bd19260872ed716fe914bb6747844a14c8d127385ba09e8008e6d08f9a4f5bde20bf488216f35e785af3b4e4a99e6ce40956799db5ee6ce4f3a14816938dcc28b884bc9010126484a0a048238631c61841ae241f8d001a23141bd1cf4f6ac44fcae327ddf1d3e90f38f415836d743acdccb804bdf4de549a046d3b78e974be70a0031cbc7e124da1d69f0af315ac43df16b556cf07cf09c29c38c6384b4f676863536f2e7294d9262cb5bc3ce29cc82520ccdff7e1a55f4115db48ef86507d3715b2d12dc1aada7d9d34d57ec0dae8f80a205700e9884a1cf588a231faa5f39e4ea37b9294baf548dda5051b4cd768774ba7d84d57f0b2dd075e1fdd8587a7809e606b317b2f06244bf309d6222df6afdbdd219e7da8d52205481d0db4dc15377aa80dbd64a20f35db5d2df6808411b4d774a2230241acbe78d49790fa42a20a095a2188a0a0972bf6f3fa8ee879854dbbdf88abb5d6ef66d3421910f0fde6374ef23a1bb60f8c669d617ea5ad9a9408b59c53a51234f4d75ae708a6fcfac5943fc126f2de52547d8cb17acf52d7f9a7cb39d4352ffdf47b02724ba77a6ad58d116ee7e18ded5a70e35370b2cd9caae9013621caeba506c6ccd846965e2f5f1a18f43eaec84da23ea33906f972b61069956c09025b52b6b8d1c316a097324152e038a097ed73c2ccc929184a238d30d2615ac5301fbd63937822c175b7db638c31461baa8fb1d452721338fe6247d953d2f91b609b1a285b2d46a3bf06b661afb951d96dc46d9da7b21dc9fb48f743b92976964a5d89db46a592d54aa552641752734577ce12d5346b471b67a4eb3c8f44fabed229254545a5542a994c2b2b2cde4e113b319d4ea7d249e5743a9d524edde9c46da791d54ea753a5a7294f1d5b90b2344bf4543f2db6728c46dbc671a58e44fabed2bd2514942f25e553515121a9789d8a8a8a0a9db25522b7cc4969ad5a6c3dbbf5c299d9c2303bc7b267c791f102d7759e47fa5e289dee45414949515129ada0dc8fe4ad742bdc365a59b1dacacaca4a64174a6bd53495b5a391ddb69b176aeca8a66ecdb38fa436e278c4d2146ed8d118a3f37
161c16d90e3220782b6a023b6a018bddb5d5cb7bdce594219ed8c2ec7759e47227ddfbd37cfce31a3a0dc9d128dfe5ba2d1cf6c6377483edc23ddddd35e8e79443794f1ecdc4ef77594936d3c52c7ee9e51f6fc84705152be9d52d476b8274f6ed8755c77b72a7ad3e4edb828b90d869d56e95c911d2d29bce17df5a3537e1d2f690694ffbec190021f4103b438d9b58dde301d43cb2980e76494f1a52451b0b379761724bdaae99a6501f0ec2e40b861677b45f34c2357d941705cf7926797438d448664d2aa55cd8ed88e366ee33aaf632f3ae1efa6fb41223df1ecdfeb19751915f6cff38fe776b58abdc849e7e896932227443023486246604682e20f571c820e117bdac9b34b225ec24e9748e0857e81bef0b15bca39696c29e7a4b44e5aaba6593bd2ec68b46d1cd77531765d17bd482275f1f348df772f0a4aca083a4685934b6ce295958e595052544a343a7afc28002e388e33c1e4c7452f8075aca491722593b8321ff48ceae2466f49873a1c3b2ed7dce9e9e97074383a1c376ade0c7682337c5726709c0a65f4bca54043710777e41c4b8e3d495b2cc1b42c71cf1927e73d63dc8ed8503d927884cee8708bbd75246f7b3bd2b5c63abd7f82a1037e83b5cababc28d66914eb68dfcc8e1af5121dc5ab5fd44b0493a07e3dc5b90ac32569ac57148c1f49639db35da358e7a25887db6077fa36c5a5d7addbb4a40d8f6451b76ed32359d5e75bafd18273ba2d3867e41405c3d505b09ef2f3d62b8cd7025a8782307c78b8339fb7600b9cddb066c65e036a03a85fb0662669accf6814540d91a4b1be028e1bbec45470dc7e7d6872516fc7649ae28625a21251a988733ab75e7ac23923b76e6a49d616b4c1de96886ecffb38e176731e75e21624696ccdca0e6e4d9164bdc4248d75f9d66b8824abfd64d3f312b3e1793bc1f0f49b1497dbd98adcf09bc5d2aca44589890f4a3cde7ac927458bb79e32e39ccf4d2e94fcb260d5b7e4addb6f8797c587920e7e3dde10dbd8701bea611beb5c0803146f1d0620ce99cee2418979aeaf9c1246ccdb8adcd0e4e2cd698b6da47417237805c3a755765bc16801abac872e3fdefa379b60ebf30d45f20fb5390c9f8602460b36226fddfa480b0b058c18900b3cde7af8cd3e86166f3d045b31666cd39f872fb1989f6e81420a246b88bcb75ec3a457249f2fb15e91bc66564344c40d61c4c29a195dbd3827e6633ee50937ac994d3087c9d52ad38f5659d34fabac9b5cadb29b911b9a5ca69f5ef15b37b9242bea589f28b0d52aeb1105f2b40a6c7d106d61bc0001464c0b07d0b2456ef4d0e4a2e1e2621b9bc55b0f5baa78eb2d31cef9dcca216f3d3c19f1d64f2657373d1a323fc1d95be73a00bcf55100de86dbd05bdfacbd770b6a8971ce74eb2e2ece916e878426d7db09bae0c336d653486e583393ad9f1ebec468c4fc070ebd48288f66858aeb42110a3e7c89bd751ce28735b3b75e33e39c7e1b84b737a8d8c67a0c35cf817d3add5bc3a455d66774a90ab78608ac99b5ca5a4fd9e2b6873533193db6657cc0712a349174c4d8b4a3405c1a67bd624289a81773858a1bd2998e0f19d6e3a7d71f9845af602d98ce3887fb4997f8fc0ceb8ebac3939036f9e954c6ac93a4993eb90fe9155fc37cb125296e056b8032b46aa670a10267ad62d2aa099aa024e486744654414ad42ad5a474262951a9865e85a754932b5932cc1696b8219dfda4b35e3513a25e15599f4ebd68559e5ebd58e7aaa357b567885ef57079fd5183f4eaa5c1db2ac7a182b464eaa901c7a9b0babe9d9341023c188a2b4e6032c4bea2c749d61e15cf776d7d4b962da40d4298271c72ce10a239e8950db1b23c116425e03737cf2ac9ba91803d2da0752a68254debb0a4c00b5a24430d4963617614ab397a555bbd92b7552702300cd2431758d527eac436ed36441824d8c23124ab82f6e5db4316c25310c66f5970dc90699164516f1adea8aee7154c537811f42cf4c4c9932cbe8891224f70ae9737289d5daf6e744117d6751124e2d4d15edd98ce1e85b5aa3d0a1e2a7b4963924616b9eeb390153d6f64f62c6405ec43ce47c88a57a431d837e7d28e00400f3ce80f3b3efa8500c7d127f7f1084fd0c3c8110484cee9d98b13e038cb5d2057ba0c674729a5dbf6e1ac0c5f113c283e960082e4e3fa90810ecfc7e743073bba1e43241f5604c111c1e3f91832c1563a19d163844f777d2c0188118f18e7030a27b02f906d384ea0b95c231404515b00645d50025ac4907684cf3cc2a7d64008198311313ec2d3f7543a15012469985e8924bee0c70b824896ac2b69a20bb994bca63983422ec3627041ec9e9808ba25fa2c14640a1923a49042f628cf5244c185f6a1b7c42aad4882bc669470679e8582ec8039c2f52631565c991f50715b6ab0828ba2c4cc12aef62cb4c41132495c9667a1254c204385fb3d0b01f1e2c31b93e316a974801045212046bc9ccf42405c70038e
53e14ceba7b3ab554c74c34ff6c97a15930c4916e7d3bfd9c7a4579b4fff883e272b54dc9216bcda80605569c639bc9de0e79c5ef2e19cf60f0b667d3e4c23e31cd34f1f2d60d6077bc1cb92b40a0837fc60b2efc817ebd517f337c42f3ed717e47b7d4f7c401f9156cdefa755d37dbe1ead9a0e6bd577a45552bacb11577eb10f466a72c30ff6c1c20ff633ce4fe65f92253ffd1bea95f7d32fcf10cd8aafd7b7a408243f3f9f9f1e7e4e8a7e461e433ebcad9f57c7cff09bfd8ca81a8e04f1e955f80df1d3d5abf083c17a3544afa67f3d7a15bb98c287df8f9f92f3c18e9fde33ce4ff6ddce6d41bf057d8340373e7597f053b215b92cc423e863baf533ad9112292db41625af397d1aa664436056aa25f6f0c518638cdf73dd459a840e81547a48655d4a3214298cca28ac57473eb64bedd94b828b30b5640a384e852d44df416cd39c9cb447ab9acafc7bcfd1a691e9a41a89fab4aafd7b6e76294fb8e1f683a46549cbac55aa6e99f58a252beab4c45a64ad6a4fd9e2520f5b6241e0d01f6e3f1186488bf8f619236a7bad5071b71fce51f9f62dc8f6922c4a8402b5aa5b86be5b865a969c5c7e4e1ddb4e7d24eba546f50a8e20eae850bfa15382eb5d4488207e82e106f4adfd16647b81db4fd1951f6e3fdf2d44d5081ca7c2175aaf794b8f46cc5b97d1471c8a9952768e4dad0740eb8b38b1e4190acf459ad0f15c05badbe6ed5ca06b391f6f27d5a3559afbb8ba144fab523a5aa53933335fa03b9f5332cee1d73c8505b35a644ca3852cf0a858d8e1b527af79986af21aca872919e7612a8bd7bc458955c26dc2490adef2c56ba08b0faff90acf45f11447f11487fd308de6f6ded80293ac948fa4d11c46c3c339d62f8f290a9fd4cf6b9e824a0dc9e8e3d3ab1e9a0f3da534153ddc8c841bec690cc95394a7295a2a954aa594940f0bfa62560948d244a73b7855823905923448b24c1e9dc224abe4d1b76dfb641f9df6700ec9a3531f66a53865cb39d7a38f4cade74c282b69aa97501ca9bb4057fb0b74ebaba0a24e2a08a53e37fa16947a9950a9a1526a56232593349a5f24dc30f523fb09d2da129ce4e386c5471ad9c7ad099a1f360f69783e8630b0f80843f6b17b61f1d1e5c56a91499a1f2a78888fa1cb8e8f3e13c439decfc03e7a3873e4634a06fba813ba007d7409fa18baf47c74f171717d34fdf0d1b720ced93ebaa9c5392a7f3d34f17c4c2949c998468b9e1a02a78ef05a8a2702a0f52d32c94ac952b05ecdf0a4a8e0d58c095232c92a01cdf0f4aa04eb1913f0aa4405ab341530d524c5d3abe89aa792f4aa73cd5343bdfa5cf31494cc4cd2682520c992299234b05ec964c1252a4a404f7d68b9a81619db68245066886d344f31e1cef0704ee9359fe9f11919e770aff94c925e5dd77c0520ce6179cd69863887e45a9892cdf0d0147d6a091da99f56691ed264f15a48337b8da666785c3ffd434ac753d7e8cf530f677a5eeb79ea28289fa74f02290fdb50d0067de125dfb54a2b4971fd8be860c287a9d96b2592e760bf73eaed5cf0b64af3155a37bcafb5bc5e73970f6c196a554b12b6d1bc43e190c3fd16256ca3790b113886d6226b4911a5643125db7e7af52dc1abcf095669be35c1edf061f77acd4371fe49d7fc9352460974376f44b7b8811c4a46948b73629cc3b9e69b11bcea5e9b8b733ad79c79d539c1aa50ee605d9c33721ca2d6bd7a45776055bbe69d1339be5deb1dbe3d9ce1094b556c4ef303cf6b30b078cdc3199ed77c848221631b1ce287a952152919db68de1c48c3c3365a4baabff891e3105b649c135de3c0f942c916dbb48f5a5ed32c28c3c3369abf0ce1862db2d752b2d7fc85d6d62822b6a08fe8abb34ce73de9b9579d6ba83be5ec38796a24fbbdea3b10888b6323eaca20592dbd7a7509f44a7ef5152aaeecabcbc039295fbd861a9255d334d5ab0a060637c4f98a23a36ab2e635bf912c49c43931415fb5d7aa6b2835f440072843ab6ac8b08f482ae138a57a8e2047e146970ae04c9799d239e3cb7074fc74fa8459340a2c98759271ce0a129e287eaab438650163969d93bd516a38e2a455fc118cb238a3715ad5ce3f033c6931c61863acf235d4aa99a45538ad92812aba11c7430ace68d5741c1c9c19bd622e164193481a167a5cea214e12d6ad645910072795831be2489605dba341653f5d80d0eaa2c04de754281374430c31eff312f6f286b663936e904f9f03329f83cc53d74469b775794079cdcdd5a54655bfadcb83d6f3b27ac7cc3aa8360d1920b691a721b661818920b400b1bb1824c5d846ca4897818257dfcfb7840c918f99718ee630409c635da24cff7e7ac7b7c410530772871d519e182d5e7af80d790903450b06880331a253144b9a9061fde4d5ab908b1e28567ff413416ad8f001c9baa886913c3c3c91e726f2449ec85374c3c8e392f527bc41b2418d588c818c2d9ed141f2c4e307d7a1f2e4c38cc562356ac
462b11ab158cc0737e4d88cc9291661e81883c662b11a3562b1588d582c666fd0a1296ec841b369ba1a6344a72932d8662f202637a4402fa0d72b08689b6d33a057a90434d87ce8c18e930d2eb2b8d20511ce01ead51c7a421624f603f631f6c9042d87a898c69e02d0273b4fb2d30d1123479024b93142093ae4a35316247ad55e4575f52a642b5e010b8830f48aba91a7958b909138e915e7486740a444548dc50acbd2033b3a01915a9d615f036fb8c837c872bb31e252a21e1fbd7fd0a2204f10f9e861edf99e3c823e3af5619ba0d92f3f6432d9d08c488b1b52a219d14c362b2292c9886643488845b8d2ef9054329340b5a847b8d3258db5b7a9df3cabae0d7168ebaccc6e486334265933f68d63874c59a880d0789c047153ac1ba669e728eb0b0d332fdd4309beccf0b1e7a778558fb0a3085cbc74fefe01bfcc2aeea98567a1193cd1840b7ca86da215538421452750c77d91a99a97ce49273d17d981e3836995744fe978165e7a8d07342b0a0db55aad1d74248f1c2f7f5e7a2843debcfce955081344c2cc54498aabf28838d157cd41af7434c92e4dd9a372e99c3445cafbbad12765a62e2951ead5505d7cffd284e4712188d1d74b5761024b9f6eb439d0f3f46ab73927b779a3fbb29f76abeaf4dd93a634db4c3b4d3029a59456d2c852c7cce23c64ab7d0618457b24cda43c3db9958e3d66d3931b5d7ae7449552cacde77d9ee35de96090b8d12533af984a2a2928f723b18c1a003e2393134e526a524af95d71a3cb92901ba994527630b831bc304035e95b74a377114411dde8a82e2e0be1a49494522925a554a394d670a48b32da3a12cb664b1f8a4a0b85d1177dd1179df4896be43bb68862ab361dadb058961378d2c13dd55a9b5521d05a5c28a537b3e5ed48fa7a75a3a1d6a173b979b9d6c7f5938f1b29376f47725cb09c5c64a44bb725b93174f60d6ee462e28cad38135b92459f787df3bc98f03ae9805f292dd86252ca39a79472ce29a59c73064929e79c1be7b16cde4a67fa6e4a8b8c542a2527ebe2dcc09a5035afb5a69c228dd3f376e4643959e9edc896c82a1168159c1ae7d2d95e491a69cb07947b899e0f46453308c68e50968bb8a75bc48d4e25b533664f2d32d245e6a5db925c39397a84e5e0c69899196994e9e4acd3674e1fc9a247604d614da02168fa7cde8e8405c17882a322b6691aace49847c1718c524a1965e4a6d46777addddd0d863d513664ecd803c7a9906b8d935226b975c5235d97ad061960b7e53ba5a8989849cd4d3d121806f44856162de2991d5240ebd0a2260798b101d38bcb8bd522c9db991c61ba98ce3efb9c382c8891e96c4ba774a6c3113d0450af8ee2d12082975e5112bce0090797083764222b599a759e6f51de9d6ebc24549d6b4ecfdb99a425bcc0a064e9de18944cb725b933dded0d891b85ae94d239ef28cfb72723453d41757382dde456885e8c30c9a25ed0a256b5a398c808bbdecee4f92efa8e41e625b8fdb00dea49d126b9d64b1119b952d6e817b7ce186e3ca52e2905e23815d216eb4c20ce89138ae78e5470daea188c982486bc627e0cf98929724fcf42437a60a27057a6e03b2461c60897f42cf492f1f02a12b3e40564c72d3d0bbd56e03cbe701cd7b4854ceb7a4490795d2f0624048a280851a20842583073c5557916129263a6e75e27313aae6512d383eb292122063df02aee7c168ac10e66b0b8a367a11804c500488809180eda99f907ed21a9ebc737edf1edaa93378fa4e919a20e788a614202757dbbcb106e487bc0d01ec9da7c58e7e4fa7609f2ccfce6c2e2460f7568e9d18879eab49f83f8fe08caf8f9704ef50e493bf876128c73a87f4d787c7bc7a459282ee9f9708ef4f66ed6ab89021b2248079097e3e5c537ca4fe70425c67dae6fd93026403f149caf984771495623e92428bd04a599a0b41394f60265821dd36146ceb1cede0cc3284aa9798ce252af1e8d1819616c43dda341e29083fd669bea234982c836d5abc6c96e921679362f86991f01b10df5eb73a387b4c5399ad3264fa9cf174aec29fd5cb2c539f3837911e40488c9eb29fd7e9e52d0078a26df3f7c7b88127b1e14cc09d08f0447406cd36eea428bf9cf25595a92a6695ff2c3e4e50448f322e83bc7b7871fec7bf3f9f6d65c7ab86c43654882972e5b7a94d467349d2557e9488d74523a5d0821203eaee3871e80e088a184142af72ca4041315c879bad7235b15244a3c81f3d34972fa95ac1054e10317301f4c7ee8d08955b8801028bcc80112217e64a033a92044458f89f2e47e3e3acb3e3a179b1456e0676cc63847e64bf0f2a577de8efcd92b193f453bfd1df9a962bb69b55a3c380e0182208416495022880f48d0995ef404f1b3db887cf48d48aff8a34ef453d8adf06109064fb048c28aef9f187c7b3b60e1c3ee6751121820f9e9fd00091afcf4395df3b0f8d1858c9f5e63ecc041e21c6fa757ec839ffd23c80b90
7cab60c144f810e6e70a5c7e0a09f1fa29f48213a84871e3113bbce96728e3e77ca157dd4fc73940afdaa70c2ce24e8f31b2ceec15f86d8492ef2d88e463af7db12b1ad5b4229a94408547cb54441474a4944e31f8fcc892c581d2a34745470a2e1143293e7ce8089d6fa8a4ed3645350c15cd0c000000008315000020180e0ac542c1601aa891e03e14000b77883e70443e188783410ce4288a81200621420821c600c30c4086ca661d0000badcfe841fd157c7ec1f1bc9896871c79ccbf4f64a99a6e1b85ce87f2a4e7f603bbebd24ec13f2cac82a548a8ae0270b905c6f6d64efed24022a2aa063f2da8144cdb4cb5614bb280dfea13418bdd945509a2fb764b92ad0257696dc5cdcc1277a72e04e0b69b5c1a692c13967412c39b1c659aa6e6215c2ab820bc0f35641f0d1a81c8fabce74423ff4f091f3e94e6e0f62e454c0ce435b500f4db069c42c40a55ea051fabfa89bfc366488364225f5a854653b534c35b656daffe6647cb4964db922ea7d965e91c5e68938e297bb06f77cdc959bceaff4c74faa96a99a263d3a9381f6bce75d4e45719f8f5d77d3fc947e1cf8caee41d4a124274373e18e9afb8880665a5d8a94ea1f6cb4f7f979dcc8324e563219a3460455d948e1d78faf9acbbd22b7588393afea505150437b81aa3cade1c181a5393db4fbc8c9bc747e1c480dcc817b4571a302fb208b30e14f307fa18e7aa6a3bd4d0fc5c0ca8eaef4fc6866371c3dea72f54c2e2222ed9d30196b3c84122059d66f1fd92efde006408c8619f40366908b03b7f20e13eac0103a2aabd74e1870411256efcb3a1ca179fb247262d183c2173dded7da818536aa320c1ed777c07eec4450c7172a4c872683dd16de4e1a3abce6048fcfb68a25d6b20ae34d7afff50cb56452cb8bf476bbe959ec88a69828413adf5702261a412b7cc8fa4a4661f4ba8e300de9ea9b9349fe06470d228a85ccc09ec2e9b24b7978b55ffa56a1225fa9bda988114c983fe0c1e3b1ceed7c2d147184c15ba36148a96b4299c0263e7432ea05df07c376d1de827a8da2e0775ad8f057e53f03372ef8a0090dd179d44b866ea3a64c7ac32e32308affe6444392ec7d79454d435f8d5bb993a69d21ec23ff22de7396b684b4c8663a46c44e51f53f53973422e2cce849c6c2f615d37d8369dcbc994a1f899690c7861106444c83b4ea275364cd310a4d4d29506c7c13f44c5d304fb8c95cf58c18ae1235018c9cc047a7c7f204f89f99124e4d1fad087f88e4971ec5d2d8991231216b3b4ca3efa484cc0139efbee8e3b449a847a9644ffcca7a88bf61b98a50b9dd71bf26f6b0c627f6d24626ae875a343ee17b58691baf091fae8ede8501ffa8399632dcce6641ac74ff90783be2bd9ab40976971ef15e148de6bb3f9a0a8f79d2a4be4d8c7cd4cea7795910d150d21e8c7154e55b5b5a87b5160261f3c40abec513380bf57f27cf5d5ead24b16feb141ed1f44d90b39d842cf393bdead2239a11e222ce87697a1c26e2faf3d50f05f18192f71752aae25079443abb4f26037a9e5345a9bde1cddd5b7b8d9deb57088c32e34a5b607079859a0e6951221648664b4e66e05140b73088122a44f4174a60c4f6bf00ac563f20647f4e5d49746d0c24c25e853a22ce0447a6ec4baf0a84cb7892b41589587acb711afb1617cd5fa746871429370cf41b5e668287faa05175028542c2d4df9f00dda73b3afe919aba14ee07a91f9fdb7922076bf44145e372cf733990d8c209d6289d107ec16a75c0a8ed7767e1eab8a14e8135619d05ae92ab24322265b2407641495d8a79f122b6b5c8ae806689d62335fdf47df7632e638857628a94f618cdc6acaf9cd8ecf62dc588e1146e2e55c62cd283e7a56047bd92311f870f8d2fc08a624df47a04a7d9497ef954409dab0b5060ed51f5c47cbdce3af30f61056f9e7125f42f3bf7db87d9acce50d065153c9011961247ca15cd49c73b9f7f633b1d2fb308031d405e66a63f64c2070bfabf3800a831861fa77c2aebf0f7cca463edf815434b7d3b1111783b462ec4c686f7f0e68fc58d50918020e3e4fc758f14623230896c0dacb6217c488e499f87c5687234a657a657e9d76d0fecb8a1cb1656cc578aaa620c35ba88bdc48da19963224af22bc5a751ca56d43e320a5b66d144ee9833564a3494081d7454c5df940d95fa95ca530d79cc28e4cdc63aaa15a6b20280cbe0969c515d23d4da571b719682d98a74d622f74c10c153bc27f4e003770ad92990cad610f0e68828281c22fb26ce1ecd97f92de7dbc026ea0b2f2b97f93411d9c0b8a0fedc828e102b8680efe4a34536989177c6047ca4041cfe7037a17fef085fd2b9b8ed66b11a31c78871fea2e372cd74d8b3ce3eb732afa83fb2c35fd2d40c2dcc685525ff706fb715b3a0ca6ad9718a9b4582b31f633053f18887af51713415d9fc13e197f38b2d10c2ac965e7fcef18e7a2dd99911bbbf0789d4310b1feb79180d18e188d0c80
7fc39d6fd2019271d2ed07535e6600a3c6c0c64d64876d56a3eee2795d3f185b79c66b01507d92a7eca6a23b7ceb2338031236594a2e752f115abbe18373ae2bc7554f91ce445d9c36245fc04fac7049c34f39aea34d506ce41eeb124e2cf0b48e848b82de31c095df9ad3258a0d1ceffeb51a6bb186c65ad0611a44aa988fe3593e9c89b18060486ca55398b72ff8ac2fe9f59076cb416ec155fc3f0b82ff0c3b1dfb22306ab80c000965486113d2e3d7749e7dac4a9748c5b5f4275313ab1bc0cdd9210b86995b951cb71d7b9a7a09aa65d1c5a486f64d910b96985171701956ebd2ad375a23b846b30712496dca5910d2f49021eebc6338cbd35d67158ef0442e7614431bb6f89d1b4a6f5c59420d04478ac23e3ce9e0e714fbac95573fd8fd283cd6fa273b7a42b57ad5d129213d0e4248bb11cae8a72693db1445de77a568f7b94df3cdba4b9b2d9042156b05e63ad5f487406566a84f9d0841f50795f414787628e5a09d4d9a321e5363cfe589e323e7dcf92f16b8091a9b03fdc25ea02454b381d14076dd99dd55b761e504b29147d25c0b75e912ae52ba752d9a0b0ab0733b83228b2496284ce5719eda759dd649d3a313810494160cf698839b1b53d5ecba2df2f5c124c4a7e945fca41f948d094b2e935b4e7ea478ca1a0755d779771b129924d74e41f57ab6d894ec23af173fc00f15fac46284e5bec142d712b71431aae625464604a7694c5284d58a158bfa54bd4bc80f6ecebfa720bf45db05bb0569a042d12417069af0aae1be4d3bd89f31815e4839d315048514fb0233a6e3defd03bd2c8da69f7ce1f1f0f552e4dbdfe39ece1c1c983c5a985e250dbb1396a9c06208730a34bdf1b2f4bb22abaf2957cf0fe1932e9a5c1cc9f7bb688d3347ec22d353b535330bab075a017d49e659c3a1c98bbb381a49c1c60be28f56668c8df5065324d340b2815035294c3c88b437f8e28879a4621ef0ed320fe017616b645b7c9044466cc46ace41ca5c377243d5c233e4a38a4cc4c5e18e6494c7ef407542494f60297d5cdde8e56f7aaac3a3e4f08528b626cf83755023dc108a1a80b81f59c261c5466bd8d8f29d880334abd2e4cd3c81641d31e94cdef6bdb60ad4f7350ece719733acbb3500221c480b9453ad31ac4de37cfe8a1caaa469a037b185797ff83761d347ae677142d64d205cb30bc24bd6b7f935234b974b29ee20932b2b54b70e371b42c268f904638810e61303a6a251da1c486e69d8e1f9176b331c1f357047538412b37591d5a0ac549c51bb255dae93c00f17cf6aa6d39e81f4ebce47bff5cb987770491d0dd32922a3275d15109a50f6d7bd517e653284086db1d1960cdf199624e457f15c1a22446070589b443b69770692ea9d567cd7fec0d14c58df92e8a19977c65c1cc3512d0a6b2bb1180033a053293a1569f79af2c3f89ae40c86bb9b755e833e643314583b51ad5e1c8863f5d215ee777b6a36a2653e0de2700594b63a92598f68390acc66f286ac71b7d7bc7b861a7d56e9a08c8f1690519c18cd4cd43bfe8863fa0234cf90ec221042b7434a16f3c03df6ab4f4e2fd838b22841ddef4e339ae0820c158af913a7896c0db2dbc6e4a17b0b3ed504215c5b2be95a7f226f38d697963320dc72cf496f7ee2c5deefaf30e9d4c8adf1fb2624a3f728b0cc07e8f4360fcb5f72694bd78355c064082d3ce64976e2ca795706aeb55f71fcaaa734ff4622a949a4afe22fede974cf099705694d6d3185067ad41c86cec5664a3afc220bcabc337eec9bd257693dfa81cdff96dac90f88337f07d35b46a6c41b5ac5bf68fe5b12ce5ce63f21d49ff176f0ac1386f1eab350d99977bf9cd66f20239a1e30271419ce36368bbf2b3d003f14854cc397ae02deef75447f780f1af15bb1f483723782bff14d45249d835c3c85d0632f20fe15310163066573b3f4c1ce53add089882822db2b1c83a849472073da6e529c5ec62117a79e508bc72733c28fdc120be0a1c73b557264b0f38a3cc645a0c3f26176ec74cc9c7d45a9d0d63038a402f57e8570c8821c0d7af53974f7db468b8d615b47d44b31cac3b7da922509f43f421b37903a7211c0ab330e958c9e6c8f8118d3dbb1c3dba81540480066bd618600aaa8c36261021eaa9ffe67ce145607f496cdaf3a261cea8af1e677b67af92009d949811956d485e60593ac01612fb86273d008193dd59c313eae005a74fe2608c8f99d0e782717a2722afdbec3b5b952048593af96b0d112a287c0538612fe6379edc24ed8925ac40cf083bedec90c4bee06dfb3189a1b980b1b57bc25937d2a8ccc50af65c46712a772996f51692d135f0c24c6224e9d10936b90ecdfd607b10a7fd40857f106e384f10e202139521b3235400ea63ce46c49c19957aed244ea348763bfd74f68d0216b0ae5c6ff8a50e37f88bcb525f520acb5d416d1b73375ef12153ae794ee329ff97813f25144343f834e26893c8fe40722226d7b43a2b613b2e9a
5f494c12948f0b57260a17e9f89cc036136b45e33ef2ae6c4328487c1f0d7a9db2139320df9eff64aa0dd7ef2904906aabbb6a0094852eb1cf770e095ea1df8af2151514743c9cec0f0954cf0745a44c0a171a480f31e0d3a2f40fe3b83dcfeabe9f4ed80bf41d02608faf39a02f82184b6626a0cddc52c969f7c7f6a0f83c8cf2e55603d0fcc00a2f45c2be7de6027888ac180f16a36399c92659a8cb9aec6063290c98a5a865922dd3a1845fd54ea007ed5bbebe5199eb71ddf3bb3db5bfdb1473762ce12965d2e0c85e42a65ae9d9152184ab116261c86d7c42d3820645a51a154c04637ba15b1b3f1e534e3a59902d1a48984e23028af5463cb2ca091ba4189515b0e7412be59e0b10e9b75d814b6c889df91506d7b95184e251c949b9c9d4f2588afc452d2056ee01aa04ab4a8026bbce4848b94905fe3b3175742e6665fca79e7e8afd6e9575a32a02c563e388fe1b3262b8d5879110e8db18f414a7ac71f2ec61f898fb54d506ca6385cf3f8932e23124d53db30698f5d0091954814c6d5626871f3564bcc18e0df8f9a4752f959e03dee7703e4e36a71019c81dd02e3f9bf4e54e9413b46b5ffed94d6e39c6e51f6fd471230d1c830c9db1c3391303cc2599d2835f88abc74b2109ec8c7a09ba7837f69d4859c5235b1c24315e26bf5e12cd9775edb7c12ba882cd173724bcdfc8d1ede711f65da6ca35ec7b2d2232833cf9f925eb432429cfe5e642e01c9c6c57e9b534c40841dec084c4a661461d27926d07b7aad059621ffad7b3ee5755fa4322b3f844ae52014136609c8be4df82845c06a2b94c968444ed1917929a64bafed560d89f0551aa4e2f4ac6d56780456ec3680b6855269d89897eac1fc36066c92feac37abb178ec814f1e9356c16dcfe8bafab5e949e8f43d7ebc466ea05423a08f1eb9ffe5b569877949ff63cc85756ef5c5ecaad5f9829c212da2f83da0218deeba88330fa93e41cfbab3affec18a165e86a86b9166de4c00fa1872ee2d092a3106e8443b165c883b3c3896f91311dfd74b0796a6e143a1415ee96462b2a1224efeb5f9c3145591f6e5980de45a380cb7a9bec1181efb2dcced8ab1044c2965ff44388c0465c7b61f0b1fc38611464c9bc77131820bd0bc3d28108b3058aec9e44f4c9acb1d4c561f3cbd39615162c90e709dfa0e50a14b01e144210cf107ad0e35843b6e841e161f5713d2a0408faf6ee95013356bf3a597a5c0e9def986e9e9e1a6fd33bf2bbca7488f19cc3c1588a06a96291b409c6c2e113e92b3ce5894de5b9be45595353692d35ab3827b5a19ebe179b1144a736b4a89ca0087914ad317313646d21734ae100bf5af4aac863f2ac117cd606cf7180b28be51e225a6e6c2fd6873ab450ae99f2f7c9232ba0a60d9ed9bab5225ec0b6787666738bdd0b18df706b5f77067f3a42a251af6d1b0f75a413f4adf9d7873d974e089191ed0fe55e6353e806f66f3bb75e36e60eebabe6ca8ceadcefda50939fd2435211972f9d3fb74de5f8fea7179805d8dbdfd524c96ea5e98ca600a3de853763d0747494834a540be7f8d7eff766933ee63017e62b87eaa6f8fafd8ed84769a29b9ebc7454f46cfd08802c6e562124132d8ad1c943c599aad853d939a72a63bd42d2993217d63dad9fe4fdce1d1050bc9056b88d2372932172db05fc3b621b666547361c182fcf6b5b68747e74b2f3bef83fede2ba2bf61322f34e20aeb4a54549b223c93b21d83b11ab1663c6e096ce2c42d3a8d2840aaab93a56cb328de5fb4de5f2f514c0552eb78de3f13324ceeee0b2b56e050d54d404eab5865fdb6f119e21830b563adacbeaa86b035f4905d0df7da1f250007e44104c061c33837af0d93f0f33d29c452305dc78de05bf8c13b23b368e9220e66749626f9da71eb49f97790f8d248f833e66287a4b0652f2ed7af062edd8bc5db61eb8d08ad6fccbd908ba467b7fb02040d29479548970fe2dbc923204490042961e52810d8c0e88e1f81c935ddac642ae8ff5a79cdb4f1a814fa04cd55bdd2fda8308cdecb7f718ed667a362ea099dc9a2477c83b315f0b54c06368a71103a3ebc8f0071a3a56471acdc7b9aa7bdd3078a2196d06b068bced412379758d13d7c64cc3fa52c6618eaf69ab9f42e820f5bdd16b7fd992be9a28eac95c58ece805f5b74b4a5dd3e9bc480f17dda7ca8a98af124faebff6a45a3892d58d25cb5b4720c1ead3f665b2bba278c46b755cfdda7e7004154bac3c947cb21eada4bb60c4ca169b7f254feca72de9178f58d922f3a1e409effde9e4a823d8d46966d2f974bab4e15177aa79d0085cc604ed368da3c286f05c94817bc251442a0e567d21155fa6f3df1200a4d0f63b1a2610943b3bac6052ee33e0095b6e99532f1591f1e90aeed945723f6be3aa620e96a9c00a3b30dfa21a296456af9e23a85bdcde64c9f78c7dbbaab45e314d6177d8f5bc0c6a8aade8f34de8611484a7ad8454f1209809da7d374378c33aa97d17d80
9000ef302b81a7a9be2b3c75f27d6f59f078d37f6da62708ec3dabb4025ff9f97bc77544f77c188cc2d1ede8750032b308e96ab3eff947177aa640f8cc7f713bf31324ee86624d22d35378be4b3b3bf97f01c149598c82caa748092b5723e95ca9b83b1246cbe192021703ec0b8487227e6d73a41872bb68cbc3f410b31a355bea48a242c561c99ce9055064d55685eb6a127da2e0aee3b2bbf1760526af12e11c9f8caae58b59f82e56e0c8aec891ac637a01196027474d2006bf10323e750bd0588f656061ce8669168c96682c6f93c66a3a9f3f4213c7cf443ad1c81ddb27161d437ee81494dbada3f18c1659378abbe030d8c71bed929a15371be0dd4e6db5468be0514375756b7f223d95a1ed1c19c0537e66c9f2e2b5bc580aadeec13da66c996871258a85aa073f6db3cdd5105a20e2f962a538ac5d1af2a15574ccb255ece4e020fd6f1721e301bee447cd8591f67a8ce672fec5771f471e85e9c2dd240153fce30985481019bcbd97cb7bf5ecb07c901c2b7c8afa483155a351d78e9d5e5ac54e13bad52e6ff1d5dab930fe1a0a06172c06abc47d096fb3fb954abfe6bf31e26eb01298c96f82554899a6a769ec5445b9d7d92ee1de2be6517e3e213aa00462ce80f34f34d3c69152ce108b7103ca7cc98b04acfe861455594dc6444c2ac5a1b72428872feeae9f61e6b56e3ba141d47750f6bb520766e023f546dfe7a71ae0bb8b1c26156cbfaeb1bb30acf5b397cc224e481019a0e2e4b95248b1184caba49b58dd3a4f60fc7ff29c6589b5192df11e5ff95903649700141428e0b844989cb786627155906a280fd419c5dca9e4d1dcea4cc6fce2ebf2d79c28b29fddbd352c239f153cd9a01762c729e55ad2c578d2ec450106199fe760c5e7c720a71101da8fb33a8429b40c6de8934afce37d75e06c6b4f9c575d455dda8569475e782fb68a5b94d44a9ff10bf17ad1d014ce275f711646401ce9c40090f0b1718a83317afa619302a214836afa0f30e36fb18e2e0cd48cb2b7e008adaae922db3dbbf62f9ce9e9720bd8fa5bd3423a430a15afb446ff3c0cc9d7203d0373b64cf29479a54885504601a9a27c57eba25b41f26f53221257b394ab0ed5fc44bb49104c4b1e91f1dede351353f1e0295da6e43d4c1db2e388ac48637d100baa04990a4f00de11ebb583f335b2935e0c7378ed18fe628b12abdb7b7fefc3445f02399fab7034921d7a77a44a43c3218fc1e50ec7f080ffa0792b9a3f85eff496d5e38737e2611fd5dc37d3aa2db17a843555dd72a52248652606a3519e80c47cf76984b047920fd3daa70cdf43ff72e6427d13f195c1f643109d50969f23a00ebc4d5ce9719a2669c652e0a1d9c01b3a01b66fab14f0f64022dd0cea9bbd2072074b97c04f806c6e82ae357615d9a7bea20db5751e70a86cef95558c005f05cef1888347836dcbf4fa4960d25390ddecdeca3f88e75b6d6704e85af0902bc96586dd3ba704f5a4370f0a02bc3bf3777f39ec88598adc094b80700e0e2f4aca4eaad5475beb2d38fc5a34b13298b61b98200b8f6d9e43717dc354f9eb39d0fba9cf5c75b629e4eb539741273e3c088d48bde2c25e54ed2d4585ba15a2079da0468e17d29684d3872bd4152785f4e00b818aa566438a6b4ce111dc76c905d7b315dd6a58b211625b52d44bda9f4064d08e8b7e21ee82e7e226a998a0ed8e8629b63d2fd0abc14e2207430676824f1d7817e2e613954c695e470e79573a2fd81add09797bebc0fbb58ddda56d672437b3202dc237dabd458d67bd8797512a9d82bd3cd4cace84d013285ea01606840db8ff5593142db00d16ae6964201bdf90633a2e004b9a3d758ccfa015a9628379936322994b2731039284a19304d3a2efd2f2d1d14fbc4933a9e2753624ea0a9e7872d7fccf2665ba0ab223fbe9f486f512150cd64a05711b98482cb664e204150a925d2eaca6df8e1bb79714f787d5536bd24fb957479a2e942ae69164c606389d49d71d08f5eabfb21910fa5d1c8c39d2a6f989614d5c35fb6458eccce13a2c287c4d1dc5216cf7a8f8af1a4e9ef5a24495f7c349e142a2cad7cc9fce12fb17a5df12e98ebfba3a1874e909687591afc463e2069484aeb83f1a943d011f36272e37efd649ffeda1b7d7b0ef11121975b79c58cb7ad17de8b7a4896ada068d729eb785313d7650fb9ab21321ab3490e65c6051449b89c99919880464370f42698ad9a5ebca25e96e4a6ee932b449f8b14dfde4d59210849163686e7460a6ec0280c8c83f0d5ed2996b4139727dd90871e00eb01e701a55b18b2022b2e5bbc63307664a8af9c92dfe5e16b030a7e1368de1c61d4ddb5a70dff4e15b3125aea7d3ad12beba8a98572854abb75ebbb383339b4cb4f7b86a4df63ebc42322c0fdb47ddd83fe53d936a82172c449a3233df4bf5c27e2eba5d3036416ac6ef124ac0f2bc0ad0568439c3ee00ffbc6609d7f8576e87bda663ce89bef481f032ef44f1c20da456344
129c2506dab63e26176a059f3c23b5f7b839f494dce1eb535ceda95335c361fad64f92ffa389ae20393efa3068e6c7ebe9db670228cf8b5d629f7e82fcf641e74512a11fcf106120ea4899c9d95128e18745ff5ffc6c6cc17581237742ef81d14b409466a49ecf99a0d41fc73a00c3aa5dd1c49c880e34dc03bfe6663a03211481de09ee773c62025b256b7c1877b87b122bf9395f220dcb67dbffa47554e9c22447b9479bad64bcde7c092d08bb1e6b8480dc415d509a460888300e3937406ef196461fadfef4d4546e14328db2b8e3d7c9e20fdfaeaff5fdd065f94e224062dadeac9461c9c93e8b1a8c9b2c8d3860bb4813210d01b0015e87494828cec6efafa35ecb8647f898248133b7d9ae4b421e06fb04658c74b134b4c1f0ab3c5e9c6cb322c0dfb4c80710cdd2ae1f235916da10172aefdaccead5a93a0aa393dc43c12b4ac620191de149c719e62cd0c6e98a6f3ab7514cea8e3fd1256af7d5ba7c77985913afb4bf4e3adab15611f489343ff02891ef795b7f5939f880abe4b60595731d096f12e6dfc0b20bef5121ee75528a0597bd58e4b2fc35051f631e9df9373744bf212f733f762af95b25a44404532f722cde623cf0cb2578b66fd2d843e6ccfbdca30373b1c7551547c55d0a76a911520b79bd134dce985632a72760e991599be0ce31acc9eb3c9ce11cf1c93e14ee8068f9118acd32be12a136f7ebfc46f0d880aa587f0077398034c32344369b470204e4ee926ca3a4d98a06516086fdae7bb599921e98e4f81d077df10eb015d33d7e1bacfda76d07074284e8d3078eae6e65650b65bc361978881a78c6a3888d01ad318067f2e1c1ecae143303412061750772347353cd800708c1034bb9c33c92c373b28606d0d6fcdac3c264a2b7d700c431f917e9d37c91dabe6060c0c5dc18e6fc1805818bad4b91c817f30eb0bcf7f595a414f3e26656b63d7e7a679ad9a885682a509a8f6b71f346757c2bb3b25335cf19cebea6f937654efcfd76060454914094f40d37fc105211a5106e5cb9ba0d3a031964d55db4c77645f5e2c6402831e4c051958c4d8ebb8ea57db5c565ca7740c02facd9c615de54863d5d38da19177a1f39d0be68187d64bedb398d473898c6c55438a04b848757e8e450ca0402f2315098e89024bbd97968ea26349636a59ca1a4f803449f25530e8d651c110623b92ee5b73877fa3dbf336cac9d6eafa415221a8bb54dcc0aa583121a21c208de32eaf47ba6611be5487a180dbd023c25f830becc456d6ccea638135ba558b3899192b22b3056e23711344d66c502b8a040ce484754f609fce187de09863c2fd7ed6b7ef1fc57730a6913351b28977f2507d6b54e67a711eefcc7d9dd8894654df6bd2488eb8417ca1822f233577a9b3a1d6217d42bbce54b793dca4b27fe903032c1936b9000e6db720be5ad4605f7c2bb8dd280dd77ff1db31dd10a46d61e4a33186e374e59d6555fafcab24484374d70a975774faea595b79d6914f4b583b6c7ce46c71efedde77c4b7778f64f89dd75c58041977b052f7cd2d30ba7e4c0a3a3024356a3cd6e09ac1976306800d10d06b31e58bb25373eb4c387138917e008a504c6c767e90b04b9f3998c042d82971910b1054c13f025fb28e92b57fcf646c9aa3b7e693ec898a2af00a6b40f248a29952bb1eaf8886dacf92d5edb0d465f03e5db01e4a5485268b3bbc0a3fc55f0053d5447c082db07333b65fc1971eb378ea3dfb06a7f61ce419e6f0cdac53fe4199b82f5a6daa33c1d4154faf1941482aaa77948fe86710113ae3aa5a0024e07e1656ce8a939c236c3f75b06691ef30be28d280e8b6cdcedaac4764217e2e4fbf7ce134b9cbc66b885056d0b805f5a47cea5f95b19331e1407ace8095f2eac8b69c9c49f5ab239c1326289011824487ca3e60016ba41c60a6b14ec80ef87a335410860b7f50d2ba396ba814b68f7bfead4baebdcef571c5f8a57ad4cac94712efa9ba73e7242730660c0f5b45f51fdb710efefcbeefa3c39d056b607fd3a76e71da461f21c4eeda03fb2624005c746e465241119188b3022db779ca5e6de643b36702808644f84dcbe79555f4fd6b431a2f9017354b5ae1b9aef465e58f03ead16d434526afcf7f6f97697e913c8460a94ff2323128e917a8c373f0de7c80a470a69d7358ea3d3353325bdf73c90b83126404ba61f7895aa0caf4e560848ab19a1f404361d744b428ed0768d544b297ea86085cf91cf18417d50a799d137e0ba758d7a5d8cb229dc343b7a941b8e3c0ba84eb48296b5accba2686a345a3f3f0a274afc8d6586bd8eed15e952413f404e4f4304ec398c924917ba2d62a817a7fd1449201c11c37ca2c2178d43989d4574706bad61e1f91feb055d8fcf442c9845e26c533d63eefc244f64951aaeb55a61cfc7bcf604655724069e631962ca3270c05b89a7dd8a3d1526bf3c1dc133956a0e89f7f0a48a3db281ad6a091385028172e998ca144cd6ab2cf6db96e52
5aa98f8e8bdf4cf1b61ce6e7bc6d2c45fc23ad3a96d4860a05a44537082c5a00a5107bc3dae369f3396a81b9b550d2f7e3236d7db6217f0b8e9ed32cd84e4eb7f0d3c97b0081f83e543d29e4a22dc82571808801502c0b9ead517491e2f8b17d6e38e3d9546127ee8b4702608597a7709bd3a38f35fb9ce300da90b42442f84b81db6a8e8d0410fcb288d3b7458ad387181601d8aa945632a9f4c7dd256fed1bd8aa8f281980d46f3b88f780055030971d5e16fdf885f85a9ced23754f1c2de5746c42f0a21921aad1ec9163b6ca9e0702c13e82b0fcb34c8b3b5147b2c7bb0396a3c23fa144ca186adc81bd090ccd75741ed03a36b148d85ff223c52235ad963f7892aa610db046112a604bde42897522a3e77d294ba2deb15274287e6b4eb39d0ae742b1fea8375d0c16105e0c0ac29517645eb1929deaa821b5b32537bf07e6b0ffef84f0591bff8d769efd6a6282ee8c56c93d12bc6bc49053244e2b2541ba121fb7b1f3db75551931ec0ad907d8fb25d194a59b21abd9901e6313af7d464f00c900bbcbf8b8c2aaff067575d424cbf3b3d5ab149c8c70e7032546d80a390bba006cdd67d97efa0d37da2d35649e8e121aac9e73f4d5761ce0773625675ade3d87a7db4c59399fb84472b128beabc2f3b1bfacb5d0ec766391bdd97078934a826e8f8d881f5ad398d8fbc34e8e7db6f4adb4e0b9002d96c7d8b47b86945b3b7f63e67bf8a31b26e0379e10e97758a86ea9026f73529b3df6ab8012cf64ed11862069cfdd54883a2d9bb2058363b870a933f26e70317677c3c340813ffd833df0e6adb023a0bd5a5fdfc92b3f20b4239105c2b2ff5316d268183196236cd3b44d150f67e0da2dab0929e8ae87f1be7f10dc755407be681a109d1c2ec3f7f1ec3f108b9ec2a2dac2c81feacfa7865d9339b69ba3a6a1aa077204520d879c0881eb80d7044065c29b820b7442073a6edcd39afad9ea6bc58e1f28db57d893355a6d55be26d75363c0e77760a9570dcbfcb2d5a4b44bc3f55fc92b9a7547132bc9cf0f28edae1eb6ba09cf2be90b8931a6b2bfc7ae0bcb72cc995139d7c6ad6a4744fc419c0a5ab3f3ca1fdcd0de74332c18623c4e9cd5a2b932f9777c2bc5e1ae7deb2b1f8312496f218e5cd79b922e1537c4507c9e7cb524911f1ded80e45386e7dc871a499fc9b29cd90982db9b0c856b2618ab4d1dc1fe547b514f953de5faaec5520896eee9c97e81ca3978f2eb3b7aee140f223f12803fdb37f8b241dc33af19f1b4d9ca397bb9bf20a45a0ec87aeaea1087343a21333af13c325b0abe710e536ffefc553df50c38aa6aacd79b0f823c06354c71c129a70ecbec0fdac46cea283f323ce972bc35018437c8b6b0030f695fc483406e275eeb22a8e0d72a12d0a6de504fb58000358bb46083638119ef0d06a822d2e933fc64130291ad7be3b734618f290ef51a99b9b7805dabf352f477cd58f9113ef7ceef708839be20f46197aba096da0aa821491a80482b40bb5b2d18e55e65a21df35473a1f4c34c9ab2a8a44c2b5402a397329a64a92d659e7429c4f4188f1a7e84daca1187a37250f4e3ae9a1ba8b44c205723cff6f0a0a1e6c75407207d7d85b70fb66504aa01c5f0df9396a8c44f20901194cdcefa9385fe63b3858234e3e0352b7a9318950a0aa7378e07799809a4224a49168aaa01afac8efd067eef39b7708f487cdd768349854bbd309ab86ca2f016e655be12b7aaa600487759ea3ca32bb10202017a6cb6c2ef8e562ba2dc025799c5767c97e39f1ed1345230698d25b423e5b91b82c99e39322256ac8053583e50d32fe8865fbe7e67fdcd3004ea245e9ebbf1aa4e3c06ebb1dc144fce7f20dfca3d7ec4f3914af38286ecd50226d5712bedce4ad2c6c71fdd46bfab6f101216bba741c4bf336ae9f28f06751c12d722291e780f93120e4daa3950161a4c04b0d08dfd5dc4be2ebc77ad8e50212206e9ff82eb42aae9405c2bdf0ce0f08a2ee2472219ca808b60481d09dff723c31348b54e2eb442d0281687907d0941d8c4130ac85722abdc4b831e1ba8102e12bef57073a0254916f248130e0db82c5ecf991a8120d31bc4052dd2e180293bce19816abc4e7daca40d0ed093b7e0f4bbddbe383a5c21b8acc5d3b7d6201da14afe7901efaaa7df914062de317e7a2b74af497c836049a932a06778bbf2449ec4ab3af25f847f5439b1534b2831ee9a7be1f706e984d5bd38e94301a7ac084784d09afd1d8d0e8a1f2cae823a99e563aa0f78867276d3aa9711b7157a18c83f7637db8e66e70585c82b89d89078c02ae93422c91183633104851db10f6b7a85ce309eb3c332dd425bb412262c334a59277252187611c726c5a9c507d7efe94645be73dd0e4873e33cf674b97e2c48a92e20e06beff3db6c3c0838938fc252f7812ca318ccfee7c29225a745c766d561e39d789c3a7810cff65bbcb3f62a9bae515d016261bbcced6e92ea993f13eec5c9bbb0493e08063485504
5a45003d8da5c49f49e1e4d7c3a0987bbba457a16560dc93a592f3fff637f91992ad21d26b79ba29f7f280f410e037aea407d4f2096ef3a9300ebc44250dd190c6646a45af4be1ff17ef0542e421c51b80c50c7acc48b691e63f6414287719a1fc949c76e6ddfa09e047065b730c2322523719114c572d9bda2c2bbc658c826aa7c4cbf75625f35ed00cf0a02e762250e22cb5d44317664352746ac6ce6f18439e54b438d9b74e8886e51740191277c4d87082bb7abf03c3821062d40cba03e72dd22917da6a6c3ee9f4f06cff242352f734d2780505238c40e13040fc61e123fc1943f4a42215b78db411c7ed527711e30454586e5ceb82a6f66ad296581a8077dfebc24e7062b8bdcd5c2f7d397659b49c2c911005185c1782e478366441e8b37c4a905f19337fe15da83950f44e7518df5418921ad9675db4ab0718704b25c86b81060db8c67b323a8e80be7c5d36be21df11c4c2eecaea43db3d7cd9bc5f56d667b84144cc0e116cca13891249011415d0495e2804f0cc7353969713aa80edfc7cfa31a1cb10523aa8df8aed15f9a98a37ff4bcedb89a158d75cf7977381bf70887c58bb69682a91f8427f10c9d7fff664b18309a137af53ad19afd57703574ad464cba76852211a14070a73533a705b6ba595f3b31d97c23bbe4627dda0f7e89a6dc07815f2b82ed4ab624477855a0f9c26d1bce610c9183ee1968b57e50bf8f32f7409937cbbde45f56bf28c3067585ba86afec39c963f401b239b986d84ca87887471b9c313ab15d760f61d52f4f8854a9838581480eaaedf8cef6a0e401a548b9a8ab4e905989ff41b9df4a517e111838a706613cf421fb3e69c294878e5fc9d09d1891cb473efc7d0cd44df071e280095d818a910cb49fd5b087dbeae0916e0d162c5cdc001b38a7e7043231dd340aeaa71ea84b6f728a1edd243098705173742cfd66ffcf62ab1b1ae1234b3218489287cd59355b3a9dad941eb2803fa9de45d354baa1725363a21f59fae913ecedaf4bb76502f2b1d37454e07db751c3e98f78d95780609128bda3b3b5a0a3cb84858481d4a2a2b69e8904383de9d6fcf03d7e5bc8743741a4ac333efaa160684dd4134f2dbd0c70ffec3c4d218b4352f4f1773b72a3bcee509a75fd9a492b70f5c391870a75d094795ac7181be8d02caaf27920c74717c8b6fee8dfc441517993d4a6a392df447a93e120f8060c981c4720bd19623f018be5125192b4db44cbe0f37a2922c10a2060123d7ab4c2412e88e4f852e1de5ad2f368f345b0ff33dc883659fdc71a581c28b9caac03273ea8b6693d152f25991a9eee82daf4d68191be2305dc6f46e3f030d5c25a47cec6780a029c03395e28c24395883f6fe40ec8cf977c17d26fa953b46ae04d087baf794321ec7bfd9487fcd05488d99738629497cef03ac3a40974501fe6ec0131bdffdae34d66c619b0a8381bc6578e3266d1b29b46441835801f247c9710c03b5e9b3e426638657c0aaeec2c21566b35d03c634b5264bd7b3bb8e8cae3a9da6d07faeb16ae8d4c0ca7891d28078801088a97ff80222cb7ed52870f17873b1d9f0326ce23321f1d13ae98258e82fe4cc57503814eb7170a903570d8e09990776c1501f7baf87f7b3e6f013b720e9c93fe4130d7e4e1f8c6707252b0dc1217c1543bb6510381bf89dd9a3def0ecdbfcfccededadc3e40cca5dd111762b5fc8795cf5bda07936d7cb572887a3f65414321163ab0eeafc09a884f16c05cb6a2140466148c3b21f9ac32a050b67a0f2879ee331ea5ed4fbab08b9a0853655ca2e7ddf1d10b3109cbdeecfd9d9c6b87cae677934f74a574a37bcd597b8eeeb6f1b73225ddb180266bf5203a8638d3348c73e527073ea3c24532b8bb9e46dfaeabdc221464ee1cfcf42f3a6b0d84b24cb114e4f849d4ac07532a26d50d4ec54288481349aa82467d6988fac140f922103c500c40428915989741c0de1102d95845f722f0b1dd86e7ff155b3749f96dd51bde9940ba7f3a6776768edb7507a18052e106b94e9d64d5ebd847b33fea1641da235dcca2e1737c6f66e19c83be6251ffa08b3b05ea1fb43ccf350112c681165adfebe7235efdc5d9c1bd94cc2a737d8ffb30fdaaa97708278d165eb86dcc2f134d697e5457b849e649a7ecbf3ee227379c389bcec296709968f09b618d10e357d56d94678d54af69da23a7db29c808294c307aca3398a9b396186af622d2ef6d3e4d0d9068af2bbdc5ab49900aa2394e3ed145f1061590d113edcc9211ee123002ef73a60962ab118fdd572a2bd03c0636f128e181ba3180024427cea99d21e3c16ff42a8e0b13a7f0561ee0d0e056312bf3e381ca04d86e6e7354c59bbbe1392d44457ba0d36aa8e5365ab004063aad747c0d4161aa8589438aec51a2c6c4bd604a890a0b9cbd3ab158ecec3f23dc00bb912a4e88edcaadbe799b53e44cdffc6ef030945375afe24580928c9372c53482ce65fcb8c5b0979d6e6d7ecb92ae7e1ac7b1b
a88849bd84230273dc0ff3f94dc7440e7bd22669a03fc49911e5938841e99b5b26f521244e8ba36287e539bafb2712c36de20770c5ca73cc6d047c64ff9699a082d2234993a2d9868e6c604fb821ccd0d0bf8fa2f29ee9aadcb3004da32bb291114144fdaac7f092d48d52ba8bf195987fef9a1fec34363498ddeb44d44b4faae5de8dc8c7945ae98f4759b686caf96bd5dcede266f8b4a4de80d0443cdc277f965b3d8e5c8d9b543dd08a406757fbcac0c8cfef09152a457e62d42d1b72c756cd92b991f2009ca08f2577da71766e82832d2b567acfb202b7f183e1f943ac255ba32704e6ab9c5a939e02b2cad76106ce4e0060f33f9f8fb92938227a3f2c2b8f18bc46532fd33bd668e02ba9922799c982a5cddaeadec23295b8a4fe05177b378a3ee22994121bba50cb60cc312facdc200c30f9b24b2617032e5ceb16fd8567678ff4c17d37da79e694782ceea0083552850b604d1cda4f4b23b1e8eecbf7d4897d250ff636483e15c5c8e7c125ef4f3fce9df1bcf24aa70aec2d62cd34171efa78fefc92bbc0599adb0842d4b376e338b6def01778de7807905d51ff98822ddbaeb81b689e2142ebdf5b6b3e01f90251cd628b9aa7d3b76dfa1a769dae6f1c8cd03973bbd5a4aa26350e61ac496eb9139223448f6a19eef78ccaf7bc34e57485a6a346220f8a4943d21baabc9878c1cceee0a60ee74dcb3e2b6e775327063ce28fcff6bbae25c9395c49370d18a6304dc8977eacaee9f52a0ee725d2d30f914544d2d5b12d66d5afb4c06c3197d41444a7404798c0798cc32f500e411537446d079495a3fb0956b3e514e68c0d627936d91ba2f768572e8841eb335c6acd4bf7f2832140d60c02c6140e49dfc412273872efa4e9db176f43cf4f022ba1b7a0a22133d6e63d96bbd41daf1781fa1a51a5365e5bad1d9ebf5e7b53935edbbad7fa7c5128be1288fdf55584a72904695100ba52278951384d7e9156f95c1a4c0d793f67377d02672a1a1f16159d1566043aec27d0ea2c56bed0e89e2fb6dadc8bac152002509e4d4075cfacccfc259469eeb24c751c0994e77d2896936598f265de60e15fbace0771743fa1b3d2aa8d199175e23505728fba29a24f5d6123fd12238971772ec9f0de90651790de8cd5297e389448cdfdb44d4203bf7801eea4b245d479365d77b76231c78bb651eef44634e3cee94c63a75ec938c79ea98278ef5d4714f1cef89639e3ace53c73d75c6034f4f6a8e5168690888f8e25c9303b88f62560ec11dd5b3380a6794ceea289c519cb5a3704771f6e3e085336ab385cbbdfd8b2f1ccc9f9cf7140762dc088cc1564aa41d9e345ee2abb70762bd08e2df1b7e58fc56f9af4c7b91561847f3c5da4f5fda0b71882d497a3fb8ae213049517a8d117c37cc5de7084f7a09464b224345a769d5696b27bd14004fe6ea256856eae3abb87aed580ebd6ae70c7248f866a9830c2d89cb9352744177c41df522499a0adce2b078708e78cd03c3030243f68c76b2ea1a4ca7ba6cc3304202c15230f9cd202fcbb3a03d9d4f19c47ff0b4edcb851efee3a1bb61c1f41af7cb8603ffcaf8ce0e70a0d43fcec2ce5f7a4f4668adb05f60d966a70622aebb9aa87efa5606a645a9ed63d299315fcf6493edda247fc9c0f6e078ae61533cb60f2bccae4f3a86dab32a7bec240a07deeb953534f6c8d96313a9d06034f6d8003f658f271985471a404c54f5ade0722ead480c9b7fe6002d2bc832765e49019851a2e185b7de688ea839a7a7baf3b52484cdf5971327c4f037687fc90bfffcf2df3fc0ae59feb7f7151ce61a50d33874d0a0dc80af297c193649acb68f1873b1361b3e6377ac36c0c5c5250c2571ae0a50f38533214c40e1fd69b60548eb4190223197a241656a9b06f7c46aa558a83ce1f392910f1996b6b54a1375037ab274c5872c4a463614ae9d81c0615253dab9b0ee39ed36a9032f63f3a8321b81fe7c6a0668ed8c8f4d18a3bcf60e176c7c9e82384a3f6a412d50c33cda7f1cc205cf14962a564319b6ff26313e5fcdfee533a4d54b3951f528ce2e8c3f1b358691abf48edbfb4d030525d03ed927e8d8b9881088b1d78eff842f763d3b8fe0c24ae62112769565e5fda22e04d03136049f2be54589a455b655e2c27cb05b407965857679e0e115ef10026aefae170e1c8bb2797af364431d0947b2b5f8ae24dcdf932fc53794884d61d5d65df21765c820cbb07af79c56d2e2934a00cc98df96820c2f5e7ed48c688ce84de9242b7ef7cecfdf1d194ed9f814bc3ab9620f1886fbd99d9175e98162a51f254e4a7410cb3782c94cb08d1a5d2a81a145b92a681457c253d3c0dd3686385f0cf3e09e60d5cd8c77dc3c2c6a671943d0829a3ddd36486b15b34c2fbee569b51fd0e50fee522a359e20be65b8be63c5308db479d987b4e9bdde69ad11cc03107919978496458064be4616329490298f395cbc2946743fbfc5a8178fba4b8daebcd0d1981427f738710ca9f54584
842117a73d854e952fd62d78717c61de3b0ae30ad96d259918f279174449aa97ac92e96819937a5ba5301d095e45e08337d1302b85beb6a5411028decd89d75a82c2676c988f37fccf3294efb00788a85f10d020c556020deaa0e080da67f22b8ef408d2fa1ff078eae7356fd93ae986c7ce08d15e41803f397d3b7bd55e5309393257f109a4de658498b61a9b57b275fc4a26f9e14d28269d476a619f1b7e018ce8b3f61809c2d3b139b488b1b20847b310639bbd8888270ffdc2dcfa4b90eff966c87689d2ec1110e003ed5705165e446b4a2a4f14f64331b40b3655563a9061bbb295fb526da5772f0070382283381474c3e27653f1a9119c50e04c39934495dbc5ab581406e1515b08f110177ffd0c7260b9108c3b2b7149d06f06c6e0e676deb7af8872722e35912972865c22e54e9d3bae013516bed511411ab448876e6bc55d17e608235edc44e5acf435fba51c3d9652837e4dc9bfe562fa23e8feb0319cc9fae8a447c7bad34ac75689f051fff185fd196182e1bcc9c7dfec80492a8a1161753b0190b94b31efafcdd237983f4f5016a1f8a4d0359de7a9f86907e7b31b384ffbc80565069c26d942b2c3c3358d7b000d39cb1cc719e45e3073d7c91d3d10e9983f14cae0b092dac66b4fd7801f6a84901b2f34ccf80b311cf06b4da3b53273cdc915beb11dceae657f576d90f19e96e0aa2405ef3cdb61c7f69369ee1ce08c4e2829b65b974810fac7c36a10b529aa989563874fbb38b2f29c57d8cdb639b2f28e1b1e9b2c0df9e3d9a77bdb1adfed130ab425ed897d076c3fce2b550ee6ec42117564f763f74d4b209a26bd417b1f7391ce1e23ef72c8ccd2d0b5803b5a2047107fb1685aaaa4c96ccfe9757f358c24f12cd85930ff93edbf79cc13083b4bbdd98457824e24b8a3c1bba6167a1e8852b8cdfd34afa0466db4806d78055b1ee677e66842b7d051c59266b1413b02b6c6d80bfd9bdd6f9801ecbeb064d5690a4422d8c977b5252904e9a992f4a15b8ffdb0dd21a1e4e17cc2183055aa252870c45fb8a9a7a1f555c0a619cc891c3d7d15978011495fae8a8e195ec7e0046899cdb0667d9aefd529742aca5efb45ae67cfb0d143b4ea20db46002c65d57b86b793c63a4806008e4677b9585999f216f7e52e06f83250ed251b8ea333d6ee0cbf876d1da391aa56d0f8d5d7ff4a6d588ecbb99b7416e352a3f14bf3879076602c7c3ab5c8a0368d63f90a2a83fc4110884ec66008ccda69bf56cd68850573e90b2989096f500ad4ca6cbbd01f37690ddcb2eb05085019774487c261cc3e389f7dfeabc25ceb63093a8c473a6fde2f91021593a846c04f18856f0241b0c5c3219efb55636bee5792a28c0f046bf1aff0195884d3f7cbc80062f2e019cd9637822f9160d08033200cf0eabfa02d95a399d9cb6081d7f6894c19c85680a50a961792bb40366b9bf8117d92933202f6fe662d81ba0d7ebf8081b7d7d8af4af066ccb9f6512a4599822a45a85003e575b8f448b79504473c6d07e7304ab11d59a41050b2e5666256b22f14a5fdcc8c8654eec057284c5a8e061d1a62f42e059d3db499a2a278e09678a3a7cba9dcf84899667ba9b47069a770c529959c4194c60f556034648529295904f5fc54ed12b2c34c864c584ada7f9bea6d735205e6e01dacd1beaebc0d72d92329e32c604c7b5a03d3f19cbfb80f19b98ab497936980f18104e8722fdcbda552c9571ae10f39db206284524e59a643949ca97041865510ec7c27fb01f02c74839565d2e70104e06702ab905b981ae15ae399fdb013e9334f31d7c955f658998c81209c5449e0b094b2fe595844cae1e08dd7198e8221641559ab33ca46b3ff86b8e82d6007c702b91ab2ea4e08a19b223e1c1d834496f480b8b5e1ea5be691508b790729c46548682f0441c3b95b3049bb599465eaed31bce9ef94326f93c5b2c1689d5dff16769ac1e6fbae2c66170cf397172a8536d707900b6ae7677d6fddae6d780a77648a4f1a4eae6ff4caa59f5a7853171e9096133c7f9e3bbfe267a112ca8dc570d6b4aa1a5ca4d42bbd4bd58d8794df97200b67291f7c3220f2c7dd4a259315e92ff9a07406d67cdef466ca946538fcfd7729303c686c1cabf707fde6e8e8e17fc2597faf000a8813f14800cfb1c205401faaa4cbacf14cb2b9fcc86ab494d829895738c23bde9a9ea8e400faa01ccc27d295397c66160e09d9e143b3ea68cf6687ff3288a344ed1a6514f065b842b13bc8f055d6121ad4fdecf114c9311fd54f98c6b3169e8fd2a074c5082b6b6a17ac6fb73ad9c5b68775983f58978cfc8c848101ef18f84af619bb81064ceb9ed43a883d73b21c4c5126281ca20ac3b1c785d6767154891a42288770745c1d7cca3cb577d330239eecefe9b1ca02409046e8684f4baa697990f0df184d2430915483369409e2e282ef38de4b2c60c255e45b4fde08bd7b32a00d61d9eb3fe3eb05fa7aa19a1589dc55c3644ece95c0bd3ec035f
7dd230add45c172ecb6d418a99505459ef5aa2a5dfececc0bdb8dbe69e37090dd4803e73cead12209fff6612df404fdf931f34e748268ba0e60b6e6615459c2bab8e649b351edf43cac6676e6cc43bdfde4a8f583327447978163160f2f2f714a455834c767c7d828e4cfdbbcf0707fc6b76aa40139f28506e0572dffe403cf4a1dfc123dd057e662e39d3146b236ee89d2be2bd23ca584d94515e56a9f15e8178d1a78d37eb9d65eadb4c80def84d9fcb4da9a399374c90469289eab034b7286b253832a7ceda9f6c284405c733491131fc9e40eb8cb40e92b7b04fa18b3afb2c19751b3f04fbf7720784438334feb3f6cbe5ee75fdf4266825b309048abb4485dc9dbb29c022d4dae41d572b35d44204e4d3ad01fbf4b8747ef199dc90fc7e62fb31d00807af85fe5e418c9df9f248ee45fb5d133534fab1895cca2f0b6e637fcea6644ada9342a938e8decf2ae55023d0c689c6954801c6f9c0b019f399631c7d812577ed14e48efc52504c14561b459e7f938c1794b79eeaf880a4c052b1488c566104c46d5cfa2f1472317ad07341b6b70fcfe4fbab3d308827284cf9a1391dc557bbf95098ad7e1462875903e7492a888c12881c1b84dcaf49a8f26ae8ba54e0f0cdd1ede6728a6dc042f7a1d71b3136cee1401c8a8d3c3b1d8df4280fd202f8d7aaac4d825cf0cb006b4101afe34eadb6eb79bb012227c5280acde45cf009e4d3624d9208f3f9e6cfe8cc6ef477860e96309cc4b869ac8d21b8ec2e6f9d73611b943bdaa2ae820b005be95015f6c8340cc8b6276b6e0629a49b1fb304f6787859d1e9d58217296b47e5d567490a46283c92132cfaae069e9dd8870d46c24381a8f6281b5e62f211ba935f1c077b5485c2d5784fb019ad405ec4a33adcc385b72dcd33052f75228bd27b82dd28975592904520624cbb5e633b0bec451a20f480025de4c5c6dbbf04422e3281b942fdb7e8821936d6ef3601cce035d17471d492d2f2ffc16742bf3d3c37c3def1be94190606a2bfcaa02ff8aaf7e678547923d48e397fae0067269ec03b4b1ba5a990a2f06b335287d33b29437dcdbd59bbf1953ce9572075d613057295dae1888f6ef845c188b0d5671e5887cc8b21ce08f10eb2dc26898cd44e246e1730bd04e5514174c768cb91167c7214253e6883c3621c112e23b160bc7c7a0a792da6e4bd68eac40384a9e306e5f3b4c69a425889526230a377f954675a6b000738d503a589c3e33a5e17e107fe4be90b45c29105b10be8c99a2a71163f50b70a0e8456b76b46313a17d73fe7b38c4d7523203a05d05fd645caa27d67a2598d4b4400d51ca725ea81ed8fedad71be75403233404d05d0a1cee023e23bf43f8cd11f8f6a0c5145e0ead1f97da37e106313556f3669a7bd450a87d7249711046fee3222bd135a3877ee8a8ee70450ae2a0411d887e8095d7baa3b8b9f06c5726dabb4b22e658f141abf366f2a749f193c0d5dc4b7949a0c620445cc9f0a1d4bd430775124d6ec6c54084bd9c4923884002e38585a8ee4829e056111071d7271022a4a42a166fa2221c15009c100d62ab66df82ab19b135d85b356339522ee4518785fdd21e5ddce812c800e97c6f14a5184f451656ae92af7e267f1008e43f239f338aabe9aecb291a49d87c251f43bb945a68ad8f762859bd26f2abb322419bc851b9adbca7d381cbf42cb9836962c5c1568ceec2952a441535bdc2e66fa53a5a5c1674557111ff13e50c2a4c33e1d4dfebc3f9a1855d1c79f381989bfcd6e444b4a4fa297f324a6e31c8b77517cbbfe1d1d603cfa65f6b06163382b8c1be328336ef920910f126e78a1b5d0d97e2d1dd4136a62fc4ce89b5da4027c4080808610d4ec818587a4a6b2a50c88311119b75215ec4e45e24c42e1e25aa83363a2dc733f93230a2d2f5a3833d00c17738be237f3c05c2fb743a5f5cef877b1b2aa45ff728fc4f4645d56f335426d624ede1962b04f00386b2071b133b94bf0875fcf0a2b528ba9d37a577754048fb62781d5b02f615125b33e0a5af70f01e50bfab2b52098c5bc3ef8bfb5864936e34105061c630a5df3bf13f86112d60eb2e0c06c6308b96afd729f21d03d1e5eb1b08e85fc6465a921029bc6531aae52b5bf203cf83746008326ebc7880ac0cfa16ab0194e0bb1c20d23cba17cfb1175babb4d3dbdc07c86eb7f77719a8f481a83e9db0bf2302253336ae365cf241c13a8c991a61b8329e705e2b605049e95dfeaea2500d6372e62408c230930c90d97860e07dacc0b20f91f9b52a898a89060e32a4b9e8f9e3efaa3f3a7a8eb400958fd286b22cb16b64c8bc24c4c73557ad7a3aa342d304c3fe201e28efd0f206011d3f04e63ced348dbc69f78d6529415729d8862527e9f9d07ead5bb94cac67a09e9b2137a8f1d6bc73fc8ef4ba0a5b66bda247372ffe32958b9060e4d6152d4fe65e952f1b113dbacd89b6b911c19a5f29bfa529c8c7514dab93d56de7da4cbdac65e07b85c234d4d602
8a057e695d3054aa9a0e6a0566043648df8a781c00dbbe4c0f8609ebec800eb7d3a5001c762504bcf2e59d22ae78d18e86707c5728c7c2ad6170e0c3f90b019d78f36f04c59a6f1b7774ac60b0c2cbef5d9430ebfc6ea365edde7b6fb9b79432c914db0664066c063e3d3c16ca1559406e3e509d74b82b70d934ecb2cf91271a8db683310dd34e34d50e4d89988669a7a6e130f016abb94cab284653f64476b7cda89330c30e0c55d4a64bad45d3615db0c9c08e10842c4de63b2f63340d7659ad1897cc0c0c76314dd9cd3a155d602354c519643567c8c27286b6212eecb3e50c714970f725c946aab0db7dd1d908792f4d7a52310e4474e57c23c065d74a0e96fd0889ab95640993272c20da3393c57c601f28b6583d2d2019588c0b0c4b3013a6bc8ce0c98ca6684546352223435aae17cc5553115d2a5ab86a2b2f4ab86a2c2f2c609171821e19a49720a49697222d2e2e5fb8bcbc2c018ab166b82036b55a0d06186ab51a0cb55ad76ee8b9e20c1108975ddb8270415a2f12e6aa05a19b157366d0f801975ddb585b4f107f202efef8b86a4d83cb56803b554c1f2b9e96cb99e0a1d7a908c4a5552481450190f7b88fff0099d96d0a72e3987057c401f4015d8e2a2d10726da6566895d68805b2c0d5d111124fe24b9864184d919f2cf6057d40b9c702a16be8d79faeed454d1bac7b41a2f380bceaf43e972eee0782e08b13b8b446df4f0becb1e91afaff81611013194dad80e49b25f968fe46b916a6bcd4c81415980a164d70f14a112b2c2fae55cc0fc922e3c50b1991a352a49ba6ec33121ba212e2a7202cdaa2ae57cf68453248620fa9258a1697971a5c5e4416ec4524127f6658a3939135b24647ee10f88279c10a26a606b8b446406c16415694b2688bba4a6b9445d7d018d90847e0d21a7d304795e0ebe371b087e4f3fd64990fc86657e08ba66016889a6c5006213004f7e28498361c41c1777a0e4e8540abe8c312c123f6f8fcd84dec61e35e84b8fbd2c3f5702d47957d850f161d70d37c9d230aa5468424c992192341386cf06cb00a58c0454db5a341e78941b8fc7ba1e2035f78e0c3a25d33c150dc61492c269bd16a2d5798f2c2452c45450ca2826fc02b3eacb0882b88bd0f8b0c17306246b69f214282e8c8202921b5bc18697111797801c24b2c82a883195d04e5c5bd7286b817cccb106c84628070d9453f8dc551cb6581aea931608e6c841460ee95fd539747ecf18115713f3fdc8b83d1548fc0013d600726f3c6e6d9c1556e31778f9bd9ff79c8425b5affc54cbdc2a4b0bdbf8f305ba529f83cab208123c049a0fb1b43f73706eebb124c8edb8d97c2e4d8befbedbbb17447ae347de63084c990232c26469ce0903f150e80abbc7dc9738b0a705965333663b5fdab8ca6eea55446ca9e8e25ae323ade6da7a9e97e7f95b5e0e5fdf2bb245ddc58cec880bf8d654c4ee1663296bd041c63beb1a9a99c3dbe7d3993cbd993fdbfd90c2a51a2448972e37180856d2cc9b1145f2c541a35f907a58c3c2aacf6b8e971c3425313f8336441277c1bff4a60e952d3078e9df2608f9b07471ee0a78c97e648f96fe4f1d520587657288179e37d87119837e14852930d35f58325981cb7c7cd5782c9d1bdf7dec8c3fbee7bdc7c9babb4b16a542abb74a91135f9df8bababbaa851396346345567d4e4b426e217654f52239aea1b5ac32ba35c5bd9bfca4851f0eb70825ce2eca3133c62e1f3bfd93d690a7d9814c652cc3d328909fc57e4126799b2dad1092e9fda803db65a65ffc1cc8855a6a58debb1b94a185fcb945c2bfb7f3d593e451d9214d1ed45531870d37c1e1c4d419f3062027b02d4508278623db2e9331d0723269e62409be6f380e399024aa6fdc4934c9f462fc9f34b96502e679e74089793c523f31cf175a4a84f6020b5e60cd9982ce86848ccc88ead7bd1c96e234f3a526caf1c9da11927feac603aa80b989ae67de941ecaa60c8d980fbacd3d1025c4e56fb9c9d270ae1e50c7d26c0e564bd669f8e146017b89cac17188a40709030c505292a4e5e40602420aa828540d17ac22b55d41596196020243e83c88d74434819161932c0e564b5a0439728e9d4d82dc4f9a801c3004db20f408b1c1464e40c91b098a570b869c268ea446343325027db661b2d7b29931f49478a961715c01cc5a36b6cd778d798dfe2e212c5e5451ce2457c09429cb1037d4affcea0c1030d181faec7683ead5694671bad32c7caa919374da225d9936447628328c3c418e172b28c4c32169333348208039d1100f07202009020968084e7c1e564c9ae9be6e320395fb20297936544262b8d338c39432588ab958c68564fd434df2f877556eed832b2bf4c0f81cb594e1e998cc853dc3bc9635cc667268fa3e873a5930cf7e4dfb1a9c96fc41a70395933948e0e9aba013041071368bd4fc2d035e8e7cc1b75dc40d3d0d41d7538c9948e4d4d02d0f1f2041b71147d0ac2484099c604653
a81a82c97e4d7260440a684b1f42400608418181a33c4175b05c0b52e20c666755ca069fcb751c7075387862b246673af789a62114266475fc8b869ce7c60dea0abd9a6f9dd5dc9f3e7dbd0d44915b319673823575281fbfb2921cea55b91134734b64a8d8a6a765430604c4c88633fa6fa91ed079e2b1e4e4e1ed5bc010211ec70e20866fb2d045f237831a7c7d5a9c241001bf7fb4fa6be9163d2b86863d2f8077a571b703337b2f055e5b8aa46627337ee740dff2ada10c7b299d8a8b5daadb3d66edcddb60b76a3eaf7a94b0180efadccf04a6da76ae4366ec6dd6fb54e0a311098867260f7dcf61b57ef56b7a6b93bb83005cf750bc10368aecf715da5dc0782e0e7dcbc170cc350050a012e736357ef0427ca7eff4f1ad9bf9c2995ed1be2a4f14f0153421cfb2973146318025ceec6d935eef5becfe53ba78e169cf457cd1b13681aa740d770313b1d77b00ccff7be0e8627c60a0ce61622f2440c11f6720b1121c24324080a22455c41e445c577af285654ad140527a9f738e94f3abdc72612e23815c57bfd467fe9eeee3793a63f778d7e777f6bad1d4b73e2275c65c8e35bc98afd953f95eea2febef3b10ce7f962cc4935448b99654d947ba6a38d6c70449e470d65f2cc164d2ea74b091e324c489edfe50c8ac99610cae59c65da4cf27d39974ca33c7fd6e617b99c477912b12197becab9749e3cbf749697de53ba4f9edfe17749ca61b804153944400ec3c7b111e4f0c5d8921c3e18e284af72854fec06c4df7749cadf4794bf4f09a3fc2d9144fe94f8c9df7b21cee7022321ab1bcffb2ec4f13a1b10c1e9ea103132f75b88c335ea71b66a6f904b52b6310ac8d55134d9da1ad8906d0d90642b342448b649d0c8dc9de28680c3eba682b99dc8654ca64632a5482c91291251321523d3ee54005af04488d58dbf72383f14d4f0c316f1a01901b2bb0c212fc1238e88228895fdc9638f0641e08e907534f02287ade81eb1eab145dc113d37d41c245adb1125c8810619d9235e2ccf49e2558f80b1664e0d58d4d4f708201b72825cd4d437b4c157b1c32c08163f76e862842cf0a5618413e83003fce516e2c18798283ca4e08130985b88072372c0430d76e0c188900f4868b4327d9727b0d7a4535626d3f79878b5ae99df5f8ad58e9e918a4a2eedcc7a4634659168137d6a6544c89448a69e8b9a68984b9801bb5ca30fb04cd7e8a73f0201979e51f6b4e8547b465d3333a56064fa5e934c69b4c4a0b76bd4fc3f63c64a1700eebb36d56f9cfb325dc3e57e6c8469ee6da3dfa4dff67d0df0fce98c00f27cb0d61b01689af99c0c9d65e905326fde58830315e66fe6c6db19b7f460d5e9d4311bfbf2376f869e748dce37435d344d8bd3888b7c7333d4a4c9be19f2a2693acffc51b0029d6cb45d3d99fb71d37eeba3699e63697df0e4d945b96531d509a1f7d71f78c16a3ba5e491374042589af973309c2a741ec89c03e6fdb014fecd617942a9ca5d80ecbffdcca4f1f79ff917c5b1bc9e67e68d15268dbf3b065a47d7f03fd1940bbdc2bce19dcb26f23a4f03738e30297ceefe73f7e51c77749cf41c3748e124cfe6babfb9bb639f4a369a9c4f57de903ae704593e70ced1fb597ae1946d8aca4af36ca599a46bcc5f29f128cd55e781960568fbba7d3b39ca6abd15638c31a6791b4b1e1b7040e7ee1a0db019fff4d135ee94fee09889bf863843c47c0d71708bd2167e8b639fc67ca53e4e564b5b4ed64f29824b6779cb51e504ca8ea28ef27107e2a9bef9c9d58f322d95b4c751d5513c8eaa4f598eb22f245b92e9ca515d7f0229c9f5b5080223043fec23d78f29bdc0e8b295cc2172fd2e9a365230e5fa3959abce474386374a4f3276a6824eb268caad6853fd1fe42d72fd12bb17b93a943904123c7a92ae51bfa588b75c8efa71d48b06912161f90bb48f9f32cc1b3456bebe8e0623d76e926b6b91ebdbf094b3f0c7bccad78ff114ccd7fae4f7b591944dd44668cc31f27c3b8b9027108ba6c41963298af365ac2e63f92d632992c612cb7056aeffe1ac88e2bd260075f702d5c7a5c972727e4c09a6d49e5282cd5ee6de38adc8d347fd4b45999916bac178c224d7ef5e92abad61e7efd8e1e47c0af4e2337af881a36017554053f4050847d9a24a6bf51b91066c69767a78c3b6d9ad68ba1461ba11aeb2a9895122e099755a5dc2a8d607bbc54045391722709bb5d984ecec4281cb2e2a2a2ad2f164d117b50d70d945508a2cb5aed54a1240399fc2a89aa2112ec6c02909eacb0fa675192365b1489e54767bf9e1744fc07f000f738f025c4ed6aba84aa52a55dc2da239388557f513d9bf64e95459a89a76bb4d0612600b5268600291062b5a793ed6e1333de6a41b71d2c589ece6781549fd52d3e012ee16b3580c7c03b3229bc47c75df2509140483a6a14f6590a90dca77bc2f7f1de1d2c63c9aa34a1b94e9db204a65311b2bf2a8d8620b994c367bc1c0c0a585bd602fd9
0b082693c15e331aed038a0979ad8bde01b9ddb05f9e7080dcbf95ac8dcd1b76b45a340d7d3a0a4188b4341a0db3a5750d2a237292084d96ba9f0fa5a4cc1279eafee5c2fa75abf6c74d5ea3d12c4d59206ef2777f1248e5c9f77d5fede80897f6a87654fb6adbeae8fb8e6a9fe785e7454b4b4b8be77d37e0d2f33cafc5f3bc9616cf6bc9220bfbfaba567b2fc6b6464df4bbb7b21a4d591835d1c737569380df74a37d95fc467c6116edeb711475155923da952ec76d1cb76d5b589f86d6e53aaad9977d595babb5b5e6fa5aa110168bd58ac9bac0e5268bc962acd84cc662c9b658d87a71bdbc9ce0f29b7db34ff6d1bed98bebc5f5c95e5e9ee779d686814b6badf5acb59e67ad071ad580829ee0720b020a02028a058146a051d006548382496e21285c9984dc42370023639a7217cd42384822840324300f42c49193ee240f5783b280ecef412e73224ff1dc1814483ce5b272208e6a21295634e537feb42bcb5165adb55b6b1de2d42227a7a840314755214146668e82a96305922b0c85a1b47f1cd5425040c90bc8b42b8e53da0d824eb607d55a69e98553a65f4b42e89020b057cb6b9e5203092402c33cc8512e4779ae3097a33afb8fa35ef5e5418038e935c868c58b2797ee82cd317059817a49ae41c904c82db4c551fe265885b805c328787b1b15479bea97a24cae33b932e1ae1256531c7fc0d85ace75f4a2dd6d2ddd364b9b369deeca2c537c47e06af5f5e0725b657ab4b24747acd577f41dadec11b80a5de18b0b5c6eb36db6c968b3d015ba669b2c7cc97229b7901028b9c81392d00c68b984dc4242a8c8e50b4e807202720b398965db8ac2486ea12884e420b7d02c89556129519b11e5169a0de91be416c20193bcb528fdcd35e44314b4c46a88670641b9dc5a3300da5e4d0320b9ac4c327d5a52dbf2d95a3e99fee6b301c9344a2eed4f15bd108c197192facc494abfc4292fe3299e226c7b7f5b8ba6f0d86e029f8e30dd685b4eb6ba080988c85d056f88038e4dff2b5df086b98bb48672b7b5485d3762ec8510982b1f7953d9ac6dc92177911e7cc836661345999f91bb484b8b6cc7727b653a53492e5461ae56991b47ae6f6aa966283db072b5f19424c64aee2223b821d711c7c804bf00975bcbb6b6168d5159a65f6e2c9f852b2b405176c07e2e6b2d53fafe26c0a5cf6a751c22e5e9fb38496528a27ecae65289e1726b656ac3146eb63938110425790b716cd7a8e50bf9055246e224fd0aa329aaa44df4b5006a1244c1c8f44b5a24bb61b643a6b4d6225ad43594a0cf819a4b2fea9a6d8894f71245e2a994a74f678eaa4f3fa5647ddc29a551db2279813f975b8b875c6eaf726bd1184d6d5456a3bfad688a5b5153add916cc112e67c762798ad9dacdb5b56ccbb626e85ceb7317259c6ac2e66c02c9266c523a5f5a804b95cab6706bf2e01279703961aa20b23f1744d7a864484c57cf1f680b971316032283c16030123661b00963a5268f9b56b4c727af58a917dcb44101220b3213628960b44cc7d2fe54ee05cee69c74d65aab0ab98e4dbbbb7bb64f2a8a2c2982e4456925b32dcd6cc30778d98ef42b054d33acb4d64a29755add12c16b2170aab5dbc671e0bdddddddfcaef3be0f04c31a5e3a670a8893e23581fb555a9627510649250d2170d36f65c593e112cb904122b5b4b8b4b8b8b89064b0b8b8ac60171717159794d0c5c5a54997c6970369087bb1f7624e4e52c64deea64cc777f2c30137110e7872f95e11786485ad23e775ce6e9ce5ec66edb6826b6fb84f9f34d676736eadb59674d25a4b27383e81e79341f0109e5fad9d131b61376fab96dbaae58a783f9cb55c1375b35c13b5daadb3d66edcdd38cf36c16ddce68921f8d5ba719e6da27676fbe97e304df7b5721cb7d926ea9c4f27ddeac8d10f04c1cd36f179b59b28729cddb8fa75b34d78d56ed46e958e6211afe39edeceeb386eb31410779aa085dd394c181fe5eef7c05da53b4f2ed781fbaa1c2b2b2b5506778f743fb89d0e386d5e1e104c1f950856585858647424193264906e0b89446aa974879afc55de07cecf7b00f7f75942152c905f034e1cb558ee1a15d5ec6c5425f3333242f207698049d33533c439b5ace0164b63d2f8b7841e90264beeb7500511642ed76fe19e7e4ba8c29602f7f441ae256cc08b48ed46493368d0a081315671e26886db4a8d8a541f0d189810873e4c0d64fdbf5f304124259733425c5c081921c31746ad1bea3d990a311194297df194995001132e253f3f3f400001840ccaceda3f6cff8811c11f7fc32afb7dbc0318b07c5f96b173a24dfe2b515c4c4556199564ffaed6c950a5f539b2adaee1de11695477ac33726b5d73df8fa87f7de2a8fa9815fad726b5c8fb57226db7aee1deab935c8db2886f8873ca9734fc94e740103c4165e5ef77f7fdbd3276326aeac68ec9c417ccde3d71d2bf497fdac9684aec8ec9bc21529a4bdc31d9be33dd7077347
6b5ee4957f3c031c65263f87e8de1efff45b18bd11afb66792b4d232e329efe2cd9bfce64f58ede2af450a6e2df854be0f262c051fe2e740d7dcfadd335481ecbcb097972b3e79c73da6de356ee1c4ff544533adae4d7167160e6f2d2ba594a6ba5a5e9d74dfe9d32537d73a373690200b2df942ef9cb46d5918edea103eadf5c2f105d04e3398cf8e290cb27e204b99cb921972a2dca36928f7491cb4e92a90e3d55b05c61c4200835b741e8480f38175c3184c805434ab618e32695032eac98e2e7872282b8e9f1230a22782812044b06d90d2dca9844e6163a924466c92d7404964bfc4356b1c7ab2eb239609ce07efb3bbf7d39df8e3aa51d276bef386a6e73ce3996dbefe894723859673b736b9d9d6a6dc03ab57ef5af45f5075793188d3dee1869f63cc15d676b3e1d27cbc9d99aabf9bdc3c9499bb4499b3b73b6266bb27a1cd5f239592d67dd8bb128fe4f166dba2ca602ca03a60b3c5f70d2fbe9b8c34987a1efed96e5ac4a3b664f6fba63ae6070d27dc03bbc7b52a74e9d3ab545cc707cd7e8696d2d810aafa07d7a5a5b65f0018515889e4cfab35afb755a5b79eccc069f9a09a64f9f3e7d5277ea3a40a18b39cef0988ad1b19e3f847d3bced98293736211e7d8883d3149c4628eceecf445750a1c24233ce7580215761cbae842c74e0bb41674e8d8d9d16199d85a145c366d5b59a31f1b8bfec6dab69eadb5b1b6160db86c1a6d031af2f9379be02fb7737f3b95f2772cfbbdd58eaec4e3a8bf1b7938b972548fa9efbbb1ec71ae76ec38d1d4bc09ff7bef03598a806714e194c76f717478be0f6b1deec870e1ce2a0f7ab89452828940ca7fcf42ca7f630c45c4703fe5fb708c011c7b545113d942e9fdfccec727bd9f3a7648ef743a9d3caf44becc77ff32a4f76437ea7092ae306d7a298db960bd7e83c0aa0942bed6bbb2021114145cacdc886db5d5fefc5aabadb65a4ec4b56e5f6bb55cad5b3de56a6b137cc4c662aa1a6c666c5436aa136973efbd362a1c2a150e1b95cd89075825dad812b4dad8e91aef9b1413eb264db4682594b68caa26a0a25e577ac149fbb4e48293f6bd840127c11bb3d91ed0f0515a5d84db54ea7b5291a727450a43aedf29b4c97e14d92aa1f6cb53b6d3472ca76405f66f51256622e6115110b19863238ad83149c4624e157f9aa753d7504b2291c0ed55d464ed2720972a27ec974dc47e99806c634ee8d404dab4033304acc2e126fb5d2a02f69f3acc59cc0cb9bf3a7366cea15f470dd09fa33b75ead49f8ea5fd1c1bf78a82a3e6cf7628b8ec98cdbd188be23fa61636abe198b151d9a84ea4cdbdf7daa870a854386c543627172e55e4bd188be27f97a7a13172bd8d451ff141ec08911379521b39fe74b47132a7df7f5439d99fe38e6b29a76443df06859c6a93d3745439397d601ba12122e4f2c1bfe316b8bc79f2ec202787a64aa6bc03f2656448f29ffc8fa9c87f0cb8a0c305162e5b9643534dafac041f5dfbf1b50c89a6cbdfd35404a8689a4c7f9855d04b61fe4a9ceff6b75d2787bf06fced9863c7513ec26c9ff23d6eecdf4ec16c234c04bc0f3f87f7e16f1f8e3c8ad8c61cdda78c3c52be7b1ee17bdfe3c68ea58f3b3a3877d6094bdf570a1d10661f6728c13184df37317c6fbfec59df84251e45b0409308c16775f33df8df0c6174c09740df806353130b3489107a8872f38d2e5013fdae74f375a5bee91ed9c7161bf01c4514444795fed345d7c07cb7b94f67df9238caf0e7d691b9c78e9a3da9486bc065cf5a797e975be8091ba818f5daa2cdaf0e5e227287703b06b843161ce086c13765668081491623dcad6204ee906526e32f23417e8cfc58c5303ed56f2ef0cce965485ee0f2b148f202bfcccc49e5289721f7e3f8ffff07ff7281cb5b899e90c228b717b99f88a287fe2c92208c4c9f74db9c6c4cff77824f381cf5ba691fb97f85aee9aeb5de3b96e2a94dfd5e6e23403ff21c4bdc3f9fe012f7ac069be1a8c42c9aea69ebd48c00200049001316000028100806c562c15816a5b1a67b14000f6c883e64483a9987233910c32808622086c100c3184200309a006394a9a11e679e82f972111765844447f297cfbf8eeaf20beb3a636dadc7291082ec5ca3078487d5234f68f6fc0805e34b84ed51d1a3ce7842464aa59986700432f08561eb590755bd1799a0b65a7a307d311a62dda246d81e5278934c5e9231fc13c045c3b7bbd9dceaf1104a3d7bda274591cba5cd32799136772897790db01f1e46b4222f27c942714d14ae9ad80e5eedebfabab263691e0aca30ff0b1b7a46c831a9a24e378d00e1a80af4cf88e93d121c7d6866629429a1eddd0d2417b9cf794f89b811440a18af94862a29301db4409148e145a9f294b24b033f18bcb6268d5080881e0a0019f2c9a145c82b03dbe7eb314c4b3d99235396276eb174e91082295de3e36b499de9215109186ce9932ce7beef28db0ca9a24460a2378989ec6a29a414
50f4cc0f49eabb078a46ab3791bfb18910a91d31698a4688548dfce9026a3a0ccce13602b1af83ba0c238a30d04962c674a6166da0d7909dc501cb249cab5c545ac5c9ce4893794a29aa354f496471780bc5860e46270e50b06b306672970f17cb63162da6a0143824c274cf8e47d9e6deeaa448569426ba839e8bd693c2fc3e2970e4d05c732745a61b91bc9d93a24cbd49815d3d17801c4be189f41dd9bd905408187e5ecc8e84a9a6701d09d7349d23b144ecf8ec3930b91d4cb5245591168930b5258a7ba448a4df0e905713fba66e6e07a43cd84167de0063cab55a64f61c186c3210522484387c10b1902fdd5c665d0b137dc53ff1931062238b0979cf912b8980233bbac8461f765f3718372d0e871c640008c1bf0515580b6d411059905910e2e7358685603ddbce31cfe9c662cc5317dcfea29d9e6dcf39a1db3b8553861d77502ee29872c7d52f615f0c1ee043d087b6f3cd533500db196299074e79923be82d72b950d3dd61f8d804903422938b68d799f9f305cdd645056207060aa9d3a11f3f9e4b138e946d75e2154be9731e8a28b1e72b5203a0984b539d2bd2c7844d099c93473b616e8ffa30e6f2e4522e89c86e8b2dec7d0091f421f77a80ae475c2be36a50164d32ca2394eadaaf6a129c25332f6dba39b10f50b72843aa0fca87546317a254633f34b0bcc44e0b9b3545544ed9cf1b8e867b3608953ae777afb3fbf0a6840eed269362fb8fb611f8535f59005f641d81e800ced37f722ab4627c3515ac8f91a13503e9ff8037826cedf2457abb0f438578f59e784d1e3efe9fd9d8bb8f617e44a12ac2eecc4b496bb0eeacb6a92280723def6b3db8594edf82ae1e10a8a3e6475a882fefe6e2590cb60d4d3b2c841e3f5ef717f9526ea51c8eaae90c659e0ba7784db9281b628b8ce5e297a32971aa49a9572f7a7b4d19e1f6b2568a0cd2dc3ab143474957fe8869530aec72bb7b4dc5ad290a6e821423bd0c507e8a1ae7dd1dbb4e4eddac85adc203350f9187d3517c5236190539f525e8662fbcf7e4a663e15f1d940de4017b56427c8bf8984aa0a45f3e7219dd94d20debc367e4b319caf19ee0c0f64befced88c8b30eee123ddc5a5f173c3922ef77a98f83dc30fbc4c6e5e3eabf3c819368e468fab3d879c7d6b91c9a7290c7a5cbc15af963cd85462a81402702b009f744552d280dcf0d4f6ba6b0c2455246ec89198fb072f1f5c20e1607b1d5cae89aad6b4ab2de079a01a5843003d4ca971a96113e046c8ade7987350737bfbcaf0e6e4b02901f69caea5990f070d3969586d6dbe086dc679c566e6870c9e92b968cc92ba0c42d9ca0ed405ae8512d6853ca216aafb0106c30ff63f8656851aec0e086a52275e49dce69c42d274fed00a9b4b7c8145face7c44a76ff0c15a183ad9081a33382726a24d2b3ae8f72064509e2f49ca7b1545568c510f3bab5f662341ea4a27dfa7a65ac178af41563b16a6f32dee052a01fea8b757bb0fecd88dd1cd0e23aac64200fd689e3193c44405d4fe384ecb68d783460d373bef539eafde0f021f7cc838f10d3903416a0737466b4cd91cdd5d46000e320dc735da777067e7a9cd1a08729ec831dd4a514f056c0a4ee24ac67181cfc3d0a159f186361324538763ae591bf6a4901ffb0e4cdc2dc85bb667b3eb446b0c04e2f89adadda66a56e03c12fb4abe3ab5c349caff53315498a38ac34cb7404c827fca0cb169f594e104409cc39f8bdbb81ceab2b39f6208464b174622888d29a2a59fbff22c2e1bd89fd940e76cffdf9bf5c96519b8e582c408835ebf0fe6d2689bbd32c3670e24c8dc52ac9775b6135efe61508789516079945181ba0606cd26734a1c98596e424d7da5245dd934f51656ef664ac01ebff5ec7104d5c40db9b7506238ad506ff5a994579c3353e72fd1739b7ed2f5904cd9183f51401b05e8fc165e801e7b72b561ed03551538df8f9f3083e83b9e4a92420341c5370fe29badeae2507c644c6e7258a819346e0be02ae0e160d31dbc3d9570e7e8d734c52dbbab5e9154dd348df81fc5a5a8c2b8ed8fc4641c3627f1c8097aa821e617a8d3e5f94b742892b1316467207947f0292522df20a7bcc7785cddd67118cc4140c28b006807282f451b065fc03fa48d528ea4b1ac2dd78afff4c1a6d155978dc219e736155d6cd3769eecac5d04683429cc1fb9a6f86a6da7c36a22b6158c1660b472caeb419829352b422cfcfaf53b049bf748a18c88e7abe293caa18ae0c58c4ac42b57553b549f06ce79a73318133ce33d11d71caf72f9933f063cdf35e1413b4a483892b06aca112a5b226079c7db60dec0eb1d0fd5873bf4c6b0663d43b8d24d3f1c0181b43246d2ec65dd985371548f371d0ec3a5ffdd06d02a95ae33e0f8fab61c1d97225c8f3ed309ee09d981541d71c462732145a3c6c7ca4e778fc0a50478f9f53437c6760ecc30ad6e2e156a0a250137c68a686804ca4e54de7a0ad329c0096accd83b349e512
be862df5a95adf0504b5004d42a522b1a56f00d69fe72abd8df71707ec5584f2a51f9ad1a731afb649a77962ded9d7f3c876210c9f5607a0046c192aa2338142ec0d16581c5a9f0b9ef93af0e5058df663d1d20730c50ee7cd6b3efa1caa79431b7b43e0e12941382180b5b4fb45cf0963a92c2f020d88bdae3e214b4f038add727f13bbacf611b26b2795dcdb9638ee093373677ec4bc487701fc20e61f5ef92b5bf22add85f3d74ff9590360ffeb196c5e00c93d2e62b0d221c278334152c168640e759ab1b3e11c70934e322d0de30ca01090a2f6511216e10cbf611ccd1704eb883cc9eb67c2da92385ff025b3c827999f36e5acaa1d46a4e20891987642843256c8f11d644dbaf8f0883963afb41ac9082c71eceed615676b75772e2688ef468317c02429d3fcc44a67eebdde1f603020ae1cccbbd32d04af864e05eb03838a6f05e24a53eea03f20f12441e9009752ce466192e8314364d3f352efa8e53d4e68a665ba4fea34a31610ef7ae3e69afa8a32d5a0c5a218847f20df900118247a92ff878079d372920006af437f243826cb879c5401ad880dfeea032ddbd05c3f04b585c240c71367036a08660764a705d81791cd39bec18012ddd27496876b307bff314826fceab590d4cfe69d04df97295cc600e57fddce0bdbcc1d422e57e7fb6a0a3201c8fd664115f80624236dbd3e69b29a2007d2dff6a252f5c270e022a46bcdf3f5f190427cba1e183ed7b9332994932acf389bb5935813d6a1a2535be3ff002ee7377afb77d6071c5e11039c355cdad6254cb01dbb7af73dc85134a0e64734af01f35fecbac84c4e730b9fbee72b87aeaf26464c997cd85b6fcdc9570aa751b089439b6e99e0c964f5aa5398d2bb5ef976898eb6c05d3d38c2f72d6b03ac69d9c7328afb3730bfcbdce04239a228f6850b1d56cecdd40d5b57c5dd7060277e6ebed8ce78c9ed5b8cd1c05e160b79a00d12f9e7d59af386d37084176f7fa13a6ee0ce3fb5f8de8537fa6be4f480bb427a6fd380f72a1d0dbb9be5858aec8ddca4a2037b49003417948db297b58226caa397a76ad32bd0d51d3544fe5121e51159ea91175bf93caaa478fcdf7656f3261a2f6f93edcbd4830db3ba43ee278fafa37dd680b76cbde069baef1e7e33d449bdeabdf3d14af3f00a9d878861a2900afef4506ab02bb5e3a86d7e9ed5c59a3400d0e851ed56cea9b5efc81231bcc6f1a9090dee68adda7eb0a31844940a90caf44946069429074017762d8c658e31c07db8e4e1670006e406dde710d06251c9a06075116f35802d06d6471c946209e9ee2fe9cd03030517ef84e2e07ea834b1cc39486d83e79f9c0282fffe41b2e7ca22c551756d39d3418347a9307ee0f9829179af8db9e204ee2c72a487e0f0e6dfa10b1f923bff0babb5bfe564c709ef985f40d5eaed973e16cdba0827fc90bac89c94016907751bbbbac72ea1769a28104bdba36e10871b6c425ba9b581299c311ce3f83aa10130f67898f063019ab106a9aee2221cd1e37edab1fc7e7999501e455e2f454ad1a73776af626b39d1e7b5636189a39c8f22ea3a76f2b4ba8c4664645f3109d1eeb5ee7b61cfad9643070fbcfcfb6bdfce91d01b7531ef9a3a0d13034bc979082998801c72608d96d0a8f45cd0edbd63b81f15318f3cd126cbf97ca88f97981f5d3c9dca4b97f7183206a4d931419eb44ba82853cba44c6c03a4b81425bef20c6c48e1f969534fd6d5ce06b3b2004626a59c5da32c33de825f6ecb98bd83c0c7eed673de06608b26d046c1040cd2b4349af9d65efbac23ff96402828c64ef4bdcb18bd3154acf23c00bcdafd34f771d20aa0c5156f2d4f6ebaa19de70c4f1a592e55d348baf6bc4ce5494daa538a7dd45580f56d6a8a603acd180eb63a4b32807a683e2dfb3894870e5309f3860da5c4b276002fe86b7d80176750181e41a7820d0eacfe40fde9d64f9fcb120bcc312e0552b8953082f5034e5608209d31066b0af71a304fb69220e4a405e535720a8f6f7395a6df48a84f62a379de31e1cd787023563ce600b124b4269b7c2bcaefcd1967b555b5cbaeb0627e17a832dac937af67a8f17927a6a28e177339382579107fa2c93047bca43ef340229a3e1b020ecef68d984eedabab330ab48a73968ab920e2c411d0a19ac096b5300fd386f3dbd3fe5b48206d4b65fe17de80db2f0432d29deed5f1d06a7f29eabd14132f52418e60756f1895069cfbd66d8750f496cf77752aa091727710fd635ba266e8f6b216a90d41a635d8c111fdd55792f02d09df9ead0efb8a6c03763f7092aaabbe9ee6c8fa800bc19ef548d8c51091352932f0953368c26672bb1a5cde165f73eee1f65f3e5d27468624d22469ed77b56bda82595c346ebb44a0f1089b2273b660afb35a233462f50eca9ca512736a5e9a0cb367f683593b4ba5057766ae74a4b9f7ec4084dd5527dd0d48a01ab03c99240ffc4e5394ec390ca6a54dae6d35ed57603357bd5236f9439
e3cc0be39f5add6e4ab745199d9754e2912712e28ea935c66cb03fb0064bfd3b481cfc3b614410ed76bd44373c35827fcd991a2b1ca492550c8173c0f887a92ed42d448e3dddfd873985aad7aeae9d08debc94e9a07dc651204a6d9d394fca570d5f19488b07b0842c566590ec015e134dead8447433ff8108ae5e87affb749d66439579ac8ba6ec096b9e2f8c544d1868991b90fd8e99617b1fcd608ca3e4320a9c41faf97980b585825d9aa9ce4526c2cc99eee241b3a0efb90ab5da3a9c727d774a6e734dcfea06d01dec636d1f6815566aa43d8466159d32ec1194c3a407e00e8647b76b96f074c6e239b11d391ea8d1844f22510b56004a66b77fde19d57050b2c2ff40b106aafa63b053f022c803daf0b0f71ee1a1cefa07d0fca1b18e425b5f159a0531933196994d121c0dc4ccc855e7e15399e47e9db5eadb68d6a0640fcb20ab751fa8a4122d654a081b9867a1f13d5d94a3aae656823ca1b5f673cd0ad7b9536116cc63f6702d1fa8a13e2d1f2403f33351f8b5987c1447fd8cd76142d8ba08c766e29c0b24da92d3b2c05b36742d629afd70c0cf412d8937649589a0dbbd852119fe3e7e2c669121ae7bfb3b24b8bd1a1f883da822d7fd80ae4f6933a99a62fd8f1ddedce7d14d5957d3de82a65866525a7d0fe8d697683af8787c98f5f7caf7016a25acb1c96c0a8bf02453bf75d78b04b41e43df3d588261911173ec70c15bda394f0107eeb9a549c52c27bbf7dd264bddd89b8987c4d05937d4260293d80a2beecfbd7fe2236131d7b2689c6ca0e09de14089a0b24ae99b0d4533fe35fb4229a77d2501106c3d83cf3f6e7e0fe0f43c00eff7203038719d609d71a30e5df156638674bb2818b11958c5074a91fc8ee13ec83e6bbaa87969f774c88d70f4f3825034ae935bd3b864fc90ec03eb0a3613171b52cf0a20c62abd5d30c6eb114bc4df5da9986b3c615cc79c982a7c312b2ab55d79f05295f4f52fe7d403f908643012f79332cbab69d7f6e4a159ff1fdd34e57f156dc859b2f9cf3729de957c1c483cdecd418ae97e34acbf3433e226388daf07d9c17cace4570c1325495386199d26efe09acf117b5169d88b885a79ea08a2c9b9cb39d7a7045384a49b174f3b97485472f69304c8f5101fbc81d9afb1afbba65b3eaadf4f31b972b656cd50d15640c034dba70e87b4d109cdb3f140d7a24b7be197bcdbc43e49f5cf9dc29dd6928b98ac1f4cf3addb2880106ee8ba396b1c85e5e35719b0a6d31af93cafcb36ee00ac87db5c19e59eeef77062be5863041f2a1439e7d040f915bebd8fe70381c82947e8fbd0a56371477ba9e86c422a576eba39ac6c39f67573d5c366bd70666e6a1f2786e64bb188ad5113dc16785afc8f2a31373bf1869f0ade7ed40e53999e56c09f7c9e0e0c54528e17f731827f7a617602ec2e4c5af91c8ec1545a2c91d49fa6298998650089c511a821cda0bcd2e3e26b57440bfb4a674ec404affdc4c1db3c8fdd9e55b5c6add2380c93711191464a5bda987d981a5cd89746767626497ef0293a08089871563bfc2c29044a506b4cf4770d9166a7b94696cb69a7417f9266529b62fa7c20b2e6f351495f324a97048d18665a7ae53fed2544def6f1f88ec2c0e5a7e09231090c983efc39d1a38f07fc5806851f8ffe19311998b92f5212fce4b9b1a8cd891e3b3e8835bdc156c5d54ea4053805de36716ec00f969f399e20115a1589b88c1d0c08715ec70b48b1bb83942fb4c106172c8290154477ebd129b2cea86344279d165b8213add6f234efb2c56183729697e7ee6c050cda13129a0406c2c3bbe5cfce02c44272e636ff7e151f450f523aefbafcc1b799d0199b70eeee05dff8854e40c7b4c9cd59931cbf35df21b2bf142437c7e9cccaba63743371dfe8b31b4be1174d25f50c7fd2a5db21637ea7d3df6fd15c0d0d88353f36ee9743d2d6725861b2a1dc499653fb8169e1597c9b24b04edae230accf47c40721682b2ea146cfd7e0fd3b9984574ce3dfe60c6e446dbb7593efa722f58e263453be0ad9705b0370de4401589292bc43220f0a05935f9c17327c63d100540575b6ad558c4b4a3463a2d6a9e09426aa5606262b551c8349fc8c74937b3d10eb0a14d627443488d2ede0fcaa01b6658f822bea820814c0391bd097035402ae77a8a94d1164a88a912c950c3632348d716739e0c6deee792912a04a10b485b30216dce47f3cb232b6071fc510cea232dcac18b11867a2182335fc56d7a03f4d56f6e783d33a73933b12d9722e42888554414e71d7270ce76c53e6fa20406b7fbe6a22a32c1900a8bb5144b90288d806d792a51b3f489e5248c0ca8be9cc40c142e0f631533b3273b31f0146d7501b377bce98b94ed8fb0b156b97e62b30924292c0be958c91010334b222f0354a5b55c7a4c727146f10df3d5ca3a000982b34587127f7dc01e2b7d57ac3c8efd09fff56b370933a2e7c9877c0ab7d46fd258d129f1f859229d5bec808e17
7a147e4932bd9bb7670d7bce8254c394027d7f3d22c055ec671954a977d31f877e2e485b850b7010ae83426ba00689a8bdf7bdbf913d3cca2a3811fc74d562bfc17085747c124e092bde589ae142581f30d557b09b1104e77025bcbd4528aaa4a1f466316eee522eebdc903ecdb707a8d1f1c9f97118e687ff2cac500ce0640aba2303091875df27bfa924e1ff341074e6114b920bd9a32f280be71505176c66dc3f923ac0a04a8bc1497807323336bdc292e90bece6c4367e73c0f880d8fa475f633389f15bfbfec1dcfd173e3a889d71993dfac5e371c693de912ab3c86ed0d0d0773eda3d2ad639958c14d4bff4f66fa980555b3a99c92363d779cd017732ff3e48964d16aaa7016dcb8920fb12c9d1eca8a3911704b15751f34791fe5eb8f4810eb21843f05b4e48389ec1ea395b87523fe05921b13124110b6da2117528638e5c40a8b4513344f5ea895abc4e1f9be90845e3dcf2753f8b5b1bd5e4c263b5bd3332851d3676d662c0cac878085e84b18967e34ae9c16e394270dab3276e0f5eb806418d613c87be36302af1bf9b78e768948dc8c1e4eaa1adc6b5c1674de9a8e3b84f11d93a89ba0a33bc6bdb822f969441f69bd4a66c244651ce3c2857364cac51592bc55640f53f0abad1153eec654242e3ac65a92bb3eb376dcaebf026aa220a69fa182550a5261567371243cca1c06844bfb575bb273ec2ffec995a7c17a19dc4a002464e2fe012a938d80416fa5ea5975691102cbb50ce33529d830de9a7d0348a43010bd613ba59591639eda6b893e7e6c0a9468d14ea599723c4f24cbd01c54e74e0b227de6d2afb8a45beb2226366d962a4b674c95b15287782db07fb8962a2b30032aeff7651d8932fcf8ebc4dd1e1723d6ff0f09d440c0e50735994a4270c8028299dc1ead3e0caea632b568b93efe57a6826ee2f127789b432e4911e77841ea2b41327dd82fc19ce72828586de2dc6ddce026a086f69422e0d667a09db97c11206c827c74597b2dd526b6b9a0c0ac7ee25d4a15737333ed5c2135a01dc9e6f774c4d39a12ae6e22c07297a366f9902c07136b06f09591091329dad9461c9c556066a0f8428e2ab62fec170b2c452d7691938fcc1b592db13be0fc635b95d352bd803608eceb735c1fc211e36e49842684635b9a22b458b8515b5334c2fc51577e6d767d2c63c782ea1678f61333707d654b91f0b27f17e4ac7a8af85355c488a0222ab7342c522dbf0633b4ee14718b6c12c78a9dc56712bf60613ad78326719c055ba4ffd68660b34e74fdcedb799302e4690003e6f5dbcc4d00018c8e5b226db91da9f743a0cf4e731bbef713213d2cf35c95a736743d460eda95e483a054d2d75e2c063606f88e8056f9182f19c558258fb32d9f1888835d17495a471cc4539f7212a4ef3555a384cf3eb0d2c01440babf3e863a8040ef31e7968a216f503ed116e8ddcf83ffc080c7957bdd4d95ca43a10235a12e5d7dada53b99d3aea5161106acc423a038474074eb11502a4612f1195bab49e22dac0c3767cb03f85369fa66b924697bf7f9efe21add3da4e35e0907032c8afe34f769dbc0c6c0a493ae0870f7a7e7a8312f5b51f55be8e8f3f5ad0194868f1b80eb62a58ed31ebb49d5b3fedddb6a2f344687b8557f64dfa2d047df3a791a23299137ba0bdc3c0a7402496fda8e8e5d36cd9c7a5561f186c31d6a805b045bba444114252421553924d552403b51a31dfc5e0e51da7526379a1fe7fc90a01b7834c6bab7488ca57a8fc9ec3e46dfc41955d19c3de36fac7588923a4b339c8d9ec86f58802ebbd40f54f4505ebe7da00cff214b0fb994f80f606f336ce0c8497a41894433a1ca8b6e9219ed83f429f513ba858a1a7c437efb701db9aa74a989da1a0956cd7a4734d3f003ab9e577d93a18f5c094592371b590e060126611df9558fff21de3aca20732ae496c89a2f89a898276432f56655034d405e0618e16e1700a9ffe3c714dc1344cb8f0ecb7cfcdda73e1f4e2940abc9c1a31a1b87231528526590339cc5f591c023a9f21b3b4c1aaf6872886a5e5c67173c7837fc0b3b5aa446bd97e42d7306a39ea191a82903aa23ba2b059fb09e457d3720c7b7a73ba47a982ad221a28c54d9fc1969b67ad33bf740911cb8478ce58720b8b26da81063004f62642d0e998b4cf65237cc7b67cad6ec7f36db4eeeb7f9a28a1628e9133b4eaa5142963483b603190438db6126660a545069d990498f4ea079e1ceec14d89e197378bbd8e9052e3198fe8a6509a1ae1be3b296742e2c4df5d4a06a7e465faa20ca0a32f7339ac48b7888952a8fc49310bac4ca0574793ec61269364aee148c1210e63b3703de4194cea53ce7aa8976e9e5a38427b7fd8b1f2f7bf0f24025967ab516da205b0b3dcebc80646ae43beecae1dcc8ee54615e776de3010ff4fcccba15e860562bde705f11ac9acc36cbd1007433e5f5968f6792f6300744fae4b0367bfbb9b7c84592d301ffcccc5b2ff3968d
68306071eb934c25e8f3ae40293a6bcf0501a844705269ed8be7e4771f7213ba962e8e6a70868673b3957a8b8b5a531006c5bec906af2ed1341e815f02d45ceb68d75907e5098225b659af1b7409bc29b30db837a46679ccafb4e34d0975c7bbdb5f192ec04309179300cd6711760b34f5a838ec7315ce42095b5000d8e358defafd4bb542a77dad6c1bd4e2eabe834fe9fa89765c408856d7140f3152ff7d152eb6bd7f995fee4fc1c14137b574fd290134687493f0a921c4dbc1718b42076ceb900f7d30289f444eac28b32fa205a4ddf5cb6cd1d185d46bbee87b99e3421364560652f624c3d1ab75dfe734206eb40c7a3a03c0c69921601df9c6b0f42d74d695a96b878c7a92572d42cd49ecd9d5f40d557f966dfde9d95353650599ecd5ed38bda34233a6ef3cb903ad233b6b98b9fd144cbca89ca4f8533f7f6ed44d6eeb0f27ac5f3c3b13fc421479109ff384c52acb6bd46ac4f4d46f91a4356938673800bae08aa3b0695f672f90347126a37743390dc745208b47ade62b2a5d0f02d650b3bd85032cc722c22803056a05223aa19e226806e3f65a65e47f20307f8f86c35cbecb38b9ccd8133512f1c9cab808fff6a0c664b9c186aae313fbf52588ecd1f8fe3fb93ee0e62f60c289bfd0c25918f19eaa731cff5572518822cb141fa97c22200548e9f1808d8c080e955eb023f198124eb64582bb4a1304cec414de90fed9555d2488dc7bda00261e3428a6fdf0b6e335b41d83ac414159743dc2b45f53f2a7731a83137a6abb653d6803d1f12ad365910431cc690f244acd331edd9c802ebf3483964ef0f5096320182455ce3850bc51a9ba0c8e721b149ac6480ef3f6c7c67f83166c8d89bfcb0db90e791a6be042d3a9ae5fc1080a67ec8aa827efe4d38bac908915ef2bc5be19bf9378f638d6bbc2243160cc44451ce9767ed5bc57ea416470067d67793dc3ada6f2dc1381b816331b9943aede56244aaa78472971c68bea7962e19591b13d30984b9eb38b400b2d25331221747da8e4adcbfbaab38c6d4c4695092e64ad93fac8ed8e0e379119351c8acb05f9b1a99b84e2cb4e1ea822dedfd26f1ccb31972b1b44e4c1d35db996188e5f68bb9e4f0ad947ec4d8b10ccecca58ae419c55a5223db466b90fe3c313ceaa78b7c098e0ea0b92d779583f76a03f2d2080904ce46ba68dd01e2df47eb4841d0b89b6b2ccf2d7955f31d90dae0806a0565c84c45ad259f70596e5cd2a629276a5a8aa6b7336592107d28d2c75e2aa0ef9d01d3b07afefd91a71f201378d87bc95fda97940e7cb79ceccf0821d0a4ef035a26d95071ecfe164c43f9034be36a5b07a302b4e402751e69c1b7f476828a4e936850c929b92b2aac68e5a9c7c78b70e16d362515490e6039f2e14f7e62058f4676bd4da59c89f2b55970c3c818c87e4ceee0fbfb0e381bcc5f1f3be23f777c31074c12660cb7d3156fa4166db27e2b68a4b74122686674bee6911f95e74295b3cacb037dd4962bf51af8056c3742b6a861ec8ab83b96fae5bdb274b7750729f83af4412ecd3569c81047e14663811298591d9a3de4c9b77f92892a5909102a2dc7cfbd8cc9d571054572f43e375acd30658627c19c6c46322253918e2d566da70f67a10d5cee7609cef96f4ea93291de78765b43b5bf0b66aef4092e06b901943be974d36cb23e5de4f75794ab0722ea8c2ea4c59fe4acfaca2ae543d2ffe86c55ccd611d7b81339c7f489cadeb40521cf5658899defe52c095deca336153e1038ddab448cca086ae53d4d36c61ed144bb01ddfd12f02c2fe68beeb5b55118b896896b41fd50433c63d7ec1de4136cb60ff3d253c5648da53cb11070fa8590b6113d5810445f765329a4d99ae951cf3a08e968f96846e0a6ba52b50a321b89ab217442fcc7c7a61f07d20f616da8d8f943aba0254cb78421db65f0ec81f1d8dc37d20ffff01535b69c0d0bb853d5f90a7a922570771a251f20fd836e10a6b5f5c2ccd19a0edc90c43a0359bb66fc962be6f51bb80ca04b203a0a59562aaa35e12392d638acf247a8591d29b6d2f2f69eb7924855e6c730508fb18988e36e0577fabd83bc8ef6b0db32218b37293965809e44ce11bcfb87f62b376a357a75997377e34e8663b24830887a453295b384c75987b50de670130daf0297148bc6eb4adcae42b444bfd98ac8150d7837d8e98617793e9f17da7b336dc71b08a092357e521cbb97abaf2838aa9ffd42ef911dfa3dbb5de6a3e7d2b1f46ae5c901bd5c133bdb45768fae256ce880697c215879be0f448f891886e621949986345c3d885732a86c60d627c08239a85f4bfd44974574497de4c1418c18ea4ed1b42ebc04b271d5ecdbffa9c036bedafaa8cc840e2edc4dae7ea7903e6b3ea8096765a3708042be38cfb05fa3c1250f033338b226a2428e3ff955f911d4706a474e2124d8886c35961b84a09b54137326df694f4bc4bf644b252bed1ecebc909d5876c7d596536d108fc3f923f47e
2564860e7e049f6f5170e15b8b7ddb67dae4ffd7f7041b484dc09b3113cf1d77343c4cec415d5b7a4611289b0e0be206eb6808d0e86c5d0200c90d97408dcccdc32edf922135bd9971c9d1fae7abecac07572448864ca9693913495d4a272696873c47987e8d2949429098433d241912867c1ac29bebaec29acc61cbc12e9b0af4cd9a35e094b033345b883c9427632a2b4a85cc6784105a038172532c31a80b389e9d999af6c3c32ca39b43d2ecb180f7845a91acd0d11193e7c86bfb6622bdb1c3748688f1a6bdf10fb2cc101fcbf740330c59df64afc8dd66f73d58e4784232cef3b8dedf1d54e609c1dc1149ddd459d786068f7ef9be040f554e544925b425f2f510361396214e8c41e3de5f8949d6764040e0f82b5c5c49005df607ba09f603d77d16853f96984207fb4ef466af4e29d16abacba792872760e402725608ba91077151d1a9d824be949b775aa0271e8e82896782d234bd858921beb0ce72851aa0d3e9ce20de0d39ce89b38585a88de8e756ba8efc2c337dc34c02bf6922c3bf3616016f2d75d38b29abff219dbac0b134c584d406b7ad3e21d301a41d998fc4cc848fcbdb5e89243231d0380d7ecea5cf97bc1ebd783ab1207aec2d88f401d553f3a493362675a8d268c891a035eb21f52082af508f78d71ef671c84fd88b09feb50b65d4972f9895e347a5d3b2ac4efb99dbcd51e00076d92165f6b9413102157f49f200a3f97d532194074ad482a106871e4876f1709d517828c2f43672b0392e6a8c5218c0b892c002857095907d9047ca098dbb2dd1b798ac17f31dfd2305c14ab29dce599166c1cfcf38209824ef4bfa0cccc82a4dc340219d38a10c108e8ff0b061be3452635569b5bbb2b16d0859f16dd58b2ada663f3a412fc0395a6ab04b7bfa2978a56899c031be7aa4cbed1ce3c3a05efdc9f243238ed538e49995714a48c68502ad7934190adb7ee0bbcc6c1bd11a8d0a811f6c0a9fec5379337b39128768f70f70c845b129228413b487da43b91dd9464c52066bf4d644dfd23f1eb767c0a8d7b3185e157e64e341fc83857a336f075eaa869b280e047f3d2ef04ad7a14c62f5f36309fd5351775b23ee75c76dd91b012945e7254bc71f22f06601d3223b8b2557cef83ea01ef147df191de714484c614044c4f5e36d7e494758b5cccf94d1841f59aa375d8303c924c3d4e1b223338e73a81c5ca1a802b3d424df6620141341cfc435c1c16ce675f4d70daf14d2ad368d8be21ba5e2cd5bc98efc5bc783105a995e2d80fb0ba9dbceae628fed6087c566cb5700b245c7ceeb4bd8f29edd6d9e4a4755938315b4c25a29dcf0ff9f6593b8ded3c28eb9a47a05b29c2f4f26392b59e7363ca2a0b65bd322b2fdb35a8e9341b6774288a057554574be8d8709d0f4100bdb8d01457cefbb4a305aecf6250a682f48c27d11a9f6b22c5da86fa873f0ab52a313a34ee3bd37a52a7e639d113f2aeb1b1e293b1b5780893eeeadcc3da0a019c992c0be65661b9b5e4002c939100bcb544a5884811c2179438397d40079870c5363be6ad7a16849bcb13fba0f72013509a4eb7ff960f2285d5dc40c739066387ac43aa024b6abeb6cf40809c9e7f585504c749f71e0e2873b9421d6ad4e5b6f22873f0a605ef14a07b9b6a945197e0fbf94f1045e437a057c972a445819c13a70c3b2cfe7a0d9bb2abc2239c7b8b37c89bb1a1753e5cf186f5bfa3c7e6ed870502410073e2eb79fafe9b98baa1edf716943ffde7a47938d995dc19fbe15bc634768f237160bb790254cc99c504f649887b2563e5736311354a41756650dcde11e070c9d21c5ada731654c65ac68210840415cbcd179b050a3a62188ff98b3e851915562b971adf705b74a375468cfa588a655ec973eb6003261106a3432c656103ed3a8912ecd139870648c94996622463f14e1a2294296f54a452c400d24675731d08f41e91ee913ae58637396faf362966062907fe9807d7b876f381c537b8a35b10bf7ede02323232fa9b363bbee97b0b80e64b9189ca02563c65df9200e471ff8e089c052a308493d0a98637d290d744f824e287fb6a0fc11966ee1d0870f056b0a44705e20c518820582d0c80efae0685cf68ea78b823111089d00a9621f9f97216d4728a974414dabc454d8d8173ab899fb2690e3f77b2a60c3aee7dcbe64fcb7e813137d63414a79cded4bcea85e8e9829cd3193398fcb3346ad852428a053fe521495d54e3ba5a4d48be4c71102c8f4e22b64f1e9cd78ee561775c68918185d235c4e8f54b023e1d3a8a9ae85822d6eb481dc5da8a658aeb7a2dc836d45234f2f59ce92cae5c58543e6fdcd204147f6b82ac6602605b09d861b3c7513dcbec1ff1dc11b6da735fe1d023c7ef41684ec00f53bc614f9fbf475f9921ff0a471fa3b69b2ef18c49b56d15e86b41f0d8f4dd1c12ff63141d41b42e3f50a5d1693e61967bf10ef1b0f44900856166102626277e3ca7c2ad960277f42d5d9ed45faaf0fb3b
643f157ea3b18dbbae7f6c5a35d7d198821cb76c73c630399304bbae044d921f31a83bdd4eb2f5302d6994434cf4fddcdb32b419ff730423f2e9fc32680d6745e223e613c232afc02a04c5d7c99a3fd6a4c01ae870777f85339340613503199d81ee8f740ab372c5a53eb6d6a36dda6ea5f8390a27458065b1186c1824ea5384f838a46b4ee914d576a6d100ec8553549a98111d0b5c2c67b5a4d1123ff4a3117addcf739109e2aba5a5459e7839aa03d42f1d84da0f4e81a6af9877d7bd7b09e60b4db280ef2b3158d15752a39de038abc68285c5dcbce87fa93aae915a937ddb3c87b5c18ed69ccdadd3c3726734cdddf847ff65991b9ee51459197013eaa1fd8a35e0cbcb6a96c6a5e0dc2bc3aae66562f81de0da7612768579a2f59253ae299124c2879494882238c258925286b5f8edab47411460c95bde914402b76d8e683474bad3dee476736a7a5ff70705c1522c357145a12614251fbbd11a31eb296f1cc3fd4c6645ffd2b0f6a5b5042e9a073c9fad059b7fa1e63b7db96354ba9bba1997870575600ab36a70765d790133386463c4de25b778870d4aeac48e05a35acfa660661a445923602c853ea8288f004e8da4307fed0ebdf1e25b25a816972449e554bba56dac67ae335c4c197adff913bf05eb3cc15fc7fba7a8e28cd40de4cd0f39ba94a47760c9d3499ed4a5ab3e786a751791ea19a91b929b5f52a0af5de6a87dc0e47a1564aba9ba13305d1ee5dce272de12fda0d9ee2e2d2e82f1cd85b7863a970b4d9f27896a73d82c1f7bb3a14b9f6cecc07d5fb5300ac950f2f368636633389455624cef2bfb304bd97b3260b1cecb123f3eeb9912240f625924abf144e149c1dedd70a928c7743a3039c01851d6626c4a056311e5e203a304b8cd0153ae9dcf741c5337e901176b18a2f732b8ceb722f525663fe055d4acf28bd3cf1915329fbcb0f9655c0ac524ef3c7e328b6238e431063fc59f78f63899bbff30271a6fdc62ed77ce44ec31c6bb5c7d30654a9c7f12370efad13d52cfe1d935ff768c7cb7707fca375d2285790857bee20747886d3770c55ef08ffb7872c0f33c00d03ebee32309cb576b8df2f11d3d8cd94accefd36af31c7d8846d3a619a2070f5430e12aabad2c28843f70937567e0c7ad283f8416e58d55d24cf265136995bce7469ba9eecad7094f5631469caf8839c19527a26e206bbe89d1b594bcd4eedc23a8c68829c27107b488635cc2192279ce21b7214ebb80652ccb2adc18cd676af8ae4fd8c69da777e71949f9ed0e6284c021aa72f4c45379c4347735d92fd0cf5ff1c0f03f323737528abf7ce2623ba358fdbda1a0899d04bdce7781e1299590351aaaa3f9c0828a0e01bfc51f952a8c777a0aa8fdcb853bb40e96d8ac25b80332ef04ea0eccbd03bcc61a6b12030dc0d054ae5873ed05eb2e1e652fb9e445e9b855a0ad19320e83d75521007285ef40323c1652fc5753fdab693e6b297eebce41f1b349f1b249f1b24df5dea679d9a578d71ae895b16d0efc0bca391327c5576a6e94e47d79b670bd4d7f8e97a04d1c9b606c497c6c29fc4c243bc4cec8aa1cffe2930b8f789110107e0af3992a5cbef00745263e7c8ffca4d2063f701b8fe45cbba9de4fd81afeca8bc69c16b000483cf500571543bc76d8b34d64bf21b424e4e6e7eeebf0e5e7be1c32b70550919d38fbd077d565dcab90599ce9b2e2cc2299a7d39c2d633004989c116a8bb89052cb6833784fafe6ad84178bc82c1f795c281b914f409c3fcde9b3bed5690f37d32f9d1b0243639c58f18ee9f8c44024f8c8422a823060b2f77a4d462ace6b6bd25899747b7b9b2a1d37d36534355b4d58e8ef5077b0e9bce3f705b365efc3c202e670143cf5aaa10b6abc74f8f6cb2f27db48590dcc56180f4974ad49f5b35ef422955f9f3cfa1b1565eb25ab393bf57c7caba913ee6b958d53975f514c07ea4ddbee8d4d32d48e57b9a759c7b31703dcf691a0e05379a1f9e99fd0fbdeb24b189f699bc56333c0217ee00d55778b12343de934b8dd6ed1c75af9fc5447afa5df38d07e70493c357940a7fb51805a1d7ec60207f00848601935206f29081ade220020ac0a75859b7cb4d8a1f038a63ba654535a56a0e680c1d20b503ca4ccf22f9eef6317c4134334c8741dfa5e9605a4270159488ccb99b6299fbd770c84379444561cdc15199be815b9a0fde325fe2156e9172a0d9734e1186b33539197571d805f63252360b108e21ee41d39ddf5e236c8fe8f9cd0b9d67e4da11838aecca1602f949bc7d6bcb202228bd7137bb9def02b972e0a011ac87735c9d00671b12e207ed7291b45c9accea46c607bedc3671e1e51fa359ec5f4b53f279a6a784a7f6cffb99f9ed42d889fe615dfd06bfde619949e8e2f45deeb05a2a878746c79f5c5a2774b1ea468b6d0920507c86cf1ec85185fd605dd45102b13c4118d64634ed686a48d7eede74437a456c44f9ef3549c8b3d35fceb6af049dfdf905f3fc
22cf1e1bf9ad58d8fb15f4f06bafd1f11853d522b902f404cab3764e365114c6075ef969df26a0d7ce0cf68dbdeb46c6ed52142ebfee76d305641f03fca594dc4961b50699db2d4af7a6eb90bd1854d4026d665022d1ff6c03ec07f604cf08c6831d7880cdccb68941a63a94693c4a30fec3caf919a1d9d3750631479a957f3f720885f845cf340cbcf0b589c4a1d50fe0b0bd6020461cec5ab006b24b4ce88b7d0ab9110fc37a20e88882ffd0cad794ee9618358abe7eb6d0e7985d117bc550785c1979a09a703ebcdd2dcb3d4dbf674efe06210f612b907c95bac4b98690d571804546237d853aa3183d5e31d30ae3ab626c1d6881fd75656a82562989a819d3349016952fb4cf4da6790ba36c59fe48aea5cb858b70ae3e9e26abfa1a7c011664b9c3ec9c7a0cc514a57059b867614ca5010f015ae43091422b717e54678e895144bd51d09f6304c8f4cf42e97d9594453adce4c42831035f8d974773dc8efc0252d64736652c93e56b578a0e68f5aab3e25958a5f1b53f6a834e7d9c8c0dc93080f182803de069df811974cbf83c7ef3aed8c219ac1aa8be443ae12028be99bd9799f87fc4e38450122927d588cd675a25457a76a31976e5a13463e77954e19238331cedb4ed265386918242b3d8dd0e4bd77924d549763f16ec05213f1ad4f7638451e8fcbdeedda32fc7b6551250802e22697cb27991887ea0140f6f687c408d3c655be128c2a8159602f863c303587b8547df2b6a023ae032895e97bca149cb5854a70afea199aa1c671ed12fec3ba22bd3a8e34dacb0be41524b0d9e33f7ff574700f8b7f17b77cee03f4eae154fc1b15b0f8eb3e46de433868a8d4132cbf2a6a19ea8a2d658069c99f56d64fbf66ac976bbfac6d81797d58f68cdbe1a5ff298f6b5c301b4d26fb7ae07598cea29d04c58ece6c1e093be816d77398f2fdf8df74904c681ae3b0c579f2f70ca91ba244a359e7d157c5cd6c88c87fb53b4dc157c142192cc4a958f3b2d0b24f3c6653494c9323065343832ea725a70df0710d0a24811b18601dcaed9d47dfa67c9bb06477be92edbb4bccb6cbe12a6eb8fa1631e1f5e672aaedb8482bcb547ab609b91ab8049bac55e9cb8495f79972b62c6ba4ac647988cf658eaf66e5b4cb0dd916b9ccc63445c8c848dc36931141f293f838f00aaa68e48ff9491d68b9704f0ad859c983398d39db5664cb201b7eac5886d3ecc0abb181d24e538c4f06d890a94d244c9c993973e671359fd5eee9acdca0f64159c73c50d5ee8fd2b9e88a5d347188b432f5e1e41df9973dce0017022d05b4c018e0b416a52a561eef4010126e2cc8013fbe6abbba1c723a50f34616fb40b57e644a411ca0b77e0856bad266bd8335fb99964964f5389c05350471704b5721f2618a48c92fb71a097434eb133e10350d0b3dec814272374d4ff3ca4e1f1bccabfce4b8924702470e33350e8404831cd7f718f2561f29131ddb832d584d3c9ba39ad451483ce1fae1f0d515a9122a018e3582f11202034c9a6a71a0176ca62a115db6f6bb5d667e55444834d5d6a55663f2ef56851bfa211f0adcca83509660c1132218accb9f443d848b8086c0b860930f2c931b8be16070f3bc67d2db39c64847b2838f75d82d1586ca9bab5314044acb9ae39ab93d977e68d55413290c39ebe944e40dc4e1ebe96e94595d70291aba04daa1be0bb0c5d8c760f80dd3953f8230dda69aa10326df2dc904035d69d57cee76a50da549bd9697296219d54ef90e2014b3ac727f5b83c58db08f23dbc1d07909470b608e7f92dbf9932b3279cd15337f22f0fac1130112ef6d22b1a1050218ba5d5fd372059e01864034187d10b31b10b3d8cabe72f7ceb876bedf5f5a4ccee77bf73bf4ad2f12a0915e321d0455ad38c41c82ab1e9f91cf427b0e418b34c44a98a83244dab79a883a8f2b8254164d5a45aab1d102caaa5bb256e8ad5690a0924b84d834a8cd82da03d84a180aba28ae10e0282265b6fa9842cd5ce5df3df57dd719344741ead48014c24d90e19cc3508fac963c2482fdc9962eda66991e155ac592add9ba1d7a19c9f8422fbccce6c3c99a893d00faf938786bcbb57c07e41910597c8058d20c925cdc7fcc40dae79900e87ef3f5f99092073ccfe464f1e1f7b298366cc510c195cd626fa4f61945ca51997e34e363c3ca5a05e5bb8742fb57df8608f0a5ed56261077d850fad2c5c31d986733163dec4653c06ef924ade4800f9d2b59713f76a2fb4e5d327e08e3d2f8a9d4f6169ac8c6da2d466d1ed9b2b1adaa44b2e543eecf47a33e52c2d386d5586e54f324c5640822538685eb43aba35baec564f5721c7638fb86720bb6a5fd172d673e7e6aaf6996cf85dd3085c9034411d4751a138e5c73615d457625d7604d07efcbd5b37b14389c08336a82aee5b17d72fafe79443a404c6bfaf34c733bb6dc8141ba1cc80b98613d617fb315aec4a7347771dbb8b9f6478e23a91ad70ffb6d08d49c485602b1b14a
7cf39524702bd416a1c0858a5774617ca952cfc792e6d412666d434be355311cd538a491eb22dba3ef22c2b6cd04fd225c6c85bc70947bdcf05d183a2944a1234c3f7ce9be4bc07eef79607138fa10c39d63288f19dccab87b585b80b729979bdeaf0b45d26ca981fdbaa6d4cd8c9ae3db49666852fb2c5ff306ea6ae5f576b94030521d31e9c13a4df0dc29236afacee35208a1e669f310de0d50f30c306afb03b53a21db6d36d88986c44d6d83a784687b6453833ad3eaa49881685f7261da92929695abc9ac462d83564e3a706759556bb872183afcc0a83a0b3b93b79e92926c86125151b2ab0659f505379c49093f273b2245e13620389430881e91e1ae380bfa1554d0023ae865625b3052b29a0e65b0802c01264208ddf6249d56545ab7552366cc480dbb787f33b52d97632721dfdca9ae96a0dd5c6031988b8703608a41dc02105c8e343513d7cf1aca6676603ab6f770fb38f06839a1a116d2f6de7bcb2da54c49a608066a06db05958bdc16b51a6326dd5dc2b4387f9829aab1d15ea0cb96275cbc205c78bdae2f5d292bf02202e7016143569c21c2f2c7fe208dd3efd379b1f11924a18acd408c71847234a1c8b26ae377919999bab3b333575b639c48f3d52b0f553eb35cf77066c1618f3e95809260524a29a594724a29a56c18aa08461cb9b8285c9dc1368f148a41a0d3d84241bd72202d02f5d8e1a3570d0bea1f9fa59a8ebd968462509258ccc7536f43dbeff22202a949598b03851f9f9f18909f1f9fd88fb5d6fef8008901f1f9f101f2d323d66852445f99854fe99994d6080844559670f4cb96524a29a594d25a2bad94544e2965ebb06c96eeee2a659294524aa9e9008ccc06560a8c1e6c68dfba5bd60b8a98cb4657fc400c2fb824196d59d29ccb565926b6ff892c799104438ae2861c454fd4d1efff427ab97f6a08627451047283c581c28fcf4f0cc8cf8f4fecc75a6b7f7c80c480f8fcf800f9f14cbaf4a0e6b667f7df525b68f6fd9e4550487f44b2efec5fe81fd22bef18a3fcf844b82a6448af5c08114dc81022e08f1e3b7cf40aa6458b160a8059fc6b9e0621c4a565826cedd57b4a78dce0a13fb9c824a0ae7c18861029b255bed19c021d4408f8d32bc961617f749f8f7cf93b1aa7470b1ebd73d353f12f82fa93833c27fb2790fb396b7a23f64f1fd6745fc6003bf863df88e9516fa4267a342ea0def43df64fcf45d4f7981e0516e939bd45812ab7892dfba4857a6e75308ffa1e98478146a8f09c1ef5464c0f03b2db74600feaf428905bfda68f5ba78f5b3cdd1f39997878a76f7b07f2f08d7c4fa96ab0b01464ad7456aeca85a90a8a68f28f322cfde93ff8a64b037550e3786c1793f86505b681629728c6950f8302311a27f6f01cd4cbf7e1393040dd2f01ceb8305be13934dedbe796e9d973526fbf27f5168c7923a74f8134dea7fe48ab27e6655e0634623f06a4f1401a17623ef53c319f028b50f1be088c09f4409e984f7d0a2c82fa18f0488b5bf1b767def1be460cf5f68dc0fce98da0ded6d846a818393dcc1bb15ed846be773d3076e11bf935acb0308f3abdfd30d278cf737a98e7b18ffae8d178a00ba787f91efba867180c0603694cef7d185dceb1c8f3bc0fff16a1c203f328b0c80066e01282a80503aedc46fe0846442d6e516918ec65bef1b885fab8c59daefdb8f5c3f447ae090ce34dc9a0eff449806d644be123d7956f027f446f4b7d6129e88dc0f6471411d7dc17242e5e63bfd813bc14faa2a085edf7ec179b310930573e909fcce7c7c7e7b0b0d9878cd4a3c244b1b47bed7b3aedb937b27d07f670df7d071ad19e03b9957d8d58cdcd7ec526c8a7a10609e9b434d0866de4d3505b390930e1ca472115bf7e8d60caab28f04d0a0bdba0832acf86d992cb9191155dcc0c29966991a7ce4cc3a13dfd50d3b4d73ed3aad5ac6635ab8161ad5956339e1fd994544a29a594728b1439a59c50967474542c57e9488077b4c836adca410ecef1217d78ad96697064063103ff0b2d70e5d37f77972ee3bc2234c41c2296a466aecc06472c3879506c587b84cbbcb23285e6b4414be7642e21de8934b3e2414599948c281be24c34c8e14e1ceefc705ae1b9f8663efdac7c9132d4a0e9c537f36586907597d436f3bb181283a8fb22e54e2fa8e84e2f89953bc3ec756717ebd5fc191aecfc7073a1f826c2dcf83303b77f520ddc9ecf4929a38c7d7265475660473ba8a0d0dd5b8eb8324cc9964c1b542a7e568f67798bebae7eddda3e186d66057bc30c5cf9c516274d28d495b741cb373ecb90af59a74829e39d586c96554b3489b24934896ca5f2a395f3a857fdaf5075bb67519c4c706c43a5e21d15efc8a7d56d8e8eaa09396c4cb0a9c13935547ce3839d1f56cbab1865d24c228a21f958c19c387efcf001fb61adb53f7cf0c07cf0fcf0c1f3c354c58b99648b175b8ab26469e1b291156478f185d111ec46adb54a67adbe246cbf7bbdeee
f387a50ab842d1162207152caa04faf7e7ad5f1e303f9e18780ff3c33fff800e955fbfc74f4699c78bb81f4770bdd3f3c87e04265e82929a5934ec9b334a43d823f7cf4cab7d8b05b12122312b252115812cc87b747a01b666739062ab5d6396f05555d18367e683fc638b6412daf7ab256067291566cb53a09a01fa6510d868c88b8082cbc904ac5c0d0d4541dd4ad117e37b4995aad60065a8992268f85148e740a3519eb44d8a7aa718ba6fc298bbc566b3bc6c25844801b461bf39c0fb04dbf0c4a362cddc6c17b00ba1d896a4861bb1948846ffabd244bb77b3201804790e91a427f638c32bac7d59c524a29a5ac4938c91c3233c7088633c6586f6d9cd92ea504a3bb8fb3b33333f3f34f779f93098e6dd02eabdaa6a16ce33c7de6ba0d490f5dcddab7982cd96b65216e1b87637cffd8695a707852e69b78e4cb0844506aaaf801931164f84110165c2c01c0096b2f1b7111e302690b2515eaf2b3e72083472dec005a086531c5ed9fee4ac2b8fe38dcdd5d5271c3d4083c58c282085854713be6091b53495c772a4f14218b201031059293255c54362c969470fbe3111e622d98e18b25a6e0018b1239e4c08b495d08402b41ad5d134d341bb82d311eaa0d42db163274c884ac683730a142aeea439259832b13c2259768d21137380b342ccd242c35740d4732c90957cc614108c96db826f50007166a28721babda22a6d485ede18b9042083fc414343c1116e6b2d1144e02803405954fc11457ba207a1b3fc0c10d9f1505c736c218bbfd128c1c23478e71ba4f9f3edddd9f99e3cf08cecf9ebf2019ec5629e594428610e92211a873b667bb7d433c1e71dbc1cad6be21447ad542c2b01148089121bdea1fb2f1a717256c39c2ceb04337410350105bdc70a508586a200586872d4fba1d4c4dc84087212bb83cf105cb96259e3341050ac228e28a2b5eae1401b5434c18455051c3cb0b103730bae207312760f560861d980650bc9060890e4244b18406475c2c90c116af1cbe60117ae9409b21891c24b2b881881a242185881918d9ded14d846410ea41092520b41022092396643c3cc00450849a50d2c475850caea862124193ee0143a908d8175eac8c81c410b3893351042c08207088c59888c1154d6a745b6c0b6c7f68ab74411558ca50c1165792d8e008ae41184b36a8e1063e40228b11572cc99a87fea8002bae6c21c30932beb0e04760c51052335536115c3f419529505cb96107323882728222c890226025134031220b295390200a0c8e20a1022f576cf172c1acbc002935c3e54ed5ca2a57c6a0580a5a02090f455034115664e99a87c8748976b99994bba339fed96308400c15f8b6b7840d55574a0ff9f9eb91cb201de3ab8909b2fca18a55f16bbc1bbf074fe748980d2b90c0a1736394ceed828e1fc3220769e6c4ae8e8b7fe6204d202efed1d012395c94103685c5bad7e72f9c42da0d2c3f08ba64cf5e0c92e3835882bcd136fd5dd339bcf2c2fe0d75685cbea2c3971bf29117cbb98ece0953bc3ba473289051ecc9ed76981137e4188dc5a2b82177c171c5ea6ec1ed21dc1087b4a9303e232c778c31466721403a9318638c3ca59cd2250faa9492e365b6c102cbbf39b1fcf2fd9db3ac726fd10571bbfb6b8dfd956f6275ef5ec5f75eb67fb6ee8e069ab557ff1d114a1c93c944402c61b22602e67426bd9a93c3aaaa6ee34ad006b34e41d8227e849756911862905addc7905a41cbfb22ac050f7c88b56260268610238756075279214381773af4dfbee7ddb64f876584f5df60604019659411c5f4f28bd27d94d3ef689deee3b68f2f8be312bc50badb6f4a562e1b2949815d36d202e9f6f01cd3672f3bc9f5519ffd346efb88e3b9fa6ddf71fbf6786c271047dbf4490359f1a6bcd8bf39e4e06e713350e763b94dff8c1236e4a31f1468a117d07d47f140a16f75204d7490c62444532b8e8fc585bde2a2721d8c5da489dfedcf59c0124fe3b051154a49ae167c08e74b0ad78d3f9da583a328de689cf85b6f4fbc74a6cb73b449d438f2c1d58d39ab367f9855c98ad116140723ee7c4fd239d9cb0aef44a52a319f82e440c0fcc9cb992ce97031a201c5e1c5374a53908e5e4b4b21ebbe58385e9ee331f19e1c7940c0bc294855624828bd963c57c8ba2f160e96cecf10144e40e22e5cae6e9c5edeb54ba7c5dddddd3dc618dddda94f778f364b8cb1633e30ff6ed7740046e6c30fac8e1d3bba720862071b651307706c239c44b72d58abb0f199c88763458ebf707b4ac1babdb63d7fa87dff0b1f83a19602f30dfd4ddbaec645e1dea63a87036db48dff6682067f083603ab878333b1229b48d101794a5097532ad5bcadb2a86b7927a2aee51dd4f5af7147f3c75e0ef6f9c6fb061c6f68e35bdee96766666666e6c992f9fd7f38933e3066666666698db4701e65886cc8b1587ff8e8c1fa78ad3542118b6e64db96659c941b2725a755ae7295dbbe6e9c161948d383
6ff62af0cdc07abbec0bfbe9f6b53b72eb3764de0dac806b927905ca5ecd395fde25120d61437645268de38d9335a0cae0cca0f1fd97759cb0837752301f83ea1c4e934fe3f3f1b5f0f548813b78fa84a8e37209313060a873db8221eb04860c862b0fecc0f039700339c982216b6900df2ce0f6338c617782748c3823ca2cb8f833586037a02d6813da92685278a77ec848b24be37c691ca019d438dadf190e0c35156852f8a689ee068653e8ce20e9dd85bd14621937945dc22944a7b3cb2e9bcba3c43ae79c3229e906a11b92a0c4e87d61fd3b7f61b13058154e0620c5dc2bffa6a45995fe71dc2639d9799ef42fec78d8930723a5fc826c92512c5de6fd0b9f068d4863faf775308c40cc8c2e45a393f15431302a15caaa54aa93cae4a954aaedb5bd362738eb3176d3f8eefeeea0cb65e4a434be0fc5f7a0d839a9a4557644377e96a5525615b3b5d68e4aa9a951bd924af3a15bbbfa5cd76f8f470dab07c57a9406a145b657f4a307e55473fa1a6b512818184a7fba6799bbbb478f203f2dba0118c3fbe28624ec2c8694724e4a5535b5569d1390b4703a5da7aa49a554f6744aa9ac85818189898949c9c8c8cc9831c3dddd65475535b194caaa6a52aa9a544a65553529554dcada946ace4929cdb255ad75dbb81bb6529ad9b09cf5cce362352de571b161777bf0b8d48eebba21eb3e7b82a13479ca761d17dd7b86c87a103aa775f776b7deb9c6715c673dcf33994ca7ee746218989818aeb5eba638f377d5360e43719ddb7d5ad9ed75bb7b4bef097e30a4dcc1b0b31685b226eb81613d306cd8c574168a75073b9fe9816d96e9dc4fd7c1f002b7c1b823b6025b35558dfe1112877c07888d9492ae732ce87a90f864be8c7dd447d7a30f6f8185155847f605e9550585b88d5310035c82bfc037fe4e58187e78a7e6fa0b89364419aeff9c9af070dd456f02ff512ad842db340bf2059aec7947fbe8a1c4bd7fb54aee6efda32bd3e26313c28651886502eff0e5b8c69045e5644a29bfbc0c5ed68db1841b85b884fef0660000b81129f46e7cae1d1806891f88021b3f2b810da3900af14814ea4fc69bc291cb1acb59b23e0dff06648cce72d6edf92dc69cb58b418d13659617959169536a7a0d073bbf67dc21342bd00b86315c06e5d2ddd19ca6c44611aaadedbb8cd676a3716eacfe55aa2f071bfe0ce5f561135d5c8c1340972cfba1062ada44e07a649bf9dcb66994526a81eb1b687d08deee2ee79c9e65ef31ca8eee3e25a5e017a63a63e618dacd871924d6bd57c8ccdccd20b1da0c921924b39b4142bb1924593783c4ca1924d66f7c4eabfe838d1f767703f10e69f619247688cf20d9ba4d0533486cbc33482c6b937651aa1684f7e2e2e643337b1dcb5615c57639a70f72d2acb6367fdbaa6f7163e99aee1cb76931e9dc80f8c49ef8a51f8f8fec495f5a9fd42727e83cd1919b104e409da0c718637cbe911bc77dabedff04cf91eb4c52b26cbe1a5a2a484aab199d9de7db945bcde8949ec98ff8a573f9592b2d4a29a35633bab916dc764d27af276e73cd4801c736c28eed92c51de77d03798e0746a00e64079fe87022ac57bd7d443c8a10540d9baf003dcd36f37deb40bdf4d3d8667e7c2243b1cdfc6eb00253ca96524a197933493a297529a913eac4afcc9c644e6475529db8e6647330c88f13a5e604cc99d50eacc0f58ff3398d3ef79d31486d8322137927729d496788283f6471492a484a46c773b48f400cc4400c04c4b106ce89b0b884827188db3f64544ef74c5d112c1ef5eadb1ba881600d046b205803fd8cb014b8db404e804e4e4eddc6694559cbc060abf57253cf668f9248ecdfd8527db3e001adda0dc58205bedfe60e1cdb9047ae8c49752c226cd3bfb2f94f55c3a2765297ff0883618c723bfea0e2c12ea9b186d52ac6bca418e3979c1344854e97d73f0afc4002e213f3eb5f0b3d8e7866783e4ed0d14161c7fc70a83cd634510cf04dbf0743bc3f4f946850b5a80cf18410e904f4cff024122b6fe468706f2609722b062635a3e3e768a8663a43c9782b80a9984ca56dffa1c0ca44fe18189a8ebf5696cdd1574347ab497901bf7d351fd89a0ed60645c2075ab877ee9d7be7aab0719535e7542d44d5b89acbd4031baeeed378345efdae99bdd5bf8ab2d1abfaf3b3d7fe46c6a1093703695858dde93dc09fab345bf6d25bc19f0369b6edb7ed6f7c35206b3ad8c87a3022c2942b49a64c4925d5b870e631a464f0bbaa8413fb43e48d1f043785843c46d0a710ef643c7c04354813411a4a93f5f7f83768840a4ffdfe9e3652bf9f5b1e055dd0def434f3eb6f4f13c19efadb576e6d200dfd13d1047bb437bd06d2649909a4a1208d0bfef57bfa4fa011effb2b68840a8ff7fd46baf7e7568f6684dd46f6d437bd093452dd464e24155677a2aed83aab6449e692dbc8dfaccca25e4998267682d34ad1d12ce21b94141b4ed886c43bf3e5d665e95b8a2b804dc
b95e116bbe14afeaa05c03806e0957c49c13a612bf90688eff3b3f934d984c99f5048ee4abe39aaa5a965ca95ab2e96ae9441199df26f4829b97a44760d02c73642ee75e773a6245276bd925c445db332e14b87a4e46ae3d416a74c49a49414883ba9943ba96b7ef6c56e1797a2df0a692a651fa5a7e78f3f893cc73f8653ca8da6eeee7f538b14cf3175139c4824a4801be21d7a2748e382bfe969b2ef3f3d4d4fffe9fb4f208dfc13d1b5204d06f6f89b4ccfad909342bb74cf2e7fe736b32c7c33fff4d1229912d80d9c48304d6c6dff52bd9abe729b3937f051576c05e9d05394149b8134d9b3aa5773d2d9857b125660e7d32a9df3148aa659a6c64403824aa145da4f97dbcc57a9c00a86fcfa70881aa7ef8c4d2512a6d8fadcd7e7c01952d7eca257732a4da4a965be09c8d2ef7a359fe536b5740eb74c32d8f9d1e351e97c9a5ab3a7aec66152e5cea7438dc3ddf9f4d538593a072646bb0071e78774ca9db4e84ea2194cf9ea22c50d29963b29d19d5f33fab590650ace6c1bdcf9ab2c37bc71e7e45e2819382d2a64efa06bee1aa785d52333735556596595d2a57c8d8c181930328ec2602c1f2f71050540add30eb6ff4464b9676f02dc9f9c586f85ca0561e9d5ea3e6ecdc46c685bfe55ab1e06715082f38bb2fd06f28b552ff58135afe11e79c814e8cbcebac894842a7fab9511ac415449219dbde8b0640b911456968041cb9dd0610aaeeb003e8c2f6374314696abe2c668c1eddb7dc471e37480de17b8579104eeb9410e3f15b22eeb5cd58de2898a4efa2390609880096d020c265a1333a440047dc394095eaa30f2b284972a920a1a5248a1194620b4a3dd5dedca80daa518b4c5a8a2c3a56328490ea11c425936e79c3387cb8ae906da3141f8a29b52450e0dc1400e3173103333771095ab5dadc4e5449228442917fbad0cf6e2fb17dddda95377774a9d3a6da7d4a953a79452a79452a794d214e29dd4d59d6447d330882acbfc67c711156935c78bfb4eb5d6ef6618efc7f574fa6eec660e2539847228f142ed4d2fbf5086d3c2fce9abe7a0fe048643e43d3df733a7b7603d813994c412ea2987500e25a7490484e99b5264209adf3ffbb9ca24cce1ba358712edb33587508e2439841a47490e57ad395e37fb6a2783cde1ead5911cae1cae22535325b8a36c661e04b81b63f420a0a3090cf1c6ded15547138eeb66d1dd4c1f5ff3dee4813a9a782a789f3dd7c4e311e76b62e995ff3cdafae517ea78a263e8ea68a2c3d538f3e3eb60a2c3a5a3898e211d472620ab6348c7908ea1d9c5a9850d414642502c75e19501a5a59cb56adad0b66d1c4729a5d493a64a4f276bdd524a29ad30eeeeeeee3533356564522919198f4783fdbdbd68f494f80955b755d755b187ae5fe5386efbda11ddeefaaf9c35e5faeb9cd0394193880a26ae572237a262e8faab6c6c2b346b4f3fbb13c83beaf100415108806e7b3ca47784328db429a56e4de1110ec46e83617ded63d98155e2206909e8d6d73e158bd153217b1a2f05e29d17bae7be5e8efb4f7bf1cdb48183f014b39103eb3de77d369cb0dd8733b6cb49b38efbaa66ba3b7c26350e0df74aa6eefc393463bcc3ddf9da8b7768dc39378f0737e78c3b7f8604d64f3115b83b80db81cc371c183e43a05ede88ee7ceda3610194604753b12766b44072ddf75784f69d9951c4baaa9a9afdfc1b9d097c90dd26957d2a2e0b9b5da894adf824ad644a680000004000b315000020100a87c3219150248e436df50114800d6e96406850321707634192e3400862180619630c31c000630c410691a2aa0d00ffe0ccab8c9176b2dac0dc7f04b15d3d286aa8ba43390d5822007e77885ee5db24d68ffabe04e229bfc2d5d3e416ede6ac302ff663ee64507444d876a60cd4054849c917a22db79d7ca11cca17e50b79570b5b7a25a87ce19504ef031b48e22e1ab9b8e3daed38048d57d441615a470d6f65b00af3ec903f162acf67517542e73babc83b2f3c4175f02865dbd748e3c00fee0af535a4e2005868d8be9eb4b80cd835aece6e9c50778e16d7a1d247003a7c316d71bc4420e1179fae2c8a989ec4845296d9ce5c1d030828e04b0ffc5eb016f5ad48b11aeec8f607a39ef13c78d657e4f69382dc3fe7768392c47608218c58b0a2a3db138e97d481bd5340f98d72316bee70c536bdd3b399a1f82665c643231567cdb0dcfef5f59c7dfd5e7a7faf63d3abb4a81fd6edc7defa6ec7d0bb380aa2cf72e6ccb8ac478d1c52e5f2d5deed1006ad57fbd8b917195dfeb197478ac48b45ca81ba99e1c766de7b98354930586abc68d68784d4ff1f1708860ed2e9111c73af8b934a64efb6dc257ac680cd7f3feced064e50b7abe58cb6b60785360a77e36a56601cb86b33228a2eff6adcad3fb102608ad556813a48225926356cb2b937159c08296c12440a805d992d6693bd33f6c481a244854038d6ebb3796ab28d2d9b7db3e6f1ed288eaf40
37abf67d877a448026a1f16bae67602b50b052f8b502a8626bf0a87bb548f48e41c21c6f328dca5bbc70f682d870dd16d4c83070c78bac1498da0bdd0a0a4b673e94645c9c4969b48f1d35803f85f1adbcf1b0612899efc55e2c8f5623a4eb3ccfee2fb514dc5965709c95e967713f778563d389f303b45d620cae1a7a02cf6331e838d989f3e0a644451b56f17c5c5c1d337b7606c40360f6744098856b3db9063bd61d3bee0862f350f94e9b2ca8cd82e8f1971d8e482889e53b384da41264a93254be41c2ade9deb0cdbf32ab55e61a223800d6aa736561adbc31c33bc3a19622e9a07d06fce8914659b61c3f1d68b47b8cf3d7caaea442c6bf3d4b3791586d6803bb3a3f8f878f5ac6c3dda7c39b968c54d2ed1f12023a7dc21e3a74accc10343f47c56e509d8a28085aa31be809f6f357334890cfa51f483005fe91b128462bbfe49f0db6956ef710e878b76f227ad07732c5f8aca3c0784d73ec248ad38f6ca5af88117cc31826b6f375e07b77fdbc7b6d434922dae40c9c5130d88a5601d6d486ba3f57543e281b7f8181722f601caa6af7899a1e858be5c66903b94460bc8e198abbbc8eebd43ee82297a538c5ada08326f8b8ce045acbaf8e27bf003db21cccb3056e07c5f2b5b8605f305f5285b8f6916156977de5090eaa069e4b1828ded3e1af211825da7f9d0ba55bdbdcdb68c6170dab99c0f4a09515ae557272422a69c02dbd1c6f4bd23465e470129761c2df7426e61223d3651c126ad478fdd9499c00f6948895570d40610db496c247a154ff1101646acd12d5b2e5b1c991ce162951a5c30cdfe279569a1e1bcbc54d71c9f2a5b14a62bf45aa9a5f718a75ed80c70b8b25d2b88820f863128e73831c234894447d0c6e42ca275d85a8e7cae967bd5ac5addd1d0c97b5b2991824bbc368388c97aed14554d925f1ea833387c2a7ca064a6312841cf1cd431b160d8665fe59e41a0f5d4194843ef3d99d41a51219272a424ce203a377e2ef31d5fe618b44eb7dddcf9bda0f3076137a776aa37bfdfb430d7eb0b3a0c75e21150f9589cbed7b2d86a08a099a97579a752e40b53f5f76c8623ff943aef180cf34bb49705595b8b5ffff451c123221b61d6d333c93e479048f3fbed1b84e6631c09724933125e6b03c0317f633849b35ce6d7bc35d390be0df28888aa20beb7569f0c32edef2a3949958272ee990d9242e389f5aea4df7d90688acb62c726c6f69b8b3211411b284e5501626df54eba84e5db979208e5f434cf24c0191134b5e182f25a492083c0ab5585afda80e00805decf58384df5b5a138e33380375cabc71bd9c344ad763d7161c4d999388b9f690e8d4ade06dd090294c445a78d556497a30c3605eecc97ad988bd1053ac2633d2abc8be6b43ea45a004195d2846387d5e1f3c8d2f104608441fab2ae58f0165792693845de9547fc25741366f7ba486d8b1797933aa27e20f1396d38b1608c1b1798ea2e8b78e16bdb591fe4c6dfd6ba90ad29085b6ab229034aed62883595a9c251ff5873e997242a203f2fec87318163db436834388cf1c7823f329fb5608309f169b0cce8d04db9e40f82cae24462553a4872ad7e1dafcd05ebb0d61c5912f25dd4a867de2a15e6894e056b28df727ba80050e691dc532bfdf457b7fcc85334c06d30d1fcb57f88367be1a0e433e6ea2c21a0d223d8d03e3aeae82711993f6672722a7ce9b3ac5c05d7f22131a143f46556a5ba69929abc214d79ce4c853b59405ecae314b84261f716fe65bb5a6b96e7ea28f2fab354564ae997861cfe0400d636e966ca50f49ba948783e4478d164be185a7af4bf3e9cf44e44c8b02c854aad008342e759dbeac9fea174396cc2473044625d70cc83d11d5a6ce2f764cd9f875bd8fcb2725a175cd57ec0f2c879bdfd99602dae66ec10e424fc5cb8aa6e9bbd061beb3282738f3bb39a8811e5a29b5154ea66c05e4ec1dcc6fc49c8e9274c021e75f1a7fed38bae859b5de3238e6ebfcc2b747f62d095c37eb6b0dafbd624b5301da445f55116667a4d34d74bb5f0740cff48ca9da930357fe115a149aea6edd60441528c8f46c5e87ee4a8665db49341e96e125adaf37a5eff6312b6148c929ff6ce2244b391659d696940d75f9be9f722d053b6ca4f7f83772d52176004fceb7e4239bc6ed97d936b702b64d0a827c1a97a478f39f06c249558e7f56a29309711fbba118e018ba0cfb1f6d22862be3252921bca9397b0880cc7dd2a8779ba44c2de6283adcdfcb16014bc8df3135919caa078bf649717075a0f6c227be8ea6789895dfb12a85c96b445fc299b04f9354bf44e870a71972c0549f326615570bebd139b7060eda8875a242c5c9c3449badee9c1c08662748715acdf9fc5943bea9e34e1eff75310abb42140dd3e73276a7135274051fe18955700eeb5235585021d38c6f5c9e41e9b5aa23a343ef9941bfac62bafc069d32f4f70c23c555a22884d2811bbfc7ac2713cc26de02c7ca00c7e
574556f839520ccdb10d39c205ec36534ab21ace4e138d6ac34857614ad9931e293911a8e4b2c87111b46b1732f1c6a0567e938b551262c74e53aade99e06c05be55fde979bdd8492e4a8daa2cf82ef67512cab6345f40487676653b3e3dda7ce98f3ff29d5bdb5cfa9ce1484c5d660d1d0d90e1120407fbe537e9d496c203eddcae97f17424c8a2dd8fe808856971c45c336140a0ea24acc54c7b6df655a5c6ef8e44f3df0229ab6271c9c68d9394b0050f38181097dd2e19e8226984e1cbfc6ee77c00b021427274f51139499ccefcc5f40066e3b3e758a3728b1d67204165f2034930b9c963d04eda615f609ff01ca36c16bd0bdad3f7b029916f7b46203ea04fc7715b55cac1255c0ee170c64ff2b5d269cba1156ef43b18e9c8183d3a40687fb6c4165c774286d62a042a5e0109436921ee9063ce2d4fd938043c0ac53e44cd5660e645841d8f5a64009ac8ee3cfef91e27af745a292190ca3ef4a50fd391e7f863fcf562b34b3b68db880a9d851284377609898391c4d6b19305aafc16c212f23045d9feed0366a8295ac3b48b6c8046141a10c1d188111d5de7cd67d48a83c61d84173e58a79c62ec4192808322894e18201cfd33733b3c27c10be90b837fc3810b69a2435d19b24b9ebb202805a0a60193e970022c347291db464b70c2bc2f119fc93023c481d5b3b98380d8011dd3e03c52d9045097cdf6c9d5ea0bdf4673e49a7706ca1c6207fe3a27dd31012720f1be7f5770814a4abe963507c74337587eae014174814744ec48c120b096a182f33f878169ef8a4d4055b86cabaccd81e86b85ffc4139f57304ee5e36625b240fc00bb00d2cab3f0ae8021c28bcd4a4c25ee2c7cfa01bd410ccc05198c42157c170bc6f736d3a020e18a82bc6b46484daa27dee4d81847e418cd9c6075d4a8a7dc7e327a51fa8e575ac9940c9f179c7370b94e920a306df32255913beffc35510ec9d36d871378f7dd8cd95a6d17d5d661c07c43e30cf719ef5b2640dc52aff427145c062ccd26dc94692e01ad54020ab0c2f759670a1847eb8e4f9e45d701d194448a1695d5230daf75644cb7260c86f87c67bf7154439468f731fb5c548123d199e3b7e1d92b6f6a50f584e828bc2836d0539de8aae2773d1c788428d870d006ccdda98f43f3b9f38d471471f4fcb7cb8e3d2d2e93ca74ac701b7999984d721cc7d013d5afc552e62a00ceb1ef1a2ef16b3eb5b10a96814367b283d9b3b2f02c8687231b3415987349ec21451a08c0bd67740ba8f93d09d2fb790e1bcecb56d9f9801eaa1d196cbd241d9a25323c23ca73d58ece798b9a8774bacb9d2dd42265b59d6f5c940354d992bec4795e0ba33f617926b8bbdf59247ca744b0cea638470c7b221624e6004cfc564af0bba1b34625a4fcf7ffd8b28fbf072112e9889796976c37736adf445eeeb1a84d697e5fae89ed91b2172e9c6416a204b9ca68fda0a83fb00c429f757d80473175672ab5ec2c77c54673998ab304a664f8e078ca9af0872b11029c4b8010efc720d35b378a5aa321e2b608feba4220ac028adde00a2929c926ed85ec88c00d33b79044f135f007c91f43822c0da74f86922d66d1b81a1e99384b35e42e1c2938a39458000d7ee00ed8acf80d0672810ad1f507e54aa6d61ad4d285391f61b6b10c6cc53005cfa72e789557d30e9e661de0c79ebe091180379b2b53d42a01aa524501374405910aa7fe5b9696ff5197f64f135541ca027a639c19e224dbe34d656a80f5909b2e129878e465d9d4194be7788430d92432954eebb0caf7573467b89cc9dfefb46c98c66c913b52579fca24fb599009ff3e913217b15de4d24677c25fbd395a35ae254deb5893cd7b9d74c946b337a6526c933596d02cc7a4f85c45c166c6217e4d29ac5f87559b61ac3bb9c15c2cab7178cb6899006eeb84d7a8c2435b9d223b891366b26570725997fdbab8b79d7e890a1c28b86e3693e5c8262a48bb63cc67bfcab7993dff89b00b805020a94e100c054cd7a863c0bf0d308494e07ab84862be3323a9cf9f1fe5cbdc25c6346842e9dcb291373a5a4f30a22d39d005ba5a0b2a35d41a908e02b9822e0c60588d8fce8285d05581936eb43f61545acba3878a1c6fbb5626525a567cc695acad540cc15ce9463a9ccb195d25ab7657a7dd30ba2518452d6b710a9d844997612bcf705d18e30d3d1c244a1b5f602fb8968b9deef00effdd236c1d4bf236190b445b5b2c4bf4f521afab098b2d14964e00bad524dbed11c508788759aff4c7722ef32006017a686a91758e7be8f0774a3c395c60f8324b360c61ba20282f8aa5cc28017542e23436e0684e6d2c0b5b9621ecfa04d4a541d2ffab14ffcb2941c9d42d61c624b1ab152439a72ead415ca705a549f8c28f488634e51b6d62b294f07ae47beade113e8fa4dc1bf94507610c3f0182789455290e7f1494a81795e2eabf8b562389a30acea0e565d2d11b78a2b63c3ca03c17b18160f1f4623e7a2651c7c
9c94532f916fa8a77bc1e13597689477d4247e1fd94694d7fc0ac4b7881d4ae798e8248c10b5dfaf6e1c2fd7a45100ee65b4066c645458e90f33afe1bf3bd7f812b3f8392ff7e949f965be4e1af7ad139dad5b292f51bb1ae9c444a457e1ae4394f7cfc6295519888db191f202717588822fe4848c3aec0d37dc09e2cce0770b775c2c80a0d7549feb8e83703ed4e59ace75a3c4288c18d3c797c9a518b186f850f0a07ba5078073428d50b31cdece630fb81c160d18a2475cadb8f8ab631c3abff814f4a21eac9c07f3a1592f67a13cafc7d5ed54b31617acf10d011ea3850d6ab953b7adc010f3d19f4a9ae1b53abe30a23b6988e36bfe56cc80101363496ab304cfe673914fcaf4332bc2e253b5cc3ecfef074a3ed31dea2a46d098dd508cf9bd591f9179961fecef463f51acca7e0189ddce57843994467a00f846066aad9866916b82c5311e1038b3ccc9fabf03f1188027fdbda731943712e58d250df995f7e5bfd44fd6b3509ecf80ba584d9fbaa28ce870a1363d78b0683d31e60159d2533a929c2ec94f9d1d10ba1364400622734a9cdda9f33c350d41d6dc2941e3cf64829788ae7bdfb5acfa90cb382760cd613b5160e2cc47d19ef46197bacdd69cc6d48d12bb3577425834af5c1c94647a74942eac49b502e2952ed9acb0bd99c17ee0a99bc485a03688603083b0e4aa736047c1407da2b396bd041eadaa4c0e76d2229ea37321bc50390c310fae24f346022d659b44905452afa19018ba418af00ec99f1144763fa7a2183a0a6cbcb3d91de995397c9b31b7c14d0b3557fba3d8345412f080b5cce25728f89d04a10a1b125d12dedae34c887c8993e550128455d55f8aec5412d59255a83a03dc81d3595e2d3f629640e9b4d80715f5fb31772642dcec508c2f4b4114bf2e8c9e75f15fdc048c6d36b98ec44926450f709d9cadeefa6d29be099255923de37ade043bd651bd5fcc84c052d013304502f4ecdd45f9383c88ee5a9ba11a71fb2cee059cefcced3660a0672f385dd8333f30f01a16128ac57e0fd21deebf73f1db49fd1e82fb94cbdf535ebdff110af145fa696d6420706d4fc5acb554623b31da44e682a0d3b249129cba1d751cf51fa2dc4e9f3287f5204903cfb68da92e5e1433e3a2c35c1652c92a068a59b3377f28c621ddcd726d03f7854de46c865fb6a15442dad4f272b3b1b0cf0ca280daabbd8835d052d36dc4e6edeb15d1ceb386b6a754d76276dcda0bce60eaecac6bae8803db1b343a2fad44bd5476cf4eb2e691e3d388c02831c6d7aef55f469789895b952940fb11bbb1b75f2b940ec0f28cd098eda028fed5be1175eee5eadfaa35ea97b3d469a5f2292c76e30d48260054fc27b16941da5df8c64e1c4ad0c98b1b72cca81a731aff33810a5206961e71aa5881af6bf909e886ab2af533e66e4c57bcbb49e656556f354ca01308244e849d60704f07dc8d47629193a8084bee3077ff7794da039ae06db8f3784872953dc25260f43d15eaa286d808e9c4ed4c32ba94526f1bc3934b7c654780c5ee2f31ab77e0282506e4c990c71a34fceb5ef122290d10cbb514cb7e7f3e4f95925d13255b0171a299e09b21c1158598ec8a8bc70af77d177f6ad9414c1e34749ede85ff0262722e066fc76d95c4531b9678f1753aa3df20de573ad1a877eb88be59dccf9fc324be522a50d9ebcd5593989a1b49cb36edb1f90cd202e5766ac29c363bf478368eb29c30116da9270cbf9f4c1866eb23a632f926493e2a7763c0a3fad21cf9c26404cb6e0ccb29d8ce82689255a885ec8e5898d4c5eca8eb4bc26bba74abe56d60549884fe48089425e614267b5ae37a1ca6c641b29547dd0993cdfa2b9fbb122d39c4844972cc4f1a4a44f48f0ee4881dd7e681b41f44d8336fa73b4fba0cc122306772b45ef4a86f8449998d7a4573f4958830a9bb9659f7036ac38e9e18c7cde50c26598290454e78f8ffd35c50e2ed7873303975a7f9a97ea3d41545e19d001a0f6f6a2a5696de49f987f614a8cd1078e482495b736a0e2561735e970766db8ef1cc74b51b6851144cdafa3306eb8f61ebd5ca1fb4824b1799afc698c3a135083dc8be25c57842304914f71a212a9656c2c9dbd989eeb62908b0b0e1a87adde9374e0023de17da9fb8179884db62a12a2683806a081f708544259ba1ff2e400a4c4aba26a86f886857ca3498d7ebebda16e862455364cdc322d707c418d252ca70722104fddd458fcc0d48385b46dfd2ee68e6778f5850a062be5ef608606ea9ebeff7a03f6032be54d8182696227d8cec61654bea87fcb1a81e758059aa07ca7cf6d6a45a64bdd963641d2ccf46de93e6b32d4a127ff9b24de2bb1fb882d4cb6ee52c7d481a5522391932154cce1e2cb86d0df020d8783b8a275a1ff371629d0026673b3e4285bb0cf7d9d79a0a057a032643c76ff1df95e729efabfdbfe4263b3941f4222dd630c66d7628cdd61a86fdffde9662ff2791bfe4f7a6f8cca04f94c
a536fc35f521636e494c716e5c87cda60a20b89721cd8b3cc55ccd182d9cf95b01ec293924c20c6a84d5fbc4071f06657aa077bb43a2cbdbaae1c6cbc6a6c402a48b4a4c8189af43561b3a2d99e45f80ebe1e40824b26882c25e5829fc63b0f595d673ac45aab2fc0c6c22fb864922214e94f90c025e9dd607479f552e24c5dc2f85c87f748f5df92a608f9c7479ed580aa7192ba70758c981dfc4208263dff6d751ad6aeabbc182da99ecb8f979dfa2d790fde6daf72c6e5136fb1b76466160edda1b764704ae1777ac11b99e0054db236068e44b027fef423b4af8f03f289993703d84160d0edabd99809fb268ba7b772b02859a6b1a8f53ba06c4424de90da3a97cf055ca6e36f035074b5ab85b8ec3bb2158483cb64356a4541c0c5015b0203b084658ac82dd9ce451a082aefefad1bd48e753ca0df361cf66ba24d3f2589efd6f0c304e239b9a824a119604beaec0cf8bc54268fb2a3b0416a70a71bbe3d8a3447bd58683e83874b95b500ecac42c8cb0ac2949b7f4c272d67c5a7a49a117862519c58cd4ea366f46fe77e8f6d1052fc5acc2cecd303ddb953a8f4871f3a3408a1dcf68bb4a8caab6b36f76d04bf9d7a00018c451f2fcc14229fbca8e2be58c1cc4bc28944731d9124f7dc02b5601bc1ed87e5f9b4e84ee03988d80be45f2707ce212f5c78ce89052c0502694e91a93f9d219edd54a919026334f4f068087688dc40bbacfbdb484228103fa24e011d17438628d2db03d85b872736d03471cc8fa2377585c38acae6e7923fc0b22ff8361e7bbc90cc0a2c1c775758a6355d0fefa32dac046022decccd3d7188c585d6b7a78790af160c8cf021da7fc362e4025aab7445fa2aa7d7aa4315dd8243839a9598fc3b60a90d78440c90101362cc6a6420b07b212cbcae9b6110fe554f6c6bbaf669dc9693bb46920980689a77d5f7a0ce312c4ab307a18e2ba8312b736b01477a672ae74cb1de6925797603b9472444da50ba28f1b90e834915dc2e7bd8144dcd8da3f801ca2d1a9882855513953b99fc48d4cffa005bc69e2db5eda68d46fc3bd00241d246b2f798aa175c466a6ceb36056acf556dc254b58d872c270710612aadb9f2a7048a479ad18a34d808e9cdc8e41a48092563e7226c2c1e1fd0cd2045ef71f90ccc5405988d4b2ba5542c56472ec5ff1d56887fb7d65c27c819fed2341c3f9cb3420096af5e6b6d5c38d3e5a9dbd856f21dad8730710aad07e61b2179aac91a701c3177dfe1e819c0b2e53bd068ca95ce17ea2037e2cc578d4fe3975ad187a8beb7e7dfca56a0c608718927f496c243e18dc8954a8973ce7d6e86bb541b4bfe61ec1e922cbd25ab6d80982e6b7a22a515a0f01f409cd3d0966f4b1fe1980358c132d43cf4407155e9495b2adda51c7a3559ca2732aab2df3e4e382454ea53ac6c2ac07413abbd2696c90ae1e6ce5a2bb71c5e75d2071a992a65a7ddc7f88f94157c5460bc8da0700ac4cefda332dcdb27dfae2c902e3e54adf646f0da46f0ea19658bd51c07deb653cb9082cc95fcfcf19f4a6e19cad34bc5377f09dd65e93779876f0a2e63106842957442e9795829667f42d9f032fbee9b2fc6ef3ac11b1ec31d1fdbab2fa8ff80900b6c6e67037c98cc12db38890de9afd696fb3ab4c68652759bfc8a0ce9efaea4cdb73534f8a933703fb975994649f7325036d728305cd41067a0dc4491e4260de32db6e17a9300bf7921e8159dd093578dd621ceaa8fdc23d71e492608674cb327141144982f3bc46376a9bc4e68b7e77aa190570eae84000007718c176ade64e5a59895b6e1b60a7d41a44925d6f38582367b8f63eb3857708471e524441d883c24fd9c3f51f8ff01d1ea7fbbf090e07a2112cbe4d11d38c5564f405de597696ccb2e3859dcee228ee493747ed16e196810e5ae8fdb3b64c4f81f4b5109a7b7633bf81582771a1de2f0bef400d10aac361f017d75e081d4042a973a044fee18b6a8bda988ee1d729494ee0b7dd7b846d21703a5874986ed46dcaa92ea0e82eadb529bc298dd183b0533ef39002baaa5ce6a0d80345174fb760a16a9a11058c8f06a461f5376d8ae466fcecf143d3b35cc12384be9649d38f862858dea067ea18b7cc008a1a47bc123d7ee99d079427644156a3cc8c09d7f804394293a9a1a060a0a586c8f43f5a0f7c85850c6243643823407014d33fa0a09722e64c0a72dbb9fb5e5fcf40348a2f318a01308a0b218b7acc0fd3c3157b1dde8d45d7c16bec5542afeeb7fedff9cfbd2449d4548086af49a67afdf4e78ef33a9041a3d55979928107e2cb60cc83bd9a675f1bad792da471e90bb653ee77b39b9a81b862f5509551a2987fd57060ca53c6c098a2c114a0acc62e55b89ca86bab6f77e8a69e1252520775515529b1bdd6c4b210de8291c7d0394f917c4a3cc0850b44312e00bd21ca8466fbc99df19933d01fdd519593738c69382d417a191b6511db0f75db125a66a0b671f784806dba8acbc4f19993311
9c8ec8cac15b22f501f58d45299c5f7135c133ab7d2d547f8ad00a94ee5d7a77526db62e950b6090c711536654c8337b68a1e6e9a7151801ef9a3797885e63ee1c8ca3c62faa4a45c87a845b78245f3b4219756e367b818a7716e0b1d196043ca22c981c5aaa7fa79cdd20a248548155361570fd1d2f2c9e99a380764420b8e742c1f8a36b9b81ca5d16006e0a32d07c17cc1f0ade477628b933a4a569572450bf1e86fc2ad17faaa8973f0cd075b8fa4b9924a3d2f2da426b4c93e806f51ce7186be0b88ba6edab63c21d43501f72a42b1cd46efde36953f2a7a745e2d36fae902aafdfaafc94e87f1a59ac8562a4d49af9fb7c64028d7663af1cebb5dd634375ebf17200c6da9b442e0467946a74bf61fe2433a9c2c426d0fd240c2a76109041288c3d0818ffb28145978938f1a9ecf4c08389834eebc39751ba1a63f4d56dd0d4cb907ed00b1dceb2cf6f14789a0f405986a9f68a3892ae8cc5642b00626f082d544185e3041926708a7a13c4a05bb4bee897029a7d7db452e2848270311573399a447e58f362cac51059af16a4a0806cb29500ee36052b5b71d5a6088d6be342095c8d85654d139c28ef1dec1c18bf64269c30d6be3eb47edd21711406c8d2b9a71f81228a9f73dea9bee3af8a3e5ba2ef592a3a2bfe8913cf51637471f467842cbb38bc19ba3db8a790244198d89a3e23c35460025781d2455b05f3f5bd611241d59ddd6a37b1ef7b695f19eff38799098db5fd2bab15c066ffb083e21617cb8e705011fcd88476725f479682488d8e0bedaae78d6d9989d427ec6da19a536c4b89817cd7f765451b1287bc80c45a2413f462417ff620c312474ff2dc2c31676468a1d086585aba0e1d2e8d710d778064154bcdbda2343000e86f3809fd5ac8b616ad57762d72b48ef096b8e4fb095a0b582c0ff68032e300a829e60a0c877486dcaf82736c46a68d157239f25f4fce023421c43bf10dc8b3980b839ec4303d905927b99a5b523b0be53289bffb1d5e5bc0a3fec4e9a7a1f52520a63fd4f697c777a0cc089d0a9fe48bb989c1db8126e0071d80e93d04d49b7fc9d28176d66188482267eb96816074c6b23d97b7633367b1f03ff0a4d993b199273de7904f0fabede261ad5888d49e30d7957c044cba9c56e6fd916858fedcab1562ad5c5a19297130d56c411fc4a18f13a7148a73ba9d5db5c45d2505c95187cfbda79b3c2ef814effb7dc99ed9ca87451185335f50002759b1f94d26a6611ae9525dfc739fb66c7949e4b2c4eb0a7774ae607220f5d973f198db79bcbe793898c0f4a36cc061a5fe8ffa86bfc66abc6e52e9beac085759372982ce0a543c92871e134e7618c4e8ad6f43c25b12cca3fe28978e13c9e359f68dfbfb44a067ee5765cfa5f6627497966b5b0ec8034f8e7eb859ce8ea601c92c75dcf262b3c36387a734a019b3bbc4caa8784ca38f7594ad87cde7e87c1f7187fe1e11e319b2e0e297c9b1d1ee6e6cff3404efade0a953d70fd66ee5e7614464793f1dc099ca21d99a937fea3fc311de1e0f4bd8351faa1bba18e09c6f968b9d4360e6f1ecb7a6c8ac54315de5f1c6a29d6e1c8d148ab32c60d77e848c10ccc73742a29cbf393983c3ce29261284c54f32fca47e0ebf0e0cf7e69a6d33e42508e1dc635499b480a4df9cd7364cf522e7ae2c63279e1834c52f1d9eac4e6e2d4598ca09f72226ed112ec51149ac68a6c265e053fa87e480a1f479b0a46d03d711eb08d92496faffb44b339cbe5c932684cb704ab568ecf6f1caca31578756be6badffad3634465e9b119f59d8faa4d5cb8652108f4a62b52cace3c9c01a9a892af9bf82640e6904380654bf12c947706c1142a6b94c9300f1a7f035f86c9e76127aea575068ebfb9eeb1b19959f4ae6fd394532b4b55c913f49ccce76e165b21cf387cadd367023deb1e51be415475086cf4eb7d74e68e85e4adbece3e01af6af329f77a7608a9d9b4b4fe5529e706c64ad9075555e2600d66bc5e5dc522de0d3c6dd17df6f680db3fc945f14171d9eff5d329cfe405def9bec2cf5664f5fc9af6c4aba73f211ad944c44f4ca76d1364f65c362d420dbdf76aad4affceaeea64dd4222ffd85a3f85b7fdd0fd469280b0337279a61cd04bbb7e9ed6dfbaaa82a5d5f8d31b2d65012b87bf3786c0678d842d8d7bfda5ff595fba8fa81ee009c7dbfb65baa6da69c1a4640add054c3474579a12d22911c1a97169cf9ffbe38227866b32f5e88ac678d2f6bc88658569541a154099c7ddad6bcfc46ddd0c85105d9b439cb43d4c2802cd1593f0b6a820fcb661ea463526710b3fc612a8ce9885591b90c902d3937ade4a02f75c2f5684a51c8db96e4a6d07d7b0d6d22118c8bf75aaf2984a93e02be6e9a42c84186bae221dc71f476a81720bfb8ecddad2947ff06bd3658e6f90fb7f0af6c52fa6edb5b6cd86ef2b8bf1e349401ca88d41b2ca8c721b4a0dc6b39bf4a906958aac0cda36bf06ec263ffed444c90b6ccc9543
66e5c22fe2ca5e755e5202aad0bbe270d4b239c33c8fd2efb42d1b4ac6f770eb38920cf7c34e671a2c430c3c93a23e59d0d61421868e2706c19d3eba347606addaa9c93ed5b25b32bf97b30a7e1bd9b98592ddad0aaaf1b6d8b4ef35e4d372add18dfd789a82517fe52d00869fa21cb314a6572baaf650f31df41d22342b3c6a6adeb720820e979c145e10944048d1bfe7c07bc016f69813e5c0ae9387fe54c754a8ee29df30f04893998e844c43005d3488e47099b7e6f35f5ff3b4ad994af82f7c0545ca94e6dc96e08926689c44887cb131b2dd24b9236b6910e213f5cba94b4dd33f99d3838b108ca04759938483a9b29000c581e0df21ab7a3f3187f09e151f6e4cb18fc13510b6c2d99c57fc9de4756dcdeb0c651e7107fa18320986b428303af513f762012b61f332d04b0a0b2c21f6bcd08407273dcc1a9fb30a79a67ccc8183270c45b5853a59385a7421010907dcf84f96f38ee2cd44eb2542cf66dfba483fc8b8e052d406ce49974ccaaadce6cd08ff578b3fefd5df5f7c2d0ea9a052a9e3d696b1aea5a42f438d7e0db27d69bac06ef6772d44340f9c90cf7428057343953caf519913807010e3fb7f38f5e3cf726b004d0779c046433719911bd150762c932b17757e29484afb54a5f044ae9d68045228b520a90361bb06b51c169ab08600bc26f5e06ec2c4a30db764e35306a28cfbc3164274d09aa0d98ed945ab0aa8274dbd18e6d85cb819624ea2bd72fc4adf9f10a64d519d74479222035e09aa65c7a0fdde8bfed3c2ff203d081e1c14c257c1d9d11ea8ff4016f1046a8539b2330db4859570659a2b48837fb770dd94e6dabf118af6cf4a4799ff93efe2e9fa655250aa57b5b5677d4c8d27a05b37616cdcbb172d404c05e38561da62eb144f63ea040acbdf097a560bcdb13f61e513013bb6485ac5a0d022cfab479b771edddb89f906a08a7a52828a26233b0be258c11c84071958d98d04ee5f980130b58a2e5f215ce978b54784e42901cbaf0d794688e47d631f282b122b432f951d5ca83f31e6701dcb3ea4f189b5fd05dd979bad15fd889a8e1a8b24074ef0bec3922f2e09eb583d560dfced8e28a1f261c2368364fcbc2f5db4ba139471caa0438f575ffbdc48d39708fe760cf87fe32f1e13a11be830b1e83c42732063c40b23e9c418fe0682e28dbbf62f738f3b9b789b73abd460da3f47ad770815b042210228319e818bdefd5b236d2faec84fdb6165913b55d15ca77b029ae1bfec51598c55b0a0df20ace5a6bdb34a69238409203750383c488c1863051e1e23fe8f9dc6b941b75d8d41a29f90b6b4cab9388265188f9d2a170cd67d561a9a1c746beed657e78a05840d4ccf207e63f1238f9aba01e3fc1191b75616dc057c66da126b43eac67023fb040eee03969c44873f882e6628534c8fc99359c74a509dad0f4c64987733a8b3ef283c722cda30d6189e920a97c61d214a1a8fa7a8f52ff0bd71ae55587c72eeefea104912b4379aad4f74d231caae50ea84f1e8ceb84cae893cb6414b0665c0a1c3338e4c152125314a7fa17f0c961ac010067b612d989f9ca9657645d61d81e3f21682e30525533bc708e1ba72f13e91df2f5150bc7ec0288f2234acae5a71ca7f2bf1abb4e94b79ba911d493077d8aff1816c86ea06585a7484a0e020b4c4b57b3c3b690234f99b0fa72adcd223cf7c15bfb5f601d97e58a0e6d9432c5006d5e074503dcd81ca934140ceb13b9bd2cf1b6cd913ed685517de48146573f79587d1da860d3dae8a2a60a565d04f1e36c61bc61fe580cb4bf15124cb6e4941bc94fe8cfadbb88b718da0b4dfbb468e00a0f21d09eeb574921ac0cbb425227509476b9f4e998743190f05f260b07b026ccd8868f5b5538066119eb68ae4d10230997508f3e5b3f284c9281028ea26040ded82a6eddfd4d829f759dadea2c1a4409664f75832bb1bf7b096df54be7da4596ee9eccc967c183fea8e232a9b65834aad2514d27f2d45b340c1eb0d7862ebf2620fc4e07f04e732be90c81feff04034664073c8d583ccdca5ac0740eb1e650465f3b3401e6e5b863e22ee3004927e0013dbb3d8578e90124928b1e07384908ee546599a4488077dd7c3f60df7cba6ebdb5dae02cb5c4aed54e3c37ae04997a39c9321112974042e34bf692ffb9e6400eb5dc3c6408fb29d525ccc86e14819072108ef8b940dfe8458e02ded7527c7f45a0355e56c9e97b61a5548ae13b7214d6a77a30579c4a3029864c998a09014bb4506673074b5b726dd149a31118f61903650923b906458db0afdc7e0c0c296172ed79067315e1042a86a64738305448722d56ac217100147d920ba1e452431a137a225294e05a595453ffd4621ca07113ecbe842425576c8e22530cef97fa7896e23b181f5a07293937469460f155b2f375f80903f870ed5f9cc3bbf22b68c1b52ed18f79a1f63dd676721554aad9a0cdbf3e29ee8bc178ac92d3bc29a10f051823be3f00ee8ea1508ac98c
ad1686cc2a573d79998e4297ebab0d750963e87f1caccba3294b2d5a1da04d2c32f4bddc883bc1820db8325ad01a5cc737d01b0fc0002f2c8acece26a6a9513ed0f1edf5289ad33f023c74673e65085b3559a98526440c3d852c3705028182206f62951101db27203abaa2c8ebaa223e7fc6a5be3f29ec38c95303a429b8f5181c8685b595a4da48cf9fd3fb1993dcc4b396539278c40383c973413c174fa54a337f5cb7ca09290a5f49edde4ac2400564e160182f3249a0f8da737487e4f47610862518490c0743eac22f05637bc5501f0f5357802a5ce57e6b1ac7aa4b012486a790abd12f3266e7321a57bd2e24a980d8ba7e8836ae8eb5984acdee6b2a5ad87f80c2ca9da8ac8dabde36763903461167a69c02071d06a12ffcf6bf6c3b16fb8bad8d2311251f80fdd3e67060bdff8d05c2dc31b25333899fceab28d0c38c187ca3aa64ef5e89f5a7b9274182ac2130f9e342809f4f4068a83890606b57e8107732e55fabd1ba7d62343a07b36ba83fe04467eb81f6b1536bcc2dac9ee49cc367c455be6d5bdebb9c9d702030e64ed0a17c96d5526e34668eecd3061a0c495507efc95e6432f57709bb6c95ee843db9ca1514bb8f0668ab01f0082e9e7bd5774c93a2c95c7b00569f01e8dc34bec2d9b2ad40dcd42c600211357419d0347b7dcd3b63451ac0f24c61fbd82d8d5a96e235a909beb4e70b2a46f3c20c9f18dc2714d7712d34a3863c110e5a71a02f12d1110d4bd807f0f60b29c605ae4e54dc90a10446ba46de1052076d6bb3714403a2d6b063fe4882bfaa5a726e5e8c88a0d4a06f5b56243982f6f805887ab43b343e84603dcdd5ae1645b25acd94020a67a60c02d314748a80d45a6fc6aca6f89200464fc3b485a5e95f0721613c6c0915d4b653c5c1b00e4c84dc645dd84de64ade0e4a0348f27be572481bc0035e32011c64350e45943224a71d117a6b92e09e59662b51cd47765e6f0e8109d7cc6d43e1212eaf91839468c92100a6a9e6189566e0981c32a0a7dd24fed434cc21839b4fff4d43985c1b68ac2775c175c71b729223142bb66f550fccbcd42c28acd7fab4149e0ab3fc4553f1404d51b25a5e6821abf3bada145033f23d735e68aee28700705184f5a5067a30884a5c29ab3cf938c31ea3318b14b041e5c752f7b45c2c2dc87ce82698622acfcec3043990afe688123bc2e94d4be1cf9b8a4824c5fab05a24113418385b5c25684bb32dfba1925188b569216953d6072a1086382f9126689ca765ca59625abe455d96e95d6909de2ad713847c2314b255011f46222abb6a058f9b537685f6194987a3ecf82dd404782105da2b61370b532e559096e5ab6d0087aae5af5ef4b57d589bef549d2799c863811f2d8ce73fedd564609ed9d6b8ac546058a1a1a1c43302c7d097ef62d880f658e1787af11b8bd3725b7e0b74e39946b6aa890994d62659cc69da5dafda9643e27d577b0e571124185c4334ce9d56c36e8fbb96541f01602d61d84394311b03c153cce91a814888ecce45ef04563b1127a93f02ae8a63b137c344a9adf548b593a5ed7b0fae39c04140fb1e8f545c7c0e4cadfa49c2942ef18fe535f3431335208e91989230f3dc142757fdb1a408335da917fcf8c8bb20bb5cc46ec132c68cef00ba302b792e0df6119579095e4fc7967794261493634ae402cce3e55a56d51ce6828927687b2dfbd5385cbe1cb52cc215d623553e68ac0098b2e12077faa737ff82b854ef1468d062302a65f49e7c34b50f544911c39ca3db7687bfb3f48a9f51c8fd1d26d44abb4c84218491f83150a507aab022f4b9447705da1c2b29fde6c7885c0fa4c84c5dd6c7ff453ca88f852d30ea96cbdc4b3726b6e683b93afadb3732ed085aa15cf42721d6f08916f551285133d2c014563845fadc66b86d072c737d04d4520fe23d34119594da97d909ca6953c0e4b99b8a05316f6e9756b1e04690a4c226b832e2cc8e7b5cdd75c68e3e05c66a1a5044359f2fc5e91e6e75a0e0dd1f87e3d7a508572a3348cf4c6b0f1c88b1e561d9f51bb68263fac56bdaede28db713695821707304b613f6e8115f1aa5fcc8480dc34fbeb775e340a152984b41b2949ec3adb84c11467e6c8e7b0bee2fdcef4f6d3c4348ab8507eefa9a9c1e6f4dd7700e7e3a94934efbcbaef5d57f119cd5cdc738ac46aa5090d76abcaf40984a424df2acc542b83d168b431b48424add0867478ef62f5a2b7cd77994158784c2387006974715d3f00ed4378371b72f87d0d9ebf3fa6cc5d502e35a7ea85f211f25940215612f5161421f534e828b6cd933048395298b37b084877273c85447ed731a834825653a58f229df6319790aa6778041ed88b769618607062fa8932380ec5eef78803460d8650e2019249cb5b6325297a016f74485500d3295727a355377b85c51d79e236e98127d114b8a6f998bd9ff43f11351f6862d1f438bbdfa47e3d62ed3b89b31961b9e00ad99ab8e5e8c5d656f3821cec7414a1
[binary content omitted: hex-encoded binary blob, not representable as readable diff text]
ff4f0c91be193e73d9847df7923efeb8af92bb0cc29388859d2de753cef3dcff3b0d14f67352884fcce87386b7af75226266751671920aa643c4064c24b6573b5f2cffbee57430ee5ca769cc80a37eac8fbf465901513fa31d207c2cd9187c9a15cb532a593e67e8c992b99679cd1a20e3ee23ac76bed1c4dc3220d771ae6346c35bc6958d370d570a6614cc354d37a68325bd20d3ddf84e804866559668018dfeb6858140e05c5c6997dee3599fe419036927f99785ad5ac9fab0dca7cc38b194ebf99698e3195319e574215fe09cec2064b08622d1124854f17a8f00206184eb860c40b45a67015f1f372a1228ec1880e7810e40b31306204533c13406090052ce0dc60080a1c90610a3c5450a208424354e002142c21850b6292259a50e288248a900212f844f1391940c10c7a3084263c31849e9682cb0b5e2f3a28c2850c404e30051dd0c0c6064a3042838ec213714827db11d7a91f33f3781d996e01120001105860851218010a43c8f04fc651920439464b28e4100432640b6078094902178c90420c839024987821e182127064e8e418ed0109a4bb5bc0220a2ea088214884fc771510535ce108125960020a353232382d542419c204257c694812920cd989a28d88d1aab90221b050f23f3865734e209ce03486960d5e77e698a4a5c4ea6e392669bd2087375fac75840854629d2e206889a00b77340624b4604449e772536031c4e59cd0d2e4de22b4f45c151c548144122c7a902881cb1297cb31091244bcfc5c9b6392140c3591022c244981cf08b63ce79c1fde92c9a507979484962e5c941f38b92b49bc80e0d61c93a460052d65b8a51c93a420042c4e70bb30bcbce0764153b82b9ce042849b92639223861074bb1c931c8193a74d72840bb22cc724472071841239bcaf2db1f5bb7ef5639e3feb739d0a658a47f6f5fbb152dddddd3f1f188b080cd73f7eecaa0f99b3b7d9176b267fe6fad9c7f02ab3cf07f619919fefc04698f073f330823df7337733e6c434e8dbb79c7dad6aef63abdf8f953cfa70bea66ddac87eccddcbdf5eb3cfd99aeb5bcad5ecbac21867e69d281f027165ee7ef4deda683b9c59f30d7bae9d6bdbdfb62fce689bf623202edcbd0ffa2106f2fc1867b6efc78a86678d9a7dd0b7000fedeff3d03e763d00999f8f52fd7cd0afdf7dd57ee29a4bdf771fa32bd63e1fdb67a444834b7af9140577df03882bdf27e1fb318b3e763ac8558e31667ef7230cc495bbb72c34f0e062df7d5833f65de54090588510a4b005308c810c65a84246e989903258990c10009151fa4efb1a58b5d1d5c82ae55429d8a7e02036e0e1bfcddffc354cc30feef6b28a1c6a6fb32aaaa8220cb97e7735b4af5fb87d613552faee8cb05e46e9f340e4916183538d996d7cc08d3a5678e5b00aaf961f130881e40a645908f721678d9ae777e2a1fefcea03c5be707ea4e6766064c3704b6945054e0ce34fa6df12b336d787a802e30e3562ca45a68fd11d0210552fc20264fa5e0c6e1877462db8a109fb4e35ec57dd3b4e33b255dcb17b871e9e74993e065c6a1ae02428d31fa04c5d5408c9b60b4dc8617c92a9c4c217981135b9730f4ab0e97cc08ca8c9f5adec6cc46fef683c86e18c1a617d9461c3097b99b10fa4822e0141142c433726194293b150084cd2549d2bccb732ff200872f50747cd380459079025488425bcb27c2b7110334fda8006a7cbc498b923c6ccd711744399f940f4e0f1428108a345ba983fff07937f3271c61bb4b165f67cc9f3693022a40862e652839811ae94373488bd8d7d630c06fec5ae0dd7c11d31067b1d3f37bc197b9f4ef5632f8960cfa3070857c57014f6d8f7a040849143bac01efb1fe24c7d0c08c3de562c63b82a072acbc776b2c4c218858c6552568e0537ce78cd14838d302195f2023a9b4e12b363ec8ff3d24db39bf6b560c921e1f6abac670f2257d837eeace14a893438e3023a9b4e129b0e6dd55a737b602e4aa441ff6891069da6417f5a436de80d116e486926b5697db4a6c129dc90baa88be2a09ae62929eb54f3fc9c3853b956d97728c26a93aba4d9ca9f78ea6444b0496c3a32033c214bcd8df3e9377129c62faa2a96d5ef363e0257cad3d664c7345bc9d520db961e7b8a56624c471d2770b2632dcb6e8629c6f47b4bdcd913095094c0e6091b1bfada6fefdbfb97c195fd6643cc9aa669efe3dafb5777b42ac6a0eceef4eb6cd04fd35cf35595186894fcec83695022c9e92091e3907d00d91f8c33314465ff8b4758d4841bfa4de637aeea55b5379ab05fad225dc50fe673e430c52fbcdb17a3ca6f3e9806fd2b35b83dfd5ae86ba006fde94a6adf568a1ccd9d2f6ab555963f7d629e3f930a71a67bf91df631c27058bb6fee70d84eb43c4bf0673b5510637676e8b7b3246b79bda66d6bb52b6947e23859ced77c899870e37c89fec655a2cfa54b972e258fc9d3a05cd2e04e6b67a509b9d56ffc6689a9e5969cf3230d06
3125922cbf672bc6481efdea14edf1e994e5e1bf752ad22171067bf9d48838c3bd7ceb3e4699fb88c3ee0be564f9e0bf7138f49fac06e596656f2dfeace12024d68a342869f01cf29906d1399c2d15c4991548155d515612e1a4c1d4018d5ead5c35a38828f95264b96ac28590b6e62bc6c817c5e0fac71cd252b6e954385979156782206990a5936ffce4eef22334b3358b6cff30a6965bda8edc38938731b594b8a006720feb267fae82c8ac35e4f2252804149a6ece4fafb6cd6967836edbcbbc7d3cfc94fdf6356fb2c877ca36993b88dce42667e86890fa2a87d3b36ffbec66d4669f06c618ff0674ea54a3e6fe1e59fee179fb6e1cb28a3ca375cf06ee7b258b341844489c914c34abc3df60ec3e6c189d235bf2a59238635fd66b7ad06fb60fab8c9b94b2255d484e6e3eda6e3411a2e7001019a2a738042261f8105d6c2f9590b7e7beb02dee01882bf3f8e1d98806441fb3df089fe1207470c978850e464a3a159a4639b58e72b81c97ac3971fb5d5ae84d9c41f12f7d2f11c14e6d2e911572c3e5d8124e79894bbfb93a05b32d11533544b0371f44315821299b0f72db0fad90949c18c37dc8e5e4ac008a31fd304c6ef7a39c58038c941f06400c2ba9847465f985f729e90b691052846f463941c8d087d87038733044ae5c0eed4159d6e42a7aa4513892de90726e483b2552ce6db05fa5242d0a5ebc7871a301386ef0b4a63d99462c10aeb21fe8f409fd7e1ca24a0e1151fdadbdc82fe24c47b53fc917b1722862e50c032b9fd99d2d262fdc1bf3e795a11fde6c0a7f44b1cf7e843bcc590ccb80495cec79c8ac90b1ffe42ac6487123cd366a3894a138bca1f5903162df8656d9c70d6854d70cbaaacae8ecbb21699ec8dcd0a0d47aa362d42c3fb8ae92328c682f9f5c05b44afbfe1846b4efa6d1651007178b021014bc906d733668dfedcd0120b48fedf5b0c40906786e34cfb5bfddc83cfc7b58bcf917cacca0c175bff6909961c3fcae70dfe77b72c35f61444e50b80f06223952912311a8409263d79fd0951ad88255811d8d4e70e3c79ef17bd54de4863753ca5dac66dd0cfbb6d6aa6933e6db6ddbacc5388ee3baae13c518e37d3b1a89de5e6eeb2a0a6ef4194914cbaab691628c315ad98946d75b1169326f58664b39ca5d6e84822bdf7a9d0f2b049f2ab2109107538e3a36324588fc901c3f08a54e727c1ac4f2f12910508eab56503ebeb75c65faf85ee32a95ef3144b0df0431d543434c1ee43e59c5efa056f9cae3f7ab7bdaa77f1ac87952be4479dc67461f7f6e1f715ce13865c4711b674b5ce51c933afa7384fa903eacde8757241377b695a3344d037ac5b8c2619a8625e6b84945482d8da5caecd8b74e3121d934d8cf22b72802c85577283707544b5886528c1b718e67a63437cb34addb31cd26ccf1a8b0fc0e4d83445a5c11ae099bf61a6f8d3ebc475837c5c7513f8dda3eea9355bb559cd1d7dd2ffb5557353dc9b1cd225c0a2a0939197a4243d221b51c95e3aab1211da9285cc5259fd24f29a8245472d2a87e22de22b5482ed20da9452a426a71399680504cda86659e72247346e11c77d5d53220d89476c591c156188bfa502021934f8ce1388ee37ea440d2931bd45e8e1a12f2f9096a31e59a51cd6edd9fa89bc1bda8d3415652cda66123286f9b4b4991f9c4391e65d3abc6e6864681f326b72f2f2fdcbd19cfd8302c894d67c6c739f6b2c55c4d83cdbde0383087da4bd32a90f6d25ed5a7de3478c4859313a44a112b123a4485a88f467d546cff0cedbfd361661f981b9de086d4470753108e662a35d856495ffffc88beae3f6efbb81b56901c521c0dd32354892cbf6b10aa24911dd582738cb2826dc0802883a56a4444505bb5d66a692d951a96a3fa45b8e2744e45527550dc8a5cbfea6ab9aa2a31cd4f6b612c3a84ad2810a63e1a90e6c372150dea2ae42a4dc851ab4e695144908898d27a22d84e72bf6bac97d0c532773474982866540002d0cda0cff2846b8300b88ab91a9b23987335d8388deaf7d5ca949538cd0ba56c587bf1b88a2725673368017bed05a384ce06630c4b8ccb1883f2c546316995e994ad0bce314bdeb01194a7d8080b0be569045c47c9708e5732c546507e7ed5515225db2cc338529106358da5e6621fc34935da8b08439dd230ee06ce71295b11477f465c0b1ca4749d0e356b9ff68a31fd2d72704352abe52a4fc851fdde505d5d96894413d44141721051476385eb86a416a9857d6165c134c751f2517074ab4221a5acc0811b99e08e4e60eab04fa586bef68a33dc8a13951aeda552a3bd546ab4974a8dca8a904a0d07f4d32a144c6b39aabfa6e7e6c71594e34427f7effc683c3548a58606692d577147381cad8643a2b538259a8b5ba2b538269a0ed7a3ed703f1a4f86b557832cdc4dcd4edd8cecd35e5935719b660bc089b0cc26b82d1df6915a284fb2edfbb597d6d32a949b23ad52a969951a9ccb2131714a9
6ece47efb851c93dc3d2a2fee47fba9715529a851fd252737a5273748ce10951a222a3b294578728a4dee0f555e4a7248b279e968a864ec2b01b5f0011630fa9afd56c9289854a451fd2cba19da47825172c31ed485d16cbf5b32e1c03916658a8d9c7e9e9e3269d58aef3f61951a5c7d70ad71f24286fa501f134e3d52937d56301542c174c8843915ccf938aaa90f10c71363fa2be65acd15e16a389b3168dc13d157eee4834c3fab55ff0aacbdb056c21a90a3fabfa361cadcc7dda03c0a1948addc1c50de380e0bda0a729308436b4917402a36ad42f97e951a14acf534aa7f866e06fdb4d78a19047142aeea1367705ac51d69547f4fcfcf4f50909327fd2c3a1a2979d67c5be63559727fb1d66a942cbfc581dd8cfa60a7c3ccdad3d0e99092edc7dd68e1366d01e57e0b76353a6b8fe12c6bf437dccad2d4aa18abd4b4cd520506e758b339708e6bc6f03c00e738cb58e6629c69b92311a9e3eca6b108c04502bca52733000214e00603b4885834205e5ba0062b2bb8e1c7f0c5a458a0eeee94624e29362526f3a4ee0f4e0c9b946219c3308c62189d18c5e8779ade848479cf5ccbff9429dacdc0e494fe4e1b463e58eada322dff206dfa8539487fe18fe1172f029b06a8940c179645ec10d10c00000000e314000030140885c462d17028d01355b40f14800d889a4670549c89b324c8611043c618420c20000000040000044686030038e9a79673ed379b84bc61c0f8b408651b0301048105e2e98fa46fb35fcd229a6cbf572c0a942f0da41d82a02b8b977dcc6c4ac665a0a0e342f4710835f1f6feb1deba0cd1e7c4d7333ab9bfe4da659599425615583da3d297d0107859f3ccdd33462f63a43f920fd59a871dc228963b67cef06ca60882e5f6438e09d0f218d1b6cbaa7a6ddfe176a8ea382cb91ac6332768c9675ed693dca76d2bac85f40f5e8d152609abda3c646e999cd5cd55a64bb39a10f04d987477af93c5c284e0b13a0149f01ea33431ecd1c72e3b2280396de6f2d4fbc8885fd4da71ceec13c560a4c9a793ea70875cf32011787fe8c41b6dec1343498d047307b7654908202d515e2349474bc9056e7568a3a51e3b036a9875ce2370b4447f4daaa7f0cf9a1068224e0b86a117eced5ab3611f76512ddb5fb1a05df81e55529b3e97804b889f58565d024939ee624b3a0854c2c1d533984f4914e020983a0bd4b83824b5b0019c90cccbd950a330e4b9b35fef45b5dd658add3796b73d1a732cb04fddfc76a541bb1254f162a2b90cdcbe05bfefda9b156ff5da35cd4e5186f8f7d9f10ccb2ea7fc205c055405b3291d38f6889a7e759426098a62d6dc8618f1b63c9e863fd660513d9e2b1cee7d43ba6c37d583b195435a1270ff4f390fea69d08f1a2eb599e746fc260f6701e6d5813279d40fed90a916cbd941b5904d1fd170b1378464b1fc955417cc61392b32bf5337d6281f66c1c99a052eac6794c26b600e536072eec653441ed7da7af221fb58b11c27c0be130d21128eb1eb11128fb3bbba8a7536ca879a81a5975ffb2fe59929d80c2df8c21dbda28fa47c5348493d53fa7b896a187fd937a820d2128d7a165143d03d3a294bf5bafce7539f0237074e224d4925a182a4f745212123d39789f6d3f3bde6a714bea6c22a2c5d4d3845de8d0831e1b89a3ca345730b2ed050a2c162282a269dd511d0342791f0d531c6a74addcad7c82739a81b132d2f0963e270b905cb0b132526932929a72e73811ff03469e4400ec988887902bb89d6c29b6cd228542d7fca18519000373d7a291e166e235fcc8e45295500b485cdd404b49f344cd701fe74e7711ad172dca022c78201f8cbf03c42894aa3d2d55e53692f3fa7fd316916060c912901675771bfab87b39e87e2c298ba4690b1e8de03544630a195135afee58208fd53a540a132614277802b0bb1d7aec791efd0ca9c7881c17096c68ef1b18445d64b92c4e8d332b580b9d2ba0c9c5e9f002a21a35c9209c5ca8458dca0222c0c5221bbe043a598dc438f594086352c6a19890314485ce13ded7382c36fc7fc4f7702a32d45a0056999e254f0bf11f2328fcdefea256a90b7d60f99debfa9663036bbedfea862cf523096dec6c30ba28ac3c7c942ac1c1f82e1530e7bdbc3cf9801f799a358afb10c740ef33d6cd90e2c7772d6e394a54d7814fba2c505ed1bffb1721eb2cd2c914e517202b109ac44062327886b5c1a0c971069092b55c3e06e13fd8458da4b65a1db8c927f3bafa29e3313cfbe2a3c065e4ab87fa6a7336058301b20544af9412945765ac08278aeb85ca165b401df0a11f650ead212e88a2b342c461307364246fdef3c557f689640e03a95c81d05044f28a04610d2ef654a083ddd12f43f835669703d68b511a0b99bcfbf0a3d032484096d8a584d104fa414eea72bf143191d1b46985a5db038da83fd3d26261b170605dbb95abe96950338558065bc983d4fcec77e934cbd09f098eeab1ac62fa56d
d826eebf0b8b56859d6643fcaa88ea6b93caf52dfb1131a1443b610682dd5206715ae576f3b7c88c8cfbf9bc973dbb564b4ee534225e7321023e9b8b2a74d510d29eb565c2ac483d5c552dab413357add755d40a36e75512213c9ff6aee02bda61df985ea212a0e747299cf2a3ac76d596ba34bcf87971f17903b992015f573100c3d10ccaa733ebd22ecfcecef788d39a9367cbd9e44efec68272f2ca0ef2320c6f6ae1b97c4128b77c88520df9380d9d8f6b9a2966f4c73ab62719bcac06e55ce8bb8ceba8c38ce6fa5e93cf67a6fdea23e4c9a159c0a0257955599eae22516963a9c977efe99a6c6bde4656c218ebd72c83dfa1b5e0a50e9bda1297d6db5aab2bee449dc99476d88627b9e64e509d9813aa8b31558188ca75d58fcaa8a49f46e3167825a2c5f5ce48f638230172aa44b7e00e89f2427810c8db3752b9e49cc5bdad227d5f2a225475851be063542f042688dd487e3d2d51535fdb41d1368f41e74b8fc6974ab3282461329d46cf01291f49932c63fa9b970780cc41dd47bac971b775182385226e146c621c3e8dbcff37582e2aeb37854bfe93fe06a1ea8208096c76b56b03c5da15ab96e7c60f7ea3dfbfd50b5853fdcc5a726ca7aabe5c056ffd35768a731ea1068f9d9dc68125edae6c24a6cc0c2f0a7d863dd7e913da23d90174580ec89f00e9f668dd8bffca37c9372eeb859140f03e8145e2ab0f9039489e3c0450e424c078c793e621fab94c3e58288d954bf0362ef8979c2406d6a07cbc3b6f75c9192cbc21cffc4ef11328574256e1876967a083a1c5c60dafc55c1fd24ce861084f694d6e8ac78306f5e33fc0658077197ed76aaa25414911acc8119b4d23cca209840aba63294b4c5e2974ac5442394ed353d52ea13ccd3267bd681503782f47893250100d5837d8009604fbcaecf51a7c3c47fbe369c0c2bc7050b78c3807e8007aa889876b359e5b3c0cc69da72900980321d4ba2f231e7fcf5cb321870d04dde470697eb08c4233480197bee084ec70175aaf0de6bccf7b0e0c3147b2f81c1a96935c98c5e48811c2680a163a5f2b25b7abdc4acf8986b6db134b9d670a02bcfe203134cdfb3d361032b1930f999aa3fb762c46bcafaab95b50e1775cc5eb0dd54dcb60e13406fcfe0b7e062721a9441e503ae197fa3c1262ce751560e89f4d6f37789c90429101cc4c772ba1b563585d95ced93e9c10e04c06de5df2947b1c2d09d3f659337390d111977ff970d801d56cc515f22641651433330854bf36b65b4036a2005acd18aa348094cace169cf9a5d951282595fcab4bd59b2b08d232b6999fb5ca2d7be494fd1d04baddd76a33438fbc31c58c5056c6bc2b41faa7d262a28b615c606812edb3c215f4b56614143ef3ca195d2075678495eee6a50243055e7a20a417efa88cd9b4858e2068974b40824c910db676f83e9bd20f399a89444b1fd8d899a88440894c641ed64eeb8d564044107e6622c02ab17287dcf4a621840ae0f101108635413e3af4e24960a50560acd8af01515dd76ba3042c8c9bb2cd67161a9e88b7ddb227a693202ba0581ab2ec30df3812de1dcceecb9a0fb569a672960ba4305b10b4ceb0fc95b74f93a4f4b0b0c2b7420ddd96740cb311375f59ccd56e1d42249849b06cbe91267c18d4fce1b8ef65df413b8749a0f31a977266eeef9209316c5d6ec8904069421894a9e8ccb61acb04eca69d15cc64f8416633aefcb81a591eaeb1da932cfc057bd9022023144364d10f50a2c917a8df03ab7223d9a0e615d76f9d1d5059579dcf9f747009d64d4003985349a080f0fcf03fad20508c8b721e32cf33220aed002742b112341434d3e8551d594f9ed8c38b96df4daa0692a39e64d9b6c038c14e932ab1595a73ee8bcee998481d890c267bd160ac5d0ff3fb1306e7c5ba538dfbb12f462ad4bc8ad0cf8027f49cc5455fd1a96837eb773c0ea128d80c01689a32096fc546a4c0823d05320cd9b1a6ead069161583287c0cce3f21a24f72493036b4af4b979609d5040d7a9990e82d306cd06e3ae61a8ec7f60eca1f3412bbfbf9a73d8c487e06af8f0a6b2379cd9c25f7e57957132fde60ca00081ebd4f5d22971bd8b3c88cb90e09e03af136c24b91a5ac2fc5551b9f449897f0db084a023ea4f9847a90eb364d9739843643eb0604047b4ce949c14b0e909b026257ec9c485534fcf8b159a4876c143b50f7a2da2d45aa2bf2740163a60ac1a537bc8c84161007c35c0265da97029e165f319120b79a8df3e0134358f2e7d32e38e622df0dc0c0c1d26ec6536d9b5da9630916e59c91d37b671926c5f4f8238eeb1df7df67bcf1eb7100d0fff3ff4820f2424e4314a55e5130924f05f43dd9c074ce20b5520e6a53092b4423ed69d1ff9c4cd402d8ca4d7a9a481b3a501cfbb60ea9e9ac13ba32fae05b85861de080459c567d6733593f1035667c57325f4000cad5425e33338086d66083e62fd40c693bae982ef8fa7e2eef627e028156860ab
24b169586a86be28a4fc2a899793bb474e2cadfcea584d379e8cd0ca46785184caaf2c65c6884ef91a7ceefd9e4fd1e207923b0267ab322e00d48373d4a02fe287cc53f2710c799ffe94ded9ad0eb26eabd60e8eea5e898610b7e087dee323415a864496b110b3c26bb05fbc755e28a2b62fe178381c224e20479982ad153022664e67e6447591896c86f07f784a03cb5c223c4a8117d99401f4e07cb5f08f5526a20b8c1230b2c534199d0ac1ab3285e12c2a9f425abcee01229ab1c4487b6bfd90b8b9f381241e449a0e29f6d0dd9343b5979b7d7a36d2ec5465559e53fac0a0247258f4c5603d0f3ba22e470b7dd19382767895aa1b40519030c53017247999bfb6c0776b534d1fb864c23938ef2ed57b1b57d43080c0991c39d406de262908cd7c966cba1b9eb56d9415d8f3cfbf2e975ac96b5ee1471129cb02d24635b5b8a86a95506e30cd1ef853a823ca5ac570d77c240dcfc09b29290244e097e3feaaf17f970fe873200447c821e554f5e508ea5712b7c526610f56bf63038d9a6a3be7d3ae7e828155c1a87fcb4878453f1ca54206d32950323b62daf67f9cd4de9366256d34abab7d7f35f03def1c4e11b8ce51bd25b24c7ca8dc6ffecc9e8444f44a763eb6d6e1d7332744e236f4183eec4a7c06b3d1ecb724bfedb96f028fa979783991acf093640ac03b2ee392b8bb753681e360a1aef6a3c5232365075836c2ee451d5bea3b5b2ca385d4306b9f165326769af274db0e03a8d2d5e0538596cd3d25f5ff5d8e69c2777259a8e279fe0d86b5b03e8452e2b944d6bde7eaa1d5927a983e26e1b409fe1e95a4be5c699e046deba86732174c6200977f7e119bf37bcbdbdd17bb7739ea213b87f68e29c6164961b471b05434ff2cbc2b2c28c1825a7c15734bfb61461e0fb6e6c6e04a8de4d4634cfde01f4ba7060643a443303fe90376cbaa7a13144990c2bced8d1992552946e17abea5f5706675ccc847a218c20d472178ab8e11bb89430ca640a62ffff536cc103abac2d3d93d1b499cfdd9a39056e094e674f4261c3fdf8719b8af0dae8a2808376fa9c750d7bd9e473c0f7ca88342b05d558eafeb55fdee40664ab8bf7e87f54b5d085c35a4ac367072da6d8a7778187420fe2d5f0a60fdaed08b13a5923ff2a7583bb23d41b49c92f85285259ba1c8ed77ea3a2a64faba1d5aeac2850bbc2a66a75ccff0a90746515e958c3d17bf364461f910f4821b2d1943a7dba9268ea8c973070a752aad3c49972d18e8477703de759687bbcc4805dc4f17efb6308420035c6a45aa5d50a84e84ec5e43db1af37b6b98970c19e5eaaedd5804cf6e8b0bbe3942e4827c9490d640d37b1ecaaaefbbba41b4ebce550c12e5986dab280798d4733c5fcc480694e7a201d3f4651f841091891a501bb1f2248f49ababb1e9a5c8b832d0153348f08d527b3181a3e98397211b9a33e8815906f8864704c951ea8c9ab105eb7748d193c6d12feb788a291b5493c8aaed6bc22fc346ea7aaee8c7c042a23458ca590782fe8e21c20ce1f6028cfb7999158411954cf7c65948fa71ca7a12876b10b44200ecfbe99045f8878599cc31f48627aa48d0e990329bf03abb867f6e2558560ec5ab8e03e8638b7067dbe5f01dd2a9fb6dc613ab878630ff2b21f5dfa98873c7c49728570e4397d56a445666659e002208014f13618f7f896e862bbb8a4a24cb0a6ed4d6e0ec4c1642fa0fd4ea0019d4f84931b26e7a9c32a668aa592091168477d3cc51417050c82f78d5954ba0ec5a3597f401820f6ee108b44fa15dc8c175b946f4723dc508511247af9d064c7b95dc43afab6109a2aec207b0ba1636e86d00389be2143f6ae443ab1e221217ec460971a3db63ce4ceea2053efa67b6e3eeedd416b0951b16deedec2f8935e81dc130d95cb8aec4d2ca9371d9d26cb1054c657662c4bf5d0706effa6e6047247a18107912370ac7320214c3e9cd479ef20b1fb566c692a021a855a400ffa6d0e4fa96622d6e1f993fa46860fc33deee2dbb14ef348a3e0e623e928b5456a057374234bc9c556df72f0edf7ca668f49a2b8e362bd9a5b95699e02e99be3f47b305e50d58371a4e72153d7225cfc77b959725f50b457eef4f56cd787a038c319430d8cecf9a003bd7d1784936a8516e08a4d01b567f65c7d7ba0a11123eff6a0035ddfce0efb92ef934619f9ffcbb135c0b703ea803772b3f382cae9578e6ede8ffef490a3f11e2db3b1cdbbfbf5b832e88b26a0e802b24f9f4729b209ec50fed01b70c0bfb9a70b30cafc3acd3f9a2eb524de4ac89621285584e0fc74cc7c775968dfcabc579b1888111d8b7962eff9d7bb4fd0122da3440749ac5913715773d6c51791fa06eb32ec5f3ac2146b67882ce28d66c90dfc1e0660da61821c07bd0d43dddd8ec6868c52df5856e5e36e0c82c4a1f126b02154b525d79bdce34fe453e90a73a111ad12fce58d15cb9598aa494eb022e5e170e267c4c978f0ec6e677a3e63fab0e10d
2fb974b6cc0714029af82e7094c67cb7b76ebf48f0d6eaa09c457872a475beb92f0279a6f37d7a8baf90ea9f1557e8feaa4d2c9ec8fa28f958ddd1fdd331f69983ecd08ffec8bb263193d4e49d714e63d0bae76a52671964683d1b90c5d50087cd61437c8da518ea3fcb1d801533e52b90cad6169f461e075f337012a29ab3503725bbf69ca166e8ec5d9520bb97ceb229408cd932ea78e12be6db28d77602667ba8c45721d465dd82f9ba62f9078f0f56656a0bb3b42d794dfed90d9d4777f627f9a36fa8fa627182fb4096182a96ea5127ef9b6c892e1ef7dfecfe336dd1d589620019770918896b6f6bdcddb27c825dc0cc283a38c4cdd3abf26ae92a6088080d5a941558e9adf36ef5dd3616685c564e79892204b8f6a69c7593c221b6425f4d4a2090edabf0c1b8715ed9216f81b3598eb71fdf94597ce20e61030cd92198c3eefdcf44a2e814d09de2f37543c8a48e70d511b37f13cdefc27206a51b91c180575ed07ccdd2a2442a94624af340010431a56b20e9ea867d51ec32abcbf4289a09f548fb81c1fd0717aa13935573b9e53e70eee9c83c0a371ec359dd4bd69c6090c0abb4ee3224f53305d07a228c8b16fd429083d881b8a6166db5aeac48c5b5c268cfeb0e1a287136ec2b0dfa4855bd5406eca60ce627c0316a6bf327367da6f4e76d97da9fc5f37ba70def7ebab780364d0c776dcc6326a2003b4dbed9aba8220cdb5f4818e020de7b57bde3366de6ac174c8e15671de21fe2adabd76a1075663d495c43fc59042aa5034865ac1e9e3533106b6eccf8953f3b57d84906af652e2ee05b5c867028d7d660042789fbb96eaf035ef11a0b3174ab2359546a12bcf2eadd2987edac16e1e2546cf780c0624c456bf9a192d1c4565df5b0287c85b8e5723cc74eb9e001e89feb577625177e1b27aac14b2ee742ac942ffcb428e39b49e101e554e8ea2806c590e3b7f48705a7dc1eb69fdf0ccb1ca9520b323a7405bb8b0d5f7d673017d0599d09457c693cf76410991012550bb92915b6efd9767a793c4019a7502e08446fd3eb61726a7ee40f15bf6e30e523a3620b220c27cfb61fca4e8bb94cddb3689c1ca3c00d29779cc0d132c7b78dee3c09efbbd2bf867952cda347dae8004d2004eab166e84d80a795b01dd5b43a69181b15ad789b8341e02fcac350121731a8647d07948b8e61cd3c5015ff6a76315e392be50f00c9b3d32feb7cc9965a2cd76a6e685462c720a0c6f4a465154dcacaa4a7066189d0c4b1d49bfea698aac80fe4199f647cbc65be1eb12bb887da0fe2942d3b07cfd2d31ad06bc1045b0ac96e8b8d3659fbd026e6c2fe1514c2b901e22f63269cd9466ff5b2e412ef72a816f36a8d5103da8fa2538dee2b7b88be9a246cc538ae285fd0667e9428f501a6d034cc4b837102145d0ac2988e2afbaa3dc68d5d0dfd2c32cb5d0bf6c1c872ff73b5d1f35caa75c00dbc9d232eb3b788025017718b88fd7bd2145fa785fd3d25c15b54f0ba551b5b24ed28e209280fb418b006125bf4a5a46a09fdb1c9705b4e85b3e10508eba5b936a29d4510fb6178bdd9fea0590cf1e51ea1b86f9b24c0c4028ec7e3af435bbe94197ac0235f9c72e8bc760dda8967da1c734392e25a7aaadd9389c21104bf0179f9aebcc9645c47ac83c309a890764dfdeedf966329a380addd28b2be0c9c4a7e650a8ab138234d9376463dcbddba6c8f47be538c651a5a2ed0ad1832e01054558f5e1452af162907db2e624198b96bb645dec053a388cd1f3f6031dc67eb68a42934dca141ec5842810caef514dabb40102749a7d06786780d28acfedafe71ce9b95e5331806e5fe1aab79953d1a469511ba2fc36dd04bb5e02cb9353321775be0c179a0469a2fd78183292e9b30ea891180e8ae104a39081986c43db55b9edfac6096e4d5cc468f8555de1ac24018b0d053a2a957a2b212ce91ed73e0437963b55704ed1656a99767ea81ffcb0cc82f969d2476b5f574a45e6af6da19b26f9124ae3b9d090385d29b26d6ed920f35068e13c77925d6f7bb7c72821294f9412239fc6159503fffaa51d79038401918548e779d28fc403151803096a898ba0e43065c6a53e28cf590c679c18981ec64f3954693343e7623b2efaee2437f877416eb46530d0dc3caa0762efde181a7ffc671fad96fd84affd2a03658ca1553f7483004f9b891d875d1065b52d5e0394792cb3c94d39050714332323769f5962496fe01e7567d6b858826024446d0cfc6ba158807aaf303810b4a2ae70c91fdf489554c5bc9d1f654090ac233554540cdc41640bd882427756a01b78608b4cefb5930e075485e094cc3e4dbfd50967284bc10e65d933fa635a14593a8956a8e518e23cef9d8cc00af0dc8a63cc60e33ab641648888ac3b47dc875a058424a2d2fc9d5f8c303e7b3ce7f93b44c8fa7818ada0bd3e4818e8bc0731ab1331f99b7053e7de0f4ca8ad331516c6f584aaae903909964944b549230667aa9d43d4a3fafa
00621d446c0c439e8d5abde3418ea00418aef90c3febda25da8622bcfb1667163e230f6782d82562582bcc22fe2bd75af44aa547b77115382159ae47bccf6e431042f9dab285a436de5974379f2e9f14dcff21ba905f9638056159e55fa34b4bdbd2b3c69630cf7a96ad50c0d090f9b038a5b5823c3d4ee1b39d8da34f6b65ea0b8a3d4cc4cddcfb3bff226c11c6a8cf76f69f3735cbff51e196271a12a004c85b7009044b4b2729453e083d88631f8bac560ab40e4368a4183ef41daac5d3a7b10c7a574801c8150d7d0ea55535cb1e8cf57a8c61c24b16a8d3ed40171da780bd7f7de0e2ffe610a5dea567e892cb90041f7629fb74eff4698dcf84484c64c403f42a39887fe8d58f1471c3882224c42dcfc9422d291ea140b02557f1e022612ead063d126042f20755e3cbc55a68cdff5ad516b32711a3559c962722f30f9e7aa2cea8be6e42765841975e426502945b7ff3608120a07340a0ffdeb1f4f69960ded5995e99c76922732cc1bd960786fa8cb36f7563dce1afd8f885ddc6c5b858dd22000d3689f4534b3bd4c287cfa45d05ade8cebb5859073760911452f26672236e7f38600e2604714a703e0104b88296b86480e04e2c2125be407282ecffb7e649df0ee2927d905b80da40addeee2450df177ccbfe1a2c32902dc9c7fb5096e58091dd224bc7b4adc4e62d776afae08b1a400ff9daec4ec536c9f19f9a1133bd023fb6cc4519036c763b2a399fe13151da841d410ddb3b5c275ec63e1c6bd7fd7797cb66138dc60784cafed4e35e3b71b8179898c1d55b782904a7697a6698f097ab4d74ea83246794c92c3c7f3a9a0c82f070e73b6e22a248d1daac76ef882674324b0a39ac0bf28e46928cd351fa9538fddf2228f37ce66b279cff44d6644882bddcbbec6c64076ae750db884bbe5f689937e93ec3e0de2d6c162609b25bfce9c1adaea2ff0029b7d4895787672fce844e58b63173cb62917884ef0aed70c2053fa147037749805ac7568fb61cfd0fe681460181eca998ad2ffa5ecc220d4e6e9644d4ee0f1f1641a178edb2a41bbb76ccb4101b775ed3140ef972147f10da015f4baa624c24aac4d4db50d7f1247bbcc1c5ff0f765192b122b451daf63bc75910d9995d6fb5614a18309accf3737c047f38a14c345626d83ce054b509628b5e546a26f000c061faf2d7f2b1000d5288e985dfea1e6ef8524321a984728b1db9890dcceacdd8e2a717d32faee62732bce3f4eafe7faa8f83e13a17d769c7061501e01aa4782720b9862cfc26bc70cbdd2e8dbc98be67da6046e13c384531076b479d2424454c62f79230836492317a4acec936768659bd75676ea0454460136cb4bf9dd0c55cb2cccba515bd283009376babb8ef42e478cb9091b60b4b900e2381a05197f8a49ff20c89a1ea40a3c5bf8d447641d81f6d4ddda510bdc100c95ff4f7bea0cd7e6bb193fd4f021bc7aead0ffb6e1baf93181fe2bc105c06dd3b9027a2e61304e821db621a42091ec7762b3160fc0d13bb73d8f299ccc861aeea3be3af95e49115827ed216ef012cf8dd400836d5e8064389595327a4b56a6cfd32633ae56bbb52b56cad0a9129d9ca4a1246087724b7c100ec763eff2e0780cc4015046f27d6bc38655658aa468c328dce25c1bdffab7db5a2175a209997fc9ca26b58fc42094590606baca81df9581a5fc07bd5ce248d66f2a0d17769cc0ce1e6815892d8f45b594fc67c9d1540444b9706918e76dc93192d0492e7069a5699454ddd80d33f9171bb02915b0ba4779f5c74b0345dec52963911e150c38c83f8f06aca7417b2fe4599db9892a2524734da2c1744ccacbd60f32130021372a173ad470b2cc0e1c4cdd784b659a6dbcab32e5a17674955e58439f1f5674dde43fee4611bdc12c47404f64aafea38d80817ce66fe374d660b72a6d11487166e2c9bbf9ee410424128f12a0beb1b271bbf3cc5f0d11b6853d059ac3517219cb25e574a36674951d3da9f84fd327048c5fb73b05b8b09af90b2290a461e462db7492b83322a93ce45cb944140bd7808244a49fffb36cad2d91e0fe4379bd1665365c68cf565df15ac03098f412aa8804d6d4503ed6186858676ed6b471b90fa1f68643104c68563ce5da9c1a19e2670f263972a20e84128870f143af6f0f9cdeb4c4e4c7c94e25835fbc2b5084402b283b47c9fd321e1fbddd727c6994b7b45e95971f5bc50f8bfc0d979f25818783de68a597b2ef7d8f5683320b7fb0dff82b9f1c0f95023fea048da2605a9a8110364138230edec69fba5083736b223a8a585b0e95e6a2f4125e1164d9ad32f9b2fd142f6d1ee3456c355dac7b11209996887c73015aa1fc9a50d3af32db695bf15b50a8468f863d26479cc19cd9853a8af4a8bba4371a26bd4aacec34df1021a00bef7fdab361ff91d0bf99e29988619a8676f074d29c61ed68ab7cd3ef20709e669cb4c1829561590b9c627b1c4dc443ad6f5094b3c719e191287aa993d99280e0376e
a7cd151e3695f2995627cbbd800b344ac232e82197917d615d619c602587a0c17afaae900a218cdac7803b416f60792e8e95cdaeaf5f5b728d1c8c57f1810b27a312e849b6d1567d4dc11286106378e23e84eb016e5ea112ed1538efae327bf50fab5af8e8607d2b3329d6f7747221d14b11c6aa39694fc86444d700f75e0d06a58b5d85965d133c7ffd0a992d5100fa983510464220fbc1ee1a0983f9624f3837b4ad9c2078118a9debecb06969a91077912291fa1b6414f5e44f1ec4a338d153739a9fa1b1cc24173a4889ba50d08dc91684a4106c012ea8b22c12f3facba12e4161f9ab89383509341ff38118a0056d201de19743838362b0e5ac5738cedc6086427561b3216ac5b9a9b165565715387be7cd7cd888833ba95a7974e18ec7436e9b6705b76bc837cbb028f235795f95a4d9c84b8c68f74935cf3d4ba08837df46b2f41e6c917689fd93994202e180f6e70b5db4308d0604755936cd40e71196db472cbc45aa798962796749d89a22090403bb0d166386be5049fd7a38c3128e32c9a1a586bdfd42a44cd7fd86e490863b2e53a52d2a28663ceb7b1fc4aa21c63c7fa8ba7ad39c2214240335bd36b961ec8b7492f761cc8982a0a0541edcbd70de21818b1b5bc74823a1a472f98857e5178ad2acac56326c883aad6a6d29b06381ef3a9ed38b554eeacd9ddb1db5ed6891f4ccef11c02c6d379cf3e112762dff16c45e8db03183f8da5373f6ac638888092ada9a3e818db64e472f3546a54e10b32e021ba97a96f828719c5a8c3665f34fce5642e69ecd9f938c1e7f1c8b26428df3b5f8e3fe5da239863f6671a18e25edab43b3b3dc2720e5b6180ca47350b0928b3bd2426824313ce47ff40cd9c5f1b94ebca42bcc76679137c6c9646ca836808856007923c6ade6545cbf18207dd3b2dc1fee10b44400c9ee39685a88e1ed7054dc32934a1af1762c056296ea7f86a448c29b57f7124d13b746a2d56f27882c023ff2ceb61e723ad9ee68ae7c50e6a355306afc92853e448966f1470926592618f0233b135bb477fafbbd4f37369b630558be2021f372e76426aaa5b0c90fce7d21549d88fd9bde074d30c73de75822941ded0bfc36ffcb4e650a66c82944011177d991b358163b413b45e613c69f0a7309dcc6b81d1c97165ec8a8bb9327178f30463c9ad957bbf3bd32eda7dea60b951748e5aaba7f4760f9313c76ac1440aa45894f6d08d6d008a2a75ffe97b15fd594054e749074706f91238ef9e559341bd9d8ddfb2b62159fa9da6fdff286dedcd6a5a7aed2dd7ee306b9ddd2c7909c0546e91e108e4728ad2443b4e0ff45bdef538ede6f7c09043b1a75d7097a0ce50bf6139e4232bf9a6893b19e1687e0b8a0930c024868f37006c3af5123263ccbe2a311e4bc99350670bd5e89ef426f9152c2b200457f86b6246faa48d0049a4be4d36f46f717b67e8b3cfd2439848d43c9be362ac1f995f8deca7809be3443205e8fbca76920a015436c54d5845a7962e68f34a16af81f5ef9fb12eec1972fb61f2e8ba88593e07c04fa923cb04872db16eb1c13944cfb17ecc40c23a7f8999f874ba0700564d10ecc5f8dc5813c03029b2bd50952955e431fa15fc473729c85183369819e7d78c9e671d901b6cb6c76ea705cab81219dc0c565f19188c60265cc9ba3c4dd7832c2205610aa9213f777b61ebe37c8d292d0ca11a4778a4ff0c9e0bc0ff61f47573d557ac6bb91a738565714805e2f230325230dfba1c6c4aa6de8bc7f21ea163986a631d6051d0ceede56a97986524bbfb90d9d4c181073eac54ad7362b6853c22a90506ae15212ef81b9d346094bb2be711701e763c9a1f863aee03596488687cfcef9a8588637357c78a999cba878c4ca76b989b0bd799a33d345c886af0842113a0dd35d46511d42447a0e7302438cfba989b247395f8428a18ada2c6676ae4da77115e5350667252a1210efbc865f6f40a9aa785a2dfad070e8a4c13508958d2ff034e95f16e9f48a4ee02521ddc2e3119cbe9c04eb45d03937a0167a5ce3d1d78881d2e236c61562f9417d64e92a1acb2cefb838f63757cbc384bb8e520690748d58c1c059d3a16ccb521eae07b10564d01899bc1a2fc2772a0be0470b2a88b9b5a33b3c672ed29cd5597a2855869c00c490c9b510d18c9809ec0a10dfc244b3c233cda83557ca0edcd41561e71ed4eeb602dc1dabaa623aa395280c6e0c99fde914e97923d62367104e40212a3e22c7f2688d9d38f8a69f22144386421b4456d65aa144a4c26766f1c720382c8c03d40a4fe89d4b1b3e6aa60d76eed9c3eba074350f6bd612cf20a4f73c2443c13997583893e29999cbac8ad58d3553209aeb1bcca6f65732fd50dee96c84f48cc9648e0c4bf3777f17bcde68d0ab30d321b31c2e8a82cd8ce2fc15411603cd55abed82f1fa3bf7a6d79f2b10748a1c021d42b2b82431a3844ffd353f9974e8e80d42f7ed9d57cd4d283d1fa1305f75e6c29adc5dafd6e0f24
9e6a082110744b83e018003e215f7e6093c501bd898b0d65e64778967143057ed3c2281b471d76b1f29c4dac25796d00cf838b6e335cf842d5335ccb525ecef1941d56dc5cbeb5617a787c6302f8081a760cc772156de57deaa6ad40b66fb30ddbb06628822c6d9d38db08abc0a9424cc1ae35df26b92ec458217b7ec604869a0f985635c43ac91d59da86961a9ecf7e9dc60670b0c09e9269ae7036bf17d158d9e3b214a1778fab5f1ea4ecc4354136134c3e71a26670637ff73605b8daaa282e348f61eb62923ae333b11af1bd83f49cfa55588930a4c3e42cf62eb24571d0fe9f9b6631ba111a15815af9823df3be2e7ae75b1555a3b7a13092ac21ae0f554b459fa14ba382bc326a9a30d7aca4609f46da1c2ec19ab07adae77787cf7f0f89ec363b850a8512f174bbbba0f0dc92fe04cead05473e0449055b1d8b44b6f24deff6b66f175dcad1ec64a8ecb605dd1d672e040b676051e94828cb87ffd9fde05cf0b7025f882a8c4954a09976537f75b25c3606e6389e6570097af072cb63321db7207d8a3f559de99310c5ae317bbeeac7a42a51c13a84f4fa0e92ea566c8d92b4c1f227b46f5e4c172fc663ebc490cc195ca79f7aff6037bf7acfe4e21b34034ffbc59082fb6dc52249759b7ab57c4b3ca4580d519313a10ecb897f7ee4a69c9a8356b8d6003865c6df97c95c023acb76b17ceb42fbf1d74a72a7932b65ae38f9af0418bf0b975e1e73c08b4b1411607bd1c5d5f63b109a848b709105ecc233404352d788bacd76338d5b9329f2ea77b99c15d2ebc7c856677d8f907fea88728414f5e0da58edf5e76c733acd8aca27b8a0ab9ca42a4177e852b282d8ca63b69c14cd543b3fb703f913577432acfa710325f472797c1bd46a58734c55262030a6170b1aa7bf50218ccd9668346b11974eac4697a4eb7c9000e399d3f3a9703e89e314945320bcc6fdd096a2377c56e479cfafc6c0e56a8b4bd3f61156789362d614022f5d5c5ed80e5515525814512ad406e71ee94ff13cd10bcd5a6d618ec3acd82ac21e77df4e23b099ed1e7aeb1e242063de8b7fd4c184e9dbb81ebc7bf51c92b5c363c22d20407dc9b22839c741fc829d768661ec47a2a6516ca3e8aae0f603c595cbec16fb9a3c769a4556f3d8f0400ed3cde0fbb3e0d33456534831dfc5d454f15d42c3f23261d5c4c10a48b2839a9ce3a32f9db3b32dcb652211fa2052da6e2ce3de85785935b2611d63ae550e8ee7306dcda4b1c750e5e974800be02b0c79a2e3826a45af2975420a0a0248ca1375c302420f55593020b10b207b470fb06c2af079e14f9b8f566885b4f10fd1575674a9fa06daf73d1da8ea75933551a05b6c17c5e330958f1a7840ba4a50104e34099a4332d2abda671fbbb5ef7e76e6a617b7c4ac676465a9c3e388e6abb3f40cd36c080ff39a8691eda22bc6670d71eb9b20a09b61a39084d7cd444f784a3beb6d70055eb97ef582a51a578295aea812e1d4068d3677f0ef775227e33617ec10d56004a33655244b722562d0f961fc2c01b4287b59591670da87cd8f20b75f4ded484e86c8795fd609674d6c8d1a16b10425d63696b32e174921e9ea1522d1e83f1dcc886d2213a895df8b9fa41d8225f2caedd557f0fe069cd71e533decfd80f710fcc4af09c3240d925fd5b7fab066860b10cbde729f930ddc839f7d840371242bf04a32613f9535ffd5c7385824d0ac7514a298e577f7cac09bb08384ce3faab28d8dafc553a48347fc9412adda8fd379f261918494ef74a82e664eb9d42660b13b46206cf49d2fe6d58407683895c42f4444fb11d86b25208d191711da790a988a26684e22538c4119ed4032b4b02cbf4f72160364674e582996892ec0a7869241329f2eee441b3299dd30eb79c2150e7721567327b6afa498b4449d376c285dc9d9ff7cac07dcf905ec288193c946eb25c306923b09a756fd4922b85b341af4d7125dfce1589fbb48c89de6aaf9b866cdbe906f1b7be0bf9a1744d59a342bf609a0dd287ba4821080fb34021e608f4aaaf5c0540ac0fa95b8635fdc2b49cd3040c3a1d5457f84aa3860913149372124c3c54c424011772141abb8898fe3cfedb77f6dd42b9f425c3232d5f0199dcccd61a6ccf6a891b524bd31adadb08defce71b230c8ccfce8d10fa140a48c4da84a6f17ef2ff3d3559236aabfe177d374cd2a0edaff0c2b1009a70bab0481085b126b3e99167835509e8959923bc81a499cd0c377ae3029a13f34d695d5e531dcde1466610a939ba922bdca0acf4a9ed1ac5e276cebc8d7f654b826335def958dd535a261be6436215a265c768d4f83a97afe4f676a595e597e3304958f72757a69213f6769337adb5b2abb3e03a60fcbed9582634a02ec2327776ff672ed33056e08e9cf9de5927f38be0008828b0e94f0ebb1971b0c8cb1c34be4a0d958c4561aceb551a12941801b1fd95fe5fc313250fdae9e02a95cd4b8b3d3a4a8606170104ad5c74d4264a44980465
fd08926b0e7d66d7249f8501af0f3eb377ec3e886166bfd71523230f6f8decc9542ad9b3e8906466886e6f1512af320e674985bf9749418b6ab3b0612cef40599bb26d3cb9cb93710da9d5bcd135939807b20d17be6e5281bce9659abd04539a3692a35ca410edd55d4c0fc357230318123de9e8deac393da9bef45a5f2bc962bb8017bd799f558e5378dc5f64dd2ee1d22d6c41ea0ee91ff7ca4eff98f5f7f3d20468370ce0f685fb05e8814ee84edfda347424069d3ed7ebe0dc28052026c6053a43aaa1849316c2ecbac2c52da94d6391ec381efcba06bd2e839a000bf111aa9b178add626b0db9608cf4e7f9e116857a8324ba6f4feae96958729a2e409b459f65f934d22df1f0f47d2c6f063f6212bbb4deefcb4dcea871c61c236808e52fb8c45be874ce41c44c2e8cc064c6cea1d9e50a0022bda98b07cdac81fa7a21728aa0450e7a19f33cf5417f6a7071c8145319b015b27a605754fd901897177b7ddeb6d1c04581f0ff782af542e28a5f1515d76891ce0f2c9b50b3cfd8e9e3e809d1a710df689b6f82281661b89752635ab916d088fb875433937df2fb40e5561d494b4c8ae2c4c481a7ccf2ed0c18bc8255c3b8c8c8722955aa287fb7a22a45804de2aac5455169dd77ed63b808c5635099c02181e74c30888ecf81c044e3264e0858638e5d85567a8807111138488b2fb0ea43d7884bea063aa55bfb082527096a6d7d49ce7de5b1b0fda8326eae959f45afa1a14beac348441b1cd29f0b03091c4daa45a071db67d60af7d355db038c61fb8383b24b98f0998e5fa32bd67dc37abe18f7dea06ac97c0a8219965a5894ebcfaac74547cb2607595e5bdb1fa82cb38dd2dd343b4988fe1a5f1e0f60471fa88b3f40d0cba185a0974312412f0719415f8e41825e0ead04bd1c92097a39c809fa720c14d4bf1fe291024e94a900590fd4ac0d3716e06b41bd184fc4c81a02af358c9cb40fe44bda279ba09ea770308f8f043cba460a39cdd9b2ca35cfcde84c9f78e21817b5852c83e38a186b31ec88effcde9832ed59cc85467498e45016266267872a3805d51a0e2b607342f2c56433810eafcdba1d2e62b2d76cb010ada22f3332c042e46e4e972ff064091597328fc613806aa8691f9db76d12308f9783be51ecea68a7fb7efa0372d6e5950af603e863e515a7c454962d026fb80669afaa9c2495ff94bfc6e3d56a43438aefca09ab233f3d07cf4cae0a381db091dcf81f25b3d37b0242787fb7dbb185c16a27ae4e342b0d0678f24285e98a13ec58589734e063d57844f35c54ad0a5f7c0880f61dd3844bcd70ebab80b07b0f136b092860d2f866116f027e10e9d481a0aec9eec3f4e59889de93860de6a888d6c99b0fd13710ad014b15f375be59e22bc1131d3e7cf8960ca91383bbecdb36a10c23794b4165f4d950ed6cfc0b9664d949d60307bf0da228b6c42718e6610215a97e5da5c7810d38fa21ba39f1e93a079f5c9400d1c6731f5ea2f4b711683f75cb184ed3b77b91feeb95aa2c11681a67d798c10a86218c2a34d878c37bea15200f5fc9ee734c74c6e98a45f4e2454d4297f4b5f2fe9983f0190d3d2ca08a4b816218048d4f0fe861f1aa89d1b675c0a0864a729dd34a988243b6f6293e133484b79e4fb10e31ec7a2f3e80679bbf9fede1b124bbb5a898624fdddc046bb8c09c3c90732cf95bdd39df2fda2234306053fa0274f73ccf36dbc26437ee888c6e8ea42e2988507cbc966015694458fbd7d4d6c8968a7afcb8758d86e033cccd83bdf8840dd63db9aa5b80e0173adb7de7530339a7de8f994508f81ff6f50d7537724c152a6bbda552a85337ac46394b0ee2ed6509dccc41763ad44b6d4b15ac31ec605e7235640df223f5403a3587ac86bcb93e3d14da5846702a76fd74ed078003e68fb9806653e2ebd7beb8f1f51aa8a492a2e1effb2ffe7463bf947429fbd79b74757f4c88039d08986089a995be3ed99321a7738f1f210bfd1a73ca095c4382b8f59cc107714efa4aeadff63d921ff3bd657ce928da66d7a1657502cbc4c742c5bf142b709f28208d1b4f7cd1dac5d77ecc632d320289e57e40988c8b74f258628ab5420fd83cd8111305a039f9803765a26d20b2bbe27125b5083336e3eae56c15c8ec220c82af0edbc1a2bbc09d39d59a3e4ca28104f10a35cb4eb79a9113e4691e1541bd7073e9a02e890928d34088cf4f6c8032dc2937a2f6d84cc412e2785121670d01c98bc3f0985b582411c86b8a1da4b939bc49a33e53be3b9118c61c30b03de3629a67c1d028f48f7abd07d4ab1f1510b4daa2c8b96d0802a3db122da988f969709ce8bc23c1aa19052373894040548868cd9b2f60d57813c59e9fc27c21645acc399078940efd0e10056dd0f264d1cd67bbd164c4e412cdf212c528e6b0f846af7e4a9c892be2e104aa1ff2ee0dbfcbceff06ca386854c5500d00246e8f85e6ef012705c54aebbc7d382684381db578b2674f285636deaf9f564fe39
d3c2009ea7bb39bbda480f9f6eeab81c1bdb3800bf303fbd4d4d3210d77983ff9fb47e0441b4b525c9ced316036a71c1b4b07a7e83ce5085ddfa3281709d5786a3c1299f3fe01c9604df21ab58ac6387cbd7e89fe0ba16d809999a059dc23bc788028d27843f4dddc7fcd5ccaa7109bb84915367cf9af058e9d16d69faf92e7b51669990dfbbf50d1ea1516c2bd6c67069be40c03c58d85f938b92c1f0d21273963fa658531887f3a9858d1160b6be85a6b186b3a6c6720cf7fb64c04d87b42ee8c056a2c1b85cf9cc885f1cb9de021f1f0f24b467ca24e81ba80a875b11b0a4ea494b72aef81c87461702b839fb400bc70f13a3f3f305e16e5ff1094534d003be971397dc1c51c0e902dd10b100b392a7c977463dc3d062c4e2f799229476a48549ec2aacffee94875060f239fea8a447deaf452c72568b031b4e0d8d27e0e35976e6887c5019eba49968e68e85c48d607b68c7f63911ea3fb4c8d1f460fb3f57ea87761f03868ba808074d7f86d887213480fdf6c3e7ebed7c31c67137ca771b68373fdcd404503d4c7b870df3da23822e09077d2d4ea4a7dc82044c26bc2ffeffe83f73b4b20202f90ccf5131ec8c0063a60cbeaffefe1878c750b1c9c7351828a3c7f4007f4433fb41c4feebf5164261966ca012cdedbbac80a75fd19405c3d7d2e314ae5e3b4c1fc784b67fbbdd1407d0c4a531cb924c609cc90e4ee02748e692a7d5658a07fec5efda2243e381f1d87af8fc16aa67d184280593a5835932c92c88ee90bb860b28cda45027e055e1f94262142c0568f150e4eb98d166559ba317a4631a348c10d99549812abf19c6da15c1fcd3589dc9932579d224a1c251a6dc0057d483d1a3609d6d82f5815c90e5cfcccce15a372aa98f97d5e9a2522889d4e03f4c107eb881a78e567462e6eeb0ff9305329b12a05f2f1567169649045669d9bcd43c3310d1635473e360dcf4ab4b8f600cf7904f918863ea2dc1e41b8a559bdd44a68b7aac1bc84c0adbbd8f132d430bd2ad8d4e37b901b1450b28aeb638f1d5f2c15d30d77d184fe07f02cc5433ac0b32994da2fe7f08ee9d29d0fa13e8406a9150f7e60490dafb3aa5cbb1e97266c927a8ff349d35af109417fe6e61ed89ee1e42287bfe5fd812ed2d0a489354f37256ebef14d21693cc49224a08dc72beb62f3daf907e0ebe245351e24ac4bef91818fbcb5cf9bc11f59f9b8569516860ef7570c77ac675ffcc78a185116d1b60b4e797425750125f5ac6c7fed0d5fef0a7c23020c63591cca7e097fe663c1dc742cb640fc19cd0e361e03e562bc99df002f36ac3e1c493a6e5f732effd5f3a94ce20e9ac4d0de61409085d969a2200316ecc9a4ca341e5b09fe5898cd2a6a33d68dc74e319a447a62778db4a62b5284336d97bb55ca7f19879432ff501d8f9cddc505e56eb4548298806bbe3d1c0827c0805223b09493a04632dfb5780c90beb388daef81415fff5aca1bb4697efd52b8cccc9d9aecc6f2ee74a298cb54ca8fc7a89f3052810353a7c556ce1f8f5953f49fc0cb889bc1126ecdde3028c9032eef0d70d2d41c915bbbe814903c7a0d832e24e73ace3360481e125cfc278186e7d3fe2215b1b4dc9ea007457b8599cd5c7560644a64a6e0bae159deffc00e64a0838141e073e05e45556e923707ee66d8291809757a2530075ce7e8916ad50db67db015d4c4c0cad04dfbe1f153a28d334381b9550a38e96e3075a627708c5a193856c523b621befc37aecbe33e4063c22a16e211dfeec09154416877099e1f988671125d8f74d157eb24ca709cc5feaaef269817549a1b22551ee6113ef237410d57e4df06682771c409f82937fe018a81ff0f612bde9f56f85ed6ed078536f0da2e50f17b087dc19055815a6fd30a26fa20afc614dfbfa03b4a12d6247de469f637ad243ba4c641f346b878ee54bc81f951570d4cadebcb5ac1de1f3cb78259fb6e00fe850330e5c1d800d0d1812abaf6e44765c7672483489cd8f1c9d430142f3b88e643ca7f4aac10b83852ca13a45e548b366f0c442d1690521009a27523da83c2deead17f8ceef9d5f8a9681193d9a119887f44aa84181d3968462671431715b8543b211cc89b7948500d762c2bb70432179d66fe62576e314b5b379f737c32f4a826f095810af0abadc90b711bf484a1f22df22ed45becbe8e2f8aee4118df8c34d8de49985f058efeaf4746ea4a92318c038413ac9b4a9fb526f8c958377a874cd9ea9585ff00406ba72d9762f81d97373aaa9d9379ed559360bb57442a59c994523667a260e9c81886be8184031920a605ed2f810d5ce0176d8cf788847f58b204a9aab2591a8e902c06261618019528cd15aebf00966acaaaef117b131557824c63ac9b37a3fa80a804c100be66298d00d64f04424bbc2100220d6863eaf7d07f8cef90a0ef1abc23f29da64f16e4983c52cecc16e85fefec6786da2630be421e62633b9dc6ea06411c89af566b2f9afdeab46c454ba997182f
48262834899b973e6c74148691cdcd41d4b0793da09ea54e446521c46ec81bf41bff1ab39191d556b03c06e42f7c95c42266a20baccf882d21358feaea24d92a32a857e689d8a997334fa52e003850f8027d240f006ea7fc42012665f146e780598a922ca83c59325e32e03c84998c7343cadf628736591d495b4f0800d64140dd77f397494ef1ce1c2ba31656c7d582148c1dbeb3de9bd4d37db4f1fedd665f79438db663077cb331805ec76a7c102f220e9372e5157ab2f2cb033a8e1dfb6fc9c6beb923497c50537e6b1fdb56899c25dee5aa5aa407de44bdc85e1b0fa588dfed23302b9506b724490abe68a2b0d36e75cc008190a4320c676cff3f2787e1b3fa210ac5aeace6d2d2a32a3becf643a43ea87a3628fe3bc4aebf1dd4b87f6408d26614a0e2ecf4c4d73a0416cf569391012f44647cd6fad4d8e73d37ac4f57f5a2922a25f24f359b96eca163eee780f7d6969a0b30e032eb29f5dfeaccc2374396ba3030b37e831a659d31ca62931c460934a45939515cafccca41be3f51b623e599a5c6f86d4583fa65bdb75577005a4f2ddf42c94401f24480d6dc40923d210225ada0443b05dd7eb04ebf4bb8cecde77d890705ad3566836e2f906217393384535c050842f2231b275e402701c6d1dd4f0c023b76b926bd9c8da4cd114cf034af5ae758968ff2bb3ca685397ac6648cd0a647c1cddd1c646781806bc23214fdcff271872e35d41025a1595759e94a9fdc370af85be8ce0ed725b8866ccbc45488c0e3e517352761076c2c4c5a63eee912290414cdad4b033ac6ab4351db7e87522f7009b3847f954cf6f21a50733d86ad5ac0a9698b446c59ae46ea58a528520b7f5e1313a49c58016e255a66edfb3dd3bfb2086fe954a3ff11f400f8d7083fb451e4dcd37ab99239113432c749cc580d6288c0732d22b4fbf24a7fa33a0c57eda754cc864eaa272af431bf40c00d94d9785d8759f32e812ddcc5977997a5129788d70247eb238fcef06851c7861184acbe922afee58acef37747d70f3b20588c62c182fe39dad5c1817d6ef72ee5267948d24f8dfffd829e852279de88ffe45803216210e93f37bfe0b16c93fffd2d9c0f5ac014ff552a79bf11b02b582989963732308741c04de4b4513599b98e2370e7259efe6e892556311a3efedbc2207a19f70357bd08af87226bdaf78553ebc00a81d5cdd4201e90bce4ac8f243c712b117bc331fcbd21cdca1c537384abf8508e336fa2e82c7033404974df6a38977fd344baf324e343960ca90be27d9ae08c642cc1a611912f0c41d76c4e5d8d34b3505588a065f49c8c8be85d951069d1acb8595220f66600b4ddb7d0455ab56fa1a73874c49db72bfeaa5310c91d06d7dccb52beb089c8d966a32e1ab6ecf5e8bcb131f8b467dd18160050200cc2c2c8560181ed393746267368607dc52832ce8ec4eee2ab5b3fae96ae8d9d7e9b9fb43e5c456ca3b95f5c882bcc4c0d0ee8f78c774f6f71138ac248df59d5c8b548c1621e86ef95fdc7157ed1fa581f3bd5e6850441610374e81dbf699ff937c776adf079d47fd97992c4a09d1a3c58ab1a6a5246ff3441cccde0994f20001c9c8143d79718a5241c36f8990f5bdda442ff90dd3c310f929574695c20b40d6eb47f07d77e59afe3c8c8b7084849a9c854cb0d0bcbf3b83d20e9a6d25583f533cca0eb7e1d66f4d2738ea309cf04cb6c1ca0a4747fe00001466648ab799aa1885ee8165ff98500f3c8a738aa5a0e9611c786acf2de7889234390735d3d9280f14c2bbd1bd3924a47556a621cb7a51f6a7b6607a7b62fdce79999c13e4f4ea87dc365860869a65be9ea40cb64d7cbd5e8a09dea27340d33eb5dd8e38591ebba2126169e3fc4df3c9e615494e584b1d8d8b565f66b87a80522304406f95e3fbc14084554e12fd0f110dfcc6994d0870f2433ba7351f4e5111d565302f205455833f978dbcd134d7e799608a585b6439d1ed003bc680f6e6152ba74794c3e8a084ec978ff6a7604bc76035a5f2e7c5e4052324e0759199a61682b25558c5c7af0b928b44d3801e16f75b3bb914ec09e7599334eb9257889e4f9620e9293aadb93cf4718dc05f637361c9846ca0129461418f5e0251e1d4ed310ba4d74903497db1c2159d476b7bdb794322599028107ef07aa0732f0255be865ba5c100a36c207e640c2b7bdfd91b018192b01c9a02012549f0fb9221df2c629e34c0dab4eee8ca32cc2c3234728c9977c9540e39bfd4797fbb62b5749e82ac92a498dcf246d88b4e836890ec94aad90ad4ff645491531a087a9c4c3afdf1a1f9883b5f98a5c910e75155da397bb9794dbdf5370ab652dbbe2b60c3f1916e0eb933514f00a68041a7d503e190b9f6c05134a2900d59fb9c2daf898f8643ce31668f43d715dced0602578a55b356c2abad6bd6ab7bf7bf5cd1137800e1b1f6885153f19730ee7ff64beea57bdf8dea243b2a1194a24530262b1582734ca643ae18684b1f60677477ad5dd4
bc88a9fec75fb3f59df40e9169532a483fd865034a71919530c941d4500829b73cea69532db1f7f292533bb5ce9734e06b9be094f3bfae5ca3bdb671ef38d7f37c7c9cbb20a1920cbef0fbec072adc9dddc49ccf7fc29959fcfcacc5f6b153971ca198add9c4cd85e2a65ab98997d454391d2f749e9a4e1c943ae6913482945ee9bf472ca0871ce523f0ba1d8859c6c818419c28319c2f6fb9c93cef09b1208fdc311275ae6f3e8d9420b11fc9003268cc0810e3ea83332b05f096c3f6abadaf43ee050031345846ea0a4a6c5163920b1aa9ac97af2b388112caa85996110b8267810032d5050821b28d15a2401a3864aa5048b5906465ea0918d61beb530df3e969989978cc475151964b0032890004108a08052a4c5cb12e7bc65f2e774b4ea08cfdd391c0782d00e8e1022a449932037f880129fb46e9cc9a562f90eac1a3ba0eeeeb47aa54e3fea5e53a3d619daea465edbba60b5aa3fcb73b26a54356a983e8e6bd468154f989a1aaa9adb5d6bb0ac7ddb014cafe4374c0f2490eaaae6e7c24425a626afe87729140c7f29e60398ae28037775aee3a8a17ac148a5c3cd9e20746881152888662ecf72b01285c662b9bc7bb8c8349c157af48db7b6398e2e0f3902d410687b4a431680501cadea2de45868c1112b7379a60309aef85e85688a208b1f267280b7a6ff27c32eacc8c5d42cfb49863038b03fd3218bcb8fd38515718c8436aed2374969e107b7f083bbdbd4965bda2b49e0e363c50a3b0b94c8458985d9b6d0dafb5df1542c6943be48628982bc1c8a2e7c40b0240a3aa40df952d52a51e78afc53e41dfea580b8a7290e47afe67d6f4d16d75026a269b43d331017579f0620ae09c4c52515e43d29a0fef639f5b79a339f72685d3abadbbbb1b8341429322384fd1b455ab1fe2cf4cdfbcae97c196dbff814c757ec7267c1dcc0ce70904e53cccec3952bfedccfdd6f3fdb614732b37f77f76f2918dc6782a8514376cfcccc3e7d4e4ecdf7140cce753f6b789793ed3dae9442bfd7eedd6f5bc7b77d6cdf0901b364c992c5e5a59a3ec79ccffc0053c5d678a497e14efaf1e8af0ee432c967ca85ef76ef3eb855f472ea74807ee6f0c4839fb714df9a5ec9c7f22c43313333a9494430fdfa2df8cff0c77cff1afe7092f383afe0f7e929ae531f4a86620d18941d3712d755da711b976ad796aa1ff879c1897f3e10d789c39302e8fbe7bc873a4e38cc711d7f1a9e3804daaca9bbade0d9435252ddb8ce27370b2bb44a078eaf6a4a95fa94c95733453b6e4bb1aba6da4553f5033fdd2d8329785c6af6a5b0d13479abfe26bfbf070ba915ba140e4be52b293bd597949ae3e8ea52ec846cca6b7471ede2520a3d6c2ae7c81a19ba4c54e96cb7824acc4589fdfb2ec32b56bec79c8393e1fae077e1bbf3eff6dbeb68fa95df5df82617c2481b73c4feec55c51559ec49946072a404931958f3e533c05bfeed050c0e2c0e4f5c0e45ab6a95f4c28aaa2bbb7b4776dd1e8d1485d4dd7d6f241a8b446318cb759845a56a154da95ae5c4fad3bf514250bbaacb519828713d67a94073f9815c0e45b93d155281e67645322a8400c6e25296bc3527d84a29d4a0c08a31b691bf3df5ea274b3b686ec7a94073bb7012b18dfc1d3497db42b12b7d27bda780485fd32dd2cb7f528a146572245aabe453219a444953ab4c608d33c2072b329671649dbef11667ed2967becf909b4f43202e7ef9d65b2bc86c9b184787557239b44e7f9cf061e68915edcdc1882bb23e6be516de1029a91866fbe4d7c111f172fae68319929dd3373148cb52422b892611f92094cb55ba06eadb59932585d122be303f1c38aa1a55cdf4ee64c75b8b08c3a55e4905f09a6addb68de3ba8e440273b0b4cff4a14a251c8983033c623d10e4f66cc76d955a3badf5b6d2320cd49bd8c62349ee1653779decbaaeeb5e42b8cf5d0e450ab8f48b76046ba3b740a3918528324e581134028dbc253934ba52071f5c1124026b1294d997d7451230186bff1200c5592ccbe4d1141e2c1cbf344dce071e97e0a10ec0e1adfb71e0a464a02c1ff0a0c4913783c4724a05f4253e73162b984d268e63b50a07070e8b007dc321cb5bd225fb07320a4cca7ed665e1f4e36895ec968f43e6105661e29de4b3ab5bb20e56f8c02659c43b4996cc5c92347c25dc6651cee03119149a8c248b618232f6d8139a5bc2edf75437696c061a639b0bc85beac28a24a1a6b12b9264a41849e836ea8989142391c01a8d01c8353235abfbc3085d1bc25480f25c910347d787b52f72e0a804c58242ad028d00653042582904cafa86a5d3a0be09a25b26fac4f33870745dbac0a51da90a509eeb43056b7f06a6520a34864c99607c76ce486145fb3f313ee61cd35f2f2bc48a76c6769cd427a9ba205fb5773505e43d2925e485711c4797f75d90fbab063673671db2222a08753d1a8a10b8d353a7dba538999e91b36786ace8af2f56020ebcaec1f4cf0de1f2
db17174b048aac96cb6f83d8aef54de9f24f2a24f5f95df7e484e97a76b7c90a1bc3bced35752927375197fb6060eee42c37396f4d4ae7ec50301e5a5bdf9dd6b9c35bd5d1975c959e970f443526a1948f9373fa4fc93633332b6ebf718e1c78dc9e5fe92e4709eae29aa23419a334795d93e48119a92ed7aa269e7cbeb39e710a2838e426010a0230a20a2a6450c4072080c0c58f7745bbb3822e824c41046589902a4ff8304509e797673b1cb9a27d26df083c8927f1249ec493789224d43d367b66cfec993db367f6502a2bed381a4c1c92c522008ed7f8a76af838f522be117424eef26f95eb1bef1eef93cf82bee1927b915acc4aac16b31233f530e79cd3a547f2d399eb734ef7e639e79cf374800981eba1c8ccfcfd203233b7cc3489cccc95c2d862b52ab5cb5f63607e902e732194122b7f86f0fd40ebf6b2d51ac4792497cffb26fda1a9d2f3046d4bc9bafb0c6a09ba5c612340f958f9b4ba7fdf0fd5fdfbc13d4e64667ee99cf3117ceecc4f6b0cd08a64b6c1ca9fef3f3f4577f7191b2c3333cf394ddff78305b9b0f22733b33723d3a7190ff571894351a78652c2fca7520afccd2894084cb4f064a007821ed1d090515111d366664e28130d0c1a1931200e64cc785a2bb5018c1e4c3221ad750912184368cc92999308ee5486c42041d1d4b85319128344841a77283148667c8a04a732c47d6290a442127c88fbc4201981045aeb929821146609092a1ed56afa083e73041f1fc1a7041aee3e34a6af7ce6cac7573e356c9cda80ca2c317916356c6cb0a80c8162820936583ec4a1d860d13abd2e3159107c7905214908e3edaf2d24fb5e32590c53ffc834ac878675ac837ae820999917cfcccc0965ea284ddb19471a18586864c4b014565e3132fa8a8888c0ef0504c18f26930dd57aa8bd18bd18d13ef0fb3e1db8d5f17c60d3684deb789ad6f1803410ec785e40238484c0ef05049fc462140682b0d70b0cfaf969dad75080df4ba7334269035e020d195af5020dbaad92504f5254c3c69bd6342c6c8f1568dfcbc606ab691d0e1d4f7f6703b75a48a8d3412603b500c1ef05146a235eaebc18bd18815040192833c18417231b2c2664336ebdbc804da3b09797ae48c7f37ddf1127000e0e08b6a9694d039b0676adf6a0629dd027dc903702c0df91ef05ca5e577eaba334ad69348431081ff77d9ccc4cddb6d34ffd39a16afd41a168644c2273462363c694193fe55361ddb64a03d39299534dddb69f1d4c446a8c1f140d00a49c4224c60e32668c20e51422317600c00852ce62ec900a4558cd2944664f8c1d44a8594d22b327c60eaa55ddb69f18442a8c9f55091e2881864f558fab7aa6aaa7868d943d363e69f4388d9e49a3c7041b7566514f3f3393c8041b27dc984264c6629d70631299b3136ed4cde7f6f326d3f77d5f105a69a54b5c423c29e7d4d171e1232905c1747727e203c66ef3d02294e7e3be8f8b8111c3446b910c11d1913432a272a836337342996860d0c888e16af40421c4683494321bc85a6a3d8ca30cfa913f8c442826633327115c50346d03477960486023570557e3a6e0568fdc15e338cee8f153245098f7f8e21932fac07c602d3524e3081b023bf2f279c1c6ad080909df065ef0d512a318c9332e19615cad079eb11bc16784114a1328f29c56f5f068cc6de5a3c347c6c20a3e341a04832191ad6f1c6145a8a458aa2526cfa20667638345613da4472a845b9487a76df8be6f04a98c634b8df2388f09269452e0a8685573b521a3106e7135f1b301b615cd0ce16acec1a08cb4ceb1a62cb7841bbd456173848d3f58bcc9c43ae1841b1f94ef4600680f14469370ebfba8083e28230a8c8555d41f2bdbe5f27649214f32dbf405e2320627d520836c759bdbe461e6daf975be7ab84de684c735e24becf67b1e9456b5e7d550312699194974fa9146b2a8e88492351a0a4523c3442363068c192fb1cc68f129af16ca8468201483b164e654f30383f10ea6183f41281a00b81c79b223220000c711bc2abc9a3705b73821ef8adb4292133a4a7142a1082b793439a117c8c37142f3f572a98950b39ae0c8d3f332dd49db41058e2b707c610231e408a304b4815509f2c8abc92c1e28814697d3433555356c5858a18766e392068d49a3a4a347268f6c5c797483d3cf0c4864e57b2678351b27dc9047dc119c9057ebe754c02d4fc6deecf6739b2e383a38c5a526f396b1bc5aabfa59a5143c2a5ad55eed841be01070048570cbab8136c8a3492352f3963c3af26ab78f50b313c0f1864f70f4553f387a4b1e1d1d81e38e19b7c71900d91b01e800e090029059e411affa81c02d70044721e038b3f245701400ca7d4689873f7d192fb194161eb394a1f3e460d902d6e5c98fced4895dedea6e611777c0f33eb65205d495547852c6e5d9deaacdea9fb91e5a98641b3a735197bf55f3ac1bf95faffafbe6747906f596935dc62d6b9fdd0b3462ebd691bcafd
83a5d61f254b9f8abc664d67e4daa4a3a5244f66b167f59999afc850a4f98a02bac3517edcb10c7456b7fbc20e0b1d66a2ddbb73fd94a97e2af5a1b81360ee2b858bd56292314a454a5db882e1b11c7b57382bfeaa7a8e0160a22e4a6ec7291db401bb33e2e9230fb618bfc0bfad6851db0b4f0ce9beddb4b3d172ed99f7963bf8542dd9aad14234bb2f5711d4b8bc5624060f66f70196bf2d846b51e6c2724c5c80af2d85cb245dd6dc85fa32b96ed8ff6a7e6afb12ed927fe924148c27e8ff628db5a1e6beca7c0f92e1bb33ef6c7d2ead2bf7dfbd2b7187511a2706de333d5101b72dcc3c897e00b4c3852f932f3031e258d2699482019b4a3878f9e997715cffaf1eac8fcf62a401fe08e79e3bfc27dbf60a53f1506060606e6c76ec2f9a53e56adfe05c4e1d8e347963288a62e97e8ee8f6d77058ff74a59651569be306e9d8041ae2ccb0f1d6d6fdb567b03bbeb46e5bb4e185d8e53734b390112495c5bf9f625cddf5a3b24546d8a6bd93d553c384c820d4b7d36d9e5a2f5509c8101fbe760a2f7a9f4e8e3f88dc5b12ba0f8e8e1af5187ad651dbe80fbf673ecc0d12e6ab78fa9733b47bb00b3fd1d5367c2be99fd9e2b7e70fd16382e5afb2b6cc038aea3a31bc56cc11a1e5b0987bfecdba780eb489b442d9ea18cb8b619fb3e7727e72f60c3dca660bc8bba2814eaa65029ee2f57ab17830798e7543d68a6654ec6704b49c12c73bf7ace639c3003c64e7ee12f16ee735f6b707f514df0ad0f63f5a9af7f024763f5dc77e1b82377ff02f6bc3c067bf0e3898201825b2a386ea9205f5e7ec3eeaf50ad1aa98c7a54c8822aeccea19fca9d6f17f6a099fb1407935127c0802d5408a37e535f6604f886306082603b6b9cdb2db44c47f8be5fb75a7fbb9755698440431418f4391c7fd1dfee9db1f5acf001cb980f17e907c1313f6f680c88e3226585ae5ba5943dea2637d4c6a25d744fff3916b6b7976b18709419e6b99d1d9d56ad1fb8e3a2e1ef47978c16e18e8b9487d6b75767aa803d12f61b8cfeec47a552a99ea37d8fbf62bef516465fca6c9ebefc993a354f5fd2a68e084fbf6fc479b55abdeb65646454c8a8909199a1a1990169a6915e0b3436a618ed220d5f845701fc9a6fdd60f1007800843d2cfe078c35857a3b72cc6ea68a8f7601660aa3dfb3a78ef7321fa3e2e9539affbe82df576ba5a1f9efdb8bb1e2fbfea3f97e05c8a36968c29d185fe85e0c15a81e34cf3ccaabc1802e5dbae4996fcf01302a409a67c0560d140b308fface1ced58803d2c7e46cf086f63ded00e840182653ee6e9db842d15e60824d47c4dd80116ef8feadee54260d6d3efeb4160995678fda67ecce36781ed624c485dac35dc1ec684013bbf80e02897b4403d1083adfaada7b1180c045b35b421c2d73c006826308ac1a68782fded3b9045e7d07f50722efa3b74ee205f578d11409c019a6fc8428689e19145a62f025803b6401bf3863eab09297bfa94d27eca94c6fd50e1333c00919032832cb52465e614534285a41c71f29226c87c3424c89020f70042a6d80a9430a4070f860fccd061cbee96dd56f67c49dd1fcb981358ce6c4ccaa48fa44570dce7c846d5d4c8e0484d48132260f22925080210cc41f90210183785d881101d92830e866a3741f9020f503cc1383b3b5ada5c20850c42d7ca0fb52030176a8e40252d91c128d54db0a7938e3cf104cc85991e7c5366dd151b9888794088024815245bd5800351d73c5003a617214722800122c4034b2e04891630c11c6c262d3039040a2a46300777a8c0da0a0b74688239b8d304e366ba09ce2e410465618239286946cc605b12488cf8c10fcc417ff202198cfba2189d4088a69b60436972d40473b02606306e05cdf7f50a118238cd408339b8a305e6824b7a04b7bd70d16b5e4e79e9c5bd7950f0885f5880b924754088f7abc161c99fd9bf9af952d662a2c0e3f713d45185b2fc727f353aaa50c61d5e1292e0f1fbfafbaf4a6288f0d83de11c3334444c0c100632c2dd9391bf466bd43de5277f75111725d9bfe3e2833b4cd43d61a318a0eec9a87b1a6380b2c700c5047531444cdbc72c652fc135babc9ad20fd8f86597ff3f6046d95b296c4fffc6f6614f1319dea07f4321af6728fb6f1e50f6ed67def80c639462b0d830a61663042321081e5d314626688fe6afd1328dde8ff7e394fdbd251e9325f0e8c2b14bfe92de8fabbb3f5c028f962977485df47ebc9f3006d87ff47e688cd512842c57d2269ac4600d6b32df27168771e4b1196bc9805b3756983733eadbf0850e98763ae09d1b3b2f62338adaea69852030b171430b0de8095c33c3f232b94097d63929d4d080b10c86ce71bc7a9abee328990c4714d1ea8996574d2ecadac87f06cf8c26e17cea95e5007b8d9158cc5573d195574a3082108109bb4cf306081ec5d84bae9662489c9ae27497c849d274e4f81cff4c396034136b47bbf809e7e70aa1c8a8548d3656410cd1
8c0804000400d314000020100c88844291382c20d444457d14000c78a44278581a4ab3288951180519848c31c4100000010240666a68462000ebe665e0446cf23d2e6a1efaec586a783271d83f00dade271851b2f580454f01a83dbd6d525bf8e45cac0428285b2ab7f5817d5dee8ce122ea3da2235c7cc6f6bb3eeeb5359514573c6631503fe45d8cbd014f192ef29e619e5799cc06850e93611d4cc197d7e42ae7a825684d77d2fe9ea1b18d7f53a14d0f7389a2018c500690bea0100159fb08d2429f1752c87ee67c5d87ed6c9bf888f97b45e6757e722ddc0f48034ee98b8b9ab7b6c651c1b126784300ffb624bb942dd07a560beb42fbaf0feed19a21a984359725207d89aeaf6e539e2c1f16dd4377c0a554bce3678ef593c0bf72c2e687961804101d5f06818d215e89b0d989346880d8cfdf222270db68d64aea144d9b56df0a022da91f75b5551b87e70037fe2922528c2654d68e05e6f9f26fe414ad8e7452f4e85a5334e803fabab7849c811d952a28908c6f9fe690d1686c827078c66d3088089d86cc7357d20a8ec4d1bd2ea873e3feadcf63fb2d82d15247c97b20e3dbc317af574269ecc28a3d1fe448bd2793027a0f593ec8cf1cf8ddcb87cf3cb3d4585422258fb79437f92ed722401bde5bd5b89cce02d39d553241215a4f66ff2a17f200dbbadcea1147db25ac2987b12dd88356102682bd91e8845bc27a6cf946384c34acdab58decee3490052103d1486c0303a15532b2747f189e42c69c07395645ef81258fd4a485a57abd462dc8551c130e37f7a0505e28978d0047f28842d2b233aa933cdc19ddf3d929e212e21b67283f0fda623a3b327746df6ea2faf529c752d102fa0bdd2a5e0c518f895039daf762a686efd3ee2856ddcb9183d2f57b17eaa7a492627fdaa743fbdd88e825f776224baebd683047c313b4eae69e04b2f9a972818c203d46a1d424e958f8e05339ce076ba9ca4638e08208a1fa9ccf31f9493468080a0d38c0ac8f167e891c8434c954948582e5df27185d054c8e5d04149d40c8007eb62ae947d0c8aba090b1098233516d791c197ee91dd636ef5c099b29f74a999da9a11b5c2004160598035815c29b652d709f92a324776fca477ea1810c2cdff3cab309e8bdca1620b038df73c1ff7516ab9520a8636f4f9203256e5c8d06e1ef1966cf6dbcea227956f692ace43b01f64e48ddb99f12b86b021bce6dbb5d146daa7c54b74b68b9957f6008275b05df108102c879a6b3a276edefe8ae41d5ea1d011c0a59a1c05f5046ae9d9bee2a8abc4970747c502a8d753a3487acfa8225e11bca47040f1fd30401a2fc8c3cd3796c06119ce506b23303cdeaeb410ae3a8b533e0d6696e848fe26b07bb0549b19ed6b66546458abd1f80557ff19bef01e55e12f9564dbd7a0d711c4d2b60e9915cd613f0b6994cd865b79a779fce211cf22ce02505b87ee0d382b11dac45c29bf7cd35713db9a1e536807a6627435934b174e5181c800f86ebad433a18b2dc423a2198ddcc70d73d7db99e17cc9000e78b780c4cd45a4b75587db4765ba7071d8ab147cfb7edd3b7c38515cdd0f200198dc8e31c0cc0b60230b23ebf3687a48c4bdeec73569e083a95446926b8c34711e0792cacffcd32d7f9ec7465709d6130d46996bcb7d908ddd9fbb70248f3ca3a70cf35a80b05c004fdff769d803c509bb0f7c8348329c34b107d681800aff2721e9a29148a1766e93cbfce0f422c667a8dbf865747c8231bd04c903de87f2bb0872d2b0407fcba625d5d39beb55bc53cd1f084c8b9117dd6d961b613f0c5d93a24eee60de1caee5a0bb600ad13b592fe76495d5d1f927461e40a4049fe9512b538012c8c3d2224db4e49b655db12df9a1346208acab710d90dc1c64e4fe97516221ebb69de0948995684705fc978a95a4cd3e38d4d0f77460e96ebc6e4e68bc6d059f00bbcb100c4189dcd7e271974cc7eb38ce8232218a53b24bbe8cc71b63148255ab50cf403482642dfe4b9f5229a765ecbcbf639fc02de0b67b69742a40c918b1c60420475e5b313fc1b7ccf9cd206f30eecdfeae2112e63191b56dd30f80acb282284210835453926e0d40660ea3e2046bc5258288c9ac00158d7fa4237c6e32986b896d34ed52ebf39628c29829cdf3216e22c5b0a19d6f73294a3bcd0e284b4d0717749f7c8b7f2a115252d364b3647e212ae6875c75d2252e8040b0a90c1019ec7e31acaabae7391f0c1ddae55aac98c87f6e654875ee9cf9c416ca8470676eb8a1ce5b9a6a0f1360a5e016b9fe26e590cb2459f0b805bd32699771e20ebe96de32dbe41c1c649893278fe66ae129a8c38fa26436169859660077957d51d54b6118aed293aabe4afe11d5b19c114ba51ddfa22d350fc26d04e1bc49269f8225f8f3869242b9c4f9bd9d559118f3cd899face4fd1c5dc92e233461ec50a34a5eaed4c9e7c208815ddbc51706e2d59217f16572074c56c3fa1a2eaada005ca5961a019620f2
895e2ab443f96ad59ff2a45f8141d16683cf3828ab256af1e1ba0dd01531fe3bdf8821b067b1dfb6d0741b8278fa8f1648690d0879b8b9af842864ce54deaa381eafde5f228674afaa3126162a0abab4e075e505095567c50b3cee74451f580e8779a5c59a3fa35102559bd927d183473f9cea4366158eb729b13966a11f0fc4e016bd3926fb31b02dac8554ba17d8879ffdb93ee9f1728a9a621506d9540d95bcf912091000489c70cd37f740dd23f12a3c70dff3f15521ab11464553d2a64cddbc8394976bbe47a4a1b39e3bb4981a7cb7dd7ae3e3bd90651d0453e80df5e61c4c22d6c70a679186914d3c8a272728a714cc0ac129b0b47297b032bc2cac3ca0ce4db12dbb985aa2f702dedb8fabde69bf6fff69bd0a825ed218a58e82a65c0b38e1726e0fb4ce9f6c8751a2822b843e2868c7e664732b57295ea289dcc366ac064c09c616ec240f6c3192f7d6bf7652b42d990dda274624e4c82e2fcc41b5b1c7736bd673de0e3d647229275234e26d757d3d6a347c25d4774b4b1035421a476bda84197a9df7bf97893d13ea5501ae7183e3817147c905641211174083b45a8c927032b6b63ff4f7ca38ef98ee7e431986fc33b3a5bff1f6550f3e26fbfe44bed82a61c35993e7d1eb5597ab201395ebbbd5723ab69d13513314e8455a4ff740712f18357100075417eb137b7f67ec7a93422b49ce18e0fdd0d6e393d8c4d7d5895123d769f1337bdbf8ca0f70e26eb0d625f8ea1387c8db2ebbe50928cbfdbe116718b81a25ce74d48e20e0dfcd6130a06d4c6886527ea3de5fb75a69c91d3255e49b6c770989abbb3bfaaaf6f18473493f7d61ed3a074c1245db5e937b774cec8b43cc3b3cd3bde6701e425eb7bff99e071bcf5818ad8b07bdaa55c28886617c1b7df2e36130c2bf9fdcf5b4ddee0727e06c73f2bf7003a13acd924fe835437b13b46189f7e7118ab3c911dae6415b20e6b9c6c16b093ae9b86882206ddea4f535b925b84d90a87a344d7bee49121358d3ae319e69aef4eae9e72769b1e7a32f2be4c94e6757d24b88dd6749fab3c8d18cf80109967e09a3436441c439aeb2f42273622a30270fed77c3d7316665a701f7ef69a89f521e2e2872d1972f3acc7d4ea2bb598e1e616a829ff84166b971b0911be397f7d91e480b8b14f02faa0aaa81f56acff0fac3e879d9d6bb1a0b4d259412c733f666445f84bd277156b212bca195ddc2f1bc4ba4cd093a32605a4753f8d388fd32fdeec75da9a76931917c960e0666aa8bec45aa4ac2df3461971d1e0d6b9825826582cd57e9295c8aadaab65a89a438b9a6f9b24726fd8bf9b04a7da986beda48d70d1ebd2803f299b904aa09e75fcf18b7d26d3b3d80a855bf35383bf195876018183d9667d49b76fe0d311ec33482b4c489af6907d5cc33d94b92987de2f3018f2272de368a489f0adf74a5cb8ea43c81fe849f2f973792dfcb741eef1e37edb0fe19fe2d45f3e0fdfc20b755a83d9238346ecd9f8937f9963b17a06ae164b3abcef8122e0dd853ce65548605d2605dd1f648378e2fc4438a27d49851ec38803e7d05ce3816f5380ca0c544137e20b0b0c2c7c8ba24c0c6fa5ea22a4ccc3d4582425580eea79a69850cd4bd629e1a208319e338ccb0fcad8a2cebd56889d615368705cffbef86cb900488c9492f1b9d9283e1f62747021db74e273005563c0c8544ee67b706368b4ea69bf6022f331096831ef985e304ac9b213a73167f54ca6a7c72d1f3b6e3157007ec9e4117560ff5ba75030db64247fd9c799ac43b19eaa7bfa31bac931712061d94c4edf4a9cf410b331fbcc218a1a7bf58e4052744d70ac416c2ee4322cc87277cf8644306c4c4a16318570d42f4a596e74b06c5ca9b74ed1febaa6c847b120c168408cd76ad615a781e14db87e3db94901985bb820b01e1fb90ad184cf0e415f430456f3be8abe37ce4659d412251c949a6ffd0a845f67ec5a872fd522b5f2667d00352692ef9a48d0a732129b4896fd67355d75fd918ecb57342407e2d4c3d694a7f61a00c11b4d1080bf9ce5399946e86f846ac7284e8c599e0371b09eb1eec3906efd5fdf80d5192568e2d26044df6e0601fe0f83a47ad505c9f1f30489f8523b38472fe2d33fc7be2f4524bb07dd542aa85a8481ff6e8e5a255f401e1be0ba7786f87d38a9fd9e8e4d5c22f24eddb129118a60fa07493ac5e01bc4ca8b511c5823937fb6a6dfe0834b338256f5fc9563d5bb73eb20670083189c744c130dd47ce53c122272833865990fa16e8819a166443b1093dae859164bee73d338c6b605fd1424722a632c37f59f62d04b30ca7436cd292e6f281d8dd0e2dd2d8bcb02e6de83b942097302239f0ba4b2e04e58e1196c60b2dbed24ba088dc073c59ffcac25821912fff94142767a916b48425f7e141132d11c129b481eec022b5124db73927986d13609a9f05b4424606f68ef911617cd17e181efee72f49eae511379ba3748ea918b9d971b0cb85e4c6f
0b7bd1764c504d737aace1af9f153791a6afade8c4586d1e30ebcc797b628349400139a3fdadc43cc1b905245684df043c7b1f29cc8da21f03c4e11e5fc63745bd5d24b67c1f87b58b090205127adc78908c555a9f7cb72743d939ae9611c912a7e54795c971f39f903870a825f1c2997f5ae000988bb8a1f78882d5baddd7e6688111affb1ce759ae5d7db86d3519cb6920dad8ea66d41651ff44421ab76f6a8f97a43a739c792f009c89739c050489244b43e59e25620de7ce00d4ac6573d15779f283f8a36c18ae48184394e28d2271dbf27bb124776c07879dc7bf71a440cf75c7fb18a0ca24f3342daf994e973a2825375b732421e3879270e45a1aa9b5168eda945953ddab0e66989aa135345b9bdc53df7d0ea84ea798f92f57d007069a6df186bc24418dbed596d1b19483975a9ccfcb8f262b0cf519ec6611c5a411a2fa2a6e228c53e3443db6df4dbe7d79174ac7a6d519425ad7dffa8aff5982494339a75bd9c18d589ad24f1955ce1d67a29a388099a8f8b15929bff0d93cc31552cdca6bdee050972211b9d88c6631630c4cdd51171d46aa6b86ece4dbbeb48ab5a7876a7b2a5be28de1fd0cecf7b077f1d8fc9ce2c83d640a49212687ca89d9240551f235a6ece0848e5cb7229594b53791c47e5d73e686e219b2d6467a1ea85c02acef46c745d3402db354055ca95a13c511aa0e7ad8f517cdf8730db68df23cc991e27800041ce890351f0d5969f851f773676d582048235416769f0caf72353c65c1f58f793e7025a805ed531538cc98831c72c26835e2e810f20d836ce39f165b71aea137863d99c3e73ac81c7868104e84a88570514d98360edea3dc3b5246cf91a2faf0b328a2193b7dfc650949a378e50a2b809c5971cbb18acd79c8d98e77508cb3fe17dcde9d97aaa24267a62ea3060fc7ddcaa08c876c02c1c826ac42871d7193c122869431fc0b59e0edb911b58402f10351d396d14d739bffffa2a56b377419f95d41f280c330f5fb33d06bcfa9b58aab62806524dd217a4f243a97ad4ee9274e87d04fadc41c0c477e9f9ff780a40f563d297267ad58a6fb651fe721ae5df3b4f06b323a7e2528d4090fb6c187b4e6232b72dd5c1e1527f0e1bcdf73272b1f3d38af51302c7780a2f2481e8598182ecddeba1843f13a0f607311f73d2321d260672bef3fa1726eea02f22c7861bdebb4600904a57f6339cf602647481c2d7724a8faf59be7788f2bd54c903a70c1e2f30c07b72ea14a28e9e2b852b9a993e58651952448def33c4226d288f2d7c885be765ea857a145c705a9048822b8f8db8242766a3100f0593f07adc38212000e68cf2c4e17d6ace956c0e5f7e4fc6296ab1503471a6b455791b1dbd391b2541fe6c46ef2bf8b8448f62898792e55778d28fb66bf2128ae9cb07f5cc796f3bb370482117bce36381c5c18b11a8dbc12d5013602f8f4da83ac33365f0429dec6cc193e0bb24f9867a00b9a2fa8f83db099233d0b44a7eed09854eee5dffd73f15e7c5c6f64dc9dd4b798e355902edfb78529186ca447db98f543edfd1a3ce99057aab104e5f3d2b70512fa4345c706874325f04ae7427f758ef43eda92e33ec5925e52644fd9d110e34945fd02b14e9275a1640f0b215414598128552310eeaa903c7a099e8547acd728e16e457f7689cd3f5ea981cf5bed5e5035566c30f1d14d55ced075faa6bd25d44d878fb86f0614790926ab33060f331f95ca0b0fd51e88a78adfc6d5556fc6c8000dc56cfecaa6ef4256b745f610382f5edceaa5b668cc7310988e64f076d98a1d496b675885250c24afd9fa034117d6351006211910980180154efef8abdd971dddaf3245ec1cfbe59925f67ad7644919b71bc25a7dbb03f490dcb26d0f42c9d0ea5c39aaf5889b265ca45cbd5cb0d258ad26ec1766018e62d8ee30c48f62522fee4d008065e7498da07a5b14695b1e2e36fdaa254e8949cd900bd3fcf6fd64b5c00807f1db6e67eea235a553a10fcdbdf5ddff86cdb5b7ac7ac3da409aa4ff06f4f6cf2e2184d6fb4f7dc2429dbda0a9fea2aa921a16e475cad40276bb9b11f0a0d74de645bd8d37b74611143b99545717d5965c3857da467d7e59da7d1d8251061d21b70c006063623f494d017ebee2d6d2feaba2c24562a0856da5f2c5436e4332b4906acaa8731b42329d49c5a96997d582ad4776ab835e4192348c7a56a5287a54f4a98e8f08be228cd228626a7054af6d1a8fd98a4a81b7cf350e8103d2c7e9a191642b00748abddcd19e760bb54a17d7e9ec251ecd8f1e9b0f17ed1f15fb2840ab19b2c099b557e0afeee731cf5c779014a2f1b7065be32a0ae10e3bc51207d11ffcd3045d9fa7c83733a0176e288d0ed7c8f4040052450a9d953f0a56e6f5d394d9bedaf203d0c1b32d9d072799319dd51f69515667bb08057567a50b716c2c61c03f2c88ade2ad640fc080f11795dc8205b07ee33f33a94324a2b409e42848e774ef38110362dc6c40b0c9b2b0c
8e451518c0be1f357127eae9e7ebff1e95a4e3828f6f0f569630c3a1c2ed746a3aebcf98cff4700bfb747d154d889689c38dc4f44939abdf096e2839ba633fb5b115abba4571bf08f630d02eede69e8fa0719a75cef2b640bf0d9fbfc2134b9e0a4a12b008140856693ffe5b40bc7400033e06cbe14a5cd917399a7a97ff17a2298c4acd888ee5eef178e8e98d0661939a2555ad1c84399386b126ba084b3680fa0b8f26308b339f46a26f288c13120c969beb3462dfd6a39203cabab92af58ad3ca75f071208409df02fde48ab905aede74db1130bb0b4900b315ce0120e2fa5d1b085c46473fb3b81fcb16fa8a73d5b68d18ebc6d1f8d38a065f14257ccdb73adac21e8874f32c0825f03d0257e38b75ea752d53606461552d80b6340be4ef2e27969d096193c6f8aa000866227da48ec95f822b214b41dee3fc0582060c50beb32d947831df6810e07d1f5e4fcaa42c4b6d2a345d219fd95de876f5a945d6f4f5811aa2440cd935a0947c020f26832c7cc64022ca1944bfd4e5ea7a9651885d1a08421eb45b84e44be5cdd3cec898f17bcd90593fa695e1af369d0919988ce79737d5b281139c00f3bf8cd65cd7ff598b1528d6c05b244af0e03782a05357a49e35c0d32befc22068df0e650809a4a341735144cacfb819a35417535dea380c79d37597531ae8aca9090830d289f0901be3860539484618ae5f7763ac42c9a79ccacb66c2ebc4413dd6b5567397712f2fb99bf4b8a584a3fc71e0151b6c91a7b9414e0969f8d2971c4590d53a294687ad5cf3ca77ba96d7bd8db31f99b4ca1f8cd99bed0d4504c0134b3ca2952f71d5b4bfffab342da901558fa497f4a7ffd8814f71193603a91d71b1e075b3db347f5d6e5f6829a0c871c6863dbd0582c1f4938b492621d87506b48bfc95041cc51f8a25173242a8421f932f2d5806bf451c766fdccf2541905427095154df74a53b8e714ce559a85203d597c90a074a10ad4845081731065777505a9e81ac536962bb3b3cc0b7b521c6196541ea8941930266e87d280b655d1a0334cc4a3013abd933b096b352dd36b20e84eedfc38d442326000e069a52b1d976ca32d6a9ea7ec2239be7fe7096ffbdc5c14018b39fdf4726225ca38e693d3ee306e1f770999363ca079f8450267c56890cdd8e75a322731ae240e59cf31a9e7b7144f25cf74381361cdcceed60643051c61cd0537e3d87a6cf229e6608662050a85bb0b49431eb8930960ea17446733ea643d0704611dfb165445751734909cdc81a565a465a47b396d576094bdfcc96329f5126203aaebf30503469a50e035595f7c9381211c482951aacf23abb93180c3ce55f075d3ae9ed4f52342128081a1c7a2e2b244a273116d300a13cb3b1ed1f47e748b3bc3282d38e3d4b80f55d3c4ba317840c2b910d51ff530fd5de9cc55e6227ff634d3d326c9494eab34b093efb1666e879d76796cbe2497793e44e08cf1e652df10aa37dc04764a5262b2b91da1d1b7df9294be9169779824678d7c8d6cd17bbf5d20c42b1ab95f5dbf2c9eb33eb1b505ed95fbd33ee6b150be589d340b9b447110cca5dae5966a00bf1d752bc85abc8a335aec4b748dded4bcd0bcc0af2b97b9b04f2df1fcfc4249613a28bfc8471e48cd3369aed4511654fa84c004408d9fb262d9f1a3e71035cf399d60f17f81e2235e68512009c60d6f57ac47f88d0bb1b7f14ead3723a810cd96418b7d7bdea590567f96e5242bda035ee18eaf68f3a6b2c35e126c1f0bcfb35e071456307a30413be1ad64970b120b9c95c7c0a6907f664a8d5758ec178e31aadd0a4c8f9658b2c2fe1ed0a85b61ce010bc8abc883d97e00da474d2c71f01dbf7def97bae95f13a34ed88dbc48e09b8afa0a677eb6d62a40dfcdbde44be1707878342b15b62ee26c3a5a9d3ebe0047a12362788d3120312c82017c5112ac3362b22ab94a52b107765560d53ca7d9f18798163b03c0863b414dce7ffa145062e7da7c8608c9c1e6784de269b07ec1bc05915502bce745205138d6050011e80d2e5eb6171289adc816fbbae6cc032dcfa629b31628e02b63eb64b12430262fc2fab0075d3616e59d9ddc84a785b2fa0d96ca65b55fff82617ac6d03e59dbbcf02c8243d1d6b24b5c65f53cc760272b02067bfae9c5139f8c633a7b8273821496af7f0f0e923831c2b7bdb047fe7fe78f9dfe498bdfd23e044fc2a717d15a0eeb1b661ea9a6b3ab589a642df510f26849edcae0a9dd5f1bc48ee4f7e719f98f19c3cc8eadf4e22bb8d8740a0ddeffc0c7f12da0371560b4fc67f767517f47c0f8d2bb94f1286990ac79996158260d8540e3cffdad4f733e5985dcb29fbdbd18c2c2f4945ba5efabd97db8a0eb1583bd78c8c0838ec243b4070a318dd777ea358b0fee0f01b4a633994c83fd1d4431919fd865763351f973028f2bebdd241a6521b7238e6bdbd1cedb054786d0c2d01f16061841507d8a04a9e86aa746e25f7d9a48e7ad77bee11c1e7c41b6
0a390d500a7777cd893f86a8dce5f9e64db120e921e7aa2490a90c5f0ff65fac594731eaa3caeada74d1580e83c738d233497f86a7ffb978e6f092e69dbaf5d2f4a7a384221b65bd10ccb13e738ffa628862ff307d0e7bd7ece788279a77153156122befbcb5ffa3500db565400a984a18d2517263a342caec24646c0e580415c57753ba6716b9d00c7ecff5e7ca99769bd0c6c026853c4770139c8f40a49afd931444299e3b5936bb9b2c05c917fe24c80278f83e3d02a3ed5c94e2b2907557b4464b7e038bad6566cb0e790c80ac9b8400682dc3f680d6a770837537a496dd2f2ce9126d7ed23240132a157c0704fd7e8b74a711a2861f84ada316e3ec8a4181aa3d13976651277b21881102f171aa9a26746fb3220f82a2c0bc0528e10201cae826f64efa1dc0a7d58d3357a087f410704292272af85d466742645624c55669138d682c702b7b85456b0ce0a580d920b84fe0350a5c7ae77b43b71260d5afbb3f0ecabb7f1182227cffb0faf60fe0fa0787916544502ec0080f3aa2efd6eb1c3d3d726937c978eb8ca2745622671ee3a7c62121cef44099a10482e046d30fbbf52bf748fdbe786163397573869bbce1264fa86846e3264d376ee2c0a0c966d02c3dc0a0aea7bdad16a5d51baf3238a5f794cde08eec5151fa7a5614f1c120aa27de60fb4469632bd1a607a142da4dbc0f6a5f5c577611c9d65db7a9f7e2ea1e5feaf20657b898430584f55ddce1ba343d0c369dbc45cf31be69db351535326843f8df0710de2d19c28d4ab51e8d565a4f1d38b4e1eb07901d5c936e572af46350b49f610b718ce3e073b35b044fc5cd5f32d908d6fe923deb2ff064b8a21e95badf95300ca461f356520439bd485c590b260ac78308a52a868a794f6ba1d19a56467347a4e7e2e49efb3065ecbd4158762560f8a78bd286a2cae589d7647165d55d7916b0da25ab56197b217da76fd66cd610a2c8fee56276b927018fd8feba8865a422e681bbc6e0bbaef6c58dbc10e580db40294d67e15f63960785c4b35c3f125906a69bf50fc6955266503f1783246ef697e7b7e9ca3a18ff5ef114d40683308f5529664c74c02c93e16a10494fb4e2ce3a63f05280efdea1843425d740e99a29ba560ad74ef97528bd06ca5c47f96b51f03aca5fff38193ac835d68a83e2076798ec6e3bef1db81caa3e2ef1c505fff0a05a0f1232efb09aac6d1c440c9adedc7dc044880e3a64e802e90aac199f986e857efccbca46d82e932ed85fa9f0b867aba4ce1598f0226733a1d3806f845c5d63ab3df0b3cb08b9a2369bb1c19805e9658a34e4be8c984fda307a59af33c2a36786d37f19f40d935e2ec27540103e992d5e8d4204402f7b56730cc0c5ad7cbdea75c5fb08f8bc5c01ba5dfa2543931dc0e7e518c93983945cadfdaeeedf5203cb50a700038df3728dd05329f16b8f67433c88b35ae8a1d1aa3df3f95e3d87bc4c1918af38c3abb1262f73cfc8117f81ca36fe8fd51d42a7f8eaec602c1be0f6abdbe8670661b3d2e53ac986b792c751026154f5b482143c3abd65126c7c4266e2f90d4835014b6d93394ff62d2ab2715c8acaf8f9e938e0e26cb587d0c64e3e52ac728a178eaeed04d7d351e16d325e8962cba01e7b5407b9e652e2667333f7972a704edb68062ce0d0f59af293936b74f6c21c1ff08795304dbb8e65725c734e94bda027b5b1f27e17658c3b7c544182193193cb6e5157f64120f6e104408e16708ce3c926ef8a1d2697f71a347bc0c17a7f6e1c80761285c77078b3b6f534cc37744d2cc49f7ef1726ec4bd277957b87682f0649559b196cdceee9c92a3252f2ee5b11d48a18e83fb096211629aa0fb1f413503f9686e9e09a5ecca4cfdc1904d22fad2630e94964b6e8d9a3a2ce41498d4bcb0aee3d2a5b75910700ce718b46c99284f3b1d27b88e4b08525916bab874025f17dca841c642a8175796b603b56bb4acc94148322de9f807818169d37451837639704a81b1bb3900678ef07d3aed355bb73d5fb4c48304e231a20613a2d5c13361afd2601cda023e88928443819d16f122b456f7274e578d71a51259f0a285bca56634842e18f4370a3f4c42e51034da59e2dacd48e7073f0d5c7e39b61b12a1da7d318cefed375af90ebb56f8396afa201410d5b84612e3facbae4d367a841c54b4381264bb0628df383d9024d46c9f621c5a305b1252c80657ad2bfc3d7936054e19c3d27fd1480360f89db7a940ebd170d0a90aba53469383596c3343fe2821188b6623331aedb625461d630d5800cfc7c01c570699e75e00fb784ebfe355a4c7c7ad2eed68f52880ea5fa1dcb1edadfec10a939da3d6f2d537b5a016b76a59ecac1c0d8e0c7885ce2120d400ef9e955048bede0bdfcf211898e65bee4956bcba2299e18d58d2a0a60e75bc435d779054fdc6a4e59f790144dce41de521552ac286f24cd3aa6f26babac4b7bea29383a294321e9682772c4731945bc78ee2e4e7b53cbe01830d8a5
08fe9306aea68a7a486be5b122dbb42726428239e30bb8c15694bb15f4330202ad7c15ef75780cc13da6a6c3db7340f2805ab4d0d3295cd245ddc606fe64f9ff10b223ad7b699054d0511e6eafb6325012b25e6f3d4fb6fc3c035619a701e9f4a7fed5b2842078e3a28e329848a930d36295e1a252c9b052cb029577e145cf9ccb62717dee7f0211b5dfe330037f399f04a06723c9ee927ab2d999fee278180e620d9de4ae1482f585df8ff57f375a3bd72c9a624fdfcb07aa58791b92ad49693163cbe4196b74021812582569e5317fe5528ae404f85fb0195a8868f56e0182eefe9f2007fc9075f80d2551d1c5c2846302710761163532022a94e9ca8c9fa3375c0bcccdabf62c6cb063333aabd23f087273748b54ed70b3a5859935b22334b9aad75d470d7d1659db552679bd407fb31191c7dfad636daf6498c3b200b2f024376d5f45f6f7cdf5289719cc686af69692a299eb5875326918f96aa62f82da6f6d5d029f990fde936e84512ba3a040a023592c5f0d1dfa8996dede511ef65516c6d086674c2b0def0ad48249567e4d969e88c6dff218562ce34da261f091ad2eb8fec4d733ca3c4a087224f60ca91b61eb0372686de3af2b3c13e8db39e98539c243a8f4456f57c4a580441e221b080d066c62face9b1667b8962d1362ab149d2f165cf83f9de1cd2928bbe5475afd8ca1d2d915a83d87cd40fba74a42104b07e38f749ad61df8c6fd611a092733f9999db20722a9f57c141e483800e3c9c565b92eafa56fbd78ccf406a62c0c2024e3a1fde7355cd720ca859e933178c89c58b017acafdeec82ba012800d1ecbf7013ac7e067c68f568c27a6950e005505976e989d9b893804951800076fdcfaa3f376c4e211bc8e7edea121cd6086e52be79f2c5445bcbb6cf75194057554ec5296adc26c4aba4a95758ebe0573719e4afdcf058583561e331f8f376b1c6fd5763678dfb612e6d77a4106426e818136106ff5057abd2e3c6ed5386b5bf2fb3255b01edd9f244da1cd2031db335c019fde1d4d393b13600b2c7af62f0f596d673168c17ad77820b3bfd83a7fcbd4022ab49405f122998b8d6b995b485837cf659d3b807a09230e6a874016a8d3b1ce0ce99d28e7e1a693d6e1dfe9e97b7db0948bae1843e0799365654ed549bfa1226c58eca0b95c0abb8b97ce54b76d6d5051d74a1d25b35b708ac9c37ceab3d4711a1150892f9799845e0893c7ef583ff586c1c62d1cee85dcd6e26f17300121931f20337c9e4130dbe5285a674a736249304c010220bdb69a8fd822aa739c7d5613874f50645275eef160f3ac9741fa1e1f5fa16478553efdca7b292a88225b661cb8cbb0afad2768ee536ed52e0b5a11cc57aaaa24e6313502ac4f6d395af1116e786d1f1e3ac00366ddbd696e217d7947018f9608b21ecaff57a3b93487b8bd4b94b696183215d3f0eee8833d1b525b04305cce926a55b352a489929641ac2eb73f74124683f291f20217523c3130d40b3e8b9ffec42a91e7f41cf42db71fcad196b3305db0bab15cf51f4d33e743179698b17b3fbee906defdbfa18141a70f269d560b683e39d18f17d1afeac060b63e1496632d353779b3fd69d4d2ae62ab66495aac995afbe604c515fc6b0d3d0bfeb63f070a0a74b02157272cb5169a636acde43a81a30931716d26932b856b56d8d69f6d21617294d1fcca760c7cc04202f723b1c70676dea9a8305f5394c8485172dca2fc340a2324b4e03a5a7c7573628f55197f73e6257ed4ff0a0485d5a6b86b0b2100ce18f8a2ebe6d9add354945307dbe3056473b8a3521208840e68c4373d2c1800014344b83bd61a4c204c5bd36241d314f1d8fffa7a07475ecc275a923f6003a28721252409e20719724be63dc270129ccc0c6f81b0f1254bbfa88acd7eee2d337fda7d6481b6a61150aa49e305d873dda2bf6ad9811bf266024ebbc58eca404e2649cc68d6124ef5ca95a3d1e03a1f0d76446dc114411a228c9eeae2a2f231d46394059891d0e9e2e468ac816f7bb1fbb665d3d416d382f1123a0affcd35c19ad7636fd748172013e5a219243562dc4a8f8e48d41a5e9be0e8c6211879a474c795aec39505d6e3171cdd21fbec8f0e32745987d26a96e3a550f5ff91c11dd85b44b0340b18a81e51518a27372436a84e93cd80bba2df37cc824be62f05cefdc94071405356bc2001f9d2b277f4155dc5019c029d7e78ff5f307cc7241a0cd658bb68a6ab7f961423696457d39cca7124f6fe435f43edf4b9b7a1637bad167799fa1b14064aca735717c4b4048e31e9c5a29be950f6f9fbd84bce80efe7c11e626cb9ec4408f0dd4d08ea3ea6ccfea17210bfb9ed62cca3ee66e48365bb92339ae22983be17f74c5ecff37950cfb3e1f3e0ad32fd5892a881013a640189c2f7efa7bde3ecaab7dc7e43e21de7f664bc9900cf99e158b79f7c84ca612a51a46e809029125bb7cba52186f2dcaffae2c0db1e85e65ebfb975c7085144fe1ba711
7db36040586b351ca7a826ff905f207feabad8879f699afcfa4780769e566f11e5732b8fe7e525c43a3ea17d54e89707e2b16d2d5463b14e9d4805d393f2ed1a054ca7166452a45713f98b0283afbcf7321dd67325edfb78d1eb8cbc578a462c28081a6630493b000416f35491c8325e6e3b964231f785048cf8b19ee674c2994fcfe9289f65011b654913571ff25af79d145e8d37d2969a8059f5e8e9a97e8b9fcc9c7eeebcc00aab0586d4dc50d80d8d67d39c72c9fbe07c82af97aa068c6e57c84ee55d982e04d94e2f5f6204dc9b692a9c2b4bcd81c18c98ee44b4b67432252dd0dad6d3335fe6b985c3e171a2d485c13ca62adafc4bd0397f4abdecddafcfc640cc1aab69a6b9bdfeaab33254a012389564f36502cd68212c503e4849d5caf6ac72973b83b3fc9ba06be805cb72f3708367d9e80079c2ee591476a6d216b0518c4705b5f5201019edb77011ee16b4aa65335d3baa6283bd63e047aa78f77e41296aac56236bd4692d2f51f6b7a0a564790faedd74a44afeaf79fd9bd13de212cf910a6073fde9b2b73a3916b66473cc5e0264509b0809954363cfd552b5020bb0fd65b77b07afefe2dab30c03385a8e6b28136060006f646e4fa6ea2d113e084d7bc2003bf97815ad60715b3376572c437a34b32819a4d7d001645f07c56e6d61a00e529861cee6bfaf6f73c8ddadb1068cd8ec255d6eb94ed92acbf961059c6230d45b0699e9a085d18941db1b6d3c75c274cd56550eeb830ab9eef635acfed51b7ff1e29c47e0016d18032470b8e78f975b136f3b792b3df962a3c1996e28a601eb02c69b436393e5c3fb2807871cd3038c67400d5a976285ccc82cdcb0352b5e5e26722a4901cb931d3a9302e858bddee250518e6ec1d1440e8e9761968c5359a1e046a58e2d49d5b925f7c56a346d11ae9135a851bfd6ea2e09c69a1802a159aad09783dad565b8615b7f7e6d881b09c0a474836c7f56789a882440b6252f348d21e2a8c13c774e4c820e552080ae8b98d97bae180bccad8d11656192c52115c7900263a763362c81fb409a8fa2d89d504e0c254091632a5a42345466838958de497cfc4309177274f5e5a60438b1bd7f94434d709cdc21170df7d4ba0435b62285371c1ce7fd6835ba5043ed4a013a574d61eec8045542794fcbc079c165a739f1e5e47f9faae0691e8d7e9be4740a1366586221f7fb7beba61cb0fded34812d63d0efda82511c2ce2b3ddf8aea84a1969aa02d4c0cfa5caa0e66a8ee715fa51e866ab9c95abd83979b93665e3f22f13c708dcebaab03384d89e2dd574ac965951c7e1415bdbeb7ef85de9b7641ef0e0f351410666f703c1a51b54529f9e5f54ea7b4bd42ce2a7c80175d8b425487d0a5f28a28de25d70ebf268480319b12e92fbbb6f2fab9bf1ecdf0067341de42eb1407922bc6910f74d943b7d3735b551ae8bf2b83f07642da73d4247e266943c60064e60b8a21e4e94b9b022cf1d8596dc2387f5acf21d0c93c5985e0ec739f6d1c896f96de8abb6dc858d858e60cdb6bed0a9af35722fa030953fbeaa63f7351f7b5fe66878613c1d7cc9e0efebe56b5f24b5f6f51e472b4c077e08e42e4061a8f55b135a5b5fe75fb4b0c88dd48f0ecff8649d575d4b33e44b35f5b6fff408bc391f170827cbc92b185418ae796401840775b00de39bdb0280cb9857a848d73aa3a85d4799757bf6ff72291ff1fb9d1e29984f8454c5c396d5b465491ee32e74ae65e9f81570321d7e6687e291fecdf7af5b87f687870d940a28adc23af0e944c2cba54006bd15948746e2e1b770f2578a9501b9175509e23c6e37acd288e7c73f8d59e96a3e9e7804c7343892fabcf9ce97ee7423bf142ef2db4a14143a88d94dc873cf36470052a3575114b9d1bc372bf58f7af959f055291f5c4e07df9dd4226a57d24f7d7667defb8f562be1a1e17b14ea5102e2c082b2894f5c3699989270128bf2e0bfbec2d8a9aebd2ca9fccd0f682ceb7283d98497706d738eb3f8cfa5d4482bffd24e53961e579734c3b0863bfa959a81ea556ed176c1ebc5372ab6cc0ad1314e174780c87ce1348d8b33604e2864902771b06e771f76f3a01834c14673e951a56d2712c067a161cd9a1ed80b500aad41deb129a4afa19f2944700b1ee7d4c203a21c7526ad9abe307571d5b802e3960a455e9099152e167afa28b6edf9d2d18e04cea3c6d3b2648988134fe3fd4fb92581524648b410b5fff8c642c01e1ab5f8f5729c5d4c64bfbfd9b0215271c19ba86b3162afa018d4671cc4505ad23011f061d874596bdcf98bdedc3389887c9bbd44c0b01aa877f46c4ca860301fd717c2e8045014a8cf1ba555ad873d085bb11429ccb40add6580b7cb0ee8ac6a6e5303d42cac99a4ca9857c5774c209441ceea1a469b82beda36ccfcab7aeb8972bec34df4e6fa95081ec872d49f1024db5f7f3ba3585dfed2b0db2f11741e7dac86d564509952eb86c9cc230937be72793f3a3b4d404c3f9be63
2dd8da8a1a30f01977f9fd472f27192d0dca31ec0b0f31e539b0469782edf927e9e25d0088880c59f39388ea960b8ebb0264111c4eff602dd3860191562f93cc5677e380e3c0f03873fc9ce77d3c50571f76b675199d6b5713a97f9100decd588042be81be77969fe08e6b218f67a29b7ed1268b08c35a80c884e4e078d9b123abdf0fccd683dcc0950ccb21e3e25d36a97e7e2cc13476542cd7922076d239e694442d14fd5a87c9284194d74571477327b3711f7946f169e6a26a41eb4cd524166c73281d72ecb38166ecfd0bcf0ca485e30cb66a954fb081f7d36af4bbe1da022b31625543f73f58925ddb347a57ae7a2efdea877b5a083c404c6bceba6f676d76fa5dc008555e00ec0349abf1f90380453acc6f1e5658efedb8483a3aa184c50deb8276ce7ee02cb312b05ce863b6950ac7c398a41b5e609b11900666d1964b6ebef81bd1cf7b725ef9468a7a5364618f5fc99daf0982b8e5f93a1d90a3bc0a6f3ac57035aec70b97df1d81b6de19173b2a8aa715806d023c7f49cd8e135d90726a943b34951d336851f224b27c37ac0c6ed80182fe35f96c1ce5b0eb2c11f9910e1c88f29db44f61a7ff8a7c8a372f416bc100cb8f061e7650f0171458a85ed1a82d8469b08398322b916700dacab77abedd4ccd1a42518b062a53ec26c6c168d000218201b79ec24e6a2b33536910126417e11a1d299ffc692c40ca5572ab286e6af1ddc2d80a8350d94a4b44869fd681372f124adf8c3d9c6142d4a5964f082e3db8e8fc9ab81c65f5ea42aac52b6def9c1b92d45fc094dd87a00cf381af9eb7f0be6cfedb053bc515128a1b441770ec9e2ca908b0a99dfe70509abf9ef0f8084ee622c48445c7140cceb08472de10ec97cb1afe1b94c0c1d2e698939e2f3db372d0dcca67b0e723f0b0b4bd3b8be2e1e4bd25b97c4b2e39565b7c067b5a88dfa24b7080f4e26676f85f6dcdae2e1802f1f3347c42a113df0ae7812c1c811e888529245adeff6968b203d50086bef6f905afaf4070e23e91c21670bd47a25291886d16a7a918fab4e6821671149b153941ef252f9bc8391d3c6efe09c9e7c3b64a0e3e02325e07ff2f00e336ae0f613eae90111478943248ffdc7c9f467226827ed44b226ee99b47948c409bb370159a3a8aa2e7ce3587ee38ed1731cee9420eef1ebfe22d8be72603028e34a72572d01edb722f509e8cc81b2756aece90b0728604a5a3f6c1744761f28ba3301693c20cd2790a2cab6715817e86e763caf30e2fbd0db37378d61486a113989a441988244cc45d1eb8d5c34fa2096e4b95e31518370262b37069c62bc51e64955e4eaaaba4a373e2b8b0bb59c73428e56513356dd89734e525bbf05de22872594441be4deb7c955ae72cfb9c449f58b52a7673a9ce49dc50c2515bfcd33563da05e4e419786dad16a4bef85be143adf5e12f4b784c3ab8faf6db9d7be27c130c43bb8fa3bf6cd9f0c9fe25bca0128782555b17e481c63c816ebd4c4069b8c5dacda83fd41633c5facfe8cb14da760c56a0ecc67ef8809fa8a425ea9335b4bdaf407ec261801025105326f093faf8b63451dd4c4580616eba55bfd71476614ab3ace94260f1f74ab5fab42d87c1bba8ae0c23e4e1f5db3b0e2ec99fee82b80e64aa4fd37ba81bb916b53d90fad1a7c2637b3862f6b4010ae22cd9807af1ef9e4c76709d8f250e02bcd55cf5f234a7090dcbf0ed2361353bd9f5f3fe69f8293a9208f7d705efc626e8d369819e7bda417b175852b298d056d3b2927b1b36cb3c951462a1e648b32eeb2e998049ea680e8158a574f51accf1a2a4f2dc98879e0f998d429226ac533761d16e17f8b115d45ec80f7041eea81994480af2f0051065376c00306250509e482c74b9222144747fdeeb70ebf976961e90e252bb58ee958c8cb27c71a9a69bb0aab9952f1ccc88dd444a5a4a304ddc9e98aa596016c63d28e3f48518dfafc02963e5faabae96a1748659413f23c056f58d4eabfd1bff185f2efc4b54c61e0fb5d31b7e210ffa43b0b89f1584b1d5d0a88f23b4f26e14cea156a1912e5932c5472e9ad52ab224799f59ac4eca30396a05a5fadd2f1acb38171447f14ff2692c8d92a6a009f6948bdf0d8e6efe83be8a110103f70688e235eda15467165edb7e02c371768ee062d332a0c6be2a5342d10afb0f10f95a1f94feaa0e623ad81176ecb9ca18773e087a9922b4e6315b04f289268a2b2351c432aeedef0cf39298e8a977fa41b28eb36e610cfd4b48b9bc124119ee59805f02cd2a614a75025bbc1725e94e356ab2e04bab5c422f3968f9c80b50f6c05d5c36eafed867c78ca907af885371f1dff9d08e32136515d4207fd280c4247b37b1482855b77eadf3164e6f1a3570f4be45a5f0d71a914a53abcc0f7656e8eee3a87131a6bbac344b48135024b077bc5e4debd4224425fef7c26b26d86f54a3714f0a3e67088bf40ba6f9a41196a7295200e0574880b41ad5c553b019e8c8047b4a630760579
322444ad0ef3114870f91d3d6feecdec8a5e861291ce0d1d3f1db4c2057d1cd8972a3357d16e9589ab3da5262242f20fbe60637a83fe663336758e620d28d3a1704266679ae275c32319ae239b0b3160bca3cba1aee69a9d5279d26e2bc40a423427519e174ceeeca138c7a5f0ab36254ac4085dd9fa85a8cc80aa3f3e573ac2e813501e67f490d1ce42d9af0065d92bb69c59a49e4fdc91fbf0216c9155554756ad9d5c216c80a3ca3214abfe6776bcb21186287e4640bde9a23e0121dbee14426ea3993ccc7dce0a2706be85b60f006028c754e08d815113cec64684b9455654e87171b82699de68144fea653b866a2a20ac3bb419cceede45a7fafc925badcbcf9231ab44dc7237df0f75dcad6017713cdcc1cc6b6d6a8005c39cb45a60e15fef1f7e7c515da62ca2911e30bb5243d97af976cbe0d5cdb00b0a5a1a783e94bd4e845ddf02ff7d2530315679af89d37d8919e801972c13de59d155c3e8c38b10e442e8930b7fabd2ebb5decb1b0d239511b126922fa33fdc988502f4244ea8ea4c1d723085c2d90e527d6973de756580f15d94c981be8e22ab204b3e4e2df4db55d195f4eeec8fd5b0517f140cdf2b47b4c3ff2ac969a063d5e3ad7b62543001bed6e520707a8c776844a9615ddb4df7c8d7f5a1e2e8a7f4227053768f867f4ca53c5f0c6bafa3537877ce26117a606480cc81e38b3284574e735717da83679053c951eee27f5761a486cd62620f598e7c6745f8549a885ec5a0eb5a492ccd7a490e2544e965f10dcab8e4ab666c66848a445b29baca0594802aa0048a865650259440d1d01aba4049a8024a4013680d454149a802ad13f4e4f606640025e6c445b147313bea6544137c4411847774b3b422ee5fdba9da2b9f126882fef22e7091accd39021652d122353c0cde39d12218c6a0e5d381f111af1cea589ff5b0c066e69c8d4c1fc7fd67ed25f3317725f0205143babed27bab0f5539be90e40ab0d4922bd1a5a1065ae1edc163130f1a3164ea094205dc74a91382e4838b69eb38cc035f002d2b9dec032c3afb67c1d47ccfd48d7e1f34d246debd6b9ca5b2ce48edd25b2c6b7fbe2e007935c83e337ec9f4c4c8e89cf85d0a23a70c0d7702ba19409ff3dd93736acd44479bd2678c18bf72cde0256476571eae39749b8468b76106ecbeca04fad4371c99691f1615f5147db831f1b9f06aeab2839955500723ba1ab117e6601db96ba18ca1906363d8244489edbc998728ce603c546b5c2096f72d3eb5c5ecfbf58c492898c9f6c92c6703381dbdf55aa8c566a952a329a8ebe11798a5a0d12bcfac263a2a7b0ec410674fde24ac760aaae2fe61ca47ed91a35fda2224fb5800d21186122c19e5fd67a5891fa757b3d0a138f1960cd466ef0cab614e139e70200d1eb06556fda3de0156536b21154961bac56f84df4fcf9a38162a8fa2c88c21f48ae8d51637063a352e84f62e71740bd284aca43a4d668998f379ae42da24e221152432e10edb57bb2c2feaae13314a81a73faceee30938c4b8df51e86bda29f431240624a2574cd54d1aac39f30b78e164668ffaa9e7c36fcef91003c2f4028c3c0b9f7ba6bff0e6c6c5bf44e1c5ed828bfb4d1fd8a26d2b0ccbd7292fa1c6f1886c8b2670520cb35d83130d1e67d25882533d396c11af05a9a96964db22f3ab2f208bc5050ecca13d014698e510aba2d7ff852876d8b58fc0ce5d4f59e5d00cfb6846282a632f440d41f5571d348d40e985a11ba5b88ee272032f0a87e663e943799d6bd4cffa199b7559bbec320cccdb018bd02e1171657f56855cd4e481cfbf543cfc0cc7b0b73311b166f350c8078044cabcb448a03c9e2185dcada02f9c1eda8738cf0299f9c1db47f98197ba6c14cbce275a40356dffc8d5726efe9fc20ec2aaf481eb2515530ff4c76fbaf5bb395de5e8c2167a566c906c1260fb0a9dfbd8ef95882b6446474fe04ab573ae716110461c8b46ab8a35a5469ce7f7403107887d2b80a98705c065ed88a36fec5e2154d2a888880b342f911ae45b75db99250868263148f487a81c53b601f957ca9d4f89854827bc95dae670683e79da787afb3f91a10ef4399ed0331fa56430ed03c2c60fc67bf3b3f3196011e62addf2ff9427d06643aef90d0689e13be221e9a723949566b7d74c9da0129e16de6d34f79f8b75c7b59c1961228214d9a29e85cb29ecd9bc524928581cc475db277c134dbe2a5a6b95db593db23f9d4701e30ae680f7a6dde8750fe002af7b030d74cf04124b500806f26ce0bf962551318b2aebfe1cd0d6b4a30942754a6101bfc47a58315f079dd48e5e1a2780042365717e933b95ebb7fff5e582a516861f005b3dd55a70df19e3df9c004c4cab0e9752352591bce830067dea94fff261e3f0189d0d66103cdd80e24f419a15b2240e6795f25bbbc57b0f68733556b586e433bdd4894b4a1fe238aa01627fa064f53c8fc4af4b2e5cf6bc9ac12b69f914cca65294b7071d0730529b81143
793be3922c091295264f10b2814391ddd22ec26802cae596e1e729659b54b95c444c71adeb85131cf9a3919aadf36d8aa55802082d47749ceb53011bc0e807b967cf9a1a4e78112d8aeff6528f5b5da06c93a41e996f28b6fe68447466b6c0e7579f646e8b89f12cb194ed629381b4751b350a76653d28063a2141784ed2fd72799a6930503ccb2723ee12b7fcb1d4c14c78a160e6b0bc4b25f0264100da43d5414a6a6e105deb1aa666a4ad8dc190f018618143a9396566c723f1d76a180a29221470a0d890cbd2c2aa9234053fb88a40a77bee82c4c6a7706aeb4050f9b692043a8cf01bbb8d9eafaa805115557d7b13f8113b70f83b7918d2e1915900c5e6d61cfb4fb4ac3cffe7b9a336ab3c7ca22816bd2fd74afaf8586d9ee0890f8c1ae9bf1eccfa7ca9377293a1a738a92dc13772a4770c5f4a404ea9ea3fd09f1155cd858b76b0ceb0fb688b9b6c861bc20e7377e9752c1aebb5ec090a2e2bfef2d02ac58b75cfff436d73b6bd108dd0e2b2f718145370bc51541f980032c23ec39a44164cbd3ab41863dc44b4145ba7105e1030d85d650768f348c3dea3f66a78c6878c7446ce8184d9d0483f4b01ae4bf71089c7a5b7ce73be6c2bc05c62ad43710d1d17ccf46f0ffd0e68162d96fd5c9283ff2db8009295ce308b91d751b844b2a339f82cf893df86e338a572d1f86287adeaa764527eaa571905685961522fce4558805886e816bf3ef25412db1a309395a5268d49ccf24a4ef53221f7ca3e6cd0652116d55e51ef2135b2461a6efa70e84528450f08bab96d103b8c67182f489f765c806bc04234e04bbad163e80d42e0f1e5fc548cc58f5a109a97a744a145839e1db1cc6b5832b7048006d6142be208e623983d9aa28f83a566010c80f8b390bfe862a1e7ac520ef1c0387a8fa54d093b325dbfb15deeb0bd9ab4821bc009a674996815c1ab1f3a257f6ec79878001dec21627cb15da58b3606a7b239ce9070778a7b48269cbaf54b9ad87c7e809e6a686905e3d758947f79d1e3297ec836833ff9bd374bfc6629b0cc01b8c472bc0da8dc65ac06dcc7ba2f9c6bb7e6934c2704917e4fd211cda28b02c8bb70d2d5ecdeee422e1cbe18230f21758a4f3b3a430011896e9a57ed70adccb2be71742a7b04c68a6c5bee506bed12d99a478de0631aece731524d718749c35e8cab17130101dabafe7fe36b7e086f62a7f221cfa3bf68e4774cc9cd3a82f6e6b2eedd6027afc1c6915404eaaf714adf44239710ec36cc9ee25944ea0c112f1e73314c147d3b4253d477bff59a479103faac005e90a7b26dafd31064995ab3188ddfc65a407bb749164fff69a1fbc0aef4450d23970d40240abf40ec28cefbe81d0ce307399c699f847772f0f3cba9350dc84b0355cafefc111085058e299026835914830bc90685cdb7a8e8e540cb41171497fed3e40122f42e39ccd648df29d364870e5b1d41a81405fa320541a5d5a0ba0129ad3703c11bb20bcfc96c6cb17a855f2cfe46d5cd973c595140e48f1b64bda1820eb926736a75973ca7efe06e831f352ae8b56fa36b2d9e1fd23af5b0e0b5c1e6ae2270c6f050247b9fc1959513672b63b304ee92ca593c6a42613fd5ba328b6d50d7edba0704078d58c0ad23ea54870b7e72f26eabaa30eebf47de1564d99fba498b7a9673d995e5c172a033b76607642303828979796bc62569794aff5c2fab13f75af1851c56a5c429914be54028b177a390b630449b82874fb6871347d1ecc91594b9657be2c442b481287dc62d647ab65d63e16189f3afb109a8554f0c687566a9ab0296cc29bdd110a22698286216f83c7daa0da1abdb0c59fa4ae86db59ee07f79eea50ab313d730d64a040305480b057d02f771de9f038f0c074b4f2606ffac0465c7dd5d552cf174d046c9d322e2b08e3732604950e4ae20571818659becd83dbe61285e8dd7e97235da2dd928a98742c8ce35d2120d6b55423b38bf53abfc97ea38a9d582cd3c80fc668b18854bfbf7e5c946d6d7ec9f5ef713ff6ea935a0d9ea69624b7d37366b91e792a6a3d625ce8324d363e1fb5eb3ba77032d5006f868577fc3b5306fed19a8eb4657f3afd4ca8559f1383e6ce906c337f21ab65a7fc888da5e87353da86efa1af17dd49c3923ebad5530650195b96256d57c5960c6aed813be257a80155087ddcea8889175b8a409e59ca23b7840cc217db78bccd87eac116252ac3fac899583e3a23b95be0d9b5b72f10ab600235789e618f3c8dec83078a9985bb00f69591c23ec37f09de453c339c3dab4531495968604e253dec7860f6e7da8745ac90b46e35125b9f667134074872f425bb56e6c2d290b307d010de63f7ec916f349dc30ea3a91e0d8c82c9af4676dee700d6c303f438e09227768ef44ccab2cf4cdabe26953a5ca39d27afeb39623cf0f062350d05461afd90a3162ac91dd9ff1ddf0c00bdc68949613410787042f3be4b5fb478b0e7063f35b1762f1e6022d99f10ea2a56b881
bf5662de01651deeef1ba20e34018d7afd966c9c6be7d25aeb076833fc3a193034b11d5d409032246e4d0710b82d4a659a149adea8ee1636dbee4f73b0549ac541a2350838eabbb203ffed2ef6a47261d468f316ce2f564f054fd221a7c6cc86a2c680358e49d6871c22bf206c72a64ecb1fb181aa70379c189a56c3161877a82d0568650833de9b99849278c9d44a4d8fa858acd13341a581385aad96998493baa8d92c4d43fa1f3e715c10a1b2e90108388a3e0ebc29e17a743ec5cad59cbc90c7382aabd60aec96a02a4b98ea8a54f147e3f57eab57031d295d819d815f75657d4a697ff3dc34d43f5a4379d646ab9176f148a1ed5ab260bb267a7b400a87e2ccb535cb1aa8c7edefc2453f0c7bb856b3564dc290e14035b0ac6f654edf62444c0234c90557879c753bd9c8b611196e84e9e12c0cac99467c761fa6ecddb72d1285e68672e822b7e5350b48c4f2e210ba91dbce47c478b0e8698bbe1be6369095dd16cdb3b86d5ece224dd10d11983a45b8cdb678566bdf002390c5731eaaf738ab668138326b56062143100e83e276bc0e37f469338eef36f326a996633bdd740472012f1b3d331a0b90cd2c7a0a6a79aef27bf530dc943e02ad74795f51376967708b1ee7b9c904a550d8ea9f6f2f1d836483bbe16436ef77aaa6d8eb056f826776d176aa5550c04f4774dad1aa555876f8bbe0d49d4e4560093fb73369a9a2170bf95d0d9f2a65c27e55e9f5855cb562d61430078c5f557d6e8b1ca5a0194ea13d5884edf783c1b144f00f2cd1f30a130703871265adf43370f594d28e554d2837613d910c202b7259e0022a6f6e632c78a4b2be3a00b574cd444eeddd0316792380173ba12c64e77957982e84ff890b7374294c4ad946fefae4c92b5e4f525951c9886f7f37ef8ba023b31acb4602232198101bbd9a3ff3569987c4a76eca9d32df3d29d3bc28351582a8606b1e320a7c5a09291302691e5dd3c3df403563e98d49eb8893f672d65d27d74877b7e80f815c69297a3724ae99596c2ca899954616288c021c37eb2ffa11e5a8979bb03cc02e63dfeb77971af4bf12b503e5d80ec845ef433962c37ab60c77cdece229350cd8bc8527b1123f389d57b751a7b1bc1f6c4c0f0f948bf6cbe13fed8f93a084316063a5c12d8b624e9a9bf6c1495112a69c7a66924c94de569a60e84838ca30270bbebb3b8a250909d6be9d22d15ef77bb12067490aea8e20b09100cfef1247747fbfa28c08c9c852410fa58371cf3576812b1b881d9c1bddd596054d24a3619a5059026411e52acc5251a4d67e1ccb228bac1792bc0642f81b8bf42ac95b7c8f32bf2a0dc39a500001269e45be5b760a63796fdc1a5c4c6b0a5e0400753a455f941e6c8c9e88803f06767362cef57e6af18995edbedc218cfe0c702fc061731cdc1862c3d61ffd7e5312e99bc740bb578fcde31a2cca655262be50d29606eae22342ba740f42e6d719efc385e3c64daa23304123538002217854a4ebef5ece36b7708b2e8fd929240c81f73953bc9755fb36fe5c4bbf7547193a6e47f1fc34348daa70dc9a9be35128fdda8c2db782d86833fd7749e8ad419e3ba30f566442d43c7941c7f050d312a7fb437c8621f07668c6c223972385e476fd3624c7507ad270cbe12e72b48f8ca1cf3ec7b502912184fcc715d032bb877827b17bcc48f1453fa3d9d0cbad39bf92d0930d0909507ba3096b347912b7667d7d958c7e807e596188d080c0f268f530053cdb5e5515392fbe940c01369803557eedce8f3afd2495658081bdf2a910e3c7709d9eb78a469b50dc576d41b09f291e49554d2223addb52c740b5ecd7539670b589fbac0924bf6b63aaa93042fbfb9f54c51d33573c4ac6607715ef5fd8038745298971b82c25d27b77ce634bcd16d3e614d6ca71df928292374510e184d3e674a619114b1dde5d00ebd100f181757800841b92e07a241c1f0013e11b16802d6371853a4d02bf4fef1f879f09b44c1494660d891385b54e8f18c171650b6f1ec3d0d10ae0e18893535df316aea0e738a433a4c69494a8215b9b08685f40472e2907442de74a4216810b753f2144be9eba9cb51307973655691dee5834e74ebae656f428845d12eb1ad37aff269e06eac3cb71e62e9bdbee7281600379bf87b0423d45ac288402147ef9907f791199e90cd8a9941c3145c8970aaba887d2d59abc9d61319f6e9a2fc8a7a92d5e5fab95ef2526736361b62225223cce531d441745932d755928cdae188c8550cca706dc47d92dbefa5a5ece458a51ac9672275a99cd8b0a60e75fd51669ed555237c7e6192d178331bd41f83d3919be0a149d4b22d4673a542831c0c7093b5690e7cc178a006cce97ad4e8f784633f877dd2de9f230a7d92213c7be4aba259cac1fa145e649ec30035c72839674c3e7b55f7e77669264615da84b051ec01476eaf1f26f3c1c76f9dd092c84ab31273507c3b2085558b6f4ba46bf7d9cf1983a700414bc019f20f
d3e97fefab64f559c60a78abe4a48c46ba48569b433380d1dacbec79f9907f80bab8544834e270f6b3f5d2d4bde2d5be6e3596d7a2d4965fbe4c61322b982ec6f00a5e57485170d02793770a874c82322b956159095d8781e9cb28de785c99075905a39b69dc516734e27ad276fb8adaa35543b8b3efda7293ba8c3c7b91a91c2c595a2abb2eca73fa05c9c57ff259284dddcd00d2d53938522e8e2808d6be3dd35181c61945e72628b9cd69c4aa3f971a162d58809d324a055c5b516c1c7f832b17295fdfed5635f22ed1f9e19292566030662bc370f6084931bf74c253e450f7c5f82c7c93a5ed71c6b6e000da86f7bbcb9a011d785aa35e99cbcd0d8b8878d24489a8a9b4c1ea6154f41d7da8cbd2840b6bf7ef6a7c9ce3ad3ac06ed46e51dcce42cdf8e7c95cb3fc11e380d97e7fbe611c32e2652a87126c1e4816a97a602be2e698cb38f0facda0d86ea0c358a257927684303195497586000830ed6d5a85cf18aa27469d0a50a8bdb8eab7a1edec7d2130d0c8d0315e1dff5ca1bc3c1db7801c53c1d44f4bee1509b2c37bd58e661fc43ca83d5215003dfa85263bda401fda9d9cd08d9d2b076de6d37e519bef57586729f064cfb99f80a19a5cd4d12cb7cbc9559f82e0ce1c7345e68be8865be43df7ff8a32e776cb46806793c1dad1a53201f92470e446c6cd0a4e7402ae854cbb23c8e659268212429bd7b8584140cef6db939c0c00d71bd8fc7e9f52e14b152b55112823feea005427102c6ced5f812007e0cc1d00d3a100ca05dac1c6330b04a0f28b55623db7c89bc274ce58ed75b4bbf77af47501f21888b4c88757804cc82019cd5a3d463a4d70fcf15244941890797e1e9d14cc0216e4d2335b40fccc9656ab2ebe2545ea3312b032e4f109f9bffd51c946ed36a9e3288cf127b66993be5623eedde9f037405a8f60849266bff8446dba9cb159571b87809c9463b89c3d36b9ddeaae4766019b26d6c0b56061582f95d83408b550450fe9188dd984a65d4c83402f575bb26f9aaa12089c0f962d88d14245b441a3297d66b472ea8d007e3fc288e5be680482e5d64192945985afcb97abe97c02c6801d11e94080aec8c5b02f8305df5ffe3cd63184068adc1166c8d1a9a097ca7143a52336c20630c1da5842c0fdb820d9bd907e162513fcb6d242a6f86242aa5468910e30a37c49ddc534eb53aa43979c7b13af9abdd71ce16f964417c2802ff8deb7ecaa8b1bb7823a9e03dc9536a5ad7ce466989b352ee34c03d50c78919d8d331238301f8f49c0ac4f6b2bdbb36cdcd33b2ec48fbf6a8182fd2dadb28a0d97c8c3e069d932a000ff13aafda7f672826d56a83c360da8d862640033fde81b3e1d90dfb41807a1ff754a3881803c7a99011045133e2c4c64c6d75a973e5812a3772aa9d122a23ef79f74a69d1c95c9d5ffb0b59e3dc50a1cfc5f57cf3055b96c8f4f90ea40a19692ad2a745efa90a53a112fcc32ca55f2ca91dce2d1952134b533e693207cd4f1615395bac52982a4289873ec548d64f41d07963b4c8e0e1c588c813eb8e78e08dae3156f8ddc30d59915a5bd3a0ca3bfec6f62573564c4734194c67a4c2bc38546fe7db1917c4ca821a85d61b1d9bf098bf59989cac9d5c529b4fb0ae16fc89162e3a495eb74723e13ea34f0c415b7834ca2227d31ecc460aeebc0fd0f15aef198fc04a667b76d74ba2d7bd9c72b847840c7535aca20b6af5ee62e7dd54eb76f6ee6dc560971fc5f55846f11f85ecb71afdc14f65fe4635e0e6cc125cc6a279b35f80d7f6f28c79bbf1d50034b643a7619b7c013d1499ea05f8c050eb57db2d6d2b5028e3ca1ccc0c83b3f4abca58765274f6e7088969586c56f037071d23805250a794afd6c433078bc02c3ef0de9a77c65aed82d9a6186ccc2197944cc6be19f4f09c9f907d7a3b65772039fcd97949093832b5537f98d21f0a3bd598e1ca4686fce62923fae8a7b19caa7e0a77418e552462f06b0d78f1c778d47dcd67094bf4f1a5cd121a63a658eeae783d9c0e3ac870168681c7d213fc6783f979bb4a60659a964c874b759608005cfcaadab6cf7c3b4065e57c77e23caf1da1f0f31adfa12665099b14e576f4b8ac15ce94497db96e416c1e8de1746b689072ed3420d07dbd22df8f2454ed6f42c7ea04c094f609e2de9fbcbd3279633936f2e45ea666f7daa63c86532ca4996bfca8bed4dd3cc915b95b44f4b04490a817e2c72bc109e685dd200ee0f4dfadbcf20c9d323157e536919f3a0bf381da0a3d289f446164509c9cb1e3d4c5310baf9714973b5f0e822852f6c890ea077ef5c8659ba3ae540fbe7e068f440a0ed512531346e30a6236a49e1f29ba4afe265a5be4485920d63294f8ac9b1722131720676aa284f9589ed68709311e8a2ab7ae5890d04f6e927978646fa3d3640bc22207032b9357841a0599022d821828dc616d41551805118053fd2b184ae7e357575d3460fa04cf9aaf6b4d913465f92db14823d0eec3edb1
90bcffc2802069d08141a284816797e75af65b6a4b2547f720c7898ec9195c66faeb8d7e0c53c5ece7f24aebdf1f0f464302a55af8b0711ebf0af4dac801d0760a381706f1e03943fb2c43f0b67fd81278011a304adf607f54aa628e310f516d8d9273f77f3ace4c4ce811fc3a01953510ef605ccc599bcfa8181af52d73a4c3daf39d2d79b7a57eca27896871054e868def16f7f24634e01ada2ed9c8fa4a23478fa46907977476d08edc0f27509f02cfdf681c6f2b661a1ce3e7c31872e0131db416b8b1ecf111192e5498336c9c833fa5503528c935f95e3ce5cab34b8f0f0fadba8134880c9e82fab0ae829fc4c6768255b4f0035ad216935c305232520729af2d9cb2c8182327039c4df0ca5daa311c847114c2911a6aec97e5a623338e5e8540848964a342c56000d526220a8190425000fb1df5d0fd45d6db1a58ba0e90716cac7daf4ebf0af803d60461db265beebd654a4906c308e308fb0822156b7a53a7809071e34f8464cd2a8acc3ce95962cf8877e606cac6cf994ad9d894e5bf8927ae7311ed4b0d33ef33248b870d0e538a79d8cccfa96a989ffc67be97e810cc548d1992c55f5cff2641133325f30d8b89e23a508d0f7972bbcbcc17f3e43f4635038d0acc93ffcc27cee4c30fc14f8270fd6f8cec339a46c7325133e31ce958c71d6369ec310a6961dce9f2c888c4c3e01aff70743026ca5f343a15d3ad98a563314f8ce3415c939ae2fa7371c5e421981e6596de0491b9526208e33814aef1f71ee6a98af8841035a19aeb304b1c66e97fc46bb5d664fa2fcbb651a9aecc0fd6f68b84834981168a63667ae699bb3be9d2df5deef38cbbde33515ed21ca0594b7cde54d13fc5173d11f145a4122694842ff6e8c5174925a21f9134e0bcb1eb0ee4b0eb4efab84f90952ba6ca71e05a72d77ffa6de8769d5ec675e31a77dd5fd77de6ea7b7f97493155fd649e7c982ad6324ffe345a15554c295040a12cc7d1860d12c97dfa7afe5698c633dcf51779b0de7fbe02ca2bf9a52896bf71d23aa23c5f8347d08e524a29a58ce35b4729a5948273555eeaf4f98a2b268a3e8f30977beee8b4525ae335022c435cb6489745daf33acff33ccf871499d77b76594a820ca02f6c41230822838712cf756f85c0ba3cb7edfabb7cd7fc3467b556cb553324d3dc962e8b5bdd45add61291b0decce9d6cce798a65828b21f93f3e46a5db3e1859d393324cd4e0d36c7f4b37e5d9ba981b5f153d48932d357666cd8bad6a5f8b2de8b1f56113dafa691652d57d56a6db74a1b9fe9b2b972990b9d331369fa97dc4e40e9db5b73d564aaba09e84d260e06aa0e738501d3ed69d58cffcc387d0591ec2b5dd6baf437bd8daf85a6b7f133dd9a2877f9d36ab60676bc2ca6717b61edcbe24e048fbf2f641ea25137992a9679b7e64af4de372ae689a9985d93b9aabb98e168e455acd8d2aa9ef9c9e78a04db5cf1e8ce82eb14e83d8651d8be35ee3a8e9965f54cc6b84ef1f57e9f33917fd354e1569844cd5bfece3157f5953ef93b96eb1d13914d138565f10d1a2caba9b4fc3b51278462aff83b28fa9ed229999f287f364d94bff8637d14bead81dd71591d76c8ee02f335623e48db9f220b23cdef70af9566581b23d3c6ae31f2901d79cbc85946d238bb2c91ecc38c4c9362a6ba8b5906cd545f2991ac29bb2191ac1b9bf285649535990b9d334376098a6575abd52971e7e52a1ef3d48f825d3fe1b6006efbc700c1ba90372524eac8c64c54977862da06d0cba3d168b72cb8dbf7de0d77a3429b287f5b8697bb7971fd3f29e6f7647e3478399eb99a5f15d7ffcbc2e5595f17d7591f14ac4fcaf7f970fd6bc23b50cd8ea155a6209164d92ad5738f6495d599095f41bafc795eaec66bc4de09b488d9ac3b75f717524821851a5c2d36aa0389e752a740764b9734475a1cb37886bbb4b14c147d5d203b64db0e59be403c9bab112efd4b9f8598899965ca44d11f2c93952cbd2c9378cc80c92810da5f7fd69dbabbdbd8ccdc19047483cb7aaf26f0682c526159dd08442a6cc7d3a533f15a1e4f0c9065793c1e0f8e56752f98cbdcee055786be2c8f67041e8f47c344b905f88a39cb32793cf74d2ee8a07469badde3e813d9c1aef35cd5048241e0053501841a40d73fc7b7064d947f6ea2fc431e6a957f3d5dfabb8b097bdb322ce55c58c30acbb9ebcffa9cb8fe9f0fadb92aafffe7f349f99e5cff8648e37aad22d2d47d2d1b58be4998184226095f24dffb8744e357c70fcaf7038e9f6bfc5aa3cf88effc9cb8c1360fe74a3f96f5b538373b92644d2b7cc544f5d76a2d7f5fab06d047464908f72702be934a94885a897fcfee4463a7a8f79784e86eb06badf4bf71e670a5cf62999256c284f74a9c9fc5f7c821fdf55b647287890dfe2069437f25c5d96529876eff01f3e77d9ff77d1fd903f5843a57e7ea442fca433877d48eeb47f391342eb8d7774f4297e4f07bcdc1ba501ffc21f541320913d
e27a15f499acf2387809fc4c1b1569f826d94692cc435a6d59866b256ea649b0bacd43085106bd1a265562d25e58e320af841f1e552fa947d0721659cee6efeda22b0cecb1dcb80b093693468e4debdbaec5eb7dfdf858898a6eb35a036868090b8414a0de1dcd14cb265d0b800bef834dfbbbce86986b8bce85deadf50735c5e44d2f0933e7208f8e2832493d64d8ecbdf50734091a461fb44464f1abd1d914a46ff42e6b0ecc89661bbcba236a20b3487524a6f3afadfd84c268abe37b60d1345df6534c03412ce2c2efdc19a5b707129a594465da2d7fd18a75b8cf35dfbf90f968686e98b5e0933b9428bdad53e64d7eab2bb491fc5c9839d44444fe92b015f347388d057e24fc9cf139aa8efe9d8b9fcf4bda8963c60c191a6fb91a63e4d07824f534906d0179134f519e00f9234959c39251fecbcfd895e9dab811aa8639d9ab332c4e8baaedbe1a3794e3edaf5caf008703d1b156559efa9e77d0ff10476bd6711e07a0d047694c94479ecbd2dc3d2eb799ee7950152ef1b68a2bc1fedc0f34c3f352e7a5ecd5cf5f5dee42a1af0fd97e478a0e8e9832039247c27938c3efc24a2f7e16814c73446a06804862370ac371c8dc0b1e270437104ba782ed525caf4530ed863c1179134df8b9ee62387d01fe21f86230c7ef25e34967cb020595fa04ca80d130543912ebd9e791f8ddeba571d52a4f3b1d56403cbdf394921a896d6a673753ef576b57bd1269dad36a61482dc7479336a266d831023385bbb2c79c07a5d0b42e61eac87bfe379b0de83de0efe8d68a74438dcf9dd02effc5034a29daa38dcf9b527059f0bab46f5ae4df77d0f315144f0a9fbae97e8236ef7ac9bbba5cbee3dd765d9599ccd714e076699a8aee6d262eb0f314120d7ed5c7325deee3fb2814661d8fe7675aaa3f96ed70dc444755f47d29de247aed0b79d4cd49521c28ac89daff9e3ebdf3dcf0c28cffde89d2b9678f37de8e3f710e8d4e8fdf4fdfc1e72e3aa7e2205888a2bde50060496e54322d19689fa587659b363af89a23dea2753d550ccd3f71f0bbcfd1a69b1a4cba217021198ab7abfa7a3d562f9ca30ead75c89f7bb4c766ca2be1f91f1f5ab53ac6983758aa5badf7fdf53bc1f8f2cb2336daab96a1ad18f7e49fda487e8264dc31996e48429a524128944f221b9483eec506e5aac53caeece41909a30b53b23f6ef8fccf03dc3ca978d7df01b85f889eaec80640de0275992037e3f733eb2062541dc471adf01c959c9231f69c45a7bbbddf8e6d1e0eceeeeeeceeeeeee547c98ac250b0f7b889767f51329dda469e03370c05e80dcd331f8b7d059282f3f2e4e98c91c2259a5bdd24d2c9dbfc72f4ae2e9b47c34d429a42f7057482c54631acdb9935cac8a0f0571ac69a83c3d44fbd04d582827cb0d2961592897cb0129fdddbac99ff26b488adb67032d063f24bb9cb0f5bf37d2a9282597ab4ab5da6ffc1d1470119306734577cc959bb6b057700bb8044c5c17552553e9655e09ccc7bc92d2cb7cf74068b49d9db96a1ec24573b14b49ccc3bc12992fbd48d33f24e6617e88cc977e8a344dba10f3304f24864cc2c410982ffdf413111932889ffc4b64121e52b61cc1930343da99c34478d3a9907416e2faf398380cc4acf12f3dabc2f0ef9019d9354e3fc18c31949702fd61197e871fc6508a469b282cdf07d2a5cfeb3b37a4a3d67143236f328907e87f791e29d0ffc2d77ee82fa36561f4a327d118c0e5f959707926774c14e997dc2992e2b3e82591ac18ca6b97dcf07b9c97c579bb3e226797dc3357340cf9875d36ecb2b1a1d73d08f7d84a965ef7d22e4ebeb00161118bc5b674e9b906c3766731c5262ae6a21db2ddc58260bdff98f68deed3a5ab4bfadebc8d1bd55060594c4b51afd7854c327af12369c049537fe6b0805cffc2ffc8f2fb5ec559d038468b895fc8359269a52ad67ba60dd1844092a9d86aaa60571e5600a3f5a8b55617d202969b20a09e4bf2c0d21874ae53a45332f023a9570624f6721450cc1af71f21390264bd58ecc671376d6f9659086bd238c68492fa217d17ec12e0ce20217a2e8bac24fcfab43e91f02b1122a207c9242169f293bfd4c0894c889f7282dc8861060521a9586621852efd19098e981b7a6f8c40c5b266102147baf480f5489bd2092cbf80be76c84c2f7398cd9562aa984a9ffcab5411944517d7a9f833a75d5ec9e85f5cfec545f47d90eebeb3327ab1560bebdae758da6d0e833914351b6451ab8276a5f83c16a3035b49b6714dfc80fdface8912a1d80e6ac154317b21de84bc6573c5b75be6e2a44fe729e4da9ce18d906cefee3875425dd40776a98f88dcb129dfe6e90becf08583b933c8892fc8b094b28b5d2f34ffd88985f949e5d75c89ae3314ec044bb948baac24979eb09fd7793f2f9711164997ee22bb4eae99e39bd21f87a32c0bea9822e0914e31d7441ff2500bf9b9e99eb088ab4824924b5d640153c55dd466ad8808c57afd6671
0d86d11947fce433876b6c9a62a15cfffe7166d2cb888bb88af46eeb92d459124f3b56b6d56771fd232fe3113fc562a4dbaedad127ff1e3220b1b9e29a0b5d8546f44bae8864f1152580a9304f05668abb98e54cf19559facb98b9c613c71357c455ccc3368e31c73a555dc81c6ff1a13eb98a6dfee3a4069226cbb59105e2e5231a6dc7c789cfcdc7ba5c5d52ebf2f1f171756a861fa48fb891195c554d6f2a9f613b47e68ae6d2476212816da4734cd4746969f3652559a6ff4826ad93f5e6ed80a5a765d225b511e5650b6b5bb6655b3c5dd2795996e7d2b73cb6d5a9793f48c3e0aafaf48bb8ea33427d824c14e520f4c6a50f81227ef2294b96c965bd1a09dc764dd4568fbb3bb373c794d93f080876cdb8e2f38614fee9eeeeeeeeae3ea23ec08b772db77c9ccc70657ab094ac2fea83cff5916c5ee19af816c7b61c51843257dc03938e3253b4a9986dc56c31666a8c99aaa1488924e64ad4f13f3784f0bd83c6d25804310e12aef1efa1f6418771afd4e6020313e5df4ddcaadcda4fdcdb4d883642f6e262bf44829189a9619ab1f1568b6874f9707cf93a921e1c4bff8d31ef8d324f47d377e3ccb38c6f73c5e3fa336eae62b6dc78ff9b1b6b61905892c5e3da8779960843b26c58d5d23c8f3ac610c61c9f129f45b6c164923573b217172b2b91605c26a60616c6f19fcce230ae69c6c68b5c7315fbe418d9879dd08cecba31f20f141ba6991a32313025927d198f7489c49398285742871e98dc70fd7b07f324c48fa97d008e2dc62cc798564c2a66d9613496c6629e90304e0761608adb576a5c8460769459faeb184318bb8929c434e28a4ff0ed757bf1ed4504df82f0213911b2edf687a1949cee594b3c8ecdd5cc5cd15ac564b114b73f6cb1ab5d7edfe73ae08460f2b9d3b8738cabf4cbed6715f3c04141f0dce6da6dce72dba7b78b47b2de48975dd637c9f4609d04b24909e70e29fcc3cccccccc2deee1ae0ba202edf20be9d4cbe53f92a4551c848f93d9e5671b5cd6bc85b22027635cd6dc12d4c4e7b2e695cbdf351d09f113bf35d937b1cf38839ff8bf717ee312d397976f82c8f4609bac53c6afae3d2342afd56a31a06707f29803f91453e854ba641ce3d881ba8c395017b85a6daec49ad552ab716e94c58e7e76d9bd1021340700594b2e48b6e0fde1d7d92ec1a7a2ce101239e4c64ffecea3312c8b6bf54aa76a05c19f39e087f3e549fff24448ff422a616288fd975732fd442289bc3ce949a4124b563f89fe45f4568bf5cbe21a3b195d844413ad231b63c2f12e1b3568850e3e20ed99e8c16f17ee769b4d54e93f5d5631558d8379f2ef5bf3ecd6aa22429dea7f93151b74fdabcce027ffae0c56b16bae48b8e15d796f0a8145825d39b0ab03ecfac1f5ef3024594bd8efab8d373dabcefc4f54db204db5ce9064106f560ce3ed17c97eb98cfdd3566c7d9a2b5c3776638b2dc1dcc249e054e6eae5597ec5756f81a3e0fafb0f53c535770fb49669e3dfb8b91abd7b08ae3b12d7fd03e3b4deb73ea53d13fbd602cb37aed51892656172ae6a979f62ae9ccb555995c95bece5767b66fbe62e3f5d0cc976914ef66d849bdbe5be753bd64f5e848511f8d4ef3ca69cc5d9c0c485c522370053ca96e012814b587e9f281e13b5c24449a0e403d32519cedd0a8d67333d9bcd3c63992b1ca6eb303dca4c85603a15d3ad980e8687c138232ca6cae4a720c601c2354d4c87c23843b8c6df74c5c4456a8a5be589159189f2ef61aa7e84a8d55a93e9bf2c27eeba886c5f4830a59899175f34ba7c1d5ffe1bed7b63e9bb11e619a8677335c25cb95cff1632bdbf988435f9077547e665c8249cd3fdfc9b1e2e700e730b0896556a71cb1e36a4195c254396a4908a3aa4916f976658d6dc62334bff202ac494c6e9a7156c6e059284dd0ecced172c0ec896db7f23ab89ba0a1cb91e02745cefabb729dc29fdd43558180f66ecd1276a33f6e8fc4b2338b2a6956701b91d597679c54fe9b9979452276d8b326821a1379964617673095111266b5df5ff5dfe0e8fc5265b76d9e57fa9533fa66d1d77f4a9bb89c6e3c06689d0b37958bc9b67f36cd7bb756af21613a783c235fcfc3b2c6461dc41611cde82b9b8fc24b66cf260260f66f260260fc6ccece1b4082dc097bf5d8599909dd96c36eb9f968e57c30a2b6467ae7c7631bd9838e86498dffd8f8f9f7a6ced94b3d91c67edee16e2a7c67540e1dc31629f2edde7bacb7d769838315cd3299a15fac23ccd01e67c989275f9c9dd5df5f54c147f0ce3f80eb3867b6075b7d8f542b2aae94b8fb4b9945f8608eba4f5170cd3198d8ba1b6d57ace9169330f58aec9bc14fc303b3b3b7b0b463a12866e34d219e9729eda080c10b0c10001233da71018ba879425d75ae74df67db69b3730cb1a7d0ec73c6dd366c4080b862e7bda6030b253040c31c21105b676d9fc252d6cfb444138774c667ea6f133cbba6426f9a18421cc193067c09
cd14f290efab2841996554550ba6c2a5d760ca8cb16055996084814eb14d0287a894a39b0f359a2978b044cc3035b7e290576b6371156c42a5de7753e0bd2de3a3809c2088e00c2944991f367d94f995be6259c0630520f8648e0440c8060693190a2f67dd43bafa333da7a05019d60cbe7b4d65a6bede87bb5abb73baf73197fb787d113ceb98427c8b4a9dca22c772699875bdeb7d75e7b1eb7bae461cadc6ae6619e1e5cf57e083db4433c43b61bf2401aa3321a446355680c77635a454663cd4404267621af794f3ff82cf099741a387aad1f7c0a80cfa0d73a5534364ffee087a2d16b4deb52a84b775abfd3da5b5dbad3689d6251d8754a6114b6438b653d0ee20d5dcc613e8586327f0ad429965ba1303a85ad748ac668b82dd6df5bde6a894c6cf79ec8c2e419e2dca4311aa3311aa331b761e23018b3c63f34828df8c09481276aae1acb4489c82037b3d70d8c6d3405966e9f3d46dfe8a74f504624f41df1c0facf0b86ed5a2f3e61bbef810d4b0b85b6e53c13e52372928dcbb19c0658973d0897e8601d50c3b0d8aedab0d9f047ef4063962e592d45cb7ec8b2f8369bcd46685816260bfc23b230a1b8a6e81b0683916042bc8a48d6b764ad9bf0c1256cc42a4ca205b500eb180ca89d682abc45d64233e9d2bfa343f7d52e86b95a5dfaa4350f0b591a066b29deb02cb021ce3251ed392dcc555fef1618c7b11830b620d17ed0dd3d7705c15a2b188eda48f84ac0b082fdd54f21f84ac22712823f7a22e08f461f86a37e96a99bf454509bdd5ef5befa8940af63dbc5209c3b58353e976cd2299689eab85d7f1db7b93a5d7f1db8b9a2402c9997daf5eeb502298ae53b4530916d8ba55a5896c964aaf11943cbe4617155cc296c5485266cf7d4394897330f62978dabbe770ea242f3cb63342c03312c16abd69a4cff6569138b31942efd77d8491a269ff97b3089e82be91f8b3114f3e4afa4fef73d914facff8948eb279f7d023b619f2e7dc75534ec4c1211fd68144e52059b209d9a3eb6a333a8661441d2468ac0e08007cc553fdd61637096dff2d01380c113736710d01964501c42e68a04d2a53fa453ddd7ffe8f7f5a3f4a64b1fcb3e03fb8273e21fdad8f4149b8bf05782821992cc152d01139c04103b8ce003e994bbcaf46daa619241f448ea750941ab8179f267720625315fe395c8bce9f987c47c8d1f22f3a69f220d932ec47c8d2712f335923031a4c6cfbc69a646932c0b2cb9a50f9f555e9887f990f668f46177256b6148249817981238228df685e6323265614d2c575427cc1b2013e52f3306b1f95ea4ae3790794d276191cbf55cd6bcb2a3bd1aa6aa1b639e727355ced5cbf5f768f03a30579eabebb24c56b7a547ab5ae8d35c75a43f0afe26ff4e95a0f7188d28cc9c3a362993b9c1e64c385ee6d5d520e373299ddd6d7671eb7073652fa5ed850fbe887aa3903a8a46ea4247bb1bed1d843ddc6fbe6f19d8fb4e257a242d1a63680a4c947f374e60d6dc6cdfd9fd4d0a2d2434f24a444f6b14117f3716195fe8932b197d387a713412a7c80860b2a2c076ff455e982bf0fd1b30530298a57f0aa3e8236dca1bee82b0f0bc04a10587fb737fd82a4c947f9886fdbe5bc85c8d606dd83a26e534a13e383269dde454f2869aa3ba5e9ec032e17aa40a5f6f614a229c8c23328e78e7bb6721059b20d046bc54f7be036bf258ad0c9ee75126349947798e98d27677c701e7eeee4e9b3b6f6e17af92ac8780110747939f2810172d1c33b33b3b7577671b765248902e4599654d59d008a85cc7c57e9899ddd93f77070224c8891d2ecb644607dcbb47bbd75aabbb7b3bcfb294ddf08df5a16f3d6416f1221db9d38d45baafd5da223b436893b9a35da46d6c96655364c8ec8599373501edc00c7c235afdbb59e3d400ded547526f3751de062a5849da71e0d2ef56a05d03a176de836820c0d348039ca568814bbf76487978fcc159334272313074e97ba20164001f49bde044f98b5818b8a24ec5405f50f4415a4456c02f48d689cac0fcee89af26a58dbb751903d4e1ba9b8dc1713d1c7d93ac49938105995b014e237c7921bd9c0ee86131516ea7b02c0f8bebcff2681ecd3d2c3c1e652ee802cbea6ee0cbdc297edd10ddad1bc23dd1bb8c32f0f562314c3a63ba741c345706084b4e6b6d9d0fada056b7bac7a75bdd6a9f4ecd701ccbc0811d3dab5bb732bad56ab5bad5fa7c44397093de2ce548975d77ddcdee481fe9ba27ed724224d9a856abe1b6e470b84985f84aab7e60589eb83a289c90d22e53d9ae2b1d6b2a3da561ed92220667d956ebb254c5962e7342b82d70381c0e4784895549b2135427dd50e385bb552ad588eae47be28be2e3e29bc1e7830f8d2a44fd4035412dc1e7aa51ea941a84ff97c6b7e5b3c127547b2a0e5f0fbe2c9f185f0caeffa7820ff659f99a28c3dd63aedfa8546a09aa09aa10f503d588eaa406f1a551a3d4291587daf3f9e043e39bc1
c7c5d7836fcb67834fe88bc197e513e38be27be2737daf4f055f131fcccb79b98f0a2fe7e5bc1c11337027f9905c249f9b16da50b4145d45b91c93e6b1b11175bcb7a926fc00e22592cd66fb3cfa9acde8abfe8ce746b221f1d95e361bd799ab3a2667744e665dcf8c672897f3e1c3c70f20a2994c26bb52abfdf8f103c80920ecf5aa128b010102e48454e5168987c796fb6117bb4e38e18494ea23d188b994ca7ba1484d75d30dd9a1dda0405b10324361e543ac70b7320551a7fb14fe3d27b7ff4675e0e0883adde3a42000b6b9096de08c6bfa4ba46eec78e890cd551d9319e3d03372b76df446d1a0339a731d2974af1c2db2ce69e7947ae7b47ede480c459d531c3948364ab7a2348f9f4430985b8e1074f84b098ed1f1342693c90629c20ddced761bc791765e3f9bb73c3126c6c49818133b110a5148b422438a601241041176f098a9591a4d6607c9d2ce6d365b28ba72c55bb59a388ac5624242eda242a57e5e5f9645e19828bc128adc451cd54211f8d598508d7d3121af5d54aaeb6b1715191e23c850f7481075fc0400272690e8c7fb48286178e87c0f00ea5ef74999a866d88f28abd2a5e8c5e755da39105005f253b7bcd5b658cc5f0ef316e3f8136ff9147fe2556c4ec516034000ac8d00f478e98172419550420d16150808a8025512cc1a050bac80c16d9babfcf57ad9fc657b4d547f09a28ef725d41a137c90acbf68e7b6b6c5625cd36fdf5b9fd74f1ad9657d7cd847945589891daba4588528caaa78cb556290280a5911a37089b12a6c6c6c4cf021629edc0c39134c30c1c70fd0880d061a1f3e7cfc00526d1ec90aa940343f7efc0072c277c348d00039e184544aa5baf15b81a2dcae403731e63db3acc66ef35b93535314cb1fa4511075fc51a02b1cda39c9d6af02d9b8a6e7774586598639942e7d8a8d71fcc9aca9241095db3147c26b1b25fd55858e8fe6f33a642e34a1361004c391c8457c016970e488f91933ce79b247aa74f97263b11070501d4b9c4a970d04a4e31de63093c9648314e186fb78cb5b505e2ff04752041b15480a415668cc99388ff390229886729d1341041176f098b1d5bad6b52d381c8c8b5fec339f795a455f4c7c70e75f60e2502633244132572f6f679617786070e7bb3071280fd7f4db6c6e2b326467ae444332d9952197f9951ce201b854ca21ea903ed4318a509230e49ca8528944b2965cdb3252d4b1248dd231d185327111fbfd6514521e1791f2dc16916e1b729bdb865c76c5652ebb2244a3f11841860a50038dc02d228184fa8df8c34e3405464209d350fad52f1e3c441def4728551ca291ca28ad42d2d80d3168152a33d19818fde37291400209a5ad46169bb49a8ac6da898f0f000260bb47ec16f740b9f0500e5582b805d765ffac19c94c311af32cb79bd77d3bb3d67d0d6ae2b8adc75c7ddf72196d36ebbc978d712814afd7475baea2412e631c0a0515a22d6aa5e5322c369b8d8d8d093e444030182c482633c104137cfc002bfbc4b45a505e2f1f3e7cfc00528fdc8cd8f8f1e3079013be1a7304cc0690133c15829c90ea52b8b949a928101b1bd5bcf11e5d96fda26117b31275c05fd5236cadb57642402c0adbf128a020ea808fc20ac7c655323f77bcfcd420387168abca8c31229148445270a4325a85be5c4583688b71281434465fd40a7dd128688bc672882b6a2bc0d09c95ce29fe14754297ff4f64a1415138125d5e2ca904135363a6f37155e949394a24120d8e1c3c348efec9cea51c21d0d4a85163e6c71b3319cb64b59a0e1da20e7d1d6f63566deccd28271b8d723cca8d681d9d213a117b5e33981f4d30e3389222cc00c11806438ebcd7fc1a2f2fee6126a3f0c78c3bbf0213a77ffa56a4553d736171e72360e2b4ebc85cb9fc9cbfd3c25c819c73e2846b5bb6d08807e0cbfc34a28ed5318ea86880b6967cb1fff2a5915d2fead89717a3b0cfa87f4661ffdc7e771189ed1a853370c4b976c2b976b2a556db0223c20e991d319da25f27e24142c7fc20585f04d6eee331028c0822883adeef1895cc290b2e01bd7134d4a24e1d0d893c5878f0e031c29c2bd287ebd9ebf5cae57055c85e992db4aa6d7d9b314e7bf16deb2ced450fbd7acbed99f264674e00780140005c02d043ece14d363edc3cf567c654f5cbd1b8fdb5061d7eb85ddde6ddafb64d54bf8d1e3d449dee7b3c6ad4d566db5e2f20adea979f027039c735dd3de39ad7fdf733ccce7be55eafdc0addfdc8516ea2fa459ed1d0cc552293e61ae38cce9835fda213eb62cf088d89eae7da2857420925d4d8888cd860b0d5d4d4d89800eecc84d8d8d898e0a3ded8d8d830c104137cfcf8a298be9c0d1f3fbc1f403a2027d01352a3b6798b719b049b281e46b954d3fae56ef3cde846d4a97f23ea44694ad28e562fa752893af455220b3d51583990ab4a4f7ad387eb17d77c7796c027812008de1a4b979da54f2fc6692f664d7f6ff962a2ba6d9
68ab6e51057c8713f1f1e2c3fab6f72848083e6860d530e9739519452caa3802bd6da6aa24d9b3f6ba2d8237830128201891ac0cc216ef94069014c124fd460205e5c97bf5f0dbb72590dd4545cbaca7d795607f54c0928f7e55fde45d47949f9b82e2f8a3a2e7476c524aeb8e28f441d9135bae116374c82892028b51056f002b55cf0aba80356201778c19353ebe87e59dcefab1648d060a108918527c7f30670bb9f473ca1ea2d2c98018d871573eb932e9ed83067eabb5d4abb21ac8629bc251ee899c2678827b6c801085b60a1d5b480a5a4c4aeeb26195a7cf06304142018a28a50773b100401423ac2dae430dbdec1eda75d788395167387db7541467c71bb5711b713add81d2c84c2ed6ad7bdcd2d81e20a1a9a6431c60f4c8491d37dd92987c1eddea653dd03b95161a7aae036edaba2da6aaab4c64f1618283c323f3f68524a821653440d3d9094b0c114e1811e1b0557024193175aa5b8a4bc48364a1317265a16a8899884cf0b143b8c624c517c5cea14ad10099a58c40b442c7c711b1551d3018c02147ef1438df569caa6ccfa34afc854440f78f8a4e4402639784a541e6ce8a0d0be1f6e80021b020729b3570f0b64d8a634518227080425c861899935c1a2c82ecbfed721a8408b10b4681242931c408450023474d89e0812082df15a8490c65bc9b185871c523c151f031c58fc14386eaf448e30706002440e2e6c74d8d024cb657dab091affc47634b8c0926ed0b1c5ca08a1c30cfb32250421ac8d220426acd091021b7367500e34d021440e5a7484c0bad410021176e4430833b0b5881c3eb0a5246860454fc0ec480a2e2cbde26fb05e163b58172e72046161ee0cd2010c1d3fb6de19a443ae89a53cbf85a54c748cc0bab460967467504f10f781b0f6cea09e283e58f1cea09e2774dcece8cea09ea020ac7767500f4d8528b199254889313c6807105c7a679012627c4294b6a9cb57c1d4d4f4ff6fada983e0b0e110e64398b1d49130413f3e23985af303b4ebfddf9032a1b61e9fdbcf23dc7e864d15133188082fae6ac636837eb0dce69c19d444d4511b6858efbdc5efd2b7a906a5ffa8e46c173741ead08c00004010006315000028100a060482e17838d2d3c0d70714800f6ca644725618cb046a10c3384c1965083140000000000088ccccb601aa72704a7be73df01bcfa6356de84462478a11e119a7e9066a5caa7a5e3ee6eb4058900eb35831fe8fc1708a81bd2d302ec422cdb06495114692760254d93a7fabc0dbd7fd05277337c406f0e5935eb608bdbbd9ff6f635c7286539cd3ee5598fd284380df2309116a13cd93106991e2aade18b60e90c93c090fe2ea4a2dac1081b1ed0e897593c09c4d8458ea4a93ae8858075b6a5c22318b1b108be704c5fe125762914362a5fc19fddcc034910d28c5c6d9c79a78f076ed65abf9b10b34d6724122c7213767e180192309eb27f8ddb40680bcdd7c80933d91953c09f72f217484cd9b4be890af9527d8d9863929356d16ea045a1f9f3e6e0a481e3b9943573408b7c642215c78ee8e8816843abd00ce76f0683643cf339fa89bd8353bd6aa95aaf045a1a42b024f29eb2e55ee699867ce8aa7087c081be6c8c24d5866406d993cc4f974c3714a0f2a5b811204ef1c8112645fbfaf4f011791ae515254fac5472bf63b8410e9c09a06860ffd47812a376d3ab3c4c363086cd04f5871834affa7c5502c7586a614ca72b05584179268307cec07d5e742ec3bf81c85377f6d11f74a89b8f1763818b0e99430c4a8e0573a7a17ee2442319bcffa10ea049ae1946a016708fe78fa57366160da98a00e42f734c4c6b00a5b0711740b132adcc7d59b595e72045ce02c8572bb59674a14478369574a1155e8c8ea95a75774f44c76e5d4adba2ee3f486d7a6995961f73abe303a6225420bb119a3a2e1eaec84db3661646bc7df021285aab8d8fbb04978c9cd56f538a7862ce98cf8ebb900b0108055f3b6adfaeb85adbcdbf5bdabcfc87a2d518de55fbf8025a2af3e05a707868ddfa43b8f939e68f2e9f71ed930b16828961a5abcde44f736c4f33e85e34a86486c121db93306720431070ec8e2a51091d3f8698d9cc61e5f145509ef9e81d45b543aaec46e1cc512fb23b0b6df32209767bd9a82cedc74efb22b6e0601f9b059ac97fa28df0040edb05b9089c9c94368a48a06eb0c8b869873526fd58e182927f510ac32202a7b48e6f911902db39893b38659e41f16f7c3a8d4f076c15895dd4db32aa55c7ee35b7f28787d97acf7d34d36bd12701e2c577825b081b88fc53fde755294eb7f65f0ca1d11f5f2a8c44fc77b0f7c7af19475b637bdb219f4ce50e9f7466bbeb831a913185b28e426220c3a59ad4bd8d2beeec4c425a03c814595942c4a65732633d373094e3fd3b6646e1bde7d09cf3e858d7331b1488a124ea4be1ad6cd823f702d9314f5ec6e48f02d91c3cfd5c7385a82c880ba3ec4975101a0707c1787a1499eac
4629f68fde0556fa5ed80a9cca1902bd5178ff8e0f89e59178a2568a27105d2c259b658a19e3b4dddff7c25df4b3518c9cbc145f744ee8bde8b6a4f86bb4c9f7cba8ea89f30aa2be4a142f157cb8dde2dd16da4e23ebab9a3c6926a0c7700696d0ab6fba901e617e653af689ac338e8a1bc6b80722b0d218087a5b4b8f2e541a06d91c1b356c0b30649b4562c645707bf1a5132d714259c2779b25d8fd22c8368bdd1dc4281a4f2e3644c5688a8fa5815a125716f12ddfa1cdd6b81fd5c36669dcca258e812738d08a6ea25bdfc8e42c5bd4beb3364b87b4412a5ebbe3e84f8cf0a053a70c29431f88a6958fd30f1a508b93b7be495fedd86fb3c8d7712ff35d280974a53618c8ce7880fc58ad247c6dffc8b1451d7e77993eacd15a8422a0b70747ece9a52276c667ff2c782a11c4336a1e3c7db6f5297371ba8dae77e1b19487e7da342f08742b20d52c0432602a752e8f9dad9988f83a73133bdc37d8d03f7805b3eed9d6d79d17f2d170e61e3324c4c8aa42f3b22fb1436b8ad535e2cb87479e2fc7ea39ab3ef4ae987b3691591bbaed06ec7b31a6d45f4aae0bfaa532f646c4abbc1be3d7724527fcc81b448f1d3eab3407015dfb15d06dc9b089f94a184176c7c9cc88ffce64e8113b7f834b99a52cb02fe40887359c1b4b472d367c6f78646b93a4c0b4faaa10d30946b48cb30852ac59e278c8e1fa5a88a6bc6f701925af2d0805f1182fb49faf9f74f010a6759620628733e1bd143d6f0cf94a2dc435f68e808f843d5313502a957e6f0fb544a0a5bda74cfda508352917132ddd3dd25fae90e21f0175ee167824072cdf453dd43df307c7820088e26700d568a88034dd3bd2978841a59404ba2bfeb76b09159d2cc5d5143f902244d0cd16d17083ef00b1e7deafc5cce7cf49b6c381033cf4814a79f8f548728017203c78f401f219ad18450852d0204bbc0481a07291745c726da4c26695df5516416003548a2980f8a9b3eb23e4750de15df96d8474c7eed05da1c58d066e2dc157e980b4d3717dc44764a0c70645b26b99db484a19c356cd56c9472ffd035ff3e893def732e9cc9f09b34e5f7d37216a17f959347a7891b79aebacde267f2f927eb318808bf2d5ee69a29782782272d0c0b3f0925e9f78bb119d9a62241445304cfb6ce2e8ba537be601001d105a10516b8824d53b7fe6351e3d30cec765de62e2a186bc3e4614a50f9037ad7604ecb94ad49bf95ee612d941f76b9128edd265e69ff40f3c092680dadcf4f6a9e6ea8bd1bda422c30d79e7cd5d516e11f6265851341cd33b0e3ba67c2f6c03622b745bfe1a394743d849fbd0ed63139df1ac02b160230814dcb2d57cacf5aa1e29c3d16013b25e28cec74c2d7829bf1652e20d1b9be6be28aa027b2af093e6eb3daa721ae19ea346cc325b32242696c2be6b6c9e632380cf9fe935ff358a58100a3be33e4d80e44d965f1bd0f0c3dcc3d1e0cebbec5f169e26bca753cb3a8c83ad0ebd2817b8a9debcf22e0327121437a70a80af910f3772c103796e5023f784802234581e5601576ef1fd8b80016b511478050a6947544953c6532b248a4e661cdededec4f54067f13e4e14496ed96978a514655743865ffa37617be256ddf6447bc3ab02d073a0d28f0f468514428aa9a761e669671c2d0a25cfb11f448344119f3a716a2f3ae863acbd68297c23aac0416f57291bea68895c7b66fcf8073d3047fef06cc58b6a62e88d91e6e9fe4d7e17f31359f20d366c7a066525412dcaf09b9968448aec0ebc367386077112eda944f509b16bb69a88611d5de0cb5ecb8fa06e2626ac1fdc5f1c51634c0b47c051fedc8a359371fefd8f5edd4b99628b033e63c16cd74b2e9c0e151b559e626d6b02d7125eb8453e7a4ca99c4838c98331abe0adb1c6d7dc3cd7d83997a0b0c6d1ea40abc3f0a670e7540f0b732e4071c3d20526b6938b83f21073110d8a9c985da628d5fe8203a7bddd2f25544904a0694b9855b004019d78d1cc61ef0977394850a6e98d84b199d67770b7c40bc028f781dc72f22242fb00ae655ccc192bfc4fa417681e09941b5a7a730d5b0a42c3dc83bb8d7439d6c85485082018355d61e6ce336ead30890a8cb5a8a5c62412a30db7eb8144c5ba2aadd3fd48bd5c2a07db1f6caf9cb91a846564a39e27c1887109bf050b92b367a92b1afdb04d589f9749c340666888a16731fe38fe2558d058ec54327e55928e8daffe40e07cb7239e7ea3619f8f39521905cd136151da11e000bebf88a0e0770a88632079d82cea422b1213eddac7b6efab0dd2c54934e2b16f085c60803633573bfd53a3c8d98e88cef89b539a9fc78cb946dd6acc3b5c7fa4153fd59ddcb9b8376180951e1fca00666557498e068c9e71677092c2938ef22821e0e569555dcb0bbf500de7f083f7a5f46a7af5a62c444b23744ed19386ef2b62899760688e3ea07be4312b523aec3e90242d7f442be2b208ca80c17a84a8b99e01d646276f0f1273d
eb634ba694e292153c77dfd9d7d4a3ee62d7d593e488de572119977782060d24e1b5f44400f23af008cc90cd9b465024baefe4401c74b1acf6d580216bd912c95b263cafd7aed579a13e5ae27e135432afc6b090108dbf5a68b7d4fa589b14968a5a18cae59d5ff42f646211f214a070a989d2c2a93f0051202aec5edb4d5c11a87d24cf0714d6a6b92e6c2b3e60286e494ee3f755a710291697cd2b51b5eb945b3fe6c2ca2907390fe67c9f4845c397f2f0dc429da0a2f12fe37817b581527f3b8b6d079d3ff98c89448ec595d93a43a1a8d6e52051891a3ad1796123251cfe51e5c884a4cc63dad95d7f31d80a2f891f0503b414341f6f02ae315e17418992afed80040b3826ee7bf3a41a54e95ca1abf4fa5a465c2664ec4d421ed1c9dad8419e4506cfb0fe351c1fe0e0574a52ab3956eed34185aab1af379a6f94c127a21a39f0e1dfe0067a9b2893f50e4d20726177df5c4d1d3feefc918ba273190e26ef73d28148f3a0d69c9538b79e8a64300be6835bb67bcb27f67fb396e7820e14c532faa46f515b50ca50d3ec26d82903106904d1d356af3f505ba9f360bfd09ab1c29619811921cb48294017c37ce72e756153402e20f35fe7707fc0be6960e812c19c2b819e66b8846ae9230ed90e4c02a762c2264f640b4be5a848e0ef9746bddc108bc4ed4dcfceab1c5230582c3efc98f97b625d49c31d028e1ad17c654d727df10c5e454c961fb1cd376c0757ea4bc37346d984ce2fa5094a7f1ffea4fc5a0748e0ddd21fe31bf23aa526b3a12e3071fc86de3e1d5ccf09ebbb8c559d8e7583f2b98ab7e2070ddc44b6516b68406673babded21b5163df4515d45852d6690611374e5a817491cf5df1389057cd82a134a34de18f1c609f657cc58b32778e5944ccded57b6c149371d1074c38de42dcedb3a83307862329daad345f9934110fc62b027b021af054a9dbab52ae8f526f9f6a6ea1515237416683b470f4b828d20eb75a73d1b33f4143b2f1ae458eea97de47418f9c2cfe366d8a214fbe43eb22bfde41e044b6854fe583e6f6efa76d2e3ba04dfb1aa9bf54c0364f0b8a89689232c9df4388c736bee0552d8d02b810e25b3a979f9b1ac4a1fa3952ff2f06191e0107d9ad0f7d258061690c3ff72aee53ed62be02322708ccd76b369bd6493aacc9e5b84c3195d8718a8b9b16d6645ac423982785d1c5ee94acd632321f4c7c9c1c1ad0eb3f27d2c16ac3a89100e28f3c353c740a655f25adca50271afba488873afe2ad0109d56b16297eef1e39765a56aeee9b53d28c6f6d3ffee594ea8ee300a5c61466e1029ad70d00b5ac90778838e42f215439d39f035079f2cd84bcedef4778aa1f1dda2daace70367564c463af545559e8046fa77486cceb2217df07e761435f3cacc5cc7bc99bfe8c6b92134a1fa88c09a1be6c92c07c2fe57a20fd7ab8142839d024612f1db5f9fbef46f01989649c3a15d3582d6bf8f4772e6120083e4cb5872c94d77ecee4f1be3b6cd77e0637c7864aedd20b2e6e203fb15c627c027cd7393e2f03072c5e0a93c5586e73299a2019d6652f3564e9eb343312c7ac2fcca460d0f8ddaa14c28d6593d19ed94f4e51d8ef6ca108a9f0775e6ff6518ef9dfa5e02e506e9b88c38278697b38ae02ef9bbd1d51fb4f9f59e3a3892c3f575ff6fc04ae44988e24f4ef4e377815713dad4fa8f897855a63c732bb32f6d525ced1f43ee04206e6b4985264b48ba6826fcd8812a836ccb2c38318be8c3a80e0f08b242a341c3d3bc30641d4ae042a491c919bf9aade8ccbca3e1a8bec4f0d3c495fd3302b2bab3ac1150d317e41e2a551dbd8dc8e824c6acf83a14fd90bf7b961375ef7346c06567369525e20d86dcb8ea1a259d086a5ebb50a46c3cc5a55299f5d8c10e8b5981773d976eba7feef35190e24e371bcbca94c9fbfd24dbc4f5f28ba1a4ed1068cf905bc543f907b39649d4c7c84acf64f8efc7b0b4ed776c4b3309b448e0c30c5830a07d8997127ca96687d8f1dd2bbaba9312fffd7183e9ff61cd2a059fc5bdf4cd08423cc7fb8c61f8395b84fdddaa82b2688e6cfcdf5b3118d9699d92ce66118db15828d1176eb63ea6ab3cab35652e1faa603e13a0d9c4617d7d37a87f3d66b351e76e734e3abc786a6a380cd0030d8e5c4c428a532ad44e3e78b7115289145e33f64ef77610fac3807bb2a89a16f7cbfc203e43eaf9d262c91526b7127fb81f57d862b4aef3b5dcf2b1ee7296cfb60fb5ed9fc2daf510a41c6324c301760479eda827f2e8140331f91758fb44d24741b3ebff3ee5265e8d4e85ace09b29b74f9cf4a40cb14ed60e1a729f886a92e79857957780a2d00d50bb984418413be3d711a527f230b1a4427c158076bbd1c7cbeff96d4c7d7911f4ccc192bf5bd9dd6b2cc5620f67866104edd63c7b221949ccca46ad929130645e06d5231b978a1917278829ab14781d04a2a94b131b623ed606b690aa2158316ab1480e744e6f34d1866246fb9884a4
593ed0b37843e9a10f342748faf4c788361de7ec4822b53d60a77571567b439759d59bc8a97d2262638f2aa5e398d9d1548126442873f7fe3a43a4c733043d1a3e7d0345c04e1dabec300b2738847a998e82386f5e57b7a26965564e9ff0640c2fee4f31e125c2895a493de5ca4d2200846b00d5d127c40d00f379e7ff6302542daa803ab7dce132133833b300b9df186f05d5af10f9d9d70175879c14d1ac4b247a571fc62e2a208ea05dfe7037b187d56a0e660c047a69c3ea036c842a9ba0eb24a5433550769b8ae0808bf0a5354e01118ab54612d9cbb8050f6a4b5a84d53bcc06d147864497ed3582823a055a26ea6e58dda4590261590a20b77caa2b9d1e617d3f302a3749a642234e5288add5e94ad14f155e9e95c5a114def32a293205693ef0747260d7bc244538ae11ac2281c39225ef2e39f880d62bf5b965b5ac9294d428e6b15b34b1f888e72df463638dec7a11b5edf9762c95bcb780ce57e79d75040a08500ee96f3b3c4c31ac70ab19117e60e5a8c59d4c6b4704e420d26f2b6a83fca05c3fb91e0a6e3706fc46780421973198d6f9fe2b3e648aca499ad9de3926136289c3830ca0672a0923e0b6c8cd54c2ef125d48388c66faea2fd098dc287f89e14f33e945241c106a539a00ab22ec6266bd28852322b979e50f3d8b765d0fb0d460cbe8b1feb85477466b82f12ec251b733919bae22ec8af82566ae93a512e4d03ccc0aa7af77f24c14f9591d95ad66544b8138b3de9c2280a928bb8599de023fb377c9ac91089ab10cd4975e08389c00de5f1109f58567a8a63a75ab8802e4c157bf631f9859a532398ab5925d44c2813bbd193af9d051d5333810336766273a06d4dc2359a152c7201e6ed1560ca13178e7066a9144b446f7e91704d08d91aebffe12fd7ee02a5bee09ddda19c60b41ee2e4caec35b015b70d57c17808283515a80c02f5610ca10233f05527c812f86608abc5603351bb12f7bf4e8df47d2fce8957b97d9ac3586b2cec96b5457721df2206015c45aa700e5fa8a40b5a55ab488a0f538b55a4f6680b5aba0b92f3f1fc57e0903cdea88d1a4b72cd1f3f4612b11e0597a2b66c577d33464e246407746dfe606ac4ac8e834664ccaeb28ecc203b9b38b7ef8af1472cd5d1932d1843354b70a710a7a6f9c30a934275642982722561df2c392fc01a4f6887859609e4175f6daf58d05b9a86ae9353b5d8f6b7340f7c9dd3dea7a41420db2739405621c0eaa2c918191d6ba69632cc7f6cd770485d3b3619809af248b5e869fac7757d62e1970ed972d9d9f5fcce478c04b30ccbf2a1e337816694a01a9734b33c456852c5dfaaaa5161dc72cccb4e9b52604cf6d10669e888caeb51944dfb15d94f4b6dcccc95a9a68b1bc779a34ff2fb3e6138fac32bc35ddc6a06181dac9733465867f506427ae8b8442f88a2117181d4a2b5fa07cfbb18fa6569d6ab6c42b38dc79400dd23d01b6d19536d2fa62297666aa9fc15c1d9aa54db68b0d3ca0fbe8590b87cf684939ba02579cb6ee248991329fa1dc0017c631f2563ab38750535df86865b0575ae15ddeeea0bb3c4899485b6c49db5c369ea1fc6d9388d40fb564ff6f599777f4c097d33aae2f953abf979962cb4b9af8dce33d2ef4ee5f0e66a754f747a4d5130da69c3e85b6f421a56043fb7dd828393d7532f6b0c67d6611d962f2d12b490b3a0f01d0e5cda3e4e06b3babdd9aab19003c627f635bfb71080d919e24a198bf3dfc9bc03d33feab9e8efbc934306f1320bb26923d39e5320a23949c82816f1078481bf17b3973c8013f919c823018507fc0cca88cb08c7cee87a6af1ad0c884d6972b7fc2a014df3d120056165c0452c1ef76420fe7ea13581623221cce527e911deef5206e5de4e7268bbdb866b0995110a3a7eea2b8f9d0695db87ce09fad2b05360927bd53c53cf617dab87aea5898772d90b9ace349405a2c108774d699d38dcf4fc4179a16d4cb8358084f313afb5f5a62962fa8a7bfb188a87985119479dad34fb3c9381901bcd019cd7540f41269bae1840b99557f013c56e1a5906ccdb6127e215ccb07fc120080433961065ceaa84b04d93ce13154b1e9a8361fa18300326feee0bc9663fc1907582c937386fa2d5b55e514dc8774f2ccb16e6843cdee94df1a4a227e94e5b794b348112a400ae2fb5f600a60e6f362c714f047e788a022ef3f4747fefe7664cd7963134e8909147e6fc62aeb47a6d007213d3e385e3eb8feea1b0c5e3a3c90f44423e88386713bfb3159fa0285cf13eaf4889b666cef2b46466757af06a11c0b27d45ec52a81c4e6977772e4a515ec5854e8a560c141bcdb2d6c7172e0ac89a6dfba4bb128378fd41e7224bfe4da57b2e314cf8f5249ab263892931c042d13a127231e6f23423c48ea1a6244dcc18456cccb7874cba73e65e7eff957a82883b98ed4d69426c5508867305ec0412e21f78578a2eb108f5a9ff1cfff991715eacbd79913c2ac6bea13a35019bf639
8854e65143cfd2b919245c704e2194246173481b7a1c57aeed024081e6aecb102e8648c21b91dfcf300eac91a6bcbebae4f3fff571e3514a761b270d2dbebf30803eef6407eb2d6f8840be90401f949b55f570792aa6b17c94325d0ba67c7442a31a6f99e8575cc5756ae3f9a1e996563dffc495eabd9aa28c9a815ae316adb49cd329eca9896d6b7c7a3ea40c5dc028cfa8810b31ae9d645953f3d60824590beb360c320630ba30c601101e700a67f8363b12ff09286e9cf01b6cf213b4898203c657e441f8240cbf4d2075e04d201ebf713b5fd58bbc86dd963bb336c7dfa946b7cb35dfb52196e142dac014611efdb1aacc4682c2cf76c855fc487cf8a851f0c61ff064099adfb166e4504f618d0a55f1a521c2b77686a5acd799e07f7623ff656659ef5e63e470c317ffb0f3000ac3905a8468d4124a08637bd187f41821749c7790f1f58f3d6c20026238e3886347cc69aedb6a1a08bf844158b31cf69d4900e449831cf17ffaf7f4424c298676559b1bb14b4c90dd9413ec2274f585656a52e31e5288b2e474b48c05081dac382cbbae7aca55e80bc01be1b63cdb08ac8306118f385d95f65d52d98a99d40928e757abd84724af74f2f058e06ff00faac44c72eff978a014aa9666cea8282a5044ddc459b981b2e4ce59aa15b2a92a58a4b757daf6851e77d8d35b085869aa7fe00d3426dab06f11c07e2146551f01da24a8b70fa10b0c860b9639bf46efc3cd1339ab78b58eb33df7ea7942e5219354708b49aa93191c349d226fdb0e350acc8d521603b8440bef2457e0351602923811a405c485608909ee31a4a16c8579b02238a08b85832db9db7fc92889192a42f54fe46b91b4d6bb03a02b8d3d806c2fdc12f57604b2dcfbb5a80c98242579e846506908137d19193cf0084c549c9341124be119389f23283fc2e6bc49688cd2bed9d46cb44fb45dd0acacce0067b6525aa00716351e0c74b9bad5349baaf82370e102234089d2f72ba9eb66171077c1c7eb7a6890cda91a29fcdef3a44304d5651db1c92a87f3428e271ce36699e0d5b82437a8f8e62b9d7481cd983161907d1dac2d55317407f1b63f80f59b4553fe2dd7355cfc2dd53c8028e5543983fc2dcafb0dbef58613823650d1e6b461da0fb5503463e955618492019c6208fe8d6fe201e4f89c0e1ce2ee69288f375569d016b947ba089a8af79f435d51bc5aa7b0dd1f5064869af2fd9510dea90442dd617d4bd2a4017a82138136da7b5e0652ee0887492a18d437fbd8a2a91a02ae0eafb783005a30ab81ebb04815de4650feeb023558046d2889aa1c5975bb3c943b301477fcf2fb0d4e85c335e8885ecc11611914d66e4eab8466a7a1937c2d8431555a9c0f9808c7b5ae879b743c3e28489a2515b74871ec1ed5e86d2f08b4e1275d4b13b8340db2d94ca532d8163a4a71657cf589ed10913ead31a4e5548553f23f432d9494deb5cf48dce1a778a8dd17c49345c898b355f03c7d4543c75e46c9eeaa812a995b7d8c74fca8f597be2cdc174d1cbb21e2e371eb0c091a0dd93eda8f4c999a65f487bd6da1ed2a2d0415b1b9008e97fe6a3ee0c6d79b06aa0aeaf678e2c095dcfe181ca381e4cea8cb93a974c97b2a50e2c4b2bdb7d76f744111bd193e598865a5fbdcf5b656d13a0b8f709727cba236e5a22d2399ace88311882c56a1472a2030c0914779f43624a917431072a2130416eeace4fac6adbb14a8775ced95fc69e0ad0e403f81599750962905fb4af55f33f2473cced57fb3ab14531e5e3f63ee4d8f1a951efe5925100b86b5fa4098d45834f59ce2d372fe4f6078958ef91fe09046b02c4fe57c04eb72af3fd0d8c0e155ab854c80ed5e1f83daef167e987c0fd35c309f639531ff6f3b40af1fd4d187ef52d3d3f861c7c77fb965864804e863395fd7baca1122e3a417a714b1abae7d82f3fea339894e19f8fd9598df3658267506012ccacd03ded5ce59ccca2da710091e136a6f44dc1aedb86d2730d746ef64e94da0ef435c870c3c59d11c1ebf86e5780fe735141e53d64a2d7f34ac2131e0ca3aa0dadcf33dbd4985021f2ca4948b768c647dc0eba1298a298a39407d110263a6a5689203b7913b7aae4a1a656757b3d38d7d46feee20069cca1623009a7c020ab667473514ac4c3c0d8faae0010dbcb2bcc6e1c051d0fadf8b758a120bee57b9556c0e99cb34df8447a3b16925e772e0a7cbe75f0b4a1557cd2d366b7136f5c2cf1c6f193466daacdf3dc94e38f7bfa53368ff4d22a7bc079c07a4322463c2838951b723c382c0df1382144d600e207139ab73ee8669dabe9a1df15d1a1cdca4c2fe544ca6fbf4017f3a2f34fc6a19a1d9b2850f29f70367c7cac1b7431d524f7908b52dfb344e9040b2a4f630a094dc72b51a521505771e8672b7003d6d777c1e0148caf9dbd6b1d7045ebbe373730b95a4ec4bf41d7cfb54ca1d4635260bcbda2aa18260957fa23db1a577157db79ca00aa75037bd8c56d093a
4e127afc296c5675020385c9948bff70117da9af3a2b054f7fb6ad920637173bfb2a8af07ed05594555baa11021b86e664ec4ccc2d57e8d19d347ea2c5addd40973bc3aa84ee2d8f8fd804e42118221c7eba1f32ef2dd9123eae1db2ef9f2bcaa3377ae7a15604542d4fc3c9f7679bd46361e103867a1597b4d7e10edf880b8e3f00a6af0e8ca8a3fd8aeae4313afc64df21579eaa252b1c21a702702882f1e4983fbba96c565a90d0cd77f0595b5eb34144be271158868492db0e6cd1176ed216a60ed23883129042bdd69ca1cff09dd7ee9cfc676c2d55b836652b2ba906ea367f084d1bd5ccd9b4ec9a146affbbd31b5c8f685babb71a731caae771e2094cf994a8ee8f3e6512e374b647326c07648064d25e1197c67877bf0b4c26b3803100ef328099ae73f4f372511871a2d38c612be08386408830846f0122ce47dbdf40951e0b06c70d92f54a46b15d3eb7d789754d36ce06481caa2b131a3e1c4eccd81dde17812255501c372fd60b182d962eebd98951c47c514a6e922c7824c7c9ebb18d92383165e7f35dcd252e9aeb0691c856d35cfdcf00b61f16e808ad7b1a7fba0d94454a619e5c4609c311119a2bdce5eed864f0e110d5392bba9fc6312c4e4c5d81777a164f3fe61e56c8f1688016a445fe8e076c5c9203397dfa04fa81addfd3d7815dfd4531491bea0a6c6536e2af078bd6ac132d290f98e534c794c5feae5d237218149166a41ced646526c280cd9d6ad8f7c67e4df46b044c1006623b984f9d99d312c1a794ef18713c43bab531c3bf66a2e2914cfe028484529a9c6983e4ca477957e410878d2e4dc0160f85e745aa54f6817e1a3d98b4201c5a048e12079af968f66158f88652237187e5559e583156cc4235beb9487a671e987cc12cd3f61914604433a506edba4e91b43b4241bf9542ba7dd0f41d41d9ebc18712503c497ca7bddfc472da8fc3eb926a16539b57b0042d837662d8fac51052dcf98352836354edf83e6e15531f351f0f442242f1b063b471f56162eed1afe5c6385721f2291466ff06aec63c0fb942f5ce1a25bf23bbbf88ca0233a8bacc7905a9106276ecad26d026c034ea1547b5075cf28f456c3350385ad0c6356d7a8d1d816df228d1fb2e9ae49d76e5255a5742d75c5362047694838fae1084143c5d83493e4a5498af1511f5706cd230aae8bf3d337ff37815395c109e6e58f7a002a8a8efe13e83802b0a54a2d70a899aba18ccc30520eeee78fa766c20020fee69a5db67383986a77eb241aef039e56a6613d6d3ac3e36146ede7820562912bf41e8007f8e4c99544de9f425ae5966bc382efe896416cdc50004720e3d2e8e72bd5db343a87c12779cac7d919bd7ef9f6ae6c23ca112588345aa78fb843f51d58fdbfbb4b04d0415e1b4631a2f446f621d4e8b50cbf6fd84fac199389e785151e6b7a7c4b0ea909cf5113f3f4ec44f5947a69f17f92abc637d0bb1fd09ff688bed586320c5456dfd78b05d12b9f1811ca32caa0535e4d38f7aea48e32d3c9940998057c566431560fab8555f6942329a2b4c07debbc5b1cfbb41f4082ba362a5062dfdd5100a2fed117a37649b40abf3bfdd793ffdb6849897823dab86fd98e0c089c31394ad2882248e9aeca84244c42911a6909c2592bc0ea313cb1e5fc4c7c0b2fc31229ce0771d0080084e317e7ea53bb21401a1a5bfb044b7075fd4541d1c66d4e3393c7420458796a00ca22ae7c9c27d7134e91ea79020602cc4d59121235fa1266e47d6d45ef14f052682f83d64cc7098a8473202498f88530679cb864a0cbb940a3398b981c27a04a417049ab4dcab9577d5927a7c70a63859163768f28383265754d36141ba79be93b9f88f2053ccef8af3321c54338cbfb25f2cbb888f065e40c4fc4aeaea43553e721627512d91bb5b32e350c988f8da08c9b26e1b577c40794f84707dc31eba941606f903ae7055ad72568ceb84fb25b2728edb32d1829ab00562c305997674a588c9ca779fa9aeaf1b2853710bd061ded76cceaeaceb30d14297a03c33de6100e6368f7c8ffdf3c0661e7a75063da58bc180fe5282ab1349953dd2f4cc65e9101b7784c4a2d8bc4967ece49dac708b5bda88f0880273ca00388e6562ee01655d7eed760d32a9f647641d1b5434a2cc211d76d50bca64b13c54051bc70bcb173a0ede1a108373d43be01929d14c192488e7d11a0f1b88eabe3b29def7687f6834f39f7a7aee56ff31dd50937b7161f133893ffe3e98f8726ce6064b206b1e447000f790bec58aea556a22236b83e499774e105af1ea30aa1545288f1d3da16c3e1effe926851bc2fdf203acffd56c13c1e4e824fc4bd16ab98bf00ad424a0ca591005ac96ea9c0740302b8791c5d0e91e77916722c3c0b6ebe1d6c1ab48ca6860ee6cf65d400f6a3432b8f074e340a092e482828ac2ac0937197913ba0ad902a840ab90fa2bab91048a608d29a6672e5e91756a43ada69f90d02f3dc21c3d70f4495619cfedbf9
336e715853461f0d298a4f9f8d1fc390596c22d8d2ae0e4650461b1e4ec377e4c583d88b144b45ead3976808bd7429ad01c4a3959d640b87ab6782c9a92b5ac09b9b330a73583126106a706678de101a331a5f225059cfcb6befd0956ede7c137ba832cf8d05f20ee35320869b8c257323f0f68861e28220eb467f183e479365b0dbe39a046d8ed54a59a2f1fef79205af2dbd92fb1e606dc53390102625c80d8080180c71ed31ba3487832cfc17cdab726114bde6c4930553ebb70b9d0b36af9b84fdf8f8ec283363319881903b1e6a1c3288e2c6cdb3fc7a837e8637f6095f90772a9128963abb5c782442b15032e3c80a431078e19e58c2e8295c67e073d9eb170627412601cbb096d20c4950ce669b28826bb4f693ec27e7da3d3e91f83a5cd8e4251a0c1ced026e23903312f8f64fb9ed2cad1555b44466e1dcd878b02b2787164b52854e806498a06894b7508a7998a2ac479cd156885c9b27155d41245ce4d671866812a120399fc2181634ecc8252a59324d1257311b0e90b9a51a09f1a6e95ff6221d119b398eebfaee0e5530a005549634c83f611ba2a6b32d3ed1fee452f55d1c34910a057605514fa3d9580555c8e22335e8b833b85a5942a34c55dcfe63b7a170f48c1de0c5e7e573dba7480d9df55ef1ef7284712a79ad8e4161731e51fc60f95acfbed2a634aeb224beb976f81a880af3b22a04e83669dba299a04b1e607fdd93438557c19ad12ca4df6718006ec2918b5f3e9d423ebd68952f48e047df77176dd752513c5181befb463420515e597cd1a032504017ad29c45cf24bc8f2039ae278ea6961805bf59293a009bf711d5416d4e53be382a7c01cb78c7c557a22817a49afc39647ba9ab1e1d098d50ea539b49953e4829c4b439b6964989033cf33ac29904b35fc33531e5683e7344c6da282d209b4cea3dc19ae82eba688beb51d4a2a3a45aecc499baf09baee71eb92d2125351e92243f4378d7ee4fc5bb2d862673a85a22130d2449492f558eb7b57e446730958789fc91ecc4c125216d66cc0c4651d5aab51338ae9dff5e895f2f886a58a334438a84a86e0e26c56b03481ec57aa88afdca6c802cb928b3ba3251a4151f8eabddd66d9b8512163ad392d53fd4c761e65a213471e0a4e888a32a25a3b1ca7ed61144fa5fe835b00d4013b9666063ad55cbe7f1842b3f0bb8aeb5fad70c442fe90498147fa225ebd5204c4238b3b7af9f6c3c4acf3ea023cc92898ce6db8c1a9a92b7693b800818a8b4d6c89ab603140729fdb6fd404090803934a077e333e549686b5a98bf84d51b42fa1c45b967bdf2c5a97eef2472855d11cd32d85eda65cea0bcecfbd221f5b7835f5dc5032558fa80dfb7c0ced44aa2c5eb74604ad277605ce7b71a2efbafd08cd90c5d0f745c175978cf135e5b7ee23aa90e3bb87435f08a4f76191087667ebb48f1fb602700908b0eec5aed19cc86bb95a77d512910c04c6560cf5f5803d70eda1a83a035300fdc628910c5db01c84f115ed57391a7f96c1af04a79316a6bbd18ad4e3d7342feeced5a61ab4642aa34888e373e1aab704166fe11f83cdcd1549da375ac98b7034b9b2c04993676206d4210c5c9cf07a0db30066e6289d8b95b458bdb2df87f5d15cbf2de026a57156b54c232a18ab40a733f2230f807725705061cc407c6aab867a4eaa3da4467942e21fe12f999390ca5717bf7b58d9c885b543cf3f091c530a9a2e15eb52330a77e600b93903e0e39dd49264c3361f9062803385cd4045c2190e0b87bd80576202e8f5cab23f6a5de1183bebe0c6a6e7dfae95963a62b614cf7b554b3ef7b222049721ac694cb50943f012c1e9d2131d65bf9368c4184a2f33e02c60d157d155c7a9e7d1a9de18c853a7d4cdc8f48f75232a6cd18a6f77ca9b1ef68f4429b613e2ea8168a20cbc06e039269c338a592e0fd1a0401d9524a7a64504ccb12cb693483764689c89e3a5b00ba32a7f40bc41d3e483d1adb06ce98f6be3258afeda74f7b166bcda9f97538d5ba656d1f154f89088a5e5de74bd0a375223a487c7adbbf1f7493b2ae27033019122375b5e1d01236c352e82031d7504d33b99ff19c4941298d92314f21ede3a43cf4e8bf947d0b4bf21638b996bb36b5a6b4f74a283aa9b1767ff531932dd0ebb7fed0562396e5581f20410297997b6e61a6b369c1d6955d937f5a44d8b280b068355a4df229819b739c2a793dfbe74e159d61b44124cb44365c8dcef37f0614090a3283c5f115b3a2750b194a868ce78c8dac53c16cc7823c4ffda7dd883ec15b84db907bb1d6b73b6850870b31a20b368d05bca606e7bc2c66e6c8b46faaa37e70fe98de24950e19bd6bdb4bf3ee1aed822d62b90d36683403ca95339c3538908bb82d706dfa6b33bee0391a9f3846dd57df39c5c87c15e9e3dbc87df31420bf0228ee3f3033db8d4023131e6495582ba8407f45a3c0d42aa5d19ebda2e110234b55134953bf20ad951c6e334be86ba72f504275b4
d1391895ae357bd74cfffda96286cda4ef002a962183e3d3f860281a9b4c341f6cdb9eeb343439bb7c4d6f4cf0675d29c636e1d7853103dd2503bbedc586e373b6e5137a272cb341bd4f5c501d5a8f6739101a32587a4063020092109947228e1548232855f866e9f0ddd1e61e295ef2e55df753c33dacda56d0702f50e3b6bc6fa15e6c2d6c3bdbf244049ee51abe33346de630a53e1a05a8a76deefc97f95b4f9b4df377399a4439be42639562fdbe6474bd3f7a1a257dc6ef8fb9e7845fde71aa1bfee328c8deac69dfe8be18c6e78d5c900824ae93af95fe1654f775a87050d9716678d811e535593d7db17ac0b220a87a6d09d6397749c19091a341591007200bf2af4dca5b869dec8fd83c0bd8d26b68b238cc479cb6949cc2e0f04378db0ec763ed3d7cc07086afd25418ba8f0a861b198b575f53a46406607537b0579fa557bf17439932515fac444519c7b4770ad31e5b53a625c2cfab032538ac14e5e1f052e0e3e6448eb70a665fd66b7d8fffe04d314d034057e1966bf97c89db656513c3a8c35ad4d126f0322c6d919ae63d6579f62bc5cf1ea1455c4c6b014d32b099c7a975327a495ced53a6127fd6e049c6a1742e270fa7d78010b7b3c25977c998742528dd34b46009f75e3f6fab154fed8dafca05d74fbc9443584a156313e8cdf9391b1371383a21db22ef20eb6a7c3365a9b715b8fbd50390338ac2ad69bb02ab50c987583722b03f019831d9effb06a910655be2cacfcea83232d2bed421e3fca5aa99df350212000e46175d7440a872134c9c564a73bec0657d96bfa5e4b75b0afef5ddfc380a066d72f78155888d88fe6e1f16f408faa51996ae98364e0bc383cf6ff2521c6c77c283decd1ca6f3470c379241f6d8f24ec83b98ed8c0a0ebf3f3f6c8407a51defca174c5039e2cb8af3a31c3e5f8e163ba35536520a51c68d20640144464b3920b2002423bfffe7e3928414b2e8166b598147dd9b7881ff33c8e2c91f5e9696c870ef2ec2943097dec3b2300fe27259ee7d346816879c5a266a48478bfcb27dda4d65114ad205c4f92120f8e71dc71393a629df3674c733e8d8329f2a0261ac3687a75048059e2e8bab4b509dd39935c40200eb0cc7aa4f63e6773a074bbb85e010640146291c63e9b000630d48a9898eeb1093a1aa7d52846c7c0728b45b56e511b8dbe3f3eed2d1b29a3f8e709973d18c4abf3b0bbe590c70aba0c516a19fe3da9456fdd20c7d7cac258bde6e2d69f6d56c8befffa48f5187a521eec823b34865dd9530069d3a211c1cd56a905d105eabafaeeb0e536a201002491bb2054a7bc5f5d5ba92f8646d858bdafbbabce8c80a0ff04a0302bb301db914e4c3c6086700c891081d8a4546658c00b478f81772678ee370ec048b3eb20bf8cab7e25f3a11140b0f7a513bb89e2db36a0b0ddefb06801036a8687d8325869f9f3a1de40a640e106bf546df426722240defb1ef9a1f3a1a81e8c063f01b10c09edd405705024ce11b00f75264198f5bd6521c68ae6d32c1e3003417ab947e334507a35a12cef9e9003c84ebf7b9ea80270bb75679303ea1fc3f640283781bb2bd8ddedd466f791b101446f7a26fa5aa5b41a5e8ad94b0e2aa829301f94898df6f02d00b1a0cfa779e9530ff8dcb1eeaa48dff708de59c2f673c8710b33fffde679bbe8c25e13a963170128edd7562c6d5992977efe8b6e3fdaf721ac2e31e483d57b0468a6c0f32acb620c0f05f860d0105b08991982daa095961083d6ee2d0d8341ddaacb01511214d0513248c0a61fbbc5febc7055cae10064634ea20dc15b1aaf7ff169bf911648934e605ead6fe96e0f45b37cdfd4c8ede7757c3708b38c09deed513798e69316c61b24e41bdc08f734552f33d71175e96a964f90ba07321dc6934061476ae6b5ecc344ed0b629bd93ac7f24f1cf203acbbe59ce9a357e8a171913cbb34cfb01c0f0e7aba1f429854de1e46cf9a00f38ae14741372cbb6317035432e4471043ce6cfc083db11c97945629cfbec2cb9a84b64197526b141c2f57a776325590d1c75d228693cf226dcc5c6dec4161c7035b114c589694612fa15dfe670b2c7029eed947e22bb20752f10ed1d4fc6d54b9ee568adccbaabe58d6c4ba34a0a0f160aa5edd322a3c451b9ad9698feb7b8235ea953241c2f49c0e638e076724ee58d7d2723b347d9c85b941ef2c0114e7364d7c6269e4303d4c9714099b612515069957140fd3456c96888981998cf1ffaa2164e38da326821593cb13a689ddc71ce1bcea26683c949d38fc47deb548c34371c529443b0fd45889bb1e10524e8e65e58fad88e630ad04a92cba11b700dc5fe71acd875a5c18da0b0e7ffb7de7dd7ce93ab696166c57328c933a039abc7b5b37bfd7690d8bf841e578b2252b8046ba54034d725dc2c5145c2a547dbf6a791dfe367e553f06ad19d9318e7b6c14467b816def1e459e6d0bae391823097afe08fb786635cd64f2bc41f0301fce6e8a537247ee
3ed73069c34e4cf50223ddd8e7cc0e8d105a0eff8ac1b8d42a7b47035785ddb26d9cd558298123cbd8500d79fce6122f35d4b9ebcafd21ed1be7cd43b7284ef361bcbc51f738356c0418d2f690a314185a11fafe20249599e9e2771034418146e26a425a1bcf9da8fb478fd2f3994ad4aca7e9f08a459960f333eeebd64d6f6f43f0c676eb4b1bd68562cc2061c14360d2cad6c341538267b63258df2eb293f2a509c91625735046e5c11eb822ff2dc67d90ad2bd07c8ef18f5297636f8c17b9fd6393214210207aab183df350555c2c63064c0399253708aa637001fe8316be0cd91a7db685e33fc973eba23af446d24f472b24e5a905a70dba6ac731a0acc5db067b322bb7874a3df1472bb3318bdfd44d62b31f09b1053f3ef0cc0dbd505a321f07d636def6bb59011c5800fab2ac069c40b6dc5940ec8e21f0fdc8fac759ca8fe22586e6d4f975592736d83a02c110837f82ac10109cba4c5a46c397627cdf7605fa93a56ec1ba6c840324f8d9a8859a000d92750160ffb3802955f2f38e7774005d9961b8b6d52ef80ec925910826d165a2437034b38268c965b05c1e6daf879fe4bfa93cd9f0c05a8c18c00812133b52c7613fc599ec7fdc210c0f2957f19b4d6138a2496e852ff25aa460314bb81bcc473187e47527aaea1b41be94e345205a81e444381007f5839446c32d0d80e631e7a787156891b8bc0a6497c897679c1b56ee40616fe08a942f3244a6e1ce09c27b8768df57715d9f0fd02b4076c4d8d5502b674aeceea43786e03bd2980e09090292c1f256e0f53dfb42471c93ca5705695683bdcb57ce65f2548214e854203bbd17f67056af2b8ac78d0768793c53a541869baa30e94c1261bc7ff292a8763cc1ddd385f74505b4bf16a3748540a7dfb1f99545855b49bc926b8b6df56b16ebac9b1c70a8ad26f26b81673059d73e781ba4df93eb211706d7e158b384c5570b8f942ea030c465214eaf3a8748e246f59a80a2c6fdaf63249cdfa3aba944584aa0a42c2d40c67b7e52c82f52a695da4ed534aad6695e4115b4e80cc00ace798178e0591c9c41db18dca67c6e35288626cff21f357228010fdc35a94f7ee6cb822f510d85d948fc006983ae34467f200fc1cc2fcb19142d41086335460440e9a35041cac6bf09d53baed841c5f69323f35ec054820633f1fdda41fa802db21a5e0d2b1ba1180b9f555ce06aae1d3a2a756ac2168d647387d35e211c372f50543e6aadcae49ec3dad59ce979f5f39b35e689b7a13a83eb8b2c6a9e08afc1bd06d434f4cbfc26f3af220bf2bdcc9b784f7d8ad994d5e0a771811e41613133de66cf96646243b25f40a9026cc6937d59a9105261c033c948a3f94712659de4a1544089ca9f900c6dcc6c5ddd38e5828339b0483a5a60fb46e7e09c6a90ef3a133d45fb5d82dba8773c2117a030d4bca1554d05da9b8f5e1671d809e6977c72586ccf000c7108f7f55eae8c5b3b626faea6534037e7ab956b47920f458d3e1256c48abe7899f7136d0901c01aff0331ad1c7027591ee7b1b441f2c4e148f62f836bb9a092b0327954eafce0900275772d011fca4105c2b84a3c8321d2fdd9d8bf21c2c465f1dac959a9f8d2f942670b2deeb555ec83eb3a74f1ccff3b76f09961bc03216b32a4bb3ac8caaddadd3f13d4d556c528d532a4e6c04cce386f3f7c019defe813ae3697557bac6451fae93db102a4b3093d141d07f17747464c0cd959782f31142293e72a84fda47f89a6e3ff14c81c8447489ab29f252b12617439ab466337e81ed3f4b04ebf0b5a11aa6ce4cced076f781e1cc4c263b5496160174beb2e05ae8e2c3e9e28428902221dbe2d174fe9f65367b6b6d78f9a72e4932c60b23481d29d6554902a9c5d9b606ddd71c4a5fdcce8591a12037a51a2c5834184c5c0d782ae459764b6ca755b472af8dfffde7e2559febb9601b5ef951ab0fb237794aac28f16403d6a3e469402f3e07c9938997354f5b55860ffdeef9a438cca205fae3aea09e6d733dbbb6c813b5b058abb56db6768ec135176aa6356003503153d17a15fc3a96e8cc54831a04b3ff3ee0d63fedbaab25727a44fae79ead5122a00275ff50b74f65a0b2af6d9062eb7f18955c4a7256360f4bcbb78c6d7375ef8bd1682d33958a1a098e6d54e40786132e5950015261c209a2def1cc7683e9608b4932b360c4e44354c0fb804f2222958f7db3a779c9109811afc3947dc28716b881edd9fb06424e85ed714c2ae539107255049509a9f2aa88f837af4205e920ea3fbc2301e3f1d3b20ab072ec25b2afd108092104c93dcf828f1b0c155f8501ab78bc2f0432d3b78e632b30971e58b291199402594477e792878f4254c797bfc92585b3098428e959a0d0b83e5569f77c998e7296b82a1b874d3be12327c91654b1ac86c2408a524058f09b90f472dbb0efab1754a5fda91abedb66a85a35d3654a1a9ae9bc14e4385daee5723f487fa2006069fd9b1bd9c954569a0943087d
5c021fc4ef3a372a3f7df34d399a5dfddaa62a6554520433721fe47e14f05226da0ad55a8c05c076f9d1a28b4335151cbd7e44bd8611ea48aedfc33861b25efe46c28074caace7864c8018658643919a1abcf12a135c90f0a0486c50411b212a79303a1510e76cf10575e1599a1ba5d1ce29cfcba68fa106990a815f2ee5d3a634c743a1eb494f9bca641a8928f01d6f9e1f73396acad2cc19668ac7e1c8e00af6b28db21362e597d8cafacf7e8444ce86e7ca8666afc8a7e7ecbc22b311c239108c4c9348dbf96870a6224c51b8beecfa5b2f894badbf8329fba9ca8a6f67f53c70ee62e63c8b8403dc32058f4f8177821d9155de350806ed2efebb4a4f002121ae70be5309dba016a59beb1b7cb5f22f082df5283428337178bebf8d4c1708d45efe82acead7b4410a821dca93d99c2d58e18c39af3418c58fdce8333255e142178d2a0c9b48ed86392952ef348be6edae7dd2a3de999cd7f4e059eb068c8ac26a6b96b537fc6ddd82a4f621a154cb5dac85a5e5b8737a1d9ccddcc021cb978261f41e99fa886d7082a152a3fec008c1c568b5e6f1167379aa493a4eedd95f237dd4655412fbdcf97e272c06776c1e0a6dcc416bd39ab7fe04c49cd6544ed39a119b003b23100249aacb40e81e8ca11bb221a3020b01be13e7dfb71b040a7211c2a33ea036ad3ba1022d5dd2271f8780a564072141b13e602892e24b798160d462e6246019c096894eb3343c5985620952794aea9147d4ae302e93238a0161e8976c1b4726f1f7410972d0fb9958851881e9462d4342ddddbf06b8032dd6da9db51fd7caf37046fa27ce1ce3ee2b675b34fc5452fb843711bd38b64c043a0158e48311d8a1dc1cfd2443108faec1c6fc1518596765952457d61e17ef51e7b7442e54b4be66affbdf8721c32cc17ed38f794709210df2789b403ad91a6236382237b4ec126fadab4442eda70413baff178abac6a37b02bf88e3ab76623a027866eede827a20b7a860f843e48960fab2ffb17e77cf3f3820644c98786999cbac367693b3e0ee50894f3df5f872d19f50c96ba4c21f165aed6987875e66b35eb6f6f1b964aea139edf0c3a3124b90978dd6a37460c20d5d2b48a7c48635f8d80a1a012fb71a7fdb527456fb9c2a5dd7dc56c0cd15ceaefd74f402ca8cdd451ac7eedbc70cb5ee3c3612c5f231e3e5aeb1d73d50f921941bdb12679ab2f5d5c37dc9d4b441bb733b7be5d08623934df1ed0519e2de97f4d842aff68445edac99e825c77f1494576347b5271b0b449b50bf4be2562eb20d1711d1d4c4e180f99c3693de6d6bd30746de09860e27bf300fabdca836594270494771da9bdcbe8694d1eeb136b5e488f8cfeac2f272475e530fa43744ff43b295ca95aa05fb52c1612c0bde325785cd4948cfcc3947aee6b36ce8855f1ca34a1b0db8e4d16f409c1b5db58e0b50bb7cac1411abfbc90ed5e958800b2ea2c02f7fa382020afc4cd1292c40c59040796c13602333761314504f37e96c9277fafe2fde60b14aee76260b08002e4305d07ab0559d04412319206b802d5ccd20395c40c2f1e6f9b4b2919610633ecba8a3d0d69f0586e153494fc48828fec3dcec0fea770010fd638b249d0ada780c46288910bade0bf9a3ddf8d216e09b208cef15ecede08181e1e78058846a64adbf75d1b49791ce3f6b04ca5093cc69bcce8fbe3104d5a868a47f466a76368faa1941dffb875dfbd4b204aa49a2ece945b64cd4b859cc36a5d99c7aaedba10a6da992699691af8318421b0ba8271110b0835ccc84e54dcb4f3c94145c0848ce7d60108257133f1a8516f03d4cf59e4f6eb3172240cf072dbdc5bba7c2d1932116fa628cce0ad0c0a62f4e133c445a4012a905dccf23a0e744d4ab986e60ab272de57439c42759afdcc92ee80f128614df0d927f114d9250df34eb4bc385aabb4128be5a3e4cd591a7368b3bd471d4eeeddd0d35bf2358bde422f7638364e5ca314529e56590660863b7668479e649f03dde49e8f1b7b386ae081a274f8fd68f1d642ba45f38f38a8a3935b05c4d7357aafd13ec68d0aa1fdf494bd089688ac9af3bfef2e573179ba511162169f7badcf55a49b8b7dd200ab3f89a276fb95c5ffbc36ee755312187788046c6c9fde95850b14db205c01d128c1a99d69d4c30751eb5eae4864ab59552274c4463a3efa9a12fb7e73515e640a1620f44fea1a64f632a4e1a1c584f7461108fa98b529ba4a9cf5f3c812f8fded36dc6166ecf3226069a6b512e7bd9e08eeb2ee722f0099a23173117b66cc4596580c8139c852601d51fb281562c05f29ff9ce8c05e3ccef0d4418520761a5836b79a9749c59557e704954eeb0282ad9c1130dc2c4dea071354a6deaaf4b4ce54820eb2e76ce44393a398d2ddec646aa12cda8749efa5a40959cdde2eb66c07e0c608082e1b10e7b4b326213cf1bafca22a9111c0bc6d3a508464fa12b7da7cdcd545915be3899d0293b9007342467ccea04dd378e12c88
44ebd8b695d95a94993f5b46b785160cf9b4e4c096594a5e3b8e873d0cfd39bc6c0450f781a5311e69eb147bbd12c856c8ade0d2851eb29f0f3ea10b6f7c040250bc2997b6d145611e0cc096b0dda413386e26878d33cb1e0354fb0346f029bcca415686944163d912bc256729fa0f2d748f6c27f383da3e6f0a30329bdefc9168981c8617670f23f0e5634813587aa82721cf1eb100937e79661597fafe6ef3a74910573a03594a0d7e9eb53ad474a98a48c488de1d70f224851688f49486ba2bf33aa14fe3f17f41392dbcdfa55cebdaa07967bc9062448b310c5a161e7aeda517b139be1b777941e5fe7a1e14be3e1fa18302f117e17ac5b13254681dd1862804d49f8670b3062e94c43728859f3f286f4e2e42e7965594ff4f6211d9fa21a0f138869a140e536fbf8d012d60e45c49ccd5cb7cf2e611aa0a670b4e0c4bbaa423305601414415455fe5a0b22b5ae9c3c14f9039c7809dc66a28bc5f26461eaa4796c1cf79ea7ed0e94e7e52a642db77734ef34984f894092ef7c5c2c598404743d962858b8beb3bfa289e56e6107c165a853f49f7634c1995fcd69c2395d6aa6fc4c57d106c5fb9b0d12eba4a22c09bc9b8237a07a5a0cd951947fdac37a4cc6e9a835292a6d489ae91a6ed0756dc2f8db297f144c97591cc8d70da84878dc4bc05794368c6d6c29b2055d5b54cbf7dd28a22942e9480110866664fb30343ceaba86357c26449f12ef38fb18620170003ac7e1809386f296d8b0482cad98c98aa56e98978b32ef4f5e7001bc304675594861a1183c71637a4f12efadd85267ab192197510bb86d49211e9475fb1b85edbf2ca886196f274edf29e9f1ce1a5df5e237905aeaa49e96810a8abf2ba5ae90cec73d8b82dfca35a7bfafedb2c736752390278318c26ef19ee870f7c26c704ec30251668e448496e53218416a22df41a2d5f7114a3b39f4ee4cdfaf8896f96cfcb396971fb20a0a5c5c0e57c3a8386f69b186394c4e5feb4447303293d09fa493be3c3f14802557869de543a7882f9a140c00aa63702edc20290d44a61281e5fbe2afaf4f646b51d79122d81ca2549599fb0b812f5ed5f297e9245b8b7883d24161a5c74e9f3bd04043d1b72d652a75335d191acbb2635ba4a521f8195f15f7ea7c80f73214bd45767129ddbcf0acffeca859f1e405848a0ca4995e9424a717323f1118e091e39d7efd1861238e9474e60fd292e3b77fe58b655f76ba12507de5c2132cafdc038677d8b15d8d97ccfd4ab07c0cb074c2ea195143b173ae002bba737ab11d9f1c597fb0f839ab66d5ab838c2d53d2ad89bf06bcbc15fea8a71c2cddc4a186e752b75a3674e388a564bde04f0816aa37d091957ac627117268b1b5870fc0b30ca90a648ed7f42cf8e8acca112c55cdd6e640c61f4e5141ea793e5d0fa0d0f6eaab5514c43c5080b2be12c1ed7b98f7a8536d8d812a97b3b53cf45349aca811bf7cc7db283e3d3cec71a42372cc3d6c223f56df84d60bccc8159164ae134c447a348f3a73ba95a2a960613e3284ec3dc8ac0408f3fbb906534339e913bd43626f10a0065605d2f667ef8f70a44fb3bcf590f0d08c3bd3b72178077b0502ddb0a06ddad3c3331eb59cbd7318ba6032b7d3fe87a60c53580e46490f6b3c971db0613922a03776fb39c2590f14fe988f4b58f8ac5711b574749095968ac24a9da6c3abb1ce8f3aada2493b3f3244f44ce9f4f94b9e1a104601056052aa028f1f3f80f0d096b74921ed8f4903bdf9bcd9dbc4ac555397939a9249f731544d9ce9647c42a540528095b0a16146e3f86c66e9a1f45f71e56402d811b610b116c16ad23631890eaae2bae501dbe5e3a9759caeb29b0201d5164053b7ec5f1d325efb9a6fffa9033e3c93e137d5a0a07f22b4d537051024beabc820c656ac45054fd0603541171459a776981881cd4bb65e725eca4bb6b2ac1134375c05c3423609b82c552cb4c3bee7109c7bb7deba1cba16fa0013a83206ab53407f13019be2224ad19486cc1f4f44da4ee7ea266be4d2f660f22e7ae2884eaf530c9e774562de72a796ae180b6bd0b585731b118a9320868139d2d119a5129b27bd0ac187f347079941c721846ec7a3e81f6a8e6828fb7b38d5aac2cb7c11cf263b035588f630bf4c0fe3a533176e7749050c2a63ccc62dda3810cbdbf059460f935640aa46fd53dc748ad0d8fadc81ac280d3aaa8f59c3c8a993fa18595e4e8929b29cb802b3c28fd3ce822a901e5917c1e391820105fcc315f341fc40766e4fe3a6de6340339f65f61ab219c638ccd38f3eb3d65c7013e8d08fed518a78cbb737996cadeb5ef288b2c257575d5676fdb010404ed1615672f32c705d6846224229f282c77e8a1545b0ecf9d5a8915698a332df5c50b4093068a4a34325bf66bd58c0919c88b61687299289872216201cf4702b40f6871877c13ce0bb8c909a6781bfdabd56f1604a4bf569ec6d4637942d046e66c0d3aaf0a89630ba00404f7225c121
f54bf760205685d7864363cb8ea034891dc7c16ecf12acaf0ad9f81d9665b9023c68f4c400d3b7e4ab73b4fdbeab2618ea3b96eaa03bc56590ed868d784a88809a6aa18ec76b6f2170b8db4c0c5b09b1f22239ed0e8c035403fa0c89a20d4ef46c9e56b96eea83080ec9cb8853ac965d19ad170f8c37d78409041fe6a484e12278a6a54632caae72121d3b7a248ce52d684021c74ac44940c0deac1272dd50a805ce61730f670762380c078cff1f0fa0735306c8b50d9ebf8f65f5584d3e7096000f0fa4738ebc710bd4fe313f9134447bbf34ec052767cd9d62cf4d6475e74eacf59d616548284f7309b934c6457600354cb12e0b2779e527a38f22f503d920301d5a4ad78ca881a8bc8c3ad3c3d5325e80df0d5bec9e8d6c2413f64ba77b2e77cbf73ff90396a47d9099c988bbf5077fac8d783b74b768dbc8ef76e5b53e90123e351982b7065fb37263d0cebbf16a11b6fc1b87a05f87aa1ad8c3d39dcb26da8c8852ecd5983904eb164885b73fb9447191f71353424f51eb0ec8d67d98d303c8c7f5e26708c96c9cc19bfe8a4305e8bfb5464737331154b18858b9fad5fd350f816afb1103bc3a3b38e43374f679b8a20676335cff8fc4fb19dccf8fa479376d60b21d4b0511239223ff397f6f9cdc90244f758131c80197d72de5f516adef6ce36ba74ba5fa71a4abb4d3fffea74b6ca83fb0a36e6096faf053c6e7e6c290aa1baa271062b8afe6c0997fad6c38dba1bb87eb80d20ba7e39f383acfc0aaa74b404908244eced4ef9fd90a2d9553fe9d9e3625c13c1cd41e7bb7c05d400357e891438ab305fbf63ee36b0f777b29d04d875d5b434362ad224649cf17c3ce0ab3e452e01dbe6449d930e00d4006cbfddc767bac3c111625af4d400fe80c40968bfba1052e03d1e1be15caf58453a97e677a47d9884a228940961d53b81dc77030a396249618f5afac1fec111e2dd138522e184117d59bb1ed93e198c9017ec4c29ec3b4c5bf096e1cc2877d023a1538cf9746e9e42917ffc4e89a1306697d4d998dc2e4d53adfd466941920e6ee016755044eb897075c8aa18f24f5b4b7d080be06a6242086e79d17548f0056994192ce0c650e08ed63c9ac414a8c5fac611bd5039fc6e259746f1ae8c15034a3e3b97092d31d229d47141b2a1d5ae28109366a50f2e99ab5c6e995ce7091e46f73c11a15276670b3141d6f853657bd1f2ba5a90821d2796f26e52289097401ba9fd0b576c951ebe046150b7ca800bbd7801e2ec08a7f1b8a924e14901c53c83e17605747cd2fd55e407d4573da0583bda9bb7d436abcb8850605b68806817fda40709774b198279055301e5d98878b054b2417a26141b949128f380343ba4d1daa4e8365cffbd67e16b31c388de36cb950084c62f0108dd065132bfe6c5d6a5f4e908a0c1c5fadaec151b26215b503415e5e26672606603cca3026032a5667e76d3242a34f6cad004bd529deb288a639ce687b296a957e5457244cc1fec03e28f9802401ff4583ba8db6c77558a3613c9af058aa96aaa567656a7d04285b8940165581da79db9c8c0899189a7ba1a5c3223c73508463f48f804b49a7b37d876f0f2dce6e4433dbc5d4a161ece82e81c50877862a6cf555032884a4a18699c8c5687b6bee12202c351afc8f5fe9ae9717bb0867d6977691f01f74a5e02b1fb3af949d9b583caaffc6adabc0a558297b2636bf291644a1d048d02064004042afc0f7559b82c5da69acdda464496d86902982f93e46cb8ce838d76904b5aa163391031a099aa115fb08efc34df527b8e098fb412847fc21db350c8385453b6271bb7780aaffef2ac8cbacbd50a0bfab08122a4c09e24c0ce72e84f1707f8a73291807acabe94d5cf4364be9cdedc05fd858f6e7c0acdcbd9e302a0c3fed2a3732d6f1214abff34ec662614850f4f62a2bcc8425bbdcd630abe27d6d28ee733b67a980ccd683888bef1d910487a846c0f3f5eddcc2cbe9e926b0f6d8f52bc9f0e8d65ddc61517759970f28a2c7bbfb9ccf564275f83d66484924b695065270b12f00fd4d74a7bd3b0e6ca0240d73033816fda09f8687996f19215019982d4e202eee2645917eccc1d741f75e62bed956e1f6e65db8efc7222d0f248e2d064610076f6067ff07d8e6ea6cc450878a6032804b25445b99c9b6add9ace5ae2ee59785c45ca9020b1889bc6bd06a349e0a282452675189181718ed3e511d76fb9b11177b7ca1be2757de44e9ccc71f7b7bce1a66e16fe65776de32e03ec4f8d0a0e83cba611ce8bc5091e112157c1327c302c3617282586a82d709fcb846c6dd56fdb1066f8c4694454038dd04d3215966c5bc7d53482470aa868bf4999ec623586b38b7383c814b5d0eb06046851943b27ff80b03760c11646c4cbc259aeb997e403cc0d3c736ffedb58432d408e6d3e4adf158972be0dc8d374ad80f9d0677430886a1ee8001e0b7f0b380d713a6880f481c6b413fb254ad37c5be7701d8cc3d2623c1d0d89
2e962db36d4104790795b2ef7cef04de1700b63ae3c97462b5e11f504433ca8da5a81e4287caf14848b53cac98b5cd0249a1936441cd517ea62afe9dfbed7668527d779b1d2f42c08cf7b134b45e99d073d2b9f35f140ce1323c58af15d32588508cd81a535c52583fde722b78459e943e011c963ca5cfa56fd13826a8cccf31b7c6982bda74730a1d751002315593ed9e4c57333d4e8add10ba56f33d4ab55e2d4cdfb3583d8d6f028f46ab9140927e71aaa61fe66a8ed14420e45c911e832e60ada91488d35d1551bcc94d93d244ca1154bff0704cb1fa2eee43dd844c7f153ec8a64d1758d2583d27f187b3d6fbe0063a2342a9dc1a8105cd7696825ae251e88141c066df100cd25ab7b49398f0fc867c8d31badc960187599a439cfb5d5649cae72371ee1826c0fd3b23183bc728637c31783d8148b8103a4382d3589f091be8427a39088015f7920051d0c944004ce3106c8a6c181297409049ae16d793748dfd771438e8c7dbee63961d1f2e648973ad3d9e66109d8546dbe4228ae2b147bbed2bf2720403317992bd2f336ac75451e4f300ee27340e3b9c7e05fb33d9edade87f923fa1c449c1ee15b144384c77bdf2cb82f7c454650e2771ad4ddf5893fce4cf7f6ebee01b973d8aa4bf06500a646c97caa6ed17299f8652fef80b1ac762335433703a7dd4d158f145f4211f3056b6bded9cf937d6a2132dbb23b6b01975ed9a869ec6b0398a83df80051c5f443f3d7a6dc99748cfcc4eb8a07fc6d2262fb2cc84831ce312e8a51be9f171f05423a678319b05d85bd8bb5723b248e76a565720f3891f87334f9b98d866512377a0c4814d9f3e3e476170b9519bf86c688e12da032a5f1f706f5fd0b7a20448679727c8073d9884c83bbbd88ca7c5a4d52c6c772528c728208ad62adbf229813c5d4cdde92aefbff8baf0deb064e2b8dd599aca974d678655656019a6420866c49c0ddb527b43eac98f3bb509a0d010660fc4a591fd10fe129127da4b9d8036b186c9b9739f6836e3e64048ccc5dbfe7af766ef6b65ecce2d6ae9f5e076dff9a6157bd9fecb52bee42eaf5a51ed0b4b227537ab0db9b708e22237c418e0335c5001e5568259cfc110aae1b19acff55ae9379ca6d1c84fee79c0a261e5e031d29a137c3d8c06d868404f54fc0c160d540af90f75fca0f8951eacd1acff19e9b134fd2979a3c0dbbf307607147cbbfb71e1ee6914afa3d4f1212b6efd1c06a642fa26f72d1a9174fd848b0f21114d0c1ac030d8944524f5fe7e60524e1361d9f0ed3b169c217315eaf937908708bcdae190bf4205cf2e8bd9bc7e41974a68455746027fc550421417c50bea3a418115cb1aa5e8eee070f36ec735ee2213aa712f94a365c15d6e14c6308e0a3da310cd9c68bd8ab0557f06acbc0d192b1365a1c0570910624b54ef59395af53b4a51b4d454654a3ebbbd6acc6de4b9c1772ab93ea38f06f27b51af15c14e232bb509c38a40e9ba6886aaeeff1ebef2f86e4f37498a2784365fe5a4300b4860094520cd2db5a0699e64546064930da29262eab05e411ba8d018b23c22093dd210e6bd0e9ab5e8c349ed0149df6f127538ae06334a70cdabcbbe27458a1cf665c6e242459086da73925062236f6a8087df022e08d65b327ec83f6a210eb7ffa71bec4fabc040f0d28764a2d07afa10e1b02bfa419e247013605ba4bf4a29078806daabd024bd32bef35d22e8236d0243d75be5845d1283a1187653c238adeeabeb7ec7a9b17e58d49f6c878400354d4b407a83a841eeab1a29bbab25345c71cc62597505639a62177fbd63250d14e4e2a2132a44494a9bb5d05044a6d03e52a5e7d567625850a037e2700c4f3ae13d31e92d67ebd9004ad782038929b3cffff2082bb58cdc161bc79e6988479a8e73fce48a2321e577c18f54e5abe012fe28c9512ce3515c310e547b8e259791d74d3292727e7cd2373873e86d2696e8a214e753e11eb64fc9f6e8fc0d5362a593ab5058d080e07f2d393a42c9aa648e097bf1644ff789d7e0b8fb4ef79ca6f237f7cf693b503edd15bf1d6ac5e85c2e0ef644c1cab0cc341f300b38eb4810fc6e48dd58d23e3b255b3c3cdb3703ddebe19aae5796ea03a24135b96497b38679795a8e5614ee552a781de29b77cb65d7348e048913d5b9d92052baf05d6e26bba4e2674ee1721b83be5baf63d59b0e84ef4602ea8214d1822009f729527c22d402c80bbb9120cdeba216fd9c1390f06bee746c7abefdd74a6a564cc6bbb261bc7961ada4d1cdf7cb45cd6955c1f9c522a7018e6a651fc193955d451a49eef794289bb8ca54a26642ebf16488092a54999a1e866997628d7ae5d79f0222d7bbf727b5acb4db125c074d0138adebb9fcc1e53518a3de989f42801ac5ce29bf88b9d50e6a0280d02dd753168afd6589a49cf5a468dac63cb83e53a27469684b2dbc3c189af3591f9e4db5144318d9822dbeabf5f5d00b3cc42aa7509151b8ee323ea288ff54a2be6b58ba9
7234b810d36d90d1d2286ffc79bba8bf605153bb3c15768e8a3b0d77e5eb90770ee046890051f23c4f33e0b43fa2e72455c3777bd950151ead8e4017f81f9532a0b8d8c1059431c8813cae7c284b170f214b070b30b2f423cd884ec197e309e5f0ed352c781a323df0a6de68c8c71490e9323b02b318ac9ba61915204ef4f0239f1dec68730a7752d0203b28b3d03a6575f574b1ed3fb0cda517aebb57348a71601888f9280b5ac6dd5dcf0c2a0494a94aede69f51a5166e6d3db2a70c6e218f6be9c54ca6bfa4b4b3faf329bb8f718f6b2792cc1aae0fe3748dea4973fec949b2d15e5de15c8c5fd939cb8eacbe91d853514e9654f4e7f5c053eb11bc2586adc376608ee0d539e3991f1192be7cd09811a78a514d18284aa7737eb4c595fff08bf70e2e19b0d8d631d20c442eb5db81c5bdc034f55cf4fbd43436d2c8e45f2df201a6c1e6ada8efa5530dafdef48a223fa51171a58fdbff5dc4553fc28ff490b37601ddba33d05f8fc9a6b335824a363c6a93ec3f867231301af935d1c43842e2bccc19326900dc84a4434e80f82a4d2a0c804b98b9d90d8e2da34c8383228c67c7e6a95b6e075419325650dcb48cbe5d322d4923cbfcc6e949c9057916d3adad76b406116f1294700548a225506084185816eea6cf656cac7e8b6a411763dc50d1417a384e98f795f6cd3edc7c65ac2421cf232aa43dd5c28f9d1d76d2726eb71a5c3a5d73c3bde557a70704c45ff910cc6703ea71f67b5b3054324e079584438e31f5c54c9b8ddefccbfcf6b4ef4e30cbf35a89ce355b6284eb464cbd76b181ef3fc69defb10d3418da718ef54148c429278063f2c0d27900f614f43002e7ee8877612a3cd66aff0b6a7befbda59432a524658d079807200829071283bdd447ff0ec6e60face7bf20119f28e9dfc13066761d8042fc6486eb0a37f3a0c2c1af488d0ef9b8a952dbead69f566efd69c56c527fb2a08d77ebcf56697e71ebdce2d6a945498a5b27945b6713dbb74cad55b5039b8a39b1d6af34493ed6d74fc126f030e153fc5c404360803631a702ad18202a0a91f3e57c399f77914564b57d6153f5d6fa46d0af5fd32891f7957534971da1cb1e28ddb6176dad99e82d358aeb726ca3b62eeeb44cfd51105604f53246a9d02ee04cdda9cfbab5b266b8b41cea925b53a41b8f4099ca40d83455579b1b84019ca99f2aed68990ab78850200af2409b1c3dbcaab1d65a2b8e675ac1b4c314b30367eabfccc0a6224f4dad70ebd79a82af28f56b4d451844a0aeb3cefa77d68f30d800b4893c70a67efd201cf0bc2abbf8b54ce524ec55561777b62a36e5ddfaf57d34ca08cd7ead0f43a3e4d7dfd128fffa1adda4e626d8f5a5a93e0d7c8ad5c4aa8b55d5d5341a3c006db8d6971a3665a67530da77305a674308e29d00f72da227817cee27207adb91c05ff422b097be4855d328ae83615107c31dbf10fba2e7ee07d7f29cd662b2da33635cbf457b510783bd7cd1731dccf630d8bb3ff7fcdb6336b0ead287807cd163a61ff2455de5bad8d332f59b8b2f4058ade38f1a56b1c099fa9989c76881ad81bdfa58b701f66aadf25788e153fcfa1e9ff8ebc398da75c0066d3988d2a59b589ae08552ca8fa66a921c4d7e35eacd5289c4b287c160b20cf35a86d297cfb2a7af781d1c5da4f0030a56a070040a3ff80f871528248183091c597000218527fe87ef01052aaaa4808214986882c2141c3da099724e89420f96c6853e3350626806477866a0021c26b02e4b7814581bc68f6d09028e30ac8645ab881f1c4e2c76a14f114e1881c98d2014d102318ae0e1055663020a4d8e60c4ce30a018e20731ecb605163cc031c412170e2fac76a1cf92289a2c79228514587aa1cf129e252d560f1595e2e4ac66d44862a1a10bffa1b7bbfb084208a194f2c55da4b4724a29a57429a5acee3b2d4f9d25f22a07e16aab99066bbbe36476c3205cd52aa22dd376b784dded2da58b9452c297bf5e7ae4a240524a29dd9d2b7dd8552ae594d12b9f2084104629e527691ce08aaed6ce882477f70d0849975eea6ef9524a29a5f476f7197cd4104228a5f476f717fcc5dd45da6738e37e8324638cb25ba8bb7bbbbb8b2a876dd956356dabd9d6188427ffeeae22da32adbb7bbbbbcb36fd8d31babbbb3b8422a59cc15fbabbbdbb63babbbb6180aeee96524a29a5bbbfe04c72e8ee5276777b77c774f428eb5f08a1494a192f8cf105f61cd21015a08573a35b4a986e5fd9a87d42b7cb5992b2c6a441bbdb5d6259367366e4b8272b0963f751b641b8aadddda5fe6ee952eef4bb9015825136085b5c24eaeecd575cb51b84da0b75b90734c2487506951342295b3564c94fe8dbdd2424babbbbf209420853b27561dc34936cb54ccb581d48960f20090da6a494524a299b6df409de5e82700559a34e1a544ab929f149dfea004b492925b6424e2561dc2da5949dbdbc94b2835fd98b524a693b25a594bd59ae45246ae1646f104
aab834d09af2f3a98d95c01ba4618a9cea09b920b2184d294a2a9214b7e42cb4d09cd85b5660f3b0835134dcbc0cf4c9b948ec20d181b27a080430c91c66d2ff0257eebb44eb7cee865f472e4e747e7e747e74767f4327a39f2a363472fdca689523001c0ebac7dfbce27fba317dbc1231bc76d3f9a88db7e7e7e74ba75b24a658c8dc1992cc31ecb1eeba0a442a9549d6e1d2a54747e7e7e747e74ba75ba757e747e74462fa397233f3f3aa397d1cb911f9dd1cbe8e5c88fcee865f472e44767f4327a39f2a3d3add3add3add3ade3d8e865d6d1cbac47eafcc174462fa3971fcb2f38412bad9556e933d3b88d56d28834ba89ddcd8df4491ad12a7d9246485c4a9f5fa5d2bf1b5a699dd2bf4aa7f4ef86565abf1b5a69258da674d20849a553fa77431abdd0ca6d5a267dbab8b4bc7410ceb8b8d82e35e4b67c7cd2a8a573241bc76d9f9671dbe752fafc2a95fedddc6815a3b25239a3369d83927a51312f6e6ea8ac4ee59c53522faa7b71237dd22a7d7e2ea5cfaf52e9df0dadb47e373737b4d2faddd04aeb77237d9246b44a9fa4111297d2e757a9f4ef86565a49a3299d344252e994feddd04a2b6934a59346482a9dd2bf1b5a69258da674d20849a553fa77432bada4d1944e1a21a9744aff6e6e6e6e6ea89346a49123a1feddd04a2b6934a59346954ee9df8d0b47a0051a261d93ee8d49a7b38ab2164cbab671a216ebf23222cda051a374820d181c9b7dc10eca32d443710a7b0c240490a993d2ae76d9b0612326870d1f413bdfa7daf9543cad562b68272868e7537d9f2acaf701e001e03139bad2204ccecf87862213162b264769686808eeece4c891438500d4e895aa555fe7a860fbc5252828c8c767d3beefebe9f11d11f744a5d279a2d249c19403be9898633825ccdae6b3693e97bf5b445ccf268b7c65f4ec3cd9d979a2d251a974b20c0d05c09b310508c82365746a3232b133134bc1dfa4525c320218f9f4f40480eb41df8bd0264aef89d1fa4d22b73c8bb4b18567912a1b5c0fef7ce8090f8f8c4c15c0cc003a28688705950f0d499c1c66262cd60a333609efcc0ca045272767002c888ce0b0405364b5a221804bd5507066beec69942c4294993fc990579abb53f5996aa5c2a89496affa5f40d4c401cacc9fd87c412fa67cf129255b7756e193c6efb59baa965b7d21fe5b57bbfead1bc2438359721a55abadf46f35b9c42970663e363f9bf3aba9528eb029bd2d1e3aa30793410d95bb8428d9233f590589c3a9b584c3ad4fb7a0403408daf4a54aa056e8172948966c613d8da24b6813f9511e59c515eb555ef68a75b962b507e2b4e756dac3a71405a29811f99a2c385353944597401485427b288bf650166589615394b2f894922cdae2534a3e419950277ce2afa54b7be04c4dd19e281271222e52b9950251200a448134dac92009c41e907ca2bbfad53b29d432558e82b0f4e73b2685d87bc9d7c48c60403c7bb53ed0ad4035545a185e6b4c0e88f2d132f53fd58266a58d70ed5bbf7378b815875ab300ce47a3b80986726bfd6eadaf59e17259b12edac7f04988c6423c74eb0be1135761afbee6c278268133150e6954ab4dd4af58e58136dcad0f5f70a6fe0e765d179f2aac82a5f029fbca2d80bb5ac72e38539f5d4248006d34ad6e6d822de3eedd0444cf3dd79120d364fdaad5f8d23e058726a7398133f51f006df88a7e088fee60344deb7ef48b3a21a2ef17c2bd8bb8f99aa669a2a9bdbf48fbc6beb3e7e843c05ff49ae987bfa87bfde8e73a16e2177bf527bf3493cb8557c3b4860d8850882771225441d8abf50587cbd1ae5cf9377dcba6e477a9fc52295cfa548a04923d7c4ab9f4e1537f9488258e01675cf2638fd67c29401bbef383c0d028f5e00c1236055d3ef8440370e9d37a01195ee013fd1d30f009ab948b700f7086c6730500daf0a5f437db28f99471a4d08f46a12efad5f0095eb1dd3c536e3ffd5c544aa55fdda92cfad18f7e4c6c8a7e3534845cce5d9f410c64852c03493a3b13bcd68a9d9fb239342aaf2a5d039f6aa77d35dd13826c73add6a69ac70703bd126c8a73aaa906f6868634d618a697db452c70a67ffae4c1a5c53839fefd4156417a06f87ac23c56e2eb7bc557fce217bfaf5141e20787e217593c156854d734a1a3681fe9d2aa48581778f2a6409b29854ff4db7f850b9f7c515cdfe7f1e96b54e411a90cdd0bf00973b694b907f064b5cc0df2d28730fef5e1d5a8a9ea58fa5d1be05fbbd490ebab4ecdd5eaa6e68a9588e6aa51737e73f5cd15ed522508380f5137875aa6ff876c4abebc96524a29e512be2ee04c3f35bd5cc851a1b247f2aca03f2579be19421a632a1aad2b57e2eb15bfd8821ae5bfbdee9c4f9b501f30bac2a81414ca9d953641974c315c9ae88a16a138748543576ca2aa39c45e7cc1192454358766960d673332b3549f1a45fda12bbaa2aa39278d1691972017e7e16902ca4c8ec7e7a75c6ef7401b97
56c29d6da5bfb8b3b5b8b3ade819abccd7ab4ddf5df36831b029fb8a559a49b7f834bf597ce22210e0ccfc8f53a04d0301ea8854882084f8722c54a20e1fc3934085e7d53cad66b137e74fff97a0c9c44865a581a04d5077941e6a2c5da55f0dd4418e83ecdad540fdb5ab5d0dd4400dd440edea8f3d6f577fedeacf4914e676d23ca7f840a906ba2c28a26b08dd900f4b2c2b2e7ffda08d076db8460246ca0e06029a265f48f65af7431382bdd6c5a114276936d5179529ec55a0fa65f1c68b552b7086bff6b0d34ff1b87f0f9fbce35a6dfda00c9b6a0f0f7b4f2a8ffcca539ff44cae87579edac3b2eb167bdcacea73b902551f3e71407887bd4bc4cb8d2f68239fbf9334115047ece28927bc9707672215e95dfd6a08d51d2490150e08dae4880f94021a38b7657715f65ae8c55ecfd8518b694508da6cb76316d08653a1f59aafae75cad7ed170d1c145050411bfe1e3853864dd5da1b8cbb2ded70c00d12ecbfed51025908ef9cb2bbbbbb4bb7a7742734629624ac1c9d26234a793572791999e857643efd28b502da5c016da450203ebdfc7c3a8506512af4359f7192b098b49ae4e8ecf08a524ae995128de7e5e82461f10a67d6dbe44a93193bd851c7aa38c42b1d172c63d48c0175c4213292c04f6a300c1ba2fc84320eaf28fdee9cdf952b34c610bad30817e92355d897f7e7934b7fed82362d777e5bd157b494069adf414d854f2d3fbf5f5d45d5a8193f9f8df014fdfc39e773113ee1d0f8f95a84d0a65b0a2833bbabf133b89f0433be067fcac24060c6d7f81f33baaff1ddfd20cdb9914c71a865e6d730f156c30479cc300931e2c78ca7d10999d1c154f6a29f4fe223d0862bd7c35ab1a387af8db8bb3b3e62027243bdda6326204f12b22f9dc721295c4c6ef2510c03cecc1f995c2e4d185047c4820431e0e89f8c4c3262893aa40e9085f9f30477feb4417f2fa6fee0cc74e99a87bdf9a9fe449fb9159b5df826d1632291c845afcdcd8aba96ecb5c83dc000377bd1d3e7deb91e5c96752f4222de90b458b142801f7c8a3fdf833639ee7c1a081170e7d7397f07974a3fa74f122a5d4491328589934864860ac8e0c11075a29158240b962a9ee7b56b051a8f3dafa58806635224d79d75562bee7cd904a2e47cd98236a23ba514774a2877ca265aee943aeccdc984bdd9627d7d2b24598d8a3ce6cb1635c91d99237362924824fac428a00d8c4cb2ce08367f0646c99fffc2d7cc9fef41ce3261150deb52a5295907e1ec2094a95daad63a23eb4e16c925b5d0f8351a48c3365e8cb03035e472d7027b4c2c8420b872056a8d721e2db4606384838d1fb11b55f2e0eecd03f4e07bf3a6e4f9a3a8a31208219438f3760721843142a98233ed2dc77cb011e1eede44407777778fedee2e65c38e3c02393202ec329e52c9daa8ea25d8c85cb8943add55a44e64ed00556179d7dd192b08964e1162c9baa453c6a6a2c7db8ba6f81f495608dbf26c8895a7f52a02cf0a9e973de1c952c023448b9192f5960987fcf832cac601a58a7d5a5afd24f6628c5b7d00dfee6e1b2c1c5704f4b22e5fb6df803147b029b9135119938c0546c6ea8e4c45fce1b24d2cf400072a1eddc1fce8c91bd76ab78881e208202892bc2ecc8f7ef903450c543cfa87f030c28dff81a823def81e8dcf08aa01c6162a1e90eb614337a109971444450a06818a1decc52a420e06164e8440c4144c9cd800c70c6ccb853e4e9864e144e7c6090a7068a1c4124a344cd06674d9dd5ca2feeeef3d4beeeedddd34ee2e2784dd10e607e5419faf12da0921010fd832f0575aec6e686fd0288cee60de708607bfc04d503531a89c70bbbbbb3b084b04b10229404438e1d162dc9642662db0a9f7dbee8e85cbe5e78680107eb2c0c245173808c208b7bf85ee6e958d5054921851aac0326484d7111dc60f537477d7e802bbed20f1a978c4f7a212ad515541c99520c2802a38429c800160f07089255f5092c1cd914b2afd853e3e18a1d961480e2768ad6c3005055830a90287e0174fa02c09c2810fc018028c22533c64e20a1454b820079592a022089195485890be064c030ca24042900f389c0087fec227e31f7ee0018c5719485c542cd12f1da68c01c51346907165065e38b1f0a788ece4b3b7d861082c7618021052486922b0a8097cecb0f00b6ac2698a13229a1861c7ca0fba0b2e9204922a81a092c40b2adb2551564b7450224a1742c00195245840970c396289244c9080051026f8e1228913b4b04724b89c5c02c15c8c78481046f0f0426284942b5c3084204836c23e57dcfc800b1940c111daa2090f2a330b5e0364bc9ec82204248220450735a1a236616b65245868f28493900c29ae218e00c5ca0d8f109a30219997307c1de105173260c242c2154168c282162316fe34e9017bc630acab10562b3bc843d6d84d40c8420588a083650a1f2499c112405002a9073665b9099a6922b1178f38811358c8a8010dc
228a2a7dc405e2572802c9f248090041537653f8abb0f5d3792c30d0db2900208238cf40f8650e3babb3bf763a1e56ebec2dd9d5960a18afe217c052c7d401b29a53c4205603cb9a2ca22c8071f3d824dd5aed0a67907070f095003113f3f0c60a58980931fb8e841c90fab1568214416f3ae27e0900f434be4e0a5a3e30350112e0953441aef22374711543db8e921c9122b78780fae3bbd3e0409091e458331b545099b62e1fab35092800663522dc298c42073593a9a8dcf725cc7b54e4c92459cd5cdb8c54e988783b80aa3e0163dfc6487a3b08b81980af7ec2479805fee3597d26c45cf9e1b07c3712a224764b999dfec230e3cb223b0dceca1cececd54990a0920b28873b3e762f3d0cd3e6b1edc6c323f6b75d34cf4b555cb641b6b184cff90db5dea0a9be2d6cecd3e3eb7fa33d1679a0986fb2197eb52918db097310e7bd9bbac92d5f31075bcea8c89b007afe47a70dcabd8cb9ea4848d5ac0eca39665d91640fcf267594099ec8170b32e6ef6358c9b7d86e566af75a9146e06bb28650614d973e69f65cf3c580f9f349f8fa3441df066affdf69ac6711ab741ae3feb3ad0aa162034e979313ee112e7b08c7c51575b464a1b4bb098c6ac31db28c3ee7801061ffef27fc0f8431b56b82d725b8c0feb128b7de4b6982432a922648487a6759565b12e1593dca8137772a0052c84e2050e142f5657fe0b8c8a3c20142a3b5742d900199390d82716890f7bb0d440d4065846fe0e96915f5db03ea5285fece9b6a2478a5addbb943da2e7eab8d0c788d5d5c1a6aaa5a1f1bce7ae31d5c0999e9b1376e353b709655ad4409b78bb9bdbbb695208e84db483f521832632c1bb8313992a7ba297bf03860eb2c775cede2683a4293d56ad163b6d4addec940f6f9683bd9c8fb5f83479a24677f4a8989999995905275276cb26a4d854e94b5e5d4a29da81fa4f7a834ff4fd4b73be53e9d3a794fe03d7f66f67e61fa2c11808eb1536552ad5d85b8d6807ebf149c25be213d7d2cb6e07b75c28465072211743cb1dd260cce8ad8d4f01f78f1189622cc3a6fac67597ab09cb7f833d1a178521c6e71e7c1481363d401dbc02237c03842b4a2fb282a843f670b30217103912e7c675f7a7269a76bd4c10d556b4ebe3b7eb69d7412df3a3023ed873bf21771c4b0dba2e12f996041b5fe39a8c742cbca9c7225723af264d2d17723e88c0b62de34b8033fed2e47277401bfafe310bb45902d4c14476b8ee40e29d2e8d10893a241090057f97415c3735109c7157d71f7bfe32e4b58c370fff68a18dbfd7a80ab840d48532ded27aeda4330eafd8f377a01ab443c5b972e50ab65af11057451aa757bdea557ffcf180f18b40415746e9f1f97a64cfaa87bfcf07dac828721075d4fa848707888a382b68339968b596b08eec0891e3c32af67c1fce87f3e17c381f0e6761cfc7a3c8c193db3c2b38d3cd434b8c25af23414200f9e052f1107b2f4352c61e2158fe54af561507c3696eceab0f50a62587c103dae0e434abbbf545717df18b2e76b107c49e4781365338155107fdb6ddc327ef7e8287a7496b09eb7677370e77d76a6da994e200dc5424c25e47f129e00c1591856e9f1ec68133ed034f129a2c791d091262a562af5fb2d8d4e6de38ee6282158c853f76f1107f71276f7167abe6ccb0396793566b4e6ecd39a598f38333b3359d70cb0ab79cf0c7dfc71f0462207ccce9b9084d31a3e7ce8a6593a74b039a3f8fcc9dd9e4ce2537003d53f6d46cca2f02de7500f6fc406ba50fbf524ae9c35b6b1696bfd66ca21d6cc9cbe86359cd9ed2c7b24a318ad54a13b3698f91d8c330ac7ef6a95a79603dcb32d10eb6e455acb297fdf615c3b60cd3302dcb600cd67580e609cbdd7d060eac7777373b19fe9918dec8e4dd8527ee2f5b2e69646141241c0ad2a8c862ed780db4f1e6621777d80bd2a83a34df2dd6c59d9c1ca7dd955a86eb11737a3ac1fac862d27ac2a79ef86598a9729db1238db55a3b592de33d1805d674636b5e9a5daaa412c29e072109616de58bcdce65978aab9ba201065dff56640d613fe772076be00c15cb9faa2e910f7876a46c677797dc524a29a594ef52be94524a296f60fda54bcf8b6850fb011422e85c98e56ab4bac02822396cbfc3f3d85e052796eb31956c38d9967dc6f5e08e94eb1169d4342d723db0af26786b85a9f56bf6d534c3064c34b00c65d87efa55eb60304cc4c1006fed60ead7fed917e07ad0ee077bde26f7f7c127e86307d3dfd1b5966ab46caba9a9e1b11bb8a9eeeecdf3c8a3e28d4d1fb9285ac2a6ac076933f009de1858acc6851d3bd88b2eb87f199dd7a8b21674655282ec79676262b78a8595ff36547052ba011d4e6fd9d21d07426f5a80ad815d007fcc41e396ce2f4d36c446b90d2a5d19379691953df9339e587fcddde3c69eacec49fff8ed31c6969dbb8f9cb0fceeeeee52d4c019efae420316fa30e1bad087891e9a0b7d98c0b95a0bb5dd
c1ea51d3ba51088032feee3ea306b6bd183d57ca83412bad49f096d8ab1d1cf558faf3629faa358a75f6eac367ec2b1616eb207b35db0eb6742b7bdad7ec53376ecd3ef5b76618d6b116b7e6876f44839a0facb317611800a91c72a129c26f7f6a1a722334a522cf8db0bbf908e7d8532ac10541e8d8982358540b345bad43f40e181a55034d73df2d32954cb665ba87ec615f63966559d6f66299571a875c3afd77b4f4ee8e5f618f0368da924ad6e5bdc69a6ccb78f7a22e6edb6b730b7263ea0606c03bbbed2bd7daad42cb729b699a38532a7ecd1eeee02157cb6ab5d1c29f194b22da7b3425082124e9606d4c7f3961ed640ffa97cef931a63b720c80974aae47646e2663474ec965b921087bf231ccc5650994912d5deab56982f187773a94ef5c8f968f4be0a95fbe7caaa26002ec0e96d97efb9e8d884d41212d8884db026ee86a10c29e84ab2ec59fc1cb2213107953910524de20afd3b4d0a81a646b816300bc94bb1f72db943255155ad4b1d7fa7774168362a783ed74b0fe9a53f1b979110465b8901b18573e4b0a210d7bb2863d296b825aa49962eb097b7f99bd1814833a28764141b38370a6bbd8622f3e614f46155ab6c6ed875c0c356e74d2a381f8666a164b1b9161bbbbbb03c2d6b8d06709a0ebef2eb470dd9da56d250410e04299ea224db0e746ceb5f8caa1a1f1bc0f7ac2a6604f90cf123e57be03f8028c823ca85cf934352ab4ec3ba3462fdff28913f0f2a9155c3e45ba297b65ea061556a3461d6c99972fb9946ccbc8954d55dea971a3b14b19618c31aef0971d3aa1c11877f70eb83455f69899ff7ae73d285fa803c80dd5a1169f4d374420f161608cd786116cea69e2cf5fa1053ed95082cb3fe7c77832d026fbf933d006fbf9357c62930df10e513d717d967072e797ecd3c0e7ce8fa1997f834f28dcf97075278f29bafc40ecad1d4c0ca2cb2fbafc295c18ece1ed000cd6352001971fc88e5b3967af0bae814f740a892a0df3ad089b82421127fbb8d5906559f614429899da44678c1833ecb0a71fa5acd873c79199233316dfb12c7b2df3cc33cf9cf498c9868e5c8f28c48a42426a0812a9c4d70d3048063eb116b31b66f81142086117a3297bae2e61071942860c533490628c94d6c0a72cfe74eaa6e88a5280d88b1d35d1306760cfa50c0bef297e8481e6e253a9a6086c2c55e0b31466970b173e95532a155ba584e56649d1dc9a2e72f9532e3491183be9fac107a8234216a2f46f68336b5c6a6a950f9f5234e0f049ce9cba693b9e8506511ee3e37da3471063c5f2a7dc9a547062fb21978111194d2043e84776e0856b075e7cd73f466154e4e13b7082c8f5cf5f9b0c349674a2f04e0c932af63caaaefb53d30d41d8f306c8dbdf9d1028b441159c5858e572120dc6fc604f63933fb39b2c7b3075032764f977749b68d8e4ed6e13e570a38d91a52d0046512245bb308a922f8ea1e530410e1da816e37b2bd3604b0b8661a0628fc9fe5819cac88f22b8fdb132f4fa63cd328aa4d22e625d8aafa45a4691cc39e79419b8edb30caf51ac1aeb035c7e89033c15007a8c802eae7a76c0f3b1c737d80c82a75192e7e3d35ced009e660a78781a95790165f8598ccb3f57dd9faab5c2cc95ca64f8bbea686b702e06cbdb5cc58e73c5938bc16f9c371055671097bfd223754863b58fd8082302e60a853b578d2a95ac9dff2d385cfeccd5a2ca5cdc1176ae23341893d256d0c69f7ecd3ed629c3027a0c5fee342aebe1019e32177b9c8901a14cd6d3a80c287bb187053c6560402f036a94ac02f6e032d06d69a1ff49579499ad2fac360af2c85e7c923c19e73958e71103a2602a4bc2739b7bcf1e3e88d46e5ebac5a046c82256008e8bbde00c17c16a730a8410ba8416bae4e1c2a189a8140c1b2b7ac9d0d000000023160000180c0a864482812408c238dfe40314000d719c4664522c1a0883410ee32808c220640c30840002806106d0ccd4b0246e0d20652ba8f095c1d1a3ec35fc89e1759822a8b81ad8903a06f03b78734c089949820bdbd4deefacb93cb3da812aefe34c918a670ec0a46f9cf48545754dd2eaaec0fb4b17a00d0099ec8aa7af300b6758e93f868ebf541ba6ae9c661ded331dad80c4d80e50d23cda86ff3982c21fff96d1156d0b8823fbf7d89e5892c66089668cb1467ba5ebbcf9cafed965745d4da7e6dcbaaa4f4bf9adee8f73692161f0ccc7c4fd1efc0525060b7c2eb423dd38ce969411786a8fd6b0e470ecb9467c6d6ff63fcdde623196ef75dee4525c94fe27ceac182c3ffa17142e3d0779754a65157bf236c9cd5115af12bdcb7d95bdcdcb298ccf3cf0637d9e6357f5887176284ab6a4aac15ad1c2ac389fb30d97215231a145c21485e5ff41de83116d2b6cf39b1c76aee4dbb887ef610308a9ed76c70bda1f9008c8ed931e0b5b9c75cd26e123c35c6483b85e30d21b35e4ab875f91ef1ab923dfcba2ca28883df317e3833fd
b4ebf877fa973b22718ea047da7ad73451d7e26ca78a152b3476d9f527f863dd669b458ca942cca94aece4212c377b051bebd183ae52ec3cfcc1d586e88c8218d4357a07405cf1909a500b1ea20835c3e1a6d49e51e984019f12809566990a2c9e6073fd7066fa4401111e80927887f857142a9fc9d8ae14a05401c67ee1ce4da9267f9d416c5fcb6950b600ef928e83ed3213ed3de9eb32410a73c31c635ba56b95518f5a4b4a6a2fdea973009c1c6fa8664a18d394c3357f58fcd8d693938f20f96688e8c5e421b8dd2cf15d879e918f390f8eaa4def9670139a9b0d0225c53b7973cc4fbea0d859c5629eb1c1e9de71831fab0b85d4d9b9cb60ed30323f4197981442a1eaa8a478cf6316a300edd917840c740830d21115360d557664f873c11850917a70dc455d267e8efe75aa00693d08f2853ccfdbb8353d503dc74b715ac46ece0eddb9c131a09c65ac3cf5728ab5d42047988c499bd9d7852f20d23c6f5b2bf7cbc26b26b43f9cdadde78fde7955b73c720b3afb1a6eede153726f25973baaff72aa7c4aa3047c1ec553294105f25ce489707555ab535d5ac9f8bf8bb9cceb950395358a70d30a2d374ca3212a787117ebc13b8b2330a3a51c8ec16c1e9670b18f6a84bcde71dcb4bf3db7eeac791d5f095be30ac6885a9052bc81cae397811fca7ba03b4047ff0741024424e3efa18230e626614d8cd07c8b68841a30f28f5e415d1fe265d96bc31b2a6860fe148d52efa7623e2255d2809dda28f801389cd1d626dc7803fb270279045fa24c70b7a3cde8a93a25be5b48fbe3251e96bd31196595c286e753157d023b2468d648656c454fc144dcbe8bf6e53da7b3e835d03b5ca568bdbda3702bc15c0b11d7ca3b189c51a4cd80bb505091070bb57800272b38d7b0d2073b0502aa62f3595643a0f3e6d3b753fd16d4aae3dcfa18554f9a9f8600170619b49bc27586bc5f1145aad4dceb6806a233488e94033b81ee5fd27f2db754bac394c32aac4eac06941337aa9667b63070adc674a931b6217bfff1033a0bc01982860640c547a4d360ad0f061025d07f79cf4563f0a8ced79547cb0e43701b3679e80fa5f077414713b710934fc2f8b3092de4a74df27d20676fd43b83060ce6d386b4629932544d209c8db3b7202012b89fbe9336cb02d1d696b9e3b3dc837cbea921d04ab7f72ad06ce152b0b826cabf8f25d9d1c0da2e3a367f169dcf86eccd057c6079bfd131df694509e6faf895287be9d1f3d574400acad06997b02bbb6148174236fc861568da06eb32bd5c6cad4e57624a3764a3d224557b1210e4a7db9cfb6c9f8bd8e8c95faefc67fbdb2bf557ad0b3e87633e11df661d7ed5ee28e3262ce1f754afe4f23659e6090403322a23b6d56da16b151d92f9755bc5d8cc85a011cdb30404a3ce2f9e0bb84a279d5ef3bb215ddf038a6b7d90e13a375c29953d2adefde4c2c967147a1559651de09b9ac0f34a5e284c999219ea1b7fe1793a56b05e4d2064472b787bdaf0580a0cf525dec59dae74a70632dcbc9bc65aaa582a402a2c72e45a6941bb236cc58138c184a9ccffae354e468893faa15ccc1f56159f743419c9794c229b799e64f5b1b4e7aa0353b3b273a3794f2dd0bd254838bc735650957568a64e6dedda5cac4fef9fc8ead274cb022d57303644be6aa8d3b85082733446334f6afe903c853df2e365bc3f83a20ab51b0bb50eb93af286e85d6e9a23bb5560d6ab4b31b3e75efdd9e31d80ede9299ec00c7085368a54e85760f64d901175814124640bdd2f20f88b97f60319307202d1735696ddb0f0b432a9ed22837d86b015a9e318413c68af13ff3c37d72c776a0511eab2a5a6a4e869163800ff3475f5f32945be249c5a1b88478d288448b403c2afd9274627a233443c54fca1f9c98b1c11c1199c69c72b407d87f691610d821ddd3b694b5a9968e2d72851fc959abbe7a820903f61c92b9bbf3ec051a1d0ddee799b26f9f9491d09c06aee1c0cdf1ea2a537463faaaa647184660b21d344b55b05043d2fa8ea53374b28aafc977d4688653c8351afaf8d66073b228496fceba61d5519d3271035af63a8958a8862a060ad323a6000f170d853ddfa127feb039d1f55b904728d7fdb31b5ec0514db3ed3d76ad780d761349eee99019c43f799045b54622d3ff798532547eab0e2acea29ddca09748b7ac6861d1a37539894071157275a229cd64d0cf0312d1900d0e7b9e73230c56a6354876579489f38095870d73f815e9a16349c0206883f6ac6b3c746755e6fb7f1d85952ceb559d2a1bbe4020c6f157c771eccfae293802c6202b62d24a19cf192721c46e21ee6783c4d565f242681627f8b7fe1e0fee8755918e9b0c72ca77f8b03f4d69831ef535d30ac5025912e885591ac5e68e0a67dd053f72fce759b99428cbe144a62e55d8b794d098c32cb54254001152c0ce10855dcdec7a4770dd32971efc35971d7bd4d8bc63ad4d79026c737b31011cfd5577b78241
779b3f01418a4770c36befd3766f5acc217ff8b9adb0d5e9e01e8bd3cc2979a7036da02f6ba29e8512e4f802f82c04de0e4ca85b362324ad88228972fc8c015d796b9a89cef0e2743800605008bb3fc0cd186bf317b0dc1c31b2c20d2d1a04991b84fafe12b38cf84dd11998485686d457184a4430221886aaca718a3a5ffcd37ee18eed5b053348481ba1bed21800916a4450f8a80aebaa202056f5ec4d44a91b80270367425b91afb47d20fa37f3aa79f4087104c62a4f67917ed7a20b230402e9ba1d30c13bf968fcea4b9519692b0c8a2bc7e3faad8309380e031f4748c31a0e61ed2c3e4975517da771fe309a43c819f6681bafc0ec012940b8ee472b0eae2b8f88f9d39189912d749c31958636a9f4e9a45fede6aaf039090355a31e8a305b6055afcba3e755a5592d929648a969d6aa79d193ac16dc7c47bfd802b334525d254fbf4da5373a3656f6e5e1213de3deccc115bbf7fdf575d93e78f87c0d9dfe4288c76b7b21446103f203e638bb28926518716968647484edd651b97ab6a35a5724619d7f9b8530677c469f1c373adc2643f9e02d2ff74b2aaba5c7a8ac62d70a08a824ca0f376417d75720fad6f12f037e15c7f09ba1d923f5866f6afc1108adcf89e66adb75c589b35359d3e4c7262dab1681c8ae7d468dcf694b730ce99c1df7334e121471a2c5fd8f098d42ef3f3752b8e170f4c9daa82dd9d2cb22e7e4de0ecc064c69c3a5e752ec2c57c9baf2c036bcaea7e53aa6fd80e98dbc54103249c9c8823def52fb767f328ec083d58dff86c22521cdff813585fa2db70323105fb1e48a04e307bc9a8865ca514058146ba4c7e9ab4586c48899b3bb23415a347496f223f211ed47140632f25b2a0fdd9f1bd7c2c5b4c71a4baad1e5fafa4592271e21e30bb2860b2a3cf8546754ca0eda754ad08d2cdbcb55526a513907b04f582840f0d5e73cfa6f08faafa958470489c507711144acfa6651229ae8d4610a57cb6a3419de5d2bc52daa20fa08f42d87a86edf1961c83715f755b76085c75a1f159bf239d5be7b771ceaa8f8a6544a7ae65dd97adb34483c1c7380160af42853789557641960355edbca298161caa082bd9ca4b541e1cfbbf08db73c1c28cff51308f0c9ba56025494adafb008fb78e9b959cf639a06db03a8c83c8cd6bfbaf02809153929608b10fe128fab9586ff31c24be0c62cd84f5555cf2645856fdd5990ecd7fe63e63101922354ff844880d6385cc103cc3aed05d3c74c8f0e9c5515d4988904915f454de2cda5420a71ee8bd2d09da5534677f6be4fda7064ccfc101a7194970c9b3d55459e579e3415324909ab1b073e4a6206ef3809b795b4bb6672c6778c6491250e858e1c3410191a029a413c6f3a16093e0fd0296bedeeec4a720bd85d8ec6cda8d0c2b0d01d598b4d9679a175d0cb6415fcae4b20a760759b222f3699b43256c4e9b4e4592bd040605b8b2e3d3d957836674b7a63f5322aeac72014e6ab7eee11abca7feeb12b85f0ab0090cff285b5c0cd56d107b54982532193bd7d98c8afa3414ec222bb06d97d06b4d8e0699e46816dd802224ac4cef9e6920d3878f864d4f96c1d0ee2f7c8a814ee0919df363e493326e252006ae46c7561d1e43174b8a01df804e25e46ae4d4c9fc166ec854c652d0d7ef322572335e3cebc781fad0a9595b8fb970a35c5c1a3c79bcdf0e1fc1460ced2bb6a0283b19110be25070931a42fb6bd2c5e3ad43ca2024dfac5e4c24907d9b367c97787e73a6719ff52dd49b33f9c41bef8c4a67944de9419def43cac50c0447894d6af78b13c75a5c92f73b37562f4fc28b449769760489c231429103ca75d5af67560d0753dabce4669c9561a3c2e5737debc072e361610fbe000a5906a9e046047356cd8725433497991e037d52e41ffe103a8c970bc83083c261f35ee18482765402ca377d1da19d5c7f2b28014f6f8937ae59e3b14d09037b3774a22c706ac7fd993f784821f6a1ba202b925894baeccff7f22b31440d8cbd6aa2c734cd698483d94c583ae608bac5b50823d669574ac90a39ae346b674d3257e090c41b82f1d0f23fb8161f48807b4f2de6173d28d6241dacb47e2ef2622b707a1070fc80cb0d40703dc407096dc79d5e24803814b7643988a2536dc4af370cad372e45ab5292a50a791a1f0211ef29dabd3b411ce2e1ad8baafa17a2545d18aa46c615a3bcb2929d532b9f151897672fdb5a47d4bc83654963760c58ea5827ac6b64455ee07bb7ec15804cd0116d4bd04344b51c957be825a0e526de6d3cc2c33b41555c561b3f6d9c718356b747055a1051f8a6b8ddad6cc17841be88733fa3348fb19c944d14b37aa3fd4248dcd720cca2396822862fcf411092e6aa16329f40996054ce04009142eb6fb9c0fa2167e08ac99c19a0fa661ac1479ea020833f252508769c12fc08db31359863b831c8aac352a83c094d43938aec054a342e3d9fa8beed2799946178f06e1c631e84bcb24b136adaca
8752ff1b70a8ba4cc89eb527194f13635091da168b4503359818999f2ed386e797ce48d723b0ffda9983cd5457aa7eba6919da211eeee64a2feb60fd1c9eb7903d198a47f62b8498d7c84836efe3d25a4262a8be342899088a6109c99f43394e383b84f242ebe09199ea861132177cb0fe5f70636b510cc25250542885399226d4662ee6d7010b274327772b3982702faadb8e60e1ab13c25b886948f851ddb35244f51e6eefc03fb6d62310e149747b504d8ecfa3e219edde48e140ede814e884f2fc0cb450265d28260b30b06088c4be5441891172d2455e04be6146643d1067f49d5760046ffb0f762fe10e6d7e04a82c0b0d2cafe0db6684ede95e849ef0aab1b9fec73f45cd5465785cc3ff6065c69efc2aca537ff6ad495a24e6495509b30d2268f0f136d5418c4edd6cd5ec19ad702b660729684cdb81097b6733c17a6df4073261b134e38436113d6979e738462f825e7172e4fbabb9403992321812a2e270e45a3c2f01e27d7227f039ccb18f745512fec8417f2c4fc48f44679b607a0e66ac8e5b766f7dea92f44546d42f63af7bce65080d2f4eaaa33a7fc8d73eceaa502ef4c44a61f2384f1225c909e7310436e1ffe06962ec2d23564db89bda2254b548b74fba279b0ea02f4e1a6a7059881669c19ffadb4b8881229271d3e41bea1741941203745a384821df442b0516902403c444a4422bf5d4aa23c4e69af363f21bcb9c1c5ede8d8e73477168871968239259369150b47b2e851db6d10d84a75488bb7047f9ea54663ac65a0ee8fa9a130da26f2de11b60c70bdfe5055fc992388120d3f45b4d4d6b0d9611c03d788339e24063ce2e669dba48c4df430941f3f05e47928c43d2dc95e231e05d28b54062f510759089a58c2fc387a87d56a9933b2a03e5d2efa18acc3271b65c41fe7fbfcc696a3d8dac67bfd6d5146d81fdaeb48e28b81a596a28d7dce43935448c1f1fcc9dde4f730b267dab1e8148b25681bef54f67dff7757be4976079cacaca5edfb6a7a897fbf8e4d6ca7a792c590d382cb75db213c4f1393c50a1b1b62da169d7367403a2d226347ef987b9ecd209d66e9603d1605591ed0bfa291651735034cdbc331d7cda059197a254edf47e8c4be922c5fb853194a47a73369714b66f82dc8ea536eef849256ae37314b8d21702c122dc61e00db0c32cf9e9e09af12dde2605254fda86fd079a060a57521f8de54ac21d479407ffd4eca1709118c998b0b98a5d993b87f4941325582c33bd4c72efa2b7bc07a885d2c3c60260f5b514a22f012e0093eb8dca067ccc9ee6250865633d931d7cbf8b3057913e0e76f546296924c6138df0c70fce37c376f60ec7d7781c04d1ee4c5bdba76b6d40968052ac3ffd0fd655db8567c93331ec89a659a01b35768773b1c5a99930a0ba3995209ac944c4869d60d82da85127de3fed53efa5002c0927bc6e24c4e2c61268312068af8c50f8c29f0a1d6693768a0ca9c1f8986faf23aa601b7c180773890809931206e5686f3a12a0d3182910413ccf2c7451e4b256a5880b9c7505bcce3ddaf079a80df2125d8f6887a1ee429e20de3d09c51aa992a00ca9adb5f611715652d732dba67b481d701b1850f13d3306e8114f8f4acff9734a25a4efd41340741570c02e39ff50f3979a92b40f3749611dd54f507cbcb5922c5af087d96780a6679da15f8ac0abbfe115706d6614bbc9d49d45099c4a6936b4427ef1369e6f841284be4c3653811a4a5d2c48a36e0168f40951834a1c240c3a09dcf0a039ef5be46c0d2a48710e7ed404f0c60fd9d59248491b2fb9341c4205f5244bcdff7abc80c3d5172974594b0ac8ba341b90b02fd4b524f433f5b18f7e76612fcd4d82d0294aaa36e4ebefdee6396f65b2062169a6c9240b7aa7df59d2796c9e21624e4301428aba44829cb4dd67a424af9c4f5784ae265ba79466a4c8fab508cb978251dc373effc3a79ef4a64f65a54ad7ba9ee4884277b0fb8863db415e135c54602d2bcc4593406e0c147dd565bfe74835893d652295fa1f301be604a2f3543a27aa479a1641ea699cb65d31b5c66eb32bb89ace7373591da38fa8e07e5a4d48b548b2df97bf2bc8e0f0bf397182f14386c59b28400ed3b431a465cd673880e3dc87db17d4fd4962b3e1433bb86ce212ce1b644e2a96e5c13aa0ba8f826343d0cb7f2eb1b5e71c63dbe9ab027412e31e2b8c61be798b200df993a31c4707ccac01cef07aad7febc0ed3c459c4df5178d9604f06b764204e6797483539454179977400c6871c1e19ca01ac643ef2d99c5851898c428a47e6122c95ebc4c2ed0c847b56f4991039cecdbe7435f0c3ebddcbe1e8a410c4b6d9574cf4c3f4731cdf706f3350414d5066c86ff082328c77263200e4213c1264a5bdb9d397dc4a002f8f1c562c7a18cc925818241f0e0a09f848a8ff537f1522807ac66e50b1e0a2d5666ae9220ee07d61c5e40fd30265222ee5bce4a14d54d564ad4edcb2304be909cba1db202c
4c1b2ef876e70289f8b0c4567257b1567340ea7e2039e5e81c17f3876c2b98c99fd46201a54ac678c2fbf133c329d9eb91bea6317ce87e384c83b3663b712eaace87cbd2c10f169772ba49a024078785c9b1773ce67fd86243e91b619329b6e0da966a8304fec5c77787c22e5cd4401c5beec740f0e9da8a7ef17aa2844602801f52c7498d77cc8f234a8b5390ae0475b7c75baa882c08e1dbbb78edefcaae029bdc69409c8cf52f00c3842b2052c7b7903e8888e83ce8120d11fe6236a232acb9f2f860085a8aa647a3301fadbdec035d47382a919c293630020227abf2df75cd38f02a0541dd341136277b269bbf70718cbfa6e2a827db3bc6bc9ac3c310249401a8d0667b4578bc4a98db2a7bcf890c1536805e2aaf8787eb77bba45c15b6df3fb8c08fb439bca3657f8cab7cf182061cad8f8d89a502d7d43a7c53d2b5625dd6a8782ed85e3e31544507223f658d6c334c5df62c5369c38577b39616fd93ad17184714abc2616e3497e7a459041a2a747075ad01f508a2d18f480a363f07fc7eb3d76362720b271ea70db8d95b767e6fdf0a28dce69ceffdae8d41ecbc050e68ca2900e14a42b466bef941c8b1304bf709b12fedcae1c0fd311375bf00942f24aba9b4706d2bfa263f1fe1beffdfb53503afffb6a1258fc9137c412efa112a037692cc39ec1fd0f48f5978265a547309c0aef7e55c1a44047da451e9b27d4b521be3ce2c0ce575bf243e589412b19308d587f59dfdc700eb0a76ddb2d029a0820a4cbbdf31f71d952166fd575c0973a953bbd216e00428474fb960d6312bf6f2a8946e6ca823cc3eca31fb3d161f16cedb512136e9e43e5ae8c9883f396b5f8b10445ab628617e5f5b1aa23af1f5fdcc4bbb842773e3e469b3e26fd461a8e51a44218e93b1408ec4b1d14a0ab88df9c73db1b66b2ce4e0b4372d423d59316760e5d880230b099dffa45151a5b411e6608eec9aec038b07341ca67f5da6590a73971591aa25ecdf9f93d896fd3d56ee88b8fb9797c1b4cd3c772bfc4d8e148b9dfe25c30b950ac2837c652003348941dfa312fdc2453b68d129159cc7979cd8420e3b562dc14e28314bb9ae28e62003eea1892968ed2401b211d427decad48f00e72e8078844816869330e8c5b3306702aa16bde6e2baab80709ec2b8d2d45f97c2b8b2b815e7635fd5412ebf760f6e958585789da424b2e5339ed6956c62c7b7c582d7e0115f76c930d54ec9c1992c2e0a6605b79ffadfc85fe101beccdffcadac593de44889aeeec0f38edeb8d1838649820e6b481ea5d17c8394ffd78d2cb217f1ac8143b1877f379b0217d6c491473b35347fc8e9e1fcd7ad5e86d63f796083e99cea36bb88ba878e8fdb1d6b7a626474c356ef545d43c378b416fd4d0a7452de79fd90628219465b9597644da8c5d3212219c35bcef42d3bec974aa51e5da9d01ececb742dce019e41be78180334fbb63f2cbd729aebd67488ebbef95ef121bec1aa3ec76c21e23a053e10f3599e2ab4ce8360e31500dfe2c4f114099e902ab55fc81c990025a7f8cb98ac12b821e028c1abaf024d440e1021069d901017b9aea09dc2da159fb1aa1035f597a81bd313d201cea5443bdf38731a1e5de2681325c8de6681d93bd6ae01f751f5a45eae76ec6ce6bbcc33af52a634115ee4ed244b46c958120e09067172e18e708a95d48c433cf3f5afd09964b1fd770208226358c8d80d2e96cea0acf142879197361030e120d57b7195c0dcffe4591ba5cf09a914fc8e0b59c92540e7bfebd7bf8e3945bd34f9f038264314dd0e7402a1e1720788653b1dba7cbbaafcd96f0ce966ebb5b85937ca758fb28cb1cd2086d3c1cfddc1228816a5224e440ca53c680a9ca1b33a432f66ef7dda282c7baa678d3d44301912925eb57835eb3037da630898bcdfc5afffbcc32576cea87868dbf787ba335f4be2f21ffcc3dfba8608c9e22fc1c71c70846039322f54f09ec622e5f5d895020075d7ec04881c2af15b1059fa9604aa45a1a0cc48ff6576ce1099fcbcba394b722283f490c5967f092964ce1ad28d06a2a2436116d50bd25b4192eac568cbf21968f1a90a7c385bf353d5f5a63b4eb9c77a0f071c08dc4bf8f1ffebf77b3e83c442de1ac9422d71dfa5359e9f39b259540dbae871ff152f53b0a882d3465f40cc8fa94ca5a9bcf71784ffc4ec276806cb9c44027fa05a180900c682aecea3369ee27df1f5c5febb0c02ffb7dbfbeed825cb85c040e41bad7d92d53be038083df7270feefe14eec98118c615142b07f661c8255ae7531d0ebcc6ea172abdd41cbddf4c950739c2d7f959cb1d8cd6c8cb948051460b6de3fd2c4bc786827d6531654b1987fcb1449704cf954a739ee193e043616f472e657c789298d1441388114db6506f80132d80bccdde6b636249be29ea2b36e0b1119dfb3bc842f8e1e213ace37ac2fbf00b31fc097f9ef832716357de6759ee919886cf3bc2e70f15936e18c4c917b26fbc1d66cab8f7a
aba25058542fabd1c278f404bf2c698da31d9958e626855c3b68ccb4dea9c958714fb3ff859939034ffa4f8711e4d1c02152beb49e2c98fd1187ca96bce8e1ff00229969670ea2b831087c26414e7405b43b9e4741d5f88ce12a62e41f89aa96d1a33d6194db75f3875e9416e336f2c657bc464999f39e545de247f7a055a783bfb525e3c90b4f4bf8577151ed49f3322f52228b109dc271ad865e3efbc8a83c438367f07092e545be64b62ad9e1f234c8a98ca5c48af69787955302f0d2222d4c367d9a03c746dc602244a3fc51d3366089cc8760b8539885ed1380fd7cb1a35f39ba010bf00ccf4e097e0eeca98e6313712826d2b1696777d0141b4842fe3567d3e5ac61b526b64d2073a5a866850bc1ab5903c1f839de92c88535ffc987bfeee7f2f964f5f41a1ee83ddd7a9d8dc85db6224863d412007d38fa151b3282b2a443595685a6dd4b9cf85b5268231f8c40ceedb048591267f26c138db016f55ee8adc00153f0889aa5b7dedbcdd4a928b4309b9d82b7c5cc75ae1282fbef4c1e505bd43bdaafa98086fddf49904a66bf47b44d711ec3436eaa75a738571ec703e561bed207580857451e77ee96ac895fc5bd317af37f44e54f7688ee5cc0533c9d3e57bd4b849f7c83b432d81382e7b505551e37df678f81a077f7aa17117880ec70fbc4318e6e83e0fd1bcc8e252d0142f2f6331907190c25c6e220e03c02fbb42cd1cd245d5b55b6db46d351eca715bcc81790e4f76cc246ae6529d45518a42d1d7854edbb12303db3100fb716f0487dbd7b6c443c9e8eb2054b7fab6d88fb091f1fe914445b83f158216704ab45d8012ab205c71f6c954684a6264da54e36edf8a657a60d956f084e04eb6ad89e2c036ae899a72afbde7e0ccc35ae97f128d2d0ba4f23ccc022092a28f0f1e1320491c5ac40adc1a7492c5d42c3a5c892b86b3740ffd90aaf56b944b9f11d5bb175b5c904450f08c3ee3f4967ebe65952c0cbdc4b62dd6a33b2396d70549abad1ea38d886b04e6430fef4b55fc780ced22afdff5a20ca6b48301ad063c622418c873fd0e0e3d497cf9a3c837ae3c20da7d3a1bea9cb4f4814c04c17ebe3611b1b9b4a6c2d6b77c260f0887385975e7e498a378930b4e2e1a8c94280313b5d88824c824d061be8232928f89575a16d9bf9f779fa7ab166caa198fa60184bfb26d761dfaccd3956d93ad36e5d659825080ab037faf16239e6886bc010f6b08831aa24076457a4123d703b5a4f70029e1d8c6ba21d0909287632ad2224c5d6be4bd2c8abd82d2b7a1fe0c37e00bca21e7c0d17ba0ac0e2a9d3471157fef448d4200809e0da8fdbed84f5f2df474abc8f728c0ce9cb576e6f9afe79abc3d63636e85bc696ac80c69579a55254d6a9d69f04b4b9d59b63e3aa7a308cfdbc2a481a6a390a5e93e9f83a83ba2da9a9c384ed5b9e818a123a2577344da43bf2640f4708f5cd3e0b7c90de4500371e714faaec4cc1f48bb97a2336eb06c0322f6798dce6a7d4d44ebb7d751015939941004c64562d30b8d26e11011462900a6b15cc4b0ac7cbc9cd2208a5a090bbadc09116040dd5a7ad3a000374f877f93e941c92d45b811d6d3cc853e4eeeb4bfad8c505a56c1f1e3f139e97f8c3b3dd5d9c241a45bdb4f9ff8a8347057b448d91a50e578c3e6a09f7f51c1b293aa2320dcb27b0716610eef4aa9afde72d4b3863329a0258db7416464548ae610187aa7adeafeb0d6b4a3e729cddae85f711e0d49dead3534af8330bc06b05c7b860729929454b5233ca20e0e2b1d4d239f7f5ed28ea2bbdeea7333d68dd9275c412b47a1adbc1ce3c88869997eacd0ac0e39af05ba834b4195515e93ce39343834e6a276bad33c814f8ddf0509a494bb37f549484260551274e0da5d50b11051c4afc6825884d5873a8b2dfdf4c51031ed60b5650d0fd0d7a1ee3b77fcd4538f6e3adc06b44b3a261dbe168f5cda18a05c2f27bfe154ad6c2eb39c54516a04ce8883b2e27d0810e165377d20257e8dc66ef566fec8b6b4d4a1718ddc7258451ebbf72a746751bb203a49a3e41ed1ff26cb009f80527f9efc4a29a9817615f942c44db5382c77210e7fbf3ae142acf039ed526470ef1b0907b63d3ed99b654c8b0baea0227cf37f5c42583d1cd9e404364401dc232d6ebfe3b9e8a114c8a60e987d38160ecdfe016ef27b94ed22c115414f413fa85a6bd8ba936080d121403ee49041db9182c62f32f618ed2c77ce71a8fb379ca1e9e4ec588cc6cecbc92f0f7b0d71d271c644a03b0d9af3279446b395dbd6a799af1f0b6d06ef3793c5d60945f45102558b2609b16edb5eaf42a2080126a10dbae659ded65aade0bc91c7dbbce66f45497add1182a5a60f4de12a8ece47e04521450e6e55aa63a33409bea80ce7272eeec43d01f9ceb9c80f607681fca02c4c2d9419fc171cfc855896b7acad0514f7c4f040d501197c48f497739526c0143792ed75562fe1055f5c22c9c07050874027b93316c494d04641
5d07d9cd94b8b0d8844901ac1f3aee811f0246568573b5b70031b57d92580841753d32abf0eb3467d282b40fd34ebc55188fc7e69a59e8bbd13162be4161e16445911b7641158725609bccf87786ae9a8a9166ab28778e3f5196246918d176377da8ad1c91f22dd6eebd12544df480c6c47b042c1ad433decdce55d34def568e3b3576466e619a9182db456c69156d042a0c53d9809e253a42409677fd159db82964d8254d77c7cb6348d51f7b99098f723d249483e4289d6785247d698a6038c5b9b93e8ce4ef534a62282625c65a69cb0425477f85913b0525f5075180829c4c014a8fdfb2fc097aa5a24aca2975c187d39a56682f27302cfbfaaba9fdd875c42962e4b4621703d2565ddbc455598e044eda4eabbfd90e2594764a455f5bbf1c6b958deeb85752dd6934a43e6d9de862d6195823c776b155db5ee9c7a5430b0825fc0cc4fa1181fcdbc5f9c270bd3b5a1fb9eb2ef01935ecac9bf48cac4fe01b173ea6719f3c1fc5761416c9f656cd184a5ec09676b3f302bcd138f410656b0674fe990b79076401e327ddd4909aa1aba2f50d0d28fe1b2a30ca16a38eed70fb9294351c38582ac2635ad29b1e8f4c7eb7cc5678444c2dc2c55660bbeb96f064a1947d90a18b24189acf21627fdfce475cb1f43456fd19e9e908dde5d9438ed5d38c2168e44773eb631d4a738410fbe40640a218b66368887899d1e26117c554c0a933ee21c2690593bfd1aa4f4ed7030d6e93ca312f6caa574da68aba9195a8a98b20825f05cfb5205c46775b2e7f4a2226f5a3cd1395f69b4ddd2c5b2f35ca2c7f3ce559608a9fa20d7698cdc48608617514443f643a7eaa4e071961dd715a85688666de804845af008ad30fbae730614a0e1935834a364d355b3dc87933e07c230f532d824a87547f10613819f20b23119698f3c618d07105cb64fe816db521937e3688d5ab3d066a11d58a3e4e8ba2a0a763fe4f1fc54fb9097e67f0cb0667a49b421fab2c4906c598374047603a67e490c01f87b872b4de8ccedc64286b702b9ba19dc6079c500357c0e6170feb6bfbc41d018337e2888084180391d422a0a339e3bd211d0c5ea54f054b7a2c034093880db8ccfd2e91b7bb4c4efd1ae63266bf32fb861a680d9e8beca82e18cd582a94a20fc0b9bd9e512ba34a16dee067b6ae468d6199ded0710dfe3a296eab2f4a755bee54fcc4631bd58c6d56a85e0ddb6ec404c68cea2059363ea8ab005e27dc3039ef00b0805a3656a92a41554b61a3dd6ee9aac27da34fb15d226534798fa2788912133772dea2744182cefa9ce4b125a712e1f7d747302b0a8c38667baca4037515be7a017ce2ed7bae2a817e8edcd8b877d934591d0f9fa98104b13836578ca73c0fdf60a127e7890cbdf98698ce7b9a086beeb5a6648de039aa0f41d5b4452ee0090e64f6b5b173a8f881a08c401287fef2d7aca09e8cf8a7df34ef6502bf656d89433216230eef1c7565c9e4a262137da708e61b715742d2f119611ba380edc00a22bc7066cb33af0d8ec1490ac8080048d55c2e604360c71c263d275df1f21116a23aed8071c099bc3e6a6294ccebae9141e7a4ffb9ea23e1434a506d4178532d5b9dc04316304468d321166a7f397a8363b0762684754996dca5e24a674b53812c4f0fb82902ba80d47ce940c2e82dea29bc11c56bfa58d47c47b4b64f100d7aa0830f413e0cd0786ffc648caf890f715f765601f465eac8224c978906569ee4b8c8fe059f99a4703561fd8504656029c9b04b7560a87cf7f6639501823cd3b2e6b284824790f121b8f781774095a4c00284e6752a9ee1c8853a28fc8994f58632a752ae322bbb8f6bb60918bd164cb81d643c6dad07d7ece59d631dd9355924129cf908bf60ff88000ca0f2628b83a9551845659752c755d93c9680824bef67e10a9c079930d14c283ad300fa3850301ca9906fb044156b1b06cadd6d5fe2e66d9fdd626f6b140881fb833c69eab35cf74e20c1a5915d03502661e03494b6485822ddc0bfc74518ab112385cee134edd81c601989a3b8c90b81668c91dff3d1c9508ef20faa91beae119b7d4d9cb192b601b9caffa1cd9aded04a15e28c75f28726b3812b87bcdf0235d5945c0c4753076c0c6b0f0f623838cbeea1328da4ee76ce2fa38b3764de771abb85bfc2fc32fafe8d116e20c5320f057b56487e198b957a071d4419e15771abca83b084f15cbe1c06462a3299720ecdd741085fda62872549905483a053746c2cc1b52a4636401321548cd1c6ae6989ffb6ab3448de565c8e0d7d4418d9d678223a57544404150f647604d315afc537cb652ef4ba6218a5166e053256087f1f7a33f696bff797252e8d9dbec7aaeb4a37b74547f19af46468f9da2cce5ef362e22715a48d4015f56c55dc0207edcc202f05bfa4eb67b44d9261d1b64fdbf929079d004ca05538bd612c0eeef0541404983b05f6460a48eab81240eda3ee2e1ba1cf29efcf5d55963265463cd9b47dd10
02cfee2d99246ed3d2dec7b86770ba9ca427a912c27dbba26452b988103c22f0e2d314c16ea6425cfd53b3e7d89325362a7651cff2d8292c471360178b152d8165ba7bfa93360c5d35094e13c30e8c753a6ab51179ad65cf97197465f352ca94762e39fccb628246c193d6b079f1005abbb3880e8f5588548957e8435c71f98b5ef892ba70e02cbc1578ce60028e93c702bdece314a4bb2d00dd47f33adf62f655580ad1c0c4a2621f483cbcec34916b65f32bd0f63c7ff4c5465263b1711c90ab079c49fd6d24dad528e7caaf2465703ef74d43b1856e0aa4b80f57de504fcf05ebb8f38822141bf5eaa51cfbf73a2bb961126b96c2f9a5c1d8823449206487c5879ac517931e2c14121bc937d7b94d5fbbe7008e524f44a027cc60cdaf23b20f28d2440d2c37ea045bacc1517df436910ddcf459fc19c0cbd723640f801e43dd975cf31e35487f81e56716e64745588b2e0a71b3b57ce73c8204db206fdde0085d2db6eb3b02525e339583d759ca1298b74836169146eb0fd431319fdcb6eb8a93e060d41e20e43646640fc87078c760d07483cb050e72f881347611d258842679d853968443563b287c0ac8d86c69f715f229106e2acbbb4f61bb976282a7427b7caae2ffd17376df9c1069a48332e567b3211ebb6135bdb62f4beb2791a393bea981495de42f15c7dcd7e4d5b086094b309a8e361071579203bbaf3e8c369d90257ddea5b14bcd94e45d25bc5fce32d85092a4ae9364db839b96f468b0a5af0d6b95609faacb6899f6c5f2b0ff85934ccda97df4081b52728752a8c3069824329fadbb4abb91202c9db14c1c2c0211fda81f5c4d2345137c916b8e451585042d691a87efd2bcab98629e6b949d57f236a1b3f764a07384abc3f3b072807e15097a88a8da10291f9dd038d71e271f1253903d2a4045c997d94cea1e8f12ae653d21e6c78766c31fc6330e201b862b770e6d1afde718a8a32fd0652c2f5adcdfc342182bb619ea4a1305598e7598f2cacc46b169ef11ac117823659c1be87a0c3aa247723095ec3b10c102fdae4f31935a41b377b4b5d8211100cd2bc642c344e7b2ac362f830c6750c31311ae881495ce01892f6a1624f5c229b569e2f064b128eb31cb2368ef882719cef2c19fb87358bfe9dcad12a2b6aa5eb13e1a1b0b4aec3b13c6ac262198dc149585c413dc0604deda8a6f94f5e2a7c8c66fb9230dfeceaf3977042967525e0a08297fe10a74ee31f5d71c5704a911413d4ce30dcbf37d2e243263138154a75c073f1b1130b9ccf69df01e42c1808227079c58881173362f32616ef14954af8e8c5fb97fd951060757f285b2f3b21706424e70223a05f3cac17e6005b085c551ee8b17e9c3dbaa13820598fdff838504740957f0f67414cc52a82115caaa67025725a8884f0ac47e116dc0319057a23b4c58c1a72ab36681236872d57ebe0fbf6b3847e95fdba19a4036e7a03ca284a5f85089065e7c772be2799913ea03daeba46969b6867ef30e9359fd1fe490c55aa85c207b89d29ae752b9aa2ef23c6c648d0eaebba5e63823c9dc44783f30e74ba05c41d8880ecd8cc178685099c86aac1c564da8e970cd6f7e6fa8789b7d60c4bb05bbd439c151ba3851b93800f4520f24e61bc603f22967acf87b766b45b845ca6d5ce57ca6eeeab3c9854a3b3217e6e8ea543755073e5eeaf0687fd2fdd6323f8f86b917244d57842440cfc7974c3aca6aa7d2cc30d89fc972a08a2396b67d1a1458f884b0c44447ab2051723e43bec218ebceac017138dc9345db1a438764e2a5e48fa72daed1d9a08ba7da1060887c247df3bcfa79437df3a6754731ea91fde4b4586c794bc3b850005ccee128ebb55646119f85514e412e21331dac4e49ce01ccd8107c03ae23873c9990f45c8e1a0904bc73bbc4db00f16f618adfb4505e56dc8f004576de3679223a8dc56ddfc90513b3de9bf6af15d39d71c8ac9a816b30e44e9c92e6a7cf250348621285f7c5a45a264f39860006bfaa83a307606c61a88228c0ec08431c5f67491d41b1e3a25c67e2413a0888b10f3566dfad936b0e1761649d83c3cba6ba298c24416bcaada522c4340c37e543ce2a33a550872aa884e294767c6be3b494ef207f4f3f57e3e40d05854965ac3f2916a03d34074fb5524a9f5b18c30b29f83c23f2ea70f83b151560913dc9c2b465713df9c3d4d5a8239de421cdedd139f9a0c05906422965e6eecbb5f39a0d0b2b19503ee70c7a2ae871a65c4c1c4b90bf7fa50616792fde022add6b017b15590db154a95d19a5a4590e30589ff58008c5ed1cf3c808d4adcb530ca1f5c9f74fb68ad80cac278cd82c420e765b4280643ac79519b274ce820a33ee2bccc6ea73dc4fd1d27818372811d4affe988843a83636d0e87bc9a9c9fce4e8faf22d2b16e2c44fbc72d0bea2028aafa2dc42e98048e11d0e61150f6fa5c7b6a82a28023a649d98afbe07f3cd450bf96f36760b5f04ec9335a024428aa0d35fef071
8d8778ba49093791693b70f4f76ab270cca840fd8b44d1bdbe97a35cab0a7591228320a0dea03c73af32551415b14e8c70c8ac82628d4543731358c9a312ffef468fecc320d10921fdf8176a96dcb4629dabbbe4ada5011312613fc43fe19d590adf31933996b9aa1b18516a93b5552cc983c7260529fee8c31784f867d4e65027e61c09e8fc53c4cc0ad0c26187509362f8103aad6026f6cdd2b9f01a13136b0b8cdac42b243325094aa54c15e4484a3aac3d00325352fdfba8e8c9220af078a5fce64f4fa089227414b63c6ff85dbfa7419885e05fdf171e1d3fffb2bb2994bc48a9784c48f7ff8e04878fe15699bdb533e71a7d88868c29f3746ec6bec332214aae3c00b1fc834952f2bc2279804f9d2889ad62637750e70f96e73a2984318bc1edbee232d79e486528f1725e856739c62a070ba8e233ae3091bb8c1df875467b3917357cc90623adb944ef11ef14e436468dfa6ed9cd19a2ed406d25817737b903ace3b96d2e5f5ae3eba278071778adfb356470cfd6faf449846b8037b1de1dfbf4d1b0e6d18623e0aff60a913656172d7f642a6ba01dfdd95e206c5157c24b49e854d0d9e6b2623e3f672272fa0c036e26ca8128e29db3593734ba0fbca696f5e58b01ffe6011305eb77d832d6a22cecc5a295c871886dcc959a666c69ee6e5bba4aa06ce81a7da3e6d14ca013afa751204769e2f71d86f3ff413b899aaa3e5427bf3397abd089b6f7eecec97a0bab972de67be00c5755469472fc8f9a2a092dab91a18c9e48058aab28c65277354878e8301c239701a910f59f1c119f085c2290dcc9c4e5ddc612e29c10a8e64a40aaaf0f7bec649e3a4fad6c9fdf7b069fbb9a6e46f5ad08be6ca39afe14430d1ac2d0c8b802317393cb257905bebce626579cd5fc7b9157d82df76af9fa88e4a81ba4d89253904dc435857884d6951cf47a21a5ff7c112a3edb03f7a5c9029752b8f6bdb859ebe1a44e595b6047a272ed43f49e938a70d8040741c0ab437b239193000778c46308604d533604964962848d91e16f0a26e5703d83ae5c273118273ceb46433fd34a86901b05b74c66373f47fe4c1bee67466b636bae057cfed1a7226b5ea3eac776c22fe49a97028fa2a21648d937715d9a02a063912e4bc8e92930e65cba0911239e2f3e4ff332cec00cbcb263b780c385d5bffbb3d82fa639f973d298f101dfbbbfabcac658e8b58ceba5d8c1c95645ec8490119229bbf2d032327c7809a0a27bf86dceb0255a866f09487fbaec9e80e048327103e09aea1bf67efecd9bad8188b96a0f9a3ecf7f8c9b5978712e6e1d32dc2c62907c3c68231a2bf433a0d9672df068e44c1406328cb70ec72c3c803b5683e59470101233cfa8f018c478677c3eb630d96f8f28e755b45a43b365e07735908094794fa41a7fbdd906995f4a7acff0268f649154cb022db18ca61602afdcac61575a91a8ed5c05604fc3a81864fd3c2353198108461d5ce0a6f483fe7a108b073765927e55d6989ad85b5089dc8952a6eec862c60f3c5d57a07d1c90521c4f85213e467b2ca24b0be271272a3c02f7b326948e157525e8d2b091f93a6d7b2123af043803620e674e395020e0399ca3c11d48e5808f5ba78c9e0308eecc4b7e32e26694c3438c9e191cd6e4279e27a04ac4f1d4858977a27470920b7070aa10e04726320d83b0de33626c9cfe6236a9588fb1694489d44be59e462191320c1a832e95e58a07366e616ed532daa6f40c16bdc5ccb0545d177dc7f2c9778bbc9988aa1f38feef58f6a9addfb15584b7a33b1a60f1daf68dfb80af1a595d3af92f8fa28ae954234b87283f536708ef2a42b5fce5ef8d4312119cc5cb27d91f19cef2b11dbe26a198fa8aa79d3c6f25f1c73ab606dbb5607868d3d9ec1b04079cc8705a2549faa790ab4d7406a84b85e29a9ccd6f91cb65a8f4d57c0312ee134dc301eff04099d45d431e195a5e59acb4984c0ccb82436625b6dc014e934ea473065f9e695b0e71256feeb5087d5638cf6b720f01becb7ba1792bd168c7012bdce04c68c3e83cae1f27790e5075fa9151dc2f3e81f2b5bc294e4dcbd5ce596c31bd8833472c9c9c6d556ac5906489779873bd317da0c298e01904d6c574925520fc3af2ea884784ebbc93735b54de8b69528389dcf4449df745ba93a1ce857ed46437b6a67a528cca0ca1d35755ba92c9b8c6443bd21aa6f4755f0e7ee4d33fb58d27960fe1eaed169ed4ff514047ee8699b3b39350945a2acd2f97d77ce8a14128f1b9875c09cf9104f7e129dedbe526a24da632c0dd47e3ece01a5f430b51c116612d6ff0569d8c8af1308c5c52353d9bb34e94e838494897e6d7dfbd3ff6d6f9f88facebf7894dc68afa11f247563ff789a190e641cd62f2968f5364ed0b898e3808b33a53306664c742f30844a4cdb7cbd9c46b4979060dcc94b090e2b3c8b4c45bf1cc20f93e23eaa1eeef8744f32f777a3d3fe8ac5c4e1766a474c5afec55bab39888fae0923
b67791082621fe0fa61378db13a36365c3fceb4b15a9d17d4bacdc84273d084a2ec220f5f33c8cee0501b07399e1ea5732af6504363903dcffa6c598b66d478ce68559c6a525438fb825214b9cf53a42c1e0b2c7ef1280b2e9d082a7901ef67db7a88cf588af0d4c64b1e1c48a38c58ae82288f848e192e98029489923418e060a555ec3352c80133b5d3e9d69905fa6795e07ecdf2c5094a6e6c5e6c66974749ca9a0c6c4cb958b86bf8559e9b47b466040ca06d190b43e1344c3d65715864aab2410a8a7eb2e1706ffca217fc212bd870c8c1f5f10d1a78540f5a44c8ded5f6f9ff4e504b6ae13498bcf799a936fcb18a87cb78aedab8be2ce0de587fa89b2c9786216f6703d8f87c789499feca24eb4fe35194aff016919bbca91552d206b2d30d7d8b9f1df6c320a00eed4409025a7f9a5d8b805d549d583c91d56c21bc548f6361ce9d9a2069f5776c0420ee9146e305bb7b3a9c06de17e80fd16103140bee39a5e9a58128885b5b7e3ebf21d76f3e994544c886c97ef0123bc23e8d443f83fc6c61fc122aee40858ee44e75905029c602472e8122331709ebe539188f49e3ef28a32ca55853db68091fca0b672bca2c3c17c9842e278e9d272854ba12eef4427f50ecd36cb9b4acf1dae5713a1b784a249537c0b71dc1ec997ccb687c6d59002692a52cb3650ced79abbe250747c3bb3c4acbf4ec4767957da2266180c3d9786dc421ae403718e7fcf7568a014e74f16bcaf53d7cb5bdfe17b13d7060fcb72373512a17ce30a1d92e21823f89ef07f00a6c6e9283a813cfd9c3c9560eda0224d896af8102591295a39a86c06944c22f89eeedf78f5e391e03729786824a9e3ae1e19e4e1b08ef386a0d0e743a7f0bbe87092d2fff14d4f1896ad09248bd81ca662fdea35fe02f4c1c81de27ccf816cd39438bb60bcda3a0039411767d7f5484ef9445d9db7ebd186cb862a1e5ed34af359441488829ded939a058e5bbb52425d77ba6987a85a7a96898432ad063082d8e5da5ac79ec83fd84dea919ee396fa0258354f9b3e90c477e312e0b10f7baa0bacc21590caa0da65a54f8a269e43c48470d9e9c6b9989669abf32737ec12999005be003b59295dce0cb0694337fac5f49e0166717b8104360ce2a1d8bd8c8c630a134645a4586b5db2b80d34dc2bdb7d71ed6703b0bd8bec089c3a23d77c3ae5f72d35811880154362f0245386a08f2eb8fa320ea34303b21863e073ca46ab0c4390ad1b8d2a24bb335755da0cfb5c9ea121812294bdb0f675c6c295a815474f351d2e1ee7f1e441417c266e2dd918326fefe8acbec9034242c9bdc2ae202ad788dd964c51450395c06b63197b0d6be6590505883d397bc3692be83fac80edf09f94bb97933afc6ce5cabfa7df65f58f907dc42ad61f80235a540c3718b3544dbbf885365944569dce3163d546d72bffa681d04e55571c2f8c44450972b8e80315ad1f171cfd15708a5cd97c90d73803294916b1e28f43d442bad894708d1f075546fb631d599a3fc5d452344c38dcecef7873e24b4666966a6ef5d976134d7808f3582f0fb4eaa1d1522fb4b680a62df8b737d480e93cb8b1c36074502654b7d6a3ba55aa2358c60163a6e39d0cc32e89ae3404c1ba96bf759b4948cb31a7397fc3f7950f1552f63d154d406f10bc255b9f87bb97895a88c592fd71ee3c404340d2299c368a08c4d1275ab3ad05d6c4373bd470a5a725325ac483112e7d7d0c5b8862f92fa853dce53f32052c289b65da16722202a36e40c7a4bf69806244ec53d2da92d4d0904911eb8adf4494ce5674bfeec0193bcd18ddc7c6e175d73d3ff6060487ff2024b139ad5440fa1aaf3f85f9683dbc919167f2bfc1c67b31dce02ec2cf34d96276e8fadd9f01cbb02619b88c9fd031b68e43ea0fa3e09fb9848bbd9816ef59fc4c8ce76138038e43fec617bb3be6a08ba6ca0133ecb925b261ee4a3436aa813d609516424e5552c97344896bfceafe2f2765a574149c7ef5c456ca61e9eebd16998a4b23b513992aa21744503aa5fa3b8a7950d0165090549b3042421360895574268f595ab610dc6084b04240989340b3351b4da016155122c3cd0a3880a456bc4d48ab487d6ed9d870e23e8693e55ef22efeb9262af8849b58a7cae11d31da18807abc557d663b5be41d33d9b531a77015619af976ac41f3c8ccc596302acd36322bb683a2981f11cbf9a3a974d48958232277ac4340b136e3541b4929899564bf5d7e306df5efb2891c404f1fc18741234d850c970ceaf951d54dcd0781815b4fb79392416fc95937060b335f14472dbb8b825661bd417cb39f9b2afb12880dbb9da948c28bda2f2e8834562a2b7ff929854b3e12afedcf71d747b222d32a49899b74b2a0ebbb96d997481abde8b30a7a8e9958cd1feb90b809e86818065b7e68739af4c04506f3df8d18917f8818eb685b3b0411e9c84d936ed0cb5246ce37b58aab013dcc1323c8ee00d328238c8a4f1532b58efc734
319ed39cd0aa451c3880923069e55d5259d640327a76edc72404e2ef91dc52242cdbcc523c1a091bf4b3d9ff51b94aaffe5c1ea75c7d83540d9569b751c4ddd292ce774c2ffddc9776f47ca734d2f77ed1622a90d0cc54200a1adfd4bdc12f99a2f5f9c9c1219c9acc745fa26538fbe75cc95d215180731c4c5a5d2e26c2df3ef15681ed8c985dba20a59b9c4aa1a9ea6e655751733f39b42a68f742152bc3302f92f82ed83545ce923d50e07427e811a2578ad51ac4d3c5d6a1c2aeff88f2d7d82997e1570f68fcaa6e282dd204ca8d48362027cf4d2aea62ef07848b26488318a33ad295c4417d19140588211ed372c4c779c450f47991f4f20209a6e02350d20f4b5d7bb7d8d87b07f9ab2f4f58faa2f5491c6a9417cb64930d99807f67d605bea332910e5d2b08569540169d68ec47d6a4a6a17d41d23210bd4efae5bc0b02b731a59765e1c60cd3765b582353731892ad175815592f3ec72edf1eec97d447c6b8ee4c4b9403d146b1ce2efd8437e9d3c598dea18e8dd02c5f69814fe830d24a0463609684ec4ff6856e9dd41719f2a6c6509cbf4b6822cceaceb2a004ea9f625c2f812ca851dd04ccd5a3be818dd47a0337b8075c90606aac27d7a3591ea282fd6ba95dd9275365f229fd9ac5fa8ff4e5fcd610a1df63597aa2207dea09dcdf36e6f00b9db54b3f6822417fd08b5d4504acf6893766d8c56e65f1caf5baea3e1893bee37ae56dd58b8a248a6e409ed18764664c5b50c67f3c342e91f21b1e12a90b5241c46757ea40ea6782c72fc76666032a9b48074f021bcb1d952c416a5d71347d83b3c88499ac4138308b36f29a4c5d8334e19fc06471a2267d02b788f88f24fa4b91b87b4626397ea46ac3d2ba84a2af57eadd592c431f4c63860bc161640150c94b1f108cb2eea5f9888fd463a22381ba9e736b641e23988a37ea6c89d45620cd3b5d7424d0ebc91f54c91c633815f694d99214adc0016f4ecf3e2517a8f8e55bb3dd13118641fe9f0ddc361c566c40ec8d4285e5e6866f3271c506a0e0966c9ea68a4280c78241b05487d6f21c5ffa2ad8391dab1f6933ccc21293f425210e991bae1df5ed7b7dbcd6e7934484f57111bcd28cf47d9f2d4483f6c384984fc23936c63576bd9ad8b80bb22a1213f375db55a711a3dacce3d5905c8c913880aebd3874e7d12a97b80c98a5fbad154c66c710e5f4d9bd6aa8f1e0d6baf2218e2cd1b1de061ceb55eadbfbafdff4d3d448f03743ac758a19f4326fc01db927d0f5466341334b8e2ccc6e92247a1290a85f847c6733bad9f46e15251a61ac25f9196074b057e4e53494f9bf463a3f8b07d18fa46c549a31359d8a3334ff4d13a63636193a2a96c714c1650b06709ccb24bec60793f9123b4ab24c4ab6fa996585c0e590e13c5bcba5f795af6383f6052dadc189f0a4b52cfd13c5453190166a0b6989cb39fb2fba247f71e9a09d5443be3083291ad594fd6fc17e0449e6880b35d8fe6784f1ef09a1b4577214666765c2a20df96932a48ae0f91d8fb23afddc64f9afdfc89172628e5deb8cffb7a0fa38da90ba1211d59185924317a9703b5b9fae55734ec3ea43978028fc3797033909178c46dc2b515aba7ed58b5e59ad53807df8943b2b95a269af958861ab6d286235a2348c41eb61bff222c9ebd37096cf98cc3c8528daea9948b32a94e04f74344a2f1b9940b8bb80cdea62bbc2acafff32c10f623b411a8396cd8bb800b02a141e9423103a0bd373d93dc1f44c4105890f84794da95afd2101a1b24aa550a31ffdcdd75febace2f52f1672b35b2fb724973fda309edfd4b1945c11c1401091e41f300a9f1d7fff98806e41dc98dfd9e3ab119205594e4d8faf7a28df80bc2825c457130144c750d24ba1e4f469206d2d17eab72e54d23b7937ae878464b0ba798b9429e71202c7fad4974eb83c7e2af034607113859320bc90cc50c617e35d2fa57461cd160e2935c86b9c520c9fa490e6b0d1e675f360522ab9cf2a1eeb26c4dcbcf5b8acf42e8c2ab8bf54afba878bdfd5881807a75f8845a89f97edea749d752e051ba7d2b1341425f1cf58c6857087f89acf6936a63d064983ce453417c39c5cea6ae6f7da0d39684d8f1a86fc44ffbfa9c210b27dda65208df4babdeb77169033d45a7222b1f4c3a9d5a6725cc0235ddd7bdf625028dc70667aace42d1bbb54689d19765d3ab39ac00a5fcbfb82769d250e88263c6a0b3b66a2bba751c662ed88e9a9a6c98dfaa27b7b23bcbc09dad93d5a918f88bd01d6aa32e934a1c727f4eb9dfa20cdee13aac7fa240fd1e7e6cfd56bd5f254d95400d453389622743db2d2a5a287d669534bada5b5c550e13d1176a6f30fd16ae69c56a562c5f9c39139f812d92451b0bf87d02aa874ed0ae1a6bc1ce80be0342bc811d3c0a916a19d933a4940e6b7a46fe1172b290a9fce3e92298a1a1db7736f7f457df5946f76cbd59e97687c1d5af4bfd2ca6f2dc1f345ffe580722996f966d952a9be8
a4b5b1437125cb2cafcca5e05d19f4edcc05568bb82f46a453ac84ebcdce660706cec272e703626b202a30086c2ffa6809e5b00405b45701fff53629eca174b3d00142cc6091bcabad4a721192ea0ba541a7fd71063ba5da5bcdf9cfa78a939f3ae419e6554d715ca8dd9654d153ab460953213abd44e48505a36de95a0deeaa0746e8fc7103cc8150e018ade3e2affff0844e8158d0dcb6fa0a7301e06b3e945e599d2420d3b0148f253edc0018dbb641f04c145eb5439ab2a6acba7f5ce45f0f812b8fdd2ddd2cc5bf2466eb17ce1a36db634dfd201d2dd66a648033edd9ad2c3f0b8d8e149f89e4c3c4f0fc62ecf711b1ebec2f6804a3693a6ad850e00274b7bcf484a7a7f1e5c32bd44cabc666201bf57aa571666179e0a3736cecb2e59de55393ac62531ca7098f008404f01a20f6ca5997563647455fc1afc0ae5819aefddcdf4b02a92765331757c6ba074c3c286fa50530ed85dd0cd1cf16f0bdce8f8e00d8e76f151f0d2f009deac13dc0cbf06852850dc474d55bcc1615844119d5345b55c72577ac3168b00f48f1db94f0f3420a38adf95fe3e09ddad9986c3ef8477eb3855a55077831b3a6c05aa4bfb0523f20755607f5aca9a39bf0bb8a23839b77b0d99fc0731dfee7610a854ff4a22f8349506696f103ac4c5ad3b88222d93fd4ab0e8665e2aa46b8632a7a06e1d10a40f761770b3a3833739bc064ed71550dd077b713cbc9903a0db50b9359b037217bc9163b37c0bbc34f9056e76b01edf8a289d9fc01b1cede3a36061660a7bebd82c9f0a093d23a0e72f11fcbf84f2ff957a4fce84d7b298ccb7511fdf0c3f535534542af5cb4e3e4ef38130bf8057d2ecd26b734925ec6da35000dc0b52a9e859381a27318f18f388bf37067a173df29fc1892be4c990a49ec892bd3b537ea90097966e54ec09b21aa40212c2c7e6f179c14c5ac0c5e992ee6e43c40975e40b4e198da4776b647b57f2dc2463d636cafe5d5be5501b9bf3ff985fcaf0852f61d7c621a0afb5fd602973db1ebd8d645e74766a3b7e2b2b13ab7412d54e74cad977dacbe4f43238b88f17f472721796540a3ecce0af53a91c37264d45ad3d1476d51b52a6c2c44064b446b62f25efd63b159525e59b53e88b1c97b299f58dbb0fc432ee427b6a869b4a7d79988d2f4c445845239df8bce1070d8d05b900d94491d7bf3f3b6ab03c55b1abd8a1853cf1971f6bd23c79e999dc21b0ea602e44850e640cbba4932a38622ebb03e83e7428619598b7deb2daa8a89be0591ba3bc9db29eec7029db59d3f83d421dd43d330839f3e206dcc155427416fa079632f7dd27e852779941c899835b512d746a07ef8462d385c68c4ad017ac5fdb933d2e649f351abf67c3d619839fb8db963f1994d25f05a26d4543e9fd3a1b4e78f0e4de56fc7450d4e60a886c270ae5f7d3db6ae04327675be0bf06c3e7de11581dd252ea9ebead051e71746d1b3f5b7034f39e01aa587eed70016ee00a801b70d50935aebdd8a400faf09c2aaa26f2ae9bb293d781272d683a608d47c62e2ead6af169e7b9a602fe87067faa72db1b05a25b631aa5b3b12a64a8eeaf29c308ba2ab54b82b242fac25afb1821dee1acc989c0a4fc924a02f1ffca94f52bb99e64f520b717b9bbe4d28b588897b5e1e40183a017c077ea04c757a140f8d183822e0dc595a4bd6e81bf5e05882b5ff6850ee2a6bcf6f3a581abfa8a484e69cc7afbfdaa04b194975c38c65b70794181dfd5aa30ba6707c13c49c6aa36508cad563c9efa2ac153e99cacb52bed62a3db2d230825a9a5c9fd28db5ef082c6a9585040466143cbcfda992e570e6b6b3213d713b0814613d5770d4922cb8533d1b9c487798df312749cce6e440f0e54b1232f28655086f920f393374a12a546e14e648a7e3ad764a17ef4c147ffbf5ee60dd1730dbbe2a3048a5bbf7a68b886fa2fb3853c49305dfb2632044e45e4267b11072020698ff3935a16681f17cf5ccfe97a9b743162c5feb9bb229e6569fb1a92ea1f5765548e9b4b89237be48205e50a284923ad84ef94190bdac2fbcd69d0a0bc923f3fad3940d69e0947737286e23820202b8ab05dc7ca4934fa3e023ab02204de222c2a231cf7ae0c52920624a96033cb99cb55f9ada9d92c5ea31edae468fc2b722fa15b2e5cbc2ce342f50a7a70e5f7bb8c678e4d8332024b3c32a15b577d8578ad03150b9fc0c99628daa7c095afae2162e943646929a4743d1cced4066f6835ff23d85d3166b1846f0a6e461e8127fcefb676108766887aab02c4ccd882d5c6254b23b6928008e2ec9d1f81668ad8b09ee424c300ca075040214aa60d1f975a03a7fcbc0fc99944a505433895f23e88574bc06d34300d5f3fe7187cdf637a3b535d2568c5da32700a88175d8b7f2dcfe637239154680646e293feb5db7049184bf68112f2d891bff1ab8f79ee0f17c922501badc4ddd49aba38ea29c2daee186ecc06a73c4d26db02344e039a84b6865d301416
710ec9796e67a89fa396782ec6d1fa3cdae7d1f6bc79703b52cd69fcb291967d173c35392f8775c4932f1d64d62cb6708fcf41fcb900a1d7079478108be53afc56330691d0d1894f36d0ef3efa59d35d51b4b2dd448894524a198d0851080a082252fc29e18c5144304aa887837e8a4b88a90881206212d5bc9fb68c64080b71421f75f5b252be7cc1817a76998aa55e90f08e85e462788576d51863b78c9ae024b84b5076cdefc3e4d8bfbbc59357ea904a96d2e374fe4221100882362c3739b3e0b0b8f6890857b3b95361db749debb6c4a1ebfb3f1abb05932ba7d393c81bd2bfcbb73c87fab9194a65a1a054f3855b2914aff413b565baa2aeb46cdff41495b59be8d24bbf34093e516bdc7721c126aa0af75b485048bd719f8504672058a3652e4e103c52572e5a57432a4159ad71ff658b3048bf614e6844e7a1082acdfdd0476541945a1a95663f8e4f8db98f82f2a3a37e2eabe4a39a62177575dfa502466d5663eea750dc128fd4d515b44ea4aeee8b42aa7987f45b2ff533852e49a14952e84c7622439c4c86e8358184b0104797a2eca5093f4199c7bdba7d4e02a9d7827c8274fb2b25164214033c929f0d1b210054b061e3c60dd0065b1b7abb2b8d4caaf35294e988ff8204d47322774fad61aae1956806ead496890bd36ddff6adc4394a949a94d444578866a81f1b37582cbc710f0bb4d1119f05eac832bdbe0c70972c282570700b8481b06dbb2c13ed6e215496698a6a9a6ab805ba4433dc02f5602e6a4b5483babae19628c52b30bd2cd3cf9541659940d7a9d24f535f296bfd86e559e2a2f2e4a2198b0bf5835bfb66516409872ca12597782373d0819ba628dd74da65953a504dd0855ba21675f50302d145bfef126935e63e488a3211d4835b6291baaac14c94e29628a4aea0883b3f97252e51cdfb57b47191a4a78b47fa05e19f7e86b25004a18f4ae3be890c53bcba5f22c3180e7f84e80db7c2170e61ac17257017264c5c6c97e561237e00c1373fa2e09b27519c3c61729d38e91775a6d07e3f855e1690d42bd5026dbc3285cea450d02695022285a680c85d4e01e18d40186ee57b82314c8254d898f47ad759dbbbae9e6b0da943fa1089c565029c1af3835b26199ef5fbe7899b8148dfbf8bee78a7b991e9fb0b287b016dc0212ad0268582b2149a42595ce60c14503881c5c51203da803188e06788e3eaf7c3d765e1fc08d110c8659d3c70da3528ab34b59ba92f047ab254e158ed1e566e06947c39f07c4dfe2129a7b035a49c4041417dc1e59c8340440e82308220dc8420c6450aee45696902ffba48ad80c91296f400094ba618b2048a16db9219fc58c282254208f132043e0aa34508bebd00724b25e12324a92614f981ef26504b6c31d8c29684b60d41821165b381a0c7e6431425605c16c7450872ceb945a684c9164aac5042fb6184ba42c9109a122556a024055828c1818283509af0951b1259609193c59127b2e0e1c942c89bff6266a1c3930510287f04f7bc4821818f94f8e0811452a0c000821230008209522c00c20f66e641e2e45f78b080d2bf5e7980e0817ebe8ccace6dc2e807114678c1143d6e508af8008a1246261768a002248c647103286e6a7412ba7e6eca359a874c3779239e266a3a6882d64ffeafd1ce6887e79c73d6dfa3cb0c549adbad0dd7bc37f0b866066ab8e6fdafe69c6bd0ba9491e25caf69830edb6b88826b1c41ee9b956707affe9755517d89a745b75d74307ae74cf0418105133d44b62062ab818228823ca149e1811eb798283c554049a180810d4d0109750746c47e6a68a2a8a9cf2d0bb5abf0ba0299acc79021af0fbc8137f006dec01b78bbf7de5a036fdc7ebdb7d65aa1c881b7107e7abdd65c73ada587c578734282fd516b504eec7c84e3d187203e1fddf72879e8d1c39354f41972d7755dd7755dd7755c596b8daecb39e79c73e672ce39e79cb9aeebbaaeeb3add755dd7755d97bbaeebbaaeeb9e870e775dd7755dd7658ee4776577119fdd6f537e787dfbf7455fffab5b93e7973f606bc20f6d6620953e907e871e6a48ff71200cfa8da4fca8f2e0d76b82e5f9d2f58fe0733faabc4af9e60896e635adc601f4677e0be0aea301d2cf402a7dd88187309e50c2c517509810460de97bf450432a7b8c3e2c4b1c40afbf0b9138d8306adabbf24547f42392075be3430da30687a54654fa207d0f1e7af4e0e527552aba7d12d983879827869004145c1c39a2a726fc1e74aef0c0125c50440b3ac0831a1289f4025cd61a432a7fe0b8e5a354d363071fa59a1e3c90c2300cc3300c476118866118fe07666ac2b2c7068a383104134130c209d29ad1af7c0f1e7a5ce04624481727b8010b9450133e0fa4b287e86f6e6e70666a48ef036e82ab06c7eca2b85c3538a61ad2fba0f2e372b96a70bc1ad2dfdcf8e0e2e3d590de07176ca61ad2dff850c398b95c1f306b48ff81991a52d923fc951f4190c256041122f0d4dc1
f912b24af2bdb43ce8d0e6e8e30124508b59a3b8413558c20084292f8a8394d5de58a96a00dc119f2bae0f77925ad498fc5c901afa65784d71cdee196ad0991df83825c33f43807c6a1232bc93d8b410fdafcb2422c50c82bc26be73e07f672c876075f7d6102fde99587895b95a2571e2660dd82369e0ff8cd39e75a6bae357b1e13ae5e796ce063ff8ea0571e99ad7f15b4f17af8b5f62de84b41997879d6072b8f97fe71159c9282b1a9d794514a4a0ac93fe5b9a8d630ccfa2bb949153285445d937c31614819d2068e6b5aef096e7b1ecb171b5a4f0736e79c6df0ddefb9f75a7c7bbe165b7c6fb5768acf4be25dc16bc846b15178984c1b250a7effb44feadfb2076b2d91af7e597c40e0f943bf5005b25f86d740db86efbd1fbfddfbbf6ff3fddff7d90cdae472ccb9e4f706b43f33be3f83217004c107c7ec7df67ebff7a25d47d7bc5db6a4e83f4bd321294af9d3eb96669fd81f95d76f9f5c5656798b9ef64990cbc225ef3e4cda6e6f4eb48d0d72cdef73604e863e07562173c83d8534bb1a5ef84ccef3dea5794d0deed2a241f8697f5e08edbc627f3ffe8a4b9d15f0735c0e04962308f4a095726565eb9feddfe10520acffc324ea9ada944203defd69aac1bce62e9f06d6a2f534c16deb0b35b42cba4b348ab6a87d8be6bb51c0df02b140427a5bf4a222273eef073c94a53937c1f1e78c1f0401e7463b7efddea99aed9d8e8ba097a581bc2e0b96b36397955ed678fff7ce6579b22197c57359dea66d231bc9658db944cd3a8ae79a246a086ae79af83b122543a5a08eae933272b8eda76a76a252d485543773f49a13508089496165ce90118306ab35f3a25aa1a46101470b64005cc8d16bb2e63c50e8dba0cca79a75ac9a5d96eef8553c68c7afa25d96a9e357f9a8a2dc164d4d65e4b22a4f14381da35458d8191116f453f5f91089413f554f543aa352d4ce65e11f02c4f888ae4d828bafbea4fcb85adfbbbd5cb1c5f6d46b95a1027e7aaf6edfbb32e860bb282d4b0fd6ad2848e7e2848e2a8d8bbb01d9b40ffc550751ab5641aff5ac3c1d7765a85663f0115a4b053787ac69315ac0ef2825e17057080c1659a5c95f8d84c322048c4ec2f94a16598dc1cf5d1de3e72e8ec36b1cc928ad3425d04f8dc1e449d3340d06337c3ad6dfd2e2ba3204bb5ce99d9c33b99bd4717582414542b08e6f0a1545faa992034ba1c2e4aa31415654725662b4805f54ff2c0843aa97688dc11f23097e9a92cc78b1a4e5ed8a254c493a3e59525bce78bd18e1f7cf94ade34fd92acda8256dd9b18fbf44edf7b3bc75fce5cf2e6f2a173fbd8e576a1d7faacff0d145befa9261be2ec70b69c718f348f1ea1863de7176cd3a4c0a5bef388cbfe8f892b59ab67cb926d6f1fe71cb2b7905f94599c0ffd2c54909261c0380917961061af02fa0664f061ac38bdd2f7739cfca00829f1a064bf54c460f7e6a184d1bb92ccce5a43b323ce2fb4658d3df5069be3eeaaf4c32add2943a7e9d9970958b9f5cd7b85069829f2f3d95a645ef689cae87b46c53f43075cebd79dbf432849f2fddd46db63f53696ebfffc2dd5bcfe4d681c73dc04fd334b91f19af3462bf3f81faa4f3a8b51e85fba98d2070a5804583141f51942087d7c1ebf6c9fbf6ce60569a53d72f9303b7405adff2ab82ef0741ff95230e9523e6f487be1af04a7b1f19226b63ed19ebf734fcf77d8a4c68e633fff19fe626eb48c3fed2c32b1a42e7cc83bc5206afeaabdcab5fd0324381a0b4efa37d1faa25f5a30b687fa53fb198ba2ea6338ee2eb9adf78a285b01809566b0401853924888242ec9ead04848148dc9a808c807e9356e42382854084382031e79a222c8485b01016a62c5fcbd7b212964699993d0ec52b1cbc729d6e371d3869515730595a6426a4ae7696487244d6ef0b164bf10a8cf1580b19e69048241710a9d3d2f2c2ab103682be51e974e4543b15a1cd64e20b26c6c4244e3430166b496b0c0caf46b61acd8b93910ea2e12f24be64b1ef1bd3effb4a1017829d66a010977522b295882a00d1c614446b496b8df1556e1f5193130f0802f105dab09c7b238491d20f44ea9064f80a8b808538a108443ea22f766d57b17bb60f38048882b09eed03c64872dc4943225a44f82ad2ef8722381199c95217527c89b02fb8a882317501913a2e2e20da888e3835667c8d3f461fd5bc3fc24e444eb393ec34a4df3f89b3ea23cac4b48514632211e2eb8b9075898d631ac2be128c8140692ccc39228cc1f6298a9cc6be07913a2854f9e4ffeed93f808de9fe62fb887c445ec06025092a516bdc2749f005558524ea8dfb2309a678250367202dd602d1c421e28e18a473b1089188ca42886369549aefc7d7e8fa8111fdb24622aa39766044471fb65a8db93fa6b8755a52575ff4f423757559a722222f4e464e3f63dac5986a31a635f125c2ae99134b65331a5e9d68b1d847b3d1686108fafd96948d10001fedfb4eb431155f
b44dab3544307088da0a810051dc0a717a7c7a7a897398482412d50006b3f1d1be3284bd700b4451f155bb2cd1ad8aca123511f9e0d6f681e116881602515b5bc9ee095f61112b221f1f910f882682d144b05204838daf1136a2a71f51931188e876596310db07b74e608c383b8cb103dbe7d453634eb4d3ec944449ad5fdc3a15a9ab224e3cfdfe894835efdf938f929316b4d3927e5fa41521125169884688303126a6a24c9ce195f86ab969718de9988ee9988ee998b6fc18d37e7f4cc79d5136ee8ce9988e3ba31263aa44eef2a804fafa40df07cab597b8be45d204227548659813c2742c16644cc1d8988e69088b85b0302726befa7d3116135fa211224ccc89b5a4f4b0727528c1a889eb7a180803734058bf6f433874413d211caa5fb197f6faa1186ed5b11ad1eb8788a82cd33561b875fafaa157a5117d254147200032d2eb838a54d60da05aa5e15f1f44ab34e1d7ea53515f5fcf706bfcdaf25527515b3943c93fd7bc4068970b8c5e3f77515bda85b27c7d0dc3add4d795af3e4adcf89224476a4bbf5afa89baba7f665aaf9f89d496969d7948af9f855496a65db3566952b2acd2e8afbad66f5ef0854eb3e780830e5ee74a5f7f06b74c0fa281c4afbbeb44d2a809eec12cd6616181a2b5b6250971aef981363a2c208b8b44b96a8d217775c96bc9141711848148986ea626a81f5418a21a88d2dabe81b0b8abef416f4d35d1ccc485e986436eb441e2166d1532d54dd81369e58b09585cb8e5a125d22c812a2aa08d0eeab44fb5d260528756429e7add1bf73d337b9c4bfcc12bd1a7477c227691a4ae44b3184c489a445de5741554b35684480c86835729291494a5488e22596eb8457d2412c94b6dd3ee1fa9834201a1040e5e81682c2e178bfd5ac0fe917bf4f510b981571f591a0b71d0578d096144d4d57dd026ffa77a588a3224ae476b0c8a57a3da4cc6cb531cd2bf245c8977c882324ef35e2edb05b40971bf474d706e97a01e2e042271438501ea49a1e4a3b506892ce1e0127485253884a83c653a0805726f843821ce0f3f595c28f4b2f287368f7dffa08dfdb74f823a72cf9814b9b8bce9b24cb76b9a9a54f3be687603042a0b05256fdb16c2be812e500784812978035da00c447349a1d7546142e8b1882cb99adc84a428999ec660214e885ec1c320ca3d7282a7489dd48752a09e10b74baebd761abf02ece35c7237daa66ddaa66db987bbe9f8ed65e9ac9f5c96e65cdccd6579383f3c232e2be7b2469bd36b722030dd28d84319077ac16bf7ba0f67a14cc6f3df7cea92bb15bd2f5d684f7ba10deaad41adf73662bfae89a26a46801ee881db886b5e12dc1b4575540d09aa078bcbd233ad7f03d940f6ab33a14c2fff1d957a1c28a72922e02b9fe2839fa5d8bd3704a14c25840ab5714d5c2b03d05a841deb7804dc7edec17534e01a279730bbc64f545a6952422a21d187ae6a8a8e3f8fa4e03b4219064d53ebe87ad638350695c184a8ac5ad1fb3826db4d70af949981cc0a796ecf074c735083bcd411c1b01cc1af5d87f46dd007daa0947204953aa32f0d214b3b6429c83549de08d536a55495826eab31f84bb1d90e5838bbac133afe90e7b6ca50a69a6d5435d359ab985c567773a27a7259dd4f94979bcb1a73b64106ca84d39c266db824dfe886ed3467bd63d4aa0ba881f6f202e4b2c65cc271cd991772c860013fb5cd9471043f55332e034efad19fa19edba81c1150e280287e0f35f6b9cf0ea4724400e94d6f2a39307ad2d79ad15b70c7080162c96f25ef19913eddfec7711dff715dd9cece10fee3ea3a6b43da26c313e160118a5ba357cd78ba15cd5efc8a66b46e5fdb6c221cbcb22f964468b72fa264276ad66740a578a5843321f5d53afff8403e512871a3f3dde865dd570cba679d4f3ec3adfadd7b2f3704c5a3b98c5548543da9ac44edcc88d496b6d54e08b595ebea7e938efffb4c54d7c58e043f51294a268255f05335d361560ae848f55ceaf0c07d59a201465ff1ca82363252ae9fe33c7963f00007187dd5585badb5bea894f302de35cdaf200574985d87d9f39792a2e2a936bc29f7c56b2d9943bea08e1ebacee8732932a29ae1d6a8a4a9a8d83d3ac277a0521cda409b92c93531560da142553ab514a54a553bb7c57317fd7ef6a934f9ef97622693fffd19dc1a7de923febda75f824adf86037e6a5bf97359311d7f19e5b654466efa25fd0edc1a952a9ebbc23fea0197a1ce3a5922b50daff0e8491f136b390fd4ce5d0d21519acc252a3dcdced2d6f1d5d7ec2a1ed54c45134936ce3f93898504da7839ae0ce1ab7c8e7b411d292f7afe211d5c46daddbbb9a51176b08230bcc59787a9dfab9a5dd334034e7ea2f0891007e10e422a62a1527a592974fca59ddb527dd7aa662223d7b4b670441277bdaceea5c64368a5114129f8a99a699c7eaa66232bf8667259a3000670cdfddab659a529755a57d56898c1ebdf688bb0a834a025b
a1559166b6b12e2d6b1e75846482ac8721a4fe388fa50bbe216575331c6da36c3aa99adc53369349ec99a0b6a65954e520839eb24ade48252ec96189f3a765fb014a9b2037e6e14955ed618425f74909798e15369ee9b5aa14cb57b29f6eaa7a983d92bcf90b66fc9ed16971d32c2eddf7b2f396697ffc81da282db1fe5af55f06c5fe47875f3d7ba7dcd411b6e5ff60b68c35f472e3df04d5aa5d13544440da2fac8200c1d4eef4e97d3d4b9f6d133c903dfb44d33b966680a6e9feb9b595e548a4a513bd7ecb744a545386e898f3d5eaa78ca8dde8bdab92db154d1ee9091aa87a7f318f0d193fe0cf5747bc3add2e3d73613598ae156a9c4abcb5ac9e155db56a0706f446a195e95231e8036aa598dc1ffad7cae89574f463c8851fa5cb37c724dfc556076868ee077bf2565c8006df67fa609daec5f7d279ca09aa9662a9e14686c1919d026e5bfafd7078de020a757235244e92af46ae40647e837f60dd026456f4ed5a3aaa9681f685b500717e98fd3bad39dd65f17fabeaffbbe50a74272cd1ed58cfcc1adde20d0f3f406819eca6f10e889b8ca732a292c1668f3d50fb42ba803b4575801b4f9be9eba0563b860141cf43a4e8340afe374e8bd8ed3a2170fad435fab05da74ffa96c0bea08bd8c23785a0011ec9af64541c81f4859a5d9d9b4c39602c2d0f55dea1db8a57dd495be0fdef7ae7e1079baf4b3f69c9e908eff23cb53a7215087d973e9e3dce8066d442c8881037c72250f9d76efbd0de6e774373ab242080a6e9ff4e28f30a47aeea3d75f3146a5189562de6f05f3986732da866d24148d340af7e52e879aa16aa8192a45a5a81d940cc583a2a1765043503214cf3569a834f42f02ef8378e81bb150c38763d7cb32778c6afcdc6828c39ec7b203d5f7fb432a20f855a80a071dcf2a4da8ab665515456d953eb5f4f1409b4b727510f892049f8e4548b8066abdfb941ff75eee7236aa9a8d2355b56b62d50c54cdb08a87a642a2ea0149158fcac835552a24aa1e151904cf3c745afa943f9be4e4984b9f7b962cecd10e38efc1ad9186d18f08288df74b6ffad2d79a51c97b461a483ffa51b9c388801107c65beab0fce8c51f7dad2195f59627b775fb2289329128161295de1b585413f574fb2212b951728cc192a30cd6d56fb95ffd763e053f37bad1fd42a59e673295626f9aa598698f74c06fe5be5e96a8d7cbe2444644b56e4548ba35c24296d27b03bfa9143bc0ad29c5bce0229828e7b2f487b26dc46da950bcc2286e893948c7391dbfb6917e54a2642002465fd2b65369482767240f8a5ba5d85de1c7fcaad052cc00ba462c512313aeffdc68ed1155c14f6dbb8ff3630edde86ff46e58948ef1e7f413350445c99654166a89caa4b2503ba8740b3edeebe1a89f5b8021d567e46ef6faa71fb86ae6ca656f895f4725256a4b2cf70baff093780dfaa82c3dd1f19f409b3da282f78460338fd6edd7baed09c0ae3d1d2fe0a2d7c4badc418ac066d2cc1a6e914a31266acf328eeb24d2a4795f1f19e1e7463789346bb1507659b59af9f1874ce452ac635d9a341df0ee4fd56ce79a18bd2c1c027e6e74a33a5afa0c9f233c1ec71e365dcbb5f03caf6302c694a93497ab71dc0c34b87e2cc0c90e9f1f7b93f54bc60095a6fb6a2a42c4fc1ab5253303af9d95480f4ff4fb33d46ab5cc955f63c8ce00cf93a934b7ef21386c7830d8e49735f3525bb57e5dd91b44d70194c1ebb93c67d8d906f66e3c0414c6e4384e731a87b5dcefca915e35ab79f2e899ef9ebd9c778edd78e0636faccbc26a58dc169e05b1436c0cdb59c73aa3b6171023befda1afbf49af9a3837ff67646404b063065b7158b7bcfde850830d3eda04215b8846ab6111a29da19e90911092217a0e3697668d095dc1cfd02c340bf1d0e0900ddbccd98c4c97a92b3b53571677669fe93860c0864ccf250e3b7cc76102cff9effc4408074ac8e5c3049bd8c42636abcc016a8d8957a217e880e62b3c2f7b9ef674e6b6cdf64195c6d6017750c8021d2883a540bfb900b9e78a6fc529dee7b85e2e3dd005fa3561604c10888b2dd136ae2d8c0565e07e80755dde7b73ce45f09591b151030f1ca49891995a0f1352ecb24e8f945ed6499291829076424afa190aa29f3241f45acad8e121b4cf481e7e92601d3f0946ca412b8db74933ac84285ab2ce99cb9c2ecdef60ea0c466b6cc2983041c0981c6352a41fcf342ead0472c96dc793b0aa3497cb1ca7399d5729a480313ec1b24e6be678408de14e605655a430a6a9bb75dd0d3c5e393ad89b09581c8b763a83adfda78d0ed34ad12a5f55c805dc15be012c65c614f0b3d6301640bf8ff1cdab2bfb20b2d270432e390668d8618619408f9f861790697333ce49928545545b2bccb0a080c3b81d0bb038406c983da9e1c7ba74e672982f1899c171cb1c7885abc993001e7ed69ed939b38004c8e059809bc17171e49c6770cce0807120f0b8cc719ad3d9f3383799
fe4d5366664600df13ee651e803c2e739ce6740679e58b4ed76d5cc54bcd3ae7ac5fd0a11170dd5d9cb5ce9a8a6c49dcafb5176fdb83cd15a7d8b2d6989c71ffc173a609b3c3061b3f3c3295566b150b0c5c966b077bf32bac50a3c66a55710c7585cd910baec365ddcf4240614c8ee334a7bdbaf23a0e18e8f7deee9ff3dfb88adcddaee33aee5a30062fdb6c54a5b93a6bad73e6405f1f5569b4364d180d734d1d50f9c25d41e159862baa316104ba7d8cf30a8dfb231eddc680b7691feb8b2d4ee13e97a7084602dd9af65a54a5b999db027dc93ae7ac37fe1c3e5cff69da6ada37c11872016c0cd9e5b29546dea1dfc79fb5abeb32075d63ecdf93877eb3056ed7250ed8be59a24a0faf701d650d04af86e338cd699006afc62b6b5dd5d8dba1bc8d3af1d59717ae69abc1061eb52837589705d2f1737a478c868ac567ad57f10aff4e013f2b0de352a77bfc03284d5c411b2fe912ec1334ac0c2a0f4fadd7af2b9f05475570ef3baf1c6148f5ae5d35a6d3ef7d9a04bdd779dd5790f79a4c758d5ed6a97152bd2b358a6df7dd7ddcbd462fcb2b61c8dfd9aeb49f039f79894c02418b7cf7eb4b1dd57f8d7d28b30f3e99fc19d723befa32fa00c73807e0ab2fa31b9e5f675df3d59aef2a7fad01ffea03d7b906c2d716e107ee95078b9f6eba6612be9ab30e18c05765f76a735005b75f5d35c6e21eb0a56164c5c88a9115232b46568cac185931b26264c5c80a11091109110911091109521252925113396c1a638c31c61ac735f5abf4cf8240a215b4c7438fbadcec4818cb711cc75d7e4d8d390e5f72cfaec9734d8d31d67a764dad675acfd47214df37e2814d7acdbad36b7cf66bb514f8eacb5923eddc635cebe79c73ce39679c31ce18e33a42f5af0cc54a8f77d5cb99047b0a4cdab93f374c906baac02451571c4c2cc5879f293485a6b84fbd702bf4dca7b84fe5dcd6f7dca7825c562ac835c954ce4dbdc8547acd9d6b72dfd2fb77537abd2c9c52b9650a9631f75fe5ca9fd2e79a5df0d2a77cc2cd80e5f7646672649f54e7f259794c1cb76adea4c6d1285ee50fe5239d8b752b3e4047527a3a17c3ad15db5d71cff1155be742b1cefdd7fde0a37326262bb61f3af71d49ce6a0c974299a4d06b721fa3053ca350ab31dcc748829744c8f4943ea0f294494244e738999e1ac3e53f67c03a37035669469dfb19b14a933bf733b89ff1e4b246d0572628d42a4da9730fba3030b2ce717fff848975ee0a725669bce7729aea35d29424be2fe5073f5f7a4a0878ad2b7b71d65ce78142df065354b6ada2501422a179a66b86a0b5365cf11cb062bae608cc398f484fbc28249124de6ebe2ec49258e264209392a964d24ea4f4c0c46262b94511d330ef9b82e5c472ba15b9c1bcd3781af1cd93dbedd6733bc297cc60ef3da2b47879266b5251515149b58cc26871697151d9e245c97f48465ad4b4ccf42e1f03c160f2430b24d974cd959595952771f20809b9492412892cb7787d45ca5d2a954aa5cadb4db0b0b0b0a85ebe188ce338bec488651b0e2163868c195bacd4669ecc9bb15d5c5c5c669833cf5c99ab25643056db7271c256a954aa13504061c7881123060a4e6c19a4c5a41093421164450822bbc55c5e0a3452a011d3020c0f5482860c0d990c44a6864c8deec56b00a006006e68a783444e0107800a00f854c84060fca6820d1554d0af2637dc13d33569d0a041c3860d2dbc1bbb468d1a356edcb83df1401efb0969cdb45a20da1612eaccac3033d32b4f135a5f81668515f692cd8f785acc9a40c1028586051a4bd6afa0ce35c6898f1314989ce0784d7e3e235ccf0b1670b0c0821638b8571e2d6e32901f29c1d1020e1c4560f2f5cad3040631ee49365914dc5e672248010eb98516401b5d636a4c1190f473f338e9c16c0b262123324e8638d9393580579e2d66ddeb95678b4a5369aa29009b0516586021000110957061b7d0420b2db8e002c90439b60b2eb8007201e7f01cc02bcf1619543dd3352b8d14626050386165ce90492980ebab2f678c4fc7d703c17defd59ae346dcf6401ef89ed51de777779cbef9e65247cc99bbdcbd57670fcc2497d18d2a6087d5598336609ed5a8a40c7638412ad5cc08000000b315000020140a06448201899ea58198dc0314000d74925074569849e44990e3288a428818400c21ca18024000684668c641004d45f237f4d674b779a477583b0691b499f681152a71519ce4f8e4b85b2e6a4743248b01adf5638698b39b6626e50aaf0e092c6c970dbd63109436d4f959881282f0ee6105a5b1bcbe41da753aaafd201e64195d2ccd483ea98529e25ca32edf87cb427480bd8efe55008a6f80ed8851c314a38aaec8b647d7076eef45dfc2f12aa59fb169373fe820fa612999167658e3a63b8e9c1303e6e7b7eec16443811b4378431c3494123f1320a87195dde569f2896bd3b04af380953731aeb41c6e63ffe6941fde280131581244f01
722474a052d0bfcc4b22d5e8b0f7fec8a33f0db84c8a83646b4aa37da8a5cd3103e724f96512d00c45e20601d1b6aebfd5546e99987b2d88705a39d967d48bd8b5ff24dc5c0ef2ee322f680d9a64cf88f9c8c80e10aff397924b661f8711f1478d9f7173a9b3d0b78102c47ed2f6d202d54f9941ad4eec06960ea6f002be2f51cb14554be43dafc902e55cb6b2853707b9206d680016214aec191fa77c41f14db707afe17882ce89a09f3288c991090a6926099243ec50786aca515fb26518b2320e5228114c575b28ef9267f5862b92ba9a0c472a384cf0d84c96ee256a076b45f2f338691d04371264153e39209fb880c36a340a9ecdb831f702d65532aa655ba91e3212762478738b29e767b6dde970b084d6cf46b87945b8a171f687436a0fdcc6e4073b75a734870d974eb72d9005438e6a424a25f0fda0292f8782056499284791a78392b4c5561db6ef489360b94ceb29820d2bf87c4a2d2c557c71c810b23ee1fc9b37085eab5787330a40c71a126efba5492cc55ebe58918b0ea2535d50399a273582939f353408f258058378038933104051504b2002ba6d6411c562fe7e45130b9bed7c09611a1f1e0e81980d577ed6ef49d0c3431be5d382e7581df5c000c5c083dd74bb113de3a7eb62c0ee14e9e7d82517be26a3442d9049ad6b827a4b9a753554d25ee647743b02f914b3da1443adef98274ef47ec2d4970fb5e8fe2f616697b3fd2cc5e64740383683e593bdcb3976cfd259adb9509dfb4196f64d137b26d06ce80ce801ef20c1a43d0201c8d3103baa6dc50d2c71ab742a1c29d4f5f2203dc6b2f202a1c9a8c801077762b3a397f569910a94ea644fc260ad825c0ddc1eaded84d6205cbf645c7251c46b613fd132f1739b8f9d48ef70461aa03fd3e59d5a806b5d648ecebe997f428a0fc1c1fcecb636b0c05449140575963dd1d244f200555c0165cb69acb94dffcadb7cb6071699eeedb73a521a7c65a641f7836875d3a2ba9f43ecab5d053e690d1c3cdb2e5394895184b02d0a0f8e98416dfe6160ca1f462b4a102b44f5a648b3468ed860f2b982e4df3b79da81ca450c2368105d4aaa896c2994f06c6ac67e23c3094af15154341600d502736403da23bbae80e822880c1f626f193391651dcc5ff41c79a4fb94e1981166b7075ccbae73512ff558cc57d028d24945e76497fba0651d9225c1385c62e6033dad6ae7dc995d57978daa4e9091498811accb4fce4d8a3596783eca7ef52e1daf0133e212277d49dedbee304f65878229b011ad5e3afab3f4562c38049d353e84713f7dfc1ba0f882de3812c76036a24fa7e31116f9949b008c328baea9e8fba20c57658210ad66d53c03e8032044bfcf114bc321d248fa19916bca2ef6bc85f10baae0871e132753422d5d1cb109a2f2bb99a991c403d3ecc5892b9cad4852ccd324744612363fe3d1f44594befa9539a6a1ff3b888dcd65a6d3a2bdf5c6c57cb4fb64f1ef407635e398db879d23a34a7e0e4107ac43fb639a8f60b5303bfe58c1808931f3075c0985e23d51edc4e477ecfae08ab6ca591139c0be92fa2d54ed669a820b0700bd1db345b0fda74e47747541261ba62c4d84b519f18717c76051b10511a2be2d7e81b977f116cb9c78163a82df4c76f3a8cb8de50491baed2a6d540c22888424619987ffc51141c6cd423ee0eeaf60cead497d794a6f024998ff0cf8a4668205d8c134e305dde30a9e7c4c3e2931b3da4c1200ae2d0c15e137852e64393f927f5c8f62854822529c9630948f135a3043ac89dff58711a4686017abd872b453a36413ce23b3791730523d3f485a145f9a18ef1503df17b24f0c9e4d90af09df35dede149e0bc07df610cc378fc2efe23deb18f69bb77e0f03ec2b83c5779c2674185b95d138f296a7b7c0c2feef33e170449885773414c1a2c82d938177be1a324b94b46f73a2e99ae15fe20189ffc7ff8a7b63b7a42f94eb4248e44e6f4bc90184be45b992107fc61428848120a9cc8a80595e67c15eade9b1f36d0d8f72eeb4f00a9704114b346c5b030f90701bc519d1334f09ce48b0f411d22aae237a31038b8943ea8b6f656c86c4f765856e350c0e75b7345066a9f25cbbada174478a60b1b5469120a992fb6bc1d0670e7d3bf84c480ab7ce0e95080fa07f7db966230a512d87927d09e1ed29c39aa5985e1101eacf7dfa095d4a6dd126b1cc899e63c5f348ec5b2436ccbc0c009d3f9e0922d57342436ca2756e3be64cf8705e4ba4825f3ab2164d69b191988650e4bb078502fddb6be526eff1df04e356cb70799e9a9e32ea662257e9954ff92a183b689bfc9a3f440338d45049ef47da4e7451318abc01c48a16f0313a26042a4fc52f87b60f3ab57864cf0be7ec4bee0e3edbe425e7c9a2f2e1e7ae92dea4202575287df96bff5e854faea9c59ef0b16bf103e82f98593c65ce38d8e1de1574218f0d54bdd6b90cd82b55c9d2462f84bc2553717815d9aa27342f79c07a0e3951801
3bd24c4ee6d0f2988d2f7b7800f9e79cddf04bd85d7050a0ce599ca76c41898967f12347c3971180cc38ee69037636e6571e6d38ce1018e0a887019adcb69c75959b5942e8085a6fcc2b6e4c4f24747596309fab55ddf270aba4b705102ea3ce02ae3bff4bf1ba9421ebe75f91d1e617823adde607613be14531a2f7950fb19579e6c1d02431850ef4800b53916c6353b456c604f652a2bd8a61675559821e1ba54c223ebb80bb3a2cfd74f000c286b2462a73563506fa6561c0c3fca19503039a5b21d46a14748f2b4693f3258a0023b7da4845e314416b26a5ce6c9352aa3aeef62eff1f32905c564ea943f2005e9e5d21554a5c3f275cdea10e5494653dcd590c411673ed0ec04f99cce31dcaa38e1170f0a4b46f75302573c795447bd53de29a37a6e8231b6b64cc9f455e0fec5ea05db01fb5039db09286d35c2b425acd06a78fd2536ad421fb9ca79b1623604d208ea5408e766448b7ab4c650d15457adc54ff811436a01e44313da42bd72216897bbc0ace300c0286382f536816b3a41d839aa84c0565bfcb50e298ed88c5656359b2e3fbba2b447618558bf678de86620c7a89016758210c34e89c11034965004bb544ee911d40ce71d61dd82e5f42046e48ee8c18271ebd11b05ab4a563dc45230a9afc8d4f8c8c68c634d999b26397fe95db1eff5e3de9ccb656a85b2d7410324d66361dd6c40a933ba5ce40ac30dad4447ba1d573b6a7d7af4223695624d732cc7aacc4493487fce4511143cf8ae56cab2bc3342eaec230c3aefa4f1739e9045d9afe891c8c428076d6ed6a198e2d05f705128dcc602579e602c7f4646364e9fdf2dda4d82b9b2797e09e164bbbbd296f1b4626e4bd3b5dd48163ca309a908e0c703638bc241f9f3a32dcb61a5452696b0bb6858fc85290268bc6d7888ad71cfc17c2ba8f58f3db2e15c9db28927742f95f24e7485018b2dabc18f0a252d60a6df770711933cb9b319a216bc8d02dc68aed526f22e928032408b75ce4594df6acc4f59b0532a92a1507ed568dc9a82a069586cecdd950d194699d62a64e2d4b2fbadc5644a2b7901e8ac57a8207c1fbb6e10cc1c2a038978b7f5390fd2d66af54a136f290e605a92c0394b94bb6458dabe88d574e7f47ce01fa0936b62dfa600f84fcb6b79bcd5362252c610a18868a8900f846711054115e6780079bbb78a7e59ac70103798c3f8cdc58ab2623822a5a89997afd130eb3297aec3e02107cff6aa0defc74889524418d3cc1c4f965db33b16c616f0877a20feb8821b1c2e717be7fc74cd3bd166cbda09640297ea224b4efa24670b319135f4c264779000c958bf2df634fbb264810b843db0b0f5d993b8d45aee47c2e9ad34b17a59adb8320c664d3602e2a082993b3d84221e609fff0950610d6459b2a5d0656ee14bde3a58498728ce2dc20a1d34d75bd75b96737b9776300656fbe95d66112bab9697c490417a4c3234242a7d452802def9f0fd664447094aaa935f79e89b2791bf10ed6402baf146e968725e16b585833ff3ee7f92f68f482d53686f7acdc1e405187e00d12f21c19c91ee542084620e82a17c7d41ac31c0060159ab9ae4b577022b4f30227f36658a12c7b5fc779a38f30de425bbb26877446e1deae07b753610f57fe58eaa5e2211ccfad7770fc9d67e35a3f4777ce0f7a6e51570fda94b19d23cf5954016d0bbc05d801b44a017a2f228e6d9c047a2e51c7377626e8142a2fc638f51f34c781e969420c2257153b1570ed80b63824b09ec4d279102858bd56f3ffd111706a9b91fa2b0b07f02f0e37586112fbe47d35c8e68651b7be526a8164742b467d11def594c3b1ebad65e38b55025db90894f0d7e4ef1570910228c8772ca0a8ed5ed9b4a29ca28510d78fcf1751445f6b6ad9e8ba1c0ede7db1c7145c890dd2c420e66917ada17fa953856c308bba1b732fe8813da0d4716f9e1bae3aae8d158d95137212869af7f324e94a8ea93d69355a2663e8310fab0caa9dcda1c77731cee80c8b1235477de491beb850e49de4ab06208fadf7b0f8ccbe8187c63c61fc48b29194029e6c7ea972b397642ae88897314a1dfba4a1e901321d3689ece08960d870502683c32011c215cee909e263cc2125611a07ed50aa9d357e1446c120e895263c461ebe28d11870fe3f156b30300c299d7ea214719735222be949b8c233af2d14fb3340b244cea806e9c5beba133df42ca7ef587335b2005771b85026628bd499262c1abfb482f59f5005ef5bea154cf98160051b4566f2069be72684d9c9dedab219dabb704e4e69d47e4c308e2fbcaf48651656adebd3b447998ba624c01fa4349b6ca141b14440020f399d6f83d474485e96c18be6cac725374a534539d8bcb6f68d95df306ae9ab39d38b4990b91bc0d7f6c3dcb828421921e2218a15756cee8233803e5ede3c571894842ce979f3424e3d5bfc33dae7617e4e40302d24b3632fabb84dff62d4532e533c87873009e87d44ec75fafb2c4a76c
2a280a75553f11bbdb42742a253302572b4c31ba65f3511474e280d63f7bc08d599f6e713f771e790967488c1998356d8bc9095b980e092d8aae020e2ab147e7f57163895561cecdf1506098e5402629a2c089fdf950af6c3c96d0f27050de279f67b3b4acc63798d493b485c321e4909c6215c263260598719dce18d2e766dbb942e334cf212e0049c5dd970fc487947a638a7bf39c055f4340cc9e8b603e841e9961682944ba26d2118df315970544ff3e277602d383d30a0a4f303c009c9877fd3a27139656b5258d7bf1e31a9656686110b002d44a0ddcc75400300b15d7397cc541029ed2004471e184dc66f01628eb8a24c3b909601e781549b3109f443773e6d4f9844491488ab110b82855c9e450200719efa05ffada5c47be8b32c5cdac1980ffbe1fecd811ebd4d69e2c8d8060da5cb8649a45e634b176f97ed1081e1673996231d87004b991c8362ffb6a7db95da8dc7b626e3acdec8222d92625b141ce2b0eba7d8bbf232a3e598fd29903f57ff57f8c93f2dfc88a1da5d8f81250b58b28e05cb58638d359658606d5845c42cb063083ca429fb8d4ffa1ec77215721a167a85871e81314c5af30387481580eade3efd08bc18d01ff355e909c07f23ac0ee962e13ac86348a7918cdfe2150b70046f244bbbc32a1c0c4ce7906bcc5986a2c1384bfc980cd442881426c0fb7f87141085c0636be2318263a2bcfbb10578fb03ea369cb19c9ad379fd014ab39ce3c47a8a3855f3d970cf5a82d07e95aceb5a8e85185c595f6da70aecc10b738797edd39d867d483e665ec5fa2a71eefea558f429f5f6ff5372219a72bd2953af7add9386e9ee7536bca752e16b95dd7fdb8e0a0133ded54b47c965c88e4c18204d875a13116bc70185bdafdcf2b3b9498d92c4f8b24f19ae980fd655a033c0e8bdb4c7c6f14472b806e84c5bd6a003ddb95943d0670b17b07adafcfd8203a43d2dcc77eef4652259a4acd7ae0904958cd6439ac90ce0a3cd36d286bdd1cd4f238ce020ff32f0175458e884c30402f60f80aa9910fcb2796cda51a1ff18995141eab871cfb12b86110090ffabb7d853026fad827888dc9e1b6f5e45bf07f88f3b1a6130607c7d60a991c35990c53c4e16ab74ba8751c08bfc355edca9178131e8eb6c1f1b76d45cd61d08904929b8807c9e5d353df92130fd7d2514da5b7970511d1be8b60d098aa10a0b33d804d3b27e6fdc52959e26f4cdbd62f43342b304448a1518e398e677a0511d408d59c1220145aa30a6083eef48ad8a04584b63cf4bae50f03f529f818d9695ee0f3945f6d52c27a5fce2bc8eabc78277e2597707e37ddb4e8c6fbd26673a72c85ca6217190cfbf924c807a7e3f270bdccc41420116eb67d348c85d9965a2a1c8fce6ebda96dcabe48eb630c1d941265f77100489ea8d415323bf78a1a923bba243c965fdda907ee1fbe978742ca380854d0d4c96e0be6ab9bb2b65f748524d26d8bdb7ea98f860ef9ef1d608233898af7004c164ba43865db47d6fdab31621bb48204649841050b34400d9de154d8ca83e023896fd7fa2f0a0c433f9acdf6280cc72cab8106e93b87a14ce1ab744260f90190db2106731855614701ea3c7168342fd0638936d70cfb8f67e72f67a091b8da47143df929e08a2260a3ba4bc9c4a1cc1cbdc498d69de77254909cce41219841379e78f7e2d30cbc21744d0cf7d257e5ebc95988c88fdc539c5d1ac24f16dfc477a2726344793340252e05dc8045c884a0f70c0041a4dd1921c00324384dc6c1f539203c36a6b2a0bd595239fb791c94807401b89646c7547c2d2ba36e99a1d17a4e0a0455141e77b041991471f4266d6abe8b7fb6dbbbb866165469bd56bd3ae358ec71273d3bab8113fa2627151d28b85bf5b484b98cc3fae651646345525a9c3a165ca63bb4351cb3e2750320dbedee971b6a13ace50f9c816bdac2c42d87a70a1fdbe8ba2fb37eb088936d0e01310fd352ebb7c17f10372e196b8d5c4268fa52d8818b4923b72a9f88b627a6fa4240031c93fdc314a4d6e2e4d38e0a518773fdaa0d4a411e9c0ed4ca97a0c1de009d9fca920085aa2811a3c606b7a47d9e1043776fb139d7ca08b20939d26706c8abac48911beb4ee136be89e98fc1646cfab8c815399ad5371b6910861b72bbf3d95f7ab24e47d885c1396234c53c55ba083a81a997054da4484929b62f32f12a9a5bc1b786d380e2d4bb4e1140eed9c4559411959ee6a3ce724c6776eddbd46154ad41036886e8ad19f503b85a69cf23f080016f93356f88b741e82034a21a798427fd8a87dd8d36f8d100eaa438d2ab05015c43b2e4ea015d78a4d5c48a59c3265c819a97b1923d74eff97322b1bf14c6b05c14a08f31f134567577fa6c88cb53a3c433f5ccf5d5aa703e10513fb44353640c37bdde9fa79d1a8020c811da411de2df16a3dcdd3736b3ff1f9ba5e4961b248bf71400bf9a91f187d6ea24f72604b2369afeb05b3af13adb02705e7afcdfa6fee
38a102f5f5a2266ecaa4b9d80317ed1bb95ae8f47b539ea3a54144fc815bc68d49ea232641dc7650c24a2f93508bc48e9ccc5750d24a307ad694bcbd8992e5e4de97cc4eeef6ad5d020e051aab1f4cf268507ece609aa1035cb58a71c823193ed9010a1c86b6168f6c06f173d4d34fd9ca1f8608808b394578cdc3a256557c580bd7fcaf40c41b84c5a211f047e2c09110e6158af96b273acc6674656417dbbde1f4c79ef68e38a2ae7b184789b33f04da185b2802f1324b0bbe3eb16ee2c8d213953545d157c2773ffa75712cc16d0c9185002c14781f23609c8d2a27955c1e6536a7b1bf84054bd4a21d1c1d4127e109c81408add52892f5c7d57e6c9773564d30026202433fd762279c3e6c7ce47a618fe1cb47385300b65a1a75ab0963aa07c8ccc681c9f614efd2bf0858bdaee7858bfed316cc736b713f87b7f7008ab04850ac3b7275a3f79189abd4f1b9ba2a9d238e179f870ef03240ff893760b04d18fa4a420748e32046ec71a03bc4239c0b0c966c51d664e7834dcdce539014f7d0741e41c2d3e73bd69ce2132e01c0a949d6e1157bc0f5128013cbec60e1b6d9d171bdfca5a732b6918fb04768be422a83056847379cdf67e79c803b7f280f556709ff34ab5f4c7067a9771712523a8b1a14338ce1c00fd58f4ee25e568aa16ca9ccf58ad0944c53769c7c73a3aee6c0a1c29b7b303d8e652f5edbea40bb0ec74cf21751d73e66176ab959377983499d671a61107b6aec173be87e9a670d58e6db86b61d9f2841518748ce7e254b3019a4fae53b4eef04d9270cf127daad677482ac71a6cf974d9bd9497fe202dd7ed8480f93323b1812f942116171e80b76baa16bf4d009e1e1b8d6d8b7f47f98def075a3a24b476dbd01725b2c1a27420aad62612e2004266224fa73714acb038b1290f77724a14a0ca2c2a11c5ef556a413a484086ff5cb398d081e2e35224f0fc7b277d6a2e144ecd66b61031ae6d34f2bb63a0967ab89e89c590f36505b8d9873e1bd3066a869a5fb3016f0128f5385b238f019deda700d00268a161b38a021c8f7e33326566d485306128475c0cd40458e650807f01ad57615629bc7102139c302ee12e666981139cc646d37e363ff326b6eb4f22e5467ab1cf6af522aab6d5c0820bee3f33854d2d9ca47499b14bbc3aba5d61683e91281f585119ca0d120f082adec3bb625084b5eae7ea77c30973635043735a3871b04cba42951c44250b2d7d997f7efbc0127ac35a24439216de76d4cbdc09ea7cec9bceed29996c71e834f26eefb9c71e163e7853a3b1eb0bdf3f900f6f3c67f4e6e3561c6b4a95f32b7484f5ee02f0c4009bf53b132a599d0c59edad9d58c9978f37967cc3efda69846ae86f6beb459a5e55b8fcf262a86205243e10a838eecfb7d267c67c21964d640eadc0ed7fcff69dee506b0712dfc49f1b2228ca8da3d47b270e159c03fcd395bbfa6a0c98395825241f79fd53b5409e55866ac886ef5ebad40a9f56255d7093782ae92a7282ccb0742b911b9b0281c69a8b7c1ea1ea1c8437f1599c64992e42598911a7c7a2c45199cb8f065bc6de856ad963235172f4ea06c0a396a953a9ccab31fa07b8bb66fb8c9167f5818b41a2fe2eb2c66bf25e0d9f1d88670b713e7eb890c19034b3832edd9331fbd31ab26d46c04a43912516e3857785ffc053d1642109282c98d273e0e0cd2fdf0d7bcfb14b107fd2a2e23f521564a183e0904458a90efb9496f29537447ad51e677ae9a6bfe415e0046d8936da33477383530e8869d27974c560b70137ab304d51d8540e37ad0d54e9b66e21bdd5d054d267b12b56448883b63c4314248c945f86c4e98f2388f3740be1c33160deea59c59783bd01b7604965ea85a89ff121a1d0864a51c300ae7d7904abed21662d6dc0e82f1fb73b63fe1ad752052b9f356254ca360a0df11211617527d645a5c2982725dfda7e37fafd609c39488aeb100f292c5a91421dab3af5249c4cad932b831218cd999390f418df42e42fe13581816a9c87fabfd8db4d1f5b40c27c0d190fa67562557d43a6bb7a0006ce61b94b330c376b08b6298daec07c93dfbaa6ce4271011c8169399f905e3481d14a686500aa3d7f7d4905e3d2848bcf7d1b044ae79d8a35670fedf19552f760b29e00d49d17124dac6f898188a12ec4181ce849b27e3fe6c7c2a1991a902f46c294886d4ded80e1843d59ac312d1296cbc40b0c1fd2736547f2ee224d8ce40452d13f3bed42e25f845680c32413e6543039066b00dc5d044cec66588282f33394dde2342f086c653353d72d3a346476cba0a534681937c713f33c847e449efb60f0dd3b564187c223e8b1d59f04c514177485c11a62f1849488081010ea332312ca7813b60346f98e9fc53e90f89d4a60a605cc318969c930ddcedad297edb2ed2fc1054251bddf40e75aa6aced7110d69367fd2e378e4ff30b8879e8dd06e98cc6684703d5fc300dfd96bf67e0a66dc2a6ea22560b0e595cd5ae
117d0d431ab2041eb7ae14d105b2455e0d8a1f3c06b4d96ab90ff84fd6b2e14e68a716564660fc278de1bb53e29026e9420d891a421eb7f21e4d3c86f6859c991ed9db012b297ec9344a7e8f3b0f6a30f53b17f2bf7192737e94ce492e17e5249ddc717e25df06e4e187111677f7d515a39b54bfd163f511c5d525316ce3e8b0a65b7a0f85909a853768d8c57bfb9d81f82114f319d988a68960961064247155414666dd0011290a98c185e09bdc74763995b5a22e9807e2420569b82520382c676798412e819cbf001cb68d96fd0a80ce7be682ceab669134d5687f23499c027323b1de5ff05e6026024d726424d8ba39b7387c6bc0e5a0430f94f6148040ed2e5a387ca8020ab6d1f2cb525ce1f17200b99e9707511b6fb9d93c0ec8686694cd544e9de53701bca65aa9c03e9ebc22853de7c56dc32f1befe1bba01fe83ae7a265100548c4da3cbfd84bdeb6e4f1a1223cd37325612ed4575dd299864841376fc00389fc6dcc625f276d33d531ac0462d8953a5ee9633aca1df73f61be89779f3ea286193aaa2a2f45f8ea320e0300e5106aa2343ed5d5f90125080760e5b261187e100f435250be8c6ad25462dbf58fa3a3782c57066aefd6388695c4bbf88db7e2c5772912037156cca2c7769da005d4d23d8a05cb121aceec87a705a7d87506266a880e2bbe39173d3bb2ebcd0490dc28f39fa4d6e5e147b177af174b0b7e7df31fe941dcfcefc336aa2bd7cbc51f2819cf4e60abf9f850a8cfea60c7e49091c15ff0c2842a7abe8acc150aa12751b957ab3585602813f6bbec11245aad8a47f5897316d3004c6fdfe3ac689d220e8f46d536b6072c33dc56fe3ce6e009b3b64bf08c71d48f191e8559b61fc8e308873f377d6c24a1582fc6c2866b073e51eb6fa02cdfe2959fd98af0a06635264da82ab07db987932f1384b3061f74b67cca7551eb9944a6d1ef8d35cef13763c28dcd7547aeeb78e6c2386e2093de9dfa5c0ca41772dd9b4a011b74d44d463149a8bee34007cf500a9f2bfd1aa040f2a11a999887c34ad9829824f29a8b3dc09112215f7d8777c91d8dada0bd0eff107561f82260d0afaa7e99503cb7ee31c9c6958f145c64d3770c650810d765a20f4fa788b6e00d7d1d364151535b2fae02a6f816fad99f4fe3af15a554c9b1d124daa6f7f3d1a0201deb1303e1866423943c4a5043458474ffa778d7fbf4da2528737bd23c67dcdf6c54a54eecbac8b8dd1304b74d16c2cf9640ad15261d5bae7056b2ea931fd716acf1efe94929c02f4fd2a2ab93040117da256f4d32985a07a31328adefc383267205d85460f3b63f3906f9703b79cf604eaeb6b4cb6a450a58fe25569eaad1fcbeb371749b7e5d7a1b200e38b0dfa769261a040aee5ebdf4d8c9621a4424f146d21fd408dbed4c6b75c698bc568ff0c2cbab2c36894dde47f29fa6c74a632223115a93144dd4566aec330ac421a544e488ed3ceb4ea6d4631f36207d05118ff3c3773d757ba90550c511386cf89f2b2d73b83032c0ce314e1cfddbd743cf325252da681f7d4d695c5309ef56b5109d32b1b2078c5e19e5084821850b824b44da426917a8d0564c04149501afd47a32f049ddee62ea4789e90774ed2c49047c147d805997e0e01fa224b13bf683c369d3ebfce966a5ae53bc2cd27900f39a736734a0506880660b36fbd788e2240efda1116080caa09d88159abd68e7a08362571086c1016bd547843f8feec085961d7d1666ee7a67842a4d10f9e6d259e2de3fe4a97ad8dcdedfacf482e682fb80d26b3a5479844a8f6d3d804ee01fbd3c5ca988b184873b7ee3e1ffde24b34744f721693c6f92776af46010cde4d423fa8714b7b5be8f2cf8c24c06b80c1912cf4b02a78100ad4e6089b064f57521da2a4fd5d36515de5d4d12e0d61cfb8c509eccf5081c38230ee8746e086529689c4c3c5aba6155de42489356592001d21fe9fd80449656f6162e9e9d8d5503bc298a51dce4677f731004ca9c7827f0c7aad57406daa1d3959d1f0ba12b08eb757b909d2de289f6885ea9da8f484e782eec7f177bf0676f4c5b62b693a98738a38db79a8b996dad707c79935f0b69fd6b8392a4ed761ce3c5a9e488da83b4d4193b03474807c05d7b1d40999ecbe968d254f30181a1190d4f47d3e1688c7a15cde2af29b055ca8c018e38c1b46109cdc914006ea85e819a9940f3292e59b094ba70889d86f82a3494a35b0d36946a8fe50a9d19390e05736d20bdf78e03931676bf167661fd334dcdd768c6d3d389eaea29a6c0c9ad7e3b22d814244c9be1da04c76f6b734d5801854c866608d565c27b532fe235324948ba665d7581a63ae4c74edbe239a4c299668634eb6f9b0debcfa1740364d05b8b6dfbbfb3ad59f90164a6c4e25d1079e218be9a8d90f4f975e11a40231ad31f0a84193adf9a586624ede8fde2f82930a034ef9bf78e5302465132301587cc224a1c933ba1fb15e3e06e64d354b80083a5aec0df16677cb30
8aed3ecb4127c8fbe2bbb75b6d74bc80595c843b44ef0cb97c1fbbe2021c2e0abcbbf6387bc26204f06f0b54b5f366da6894476a326899fb2f054fdb7322ae7acccc9f98519bd0ee26383a26494f72bd4c3a9c29df189fcbf22e970e0ff452e23eb14fd572066fc21d78306a6d7791ff970ff3439012ecb68a8172e4259929826a70aea7f481f67821cace8ac89d24bc674dfcb0eda7bb6719f65255199012b2a0fa261b43e6092aa89df42f50fd8719f80d65f8bf2d44fb9d255c0d675a8bb559a3e00fc239f6dfa1ae13fdd90254aeecd439a47f160c5f3ce4967f909769996f0a2f605642b3ceabbe8d4b5c35b304588cde22d8315e5a6c25513d55fc7badf020e44620c11885955597b75b322b38099ed792c45ad7a800974c83478639ed74817a829a6b3e3cbd93efb3369414890c21a2cfdd138d233c5c3c86819ac4946c183411815c800750012aa8975f21637f80c5ca9820c9031dbef41018ebe665d94989022d5dfc4cbb48cc5304a37b3d0346c2206a6923713a9b5861090ec766004b062763117437282185909c46607920d650369b2c6ea4a60176b84195b3b466b48ac0c0954a27aadfa43141ee41a623fbf7973fa7abc3fd39aeb0da67e375b560ad173a81c549642c38b60feca4f5af5a22d2c0c052d957b26a9114d603ac1d72949f16e22b5a7e50b1fa8f41b5e3335ff344a7ca275fe532bf69c59cc7088259dfc3c70b84447a1820631a44acec47c17475ca1d3e4f66c5040bcf20116ec46d6be440599ff79e595459c025144d068d7e99a99498eca2b3cb0fe1ebbd680441e38b012c9107c2a0a4f0986db80ca43dc29169cbf6ea652067996e0fb18a593e72fd946a31b5a96d6e13cbb289e9b0363e06d2787136a20934898bf9794754014809172f12c3b188849dcf440b2c71b86c74aa9b6cf422be1f4b2c5959e701861d0e0152384f8469cc383c767a27aeabf820251249ac1dee2bd776c4e76fc5c9f52d33e557b2a4d2014d75b0970c737e309cadb5328f2cdec42756402f26ea47e9442074bb7d1e68d1dc0130939bd5553ff3dd19d6a891795fd0ab5b450ec39c3f2e73e6c3598ce56aa5a70046b8f76313ceedae0f41c1138eb5d0b2262e84078465e65299098ad4572406314dd45d2398c0b1320fda86f30b05adebc45607328145457364660a6a9907a12789480a8e0d9904e5faa5f489be67d18493ad1993be93621cebebc13b5f1d1647ee2d276de5521e6a7ec6f51c645558e5d5b30cec18cc57a321c0ea8c4cf64295eac0bd343625cf27e48626ab847c61ef6fed9341490f20a6ebda6db61b0bdc57600b9a0292bdd4a116ecc1bce08c1a4f104a21f072ef00d17606449a2095d2c45a157573acd79ca63d812ef1eee0a1f946bb511cbd042c5bee9de85f765186dde72d07766f78d3d34f050597d1f0fd7bd1266d8212778f48ab5cc12f1db670f083d33d90b989fed56e9c7c90950d5f44b5439c0eae9729be93e96ba3158dc50d9b4165c13778c497defc7303d0168018ab4e5fcab976cf19b94395c3f782181ca596adf6c9224a71cd085102231fc997ad06d3a5a4cea30063baf0d2a62dad5ba7cfe73adced2d2c90155a18924c3b1092eac582421a705f7261d6834cbdd97932f04fa78d390a43053f33e41d56c1dbe387396300b368f428cc59a3e0c8b0ef87f992b537ad644c422e55c24f8dbd6ff740c2e15c655917d74b04bc85c0a83c56c6fb06fe208a445510beb4bdad52a40963b3d590be07a4c9485e4e5844c59ee5bbd09d326fe29dcad0d0e0ed05aee42da68e5092f3ee3ad436950a929bef4539bc8f5bf1610f7ce38d95d2b1a93a2cfff75a330a852922087345bbf615ab986a51688a6406addc0ed5f03c980b63a4afdb9357e08dc8eeb1696747a1148f779669adc202828e278703436bace20337e90717c110d4b61136ecf94c9717cb333ff5ece9ee08c37ac3c50278baccf622d7927ace33704064acc5fc66bc9c5fba9f35bc137306b46a63de0d28dd95e11f2d96956d492c39bad242644418775c16352869ca5f80bc2745694d6242af24c5cb9d038db97b310bca45cc20185d0242496d4c48565ea456203078a4fc0ec64e64a3ec548ab1de996825a0438f2129b59135e5c5186c1f002bdf6fa6d9f1363962552075e0abd7ee4fc5a587a83b4364dff816f0fe143b649a1d7df5605320ced0dc6b3d19347dfe18d7f0199835fece7d9c86cfbfb5f7b4b9c4016821f5d2aa24085c001ae0e8ab2d9f30e61c746ec71cea7027c16b12ee449d26ac6e52f310331d58f0a5ca36f0227f99089db0b30531a33373d4e6c405c26f75770fa29e1d321ba8638c98c5987e4626b8588ad13e5ef8e2e31bde34682c0aa34c2be8b60a15c3cca23db2154e73e5399c80621ba192b8ca80917044b094641cf43ca5f737664ff36f0189ba654eb00a8529418297bf49d361f9434c6131a5298a0fe5db4b67295400a56307315487f2b39f0f33b405c3901
2668c39ef7f6e9a43fa59cb280e4b866332adf087d14ae9b5773800d170c3eaa01f3d35e32f408aab31b58525d349eb0c0e8f4c97b62cc1d5932412d098e6947b452fff9ded6a1a56a0111ffa4812fff117d6d4ba210af2b773ddd26e721a61153c3de58082c1e00002844b749bb652a3a624bbdf4718a93395b7a6265190a110a8f4b7a8c1dae729e4a35b9587ddaab465b107d2338f780a8f7192e09c5e69289e0450653c24faa6617461fa75fc73fb1558be986fbd0a636128c85953b957bd8d9296300e0eb954ee36e72ed60ee1e43f8e6dd6963cfc8f6e32a616928489138758ed0f9611c0eb98a88e84285176331b4ab0e99989b42be257b1d19f2a4801a1e641cb795f29b421b8e9d081fa56c4bd3efae4b13ab0f1bea7b85a8d425a3ca95663a5ad7a52f37ab0af765e9a5093f55d3e019ffdfff0180f0d2818150f74399b2a2efd7b6432f5a4614646f380d689fddbf60ef914e0b44a6f7d7d528be244536e6b6d17d3bc87e8ae5119a203ed875021b7e7fca0ddeb01a52cd6e4a246907eb598d3232712e2211d77565d393dd469693ae8b7f6a5cf325a7deff5298f38b2c191b47ef5fc6351aef20a742f13dae7e8f7ab937eff480a1381f4d4554cbca35f4614b707c2e3310b5979ec03d0405bec3a295ed7bd8cf016e6946312a74bb253e8b47085470e24061ee27ac9f43b74c44a5303609fcd2b6be03dd0ba2b70752654016958df71bc344af3f31b803d40b7236c4526b5255b86c72a60572ad80a5f6998173c021f0e7333d66216929bcc0c246969ada4955aad321a31304acb11a81455d777aef47c514a0beaf061e24dc2ed94048794ec3c5a9bec9b8d6fe76ad78ae24044fd6fac84acd796ccf710a7abd956aca4cbd7a24a22151e914ecba2162dcb5b8d3442528ce94d4d5c7e1e89b67b1d546be1c74a0fc06a0c04498c8b67505d2aab21342c84a5cfe936224aa71a1bd51ea284257ce1104672ad584a17040e90263ce628c2a5376ce45d18f729576314cabb534494466fa2df5c7210bc707c703c22c78932b3f8340151f88ff9c028b7f67fd904f680d17f55c70868609a758e3fef4d07c93c2fbd112ce0fd5e4cd596ce464f610480d4f73cec00cdeb8fcc5c4ccf204c27118462473de3e85b89505cbf5d08a72c620935cfe7453399f75c45821b99b64d7acf9cb9ca4ffb3fc10fd88032937badde58d4218027658913aab49d79bc65f864f94a0977b0a34943bb36f432053b4825cc65df4a97545efeba569052a495ff2a4d4538d40bc56a9f0704c7b80c67dfa52b94a62cff5bd1d29d8695b21030f1f9fee98d8c4b5b93e1e42bd52630c80ec87f9f40729ba5cee27814e3b6075ff5d654f0b93f3767ee4bf6654ad529735bed904190bfc5e97d3f2652414763024a3cc136e92764a1589dc1ccde22c5975e0e5b20b8b740cf6853b806565d12b9520ffacb859c06e4426d1ded073e95561727c46d960131f4206cd75a2814725afcda1825fa61b144d181ea9f8d43c9de5ea121c8098dac626e061b9eb62be86260e5213dd172b35ac829855b59eb9b5dcf3025a1219f58b24e2035ea7f61d64014ac2536720fe90f9d98f9246ae4703bc97ce21b830df9e5cd0289ec126f4942a710c2c3072b19e29842a7dc60ed08436a6d33580c0f68c4e33e48ece2c354cb2a0134b7d685c8a24bbdf2e4422713b72b537073085a6fa7c7b7d0f872e894a3cf3b60fd61ff0be38d3b58bde1f55f3b7d9964db85c0a3e95c286f6899dbf88920deaf9d0a90093f566f923d19f0b58c815275dcd7a284b8f6f7784414025b548d726d4b3f05a67f9c09dd1c66d24331cbe8d738101287b9272622346f96956073d08c5df87696b16029fd93ece548de6c72b3fe295f429959568967b1f2225937b818cc20e8253ef9b405468cbfd23f25109a37db9663176fd03ffde17bbb80bbd5e2262949a7ecfdd308fab406673c2a52006d3d60e50cb72f4d0e5e46eb517d538f00b11e123edd34d181d4f44fe262734d021511dbc677522b0dea7bbd1b42878c62a3fdd17034b2d03d5a98ac1f68501ce9bcc81b470a8f6ba0abe137fdc5af29aa3f486510027896ab21baea3ac6134a4585d611d5c2b8372c42e697a1939a88eeeb494bf0690c592e90c234625e8ed72194739753792523d795be7e4733a08a7363c8d08ae7bf2d146a47828802e2dc769d9b07dd34d951456d1527cd241b865a2d400c1035c840f3d74259e5c6915c6fdd9fa6c9e6e1ccad9464a36fff0e6a5605a58b3b7660263c94e35b75e78f8af24422bbe3048bb13cf2a1a2a6bbecc4c1a7a8a8941b9c8720dce78deec0ac38556f1d5fab83f3b1a1a242f584255e23ee4c67c9a93ef0c1aba6cc596088c8e43e5414df768357c7367df5ffa3088d4a328a1011ded1246cbae7ad6894c8088393d09f942370566d2cc41563b65fa25114e40ea51442fffe473e89353c489a25a7d8a34a79c81799a7067652363b504177e7e54b62
fac3e822fbf0ad35bddf71084a22e2be13a54eaa80f05acc97a9ff5d35e6a5c793603d4bd04aa5472127dcce509dcb9f9bf463cd91c1d4807fe9e5e989aedf7046c2381e028592474c51598cf149ee8eabba40b05a4316f8605fc8511c4d74eb645abdb918fc1b2aa54953a3d1251c0627a264815d4a82631e46c3bf66e074d84250ac5f1bd7fc6968b09692043ef573c1847f8c16f8d441e06b4aada16cd4a5b792ad3dc18c337dbfebe6b0c6e0bb64aa0f4b843aaf515638700d876b50b803f0be13036622a982a5e2001565f08359c2ee6215bccdb79f620e47b409c778cc1147fede915eb8842acf6545278938023b6006720b57f0802f9a504fd35819cfacde61c00a9ff2b718b5149f6f65434f0d4693474c42b528c49b5fc6552394070b6c858274e3a25010e109ab43b4a26dd22b70d6adff72344f5d55a9c3e73da594ad135f1d4edaa6a50fd5ef21d4f4a5c68c2cbf4b3bff5655087916f7783aae35a82c3bab2af7c8521f5ddcf412ba18852bb729a1356ca98dedb4bcb0fe71c805977cafba89e5da7e86b6dab5c1bdf68411ef17167cf3a719871402e4dc8f8f3444da7d746c66d3f6c3622f3dad72eb0d0b98f3cc090c54d00de6cdf2889680c940d841bfa21e4a8cc3a45efd3e6940206afba56456fd0216e63131efa281f7845d331b507d1ccb5a5701eb803afc545027b4d95c1f6564fc2b56e58b95a5904829ba1090bef28ecaf62eb3536a79d2b9e998b50cd7874020afec42203541518e969c61cad4c4c8665a25dc4bb4968edeb6512df8dcb85a15d1c4f9441c9d64514ac82102721f4cdaebf567a2fc7e37dddc628bb7334ea65e9f4cf792279aeaf6096d3b22385c5838e5f0ed219d85b1be6f4eef2fa959a75d2d4be9ae92400f8cc8c62b114ab36213429e02a0f1d07f181ac88de9b76f7011f16d0a9d8caf43574324239fe4491d86299faa1fb848e19ceae83dccf71fe7c3c6b7e5b0c3d4370f45049faf9403a8fa2aaef4402658e4bec62bbbaeb3e1b06392e67d9eea08dcbae4e9f64ad31197db66116c0bec7a595b288a9324db450825bbd12b684603a24117822d05afa2fbc39eb3382d58ee0c7452c43da3040da42269400758be391baf90861d3370a845c4730d8ae270f8e3bff94d60fecd961b75bb13e27097a59a082a958338d4c0792f5b93007c4d4f92400f0e83780731c983ec89bc0a4926346119e59e5f485fb4f7d567df0a63185f863501dd2a0b3be51a06a6321514ce4e7b682efeea68ce1a7dc27738deb3b2d8b90c7d567c824c21dd1bd9eeb2bbc528cbfb6b69546cbe93a14a7aa35fb4de91bac81c2ba3b92828aed16d1232220487c7c2ea1aa953a4fe469d6f3030ed17d1895f39ecc3f092390e43fbae83b43a7a5e1ab86a1ae2278d6091858e9bc754c9cc8dea319048a02adddc9345f2fda6a58a58ef64b78d2c7e24af3b791a9e2dcc8158c3c2dcb7e686ec73900e58d3ee95d2e34a30086d95be0f5eb4841809cf55689c096b31d6f4061c7ea13eec72706b332def3a47154f815198a2e0ba1e480de02839c343e4b79f7308b8d8681d86d7ef443772690bd7e9e7032b672e701a1b77a1f1c012814a8d4005f75dd937bafa0a26985eee3d3db041c22d2b8a0a436534f0f4c2c093c97e9f135850b66495e19c20bb8759c98236eddb2919099e9878419de9fc54a7b9907ebe81c0689007bf4ca459d1aa172628cbd32a1f1fed75f3506e979f0d45459595d379a2881819746a2336489d4bbfdbbd9ae03e1b18e10761c6052a2a56e284d304ba824b9892ddc021871b8776b841850381692935a850e5a10aec46f1f9dec850a14f02bfbf0aaec46dd4e6baad848215c2b2e15d83ca3898fcfa339e4cb37036b13e969cc012052b2ba1107e66cd596914515343b53b74a8d5de08f637b71178111c1b7e65aa17ebc5af6ef1271debb0c87454c91ecebb182fe33aa1530e56408c11fa37f5580f0c0b75952d7be916699d9d01185ed9ac7f36078d64f4d1eea2ce8e1878a8fc4ee5cd4eb702b92f7e860bd8f52760e116cbf6d9b568091c5b8c2e9602f266effc628b96dc3ab2dfeda7f6c326089c468f040853c3aa0f13d83d11a3d4e75053dbb152071515b9a283532158f64f4c652f374969e4436a4794ed8552865ad94dfa775352e2319e4b46d1d1e81fa9e14019ba50846d43b8922420e08c988aef30a081808e97ba41c6cc0c964ec048819f5eb37ed4d6b8994af54d514fa30294a14a039ad000b14e338d1014f6d873e91f060efa87016c9033a5f87cab3f3f9aa1cc57292ab995187a317454173fa1f9b9744fbfd2a40bb3e63947e791f2d51df44f1e0c00015080e3132a7d4791d1511fedf7f2936868398bce67b9bedd0228fa2ca34c116cc7bbe7552aa1b24240c8eae1cc2d9f85a7f27b7ef7ae9b316914326167e00cd18879021c8ea9dc0a6f8ae67b572377612a494e45075dc35865f069c24e8059be9a5c13e9af7c611b896847c1f
03e819f413cf03272bcf79003473b6c0976f02a59e70a29140375967e9cdec2794f75874f2c006f9668cc93d864985b46262d1bc2d6b17002072c042e695c64ae6c2e58e8063368cbc3c20d313067b92986b83ab227e3d9068e3ae848e12a29b8eb248be304a5694fd2957f2837be2a28a6768ca68a8bd3ea9cd6d10b6bb0cbecf4d5d84a45aa3bbc956bb2ed23fef062a0b81fecdc27ed88e0437e8e6c73b4f295a700ef81251c9252640f06950500d43170be503554c960815499545217fcb7194a4bf8e4323df60e4cba08aea9d00034a2b5fa48c6ad1126ab0fb56eec15be0d1dce35c5b015887de28fcc32078044abcdc2fb231d6103761d5e58c9045b9a161865842cd0c54704d8743b7a4c9d90a02e696ffba520d016aa113d2ace435f7b380e5a25595b6800b7cee72a68bce49b9da8efeb729e36ec92e9e51397950a54100df63699bd805fe6decbf377acb324f3122d65e2af427509c93191d2b512779123fcba49d46f00873274375c66360419d7c5dd213ce0a6a714b8acb725784f78e1cae96efc77a520d0482042a6166b48bbeb59bd1b56bca418d8da9d0e8909f948b26ac966743eeba951325a4c24b8fa46425cf6432cb4bd3a9713503d5a19971225c1fdf99b2b7de7c96b2b84f1363005e394d0e14d61eaacb6592f128cff9413fbc0b979da2701b44400f2f95f32b815ec22b738a759113d5de81b40d3d055f00eea14f9f8f956291863907a10a4c9c722f03ac62d0ae340a5bf2e45a1a31ab193c5fd4c29c8d34215b2f8380506f698c905d9cd088204674443303b0610509a5d0ef6ae3fcfb051aab067a499a56f9652dd57cadbc336aa1a604f9bce98a11684e67f96c680418aa58cee1c35f226fdff9f4b2519481df1a0f004d363b3183ff34216c800bb432dc179cdec2a5df6c515fbdef0913a918360b40fd0830e4645e2735e912e2d5907a9b72086cc24e44d56fd4bfdb0354f22d954f4c7c287c08db5719ffb7c40bccfe553f8c0014489fce2a8624a4c569713765cb07480516d85eb4eb72f3663e85b533feff82b131503e092ee821220f7da0ba49d7564d45c265452193766df6032c1b6032dfb40a2b525951148f52950d02b3b4841102aa3c1acddfa889c1a8c91211a20892331c5ba92124691382bc62033c56148a88ac42e35a9ed48b738a760b664a79361e1984b0a2f53964dfc967bec26582f1c9b21fdb3efb5d3985971cc75d9520d005013c27586f2a6bf9281242f278a3eec18077c871f94db9b98588a60bdde584f475d110ceaf88687646cd71c25891184dbecbeb7601e9d9dcd03e020fc1aa61d1b85b5fa359b4bf1e925ea6ad73633d8c07a99c21674921a76c36d76c083fc961e704eb31431a425802dc9403a9d895117c4d2e0cd147b69e2c0a99a9a6e0248f07342ece195264261c33432cfe5a8127cfc091fdae626946d9d8f1cee116957638f4338148cf66e4372d8e157495405b6e0bca98a4d41df697f15c298daba333df5c23655197c7733ea9ea84145def363c79f9a7bc67ebe10de92a354f44b8de685271093a2dbb5e252928e0e16e4850198eb0a3553098d6de4fe1da9fa499f3eb0239416bf1e4f80e12a5fafa55380434d98444c62797bf7a444fa45948d666c3614328eab7f97f8fa9442a241db98e80318938341cb2a3c01a772c611d4ce35c167df9845871da8bff67cd3993824d44195b778e68476f621f3df3b2a9cbe7ceedde9da6cc1230d9dcf70660288434c153264dc6617562a8036515e3dcfa6e7b555162f91bf39b7f08eb1895eaebf96303ba5ffbeb0806f57ffb7f8f80afdb51a29e3d7afc61cf28ca3cc916ed3bdaa65eddc1350e71d5605571aac8560f51cecac153be656e214b95da89b1fa663e26f71d049d17a7a86d3bed7c85acbc34826f4d5a17bd54d90eb8d6e54d029518fe6772412130ac4e96e59ec8c45f95e26579602668fd0ca4c047cdddfa79f1129979e19910b698811f2165cb7636702ed1179cdb3f16feb0def9f245f20ba7c2ff3a51f4b7f4ee9232c10d37bbc46afd53d3862784ae5d68aa873491d35d2800a1d73d4419f619868e4961f92a6a699c125520f372bd0cca00f6966c6066b5e2763ded434a4730d2a8f76bf5900799af3f39e13c230d950e38eca3b01200f43ec84717daf35d432039860fd8971640e114191f1f4ae6edb292f65db8fab53dde48846b3cbe80fa77775e5957547eec46dc43b177f783a4e46ee98dd831a85f2b0a03ca209de8b1226168b7c4380db22ca4494f2444154dd7bd8c9e47345006642a686d40cb5f837c0eff347a8d2fe9f249119083a9c4e962531b15b3c951f5689902cb1b85f6a5dcf93c1a540ec5f00054c142216ffdd1236590249a0601ee2c0e2aee3724f14b268f0028d8523f04c727d2a8f84194d5b06ee3185d798892672b922d94952ff0ded5dea8a185d8f1b25a3e7ecc0502a5fe20afcf161009f37256f3df64762592685b123c4ab7b84b0ae
271477d14922d11d5e326d62d9ba8ffdd0fae16c34696ea3dcf7b8f1daf01f606a6c2c88ae4a9979ce89826d669587d390db0e90c8cafb0990862493fb8eb54fa263e65f81836147140e73a77b231bac3756d8dd829d5a3059e94d4843108e1582717a6f12a618470ea7101dc686c0341e984b3bc7893d51fa9875b68c22760bb7c9db138ce43d00cc369e0bc571c6eb18872d3e211aefddf6c043770fd6c8b0aae0f65425e61e7115fba984c9d845167addc3642addb1450085984d4a31662d672b0bf66f5354dc3011ae6907e180ee36a31981255d6ec592249ec84a4586672ce94613fb9907eb26219881754ad51c23ce5932f1afd3a5c3c84b5e89a05cb5439fcdc1ae48cb957bd3c9bd5f85e06a6d5d1123ed20a3df4db57c45cd07f1af013aef87fb2bb958208b171648a2ac8fb2183b6b62cd3fb9e0dd3134d82df2df65e5fd3ab7cc77e881cb9df712062b3e5b1be243fa1147b1fbe1ced2df8aeae8979b2339cb0d70a0562c1c043db1d055b39ae475be5098aaabfe80997278844cad0a231aa6f3dab49f928f1d4d0a16988eeca011595c59a0ced3e1423b7450a38f9db997f296743c8d1b5def11d1613288b223255905068ecafed7aa00a1af062eba874a8348dadb9d32b06b43b4586eb64ef5e058ce6620eb6a17ddde60a76ea607380d543baaddc44311d6c7c80b963fa2aacfd614108f445b40d0c02b029734290db470341611566293d400f85eb8166f548cbb1dfbeaef88c068d324c15ab82bb7295cab13b024b041c2add193f8bfe61715d5f23002cde023760f080008376ca7de31ffefb8d8b6c1cf653d381248747215b9d3570051ccc3e674417ca20b486e2692f1ed70c9890d13480729c14570fe440de35219f699e594c051c7a11d625ab3c9b7e98e8818f58022fb61b15240ceba819ca65d2690c7c2bcebc003393a398bdc59396ef9a75cd5c1b121b4b4796de6b9e46a6f91d8b858d89aaec65dcaf3114bef330ce8c19846ad572026002a7f0dd36deec271f99709e8d1dcbf4b3040db181352469cc48d5c5d44894ac45a0296682e1dd030281c8d8e7412c4a6fde2db46699f7ff035cbd6c58a2d942b97fa1a0f257058d5fe71cf7bc4819e9cbdda692bad2c515b9f48fa3385464cd7ce0151157ba8e0a7e1ff44cfe15e5d8bc1eeb81edd7bebc598465a6bf690c1c0a31c4290afec3a9e09b0bcf87c99e44c6bece0d42bc05a381d72b9b2561f679f77e0c0ba6961d3de1649756b09b94f37e6c772e9ab8736ba41f01a8e7ecd8905c5fff0ceb7e59f3b01ccea3e426900eda33fcff4b02b4f076be4738f1b2211dec03e28c330f5e2d086f174fa01644902d20ba840a6b12aff28e1b1dca4d2d38946699026cefe8f1ece0536c450605d683e8a01bb1fbf74e066ec795f2a1c6661532b126d5b6403836c74cf35cff57d0ef9f02fe89aef94124892bd3fa0e9d87940b483b221b7c92ed2dde8595a1f93087ae7dc8419b5c440acbd4d8b45e3060010c8160c2d18064d1a5a91ae82a97486f35aedcb14fb5d89b1d9ca994909fe19ddb11644f4fcd8e47953fe6b00c35e8d99eb5b1dae21e7fe2f60349bf04d6beb6fc96770a064850411ea670470271ee2105641f14680b0caae2ece2f046ee682dcc9d63b62350b0ccdad4e7e86d0d7594894c56f5719e379ce28e644045633ccb4ba7dce6e2a6c9312e84a878b01efe2c5f6a9aeaf740fe32550a17912945b01703099d463aaa541d1e700a7a2e5c8d42a839637cc212123eef5421eff5406522135315499996991f49829aca33a1f517f6f5d33e42ec0d88a4cef113a1053fbc07a64035a6b28dd49e6aee4a5c110e68cd5b1c2ba565279fa6360130050cf8ceef7c8cea388bdc3b09a5ca20faed41997acc20ee9b8afaef603f4546f6bb672b9da21aef65d587a9cb5c3ad29703fd10e23ba9bac206ad0791b4529e5a5c975eaeac8635f3096f14f5db3687729bba2a97b28d5915a43ff3297c2fdbc13f276d7f76a1ed087608dfa547fbd73f66cdf59b0a0c969b01755fc548fe04b6a8915bb0c01175813df53bd3a13718577734dc23e0448c0a48c7aa0f9d8270c8375fa4e9e1603299fdbd43bc770517210ea6d13379415fcc439cfede53417d3eb2cd83842de20d02e6d3b1b61a7970e578e8d9eb31e43e92fbe28c43f9339616cc69af8c6065689aae160d713c93928cd09bb5606ae4234dc20cc64c64f576d240850e24749b262dedde3ad3b5295c31021da15c22d5dd293a3570ff6a4768a9ec808b6c932776e7ecec236bc0057b0ef0811f828acd8122eccec051e9638d6f562d408dd885effae47b873c2b0424ba616bb52b59244399892e05824ec2adfad668f52bd84c4976d30ff522997c22d3a28d5cb5cd1d6fb6c3c08a659ec20cf76a7e2cb5dc29c3a499626cc7668b119f739c1617425025336ff2dd64905e645b2e9c3e47ca3a3d864138700cbb272e8c3099d296181726c66b7c53638485b9c189dc46
e3834dae8681b6db5c228c93768d39110e8b98839afbbbbbbbbfbc52218e4faa1bbd3d1c61de3aecbb1b91ff171dcf890e5b8c17140a2f8a22f450ee02dfefc298a74ada2288aa2e80f5e71ce8c2a2a9a357e4e167dd9c572fb278e39435542402959de1d7444743f4c5710d375bbd84f37bfbb1e13fe5ae17f7320799b305ddfbbb0f8b9298efc3fac65de5d7c0fdf718ca318823a3b4b6c3e3920d1c31f84e69c373b623839e410a20ee5de1871229339e9ba229e14e1a48928488c8c38c426127230fc2a0ccbb5e5ab0e2594298571ffb77f507de1a7e40b9695e3070c4230ef314e94cd040747072769d4568c901b0d5d8ded67ee0667390e2d72397fae0ff2cdd9db4dd7e5088158c8f633aae6861569fafc0b76dc545f19ec2b833933b7925dbeb598521a1d2939dab4e07299adabe69cefbdb42135263336278dd119add1251487cae88e539d8fc668f87ddff77d3f9f6a31fcf44f19fe1806b9a4318b82ed1e041532693a1dead91df54563b5e594065301f8489698c43adb69ac62eae335b2e9975e5f5f5b355e7fce504be9a4b56b80fbb883c6f687698cc640f0cb9932747696d8ba9d500517e37fea187f47e99c9452ea2cb2c47f7f77d7de6ba242ee30a6189c227737e762ef3f5c53d2b3e486292e6255d65e77777777777ba9d3e96e434d42022aa6d427560c99a30332194b72df8724f4a9aae508f93e7d55f74076479674535a276b09cb5933b8ed183b871760f8156361d622af502bc6e68c6fa794524ac5184b159952b204a56cff300b53864b9933599834e30ec45809da7420c662e39229c37f1c7fc8d126c6dec6048662fb5f2a6e0f14b9fc9eebb2ff5d776dce7803228a98c7a811a319aa814313441365fb7f5455c3b8ebfb8b7ce919303df54592336a303d3851a050f74d83aba42847038546480454c5952379466dce8c9474924543a30741dcb84576d768e8e502a22167e5584fae16a0648ebdd8764de5ecfae2e7069336a35612a2142bcd7685e981e999ad1b7c901750ecfa3822bb3ecd116870336a0f8ec01109bcaad248352a8d46e4dc2228ce0a02b508e8558ab190b112a62755ab2d3791a91a481a8123d268f473b3984aa0092c81e0cf6d7affd28885049aa02cd9fe54c859db248af262db9fa81224832fca39cb06244540f5c5b64543222939e62e304614241adaf54bd16dd75284dbb53e0d13d30582b63933daf5534ad85ee354ec2a72727936f877a7be66d4c0d412e05f21a62b557b907c1024c15bdb336af5458383e971d612d305a3831934b8fa8aa971960ea6cbc664f97b578aed90d6c7f636675cea83b63dbd0fb2c1ff5e904bf06eaaa2303d304b606c30413051fc35819083f131a249aae6647b85e981c9c140d94e1ffc6aaa96ea7197bba64d297157c5c212a95a6ac90f0cc6f5697031722e04c1f4c0f414208612303d336a650c6e9622697035b329c34b10b7fd6970337a06101b80ac0034b84c83a3a8166230b4185b4081225485a21c91cb19b59a6dc60d46482e67d46638715709e270ee2a69704f3e253deea23eeeaa336eee5289607aa81601695190b3fc6b207197ea6190b63f8d7f0d9e39136eff9ad99c396d676def7116bd4b7658453ade089f92214c12a2dc4a95fa16d4e9fd4d51e4920677e4697073a6b5333e4c0f8c2cb2cad3e0ea2bc62cc60c0606b9a4c1cdb697fec507b9ab04c55d2fde9f46c85da6f7a7197297caf473bfbc3f8d1477b9c464eec271970bc903dd363477fdfbd7185243c75daad2cfedf2feaf672c31e9194af48c1e67f9bfd0336a253dc345cff08965d4b7bce87db429e9b95d4e339474bf72247f34b8efa3c1d13c71d7646204d0f6a7c9b9eb65fbd798d518e2af29858d06c85d9309cdb67d060d8e66e8875dd688754c8e4862973570b6e7afd2193dee2aefcde6aeb214fb71570933a336c3451aec92b483cca8c12cf1578c1c4c8fbb4aef3172a41841db617a6a60ed121923e79925466ebbea575fcea86dd5d3d12695577ea5639ee8985c4ac700e998201d0345c708e15aa6c865aab65fe45e707366fef77d9f9acd19d3cbaeb584130d9562de36917786a394fea84496191cdafe32d2385c991262fbb3685c997709d39323d2a5a802d3b3fdbf32552bbd5dcea86d7f910677812b0212e5be2f411c78776a8bfe1562b2280ea86f4d5f25ceea59e2f3435fd3e0687051c45e5c4647abb5ce3a67eaccd54cc5af7e1ffe70a55ea91ef74d75f7ed31dca5c45f5f4ce63fdc71a59ee8cde98d735e9ab33e8b71ad18e3a7fadbd35d38ac6e5d8576e38d71ca70daf27717b693dbf5915c7dd4d7f84ef57456472e60facf188fa136372d50641c77e1f7efab3747c7eab27634151c542f57db91a5b8ebdf7b6ffd7a5b96c8f567b5b6de6a1def4a53304a729d2109a81466f6177108bcf71ef4dcd22085596d09ecfd88703b229d57499948f6447cbfebc087f12705221f16434c43a256fbb492160970
f665010924e8ad4f2d1690c0dd15c21a34669ed6637096bfc7d8db7d59e41758acdbdd22328cffceb32b9487ebd4c1d3d64c8c3fec89c0c375888cf7641204151bc724081f172e32b8279320729099042183673d0c160eda5008b2684f2641ec2891f39e4c82a0b182983d8cdc2e3f068839e77477d2ddab57afb5d6eab5563bdddde79c334791e9572a2ab960cee95e175e6f5491edbbf6eda5e0b262c4f0c2131a97556bad3cd04d81a0b02437bcfefc162972c9daf52e719765b1585d774fb0438182b6ef800225312730c1490c084687840461971e739f39cef6d2a9929eed305428c14591118400c8c90c76497facd825bd6ddfb79a0e5ab88abbbb046e5294c00841b8a2083aa8ed92e6fcd0c42ea9916dab2d1a535f4076206382125494c0c70945c0ac0c46b0cb690b3117410891c49212902006308fc1f6cf4fb6fbdb26bb5cedd52e616cfb8316dbdac759675b2632296cfb6f836d1f8f3696c6850dbe37da801bf583fd7ddf8d365f0d160c120545a696def0c3106f4d5a5f28f2f6436ce3c7efaed23eea5725de1f5241c806a31453ca240428d89e8df7b1ddb9cd6d412eec5b81c8a5df72a8582c16025b2c06a36f35ae2fcf798ea7a4048d1dc3125d90e9fbcd83dc555f14e63b422a3a617d8a8a9103c6aa09cb270a35279d9a56d1fdb654166acaa8bfc2835cde5a37c7969fcbe32ef7502814c6ee5d5b118476fdb9386edc1563d7f721cb51a750c5e1ae492775776a8ae2841222d3205f5bb57e2bfb47cfbceb4391cbf973714f708884bc1e9ce9bcd1b032a099a7105490032c4a60c52dc82cc911389516320810bb3e0a4701756289c22486825ddf9445a082122d0961ca0f767d920f72175418c2f61f61ba138bc586c839a2e2a8f4ac5576412d999a19400000004315002028140c078482d160982469b0990f14000c729c507c561c8983491203290a42c818648c21c4008091919111920600e3758b202a1e72c41db98935736a6ab3042b74b50864388f23b89f1cef87130ab440f987866fc0b81c16a5a7b15f9827464fe9397232f79a89aba806d058cb464e689f2d6a5c1afa8779fcf659668d6e8c11302245d84c1f7ca6055dda1f1253094126ea5d382b01b6acec852040193c054fd35d735f8d04f3701059b12ad698eb7bdc6ce190b639d1305506caf29770f3302e33f0271fcd4ad636ea9ad74f5f04fc0347585d7061798fcb488da1e9319297521b02d8a5471614e62bdc8470e6652b38cd95d28d0404ec0f6acf8588f1805560be229a670f67fa56b9b001a47a4909f6451b580acadbc5c21ff9066cac40338e7b209eaa51d81eef17d5aacc85ba7ae517685d148bc0592b645cfb3c63f5a8d12cc34d2ef8d9563d9980efd8ba0d422d5eba585fe643ab56346d51d85eeb5110726d904f6784043a10fdebee40d7ffe9a474d342df80aee9009b6659489cbf888df8a3dcb024991f2c824afc4b90b1d20d9ef977668f74c9a895e11ecb94a22f4ba13d2e27160eed64aa480c6c4a108af29a3ee69af3929cee885db8c380f05ce6cdf66a177ef195750a9158fc7b004d33d295f6511afa93eb2eb82a299eb14bf646e826562998fe70c9bf49c8ac2721de92391a27bdc1fc2dc5cfa90dc257ca1f047f6a99b1bfc5c8cea8886364d470cd0b8ed95b3ff93ceaaf09b25c91f115a2f3d780378ee16ff985bb273ff6c3cdc6ae663f7b3b380d9669bc3db5505cd0d7680308eddd52355cd00f799f9cfcc8966f4b3b232833cabf90a35060f76c59eb190b6bf652eb8a42b6c6cd1b29bc340e015557e4b0b5fcf784a010b427bf274f72b0dc296937c0a78a8b6c5ae41ebab2cdcc75b30be81e7e8d475109addeea09abedc93572652bc2f594f6f0588005a656ff451623b88ba977b3ba91d11d3ba3e95585c967a472d7d761f3b2ef711801223812eaee67c49353c15de4788f4514b30a5a00261b38a08eae8332b99ba0d73fb2994b564dacfe9d7be4123f45128cf216c3e1f0a6a32c0dec15dfac6d15750fc3d664da333bcf6b7e30583c288357a387be2857ab9f38b615bf76966d2cf073f457ece1eeb495ae2cbdd9c8c5177f6051789489a45f1d8990462189c1f7097e358318afd18cc4c1bdf506df3a7909a1d7051f93afba7a147651061f0ae9bc0c43dd61ae2adb73fa9e748f207b13ae924318af2ef83111e50d47870baf8e3948d995e843ecc8dbe19553444240b2d03f0f551d4ecd2bef8821cccd29a41f06daaf61fd69ba6c9c17efb695e95da7121e988e92dfbfa2081e4ff1155359e6c723064d90ee50a1469cdf2258362c554c3310829b9e18c1e6beb5a362f339069b1995e7125909f5b252cc2e26c1e7529e68cd134cbc9945178e19a75f654b6ca873bc05c9e20ff029c7e781c60d70ecfd386484b0d5f8659a5051b38a5a5483ff29bd09732d51cd19a4a830e8b6dfb60aa497532bffb9f5ae606b6a4f6292e6e5fa708f17ca35958072feede7fc7c0b5
218673de0f83a8dcc294359b0dd9f63485085230deb8178f265fdcc57bd33746d2a863a15a14922cf1605a2283f412000f11f959cbd7c09373d8fb83afe02dc04b92b7c404c1225f42629606dec67de6b88720626ed139a2d72cb2554edf94fc2e38b348b8cdf0b53cc244440686c934ce12a56e82abee2ecaa45f5a66dd86f7c184e12c42026397644b80d5c9573083f8f29e26bf4bc0ae056d9b4c99a4ff36daed93b3b60061444f1a71c9080f4b1d28dee88aff081eb07998ad813b3234a38b52b371182043ddaba1a7c106cd61e5c8c71e6f7e73dfadef3a3d249a9a0d14001bd45b43d08638d65078da8e003f40f3d452db2a67574ee27794e75342b75b9bd5daf3bbc3522cedc10b6ca9a87c952cbadd9b046973c99913c0297cb90fac76439971ef00c116549668beae4db26367afd70d6a291b2b48f0fc06a998ec1111eeceb6d4cfda1a1cd165a3eb186dcac270f387238bfa720301d7682dc95b28f13304239ab055321323bdc54e940b117475bdc702605093b0c645c1881c0769c2631238a51b1a0a82a2faeac7272b981ae62ddd824dd76ce61a97551717e797e6c72b7e70048905b22ceee1229db752d05ce167002416a544f9e8100a1d42422428f3041558c6cfea05225007ca35248491f678df18fc0fa48200933dd817c692970cd6710bb6c22e18dde8844d43188211f4aa455d24577b2b9112ca0e74dc1be4636eb3bedb62337c3d4ad232916b9ae8e7a905b371fc61c89159e876a4f883754305e0cbb5c6280d35c1f350d9f82eea7e61452f0bcaa2988934237967b20835e1e06c47368a749e5b2134f348bbdb50e16e739eda7c0fa6882ded8dba7d4ed4150dd442b2734c52de149d344c86cd59a709a53ceaf008c4e9ac6624f9992f559b4001fbec11c0aeccb3f128cf9166d3b011c275d814d4601aea0f3ed156cdb638bdc9cabc3f61105bc1d2b98159778b6fd2747002a8f775f1048b1724c8149cfff1a1c6838d440194643932795aa1595bbf75bd56c8b63226e4ec92e64438ea0a07139d3893ef3e380312b33010d58f0894b3163032560c7d368b5a94209d0771bc00c6267389626ae4b54a313048c7ca42a6df5e86c652f09e11f6c7645201f4f9e1b38745238d8ddad3f9d9e7f4da780aadb41e9d72ad87fc7b889bc202243af4fb062de12b0946c0e2e05d286973f0f0a3ff4d4e1c12419c01b61037ab3234142a3fd27f7563fd2821151d7781757237838849b6265f2d513dbe6bfa5da50e7ee425bd3ca76c6dc7ccd6f9e6424b6206c3452cbafaeb4e53623e56f41613e1e3d107530348360cb45c9d48d13dabfc20ce3ee6653f1ed13f431c762a55c2b83130d7a92b67c9c8a2f163c04f937dd395cdde4daee943b2ebfe73cc4a6fb1066fc0b1b3e2fab42f58d2a58ba76deb620914fb2ab97c7efb215bc7fef6ba91b3e80661b2216a98baa786e99db0c562d4c865f84cd717819f21345435fff68e45039671dac4017da97dde48a8ce6cc56fbaab4ac9105bd7f7942fdbad9145d5efa2005f5e6eebbf674827054b788c55618638b1774ca16963d7178723b1bf98db2063e3797861fe081da9ad39c9e668d48e589efaf373ad5534b57e276e400ba3f4c37492900d20f0fc1a930bf387b728ab24b0131e3c0f5e0a525501382dc874a91d20c4172656bb5eb6cf86b3d3c8e402f5367156b522b48d852c3183e1e8a187d1687561bd1bb6e2db17f2dcedc13abfa6ab8fd17d385fa1e47a22e365e879a3946649b731c4415b8b7f37ae47947c8f27440c7fab87e99e02ab8cd06ef4c44dd70f62d52590d7e3527071ed162a48b04cb670787e1248f4d1bf93a1a050ad9d8a65c837e2cae5e2155d4a21610dde28e8ecd9ab6e8d4e71a185b5fc6a23b7661ea4b7d8047da4d54d7e12830eb27ce6df137cc76593963fe41979d2b0458bab5b83f051c6931006a33553d2fdd8570f3d1130165fdce89daee85df92c0ca858f1eb49c18ada93564206ea584b5dfd2572eaf48b6ff0ffdd73a0811f7162940d5111f0cacac09433af95496841cf4dece0b5b98419e4479f523be02128a5ea908898b79b608b6db3f0e1d87edb4959dc5dc898dc5282037a1ced9084a88f0ceaa39705964ea34a9598a69024f28a6e5088e1a1c347150d81910150933f85eb92f4170fc727a7d04b4553564f4edfd62b7c678859e978e02c37e56f7444be10b998f579167f743b44f203c6e32af55f3802c664b42dce381d2528ed306dd6b3cbf903a289c6dc99a5f918bfc4e097bc220f017c94ceb1059fcde4a26f19814e25a6080b740e4d741bb2ddb6d504cc434ce454117299cbdfd809e830b4f45be70d28c15b3844218c11568e9a286542ae7907c72cd6ebe684e5f5b968968279f4e1f2092ad8ce69136c162cf872cd3766dd968f952940f9ffbb8071907892abcb8c4c8fcda91de1f8744d9f9b444f856b8acd6797b23e1737ee54a6a2b38e6f11e468fd68ac82643ff038097686132
7b70f58690094630009b76b3b52343800e13245c1601d6d525812e99c988ebe5d98654e91da79a6aee3e2e61ab83770c0acc3b3395f6f028eef91f4268b88b8b9b9abe30fa1ce680949a152f174552ca2c1a1534b5768d82f96c0d8d13cfe1de833cf0406f3856f04fe42f6a0600ac606f0a344a8150e97ea69272035cf84e4447a675196edea64d30506a5e4e50c913e0bbe6017b9fe34eb4db393cda67c276d7dbd0c229de47fb74807d1a73170bd442d94a42a58b5a8640723fa13a692a834a1cc88670e03b6014020d8eca83e48daa326ed16e5b7b2537d12c8ab287bf8096ca7ed7c405825371c4254b1e3b222b893dd1697223686863514cf99b047a58631a828ac94445c3928de4e23a81354d1533a11971676a9deabb636e4c6e145f93fad9a9f721ae4238a3074226d5c8159069e2c36d7edda427f8db01e3837c65f382145a42b8f8b956ef80d85af735e448081344a457d088f9f6dacbe97790ab25ceb10339020dde167dc1db66e27b5d43461d47c478601e2d105a86a6d126d351ed2f55e2981e39d16c5e55ae5893f3cf6e85aa93b88a907daea89470a10790af9764b482ef4d407b318aa0affb9536fc4e2ad0c7204710d96e2586cab8d10a2a50876b8dbc446e46ca85dfdfd7064aaf2a4c5b02c6d17d1265ab083336bc4db4e8c3d8c927c4b2c1708a09031ffcd677ef6922a885a971b61c3b3d549c017e6aaf8d02b7c651a9125338c267ab709f41f7ce236b581b5cd84be08db75e53543e7967a4485bfeee1886de3372f83573b0b6dbb0f40c57ff1d0c60b068b98875c7555a072dbd5ef46d402a2cd207de09639b88a0f0c0a5ebb70db2d076b0e1a9136e5b1e39cf7ddce79eb966e515b2681d0ee2bf8543cc7c7caadcacf96d415ab06d56fd86e20ceab0df146b5ba2d6a277ea2b25b2cb563bfb15257e196baf948c82ae1cf18417c8c6350838ab3dc84d05507b8e7813467ae10b7b07cb4be47d8f61aec70f68c254dbba40881eeb69805a493bcbde2ae322a8ccff8246f31425688a427473d4993ca6eeaf1b24b2f04f8caf72d452b9c2890bd0f61d33fa15929b21286e9b01d4c1a2afd2476b2d372f35a669bc0fc637f9e8be2415ab4fd59ec2e55a8bdd05979da7aabbb9b392139d7d1b090591142ef5c6eaca30c4a985434f8bb6c8e12079e802dc0e09f1c7f837c2fcde8a726d54053e13aac9ed357479f9616085aa58b4468197a7df1acdd7d359b18f1a98a08d1fdcd1ba30afb0cd92fe0e0e78717f96f1b0a26774c673ca7bb46f45c812c6f8bc485613332fe73dfb2bf272ec562b9e8087e7400fcbad266cfb7e90a141044f3848480928d18d52d6a87bed42ae852105d468711408689eae4ee4d7599c573188b17b50eecbe8a4346de7b1051a1b4493c0b958df40ff3928b86b491ae4a089f1e5b0cda2fe9844c190ee8cf9c3c36676f9f96a8cd3e97b0314582818cc80e8a03c2fa4d2d45bb8445454f1a787a6bca6a2669d289a3db793c20ceb5ad8bfd6c4b9414eb3007aff56c89dcf1956e3cd07c1aec35622e113065c5cb323585bdc02c5a9be18b79d73534eb8f9e26b4a8680fafeda0e09ed5691cd3273d56556377b8d697937e4518b2a09ede5457b04f058f01e6b07933a645ff93f1ad69c71fe82e495577882d222d573d5cc10630b066b40e7cda386a9685ceaaad60c5d8e2b206d976d3ec0c55e5bb921f4cca4ac5cdae0d5a94b1d7c2eca063f81f397bc552a2de1d1c7ddeee31fa085330321afbc68675e08fe8be2a5bb4dcfb708ff10f21947280e21b39692d01b72139c20980742318e2280a7a452e99697f8609f4a450b774d468c9541d3919eb1baa92d9d6de813b9df42517ab8981b7bcfe9608a2fba0fdfaff9c41f1b20e99be0d61de69bd21786b13ec22e0cdbc98982b584a6c33292adfe62350a8ed44775d9d92a14fe354c804b455e2afd506cacb44be78162c8c7d8da41908520d33b32a6a8d6e887e8698da069a3a9b3d42f0b9f510cd648a37187df8b7463a6c5bd91726cc744d149ee95823f3169355ff9530e87a8e988937488f44a3e2e53ee13729b6954afc3dd72b595f7fe4277dd6c301ee873fd2462d3b848433a26dc70e28fbbeaba6b44a6a350002b747924ff3e1f2d51c508c378b2509a3e9cde6561740b17e368b4aecef8dd09d5184cc63821d690700a6b85a03eb28f4c6013ff92f3156ee2ab71b2451d9b4231ab4d01f4df7df5fb4e3f83117a80793d120c58548464a2fae93b5370c78e772700eff7c27b5d5fe4d2c28ed0348e7fd1ec44787f78dc811bc6dd8351073e1e9691dfaf502f391bd8835fac95d4a91ba8cfbe73b8bc93e12cf7b6ea3751bf45d8613a26c3f6fc403cd5d2d9ee54505232f7267b1a00e9c93442f8b31cd15c14ce232d805bf743c7fafc722efe841339dd3bdb409de5a335380b8ed0b7dd30e1cb3439f055c1919b8025cf9a2ecd444821ea3592747403ed15b23e57b47185d6af28ae4683b32
203e5f0670bff56091ba01975b91a757cd27f291f77552f6c8e6650e9ab76f2d30811a3632db0341da1422968ff103a8815d65012ad4ca3a37f0b64b8f6cb651e9e269e4b6f626dd8d817199c4e2bce4bdad160cb8b885b4cb286ddd95f57b2ad4b0d41638c4142640e7bb08482739d4852f8d00885e3b0b5b5bfce10aae1ea4deea25d626df82c8165f5deb3429dd484217ab8e453eaf13056c307b6fc4d11ac4d316b2c5d6b0b5c8b812ffd00712c0a3929f3a0a45db5f97710cbe403dfe16a5c9235b68bfd336791c50110fb3b42f7fdf98b3d8e58f7cb11dbb78456fd7e3e581b27cc140cacfe6a14edc03363a6470ce89e3b464e0b438bd98c92a52a15c54237ec4d457bce3a931b16f51356f14e9cc8bcc7aef7fc477a1cb68b8afa260378194773867dffa719a685085b839bccdfeef89962ca4a95868424a8a014b0582837491b93180c39886cca990992a6a4c0703c5a6577bc3f2a4951a73a93c8a5f18948b30243fa94fa4b5721e9192307ea7232f07e27024abe363a7ec8bad0993e80158d5c1a04ca15481122482534ca7937e3a964a5520984b3492c9dd946d9d2b1df096c05160721394da1528c8960f8e1b43d6857da62dc88fa8566dab87847d19526595725f3b2e0809a75e099a624338d5656b43e4dbb21767b9d1402367c060de9c882824fab390cb1e7abc118485f20406edf04080e48655234e61b2b5b98d5153b175cb63ae781b0c7f8c1c1aeb2c95c8181d8df860d5c5f78a58974423d6a904354e6f8fc82d28c061c17ac6083ba1385cd0384e14c6a8969ccbf1552ab3856b253e4c00f017610c554c03c104b6c9d3534eca49a0724a6035af34448dfac8721913e3018cd2d3b5630541c898773229884de3a89428327d50b73a54b81d49f084a0a4b0f26b71832d4a7874feef0a48b3572f93412674732101dada9c07550a87b9886b1b8adf2dbae83603408d99c7cd841d74122fc5e0ac84195dbe60b101a6449fd05881ba2d7adfc9dc7f9160c38c3029e8d69dc46a59312d0b91dd119093e80bf259b286ea442804f758791036e126172d03eb2f027c1b85ba419c0d3c82e5953832d870e6f1be630893108a82dba19227429a7d91581369ad699f227b9c6b18645821edd9799d7b5c1bd05ca15cf22b77962316d2e8afe4c00bad0f2c0e9734fe3f93baf4b074363ea37e81d6221912a81ca0c13d299610cfaa1af5bb6ba2e06314991bfb55798a50c4759a2b7f5956f75962dcf2fe6e854eb2b1a66f465fc5a03c374cf48b2514fa195c8ed37168b51d8b4a15616d6854c5c4cc70633955a1c21f6a665e42a26015cd5a12cae32241a84fb3ce4f6c9ba99446f90847fe213148b605dab1787175ef81d645f872e6523272a4a1a3fbfdc543b847f2788b5f5a734f2883a0d3dccb385040d3a0e8f69b43c48fc9d82d22bbb07c516310cc5332cf614f0439a4c48311866a37df6c565da45cf53e49a11ca1ca2990e5b34317988949ffe74fc6be1a2f14b1095071bafd37fbbbed6a2576a70015944bb314934a66745c4b3342d7af552dfd2223b056110225a00eb2e6d0e2dee84677748df189263ee71cef0cc67f05b1698104f9e79c03d92e92490725d04f38cc535195e9ce6eab8c72e2142f191f5a320c62fc9e8b8ff78e13aa463c4fe8f75a37aa01baa3dcb22b177cfce03c4e74990e01b544198f44974c4d6c4e149bd50b370520dc7a04d45f004d1343c7ca6ded5bfd2daf41ed62483ad23b18d385e630460b1de898c2ed7b18e3f90e8449296152b6298e706a8088752e5ad1d980ad326e6be2cdbad110505589c49d5d4451a9f4b76873bfb153c3044fbf2d5b21d11676ec1b57e7ed2d604cd98fe4f550fcc13369e13600074613f06f7fc33869f54bd2558162c151b1413dee90153a1400edf283fdfdb29503270f870a78ab6fdea4a5b68cb72a23828734c169f5a7f5d10dbe42ce246291125bf5b28b2ac51d6fbd2facab446e9ea542a81add497958790b044efc71838d24a207e202ef09a877432bb8eac20945fa325105be1ae97b637ed9dae97f436655a0c1f37113be9b0eed2854a0b437b6143cdeb4ef225861d015288d7edf685c89bd39b1c2fe3365d3e31e7b8c6ee0a425d3536127c09832d9e1f591ac9cc2e710c24519a0b3748948eb007d3a2766bb08281aedb60481bd5d68e1ad2bcaa68f06db274b638c988635d94e46ddf5f60e4f161826ee422adced021b1ea7495d978349ced9fa4f01ee3d040b06746c135e015c7b7828b9c711c32f642273b9e4b9ebd4ca3071d4f40d142ff925035794d6d29392299848770b58a71baf0048319c55bb8ca6eb03effd3c2569423322eb03fa68747c6d7b0ea3828b3d93869987158865ac9a220fb9ab7ed0f30583b654c5e505e8d3932e7c617004001bfe4cd3eaa4cb47eee0fed0a520a04eeb9193e8a6277212b68ed541b707682469b2cd5ed95afd7307ef165631b2efcf5c74a28122021b79b000e799736
c0e58e68516d22aa9ca980f92e788efd822f2342410f163f4016a2453820df3737a6fbdbcc59b12e1199b1aa8ee0affe1365670b518c3b99972bcf63b4c810c0653fc02a39fde8182d7219cb323a14428afb22d88a35f6edfd879e6b0be47cb57f11f683a3be987cd86cc633ebca576e047e4c85127ff4c209521cf14c7ff9526bd4cf3532d440f657b58b1a3a2a23f0326724e7936f2d30eb2da1684b1a26f479e8081a75ed35eb1a3273364d31d20fe70ef6a627a9669891d3deafa9201610921eea3bc853a64064054fc969d6f6038c54a760f4d6b94932791f4eaa136ce3716f64de56d62242ae1a19238a6ade88bded3da87c68edb04bcc69d8ab20cb6fa85d1da95707f6f3ba7df8c3724bb6b85c90243df8080bb65a583e20026ca70d2801f2d3baf07073d78265f5f9fb9dd8bdd0adacc337d00274a26a13a2ef2145d9f1a80f4b6f40adcb55d422fb0d2faf001bea550c4901a482516700c7f58e11be55fe14d0b739c2d6cc6cee2bed74a0ab59bf05b61d0ae6bd835e8728db26cb0ff3734f37adccd8e4308816a61c28f3f9a298c8c7b3b9447aca707b280527b9bb58f7c220bf8cd30429767bff57c1698b10051549576909ffedc719f2ed4473d241c1900b0abca7652783a14d5521c602ab5b8f7d64ca2a279e62017a54d45c10845d9b2c40641d606b6f512e7a9bd2dc73cf948cd911564c5b1f404b81f779869f655af84a5935e3e221bb3a88c2ec3c8c5c3c460e6d2ca66d5df081641946e700fb66684a61475554b21db9895667465eaefbe315174d8a5ac3f8f54f0731d7e10ea433bdd0735a75f6723ca8259228c6d75dc637a181db9e4789b768c67f3e5309fcc9f9023188f97602d88a4874a2d09a4b2b894e7e42c7c52cc5f11fd22a14c04662eb6ed34bcf30a456ce82694fa62fdd45d108d509ef2c9fdce020c4d2d0bb1f56b71da11d867a0f43743d0002364726ebef05fcbb689f3d20a1eddc05653d017946fbccdf7f8c93d5929100a6d9fad6a8ed4414f826a3e3822092da74e6c1f8c5e0feeead6d626cc9b840d38fc6a7500b59f871a67d3460f9aace0c8df595d644159d91c7b4a03eaac5066f46e3bfacfcff0c44a196dfad3c70df2784e4f04eae3b8e151b1d98d3f28a9c01ac7d73dc355e4e41648bb032468699dff73296609c2604ff1e1159768dfabcf8e2c6395711340ad4a205dfd20a7e93de7fbc95542dff58b00b1b0e8079b83ba32843c69a266deba3600393fbf908c16c93fb5c56d3fb93d944b0629e8401074130be0549141d508403cf1580c4ac0295ab642c4405a0c871ecd2c8c3415cf2a3931dfe42e68db793187b05e7c5a23d045d9259cbc4684da1516c7c782725b6e4f9819022a1ea664a7af0e8ec56b5db3698cf03bd8c27af509a9fc63c591c33f7c848da54f5a1f0304001739c1756e5d62457a611e0e8350092b002ee7d940e665d905b5e12bee09bd8892fbad8c90c6bc1df9ae58887b9da4367f363af8a0ce588077184a7d9b6e41f8eff9ab4c9085451986aeb208ee607c778391c26d07768f92ea6b47d3e0b67b3665a0b54b0c1548dcf72cdc930cb44e5519bfbe217e8d360b0a68ce38b6c0f1819036d6ce664ce4b8468db54e6969033574496d8923a5e8b58c4efc44fb17e768e0e800a7432a16289e7826481f07e58fb59ceb75fc13c8285d331a90440b6fc7e362400226cec93b0a413a4350a85cc5509e9b5d009593aca6ecba5a1565a7b9f7c2c1e49c112e43cd73d20dc29c0795b0dadd3946bd61f64eb615a0b18dd1b1b297b73c3e485cb6fa2f62c3b6c3028b3f62a387e1b4e6304e678aac2d696e5a956b72d4199836cd0ae72030ea55016d4b884ee0ec1575ac349b409ec8718e5a98a02f3715f9f9f35b296e3c392f310f99823aa162200e331b7a4790e8d3fbc0c06a7692f9492919c037452f9ba98466e657c7baec16a474539d2a142f65f01cdab84691590420c1f4c2131702a35a660de8d6682899f9dc43868747041386b5c8c94674a964b77a0180c73fa68bdaa0092f316f1ad5c8587f80f56e4c70042453b0a1dc86f84459c9a78b30f588a8aacbbd42b1ae2174490d17d16138ed8348708bdaab59c82407a07b8624943279d95fe56529478eb58831863d52ae4be8b912a6b6e88da10fbe20ccb270fc7f5a4ebc5bb0692289d5af9c1528a533d1d940c6f148332152880eea6a038770af014272438ae9dc67bbb0ef289f2fb428aeafb45560d2c589f2d6a8428ca679fb42c735f739f786a048704e55b28e875db37c8b7295c4b033e5c105013230b5e85cea8e4998858e910a5263c6684e01ea0409159de118a2547eee4497e72461524fea81f6c966ef79983944d56f24e612884c646922ccc80c6a5e0df96e4b5b89f9e2e52657e2dc6a8e20683623875452177374b776f444c69a2acc5e4d30e371b99d621ae0c391044b260c9ccc500fbb25961999b83e66c0a3b9b389a1b117a33a1f550a4c5dbedd81f1
df607b4d37e19dbcc40328a004df00164e436a8414bf15d1b6b55ab0a65a8461984ab553442a7cf1ca383699b07bb8463ad55d55b1f015a3bc5486be8d69278b243f4d04c80c0886d24841a4adf0ed670ae56f9ec22b0fce7935c5c5d8754dbd764ad12c08815569406f055d35fdefa33d2efdfc7b726c9af0192eb51ece1f6875282f436ed546cfa1b721f4e801c83b474ed7da24e260abfe1910e9df79fb67503f3ede6a404fa946ab5b1449ce5fab81d9c554ae85eeb7c4d1983c365822b79ab41bac6630b005053773801362cc7a8d64fe8e6a77dc1ebc1fcc70bf7878ea2974d84bd2a2fc07f855230b8f026a6726269b45f0475d3b21d0b2774dd4214b611d81ab08f0d88cb80bb68e99cae2cdc4e0d4e803810ca9d4b7826261c56497821d2566f8aca3f45885c7584c7d1d2a0854ca59994332cda981854cbfef4e8b316c506ef70e59aa591418f0b7cca38bd5e083746271f242dd8bec361bb898886582b19b21150fb22dd176cb1385184a5f01c3bdc291caca8814074e201b29b81335b57c4ea9d4bf06ab9001a71967d549cd03018627daab51f4cf776ef496c2f26732dcad6636f61eac10b37474521cb76fe1e8fc9d2f08577620df9570e3755907206f9476c7bf503a4c1898ffb2865e9d400375b108ee297fa6e47ac7205a27cf379dc002c20a1f6ba6a6374cedcf0825ae6fc3a3b369a1ca5b79e35a67a2e628a44fe00978b80280c44f813237851ead1dad0c064f21fa415437475fb84a27f6ea2d67dc4a1f635f3938a6561916c3148da8ecea4db9d4a4c86168f065f35ac9eb9666850059b316849440314a5f64b8eb80cf3b03f7b700a446e61d831964da97ccf90934e4bb268280070ddd708e9164f336cacd291a5ccd8b313984516a062ea0fd7533b63fbcc4539fe2b00670c159703e0b6b49e6d636bc3c2232046249359c158e934fdf9e3212a8ecd153cc991d98392dda2be7a5ef6803618a5c3409bf49de7eb1e3681c5fa31af3e24f90702ee330c8b01b2d7596df8f6d51c325c8e0851a24795b0af5feb5eee7b5d6ce4a32256bc79b04b597b9a23d5ea75953d29c20baf956ada9bccc3ff12f4d0f3de0f35a13f8e4817dda1dbd1077eb5ba4721148d4caa915f4fa1d6625f4f03d6b79638b59d6c66701de5b830210536590ebd5b586d1232e230dc12d458a4dcb690770e20c38506c2e2bc59ab2fb47446b8cc270466da69fd841b6029fa3866f0a99c4bbc20fd7c2a2c4504da5e23aeb0b63db5689593396da905736732204e6ef0bee161a3a4193302b3b43e2bb6f8ee43af0100051db7737092dbec401652a074da90c3297e6d29f26ed26312203bbeb1489ce99eab575fffc6aa45fe99d3cd23d7a6f3549ac17c7be5d914f091aca4447f5d0f144d74991d922e20c2be1b62f20437eae0867bce25897c38e1a785c66a21ebd62fc1c852bf5d3d8718f45e721db50b6dfe96838720e8bf0b168a9057dea970b858ff54e0b06dbdcdcb810bd08d4bee46594dbcfc2fc0da9dfacf0cdb51bf9b9e85d3b59c3eadb9689ed89811b295a79f3bca10192c7c78fa4cd06f536a41a059bc300bac32e7d38cce47f57a20b719cb3620f768b49138b8c39e52b14864aff7800235e010d411acf5838745e4bfc14051a3b62da5676f4baf24dc8967051e35963aa1a201a8e7d0a2d518cee27f918089aaa7c6a070185167a6b64348952cf8e581b444df81ccb5ec1c67f82ecd1429955b918bc2991d381e007f35188aefc306aacb2bc186a75e7385eef542ce8238ccf80604384634ab267a526a3ec892ba3c08da3819228909d45b21afd218f98ad369085d4c64e3de7d0e2e32a053aa2f94d847ad65aa51058ec3bceaa23b75b8717363f9609071e7acf45c48e93cba727bfe1fd2f76297c402fd094e7efd06f7c4e2b1432dda5b44ff93f7f84a28afcbcb6e08849348515064507317aa17bc95bf8cf452e13264e792e652aff02df3b593e0ee90113d8ade15e5fd383fa212cf3420ac15827d4a974c2f215ed38b83610c957b8ac24abffa47b0832e0f0d73dbf5ce5b919a7ea3068c54f88847a238455dc0bb3fe4cd312a4eb71951c03274e269263c7bfbd9ae346a555524a4dc60d52aff7f6dad4f452614c6eab787f25adb4f7ea2060404021dc21d4f73f1c653372d0f2a0288b396f9a5d679a13852c49ee9047ac70c8e36c751321d8984291293b684501eb06f2b8afac4ebb6c96158bad625efa71beb772606d25cbba0f26e7e89b25c1e89b7ef4487c7af9ae8a25c7cc78fce350735d9200b154d4c1be93dbd3e364ed8fa3cb07b41197543f346ec925a2e493b222da7eebf9ed786acc1a8ae4356d09fa6bcc0b24d258b6520881a2112b92bc150ea0cb777238b68d2b662b9946a69b4a4978d28cb892305e4aad590112572225d68e6bad87dca0ae91e3f53978fc62d9fb773549dcd808352930509aea00db9afbbf84c86972588ae2214481435152443e5ca24e73e9b4a1d
5e3bd82b94d05d6ffaa64585517bfb257c6a915d8d5225496e561f74edd69d6cf0b006d74c21dc4953e34ead6eaa6f29050ef26884a58541b2e751929eae219cc43658e6cb36403267104b4a8179f5f2b889830b71891b213cf4ba957e8c80020c03e7094e15e31297b0f83eb3f77980c2b4803141f16929ffc0f22dcf63202189ff3cc94262c6466be6ad7a190f7fd646ecd51745d9ba33f44ececde0910b9bde7f1eb1dda25050ec64cd0befa6ffd71f4cfc06d4865c15015f693c2c52148b89d99c281632d91935727bc35aa94d05c08b645bc07f0e6a1179b050249212acc15ddd01b1417cfc45a9583f7a948ee6e08a8ab31ef7eddf72d9ec6567da69315f7922eeba98807c01b7a3f69dd2610cce35f2470894031ec2b5ae9c53aec2e4e9ec1217ed5c4e125f615c2e466c31fb4044f120c8f215d519924e6070abf6b88ece52fd93dbe33a148acbdbaf459860942c0a51047530c2aced2f492ae83ff38bfb49fc6512e292527d18dec7ea382baa3774a43a09c1fe0dc702db66c1b87254350049d6340533187958a42cda72366636329411a0747e29936ef2c93fe60b6bf81095872c0d8e1d277cd1949a45e58f95e6c72e58ad5fe9cecd87d0aacce0dda99a075629a808d63a5e252ee12e385d06b1ada0ac4b9115410e5e9ad1774f252502f7da091ee5e12b94903d9a9c7c2bb22fb31af832056bbd4e91525e1d607de21551515e254b01f11b5415b911092b52de0c3bc11f6cf72726de85d3708f81e24f332c3dd65646cf97a5571b1d5ef7814aa3c3588c924054f80331e3027095a19007c68e0b11a8e87b363a05ce5a8d7169b5f62e749fda115662bbfcd3b2d579309ad984ba0026630458e5685e9f523e30d3ce4b12f015293d08ee4ae6e49ea0854ffa80a8ba632520920336a09c6eed6e9691267e81b270f74112353e441f34b229e457486ddbd2907666c8c966880414f3a3a4adda0f0686d6a0143e16fd1a343d2f81270a381bf700893835838715c83dc97f116c68448086c3efa8aee42c2d6376aa43571b0417863140ae12d09ea3147a3476b79bcfd5e785cc87181adf00425a83a92bb2abe5fd087043c7f3452f72798e06118044dfbb8636e61c42564a3cfd1ad860ff00a0dabc24d24b5f1aa4c52579aa9d43fc83c5a0bbf1e53c346dadf2727b821774609f939315b0d4f55c9f44f10b20d50470fa6a73f8d55f3fe60425eb12d2ad34480296554ff78284b01850a3ef4a404993874e98792cc1cc1b361b5052b5ffd69c255805259be4e4ce3f620d0c69e10d76b310fb8e626b854afa8d90477c500bc5456c34da7e1b182a5edecd892bc1a8b9bb972687f450d459e3b234b9ec12c636bc4dae4d1eea6b88bcf2b72751ddd79c4c0993bfde2c7363421b477673e0d44008e6091bd311784a94c0df7397b9416de42e9057544f788b3d4df875b233c97c009c26cd5bbf482f519f00a4245fcb06dda9624eb2387ad06cb9b1bdcf944a457bd78c0c662e1115e1af68df9dcffd0e55a65ff68ba1bdc5dd3b4b9f960078162027a351034fdb23d103fa8cfa5278af4132a26d7419c4f13e2761590792ea1c2b15c4dd1efc2861af06846f46f80e0a2ac58b7236eb5117baedb8a1a1ed6de97e99c2a4674911e01fbb1629f0000b4c7cddb2c127880f9b6b6116e31a901b9bf676b36387e4eeab3be72c338455113327b255b744ecd2524273b02a3108dc62131fcb0917b3a2e0558ff1a91f502e8ebd630a19a14690e64639285d9873c0a2105208b65ad312bb4fa8ed8075d0aa91b6a6af9cef8a62026950ecc8173e1f49f7338ad52d6156672a83efe6bb74f13f6c34763668af4998b60278bd958f973832401005f364183948922185a954959088bdfa2b3ba250e1c3acc1b4024208baecb3c45147205370620daa5ba562ba24334a522920e7f929e54c02ea9ff4268bc8ed355f306cf45f223b0ddd30031fd9ee6b5be1c89952ecb0956380167c001f95ad5ce9373c00a4d8c315639515f329ae8382456a448c7f43bc095a8a87b34f628e9f2fd56af823b8df01d6f01ef64843121fd090b11057d76ab194d90e82bdeea95c2f60d292e4a208724332052c69edee0a2d20214317b06b35e930c97df8898b395f6b0785227a01b7b68043e417805abe12c11ede8ea688703a3b9882da994ca942f357768ce878cea2d103210ea2c8b128911432f971ad08db67991e403cbdb29098f0a3b62f92154882fcc3bf8f3e461d7e99e94073508cc4891c097b5a9455fb2082ed105483f54e122fd5234b420917c7e17c1d71a8b8bfc0aca7e1bee7168bac38e060ab14a4278c986316993c8cd2f7fec82135c028bb95d8abb6dbe6b607a4bca18a1d0fb733198944d4943443fa9cd83364aaf752c8331b7924dd3194d14f73872193e334b6c3f000c498e9b34f10f2d46270192191b5220a42ba68ff18027716ad64be27d808879decfca0794f66aae7689b5b4afa7d1249ee0affd6afb05cc
e505ff80081dc4357ac80ac1290b5fb784d74da73fc6e23a8916475a67e723a6c479b93cc04d669723dd4d28c2257b59c9a5a650101c985a033ef7b701741b61628b266438a173fa079041a203618e85520a8bb9d68b708bcc57f7027d54f47162dc28f7f676e095180a8efc2fab2ae7b26d1ef24a4bfdd561f2e3d0c28f1098ddb36dc613c1b3f8812d2043fe8e2f0498c8efd42d56699e485f132609a11c3b85fcabfbfc097a44835f173d6ff4b0eefe9eed3034f47a21722c87f267bff8531498de390145539e11711386bd81557e4acd9c3fada595c1420d14e9062c541239822bc05d22d6baa6fe5c03418a62dcc354ca992fb53ac3327e3487049953cfe5590075c319ebb0030ed1eaf7c6a70055b0d00425b8ddae47fe9f3a005ae30d32c8ca003a22d86690d2f401eda42668edb66c6f0e5455cd445c8400989a0fffa73993d4b546a480ab056bfd18599f1075b1f45e2d076dd44c1c3b8b9dc5ab285d69d1f3413961961f8d3f42d667b9177f30978faee6adaf53fcbea3d56568afdd97c4774cb3c4c1d5fbaa03afa84e4240b201ed527aa44cfc2b5aa57957ed404010410823915d05f739171a6c8be15659463918e2a840b5e2727c323d828185c4cd99e6600f4d435c274baf1d9c283fa4057cfdc849f0602189d07124868d816f11530b5f20f50bf08bc8ddccba14316876d1ae88f07e9dea45a8adad5e3b274d6ee0fdccb19af46ac882e621f14953e6f4e49161bf929128cf47964a157c4b1e50d676a908405856c2e78089beee565e42ff186220361c16f7533344e10886a514728b6baa1aaca50331a13229fbde87e5cbdb1098bd40ee1cf665919ac35ddb53d892b62ebed7e45b008c7332ab40ec37eec0998c1d078d344f84f5afcf1db5b7c3b0c8566c7e9a06c02f3fcc4ec018c849943bd4f6976bf463014f4b0a1101e5d9cd3f112158ca3315f1d115df69c97cd4588a06848538588c770a57be6eac7a79804dd2b415e8838b46be8aa42ec487a07161b0da100ca43122c6f6f06cc6e8c4c3714e98937ab90d43d0e2f402011d8948a067bc3b6030fc106481fe43857695e994b170e9e0c603d26b84afd81a71659cd279eb5bdacae0a506a426b31725f874398086867008bec47dba3cae298528248665869d6bd2e7fd6cde3596b9df4166f6f211e097f5b7c335c65cc53c58540b39488bf8b699d4f4d0815f40d70a0c9494f053c3d7f4165fd79901be0b090ee3c177a3fc355a900d837fda6a4e70702f10085bec0f53b4037811166c4831b90af93a1f9c256f04062fc78a06c80f7217300f6467df19c3ea9ee99852958e05180414a5f7b7478d017cc1374878ca1981ac8f0faeb19f5e75780d4303f2e93a32858c5385a23d577639ce39c42ccfe2a846ec048cea73252a12a2f80156b58d2f7e3af6131af816fe68d348ba680133df2983829006a313bc6307201d06819f77700e9de18a00b21082a751267f9a19bbfd67eecbdd31c6c926fabd1871b0eae7f684cb0b137dee6086c7a0d1399ed94f6c7084f5a1468057f9f82da858ec042bc3ff1a8c8efeb028bff81673cc1196c17158cadd03061c2b6992d48618c29fb9bffc318f83795e10664e5ebd96ab7cd0851840780b016648b59a09302ed8be1915f64d19370f002d1c8edb3792c7a4e0a52c857806073ed4a812762bb86a6c8982f4dd21a873d0abfb5f024d0bc53162f8fd779c40584e9c0291be95b50493b82751cb7c087f4811ffdc69415c8c7a91a6b4bef9413280a56f55660c142d89c9b3f118d46ff6a711754927c2aaaf059122e3c2a622998d825d66d24d64de0d634c1edec6db7e49a56b505eab9155a45ba18a6ea1d33f28c8340514d1138e2c40fe87f063716559b4ac0604acb3665b9a88ec3f52407a26a3d917fe507951d098df843496005aa0f43c1604098db357868cc190398d41b413c1fcdf27fff1895f4f7f32004c0b2b1ece9f6802d73a0c02fee2227f8b6476282419e59cb7d74611b95761ed76ab9e286b7abd2b8f794558a7c49acb9b014640cd7d3ecf7ccc8d580f37659b18d2c978a86f468c6aec042be808f1a749d2ade374e373d05c27b3ddbd332f6216e7c40e72f327147e4a48d4605ca40e987bf26486946cb87ff25266cff9547b8b2bbd4774431a0aa1f24725cb5baf1e62ee03879bd60370dbc9d8b6da8e1055b7c462dc62e06eb03bd74ed76fdd18a8daf085bb4d50cc2b0b0a0a52fe069f7720a86552a426778cfe750256fcdf016a329bd314a6d19451c5f1e320ac19e2feb8c89b09ed988edfa9808cb7e497cbf7d81c2101d387691a705e374ee8bfdaa03076072b07f1759d46038c1d9469bae46d51cd0510517dd0a977267f0c2ca162a899b0bcee1c229043906fd91de1de815ad1fa893c49295c4d2f4c27b581353d1920bebb4c673b1c752822071353d121e59e52ca65521cbd1b7daff218a8b39171b62baf930be198c27942449a25061be331f664610a4526d7b
aede5edd22d71c029b64379971e910af8305c20018df70cfa52a8250d605c1a87a728ecb5ac9e55642a90a63558024e4c9b9ea673fa3e3a046885a1fcd57950d72774a83a969b41c88d5d7b72b1d8558f2fafa3df90e414689a1cb4270e695d5fd30ebb049aa9061804010dc350863cab0f5240c0d351bdfd366299f4137ca44ea34b3ca6e012c4d424fe878db42da11e5ab507124a691f2624139668ae6602ee1d638a6b464a0a30d6023695e5ba5c203b6cbb98007229fb81bc8ea2d25563fc1e5f642d03bf4590d7bc099a1789ecc35851f308f933805d953c230b8dcd397a60950c27ebc319a7c56456580e51094ba12b1b08bdb708aed04b76e88b4b938e4b9a306b75d054a038d210754fe68c9a57679d2d33cdc62476fb654b067f71539d21406014cc69b799a2f97fa7c9bf9d58261286b356ea18e89a53b591a69b1ce27816f9108978ccc8901f5a23409580216f053a360492dfd4cc4567f7002f9ea26c72b6a33710232bf47c26314ccfea09f8ab1943ac0df70e1ca2202d9e854fc08bf0a993c38006f662a75a27f77248b6f70fd85ba4a2104ae0208b10542a94a586006ea86a154a0a141a315bf97fddb0223c2b5b507ba6f24d25ca2e8bdf824ea4d0f0f6ae51285b56a5731f11011539a824dcc24105f5c162744b5dbb6e5431f95aaf08174f9d02b1e9eefd513051f327d332821917324ceeddfd88a8e30b9cbd199a3a6d6619652d924b7c363c9dce01544452c85e2b1a137d0b802ce6c24d951c8808c892f04f26578d1cb182b741da8158270c803c44a5ee6995282813074e331c73dc7f59e118f55488cff258ed32e57eae0ed8cc82b0b8d8e6fa3a9bdba8669c0386cbdf4d51d3d9b6b6ee85b4d526e2cd8fa2acacdea13e1ffad1f5095e087af9a4f7c98a4e6d5d8efe4d15bb03023c3bdfa654ae25a7706dd8c554d1ebd85a5999a6745e766853886b239ba044895d0fbba9dad3a02524d5622d107599ec912abc3bef9ac278297b995a86216fa409d52eef9c9a2d8c9e9f91166f0091c00ecdfc53aa88264a0bdbea56bf5bdde72d4ea60384227a0d0f05e09a3807bb1d83d30e14c960effcd8e0ecd9722bed40c9976ddb7bf78cfc6f0c7cd32d503c2760b735e7178d3d5db8ad4eded23ebc892d9f9c95252bd809f9ad4fcb14e2b41d627270b69362fe8d4ac4e9add5c72210e25b6a03302b1490ad1b40cb4786acbde23ce079973f142dd1043adcceb18d84b5d290d1fb2caa8ac02ecce824a6e9b0b94c6ec950cdc68aae56b712131585e41f3bdb27b5136984c1c32aa2a3bb3a75012d2cf6dcde95e636fabc6423875349eae95b43a2090e31cd0f9f2e9cf7e700901e4597cc2336758fc0fbe2f4aef7e3ad0e790dccb8a94174f1391945603068a16a2558bb76e42e06b6f9104e06a3256ce6c13b44a2c34471f0cbe9e2ffb85a15d672864aff6ed8e6710391a8134369993281b9b7cdeeac43f23a54efb71a4449403716b2f26e69a9b4810173248071250b5131b56ce47cd41caaa3d50096fc5046d0785275906172b546b05747d0efabbcaa85c3db784c9908ea9215fc39f98b0ccd6e4c389436b70259f35ba155567457cef71478d65dc05db08485253967d62cc1f10e0c5aaf6d0982421fe6925d73a07dd474b476609d9fb63b9f6871ce092c7b04f0f6c013acd9e891874e5425b08e78beb0f12726c25217f6de1e0515c1874b1ba636b61b737f62fdc50a782470b0249147ffbf3a732291798c614cd3e47812c60501d6c8fbab24c07153d8c4ed5aef3e9f2a4f7379d2ff03b69fd1672f99a74ce00ca196b066113d07002ac43fc38c715080f8ff8b0c850a0ad5817be4c42c3d0e69668889bc5c1bf8ae2d1c377db27bf92f85e522ea64b1c26c3029de5bd1b063b7de6ed4a49035daeb1e2af8577eca4132658cc979d1a037e3cdbaae8596d417743d1ee1aa5f9569f01a460b55e895e2377a082be5eb89a14f4f96e70ee5e85f34643cafd7b42a683ec97e4c4254d59f0af8b52583a87866fc946559c4957eba08ee7ebe2ec51a560162c43d76ed2ece053d59246762e0041dfde9ce67a87fabae2cd95c4d414f4c91717bab4830c99c212c76ae2b9a801904b858b89657b086f5bce94d19d6dc673924fc1fa694cc4df2cd34e6b1fcd718d3c12a067b90f7f8181b6b3bc7a80821d7d7431f34d65d36a372ae3760c66401ef32a98a6803baa31195f72fe508d0426655c051784d3b456d2bfaa441352236436381b9d030b44a7e32b162174e93c743199e461e306af6610d9c7e4367b4f375281d14586df7c38f650fe8de4ea46b1048c99932075bca4e326b8c49645213582e732d01cfa4cbbaf571c32b1bf87a01419aa76053d5bf4502fd3994301beb2aef693459e55c8320a903276917818261a7ce47cdb9f6289e227c63d765b653f6c0487f42078c8bd800d065d8fa18e45dda633621019446ffe2763361f21df7f32ac205f9217066e8b5d60ace189c8567
87255fb064fe4c970ca03bd25be57053623c70a318ec92659f2fd29a0a0b43d3d4e591a4ea0578a2f5b95d2b9b962714240efdf0160fb97126a275dd020a4b804be0603551edcee12e8d110d013d3918e9530c58032b418ee513c11eff44bf93a883771ed4471825e9b43b34964d6cd25a5a196cb26fbbcd278b0758e573a4f96ea4480eeba59f9196e0a1ba3623fd5e4d745ad921ce291610572851a4495774e3c47923f9c06ebcb6d30046688260178540c75d5d3c884f5c753762fbfeab89d0ac1a5f1515344f23b347dfe65f969d46525b4a0fdaebdd9a0a22bc6e3a944155344f44eec16153e9a68ddcc312152a79a593aec6cdcfa42bc47ec93b05666f6a6a1be4c305cac67d0348caeb39c4ceefc01f5d08407c3c590724afa34418c6e97350e4ccd9ddb43ab41462ad8f59de0a671385b74590b816cdb27123142108f5f78ee98e3134b24118906f6b6e9837bae4a475f9f371463891c04d9837e4ff3ac8def19d6df74c00f2cb355a40df3b4da5348bea0edd68778e3c22c88757b416666f46647cdbfe7d4921ce356921aa1b19800259f3ec9f98c9830deaf665b317786eaddbcf12789e1e8768142013ef96b9cf590fc741700df791b97b5bf4462135e357a16d3992dd178bf8e1bb355961abbab0016062f78f489768559c234b2bcc563e8243b8d3d1ad4bf6e38b72ad23aefac0e49d4a6f38f8c45a2ecb0ad41c02beb385c27528766011f056e085ad01beeb776888ce964fd3101368a90d214be9ab65d0f9b92ec14f7998f1c9a7a945fc2c1a35ae33517be9cf11dbfd2674adba737e4b2f8eace698e26aaff596ea3899d8744b5f975982f38c4d5d0a6f352b9a19ed03a832691d006673402fb83879f707d9aec0025f304f04c90bbda1390f216db332e8fbc7db4bc24c77d73b73627f402245ca3e26536d02d4d623f815efc30965d96a8dca08eba91fcda7165e0e5d63342602293a38420dc45f8a6ef6a430adcc1ecc2ef7554c357d7038269372a44a4e97450b582c9e55dd9bb2820faee78f88db6ef92fd34b6f0110533d3708c075b1ac105c041f73c657c04c95a85809c8fa35d294ba28e656c7b30b55d60582df88bd1361cc42b93444908880a43048627d9cef367a64495693e099f0ea443608b2ededc8cbb695c3f9f23de8a5ec1d1f86281e16bc212495c72e1f0c83e17fa7f55d40f6056ef27bf95c4ec0401dfbef19264c9b686d859d0b07167735344b915e9e0da6b2e69288914007309344f173f4a0922a5478a2c505e5a65bd7cf984d9baf16f84f050be745450f509750a62ca22c683a23f7bb5cc7d2958a81c1a7a8c61aa30dbf6c8a94248c3f44230af700974191944dcd2a526c7dba97780871b5e750748eed0bd6bfd6351e21a2bb7418cc7411b0ddefb78a03c9ebed3855c447c91428974e2a103fd103597c771fa9ec39187068e9d97eab44ab98a3eee8c3c30f03ed7722b848580243643e3b063c4bf80b911c5db2722010cf8a8fb25cb575065f89d5aeaad453da78a6e4647dee83e23624cf0ee45700d016fb51a14d99305c42d3cfb92178ea53b8f2f52e1ebb0276dc0f1242a9a1a2024ddb391419777f94efa7cc75a371c241a3aea706991da73f6e9fc1ef0a872011ae12b643c49fd50f8f241cc9f37fa8a2931d10e86573a1e6d8e341fd64578b254cd1c15ecc26bb1a7df93d1b8377b1fc21a783241f5a004ff6280f93f666496a47eec1712c27747640614c8d04a8e77fae348cfa33f1153216dec503588d13f33ccb2c500dd1f8701f68099915a7f3345002f7ca53e3dd2fe01d4d1d68e24b7b2b044182242dbc51424c8a0b0acee71e1546840937e92b998324ba946caf6c1d076f16037d5835c42e7e43cd87f3deb7cab181000b3abda286899148977bd577a71e0251d27b6a905bf2e2ff6897e66b103b8af3aaa079803afa6153e61afcc61e73ae8d11f6f213db1ed2e118e49abe5868a67964e131f2fecc05c91c61261f70b0eaaebe90395953cabdb468a71652d310e68751abfcdd72c5fc4da6ffaafe5922a7f219d6f493ef251a9b09a6ee9912ac686565523853e542c48b9f833697c97497b7874f825de324f98213c1c2a0c80211d11dbdaa31cfaf4eb65ea78698103c9d99cb6df81e1892acfc44962800286f11073096108d4b46300328677ae324996464387fb270641f56f5ff91f4877276c0bc3bdd580c50a05985821f50d1d2d94df76d5598770ca269817088fcc4b5540538e0bcae23e26ed8e109ef6f529561b98de7503ba01daaf4dea755bcc46a6e1d71a78ec757955bf7fd3c2f108a5bfa486624d4811375be59977c2170d93c1f712fd1211ae6a7804d9a8a1a0108662060bb104cb6480e161518ef10fdcbf7a58dc1265a5c32f242587314145c5fdbf02d9d00d3126b8646f671d055d051f486757071a7cf0d7aa6c6e4e870b2778f97437215f97c182067bbe96646e2488da641e8150d896cc339cc4fa09dae5f113207969dd
ed914689de0bcf9ab796e4a00befc21390d328888bf7b5116cac6fd08faa171126870d1964ebf2e9d2acd04d3cee8a070fe0581952b23e98d491ea7d7a48df8700d6aa5936a06f87f4c448281884b40b0b434dadce673cc8814a82657053b510972489b66cc9b65cae5c939e1271a5cd2c5529a090260e3e31b59106c406d541d347c36583d570d6dd449ede9fecf5c29bc579a6a8c047b92a216302047672484167522987d0032582060e0da1069a868bfecfda1023aabdbe91ad72132f29122a9a890124d9f3899c9cebca95755857e9764448e979f43de555952bd1712cc04108bc59356af1d43a731912045ed63cd355ba8e5917c78d3055de7f766181f08c6973d440288176e87b5257b0c560ac25962bdd40248863d6558e0d98cd624988aff9123148586dcc9a25e63fe3a05ea750177720afda58e0a7144bf4dc401fe6796cf268e95f070db5d48e6463d2f3d310e4ef8317949112768c65a43ee85684349f4284023fd2e31176afd9369c647676a78392cc3e32121188700450693f024f2c8f74d1ab23ec5bbe1c34fb1652c7a465d2420c8cb9fe838f2bfc4279082c411970854ee0e5ccfd6e78863f3e47ebeeb0bb12e9f4a00881a28fd33533d1d958c279ba339d227adaa40d798577526b92445b368bd5471745b14e13cbeed7e7642486c145ae0942e193905d63bf94141480c843588bc5e633c6b6841fc1354375395e1d6839d11c4e7a4823184a2bc3fb82fcdbc9516ae9c282f3e8473755151a475dc92cd3312f204947cd2eb8cde95c466e5856848934709ba9e30871fce6d782857e0a216a4eaf4b37719700222aea041e7aec4addfc99df3205aa9198832715fae4f84844df42c094820cb756d59afef235b95e641a22235b7c6cab7ce6171a6d5117a9ac308166eed3fd62722d54d96c34a62cececbf5abfcd2ec15e6d8a348b441f304b1c7ebb772fb794bc5b0820d32255349b33ce158734e02556aa3679b433c70411393a7e913346a50296f6832c74eb657c0521b860f89906114c5f36d5983ef70f900d9802d41a3a08c1df354f79636bff8c737eb5f0cbf3435a18e4405a54b30f6a4d604f9f5cb02dc31813dbd6e61e9374b4b8d1f5e2713563db4e83f166907d4745bcfaad845df906daf491b8695903f669a35e14b4c04acf36ea9ebbb7dad0ebc96d97f6a9e6d7aa68a8837c6f8365a42f25ad8d9b804266e342a82d809ca0042aa5fbb02f579b333890b754d1c46f0f1fb2e7592516afde79ecf3c0f20ab539f5e7c702162766863890209c36bd1e57ea9b2e72face499d785189d613338f2d3067f3ac8a5126b8ec7665bb7349c31b1b6b68f9cd0787fe7d5f7e295df8c7a945ce511a52fff156696bc01244150c8c18370a123cf977c3f38b497a9559fb12fd7e662240091bea39bbf33afcf87c4db4b99d98aa487fa3ac835309ff9116a8a9dfe6f781ec80c36cc5033bf2fca1174e43383a2b15c14b4dad40cbcb3c59bd09a6c5c14f0fb83db55bc176c2c27adf5a5ee5c1472278da43f635acec0ba4771fc3d1eb2c4be97a30fba3a8f64e2d7b70306ab2a63726c4efcaaae97af574f4750ae0b4ed33238926a124c10a2a0a88d9fc2e5c5c2c4aa93c4020bf181199edd2fe823991da472f27dae2f72151cf9c7dbf43910b37230806dc841e741f407566353bf875d82b14034824e8c1c15a45403e1a9c401ffd515287e0cfe1c8728a6e9da34b18a7c6109ace3ee0e4211c93523ac121b83edfd4e5c1f0586a00fac7e7d06221787053c1ef0ba0939b4f4f4078e08a5fbd06e33431bb96b0b6c6db3fcad6a7934be97aed1202550fb6fc014c4b1c355415306256c06d9cec3c623dd6fd41d7f55e68e67456a6ef48e195736ac51bbb852b4ef47bd734735e48f00fc8e1dc334e066460256597f2dada9ddfa82580c8a643d1b31beb2d3ad8df94bf79922755d18949fa633ae36b273c4a5984974df21977823dfd4cdede96bd125551dc7ed9ef674edbb75f9367e98405b344bc55d9442d68364ca925d66881e5c7f1a350aca2a487cbf8818c4e1071a82da83f1139fd1b5c5aa66b208a51f9c672c8eab0d94399a8b0c6e98957bd3ef5003b1d11c238c0800d4ca2916806053f2be3d20c686858263c122bc22bc886374c029406888c08ea58a6355e064fce599c95d5576d854b43c521a30f17bf0e6f0f01a95daf32c4f0fbcc9d1539f52cbc4cbd03e3a0376729de415755bfe54afd7ed2fc0a8cee3062465045f88d5fcea9ab96bdbad46108f8619ee92ba58a7490624a83931df7759a529740c00481d4850098185e6dfb8aaf2cfcdf21d04d8497ca029c11cc76e5e464f7f46f2023b78467170232410569885fdcc0950cc1371aae27b5e464097ccc9d677db051ddbc829f962ff39898030169d3fdeb38120a0b6c3ab540dde04d5aaba10e147f2f6c76ba2b717251043bef2d680974e3dddb287de49356c0f46411baef662d7d98ee361ac41221a5caa7c3f0ebe
787263267f38500eeeb78b7f79f2668912a501f1db8c633a6ece19b2ca35b97f39655733a988dc9c050b1487f8942374d647ee81d56618b3aa048599b17232fdaf2655a91c3c9f32bd7525aa25dbb0077ff07eb47f645905306885e0e05a91b14709ce5b2cd4a880582bcb92896df1401446662714e98c1be2871c818e1e85453c1881e003111d4fb62d66bd0bc9601cc19382a74bd568536fc4a127131740bcee58042a9603de6282d6b0cccda7f5bb9090e81dd842faf2afd6a1f16663f57d9d18f676bc43f751bf4a1985a178664f86dac90d10e2d43cffd0873481e89abbf5c8e49fdb503092c1c0f27c4cd3e08d48fa6dc91c26d3a3994315f86e9c02573fa072397daa1d77d9d6dd689fc994c0e5373da8156191f97994cb6a17e67c4fcdbcf3587b79754a0914031547ecfda050b8925532bf14e1d5ad0e954de067ce7046251246f2225be55c7335f32dd0b43441edb1ee405d615abb14690fb5a76e395b6497c09b27a2ef04d44f432fa4806fa8b2e1d36584f0b92c6047123b9b056728e5a9b4e8b6a65dc4c512c261a963c760a549c02a0b4f36b587d0501b714dea7073402343ded9db5d4f32d86c659ea781170dfadbcc1440bd4d0d6f967558485b9062e0221a9afc828c794bafc8c362d5110f299a1e2cf986634f53b91da1b3897739ca2793f8b5b1a80bc1fc171e96900b5863acb9a10eb473cd89e8c4f0b77575f5a3c77f718ef3e8d8fe18a10349a41f7532898f5409d76f113b445f0cf9ea51cb66113ec9903e6a3229cba27f31fc8708db75a81b723ba5ed6ba27e78d7614dabbc4a839e4145e3978efdc018a796039092d587b2f60d68237516a493a4b6972441d4e2bc0a607521cb1d56cf6c104807f4c0c4040e2cb4bc0fbbd57496cd798a4ec77eb83947f2ce49f573f6af2f9469f16595afc7b84f70080d12a7cfcf5ecffa7d5165c342c90057dbe75c01cb8c831590526c9145dc4769afc5fcd61193722c5dfc89761cc07642f199088201c6da7aceac1a2987eeab2e5ca9d2b0e1ff6bc10098715ca9f3d776bcf4c9b5e1345736f65da0f17bb1d9c1cdde974f38f857fecd476e3d614074ca8a44160e63a6ff24464a3e6ee854a24fb2b03a7b16ae667856b04d55179d0da9758e0c370723854d8fb1e568fa0e2ef72f4b10e2134b7988bbfcb27a55857df25e3a605461e5eb2fd4245fbc6fc422d4a63346a03777e296f4134dc6c756170fc218dec5270e31bdac6e310fa3038d40347bbce9852da406ed6d88c7d514324b0854d47272b2984086a3db454a512be7ccf87111378372c257447d8965bf1597380de5300c41b569467cc21b019ebf34f0655c2e0e67146d8d6d0318ef4b2820f1e41c6ae1e1d5c852b724882deb5bac8f1c9c0779ad78d72aeb6341dda394392b330d20871a29aafee6e7b1aa85b1b9367d23f2e9414c41b2d848865d18a2bd5101611fb55a6b834047516a16cf978055a2e62c641ef023845dfc6afeb3ed72ceda20b64d2e79388b53b0d572478b84bceb1ef21860a13c7e190a5758302faee5e50684d5085187c90518489a91fc49b273efa58954f853638a6e92dc3b7be46ef93245a07dcefb0fa78f0008748ddce928d4b3a67b22c2f50e937683b02e48e99f7e122c04f2ce36b0d4ab1f9d205ab8de9bf664c9a75ca33992b541793cabeb7f44a1a0b4966d0e6c2de08ae4deb872e2869050d26f418082daa0bbbf0428082e46e4b6a9a0fc4ae89e9d6c812fcb8401833719c3e2b54bc0f2f02422c76d5d3e3477320fdc657d61498a46516ccd28e394a946acf7961e8aafb20a3832b2cb4938010413809105b135c2d668f998f564cd5d05fb7cea89391c298a6470810b5a174dae137fe4361880b883ccb21bc56fe771e84eb9bb06c9264e31286b89457bc21e3855b76bf177ebf5e26b8f7925c14f6a801570e3bf1956afdea2da4452fd05c399888354e2885eba554dcbf484390d447d7c532d3e71c200a5e1a2a915d48b0b0dd2855049c39ca15576c3b948cf78382cb0bfb053ee08eccf4dd8d12db3edaf317ebb34d673775d7f343079374e0ff7b7b73697443f13964c6b0215cbb2eb6fb1d0beb492817e3b81558cf6e0a4a1e0e0940e69e293d6f4b586d4fde8a7ec74cbf40a9c1473da8bbb610eea6f530837d939f06c3b087bb7db3e023d97142c3c360e3e7403869f9746c26f096a48046beff5310a29dc49ffb84b128241ca4902340d6698938d6098806d745344198379ba07780e5c1ab93e3f5df5ad05603c6a24f43f160ea687f1143afd44d3dc845870f40851dc8a25f2b1f8c80947c6922010dc7f4cb5ec074c9457b0b923b83210e1d879d56b11bf234f76cab29fbc426b3e6fc30cca5c91335802ec60ba620686e1e0abe574736f097bf4ed7d24a6fba444da1a124107720e1c995298932e572dbf50167d743e30b9636835e1862c85ce675be7296e407c76216f11c0be8c2c2e05fc51ac84f2b746174fa0bc8fc569fd1
14436cbe968f166f19176dca8a86ec8e214d976753fbaeea39f7c0578732e89e5ede113dab4b13cae5e675e1f98d32a68e8b80ed41c14a29e19adb0f3f63ffabb1973f238bd04453aaff69577180c9fc57a4eee89de0f4c51e45d3a7b273e35700f51a9ff605c98038fd58105ddc6af511354f6aa5050c28dddf28dc375ddb86a13901b03525ae22212bec2638986db8236de27ea66a1ca3c9ffbc2945e2083e4e05db08e38c27a79fa010372caebb76441ca49bf80e6252f9e00e9f895fc2f144f019dff9fd4fa6f128556f4afb7f35c989a2d9ad48e3b12e0c6931d761e7f8a6e8c4e1683ce6d8e5712a695ee64977cf7814ade927c3671b25da2174b3f366e875632be45ec179fc10110e574732ea1144e3f65ec1ad9f76d2b0afc6489e0c00e9c635544e8dedaa019e6dd03abf2a9f51c13d506b728e1005c54e23122f54af9d8014862f5d6c84772172a2262261c381a60036f926988c9c9199322c53cd61ab950bc67dc480ba836ee5e35b06411e98240cdf64e76ad5d52d394a3f5ca8e80a5e4df017bac917a1ca774398c3fd8072296ee09b27c6fb8ea0bdbc11af36634d987550482c5c75db8ddc6733e47f05981781a14dfe092da5343f18505a5c9d1dc52f5e6fb8e849d490acf0c0f558d1f389c0373531e0862602bcf189901b0fbd442db496f5066ea8b8a953bc09cc2acb512eebeb6d12792c1f62e189156d08a433ff82f43411ea6a93338c19026396c7d0190bbe15f149449d18718f003af32f35370c6e0028b5794d9d108cd4d70ee861864b175aa905955e39638400bd66ddf442945ac0ee05321c1fe4dc4446e343f70f6e882e950b6817414caf2d5a61aabe16288dd3331b60ccf15f62a7682b6008d2777ce3f0b5113c6fd517939beb2c5f5dcfe3beb3165923618927738773e05503f263da54890f626f29c79fd8d6f69fe8b05d29162d06bca5f17b2dee6d1aa16037995e0a8387b48b1d9cbc272905c102f78db8bcd46fba6c505c1a80f7df0e65cc6147babe222bbcd9d51ee5e9760f407bfedbce9b1ed4b2fc0d5003df49da2d3883eac91f76b34c95f767017e6322f094bdd524d0c0e5789f379c7856c1c0c8c80623b91b43cddc6c2df2c0dd4a2d323a625904e8bfa5290e997f67524c9ee40e612d08ec9ed1303e9dbe612b6d46e2ee6c8fdcac3b312d0aec9db507ba73525c8990f1d7021332bd6b3934c14321bd1b72e602bcf098ebfabcb50e43a33d6cdede4ceb89116f3894ec4d4ab8de58f483a8164dfb7d9d6c5e4dd17738daa138fde5917177568f0ec532fe4ce2e44ab51609874813d88cbbec72149d2168dcc1bbc90823eb7cb4c4785a73f100592a8dd3350b20c6e5bff04c657ff982545cadad14a26316d8184fbec64dd11f798455dcb5164dbfc013334d6c0ddc107cfa068edc5f79539d88f7c196a552a550ba660788f9ec57fe14f82a5ff02a2fade5ad368cf1aa0576d2081a593cb10a80c234cbe5603859b7978ae458fdd1c255f6514bc6b2ff7c002486efe42044ece791aa1ad2bd52748b03ba86f28258305e11a903c923d591a7f1273a5afbcf371e72cffb38501d6968ec494c76dcae7484786933e02e0b38dd81b785c9a985012af52c0560b914e3eb9bb718c009f4e8093714815b04bf451061443321646fb9b794526e99a40c08081e081708283c5ff3355f5d87bd6b695a359af60182d316fae2423c0875719a0f99b449732139cc65524580edef2ec451f3d268caf15b1749a4276150860dc51a2247a3bbcf1e8bf140d31b8fe1d214e226a72e8d1fb4b3dd65be43932c0eba007ccfe717b10915da05a00b3c0fca32e06141431f3e7f01c9f2093300be279c347a72190de7ca4d5f112d4e215e69dab6af86f04d270b1f8ad0159481270dec416e1b0ee11298f4f3fd4834053e184e5bcbf61bcd617ee3b149932a97c9141b9d3912e541e4a442a23c0a97adaca8a8f8a4511af599cc079a9a3a9827ffa75e28bef7a229fff7e28a0e66cac1ff421186e77328a23eef85e201b63857df85e200b63857d308533685b05dfb409b06f05139802774d416276d83210c1f5c464f7e237283f698c35c7643d161930a79f21757886c9f45b6bfcb668ec300dfffd21c655de6329781a50ae327999911237cf9cb5ffeb2135ab42df7c3d5b32ff4cbd5cb19e7fc7d39eb9864262d1397c9cc24c7a487896cfb9bd498c44c8498bc4c6e485a242d921649eba5bd4a274729e528891aacb5d6d24cd2922a6fb9c96bac0713b5442e37bd7404ec9e2149cd0e6d91a445d20ac08f00dc10d9fea1adf463fb976038fcc0c18744619854d56cb3bd8aed8f5293aaf9263eb6bfc9cb64874cd5953cf9cf1d6caf47c894a8454f1509929657057d228516452d1c7648d4470687965465d4e70738d4ec4f122633a9eafe8386163ba4ca7b500e9396e7830a93aaf95e19562b514b5423551546a61441a26a0ea4a9aeeaaa0926ad7b5bf28a66fcf303891225216a7
d9210ddd020928072e017cd38aada902651121c8fcd89a29b48d27ae9ce31ca14ad16a25b8b964c11626e12131ba2cc8268b1ae4cfc8a7dbfaeba30857b5ca22d3285e3429e0fb64b2acae079301e8e1f928aba9229dec1648abf090e38d838aa9ab4b6bf89c9bd302c19936ce4070bd9d7fa665fced7a3e68b7d42bed777b3fda5bda3cbbce31d41ea52e518e34af204084653de3b28b6fd8e3cb6ffe7ffb9a46a24e7786fb0fd436cd0e2d7aab17a0343494fb6c67ea8352a39703e75895d971b03f9023771c71abb638ddd11df7c32dfccc772d3d7628d2d256ec8933f0e53cccd71c7af0502d938aa7edfbd2c641e17082023b5a46c39ca4933520ea90749468a9184905ea41bdad23692cd5a9b8d8e5e14d6e894c8810b8b785b5454ee78db9ea77f1f6b4133e8f931a8b5dfbd2d91c0baf608e6779f2d2d82b3518cb5269154dca0394a3f86969ebe49aafcb10d472516a127792258442516de973c112c996e9f9be4094d6a4955b8fd493522de9ef7802f8c412fdb359928f8026d385422f9d17ba51f445f12d62879d167921238c3e207d2933c29046758e02779d2979458fc60f2256f12822d7a2285608c9efc41193d390b1c5ac0594abe2404494210e6a24570a4c5517864046fde0f8137e869aae451a1f7ef4019232f1d35128a13c4122a7d4500955280b797df2b2939cbc84853a4711c19c7919134122a813469a4a93ce671f44a4a48ac8dd4b236f9c3daac6bfe884145b6545f973e2dfb604988f98136ed7d2309c82b0cc124763bef1defeb75f77a5e77492d94977d6d7f92ebbe8fde9fd4c351b3ae46258ea50817902ca2c7d5da9cc5da688a84b126d95c905ad444726dc7a39f5bc728acf1021651486add92dca4962580cc1211f297c930695bec685dd6c6bed8969db13decca5aa9724a7900b77d29495368ebe2a92c5536e48babc26f632ccc76fb92431955d9a2948db6a89233123225cbf8c9c150236999655b58dbda50b43788d831af328ca3aa18b678dad3dfdeb0a3cd61632c8eed5d685be8c9336bfbc7c8cc92aafb241e7ac716738eed17a82cde9c45b86db12ddd9e9ead1ccd79080e33ab95666998c6d1321a46bbf48d9ed135dc89edc446d327357a42853c9962fb4914dbeb09907d12abb31fdb7f14338249157ded118fd18e1ca79554ddf7cf631e479af268f4e48f692eafbaeacca37934cfe6ddbc2b2847294729a5943bb19dd8b8b07e0c3fb1d193634d02c0164f6cfef9b4fdde7b2fa594524a29c7716ad09ccbc4131bc8033eb1d1d428c7f6b7ed68a13cc63d7add60fb0b31f74aa6ccd21557b04ff0d9f3473132556f8c585235bfd5ca91232606070ebd1a9d418b75b6e278b87b8675bcf9c54dbe0af3e826bdca2b8ab1d6249269db6646348eb2bfd22bbdd22b8c57302c99191c664653188173ea1cad9c9da53be7042cc6321e7c4f40a6cc37808a297b5f060dc0c3c7a33f201d028d8c46df287bdd6884af1d8d46539e705c96b55a6bb54844eabacef3bc9cbf6fcef9a0c7436a211291340824d22323229168341a91909094949490482413939bd4393939119d8c9c9c9c9c844e4e4eb277d2e17b727262eb09474f7c0ea0560bcab8b89bd39b2ea41622520b11087a3c1e11a98588d44244128d8c88449fd168e4198d46a351e5a88fa634a9f65e8c3b3de77b3b7bbec8135b88e67c7065071d408fe7f3d1201d44245028343222128d462424df2c9590481f297b1d8944229148539edc8b31ee3a93e77939e7ef23d9d173ce39e7fc7ca8cf8ec39d0034e9d339abd0ae23d9d1397f2018e35f9d2ee67b79829e39f59c7382a69fcc681a84ad558b3e7ced4807821ecfe7f3d15a8340211d64acf0e05b8e8c8844a2d16884b1e7f3cd9f932b9d404ad9b669ad4119209fd42312b13f7ace0902cd506864442402658c46a00c2799170402658448369a64a3455f1e7d246ad094869f9b26c94917340da5ad1c2d11f9c4120472401a466c200c848131d0e6a81d1007ac6d7ff006a4813aa00c94de62b5756981ddd85499ca9a737a0b4dd51aaeb5f9b5566b34a51d7663135346e794d3ade70395a46bba465b9cab307f3eb0f419e1a2a9efcbd1840ccceb1b6d9e9b27146d9129bb7d2d9f8ba3c05bc4daecede2b837ae8d8f28b4b66fbce1ff8d52f5c911037af1c3f111793c1e8f67f42af345a542ba983fbf50633488f9a23a41baa0a94ae3a797f9a2c6481773be4c060539a4a62261b3e9f70769f9b948469cc8722bbbf22ce7e41eb926c7b290fcca37ce4dd05a6b2dcbfe94924469de215315264f2da9922abbc272144076fe917d6c4f79180a2d05a90b2f06cfab9d61fb7bdc18646a2cde13c1d285a2c661bea1f89e8d9e6a744aeaf148461d7a61857931e9855e8ce5281967c0678aedff999dac5c7bd36173c26c5a252868ee452fb69d880dbbdcdafefaa56d1c5573cef79ec83e48608cb387b327052daa6c1d262a2551f5a513d8012f6c
a62452e5ec63d455519199b1a953708d735cbd6e605cb3356d266bd24cdb61dbe10747a92280547d5cd6a05746eb7315ced14de274992b99e22efc396e96a3ac8ca3ea388e302c19177ec265415012b4586760e0f7b7b6e27be9a9be5a60106330073007ec01cab63f5803c64021e00bbcd9feb8a5bd968b5481eeb55ab885334887e6403a4edb1f744995e52c7882d20ce1782df76adc73857648d4875a52353f00a19a4d80ed0f9a49d57db986edaf5b52d5bd0701571a0f8044551d9508b50d6b10ef8e20ecaf34558725827db1b641bc3b5a640a680a226cbda30dfbeed8370479adee8598fe5eeb4afb5d096c855e211b475db005b6c056d7b16466685ac83e979ae7679880453344107374f1d466787e96405aa67868098da29389ed439ab0a940021249c4e612ea8c698250ac4ec001388ab3088fb442a2e188fba24cc3f628c5d7e3f1783c9ef7807488e90956398b6ff6bec7b872977aec537cbbc7d9fbb68aac79dec15a6dc6e7e3819171d38c8d4c81ddd0726ad6a5c5e39f13b0183d98a0c6042040d39109c15a6b93499b74e53830f5994c94454cb4ac2d1f305138c4f1b745dcda60c515b7913acb8254f99e5a905d9028ee1d0c45bcf14c7251d00ee3da98a64c267737e5c02688435c066daa9c7d939996390a0771d485c162749dcc8ca5799e8d107a95950f04b1830e329903c309a984484621929111f448c5d9fef539fc16a6357f606ff4e0be7d1a5295278ccf17b7ed694528142b188a501b0d8562c5d9f35dcebedfc115e0797f047842b1f660cf7715a9028944a1587fece9381e8ab5b5e7571c478d84d24d3ac667193da5a15028140a918c4846d762afcb9f87342bacde388c918acac84868042a551999325f7f3ca50a932937a51acb912935370db1a34cb12f76656baccbf618715006d6da0edb0e8f28a1adb5d676b8c3b6d2b89725337eb0aecbb33ccbb3590fef3a52adaec9e2468bf9e6bae55bbe65d1ba6668caaeec6ac67e33df4cd7ddef4a419076cd3137651c2e68d3066b60acabbe7561ecb2ae3caba08cd216f3ed465396873cf9fb68f96181acb6685ddb6f56c64d725bd78b7926a9d3b0d6722ccb4a2648d0628ed518cdb26c03dfd8fee158e59b4c71c11c75e3285c7bec29e22d639f62cf63cdd0b46a5c1facf30f0c5652cacaaa1961ea8d46c5b7869c50092d2029a5297c71e5ac5d5d1900d1b981c28cef1c40372018900bd402d98058201ad2eb4d4bd3c6765b449ebc46f7f971a9067eafc5c54d9f17cf8e9b3e2d3e9cc4f40efdb8e8d86a9e1ccfcc2364fbe705d63638643b98030a0169321aa624bd125e42832f6e02632b3769104c8b3a160371a31c3aa66302f80f8d1365dc44820983fe2d3e687c98013893494f9ac22fdaa21e4faca0453cd293b7e8c94d2615157165f483776dff77398a091cab3d7500a9681c2d731410f40c77585911c1163dd21468d4a31e41a31ef5081af5081af5081a75cd02b66c024667eb180c4c4eb8651330b01cd812b0651339b0d93a4653201807ade615349a02bdf8c91f06cf5004c968100de943038d2d1f195d8416c199147ac880331c0cd9e08ca640e30cc6053361ecd72d992062db1997348e9fb00d75cc0a6ded101913b03dc4489ed0a0950862fd307d68e2089c2d9b38e2658ba02d9b38a26683469ad2b1d878dd6da881c8946feb984c711868119cc54023cb4d3ab6fd59dc9739bb091c086902072d8de3301eaf2d8246180f97ed0f7af1d46491b023e88064b63f4e0b0aabc9529292e5fbbe1a2d8258a01668b43a7608677324c771b6c6952ccd06d9fe336e721a4aa92d8146d00b68f5e98064404338ae4a5a7393f809d9feb9e526ff6ab68357e818fd6e4653208c9ffc65683435db1f082b82c733d08641dc19350ab030c1106ef2b78920d6f61a4f43dec097184d7d3abe13dbfee08ba63edbeb1342db9f0bbf348c15dbfe2db2d69eb98046ae25048da071fbdf1568048d94da640590b13c874d039379394eab078bd5834b0f3122448810f1f719ef2010b2428bceba994143da66cc1edc63205fd02b384b3a4bc69c4583f88baf463741c05133ecc159b6c65195c572560fa4949decf4a724c409e615fe7542807b08bcbdcfb9d6ea53db5280f70c69ae7ad32f5653bd49551199c29e0fbffed425e6131736999028eb8444d50bbbd50a5e49ee6b29f5dcaf45e4c9df138ad6c6f607c32f14ed0dd2b638485bc4d574c32693e775375ef305abe9dcd06c66436278b258ee5f27274e5695617fb2ae7467f98cb7b60a3dcd97540560fbcf1ba9026df79e34defc441f7c69aefb0bb2a13be0274ffde0a718f9c202d2051252850199e2effe201b1abf3859a08c01b8a44eb725c84397b70f41786a06710bc8d4f471236f76d841071d0c60000108608b52365faf29636971b258d22535598316e7eb66be982559fac14da0162dced78ba6b4b61c58019923c3b79c2e66a869ca0253e0388c9807b
3b912b3a5b9492a313b739410477973ceefd3b6cf16623199df5c82e078cd777ce639566a97395934855958c47eefbd31a91ac0bed371ec4fd16fb6bfcbdce4b189e3b712e40b0a86c91282e6b88dc3fce531378154cd8fc914ff13c8941791274f39108f499884798c010c30994a25131310c86f32c5e79c99ac193373ec983c262b3613128bc5eeb57d4080e52c936517cf83b6879bbc4649b27472835d48e345e7d0beaceb095a54f141721c37ed5631d1b12bb9841259e0706091b03b90a89b3fdb243a7cbbec791db681ddc42c2cbbe48cafed7268a6450fe6d160de8da3841802ef0fe3386ace84b897e3d91c755f88213c5990dd79433c9dedefdddebbd15e2cf4606ef260f5e69c73d65eb6dfa754df53ebebd2fc0aca98f7fb94e4f6d40f63fcde835f411920f7a117eb0b7f8611fafde590cc4cabc6d5e3d5e2ddec68ad85b12fb6656d2ccbd26090520c7e18e4829edf18b498a9b0fd3314dbb93d658acb9ba3c41c1362c680a0428642aa648a7df114985f784648d5cd01c4375bc12bb39ce39672129926fb9819f30c5edd5618752d8ecfeeb0396ccc17da7126e790aa2efbc83cf28eeec318630c82618d17b07c3f8eded7611896cc0c0d188594d2931e0d8ef3c0cf033f2868faf845292bc29f060b5a40844459cf9b466cff0efb90af63ea75b71b8d9c6d6037ab6ec7002e94d1518ef242b941814171a1b4506c50582834db9f4a9bce2e33db68aaca64d926b3d59bed4fd39a61b9c9e73753282f7e721165a4b945a6c8a4caabacca505e5070c8d415c0f6475949d5a42863898da66e8832a2a08c25b6129491a64aa69027ff92da0eb69750b1bde4454f5e622b7995d8b67fb6855fd0146544b921535986324ad524e5c8349418190a0e2ecc346e022370370d516a504637b564caa88517998d2a146105296264305fa0d9fe55d6e2b2f39889c8937f2ef2dab01bd96543196ddb1f658ca1a0c4f634800b846265ed0c18cc390af2806360165068d1b6b4388a7ba73368ea7bfc5bc6a029903e8e2153fc6dbdb80b2bcf2d8e8c7dc15897bdb133b6c65bad28f53ccff3bc8fe713ce44bb3242a69e9e5ece22e60c66cb7cb1030c606c7f87f1540c7af27c8454759666bb95c9194b95b7e8d431fc69238a7369eeaf947748174490295e937fa821dbd8728f5a6e795e28da18d64ca6715467633c9687dd6173644f685776e59f3d9ee7799ef79ee77f0715138cd5ea7b7ff02b61983b6303bbc9b20f8eae93c964f4e422a329124946921293eebdd7e28fe1244b025b44b63f18854c819f2944aa425331050988fe41037d5c29b67fa80d5abcb25babfbcebaae6dfbcb68f8cae849091d72394afc68bb870d06458033008d90291539c2d1ea854f0c12653fda7c11c474e12f65341ac944a6ee7d91b6cef8235dcb510ffefb9229feb23adb9f4653a00d79f2bf41c361db600cd872c356a7c30346c1a34553a00bc80f1f1b6c812da08d1c03e2006f6c7fff7cc4ee2a41a6f87795205f7435b8b27aabb72b932939ec5eb2cbe8a61619d8025bf28b602b7b1ee7877431bfd5831c21419878b97e90f34dac45f65115d6da0edb0e3fa12597bd0e67afc3d65a6b3bdc615b59321726e7942b2b52d69032255a6a2c2f903f5c44602cf7e593e40ef8e51601b0e59b48182366c10d184d2c38acb172637981080a2c3714f902fc451477e9d96e2ac917df74e199a1aed1b7b6b54cf16d02448b2bd60d724e297138914813894a69b1219086952eb070b8a325c771b564b2cdb49ac0d1aab0267000e1ea740adcda3699696bad90a5001c95c31e02023bc8213256e974f9cf2f535c7c8ee37e659f369d35c42b5893e8ca4b95afc470565656aec85200a2e6585c0e33c8a49932f33571668b9b524a2a31a63080824524d5903a3a7ad0b0bcc002b9028bcd2153345a6ce7483e348f1d5bc61780692a86c4124b2cb1c4f903323267b6fc45f98519235df88b928a08b074a04579a329c9f2012ed4ee772fe808dab728691b007f78fb3274d8f0f35311e68eeef064b51cc551166bb26a85817d31a0bdbeeffbbeeffbbeefde5b83a5cafebd1c9df47b7d546cffef877cc1a4ca6effa688623b90ed47c08f3b99ba3b5a5debb6648affc7054d8323e41de73442a26e205114bc33b58584fdec49cfa9a7cefddb2530f78d814c0143ab9563efbd63eb8577f0f0f1e3b65ead188ec3c0afedafd7bd16d7191a5a12f0adf86241735e87afd7e16beb0a86f5aaa9d99a8d45a3b31a32ca5c97ee76a3e13269d0949438e47d39eabe650d60bbbc3bf6cd21ef0d796d74d6daaeebba4eef68af72474f1ab46c922d9bb8f1b239f00473db97dfef5e9433d1e6ec92ad599b9b721c255a7be3396cb7383ae8305d581fdba525834cad74ef6fc72055df4f61b758b13d0ddb47a9b23599e243a66e8c3cf9db2a6c4d02913f18c00093a9543231b1a3add9cb9229ee1f0002b4010d87f11cc817380642d81a7691
297eed0dede8a6900ecdbd68c7eb2ba3af648d9029fe40d094b5b20c56d6aca4d9dab8bbf95d285a22dbdf768ec23888d4680ab4dd1bd7461130b4353bd63e2f478c68717860d7755de7d1c184dafeb59aa3fcbf0c7a24abeca29c6cb3367bbb33b7c6ee5c9dbbba34b7e5be783c1e8fc7e31e924cf1847668d917e425c3beb6ab85db8521b3ed59d8fe570aa99aff8d4b5c2940815fbc4cd8fe72a2ee8bf7092f6c6dba5842cdc7f6bf4a4815e8e7cdb1af1158dbe64c17fee2d5c1f6bf39902acfdf98db83ed1f841a6c5fadac8e547ddedfaa41bed0ef6fd92055e0536bb343e60b39128a60281c32555e4c40a5933823d14844225d6b3d76ac81a188770de44310b2ef33876ff7d34553d6464f39f3c5944d17fe20f88539a56982f962be4c176e6d362341c0f8be8c9729eeeeb1580fd6d2a2b044d7755feebe0cd3f7f37f567f3ca0fe78c0aeebbaeecb5feeb0e75d578f97cd973bbf56cc608cb387b337a3a54ce174e4bff6bdec610cc3ebe6cfd0b46ab010c59c2c99199a3bc45aeb92e737f222e94dd37bac47423645f212c6e845d2cd7692cd768cb5269148fe4526d39431bc081068a43f9eaeeb7467ade5f2f7596bb9ecfdbda49638b7bd80b38c7ea4c4e28711921f7922584225163f845e147a225844254ff225252f123d495432fad149f719e907813ebafb749d1ee98fa71b8d44a2fc41af98f7b4a1fa231e1dc92323a1919191d0883732121a19190959ab1d005898b1d676d87658095aca142aaa98e10edbbae2ad9737ec5c41f306096bb1bcc064b3e30d2d5dd775f9eb3a4a697ea9927d4a2e97aa17c850c540ebd2c0d97a3b1c8bf5e079588a524b8b8f8219628dd9b3a5ebbe8c311dd181025bbbee76b806eb9ee05cb8197372bb84721fe73164cad63a7ac69cb25693d993a3b5da8c5a615a6c781e0d0ffc3cf063424bce5e90fe782c203dcff33cf0033fafcb19a625535c6eeaf162d1d3fc0fd70008b3da9c73561a15df8a2f0f4d5f54a151c45a6ba5945285d6d095cfbee2bef27d0c5f890186351ae00263f13c112cd7ae7870d87e80fd797d3e3db4046584a0d0fe9ed29c69d5ba45398b80694f16141ad3d30b71bbaf3265027a72154893bff759059e3cf91b4153ce4d57c6c90eb410dcfcf64ccd1618e3b37ce1a4f1d34442a6c448172c9912ce19111b74e80c5ab2ec9cd373cfca65b3dd666a76583a2b1adb625faecd26efbdf786bca02f8b76578ea23ba48fe11f983b52755fdc1ad868369aedbecc5849769a21c08b4153f9060e4cf398c7d15665f4e4195f50d2ea4d4653b5489661d9b55d1d9e98221f8bc3deb036bc4f586d37f22855f84939b6ce312f661c9f7befbd1f9badfb60cf0c4d0bd4b1f5a0ae68c19355c10ab24a516bde17b6db5aaca5141c6d8bed3acee67a6df62abe9db5b5fb6ed775f6f36e386ed8f840f0fb56302c999921bc5ef3f57a590b6256a6cd80f8c00823820ebcd4a4ce0c88500e2d7a37589e652be427f21299083909f9959dc84cacd80c84ccca4764236c86414e22ebd8b9883c83ed368affff9fda6cb521b319109966869399eb3b87dc23bb783b99956bf298616eadd66a0d0dda7efee135db903eb6e71e39099a63f28d5a93b2de68cabfe25a23ed195e9917ef0ec42b83eda16877b627142d1bb68767d82eda30ccc008328821554121526c28a2609db04fd4d4578fb0656d6ac5b89329af8877f36ede4db2dc7c70c19b82f86dc95aafb5796b8c4e7c7115446a63d0e2eb51bf380cd0bbbd77f36ed7d2c0340d76a894e25bb91017b4586d76b63fccee48951be48bb985873dec658a4f21d565bc2d339c17c3fe0ee93ace03761e507a421f5a94b4c9d11569c5a53cd3867ce1bd2abb385f529eb01b7da5b9bddc815d873fd0fbbeeffbbceffb62b8c733dfe646056765b5caa0d7c1b06466685a1e1911ace65083549a0aabaf8a535bd2ba575695a93230d3823cd8626555d6b695c5ba17862523a2c1f7a9a87c1fcff7f130418bb6e57bb1c65a741c7555a68aca0cc53ac59e2a9c9bbaaf54631463ad29e9f7178af5f5f17c42cf9755c02f7b1d4d8dab4767b38415c6586299bd12b8fdc3b2ed4fc3185d7811053346204b20515ca692c6c0d72bcdddd9e01c524a4f06f1b4077e1ef8f940cb14eef10356542f1c2051f689ed997effde772b2b2b397b1d9699b181dd546c6382cbb22ccbcec482581a987d591cdbb23d6af5baea752349680d4c22ba0a49c55ea1a95a8bc06c6cf73aafab36cbaa36cbaa357f1000f101ac8954516bb5406501828579d81d74cee979618d17b0746f5996d5b93c31dc2dde96196e9458cd9c8e14c673a810bfd117afb98e7599d5686ada665235009a548de4dc6c1c2df4d9cfcf60d02d5fe37db7732f77e76ddeba699bb7a9739b3b6e722aa47bfa62a8c55162e865f6f2bd64f1eef319f07ca10bbe07bf074316ddeb2782c505e0b3d880e7f5fff079d0775df84326c2c74fc987169
acaef1f9eb9e0f3d9f7997a3e1b69c8253486c4b9136ad969b1b1981c2823cfcea31db5d3e6288e4a4bd5e0332466b2998ecbcd3ecc86388c5c9b6d7f9ff9ecda1c5a5d223ad7448882c57294e3180e8269b00c7e611cdcc23d3a87d1144c3b4c13d1443411ad710efe21537e6d21d2143a8aed3020241f211dfdd5e60c7e61163d2d018b0e5be2c6997027fc099932396cbe8080c9612687e98b869c9b068962834459215322982e3c766d1a2453bec35bda5b5a7b2be7ca68ea16b141db376a36cc721866d19407b35a8eea5ec707ffd8fe380766e119ec03f3c03b30ebda885c1bbe816ddc221ead3d1e0499e23f43aa401aa9ca41902f3a5c6b0266d5302b48be89e5d061f8fbbc09023d88006632ddc462399885591f4f09ecf3910c388ee3569c32409efc7fc47c65f3608b7265ced20e32c5e5c95f04806c02488e8333c42b4fd02b56d0b456cecef24ca5c94a7394af64ea901cdb6261aaadeed44aa3e14aa3511a0aa2456fc751a5ec8354e1f79f43ac1b2a4d5669b24a932976c7dbc92ddecd4d54ae98d0920a4d7945b20deaddbc5ba5895256ab70ce7267d89eacd6644aad82bb218c9a6cb1d25a69d8b3e9388aab55d06ad6462d52896cff4af36e5e11ef9671e41bd986fdbe5c6995866f5ec1b03288c3169b31d9cd5d82e0ccdadc99b39993dd290542ce28e63e206633a942e5708673feb9d7543235a70b04774ce716854c01414f6eaba236a7a0992e204c1a4a9cd30ef1e986ed530ddb5768196242be36e8ee73d6ef1f022036c0010f48d4fdc8b0fd573e61499766d0d49c6940a22610d901899a3833b6b232f3198bc9a275a8640cf807b9756846000000b315000020100c870322a160405416a5b50714000d6e804a64543e1b88a35192c3380a43c618c40800c6000018209129ad0244c28338cd58698393effe591d967f640e121aae91d78a86dff55ef5974638076546ae99bd1de72826466c5cf3b2cd858240eac77e99ba23f809f9c4c37253eb1e3f5255291c22459d5e2790fce19786127a04dc2f1e398387747ca9ad0c333f4cdbc863cd62c4af09729acdbe5c9684997e6085dd5293103431a97d8d36596b48775cbfbd0024ef2b990fe6127608fc7a47a6a2e7958a9886aba0a93a961f26f834a935a00473a074bc8b4cf0bf575547b6544f03973abb172a6a6b7763bc59bb30d5679c3a5c12fa3a9b71706b1c3db9f8ecd247d666dd08b66f1b9ef45118b83126304cca9c4b8a5afea51cfad57265eae6f7b98e34383bf384cfc1f111bc7994b7d830635cbdeccaaadcd4bf2f57b0e8e8644579387634568c0810e7282c001c82b1f8939161ff7be9fb0e779ff089ed9b69ed0879fcf023a0d42b1e73ee98c2ae3c4840c34a297f53d63b4512e69ff1110d2e972b68ac53af758f919b46b697afffa9805615a11db1dac4c195995d090142167851ce8ae134ff5d9992289cfe048b47675fdcda41f4a53478ada3723bb67ed024232ac7b75e44515820e54826583ea898729a42b01b8de9180787d5661765491ce73e9441156391dc63ae4964393e1809eb137f28adc5be5c5e66ed36092c4480fe1db1160d2bffc7f87527263ce7c5983d545aa02d5031f5bca6c2b7724438225c9ff51ce76f0a15dc290044f64ca2fa65530630ced4374be44eadb5c23e25562302365d532ea814915281d9f4951b3259d58b627a6d8b7c01619b6c9e064af0b49321dc3eea364f9bd862430be7a998c933b541be230e5590b3ade4d51603b2cfc262d04cbc4fb6913109e7695de1691923416ed03252f1f4d461cb0f2fa5b1d3ed99691beeb66e2f4d483d4d06ed69f62a38afb3cd287a3e4dfaf3746b54ee09ffcc9f6fa855075bad0b3dec92193ba55a47052543e25322adce374ff108e0d38581091222f94384712d551be4a81660256550c62f04be7ce4b60f94b06821f946769e71c7114370ddc28fa463cc70f9ac14d8621abdee8b779760200d14b16dfd84e2e132d4baec5f1b6e73ebfe74b3e0e76e3e089e68b1ecd62a56765758c1b9199924fd8c60a415123a6defe7d67d7de7df257b6d5edfd06d2b3b61e28f7a0d1e476ae2a43df1a63f87148776e71469b42bd3e8d98813ec8b37f93a2e3d6f9f45411f2cd17962dec2794384132cf3164439213b4ad9841b85e084256ff6f4ecf184a26242ad6a3283e85b89c1c4c70e9a7398d982515e8ec9a71a8011ac2edd33a57a3f784f1cb29c6f429f4e5edac8e1daf9ee6f5c26949629b5801d7c0013340bcf9de7785ed153eeb40996a8789d21bb89306b31d7169af6e62c95f237a1ac5734aae697d054314d0c72bd330c5c7d05282f03d059d0be5d6d1cb4b15aba4b9fb3c5205db3cb378baf02817da588c3e1e25c71519ea7e8e6d07b82979666faf3f22ba57e027a64247e1fcd84f91a1d53c5a659e35babbc4e4cff8670e7563e33c1478ab0457b56e501d7c3992ab6e386533bf1bc0e59260a5c764b36d29ecbe7b29515cc
d99ad75b74113cc7ed72110fd1f46098a9d416701443a06103847d4668c060fe13a94097b36874bde38114b655f8b3dafc7880f2ace606136441b5d7483693bcf83c7a6e72e30f9bdc9cf543aac886b8588d3d1bff84899c74c56c30b6e4c34fb6b89543feb1f28e3cc038e401cf72f8792889a36c98f53b3b10cf64567d477f4bf23cfad51090aba5e6ac1ce6965faa5e925f18a3a62895d01bc3a7c1522c9651a1920e88aa87ba0ca88acca4f280cf3e9ade16202a412d52b2bee06207f7244b64aee6edb78f5f63904d355b82fb3e430cc752335db88ff5d7962ab6ad9eb14682f37193e16154df2107eb8877281fc36dfe68a9ecd99c067c9f61af26790edad1aad164966a13a448b44b7de97830ba068eb1e831315fc438a2f07d03ea402706657cbf2071f551feb65fa2f05b906540feac2102dca6a89153fa522d947b77a9616b23478adaa059eb54bb3660a191564b3d4bfa214d716a3c257a2b9499061efcdff070549cd6cf1e94802d5c91b70ecaf38587c2f124bb7ffe0a2263339e17714260ff3894808a3360fb52870c43ea82e311678f8e912dc8d5091e13cca87484fa46b2ac40d009c18ee91b8422cb08041d316b6e4c708e3e0aa1576d4d28102175654ef3f00698f508981986ee5a74c0cc43df95e0779407035793e875133065e52324c435636481077680b0d98ff94573ff8c9197194beeb052c355588ea0c08a8eb0787a87ab5ea4c7e5cb975f501c2116dc559f09160e9bfe488c75dad4770d9e72f09e82ea6c118c0612a11e92c30931a877d2192e9c8962b5a868e397dceb4de90d73974f5b442bc3465b0944556ffe2761e6e6dfc44540e56421fedda3e3ca6b3a2f0c9abb3f361878cdfd6e16dcc265bd0f08b2967f755f902c0e5566e1c3ea43626a25d9bab8d94c553db44aff4130c0b860800a402781e127a1cb5ae11526c9b6a437ea045d254a3471c0140975d02503cbc888e94415ec5a8001ba078acf7f22b5287d4e01728cddd500a3ac7412d1d51587ed7f477c6b60bf9a3fe0f0ece99646b74359976bd9f0593a0c0e8680e6275fc45c51c198459228742627b2fd6d37a0225bd945f6a486a753ba29bcdd2b29f9aa4a07108a021dd43d41d1c04b14b144f042b733022d460c4a2ff53d46b1815fc1f82a2bf8d129a64337da57aa06262f35f3c2fdf6246b636ef53170c32569541c892a6d8207b05d6372bd453eaa9ac508dd64d6dab0b9ed584e244582be809c58bf11b77c1b3aa628828c821a9b22851273c92817e130061dc8c75af7a03901f766417632cecd091f54476c7a9d36fcbe6b7ef0f78a589b7036f36d1f1520624bc226662162cf34a3bb070b42861693d08b36d440941f002811cbb5bf5a1d1ed1b9788612a78e5784288a56108cc1d47af444ecf219166b17f221b8436c60e0346eb0ebeaf0cbbea7f3683c229e86d8937b4332b6021584bcc4bbe81547d53226cc7350665570059b1d646ac75c96875b7b892f3bdb82f85efa3b3e4452230c58979adb6b46a5cebd9d4cdb785b79a8e36e3d8df6df6bfc3554ab23a64bd9a9f145ae7d8dce35643bd4d6cb790087571de77930fd84667af185c7d21be08951b49f32b9e9497624ae7b1ee30e564a796b2a8d3719baff61f0a925c26703113e3d040b9789b0afd3699f8f8886959c8fe933fa58db0584d5cd6e5c609da723ab8ef60fc3c206caba6cc39dd4439b6b1a0d502846a470607c1c594098f8f3cd22a25397d92ed29e2dbd350915e4715656995697cf00dbd76d8c4d8d2d53eac08c794231cba59db7b557eb5c2f55b72fa4b7af60555ff8747befe42015deb612288bb1f2ddbe52fd421c52da26ecd7acb11c7cfcc2e17b5be18d9905584b6a5220e0820a1b24e1b655a988bf1a48233b4473d906b2446b3ef110413f52dcf5b902283e51fc178b47a87c0a36527492dac0d221844bb366349df1e4abc8909bf891784db3d688f16b5f87c17adf666723c2211b2c262876fa191232374dde5a13ef26ee7d6698fb75796d8f1e548d172c2ec7340434e278651beef17b511985f28daef089cd11affc449d2f3aa54e04b6cec53305c4b18eecd2f34788aeb6eb0f202fe2b6b7eb3b8df236a6f73b4a778f2871606bbe58152e204be1f1e7f5579034fb933a6d2e1380d2ee5f533e13452f4a8375ffa985573219c95646ebe4fd2fdd7a51801aebd790c696249466d83c2a1137a0798ab300708ec013f4844b4e00890bf5dd54f7e1c4a21216d08a3fc9bc564adae431127a056c7564cae523c752608ddc1e7ca1a4a000322b37e84a82a7679f3de268832ef50af19344fa5917cd64e53739083883954adf65c29af9eeb5294b80264ae8c4866ae3cdeaa62eea4292699aca48e7fc8ca277c96de99662bbba36f572ce2e496b67db7854e14bae3fee71856f8a2fd0034d26692a46891726d8206bc7bafa14e6a91e19403b0043f82d998651d24725f24a71a3206cbdea3e7d92dba00bdc5a10901
194417c91958ee376d78ed693e9720f557d26fc12d820618ac1b1d5cb90b05743c1c7cb196d9d58c31013d1d499ab078dbb0c4e1993bb382be4dd300277c090250f2b38d0e65e0f3fd26608d55b51034088401b985282b095e3e6564f4cd31a013d5a1968a46b8d11f28251550926a8ca76c4a52c7420027c9b6a5c9a93484d2e6ac6c9d742f20ef3d74d39bdb1c00be9c4e26818049be35536065ba241c445685505d1c862c8e27f447f8881ced525a46da43365674ee30837396a36de4e79f23134c13544d3c3aef586c34bfd937c0e4986b2557cd164243b547e86f4483f17b895aea7f3c117cef6f536ac7ff4d374116cbdf9f9d1674b54afb297d623d8a8c5e9a67c62e10a3770d0acbfdd71239e78b6b3503c021c34ef4945ef1845fb14da7ef3a82402a6f9c26652165a7422070252d6b420e8969afc491f452bd19ffd2f13cbb5593c227aac6f47aa7c87f6effa5acd18873cff974474c60bf3483463fd847def0cff8404669c03e2c3d7ff53d41a23f6f45a66246414101a9211a4e8cca0c10599ee0ec37a5dd0730c6c6f9b2c406d1a1928b623e88d100f67d13ca3ec84165ad2b3652612e0a799b31f82de248530e35c6cbba0534d191a3e3ae82ac47a82765e48412741b14824e5b7811c6c658d550710a0e2bbc3061121fdbb34d619c6f87729d704035c4942381fb4c380a856cd14a601a7fa948f9b5a54f33c61c015536844bcc25431004ac7b3d0761ef207810150fb8e5ceff88f7a94e4651484e22b6905ca842f05e292f868981840412519918283b304423c788cb8dc27d7bf30128e08f8051409fa8507e974463c134b9ff85661484be44455187ea632d1a72a02ae6672daa93523a90c491deb57cbf806b8c5a4df806ca64d64b34831a9bd214d873a3c6ac009fd31e85ff062087db1bca9867fe752baa2d79757d31a5a77126f5d34ffd95208668241b721887b1fc622a2f0bc90645f7a50c9b22e3368cf662ce8ae18343f18c0574ff3133203da6ffe60896ea1733e6156c5d9500f7bfd7102d3162adf9b8361a129adc4ca037ad045e446748032cfb791c6d097f33599da2faecf09bc3b723b95fe02a7e19724d72f78e929ecbd300db8c1603ad6a0ee143a8f454838697825c9877d430b850a2681e783af50a435aec06893ba96441facdc388adb43351408eb19d0a51b128d8fdca4cc1cd61de3e00800fdcd09b746c1fcb0765134c4d4aa8f77f890e0cae327c5e43ab1baa80a3eced5e75cf3a60055925fff78aef7503897f57248bce38a3473b00b81ea4615dbf2ef1d8961e748c1460b1876885e23fef95040fd42b301a089630758257bb5c5bdddf5da4a2b68fd280c5dd17d26734807d95e35f4a508447a83811c149bd2aa6e8418889875d94360f133d8bdee58bcf77af15d543baf15576c524d387f798b64e1b5205cd9d0bb26c0bc3301be9c4c2e44d7fbac4ae3e42bba41c5f729e1d5e388fcef2501a66ac8878dd21360316d94c9843688c96cad499ea16c6560424cdabe3f082b2d6e0171597a2e3cdafc245776894054d644092b0f667c1d3e24f8aca00dce8b4c7257260d6dd3c49ec99ebc1bf15354913c5e8ad29591ae6d3c886f1a4b7e026274c501166f7f51b276f6e72c49ecb467e5dc366fa722039de2272c6f3b1f054573455496a8e78f294171d24bc42f0232e0ba47c0930a36e5b6a2f33f2c2b91e551e70ff04d457d5e7e3776c1f425e7cf4c01c2e79afe0660bbce7d156b6a4d342012d599404ab1fd1b5d38c9f1518385854b81ccd3435461273936df3500be3590a775eb1731344e21d90d0ffecd194342e2e92a36689941d30b8d938912b319e0e389c9c671e8d1c89d3e5004e6172455363f01a747a2112a26ed80612d8fcefe509a4d3eadfeb64554eb1608ddcfd5104a8df38d8ea8737d20d76505e5fd4fca923f5bb8f48dea5033cbdfa68503dd495b29541550e619d62187c2b00e51e26aca06e1c7cc8ffa6293c574caa4cf733c9a0b8f305f41feb3c238d3fae4713499401fc6a4bfd91ca41dc33878fc4b2819d70dcb4b4b44ec4580417ec8ddf724f9e8dfdea65dc89065794ca4f2f60c249f354b77bce96867451bd8b443c40e65527f4625108879313fb57540ed04cba855f8e682c439c527d416183dda2de0d223ac16508a29d58f228e936ba2390a358bf0c8431ec6228f70133614b042db7a980524bb32b44938a614d6c1f25df281f429c865d806c856fd86219e3337169801f438ec8604353485cc84dc291b6945b3b52ccb4c2ba5d86be9c03c1bd7f8c705b4bcd7ca36ba8d9fb8e7221e7f4187fc63fcd4cc33c95f9959e70359642280ff53101df41234cc5989e24638e392fc521eec0b531fb3cc1ac4689619bf19aa4e5a74e9b1d4d2917b31c10cfad644a615a57cf1f90be158f5ef1cc1498a6441f00b7b35d82dd73e112411e55d680ce0672bd29afc7d37898326c95e5837c9e471ee073852296fca2bef97c0d7d473ee
4b0edfc2483a6e00ca733795f43008f373612f5ac12d4cd7e07e8782c1b03a993ef8e4e7e41f574f49ff87d9768b4cf7a7c7fb372136c416022b3ccaec0bb081d4bc8a37309a1c5911a85193e0e4b2f37e4e84f8bd8f5f3ef827e9aa8e3e7fac167f5cc5bfb878d2858f281a81feb45f42b5fa1ea2d4abea40391359ea0871abcc398d6419b7df1690fbf3973f9ec617b626ddcaf8233fd41480fb2a00b77962d50063b82a0081dcd2599a56b1e84312344d814793547f86f149fcd88e9558004275ae22d58a8086fbc4b22bd05bec06a6ac31683e5d0635e76312159bdf1fa50961b89cb21fdad53494671acc962bde8232e923828863fbb738fc7eb815755483a5418dbe6b0453d1baed1b4f788b196b378525710be67d63d9a199c3499d864e2edc47205335d179f79d3a2e2de4cec4bf3b935e95dc441f12dec531d3965207838e1a31ef2e2375dc677712ea969777e0cd13646c1eb0b4b27b9be7314f0ca44848b70ffa591a86103d024e2a265c91f52ab91996ca8eeb976343505defa53de920751ac8a7860033609046d6474eedfa9b28544ccde783642a2e83b462debd4852716be32e51200ce98eb450aea0033d1ce0e4b2ff404df2629e9b939618be46c5044db3be85525754591c47b3450f2dea4e8585cd20c11b2aa67a56e1501dc70efd75c3f2f474ff730783405aceb94d528f8ad99d8a264956d29eff53cdd7ad67d3b88ae9357d0231974bfa0b49fb11db8b27d3ca5ee5b1309aea1ba953973a75824bdfba1caf0fe9988a692ab0de77203190deb68198205c5b567259869e8fa45c5966b77083b38a8cee5aff48b3beb2d7d77023e5492a25e82b6744351521ed076ed549efb04856e4a390caa29ccf2b054a0c92301f77e2d646ddcad76ab2c60195712f9f4ac0173ccfad8050545d2218f4a4c0fd9d9c9197f7dc6c53a2311c58db49af573785c8a31ba5670afde1507f62421e2ec50854a920541e695b65158877303c23b0992dd06d6eabfedf15b020096f99f94caff0cf158fff9dfca9b494fa64a2691585aaa9730fda13da4fff2aa58442c54bb123d33ce641c89bc5e497df5176eb6f008f4ce8e16d3866ad837675fdbb82e28df03880af389f80671379af10e747e66f257be3fbcb86929d1f763eac21d5198f621e93cb06c3d0fa3bf6bc7da5e332f111ea320431fa7cd05dbdec79eb41d3d6d0f593381080ce203bb92dc08d75b4ec13930070b615ffadaec779d6f215454fafbb35c2f2bd61be9a71bf4bbc1edf1491008ac68bc6366313a44afab823f62cfc4c211fdc7e3b4cc559b17eb6f5c6aebca388c1e540d961034cf67bab3f32a8b241cb12c45b021b54f9c93ea6b6b702afc1d5479eb05472a1cf85281ae2f32a5f5664061eff8e0104b70295db75c02197d0e84c298f7de4659f7770908ae3684f9f1c267ddeae453aed25475ceb621fe9f3539350d6494f8365b4a3077fe09153126bafedfd2d3222ab0e4f20039323028f0b83a78470d77ea3f8c4efc8ab6cbe316fda3148a29772958b2ad786065d2c5bbb51e6e3ba48b885b36f9ec9480671b108783d5fa6708c0bcf45028b49052767cc9cac53f61eb335cddd23ab7dbd2923b5487044c530deca9d420d7631ef360cd2fbf495bf105517efc01fd6a3f756312a50c080672eb16f781a8b0cad685674038e6fea90a42203e66849d17e60f2e69b5c50ce0aa26fc0efd00ed519106bbb19760b1d9a821a461f41f15933f25f8dc4ff9ef48870bd78c15320aff6f5b6f6419537b183a7534a396ca7b75bf014cf9e9ff932d279fccc8f10cde89a89440cd425239f06972c8da4548430a330110be5164d386268cb98cf928fc7747c63af9a9287888d4c6b210d914d6f051cccf3567ecb613d4c364576f8984d9dc92327e2863addc59ebc0e96074830ec5f5f57cde3b8491eba89ad30f98c5ac97ae76ad9f04c14d56a87f8aa6c60c0f8306e655402025188948d25b4aee5793ed5f6f8b179b6ab5c5837cd7188a69a1704a6866cff105eae9f5d5ce77d21f2c58dabca4440f374fc9d1e94f2bb204afee2eba09a8c5bd4ea8f506139132accdf768090c00a8eb7ac3a984a92ae6cebc65d6796744240031312a4e4034a695b876af2d142bfa410ce6f6554cc7e65364d4e2224c64bbc3b78ce672add79546625efa5c1c4d2d8de6e3164d533c215e66b26262fd7465322745c12825ffbdc79c0e3303e72baf2b1365cc47cc9ef946fdb0bd3e199a390346daf01658ebbd3da2fd1e7c18dfd73ad2a20eaee2b6cf5f28502ecc90ee06dd2cd2a405fb71e9185eeaa6adabbf3681bc3958912920762ed18e5f1bfcd9c007895b4a09e1895d57e01109e54a6050170419da530b9b54aec5908dc3f61ce8e275babf0a06b83a5d6baf0bca2fd74343b5921ea61bcf4527781ca4024d3806d46160eba7cc51a8440e1b1d65aec3d0525ac9c7adb651932a14f00e5ee7af2a0b4f1e01988a4db1f3d3ecc78d0ff6a2d0bee6
c79907cc5d466183a64701fa5bec105038116b85d756df849cfdaa4f4a54fee70caab6c0fdb70129a113297a8e01502ec25ee7ea23c592b8d5fb3dc9e4327276b9700496a64d9230dc5844a5ab1eb4f6838b496a5185648a25ba7c243006214096906254e72a4707827ba253b1dcd783f04f5f60fc583c82e02235ab84ebc19e55dbf2de7d8c1d555332a26c13e297233496fb353d0cc3c98ca6fd5b46857d227e2e150dd5b61771e4dfcb14afd43073fa2a009dda0d68eb21ec2b2082f4fa061ea6a83673a34788840cc6ac7f5589d29c8d7c9eb14a3d4aaa54056f85051c2fb7c40aa48c028e2594580a175a93b37f55ae8d55b28a90b0a791c6f103b34cf704e293da6a6377e10975bcbf433e8860eff5a2b730ba9bdf23f3286a5296ed119b8cb2b837bc9de2455857a895eed2e4e12481c2456232b09026b2324f089b04434ea2a1ea278f75ffdb970353c8b35ac8e4311032c50229f74105526f88d00892e633d2ce3fac4f8fd27f0f69ede5b9dc7458ae678c9b8ce647421aa09d88703bebc31ca836402c96b8ead9c24429140851f739f99b572a0bc5a0177f91f4106b7e6dbd85125955cf2dd1600e76a798c3743fdcae1970059e8ec742478495981d961002b8108d230509d847acd0742e03253873f4345b0861b943bd86749a194620ac86ffed42ba5d978eda8705a810c003b4c7bb9a80418c11ae3b9bff96028aa1898cdb8738998dffa450d69f49aee02ea70bca3e7a6be8b77b48ab8c2523ca88d991a14a728337d12bd39468bc1890d765003e705d41bbb260594f5d960005fff9b2c5128e7c7922a52bf569a66573d2b6d228a955e52f09c279af68327137aff33b88f13ea02faf77a1c0eff65d4c612fde828c65d941b066e3844b8ed5bf270d5312e33da911f4c8a1cbb35bf30e19a6f1c5b6af4b0a2645529d0aebc336217fa6fc1b8c9a7a651d44de67fe2f3b377f4b5e036f0bf13e64104375680ba7bb10fa75185d89c6a4522baaad700314d98f8bc3425c235dee57f9d475fca2681218b7477c8601bee1af0c1c47da8342aa03ae7a80fa463c8d9a472f36e7253231e88b0c4396cf6bc3ca1f07836fffb9a786ea3409f23a12cf1804d5e0673883c89c37ddd2f678090c7e9f16300011a9724a0c49da9e5787f0e3d8ac44940d6178237d96e678b15b3503e2bf781e1eccac302a740181d577acb427263fada9e8584048a57c667f380a890de2d6740c0b9095cbc9f6b1b68f9176f460042c7fd1351f6e5aa8c224632345cd2015f85d7b3378ae5c9ad7c6a86eacdcc3fc6a7add483ddd089fa7d836bc503191947a6ded76aa231403bd0a8a57ce7ec838e0bec635d01584c6756e94d78b346b51ace557c51455e93c16a0284b279dd2a19b26432ad92d15a652a8084fdef7a32d0de0b896499059850ef42bdc0e8b4719b9626853440912555a447021dee440e4dcea3028a2963d9ae4fc1aef74d04e0368850933717662bad4496e3ff58a715503a036214b5312a0bfbb45c974c954d679fbb68109068ddfcc74d3fc044e895f098c930d786caddfd4c3fba14573705c0dc9f7c2870a82c675c6d068499a4e9441311dbf15abf32fbb560c9cbacf1defdd0c38353719c01d4f65d12f804a72968067fb468349c9351eb358d87625b5ba7098fd3ad530187abd1ec60c064a9b8b54b051d0ac169e212a9a724e7e266a549ac6d80a4c148c756748291275117e49f8fd0551216bc6496b52020c708515012877b6e024a57a9f8945ebe8f3b50caffab79e2c7148d194f8fbba48803d0be0400a0ff604c75828b6951a729b2a6bee7057c68286648014aa705ba6fa3e8ce6a3c9e742383dc556b9404db31221a9494809e35b68e3c0ad67e55f00eba32ee2c2ca3668714406a0b8e3be11b51163e2c7b0d26b68f48fb89fb0b9f413a49f72bf9e07353559ee24921a20790a217bda8e1cba6eb4a884c532efb8f2adf1d9ddad6a01d8ba09628405adafd4fcfa897e342b8dd74b78e839712b39627be40aad215bbbce44d534456eff61db09435f41874b27d14ae0ac466776c6b7e91332a3a2c544aee55f982b337cbe68cab922a032d34143f0aeb56b96c20729049cbd27d2fca1528be365ea0e377f35b785327944a963a6ef9cdec86f32ff74dddca333564b6cc9c4099ec416b1d05780aebb0529a3b0dd531c5e7e94c302989f7ff11e465098307d3bda1d53f8018a74da8353a2d83fb9b70cca531fe38d9d24af474e0f9485ae83178d58854a008ed4534b2b2e82f136fd03c04eb71516ccd79529b1570344104ef557b55991ba656df93bc6db82d5556554d1a6865a327557fd551177a8ba6233657a8db1b841ac492fa269981a7f303d1dfd14635cfdd459e3eadf60a6c0c180e35829a89115aaca833b6540f855cbab605cc6b604acd599c810ce7fc821f8689c88957fab375409f2065c0d6488f21e5bf65641a627164f3f2e9d49741874c7af04ea7f2cf81b27d488fc2c6f2f5
ae659245dc9a177d7b83072fe0c5c4606631c520675d92494a58b079458322666d81b2fb453afdb01852442d0740ce5a29f96a9e2a62d8b7263e7f3425e4ec5d2cf37832988127c11695d0b23b298f68df50e94f2676249796623c44ae3935b5ac8e9b5dd1f9b8c51787c40821ecf05f04afa545fe512149f2485c4414ae9191178fc94c5546becf82a14cc43637470c5b691be8006149a688b5ba19823bed4dbd178d61b1320fc9e02828eb2245b0a041278a930065aabd575de1610277d706ce573f68f292542d09a2d8050405064bb9da819e95d78b3e79cb584728192ccb274637104b7385baab6b6127763b0362fd51eccfcdc10b2a32c5369043f7323dba0c8c896158b8d749cd5b5eb8bb2c7be77c50471c790e256630b49945f0b339de9cdad1849c0b6a3988f7985dc5d534505895cd922f01592b4a94b0959bf5f6b418c48ae6f7df9c15ebd1e774dddbc62149568d5b8b4f3b9fa46282971c5c2da559c23ff1aa8fb5561c7cceaf97aa92a4140c6808a152347f0ff3cf9b7ecdee0d66bfb1327386431ea081e971d19c2d69af95b268cf3e4c180e058a1263fb8c1b646cb972892e007fa177b4ac95ca1576a2cf6f171a8a2e4d26d8de43090e5c302d8b82797466040b923be3e25224c52d9d798f0180293bf1e84fb0cb539e032967bd47b0da82e4661d0acbb25ce019717d43931ece3340765556e1c42b6c66f4b7af47fd018e38240074778943d43c113eab231615556531c886d658609408cb979ecf3ce5d3efed53bc598c4fa5307706e6be2affcd3c31cbc68eee1ae547ab75602606ac748d56107ff5fc629a4d5bc1ff8d6cad0386406e06d1c2f5519ff43dc47abebd2c2c6ba86b2ec26d3c2bb0fad90c81c3664e661cca8d89687b79ec05495e6db6f7c211244a246e0590270cbe019d141568f8fae227f2898fb6569cffd72e296a276dd26b8069fcc8c0c13b6f9a7b585158f9204e48d24faee2c84fd889bd2924445768b299402a0dd7a7d77a6c9bf6c8ffa3891b484ca950e8abec849b59cbbe05c7eebf48e0eb037d1b29c58a501ae5b6357dfe00aae2252419c2833881749f5b872d9350d461a4cca256efa63d362e9e83fada688532f0f46f7eaa42500bf6b6f0e44dd137d5e7df349bcbe3d9212c156bcc806f0901baab86f19405bac358bcb256c32d2c6857ab8bbe8223c3eab0b8cb2972250e0a36ed1f4dea4fd7a4acefd17c97518bbd18ef09670bf6d37ccea654391791640b4eddd0096722b621c966b916b0b9264042c1e0eedeff0815266f1f19bee3a808c463de1155b811d6457a1acf5b616f358141e68a5d1b004651a644b291481049ebbe66e3fcc9662d38cb622777e5a9ff9d08c5da97c096927bf7ded065af300cfcf9a65e2af3f181273ae682d72c4cbfe9bab093dc00f3c237055149148e79474f9d78a26a862742686d59c91955ce726f391a649acaa1f040cac2b73ccb6651e1461a28a2954b358903c463cf407ea97790c4bbb9c85dde276b54ebb9992985f0f1ca0dac08096e313bf765834ce993c94cb3303b3811d9ee758d9e948022f2a5ba8f9135acd2e8d81642af337b0c2cdc8a2d248aca10b22dec90a9a1db79cf033bf67092ceee8864d1f25dbf569cba550ee0b2670bf64d4c8d32231cf815f727daeefdbc5dd64587b47dbc6c51a3c517119c8e3043fd59b56fc2226b751afc6e0135a74ed227e8db7a60189c7c8120872484ab6e281d25d90c227d2cbc4d077a2d55aa6077d8a530c0fc9f8fef413469833ce002931a06e31a39c993825ddbad8e27956b423da208e9790913971d0ac1463bd890a3cd79e21655a638b6c2716abee84784d4262c5420eb92847a5d691451a6f0d615fedfeffd03d91bdb482181867f08842b347f8c59b061a7a8828bab157c8ed2d5069301484a9bb0e4b597707ae582f45e539c5291233fbf5b6af038f9004f677df96776b0ac4e3b49e8532b9e381696b147e0505c92c14710e2c9c7b8c05e03ad2fc5602a149c33b81bdef9cc9ed1ffbaa8e82fb356f4879c444cb8a7a1543cc8c1c21ac6f408f7f1e6f48b8c716970c21abe4c0ea9eb6d37c6538896b13a4789469c0e111bf20cb29aae64b8477bd61084133894dfa0ad979bc0f90d639ee8798c74e104758d4c2271e9b08b7503847a3d7846e6d34c0d7c8bc7fd3ca407bda5db87a70830ddbb7d8bef66119c63f046cc25818a040a2a2ffff49e7323ff33ca52f1d7aeba158e49108e105bc921acad37ae235ca2104f2651d9a67d8b81650b6b456c2112e99ab38c70a0b2cb703e311342888ce21aa97ba02591b0ce81c81f733e70883fafa236f41776260264e88d4ec72259832a73ec36b6e412b183465d4db618af418e2ce93fdc3d36f0a62758f4657332434f58f4e229b319a00e5fec7dad46e71e75d601ca4efddb659a7e3c5738e730906fa4369c30fff61050a06a3899c9c1b64d8d045cc7883955c46d3562a95dad410028db5ebe7dc739b73
54614d97eb104e437bbde623addc071fbf8879cd689a73723277e1134eb46577084dc56e6887bc67e191dcc639ba1c8b6de9d7f5a08d17c4287067482933e75316648817190a7ae730b1bb8541fd6639e9a2e708cb54e87d78f3426fa9cd98f7109c1f8708950129572e4993a72bba79388174b2ece1275640a33e892a6e182d8e77be3daff03dc476d334542a820ee550327171d4c3f369e26403caf98c103a549b4225128d304c5f1d2c7b773c5ef1bd2eac4b1cbebbd0b5e6f97f49b154e61aaf575f84ef63219cd668f0dc18be688272421f6881859559db4db3e83938f11c8fb321051393d17dbd0b7b4e6bf31c0f9223c0bb154404a5dc4f010a1fbe35ab3b925b5c303ea11ff9aee8db6f550baf0efece811f9a9bba530f601e409037af2a380bd6e30ca8d581d3cb7df5b02c7309fddd6a113cbf0699351fc616a3c3144aecab2f6551e5aeb18f4d008c271f23cd27c8afd919adda98824892806c6a714792191d083c121e23cfee917fc2b4dd84e50bb8541b076dd3d6415d05b7c31ff85b841ea6e31536b5fefec6a36d9315185a1a03b8a0eca798d08b5df6ba9aca37502e4b1e87768a284aa56120065669f7f7c0867d4027ed9c87b66e8afd8183eb71a669caa103d369b77bc9989de669a8e0b54516677f8f255d3f817589628e28ce21edae0a25f311b11e41da0dca98d0755546943416e12ed992214ba291ccae3df36e011db25404ec37f45cee071d7c9643d14a176379b1fe31d1b9accab10ec0c555b9c45943c44e95925050b7b77e571c54f96dd89aed7fbfa1e6ee958d6502fd9d820e8ef960d4cad74716cac785c1e7b45780845c065052c45589475b81084426919104757ffad3a2392d086127e171c5ecbac7c3e348ea1bb9d108a310a7c83e2df7d4ae421d66504574be33ebd26c06e4a34ff41ff4de48e58cf824497d6e824417ca3613eb60fc3a8b4fa5a0b2ab3a1517686dc166ebbcfb9ab461ff03aa2ec960687a084a48e75cae9a6044f28ea070bfcb61f8249106737c147fc4efe5442f0f9c80452468b60bfbf04be263bc8311c6e350774cc5ca4c88d981d18d4064fc09b17aaa03b57b7731ec7dd6a5052584eda59ea59e6fc2573119ea04bd3a94d16fbeb1fc924a573e432ff84a96cc80c3ad0aad3dd6a53c2f8e8f34322ac2eb090566216f641432075b5df230329a8c4769b216a69001867f5931897abf78d19fbe8b8849c17b3645faf5761ffe3b841a5ab23ee002c04965423e8c2bf28d60202f014099910560f7135c3202bdb1fa091ba7729c3a3189fb20a5908142dafcc2c92cf5782b3fb10600d1da11f3071489119bbfcf9b262b8003da840efa69712f6fa9a2921ef2532854a244de86e869297e59cd1778da67dfc601b0e7524753f4274a15b70b5376abf96a27e68703a7a39133b035e8cc212134fa1177979f815a9b0329bdc5ac548aefc28cc3bd2ede93edbf6147e00111d6cbabce781c5f486d207c20032fc0273e54fbc2d553fa29a2f4d05e63a8e31232808ed18ba086ad02a501f6b4901747fff09abf5bb74cdb34dff7571294d8f2d147d22f656773bb696e9611e1b6fe808171111bb609165a699b33f2fe86145637b5fbad367864bb338eb77deb1f6b94a19b58e71480502c8281705d69b4601a2d9d61841d9b3b610297fbf292ef21c5ae0ed6c90bb1b6b8bfd60aac495c308ad2d04f4cd29e41a764ada98237381bce5c17fd2da22bc84d31ea861ff49204d50adc4bb70136bec5ba4b89de0f4db03c6d6168bec9008447f492ad0c6e271f6f8a84de624e1862c9a398184ef826be4337d93a163622f0a94bc96215c7db2d07c247dcbd6bdf0f7044a0becfbf856bf253c476019f4f4b4f4090f5796c5172ee209ddc624e5e2af5c1fc4cff4c837e4cad6449b6c270377760a0df4d742be9a45cf02a094b923a2876b7bf0c725e0fa9bc2cdfb504206a14d6d5574558a8625bb29791f7ecf4796051a0fa441129efcd60c8d203030282df1eba4f4eb2de2c36bf48b1670ca2208a846ad16ecb4448cdf01de9dccda13891204dfe495926405793113640fd5e6dd2def4febb6d14c6d7e33ad05c19543bd494651b0186707c18c43b7398bb5fece8bdfb9278c7f0b8a95799d8f60c83246fb85fb7ee869ca5e2db62ef4d59025fd14aea387612629dfe9001a827d135d82b18583a3912d81644d54323fca75985a3809f63fa2732c3b90ac59a5be61bcb08f78cc2c3a1a7ff06c6cc568d29bebcd13036c2652832ecfacacec434cee8c26e76a20fc3820b2cbb90124e11ff2c4e7b2cce2b0189bd522716d7f6ee31948c276951587eb4712de0835bd97eacc0b86523b7c4632a5704f167133327f74fd7e8e5f8740b5f7f478bbbf650358a03ef9ce1d02fba6f440a68796710d0e123e3e13705d2951d4f57ebb5329597a10b877bc189e6101de972cbcf4c1017b94860a5a3d56dd88eb454c31292dd75663550ae664d4ed083700a89db1f32b79
f2ebad722edf937338d39b42d6894e72be82c3b093423202fd7a43c9057401567799a4b3583ee5650db3854acce8795d33aed8012ab67739badaf0627be9e02e9fb8f1c3421adb400649b585e3b91a5b423ec5ca44350d34c9b6c2182bda976eef6ca7661ed12fd1992b84cedea470360e61e1e2f506a5944346698e9517c77f29435955e995cb3064905db10d40ce867d44ee7cd302e3bc1b282107cb5c8c9e5c7a5cd9e248f3a2ae03ec16fb03c2d33cd3d0c4a4910f1485f351098f523e6335ae03e59d089d90386000fa2775e78c12040af3c3d20a8fe8ac5f455ed3d06a56df8693af848852ff08a2c1789359cd1419f4040b2bb5e623166629896f02f9dd8c3649e41e4bd04e9317fbb054817039a56d9b90a47d0da571b06074cbfe1ae3c004318a5d06e696b48d3816e4621196fb1382529c955b98bfb5e113df452055a502803bf43423d60bdabdb7982d4f6f370f203d28d468f47b0aeefa0ca4ee82a6f75038821012c413473291dfedd7f6289b2c957dc484d4bae147a873649c8a1043a85170396b64ab83ffcdda733da82ae9e6af4ee3c7e737ce61e3ff181c14912cf655fe8003483991433522a8c03f91a59c04f591497dcc4cc26710a60e99331d56857e9e2765f0a6f31b372bc779a7d6fd0a6170e3e99c06b5003103d45317402ede425bbcda89409dd40641e00ab7930e4e2840e3bb0ed051c17a94139e2c70a9a5fbb8cd018b740302161c4cb58bef807918f20310cf8fae73750a05b52a41c6ccabcd2219e69ea65855591e2d5c534b090c8ba8c49dc89b0dcd7e8970d65c3340fc278d957bd9cf99b6b86059259dfc05a5528a7185dbcbf8231f1ff02346a782301f8d970664394ff08c6f42bc45fc0c399f52e1f6a3cc3de404210b155c2e3b204ce73cdcd63dc3c8a1eb19100a9c331b965cc1efd56b5ea05a33245eba2bc826ca86abe0afad578f6a6f15991362fc4910b5b537ca8cd58ba92af8970c9d0c209f0e923acbffd7145e6fab3ed7b145ac73328637dcd8a921b583b89ddca72245d53ca863e830264c2ca7b8cd2de64aeb0049d9886b9bf5aa0591b2577ce24b2df26179a6ad9997725c94399f1638f8a77af85715bf1ab2391b19c1b2d4f1fb3881534a4c866275a9f02797b55394572c02b3e6f0c700bc9387aaf8ac47bcbcc6968cf919546d0562e15ded546558dc209950e5c4170914792b564eb9fdacdea8f2c210becb476e365d5b2aaa3465d9a395618ca4634f8f15a133ba78ba544942405f798c11995e9bc8e876766619d663d1789709f0eb8a9024f932fa77b57ee5a0cbc3820b7b0a2be4a5bda709c7b10f90683fad7a13b06d468a129dd2d06cf408d89f7add213cf3ec052f5bdcc992f1e00d5bf33feb59bdd834216e757c17a9d7d6879936b8744616a3b2c4db6aba54f69758131e472acb7c3c46dcceecf00d56caff800668c17633db7df8b1d4afdbac049bf3dade79f750e76acdad179bce4089b25d9f281edb4a64b99b77ca1ea7e7c4f367c8dd09141b77b6ab81d3ea7d8dac810907310196cb34d3d1758c8f666600f9511bfd08dc3537cc6151b141177e2ab3b254e1ed8f247b685c650d7e1d646735184d4b610a47f96e574fd8a761e74adfc28a15af52763b74160da84c3aa2b422c4dce4128fa378115ac662144eca1088394e164de93f75552ec11cb23077b3954a937243ec1552535ccae9f88b6c3a3d7f544f953d4d550e3be8252ef036952855469b2a0b1edf644b49222dc4222e1088d05bfcf20bcdb9a40825d889371a0432a01d96b2f89d358b1214a7aa0c9988aa7faa5b9312dfd24e38d751a4646a5282f197149820d3552f0bd3c30706dad2b073dcd738e2ead9525303986b775e5cb4adc434eac19b72b1789e07ddf241e5ec94de3bd5011db7700b7b765937835bdddc368470a09a15f6e8f77a12b278389fd16f58ba73f1b08fbe5222ab4a70b17b2dcd0fddffcd67886f92e06dfd62ca902e7fd6119f0e77b3ac3449052a1f6dc690084a3fcebe25b77af99cadd9de6c03695c36a6d5ace641faa2b26016e0ea3ed305aec20b0cca1e022588a41ea3f0c5969a3674d7dae4d70960ecc701322e643542ba35057535372b5fb3463e0ba229702b55d88c6c5a052eb62bee9a1b1f6af2ac30b049720411d0a4b6ca5fa09825e58fd66654bc28bd3bc7ab3303030acd3ad7e230b34ccf149a24f2bbf409896b84d38cc276025d827897c8eb50cde2a07f02bedde995f81f857bfac4e439f2fa0657cad1dbe4a4af17f24364f8cf9cb8cfe664f2242cb3f2648763fae3e62eed2ed0256f0bd2070ccecce8c2117b1988b0a54a9bb4d86568858b14b990cc768cc0f4b86479285abd7b0cb4f3152fb417751bd2a77ee3eca1d55892b0051e064a67755adbf5f5f7e2cd88de063619c8c3d7a16566a85560e380aeb15e20437b331710de08b25aef74f2e3802ce2c1fb23642b8e3730a98fa46d58c19f35cf07dd67e1b1c10a28940
7a7c11ca036bb65813eca27e6f89884ea0da62f409564ebe8a6b5c5b3f7c6c0320b962b9d5a8e0d4ddc56efb979202ac792c763183614b9ace2793d0eed25bcee0e927e93cb89956d9ec9f4d544e958893a523ae8f4b747c5945b91186f36e6dfa89ae7f806b515ae4c5ae3fad55bb21a601236d126701b8715bfe0635e23a399d4d194011a65897b8f8422a099514e13bec7364b958ba06c8b15abe9e250ce0b21433756890dba156908343d033fa136ee190a163b436cc9703c691b11927bc1749b3344c4acd1adaf7b423b5482eef64a5bcc97675a92d90bd891de87853e773e295827c273c06e012d0a80d97d71b752e5b170b09be51a370d5db5d89c4938ce1d8fff1c979a4bc037d0d42b86c0c6b305a7649b43b562ed66f166d767850a088293fd804df47a735e514a369260313f7ee00235ff8ea394a91be96c09d72900412d10c0f7e6595814c81623a3d0e76a76023879ec3cd8d468a0298409250f6b066a02927e2146f7a77ec8f39c06f45cc0a7f3f99a61176c93c4e84dd2e8d58367e8157db114631e39a4eeb7891ee23011fa987bca823c6a01b7d937878af909c6ff5139512d09fe42d3c21d1e51949688692101ee4df866a8829b881c846027c536fe4c060345222243d0820451fb6e6a2984bd2f6fa0de083b66824e0a8985c40f9091266b617bab6e07c27a09a1c4147396c9676d0bd21500c8635b200f18d8c1b1e88c1d383ae6901c671af3bef937a3a00b05a89a1502db5bc4c1cf1cea17b9f5b64387f9a8a3ae8a78a99f0da4cbee79434c00789977e6da599acadae7cb8ddc50d3aae8a55f98bab748f1ea23d7f5d64425fb30d30585c7e40216092ad92b205c396337b8669d3025b2a85c06a7365718a01604c74cdbb9dc56f9c5526051840027b1743c33b38bed5a427cde5fb8e898c1f7929d12b9efc1d341298ab35d998197ff4654e6ea7de01d5e5146c9e74ff113c776d5939b2981d4a6080780abd31cabcffd0dd93e0e17737e6f58276d0188a7a3ef075d1789a3ae08b9bc1d10751ff19e5baa9c3b11d580538f99b87507e4087b12ad5def4133dec4b3ee85e9e61a2a4f55b0dd87195077a284ec617101470b887ec825b51d060391c64521304150021220d0208059acea101fad2dda188abf18205e46375d7561d2384ad239049dd25dff4332382366df1620198f1acf61ae7e91cf01655350a0ae27808bc5f9a70d4e025cee8a1a84b8c5c52f685dc61cceaae889b9eadcab3ed984c3ff3885d58e675aa2aa89b390a415f28d80aec6383f8e3baa397a450be55c8b183544216e870a4c0b66c417e346dbf99fa3877c8ae71e3d58bb8cfcfb90af97e11b1a1f831524c1bc32b474e6c6ee748fb1bddcc4f9e1ab0c354c21cf1adfbb0a75044850b545bbef942e8f5180ae0c17588daf09f9c54aa9fa55ab2b8600055117ec8741444e08afca9a541d19152de4fc1b2c3f64b40b3118724152861dfcbb71683402b388f360bb935a860a021630514fd95fba47588ef6f3c3c8711fe8968362193c5f2a75e76c1d63fdf625d2fb3624e00b67ddf9a5184d9501393fbf22f8528d359326e89fd6079a0935fe2937a7266d9952cc426e3cb3961be42b36208682f84f34235166c39e50c82c476f65425437b88c4a1171e9f47764de28e9dd84745d4fc75d5eda811434aaf36e926c3aee5679d58af09d8fd78314a9310671330f2c1ea52b9f2df761e6438d61ada02ecad1ada455c4973c57ec04060e93e4083dd04002d09c53cb3fb9de55ff360479a8434cfbbc26427654a3e57e9e85f6b6dd1fcfb50fbbf95edd6f670706ce18097221b0048975b3463790a2c07eff27b24188c8da125d3a1931559950a5d63c14647dcee043a4f475c94e36d45e60d851166d4e15ee64f13508dd6bec62eefb62da78e5dfb78408cb7076f3546a0d7056a0f2a268755cf35dc3481d8e66555a4708803e7692977d3e19cf41db6edff485afcc23c7046401a802f437fe5fd8ef53616c651ccbfcac29b0ef8b64328f3537201e882a5bdeff5ffc8a2340a421956dd5030f6cc3114e299fe789cadd85a56baff185f9581e106e3353da30bc4a273f1b4c58e066b453b9777525e815e154cc5d3711cb22aa90d79ab4ceb4fc10c71cb723459040e1117e87971bbb4bc0b2057911cad8abe8889b68e1e9e93e1835fc2fd89a343cf05a26aafbefdea52f143958c9ca929a491db3d5a161c73a4f76a128fe98752b93c0855e94b61eba3aea94b2a9d25445601bcd8a83cbaf6fc913233aafd7bb80d94d00c1958c48018b66a85303f4d83bb17dce5a0c3f86b5be390d77dc621b136638b8566397485333f568f49bf29f851a67814b079ccd26bab565969aa6b2c742541406d308e2eed0580660705ed53232541a08f1824c31880caa88a8a2221b0a9ecc05fd52b1bcac05ecfe157b7ec9db1fbaf67096c66d47b4ac121b2f63c0245aef55cadc8a5811c04f5a9ad0e3ea98d9c8e87f2e6e91335b
7643b4e04fe3b7d80d0df83fb6fadcf6387df9b3d5e5acadfcdfadba8ee0ad229f257f8126df2a0e60e38f47897ee6e8b55d0bae76cd91288df14ae1b877981c58908ba3dbbe77a963fc9ca629ba81c8f45524f7d597e09aece8f5bf4016be766576cdf50665c9b1af74abe246d38a7d2182476dfc7972ab50b06296f0015113fd35c3aca7fd9565e5e76bcebdaf1921bf1b8e3ddf3534a61ab46386ecf72d3b0816637ac010206e4d92843d0bf52c8725afa7ae3047b7e42a91424e25b9d52db7a3d4cd73e841949eaed6abe886da14601c12ca63763b385fc15e14b0361897cab831578e73be2ad9f6da43323f211cbdfefe4ef33cb4bd69737811c9fdf7136864de70be029683f4b8ad62d0426c2fc69b932e83227a8dd477bfe063d42820972e4a565576f3324d2be1119670458124e88a07b19df4e9ed04130f81ecf7593bffa4c5c7d00325c8bda710cb554b4081040a2ee8817ffa94f93f89e9a3511a48d654a9abc5c7d7bd60fa9b7bf16e6add11618abca062c29d8c3edc21af10c607ada659b4ea3e1a3361c220c4b8fe2b1b382877bde62db14c09fa17962dcf2593b444c704271cc7c18c46e4f332b382ffc11ee370d9c2cc07ac32ce765a3bb9b7e5ec281a25e7d6508ca4259255502570f262f95c0c9cc83c6b63a3967f89a3fa0b8dcc72e4d90c0f3216a712633f5ad3b0ab47376db5acc5e2ba612fb0925fb0ea7835ba257414538e64f67ecc4e113d8d9f4f3922ead2d7bef1e69642f801fd6cf8464463d6628a4c537be9985e8eb9d47ac8d38fa9e15f779e69f60be8ccbed97bf0778a6c6648adf087cd62f93a7b792999b573ef4622c97d33d2185c86d70ea7913156725e99a2f29d8c445cd34b468c60fa768a4f5cc0cb93d9cbee54cb976753b895b8db3be400f2c74360ad857a0225a86fe7780f5a4a531647f1b14840b63229a8d557ec51ad88754dc2db3a56de18b6cc7c2a8b8a30868b4e12bad67dd3ff36a68a6a73a72057244b670b73793548abbcee02c6f680a975692fb96bfde7b5097ec57f7a55fb45feb7f73afd6598b2bde1c97f3f0764edb4d559309bece42facbde9ae8f3e1ce7810bf87f6f42980de8f8ee240216f626ff06917cb6346091f18ee6e65e58055bf22eb481882262c5aeb8aa558d4ac014f2d798b7f4942d1d80b628394fa02b9aa8b4a8823206b0b96eaa1fbf648e4a854ea8261e0d13fd88cdbb8447517178163d4eb3a878e03639f76b64d7d0c91007528285d99234fbe01909c309716357a8c4ecb7dbd7abb3062e48404204572fbdb4b1605762c77b3d91549361138d9ae0104e82ed8f18fb7d11bcf5e65561a52d6be161e52d66e4126af90b8b8e4afa5b9a88f7ee7330f661f2fbaf6a9c8cd6356cd9d23909e358f95961d6b1e4df8dea0fba2c2fa1cda38132af5649be79d626cda65fc3659ae665b188983c60314a222348e4958e552e6840052b0a50c9aa1c3739ba269f5ed94c4384af43a1cbe1d6010996238a0bd50714fcd6c5d05c165184755a52d002b6862f8eda088b2eb4cebdd5ec9c36a53efa12af3c955cb6baea91730ac368d66090644962666a292dd3eabfa62b504343253cd30858b8753267f17a734bfbcfcea7055e9b443d92860cfc29d6339b048fceb6b10eaddc3e0c752b38d5f91cf49f04166e375de07d267c17625ccc2f84042843a8719972eb3017eeabd7a897ae1f0bd1b4324c5f32ea9956bed80cc5199be5ded8a1c45ca7b43de5b53b96370b087f0162b3af12dab1681049107ef6a617d85c215e46af2db2da3e1bbf9e576ed48be4642088b617ba9b78475536f5e874df1a23b789d18b99ab715ff2ac6f12f4dca6fd8c74c604bfc9de35db14865452e2b2693b85acdb2f6e27712b63210cccc4af89500a19543a64bd90e2559d2823d4b1c7753563c9c3e83b71245e105d736655f8adb0f722462a28c323544215d2f14e6d86ebe362fb3cd7699bd4c67cccdf2f38d10448e90e47297227a0a7cc0d7f5020a5f4a345236f248e290cbab77548a7e812d967d8c5d0750315ed2abd84b88ebbcec9654d70b48a35e5773e075ff689fd58d973cc143772e68aa4b472daffc0d178a49492fd49c5f2f4726f18ac49d9c8c0d8a0da908c28d3a356c659a6ac3c5f5e119f244b9dacfb86430418114484a4136a6dff2a8dc0cde3e0871b531bbc647c9fddfd3824832f34d18ec5c888d9ac49ea5eb3295a1e45063c18205230e47013ed8bbc7f0ddb7794ded10b8ad5d88ad0eab61cfeeb854b2c25a4b8862457fded58de78c0873d8cc6ccc09494c346d3670454eed8a68fe36e15d6118a48a3b7a9ddc95cac4ba7c94f4006c52d01c07e6bdd28f75f5daf085158f3b7b634136821e545e1e17e0b8952a68b6277696d7739c279b5937def6d71f43221c3b542348af3f753bd0e68d1f9f9143663aec33ed12ebe42833cbf1c9302d958b0e5335410555cd2b2993bba6a14265fabba40bef42e911209bef4737559ecf
56ad21ee61c465903e921b38ae60b407b111cf448487f710b1eb8f5cfca670fa29716db1ccd05a30f2c50dde13a0dcbcbf212f58815c6e4e2b80308abd1c6c1f2725d555140df8a7c55f29a6f213d76f1bb8fcfdafab9eff2feaaec1aceac9cd4147ff20b701aeab54686af40b3822abd2b5c3ceb9a6e0752a1947198ad1da43f9842d8283c46833c7cd6b65668b39648a36b2640a8512e232f3dc81646119c22c6cd4bfbac4f340e62acb10ff0778e7aca57c030d70cf6fbe6d7ca04bcb16c813a4e7fc082e7a548f99092049d2e90602dca3d32a9e8dd1c5f4494c362290b0e0fd5c8e566c2b716917619d95c2c8457906d451a1a9964acab25bee4928749ceab103ad46f135a6d252344f132dded693eb002348745f84b05ec19e83d339a2099dcac302eae487d0248075e53acf6a1dcc30b4520c3060d96feb7e5a5499011935e7a3347db9bd075003c93258830df2ba3d3573025c45e5b5157827096af78aa5054c2c686c80f2dfc5b93a3097d674eb407271687aedd15357af786204c0c887892044691124afac8ef70eeb96a083b32106318ca189c42d3d28afaaa832be768eea68efc1c547066cf581b015d6f22d9c782178881d54c7085825acb6b6a73bfe8d5fc0746f89ea494bb802a0908a8fd0792120ef80ff636a42e8ba0a46816508d4e9dd339128c185a248a425c9e3c4c15026e6dcdc837f089ced4a0960b3cd87344b0e77d0704ce7b09f70fb640676374e5052820040ecf08fe498b96c09b01cbe9078580adfdc29bd09eb87a6b1b022480d75c5ac6370f99c9197f9e325f010d82e080415ad4f2f5cf1b952adb2680594df56615c355ad333a5e00fa3355869370526dd75122aa57a2c861f7abea783b35a078336b640e821e0164c12879e96286bb7ea6f97e53e4913d15ad67ef6631d9d3320569206753bdb20d276f8351cdd07ad5973c298216be156f188f381186b8402a545ee9a435699d309a3369024657f73109a146c4affc0774652ec384e775859478d751d375f1fca058dc8a61f427956a92c0a8fc159c85c7c0bd81c65557051b6142957f5860e36a084b0dfdbbe6dad6197df55ad55cc13bd581cec6f0dd9ef4daf735db05883ed7a399e914140fa41f2b1b8492203016d2ae281f1d97a510a5d46f618d5ecef9dd42e2953d3e3cd3199c1459adb2f214166c418e98b71a83c928ec0a7300a39efe5f9a9bb46a780da8d8a561aa7030ac449a6ff05f18d92c8da063d771760ca7776b0a15b2da2b31841fa9a6d35fb9a5fdc7e725bf9e2cca55d133632bb8d660e8e0748ee67245015a1843bf8cf711f3a52cb194189c4eb6ce760659f3e1c996e0119d3a9a30b74076cc19ba9afa13650d100f766da004752f2a9b76afa2b6e2e6e0ee23cad84bf7cac57ec1eb3a84db84b8d185d3cbd76779a85b7a4249082845a86f345966ee52d6814d91ddb8daa1fae5ba1b457854faaaa349868c097903f8a7f4efea3bdd66600e47031f17c1c7a868cbd20bc0ad9698f4f39165b148e3206eb445afdd6ad09c52eca39acb4e18671483402090d9eb4efd50041e74136ace3660f40a2e6feadfc62a10becc8c29c52af402386a10d5a0e06da445e698d0c4ba557d1225855b9cdf120b5164bc00937eff52b07116adf097e95b4b21cdbf426219870bb16a422cbec04395b0fe68360495745765699d7329661e852c6dad342589cd54d0eb807f6a450f2c31dc6f2c8a081552317dbfa0e32a8e24d0d3b0c1c0d158be389d7ac7cd51d365648fce65cb3f69487871da128288ea794eab5d87aeb3db4610c242114b5532ea2b15abbd63aa1d4db36a2fccd8dee5679f705aeccd89da0dc953421c85ae9231cc69d51f317b0d9c4c56055d6904e64cf1ad1c7164cbe5933c32916914b0188e2ffa0b8934e3d89758b15ce977bfe2572db97bee8b446d6e8fd45960235f267b8eb83e3f0ce68579908bf48f8e0b2ea86f1f49f9e8bbd6795e617042dd301f663ddb3b68c505e49606250b4adbc1a68a0a18391a940164cbc28102b539472bcf1102831df4689dee5809981d911a307e848ca58d07d599b15d03fd4d37d2275e2bb0dcb2373d5ea8a817a24b58a93f1e99161933a24bd96a9b2a9caa7d554bcd9a7899c4af38a03eae509f55bfc4cdaaa7bc1425c6bcf19d7f6700e491146c6a0a12270afcd43d5ff02f56ef09274a37a7eaf9ed00256802d8d62d7e2b6964c6c3dfe5bf3999c111b3f7a460a5a808b93ed6d656061250344647eb05d76b40e0758e2c40cdafdec86a9ef8c8aae2c510b7cf44eb9ba0e5850507ac9b8e924d29c65d0d7b97f1092f026d095da01604bfe27d2aaddcada7e36cf16943225f5f0ec35085c5d9a9502dd8b5e76e356dae14208c895889595ddb9a46030ca60c69a612194ec31a966e5421dfedecc228fe79971e7a787f9342dd28207b88c6137c03ecd3c2fbcea50cef43519bdc9ffa8fc4d79e4a839630234480710d87b50100d22597416e4e4828
a18102b9f65b8caf13452ddce41c508bf13399ceb70c870229784935190df1b34163aebde26f0fd51bd9710676fb9f92f238a55d8a982bea674d2713961e1ee86672226febb68f0e4f1ead2fef3f5c236732dbec6aeeeeafd9e1e04490cb8e9c66617d388d52550fd95a24368a35b7cf254d65b9126c1cbd4ace8dd14476717b30459c636904f632bafbc70fb61e1af774be88223b13664cafbf07092d8ff0c8f6422338c6bb4db7402bf5d0aae9b7539d6065c7c7e86789f072148936432d187315080e0a3a6cccc5265792e793c188246ffc24ff23928b5c2036676220d1e426dfc1a0d2ea0d380e8497a34fd3afb6388377dcd33035ed4890ffa8a5b650514779a2a6f3b5938cd6c38d84b36b49d6922ee696882b520298b78b21e526af06921691613dafc203375511cff347bb7add3ac84d36a8842572a41ec41c8c25241fcc870e288e504ded9ab969c4643525f0e96d8b5dd7d7d8d637ab8c046927ce4a1e1eca7525fbd0ec52cd2aad96400e8e6b979ce53d184615e0d6f4d41faa4115b11e6bb35dc01b3df3e765a6c6d2d11e0ee25e34d2947b15e23325494cd86e2cc6349b781b3e870b928dc5988c041b0528f10de04f8bed58303bed17b2ce25953807486cdcc54e691defe45337408b45080b3e1c1fff00e749926989326a1868fdcefb64f7c34385a545b1e77cf06b4a199e71dc35919a7640b2c32c1b2c35c96bee60977e02c3e141a27340731369018e5f1bb10c9836f2317f92ff410c48201c195e9cb13fd4070e6ec56ea3bfd86600fc5b3e4b2c23a513e4c0955b78c6432bca7e8ec8d8518b9bc52f71cedca7a3001c3cada8895e61a2be2aad5098106a3b20da0d246ac47034e1a7980f862f28bddd21d5d572f45adba2a798cffd7ca5b0320bfd32af6d130f17a8d1b14b762ec95b38213de2c5efc088058b17de610bb4a7fc9c9003ad1a156110e7fb174f1d64b41779fa55141ebf245b4597f58ca6dc187baf035f05b1c2fefcf7469d09f6ed94010005d8bc01bb7e5188c1d7527fdfc3f4fbeb1577cfd7b305b7e70a8d9e68820b9378492b5d54cffb949acdf37d261af60f1d2604c175f99b6158d2bba594632a49fb3be159e45c5db318235a793b364bb8b5497b77a4e159fd9c6acaf9c33ce472f22dbedf0ffc83c059680abaf77ca1475012d621c58cde5c78fc0948d54f3217c9cf79ae8b57070e8ffd6c4bd69d912403a6fcf046a90c4a33ddf959b7ef88154171bb3e354a37badfdb88aa1a35d5cb31e2d0f7fb6d257533b631841de10ccb074381f354745fcc2c12579356bf16fd73de21c15467ae4781e4099c0aa2616d75ffd202271c42ee9a975cc8f4a3c9d0c912af7ee9e42f33ac1621b137a136cd12a3a2e71eaba276d9a07a0f3525bc505cc0a63edfff263f9c49c729bb6e9e82304f30e86c9ebbbb00995bd49547135f49991a721aae77bfbfdff834506673ff28367fccfe4278c8a50f7b19b9a2d1952c35baba20cc523da5270296230c27d1fbdd0a5beb1e77ab0c689d55f82763c844f937aeea3e336bba5d48d428b293bc48a1c41bb013c1116e17bf82396f040d05e418921e48c5abd2caf5ec2e471050bda1d0c14b656902b16038a7a680a7267be8aca8d2ca45fd17e5b14ff2284e4900dd770b8d404cc6464a4d15b22724f835ba6af02512b51341f31a25785f2c0587b8d4d5550ac7dc55d4ec3c15d5fa0fe360effb262cf1b487c5d1f96a523f837e4473ad309939b335fae54de8e979c7b1169151b3a830f21e14b3a7aa44171b6be3c1ebf9160fc1e8db3ee9857e5f20b610d6bec5a5df3efb642bbbe02199ea1f432075d856621f0ca1f8a613401279963cd46a5a7968e44e2b42b78e881603b938013270b789ad74d44a277e3b36dbc340336c6f581512a41cfd1196d5b1b896fa6cb27ea30b819d076d7c6f934c67d94d67be6357eaa55020b728d52752650b7a809f2dec77d4ca1223bc60bb5172e34ab566d1a03679aca4b8ea3039877a3e03eeb5eccbab4164e6d4d00cb04cb90c1f66d03b916358d297fcf6153c6be26a75852bda5372487f86775a5b159020110f53273a4a0ff5a0abd2dc8e122699292846f6e965fc5028302029a56da8f24c03aa473f84706b48485aab74d9394aaaa4b865d374fa159cb4ac22a4e57a757d0d023cfa001394c54cdfb8ea80144ea60e999abd097407484ed201799e1a00048cf6ca18fd534ec02239c4d58223b30c46398cb9b8a2a64716ac034fd626da882a15b8dffac7cffff0c22415762297a8d858308e85af1bb57cda415c56839ca3d1abc0a69d39bd8d1cda230f8381454392592005e47c0bc0afe278290c7fedca1ae7c68e139096bf69cf5d70bd3f392ab21676e2206793f7a700075cdb023ad961615c309a15a903692cd16a9a53e663bc734f48f3ca8d11207b23f26d75ae4c6df1d7a841cb2524025bc40a375bab50bb66f503306d1556c1180319dc94eb0086c963c3c347120e149137f
51d47da9435e59571916393ebd69b60cf8c580fc9a4a15e525da49c3509d31dd43b491aeb9727a4f2bdde959a94d7a5975c9fd7e797584b45630d8942f10920385148258f5552ab52a0bf5e64ce5f1a5a3248b952f1430e6009d5dad6758c2d2b465b3d08401ed6b12cd3f0e186e44b0a3267d3d663767f9043f896257411b1ad230ae536c405c4bab4129f1c9c49937947b0a32c1f85e25743276c0b02dbb2563527c012e2b60e015842b512259684756156b366248df51a7982bc653d4e695f883fb084a8435682ecc9cd70acec66d258433a3069ac5b982cf207bf2891369f2d81b4891f615b1600b66559d9fa080018aa305cebf75ac6964b56d5cb0de57d628c41f05e6bcedf287fab264fc84e437f79823c963d4e29ad98595b8363a4226d597520294a9ae55ac027a72665a1813de00a96b0ed28034b908154409819e4af668f3bc4e8c60bc90b523c4746e4d098c016546c2c2591e1907baf2aa2a32447865045a98c548076b7e387c0318e0a206d6051105f76e44cd8d10a348a36f19c1669436fd8f12cece44d3f876cc79173ebf190c855fcbc8ebd8975dba41ee20ac09c7a1cc969f1563796e619b935353ea746ee487346ff38a31d4f331f2ab1a17b7f37ebf28c72764dd3aa14ba0efb92fc4eabb477d84b2f615aa569c7c8d0b02e7ad11129de9b359c56db49730ff1101c59da21675f8204117afb259c56695f42b1ab344d643ba622526f5fb56ac8b6a98ae86a7feb82d0be08bdad5f829d6a153c3dcc295297d81f12bede8b61401ef7d5fba4b9a7f7a81d722ce190b35fecf651fe78ab4973bf7970d268faabd9f71a91333873354f4ca2725c38f3554a29a5358a3c06af0a3d28b3b19db1ee514a29a594424217a594526bb512263997168a32ca58d3b8f091524ae1a4796243017b54a14ac445152c3b0e51115fa3481bf9f8d1ec78ccc21252de8e325fb7a30521847456a1d9038334f01e8c431589a201e7bf6efe337df584f96f08ed963784c4b05f8ae5b15bdef4806ccebcf59a38b9c9991ac8e660f166f7224340b5f86476605a3a57d8d44a7c723e696e70260d85d922f7481b7afa0e481b95459f9a534af925988a058b051ff5432ce6c19cabd25bf457a5d691e44c4f65e9215facb7922bfa1944e5f5037aebf7d78b64b7f412ebd92fbd040922d6b32fc9510db9aeab8a5841e28e8f92888c71cf4b3cc43ec6214da684716ef3f254f3d7b117b16ef5126d7d09f6eb30a77240ae689e3414bb6e2c3b6968078ed82429291093ae105fd800ec01e111584295d963deb22ccbd22ceb9af52f12c978914d8f790cda4d8f013f46ab6a8c5655ad4a41bbe944b45a7d72ea7553758d19d93a9c34d79149637dd08b11192f32fee9d1872a054d331d7f89ea7a4c114c63b4e9f32a22a6cf9bb49d18c39eeabab07f35fbb76d08b3e161ec511337c4d7744dabb04bcab0efd4ac2777e65d5b621faa21d8f489ef956822f826ad641e3ff3ecb6715f48b0a7c28ee92130076543bea47561d7be98779dbb760c70d7b4ca5ebb4f0e0636d3b513315dd34a90b057d2dd74ab8994f24bf789d8470882c46daf02ccd17ef9b830cfda9876596c88c9543a8ec45444e8354d645e8569550a31d75e24e6da979c1ef322f83087d130e7f2eabe9aa762210441e2beae02cc81390b3eb00f81b96ac8e92a55915b18cfc88c54bc5845aeacdb98e3e348cd535da7c3bc8876945e82ba7618ed4b607e3accc1282d06e69483359369d2925e32afc17d599af56c59c79d750e685b9665a952c0d79058428fb5ea2aa27dc9bca655974ec99575eba733f2edfe609714604e13fafc63c98c446fa71919e6a886d4ab8858bff2fa8d5ab44eafab5e7703f0d83f1923b004994d8f6d6aad56e293939a34d49a1e10095383fcc12ed2269eea900123c7294fa39c3dc02c5d87e60c5cc9cf0a65cf3927016098f7f37a8c2f20c016b28e33af2f35802dbf2a14ad1db5c708c396b71eab1231462f220c5f5402f29855a85a1aeba03d8f21d52b282b9ae6b29cdd0c675996116ded8a1cb7a6e19bb9b8698cd1b87fc8470bb332eb5aaf4f608f3cabd5802d6fa1acec3c7b542b5836230752ac63b77db532ad95c18cebd56aee0be421b38e28cf7f595293c9052169cb5338c4458ba5a6e68ab53536d70bb275fa8199cd33af69f6f45fcfccb59e8d41be0eb7ac74c3d9835a212dc41d35d51f98d955ff9033dc3532dcf3baa4999527f27529e4d21fc8acc468c50865c348639458b66855101629269434a7bcec6f87bc54d775236deaae37e1caae04d8b58a50b5644165efd8dfcdaeb45ac10208750bd262a4824aeb2f6bd202a4a4cd577369f32d60d7571e9ccad8df8e5d77b0bf1dfbb3bb565ae56b7d4d01963b1948eb9c734ecbaa5ef8c460861942a88208a12d309011a43ba8246c986a8316226533000080008315002020100a878462b1683cccd234167e14800d7f9a46724e97cb834114a3288a41
0a114308318018038680cccc147100baa10f89f234fc6b5e17a5c9d3c451df25d19cb17c44a0c8cb2c3d7d608799a59e7acab7870af1cc7434c6449f2a908c8df6332ada62789ef5b619dd533746527f3a9a66793680790ca0ef8a2cd8ead99d4d607979d594c105870be9b6ac540b182229e02428adb26aa49ae0b59ea6bef23a3cea1d88c8fa13cfa321923250dad80b55596d7ee2f131aaf965cbb775a10d2dbf74f2915f96b9fc0aee1af51cee483a5303423ce675cc962c3f160e410635688c5182613be506f85df042c4504ea256021a5689453c96758ddb22e3dd142adfbc6cbbe5cca4dc0de5faaf6b290135159b848743fd97c4ef269a1be299b731a8acb93a8d68e7231ed4040255e08a66708c0d4d914063261f1d47e4bef09630d2b5bfcb3efd8934b56368107d22551f097dbe117de3575bba0b89b978c6ef0728994f08fc3782676922bc81e930d96db3b7d939833146de0a868773ecd286c7d2f164ab97ad7c2a1f88bab8a9da8414fe286016cd4f7df83305154c1ed6c8aa3ff182d33e844e1ee250b072ce09ed2cd51ca2af1585e140d2b88d5c98f5a0b8f68dfa8e33206e811b0e4d63f6be97ea585a07b5203448c214161fa46419caec7d73b829b8f478f4fe26234fcfa8e18a6c068fe5455574070cb2a506f106ac9e74112361038b01d6934e45bc7692e7f3ac3392c18de252250123043493b236bd35548913de19645e49b0c9bfb6643a2af7a2bda0507600e12224c808a0e4ce26e0a35e4e98b813e67b028dfe09efb4ae1eed3be03dfc16557c9d81458fd65209134f498b114ab1c1cf74e230b48edd91d64bcd128317243f91f09803f80b19134181a3afc75f14f6f85cc24dd243918e8a032cbe2787c323669bd830c64f345449b82c43a20b098e32aed6fe3cebdfbf9c6048e82d418db3227a3a1084dc83853a61160c491db40d24e50f9012214eb1898aa00c9456a98d95b1196a9e42b044bc464a53942e63e475644ee4b69755df71aab1093106c2c75915f7d614de2ba9867bbffaa3aa09dd358e81b6cb21bb42098de697b91dcc70470784a7535d3db838c0a67258ed8c90b40cfcf244ca004f5ad94376c418c69c7c33e11716890706c0388bd96ad5fcb3711389d6f9a6411f6bb884d45cc1ef4b0ad820e43d1e63ba60fede857e36e56b74af5a2259145cd899e92bfdf4a83d16e405d2c423c5b801d4f98933b96fef0fd05246df76ebe86e84b2663ee20292d459e94a33a681756040e8f81fbc47d2f50625e973ca4073aac978850f1b0e7b753dd3f9e2d32c9a0374d9a7a6e9038f99e307e6ad2a1af24b43ad477d1060d35607c2df4f68f9b4e79638c53d9b6b2b1887a2a631893396d69f4d023da4ad6b6f65f9f0bc6a28ce64e2f4da45e665264db3ec271e816457bfe18fa474af9ce288dc57ee2629cbc81c20de99fc41cb6be0ec3757febc28a6c6bc86a0aad57e2d7ffe357f4419aa6ff2b89e94f42d8f9718cd6949954c655ae649031c77153659d46929d8c1c3ed669109c2c9f4eb7f9803662a5f9470540576f6825a4057576c4fb243faf23a3ba8b8b814bb7ef650751a6e5b22605dc13880812c301c2b827ecc25c39f5571e5b2a44903381bbd6cfeffaf346a3e94dd0b83a647f8407ad623d9f38421fe3e4e0a9465a3afe9cc8c956ee0984816b09019b435953349fd33a3733a85706fcdc70905af3563b7bbe521412738f16e246c2db9ce912d20e833668819930ad0288508c3bb4356dd4d4214738c5b0ce79beb1c17af41d8c1093fc7bdad304374fdfc2355e9d28c7761432ee0ca28855bc7c1530182be5366997e2805c85e3a3959b2cf3f8f24c2efa0c7f5ea9256e2e562bd82400a720a3907e648de4e10a08f874919b7404b992278b506f6d30471cf957b4b5785a2c4295d043fc31b8da15bd54ac8cb7b11cded52dc1d55fcaaaab62c6b4dd16f979b88c87e4985bdf4d76e17390f48be7ee908672403832a3ea4ec95feb3ab51e32cd29ea8d56ed575cbe7f0d017d65a61f4e7d782346e0afdcd15fbebb5eb800bfa54e8994c5a68ce1b2ee415076738327c5fd5399fd96f5ecd460844899e09c2b32355b1d17ac9d6b7f5a44f9bf66b8e6482d6dc5f2f475641a9f4e3e91c633b6e984d6e96972a6be95c13adbadd88e6b5e1be2589430b3b08bde858a73981f97a59d7a471bf0ea75ab9d787fdf4be40c1a2d03e92673b3714f92cbd6605f1ea806374576bc64fd6424aa336184c93016be13463bb3e13bc725b5aeb7983f56ee8fe7ade29a7835e27756027d3d59d6a2104f3e92facaaa07a279522edbd2a910b0404d18d5359271da6ccc904e1d04fca8719b612bc089bd74acec490526183931d9451c18153150805fce57134425c76b240f1e094b1f1c90fd2311eaf8dd9606759a1f70392e06726231b0d530ed595fcf50b01a9224132221101662846709b411f18a7d62df648e666b3028c79519f5b62d034472677b7f91a2c45a73bf
09e52e74dcaee675476c18479a369ff4a719156001ecc412634df24c622d55ed9f9f53e9ccb64da069e8178ec42a3a7c016a05b346b67fad6501c4440494db64c8cb71bdae65681ef1cebecec61998f6d31738a5cb63780e92167034d59d1c77137a7471765583f60a6d0133dfcba8016a500264ccfe6c5dc4d762d57db135fa4c501a667da8ed5c0fa0f2b2d943aee22b28afc10542890031a7d31a6a491d12af99869295a794850e4c91929169bfbb8919ad04d4dcbec7608a0f195311256dfaebb6f72447e8e51e445b31769dadd1090fb5a65fb1015f41f9e141de21c592155757bf440522d514c478a1afcdf11536dd0d9b9dabeec3c1d4dd04e9e7b96add719f0d8e138f577b1e3ed77ec8e9c8532893991b9aff5179ea75e0ef39aa2d3ab814e094e65829135527151d5686e7451c8febb286485e46a87345c88bcf98d8e5d738026ea98d43fe8c69fa40d185cc450b6cc44f7f588247c8fce4e501015d7dcbab1436c417e18f8ee1f8c282f249e910cf23ee7d1772f9f878a9e43755cda6573e21fe4280c837dae7f1f2ef9acf86ffedfca1e930e3e812161ab51efcb68ab22b70b93a8fe57bb01e67e88da5db79d68267f8c191c138b9300991c07b5df7a6551dc8644812577a2d32ba1c79eb04c131b840a020cb840dcd1488f2a6eb2f1b088859591630397f5753017f4c5776eae9f07b26c1a60e6ae278f71750de18308de9f357dbf8b1ac31f67a818a793b3579078cc2b4bd531639c7f854897e50a86c16e408b0781753d0827c3b05493cc70698212bffe5b29e3484d89b24d025bbb937b2de8fc04477ac85debf6e549e0a0c59fad351c55ef75c7e8ef8525c3f5a99e339eae6a1a27c2b3a18f2c2b7f206a7308db1d7b79fda1198a4b79b4e53c0761cbc2aa73884880c1948cc5360520f72d1f525d36fa615691703ffbc82f5acc9a4545a7cbb55989e9dc6fedbb091cfa73d163481cf6bb1b5714e40b49a18cbeca296a605d894914daab45056de0133a5ae3acdec821077b5a315d8244188c3740205664da121ba3a57089a6e2fb4a9e8797fe62f0b10555f0e92c94c4df68db6d11c49540e2cc55e38ac3fa8a222814742475e0ee78ac9cb56b88d689645a4665e286cb44a29c62203ff7a107ce94134ebd51670f49d677d3ae8e3a5ae161117739948cb455a81a07c3c08d113781b889720dc2305db37e3515b9157d418770f29722f8da68dd694826493319614431a789f3771d0e67c28cfb99e98e5cd3c22198c07bdd00f4d48c8039de33c4e587efccabdd4f11bdcbe3ce98c18706b3d98ef29c2895e2d18eda2435752ef889c9fa00411379ec80f36b0a84d5566e92b5c719d46b439860d4a00a517e2c5713449fcb9187e2e5988c812dbb95d5257ba3c5f7152ec5e31585858c4e82d2f944e3a60218bc1be25fb82a83fda22ca2b74c40257064b5bd3d3f9d5609f61c4f69224fabf0fcfd83d2fe5ae3ac661eb1443ea50923e907798be478a7d552868cb35ffa9626919526ea5bee411b4aa610067beaa2ee0e97a3abfcf98ef5a1062d5fc38325bae39d536f19585941eeb55a313a8d8b0dab2f0ed18502876bfe4f26c216e87187dd53a9e4b668c8f7451d7a0c888c8e19de94d171e59443fc54cd21aab3b4e73fc39c60ad97ca3878688d802ffdaf97d3980c8099fa30604bdd4e5acbeb9ae680a30358da257bdb7b9bcd7ddcf7582909dd39579982d4196f8e0fd0967a70c14cd17f7df4f3c56c5d5d2b33369a70c92717eb5c9dee9a2d69d9c111b570524e02ec3f7aa2963f28b23ea7d6a9dfcb6a0e69652ffc3aa0d44ffc3e75ee0cb789dab890d408b357075c16fdd2b1e266d6194b5b9bed9d4c565a2edcf2ce01baf6911aadd9be3cd3ccd9c91be662ad93c0f29e720152dd10a7bc8908082da46f0cb2f401d804d74a554e344f4eb9204a9fd0fe7adc5322678fd81382d03cfcb3d0f6453ee884ee1fa040c8d99c48b0653bad479a21b83bb73febc341948647dd2ff125b3bf92ba031e77d8b2213be1950831951b31c0192d8ae225031cfdcc51be359afb56760f2002b578e1c248233521671daecf26472aa679aecabdeb92c614b88b9b6fb6914bb63c4d45a5f9abc5067d5ce8e78a100f0d7b25642b3aeb46b70291ca2fd86e116227d936e7e60d4ef9be70e88504dfdf20135012a9f72a602f025733adc1628a402a7d4cbc08435742f816df2446dc2568aa7b4c51f36f83f59a68849d93b561aa35ac799a406e369d66baba66229e64cf532a75100c7101715c7496281b16613d16747b02c6831df5a90914e3a1747dbb382872a664997c59f0af3bcd6da080cf701bee5aff628d82ff5b8cdc492cac0d7b35a9007fa8e8bfd01a093b8f561b89499785463be683c8690013a0e3683d8f0c8106bd3219bc16dbb56110e12ed49204c6c9b4d6a4f903a6620262820ff0a44a19c5a6eb7164fa339789fc0d7e17bee90af46fd2b9d9f5f78b638cb32d48b84ae4a592
708702a9e2b101c18cceab236cb8b24537f6a143a7b1730b12950a59d70258e8ed240874d5b24e2ba398960eed43ee3c9137e78c9c9fad7f8fd6b4880d0871eb438483add505bb54556833bf5b34d9ae5e5651c6acfabe32578801e85572ac07d73e276bbe12a5672e04a211b082159133b2e4851a0164d652ed47b5d7dd37250c9c26601bc852bcbdf66a2241990f68936434293787d6966f066ed58bd300b9206e8a94e8751fcb42091e3c7c4b3d56447d91ba3420e1d2a576ce5afc21f35efe49dcd8540a76d7c3a5f681ac59b7162f85c17fd0571a979ef6a0281beda6f97dcf164315c9d17426b774746c3fe649e451cf63a10c80ca498b662362a9e653f1a116e117a016fcdd49461f6f63836f9dab79c1e389aef4591a1602e8c7eebe3ef1a08e149d9ce9ec0d8778a3b1e59d368ec55482d29491b44e6d4323266e1cb52cad584a0871e0d432d63afe5a911880f8128e6bde5a8f1fa5e229baafc1a76c5948f695aa504cb834595eef0a67d34eb63f84cdc0b25975093eea163e6d0096987087881b28c6a1524727d8da006bfdb80eac56153126af5e8a7b0f3028482fcd8bf424b40cbc25d21abb7baaeda9f096600e0d2af33da52bd11697888022a01fb4e996c19b79e6f4a7db0ef74b4ce5345c0f9068daf348339c6460676a023a28cb6c9d925ff27318f6279d982f7fdeca049b8c9f3104c31a01d97edcaf3634e889955f8b1327acde3ffa3ecde6092da700955a29bb02a774cb95dc6458b30d6f0d42056cee3e1e7d538f74f71785850aa743ba80208b5483f1c8d0c09f5408b7d942dd811ac955f4efe8e265006d3e25035b0ed2020374e2b87d75ba017f7e249a19cb8dc3c320e9aaecb542e204b8a737419e685ed23598ed6bdec54bb64c2d5250038911abaee28cc296102eaa6d99f8c1899dea8c62f390318fdbfbbc8bca098b2176a02e505b6e5fb4bf789d660e7d7338425a560558f3b587c667216d3c82da68517813c25e6e7401024f9341e330071c2bef1b786d89f3546e8f7edb5019526a2617b78450eb41169b95ae1b4631df0fa59f8d1967680d059e1b15f82fb8afbe0d43d540771c6caf7eefa9a6f0222a39994e93f3ae1a8a752a8b01fd81d596a3267ed0cb33d9271abf3e73418f3dfd3d9f28b8f6334591f4af71ada3a5cce1da2d40c23b962b7887010bad51c5bbfc487dbacab6cb560203a1e68e9a63c744a8d951f19d501d7f628d93950c1039ff94cfd5e315e5e34bdc480bbe57509f9c81d4c61173cf017cca0193ebc224a06f901da69c62b78fdae4d7338a861d001fe99f24a6b186a609c93bcebca152521cc612c0ef0434f5c40424ce3a0517c2835d623451e833197471f10a115cd694d01b37054c909334252ed5bf9461639675dcf674badbcc30a0cd84b4bc0d8818ff0b569b67eb22c8bb6306e3d52136a35d3329f6eb5aae07d0381d2352d841b7d7966f2b8c816304ae3112f0fe6c792f2036385bee83a671ba3e93be28f8a8bc3a7e4ebbe46fb38a790634b665bb6e21d30fefd168bba28bd13b7dd5a1063fff4ef95f3eeb7954a05b8064891d2a860daf372af810288c5fca9ed96a4757aaeb66fa12955c0a963fc58125297c860f286241f5a90e8cc670b20b1408fdb3f12331212c9756edc9fe0680c069dd9dc9da1966fd7541b8d8445ce2a35c308589646d0733e9902e07055db53c0c9de5db050198ede41bac7e650e345ee18698ccc226d986373cc666aab86be4380e2a77319d85a4e30b4ad4ee898a4433296caecee931e834a7201b20b3a650f8bfe02c02839dec9ad281a822ed32fedeaf81896296f74677322235221d8741dbfbbb15ed9f420add80f0738566df5f708a7c6094e80dd01b6337a6313ae69892c7813ec46e7c24a7c8ef6a9c0000fb7609e750d8d17f0112a58d2c5fd01e82c59e10eb06d9520ca4a088c9f38ec2a511424318bbc51f88531c423bbc9589c87f19af3d899a03e8bed3b8f3ce2793463cc01225cbbbbbe3b762332590e0fd6aa2c6e31305a5e62a2df54454efadfe7ffa2f18f43be34106a581c0ec1cd78ca22e999d779400d466876d2b87df013b61c872247b3017002d94b05d64ed35962b20dca08ae15dd169644a73894b3bfa26b8c1b2f97574bbec781f6671a49ac8ddc7a0a993082beec2a5f831e2b3f6397a26e39c509e472bf46932efb1991266566b11d9f4b414cabaf13a92ed2ebd3512edee9041bc8d0abc31b3d12627221d6bb65b36bc04116f652703607918686f1619edbf340abc6eb63522d31ad3a8caebbdf65c3444fff43433af0790e347e61e3f9d7a1235f132fe16288831838e7cdeceb3e11e4a643a08d9f0826476dece4eec88e405b5c572e62a4fe71f20345329f68c3b69df219c09fe9b9e0f8e2ef25140609b8ddbc705ec73980c1893bf4f23281dac59c6c946039b9ca0a8df98d3a677e245721e49752d3c2b30ce751387d129422e2b7b64c7a322572b8c4681a0bcf6f7b63
908a4a684c4666c79d619ffb3221269a4c39727d451db5d7893bdfe21ec87b78714120542f5313f83a208db0a15af1ac72906da8b21cd57c4319f5480290afa1c691be8acf4504a6f2004240cb84fdb34f4ffc7d5343ed4024fe5185a46c91e4d855ecc5d6164e300df803801c3ecc1de082b1d1bfedd88c20b03b8feec7dcc4b1ad45dc04bbc4847f8e553e14838962d1784712c1103cb6c39048a9acaadcbca4cb827044fd6b741287345059d4b2c9648220a149934ef5ee6dc4b270fd4e3afe38692dbf746eddcf11de113985553e49a0f822170ab0721eb837a22f27a967670cdc4a1858b9fba479e2fde65d8c99a08b9ac10aa55b2f049b08ed04135763ebc42bf05600b384144e91cd9f6df2107d1eec0d225f5d057a32ce8b1bca17920514bc4d6bc3f2eae663c0c5f5a985aa6b6a418ba35132c5e38908aee032a17e88dcb86b542238bbaab05e0a6849bccc7dd55a5bf1043f1733e7a0cd748572e466f55700313256d8ead1e2f0005edf23fe8bb317a21827e93fb239db4516184733bd59d89e549d2ac2a1e128134410fa0f4702d0bf81fad35082e4ca17aaa7bd917207b34ab781922496307f8857adab99d853a803b17f68c0c10ea837df5326d53f409553d52a39f5fa23287adc33419064a20758573024a74ffaf53bb22e07245dcd1f9986248e9a08c5ea70fe010b80ef82422bc618c59dbb389cf20682f54042fae6d4f8419c98f3a1f027db364ebfe3acaa0f3e443c8a1798623ba81a7264cf75f446d3cf53c007f7deff9821d02bd170ad99bdf0c0d57bbf00ce2634329fbaf65b17261a023289c6766d416e54a80a1e9ab40334b7d49ecf145a9c30910451ec23374ddc535c3fc64561bf69df0af5e70dc9cb67e5bb9b921d1426ff5f6ff7d0bc09b1989b9a795a375f3dc294f6c54c8fd57e313148813984f2079d9e53e362b9ca3c7095a5ee88c3eb39a70e7ecd4b3110d73e7e91acec5c4eeac72c670005f49a0c160c75322706c5b333c3ff8b92eecde823e9630ee7b3f119349eed0804b6f5551e5635d2bb7925a2b69e6d3205ea78e938fabe18ab049343bb4fd5fefc44c97657930e9af5b4bdcc866fccae9ef023e6759ef60970a715d0a3bfd81717a08ad449c2ee50760f7a207392b77ba5820736db7753918eeeb0d19172f200828148ee5d1a8590f8a26818f30d5c4b887e4ce9c47bfa4eccc9bd4dd5abca8df0629ba8bd9c5596b9abf48faa756dc93497740bc44b69189502267d5f5740e65432fe08dc157715748195bec0c489fc2469a5bf9aef2d5773da10143ec415fa5f69e7c249362b05243e673f503bd326f342b8d01138b7bb22097498f32c30f54c9a00bc5aaf06a8819cada25d89c234c40721818d3418ea52f96fe0f14ea1ca0289a06fcf64c1e9b92c96c25cb7bbe566a4c3680784fc0171f78cb2d8a08ebeec7848126bd703d12c39f640869cc59d4c83921225ad82535258305a5bc5061d49f51211a1cbbd2efe672b429bcce77d1f77b0cfc5087a6db6084e11d62c82c82fbc930157213313a1bda33d46bd6b1a6d99061201d6771a699f7fb3ae22938cb3fdcfc64514b3e17befa08b850de0c740dc700de22947bfef13082fea51ba28919ec0b8c7d0669055314d31090a88c629e1b4e6c7d8602e1fb8ef45156f95f336501c678a46de46e060da278a9b983926cdc0ab12ea0f72e20588dbdc628789b195053d6bc24e2a90c57fd39e838c4f35d142dda8d1292a02b0a453362582aad149d9cac255770e523df659eeaf54896f7b4a3754ca87827374bd86c1f17b737b6491e9f7e60e9bb31e204b75d586558df494a4d51dbb45af0030fd2ee3039f5b6ca66873e3327cc21a2afd9f387c6b1437240b271c8a25dcaab290f39135cecb69dd2e0a9a3f65f260c51cfe96319824b8660adde710c6026387d1f0d226c10be636c3fad946bba98c4aefe753867620403b4917001f2b4fff0cc478d0b4e74341e5562ba6d2651a47de90c20030a537c50c45f0ee6772a933d045e5f7d10d7ca659ac42b38dc4dbca9b771289d553a54809bfbefc9b53a60f52fc110b463b419d1641d841261950f21ee187a63bed2ce7a40c493df2d7fe73c0407fef5e3e895a720de098c59db6e14d97a53d5942178367122e5a43146c84c583d5db8500355142918fbf706790d0a390b61ced3a3b70a3b70157ca76859ac27cfa5f6cd8758df7e6cf2ac4e1b7d988316666f35b0c26bd0d48dd93ea34a490b9df2c513ba4bce3912a0b70b3405c65ad1d97b03ffc342c338b30a06b232cf28eb80763ac3d6a64c2b75bd34a72987e84c37166b125e5f1787351bc7dcbe010c350a47400763ae0699ae1a31e07fe1cc236a9c806eabf4636a827bc53838b43b9495d9a1ad3eb841c20ff3fb056445e09952e2b66473dc7a6e854b630104e9fac4bf7785b22767bc04c7655a31ef823f72b70922a9e7262f7be319364c9b3a02f06f030c88297147509b0eda19a46ceacc3c9b35
085bb96156a1c9952d4e2514dab284e25d5961bc09b65ce9dbf2289bd8233aeb46b31e4aca5ce65eed72ee68fdbbf1d655495f1a55c4ee202a8695c9855d0e27d24c7e1818275b5acdae30f2df7af23dcc4f239162cc669ca2c965c1d0f7768f267aa2d592eb21e54687fb19f9fc3d9a6a4c36a5511970db2f161076ac62ba455412a833d21157022baaa0f8351df4d2d01e5705e4e76ad257706d81f0b4502f9bfc992c06382e668e7e6aa27b048982d670be175addbe1782c177b40e4af3c58e6667c5c58e16285c45612aba37a4042f3d8b8f1ba1051f00563f02b304dfd56688cdcf34d19575c25ca50adb356fb6850e8bff8636a3170aa44ac45ac5f095e1f3cf8795066be38439a53521096393efe778133d6c76e8b7891214f279a1f2fd38ee48cb7669181ed1a837c6799dfb948074a196c6856752eedace7a5082edbcf986c56dd2582917ee9f5e134e89d165a75a416c3abd22fc2b75378dfa0e0415d4357c1467dd3df66e2aa18ae4871eaab77fc6518062764375f6a3de93429ce59e290c04fb9339ba3d5bbefe308aec5d737f4d7e8820d0275f4f87c53852d5fbde31a811ca7dac1094638858ef1db2689a651e8444363e2dc8fd223a5b109d355a9cc0581259aebac593b6a5451966db142e277b325e2fc2d0f5777ba9afe387854d58e933ab755bb8b96cd5393705d31ce4232e057d41af5ab42c1b2cd2b5c378a2e63caeea392659ddf1460035f99b08d1b47db0746288cff65365c1a13f7ebff2ee1fef8efd87617d462aad06750495879b438ced6e0203976fa3479c077e60b3b7d74083d9cae65170eca3c84bb8a1f319e63a1fe4905fea6b96a9623e4f89634561c722d8fd47f51370b27b18883eff8a19ce352cdcf71c40f8eda60a2c19329f979646fcacba542a06d7cbbedb405aadc429bf4a62af4ccb2f3094aff87bf6870c2a0ace4ef18e3d1a94ff8045bebb8c9861a0a02c8f1313e75262e43eb1e0d336224423217fc3e8cfa08d9220550beeb7f05ffe2bf185982b87f63cd68ef79850e532ba7f67cdeff6e3be5e8aae67756602a58053e1e7322432323bce8076a62fb452bbd68f0f850aadc113e3aab1d2ec558ace5816f5b3ec2901b58fbc1b6f68090a997e8904f827d51fdfc12c5bd824ea82f19bf0e89fc5a6135dea9ceb07cb6e8c3b2d8dbdbda8d138b22f78ace24dd0450d8dc75049800b3303a57e51594eeb56a03b9673b801900e92ceb687a28fcc7eb1e06b35ac3a379e8e7aa9316cb3e3e86af37a433fd7a452d7a1597be7b3679112251e87d2c0547ee35e8340060d1863db24c827dc12b9697f5b99ab9d4d376cb81bb81c624427f1183e3cf2cd919f317b96e6e4114c67bd6a0636de5fd13b680482deca185e0c38cb331117a17dcf64f544a3251aec125db3ccd3f177d16411f959ef7915142f5247c6efd0d41cb5b4117943f0e0cae1d03e27ce11ec707c085cc67d05669c5d04f5273129127f27a544182a67a4f5c1f9cf9290a62d111c8cce70384bd663309888930d25c9bd1c466b8e3c91e718c242e752aaf3878aea6cfc1796cf824445dc72a8bff5ea916fe0831a7dc259da74e48b4f0a7b937146268d073efff88c16146941fe22578a3871ea3b2c8b0eff00e86f1c4347eb5bd20868d3c11d7620137027cab297ec0cbfbdc98ba8f811028c276a3628478062fea0c49cc8c18b2e925af06fc0a4c40f84b67917eb408896e96ba9b43990ca6ce1ac3a55839f44acd19b1da284d6c9d749a819d13746fbb5cb33f9b195936df5d345f9c90509956065740eb8facc7da0c13bfe7738a6ae9e59b24995d4b66b8800b1b04af99f3e1511696913ec92db68247a9a9d07b08082dc2fffdb6c18dff5288f0861b779873be24d7d5544f80ebbb2d155a987aa2378d7cb56a6c549e4bb2d080ccf84384f78ddb9baa083eade9eaa1681602aebb86adc56c46e5887fb0344705efe81a081849f3cc411e68091b1febb184d88b37cb2ad9bdd88a2b0317da26924ad1076b031f7b4a3d87b161aabb02879a7d3876fe748589fd106780d9e05c016125c64cbba498c20fc5a5373ed7e8982a3c251e8077498d10ed37391bf07c245f03aea32a2e3e162e013b8ef1f4c245d1e0cf941e16290912d9c8d3237aeba240b2d130ed4483e374ad5bbe064c405754052a9f0b4127f90277c921f672a22105cd040e6898844a39cde1c36af82541620a5bc77c83910cc58a2ea08d1840ee81d5539b2045e0d13615bc11e28406398609828de186f05969a40a32dcadf37310295a1e74cb1636bc0861b64cf9a119052243b689bcfcb424432f049d63d79ffd1b831ce697e02658a0e73b749c76ee72e92eba0bb38fdac61e90f0f4336ecf48fdbfe3f48eec508a67ad2634c9fa290ff39c2a01332c67891d33e7d7b211f6546c66e5c55cd0a659a409c73bb189fa366d9b19e637993e3aad5f1ec60bddc014266a06b2a9706038ab2b783c24b62a71480f7c516d1985e2d9
d5fce36d9422ca70c0fb3342f866800b0e7a23e38ae6fabc7b74d162892746716ea9120add7e5a9c007411b98dde59080f280b3c71a0fb8cb95f3fcccc23ee30c9bfcdc3c80e8117cf86616ac22018725a07f18c56424d62866a26acc634219990f541e06b9984b5c3f78ddc93c049d27f48b0d8abe36f205c06a16265433a34a6e9ea1efe10d525c59b9518ec93b66d91c942b9441f65c11cdbdf01e45c66e6a43fb335c8d43f1b4a6f050984e100a160718526a8ab08d16482d922c03f9834f1f0762d3f8e42b466287c32d1ba7f9f97b950c4ce4a41a35ee76c14ac5f5fac2330528f17a06497d382be709b625206d52e1ee46062e5ac6bcd5945c8e37458f85ddd5900ce05459cd26ef123e636fc30b98003732c78e54b4b70b5bde2e37da60d7a2ad8c6a78313b0dc7d54e3f984eb0a17d606e6dae93b71a7bf15a50bb447243e919e07d5f28a699b638af4b0e4befc415aeb4a4a1fd3e4439529c17aeca55fd01729a574906825f43ef1561ee75f5723b212a82fc156379ead7c8b23e9ba56fde58a8b9e0179f1deedb53e107b400a23372f3339a1e5e4c7db6c27a5f00f1b39e677849a50ecd1cf326eba083f7972a58021abdeb2153d02e90d542fa09a996beb1b4bb98b8f3171d96df9db69826ed6c8ccf4256f5ebc8da0470ce598591f6d482d2a418d5aa1555aefe0990acd46412ef97b95edf8f14f56549e36f6780250ffd051ac7333dafcedc2a5db1891b053649236e8fb0fa13c90d44ecaf0c62682698f164ba57508d699343cbc24537448479ffcecabb91eb1b05c05d050bf06097b930686a5284a38e2d3c772d8ad7a95b250a9e7e919207c131b91ca953b185d687d18be38a8854dc27c3fe4763c18588a10bf5f6fb82aa586903a02471d3288305435c5e99980336be60e1f08b1c782e05a52d974424394a61a57c6ac658613ccdf96289125d427796e946e76a3a60c04843505506b6bf3968b84e89a332d165341b35947f3e0a7117aef753ca6ddd0e62f7602830391090399fabf03ccd6f5671ad2c2bcc983f35b118172ef4fd4d98cbff0119adaf668d209c5dac3e1e8dcf794cc114007db77e25da9a017f2457a1262f0c626f51d85aec750c99aed78b9c301eb8ad7ff9e7541b8746c20d5db2cb202556162fcce788fc6fa5a12d3f024f7a6f05b2a47ecb5844e0bd9e664dc70b67e1897aabe20f80c82b775f8a9e95d6d11ada40de925157b2f8733e0ba0f5c2658d268f30a378457048d87652cc48b6e486a34842cec3d3c332423d590d80d77289289c1e3c15a373a1f78df9553eaa0348237766b63158577474a0f9542a46a26ce3ce0f5a804a1c0b2c4261c11ff269e7a409280052492891159207c181034df0a8a943a177c6ddc2544144549ea7850e843e772b964cfddb250074e758f22c4d75390159bfac4f8ba4c7d55e676695254d31a46828c2bb132fef170cdd7930028a9afb3a3ff217b568517934be07353a72cfb526507fa5af8bbc92e41edec4aeb0dcb4b6c71ed7e02e398d11d13ddf001d495559df8bb5b9d241919e99bfc2a8a84df365687e57e86a91b279effa58909d078b1ad39a7c2007edb6e7de588d2851833b0045450522262fba1c4136ead1e4003c804eaf83b5634bfe5876cf39601e3b589c31b62f43a8c73b885f1003a565db70f964c6c995595692aebe3d0e7c8c8ba440826591a81909babf5952951c9bdaaa14deb0aca46715008220261dba444098767fbfb6d09b013c246c87b32ead89154e20b6241407b935435d57fb29eb505e975d31b6b6d384cc5213f55b251c3a34d4e88fae10892ae0130d3603c4125d280738cac91a26e09bf104b45f39859e63a601a9d5140ac09c6737b06461b3459ab6f23f39dc6668502d52bcf66135b192b60cdec205effa64acf7197b86062c0e8bbd10c8b1ee0f0e5a6a46e5161f5ca19beb0d260a72c2bdb4308ddbf589e1b5524ed5536a53275a6c9ea96dac9541f19ac4b79497a1758dee785849370f8edd20708987c1c18290de6e422e7cf950e3571ca7c0a4f2003ff575bf5274b6db289eb899e0170c5e39f4eaa27b277b204db6d59be3eed714b8a4822183382a194ca848f1d9aaa2a66f1e6a2baebd8e531e42737413ca557a4b0ef425ab66d0d2ead68a3a85af603735089da07991f486d4d6bb085923a81fb39e34306265d4b0fdff9e375515899bd2065404147f5a36d319342d9a4daa4fd7827ed54ea42475590185a4722c0f7cebfaffb0efe54d9f3e3c01fc073775f5195f0dc47e2935c8fa977cba749f17cd751cd86dda7b387a4c165ab8285aed3ce548e2bf2b40a192e5000a48208664d85484b267c584cac4a6dcfa94e4e1b9826dc06a921ec02da33605e36731405effc5d17f88ea0d210036b8294fe5cb38ea755bebb9076564aa0f3953081b657131465940dd4a4c7f9b839a1320aeffe2f627ee7fe1f697eee04f4a1e7430e68185ffe0304ba1fa6c01e9c52ea9acbeb9b232bf
d56ab0a85b475558d4130bade0eb15afd45fe67c1963628d8dc439f9adc4ce060460693b7eed2814ede30a27377132ed8f0eaccdfcad24a0e57d42203712220ce0ff5625b3545f9beac14ea84047e6a9556ff62911e140d9fd7c4076304d520bf67e5cacb2ee949227b471682cd720ecb1942d4ad9d2e7995247e976bee3b0bdee6420ac06b7515857036c3e10abab036d847a14833674966858fac509db2c2546cefdac3be0062a2bca3dbc8206e8b0a464b7901533aece2fce0ad7c5492052871d18faa58bbeba258e408cc50bc4b39439c4768b4ba63221308eaad5283667594a479c8a049a43cc4216a61ee29aa0644c5bc282f5b456a472266743e70f2e517083b924ba831ae531811075282a277aaeed9a45b787392a3a1c2be45ce5cab6eb8aa5e8acf59c548f77700ce44ac77189cbc2ec9512b3a4ae0bb6733ccc0603be0425d02d46a33e4c626d30a1a0170a8dd83724bb0f167d829011d5e3bb129bcc814b17932104333d42423100429b3e51d0fe50baaec32e8b1931674f2ed175009728d92b9a11ba56e106312e51e0c344d905d5c41a8492361e723596bdb82a926a825802a60e3abcf51436de615b8fd8a20b8a80ab875e7ab71255c324564265c490dc1a6989d6609510ae445ac587b14ca8d430819544ad63d070c5c472b37d806b8e662d67af9ab4165606f5dbd8953a91468af5225918fb11b71546e63af3c1b8701f0f56b34ee342761dff8ece80763ef4e90db75f3598af7f841f310beee8701daf4ba02d5302022b8e7b0b3bb70e46877a410c67817a2593b0b9e2407b7f23ba253fae7f8f8e0b04dca5edc05eb04198e4d306a9f3fdaf2c0c7c6c2105023cc88cc063c20f6eade482469de186fe868a74034aaf5b23c149de51d491f7373d977251c0033a37e047aedaf323ff8a3b8599ec0a073e4cbcccad00c874c055feb0025c3af28d7d6cf232b9d023b6eb5a97275044182b5f75e058ff7e33226f21b878e14a545bdc42167d6faca889a55186e01afde367c8e83c7061ba90c00b249c71ecfba41a1a16c40e06ce61acc5c084949ae13a7b0defd521184479d76a3b194d52e8a35bc2d00e41fe448ac7e25dde549ebfa8fc88d3b0db25e0a775dd17fe4f4e311aab0e7af5df89ab1cbdec5f020c141fe1c1e8cd16e049a102228ebbb6f0841ba2611bf1c50d9e1ec830bdc5b5434bf0c95d46aa384d1482a33b5627f545aa0a64087d3dbb8465c6d3822c2868214fead9377da792244c5eb52b35adad7c2c5a8c290243f4dabff30b8d1c605c65dfc5421221c92a0ce1ca9a75319589202ed8834a4bcd004e3d2e81bf7c7a2d818d2df2d2e90e0b4f402c467a29d0218138ef452650471e49f1ece81345cff73f861155511712c6b3c38bed72d280d4c132d21fb6a085f8459069f1b71737769b0e8621e9948fc44c9237c0dce1cd39e026d3c7616462e1391e98348dffa8d495a5327fda63d7af3ce2e4fa3401a2b5d4a876b45d01e63e1afe0f5deb7df7279edc44916ce23b6a08e1e9b2e9a4ba346a95b98c2783aa895f0bd000c3bcdecce2fe253f5b75521c4ca8004f97ed65e8a25a875c0d1de041fb788b6fc5913b2977cac4871c2508150527bffa05f7ee9a918908c7c33a3ebefe144452afc53ad6de8c1667c0f6b3346ef6f1b3c16ba51d41c8db63c49a6b84f3a2ee3cc7221c3ec272c879d33a9ef8085c780c4ee91f327dadc6657f5a16b78c0b30c00cf180405b000eceb9cba2d7543f209ffc601fe3903c68c7596a7b165949b7dbe2404f498e109df2a14c86dd96a70832972dc2c26912eb9abac982be7b7146981d3a7312aee89639c28ac85074c3bd365d38cc7ad0919a1833242c3a5281cbc1854ace95a9fd10d770dffd2051c3d2f42747b9d785ff36ec4fde35c177f793014f83cfb0cf64e4e76b6b503859662a1ae2a73aad78ad25ba28e534968109204e919c82dcb06f571ba06d208b3a7c4249a141b384c624c555ebc5b12ce977dc7e6502094a3a42558745f92f1f83811be8b0ad367c1ae8597d1b2ba8e236645c03bb36ffc1a537c7c63cff8874a78c7ebd45a9973367e8b1502c8422c7aa910cf020c51e18c1de097dbbb2b2e4cbf41199b6c296050b9afff3b2181c80c2eac266f8e83159969300d06d81dba66d768b77c209d4ba5f94bb212f464a62f3b9760bcdf8edf8b181a42c4a3e1c1859c2cd7320fc5409bed82b05de6545666e2f5ecbd342eac51e817f12b2bb15b6095e7cd4634d0d5d2ae1eb0a718a5ef93d904c840dcc06c63022bccb316c7521dc0738060ac9ad4005350675b4b847fbfca04cd858fc5870ed55018531e0d1e11083f8af0f492700d52b8c82ea48a04bfd9c5a114a6d20d71746023e36661cf378b35a9477db120dd795e034ff758ef59e10eee4413480b4852ded13b6450770c56177532136991b6acd7e016c8e8d8c901386a041f365136d3a27241effd8124250482d0f21b6027c6d2f3d27f0295da937d6720
504e1c84ca996a134e15140c0d84d26e8c4a33a35355565e7962b5678d6540a95524b2d019e44f50223bd508216d9691411dea74561dc4f0352120b6c64adb94ade106d009beb6a02473a6bb9d5267f6da2765de9705c3dc4aa5befdb335de8b22d00f08518d504180f236cfc04446617954e9d703652950f31b61e0796ad71cab9dd4346d5b873920caabbfcc0527839711743417b4a2f8ff801daebfc72c0f87a18032267d834d1aced639640689090665c41bf8d9a2c6cb9811a9971f89c56253ccddb30e46d42908ac36984974411485412cc07225d694522271e57373a261a4afd1f6288ed4efb2b205f7218d646e59c22ceab5cefa0bfb6e5b8c07d6f7cbedce33d621b1d75ae5c818c4cb43a6c0e200650098b4ffbb3c190a67c8a04ed8d4256b3e708aac7d548947c82b6794ecdc882a5df3f98495433ed3087944531bc30db6028b5c7927a55828d77af0bfaac203529da40b34dcca31dc63ccb8a132a26c108219ccaf0d18a613d9ef9d42100c4c4eac75e194301c40b9f632fc8677f9f043e5d0c74efd97187ced91555a9f595d8c9183c7e2033d7208e8f3076897dc8a8550caa46c536406025c0ea65035911b6e4d838139c2c46a6cd0ea461bdd6d90b613c1a82803c7a4cdf528feeaceac9d81ac8dc43693e0a98f11268b9b63c2a2827992299775ae711c44b912c01731e65bb4d3c4e12e6bea33b2c6e372bdef81d1994289333760495896f9441e523a2234d4beb452a233a9a6469be6bb5dd04351e62badc458a8ea068f098d2c77d7464cf7110dc5a4eaa498b53bfc6a6a0ea3ef6a44399d038a67b62ec8c43eb312cf57083885868514864b0a16fa4b73962dd5c425c5d70d49e566112a43b389a40432085d9ec3fbe2fe5d31ad3202680dec1ab2ee037ee24fa4e5a5236b2cdba4ca3e4f2baefbc30ba2c087bfeec6f3582d06d415d8d02b7b121d4bec1571ba3441dbae5af46b677598ae9ba5bcfabf1f2b4cff50e738841a22df87234ca7e5ad39392d755f4e68c6bf189fbf49a334ae42b05085e481fec6a08425aeb0933ee018bfe8832267d7d7928805130bf62dfc9bc1fb2f8abef44f612a76e7ac471e5de269ab60740f084f387a613f62faeacc22e4ad32d84a618b312df535053978d2179376f0bd8bdf73e59b47a543e6f68904f7db3185fcc7279deced1baa1f062e2b160d6a87d02332eff6dffb126a88c817152c06809ea21e61f9475f3568e1a63c19e3e33eb19b01ce8ddfae1c9bf6290f470c94886379f3a054fbe32d8a34696023442c296635f80c25d7079d6e46729442814ac583bcd35cd1fc676dba2e549b36cba2934f9ffd3cf826a2d47f4a373f9834d015e16d5e1f0f82d584f5d179b824d32ce2805dd28c14a1dc4f30dd5ae9572b1a8ce1d56b84e4b61940ff2fb8eacfa8956ce0e3598027926b6a404d52aead6c7e6c337bed17147aa449f8cbdb73419a50c3d52950c1aac806408920a41c83f3e4048d8592dab55241cbedf84c6140a1dc294c251f4af27599b40616467ae53f7a62fbe98eb94664fd07a4f3e463d1c9091e5c77aa4f3daebe083d1bfb3b922fbd45e0f30d3ff311773cbd3f35b381effee7e50e871ddb58545892bbf85da86308087f4cbe3ad860a7331312bd57326a51077caf02d1cc99c45163044fb4954b5e53219c1a25335bb0f320f0f1e60aba97b54cd03a830394fd5b38d6e81dc74df76fe8d39ce0033ac9189fabf9e8376dce56640194d25c51786276a8d3b12a7412614c5172195462117a0d8a1b499857c7fed8ceb0c6b7012c32c4dab304847c93ab5076327b473d05d9a60eb27ac8a7c0b917dda33975795ff2e1ff7bbd97ae9475d4f016b770d7a173206d139cb49398eec138448377a8e445e4664ca8f32f6c6927712ec3a65c85292f7561dd5f3c63a13724b6e9bfe5d48ea882f481909a3b0de246a9c6735e153f9be32efc0f1bb2360ab6e42fbd48f975a90939092977915a03b6521a647263aba1f897a8d30d08ce3256308bc2afea2da4c56b328f68b2cec2214566e27b81e0fdc8520d3232d3093fb980b5fd0f2e48aa0fca16e8a64078b95000f44164aad4c9a8b15b2f7838fc5f61290d436ceaae8d542e1d1ccad379308fd32c6f5f8fdba95c5d008e3ce3469a8115008f2694743fc2edd4995d4f09b3b081436c4c731147117518129202b5c549ea1eaf644bc3bb877af9a92d9616d9374a6103743c61735d605a759329d908ad1951142466287d9310421921d8ab1d1aa01f0177b3368839c7144f3c6b62ac15d66a44ad326c15fa17626f613ee6ea2fe43ee48e867c89d04fd87dc91d4cf90bb49fa0f7727a99f217726f51bee6ea2fe43ee48e867c89d1e040ae5ebef3b985c97efc400cc902ceba6f6cb68f5ea844b39d6509e3dbe1db89f9c1bf18a8409f40fdf835201d6e83d9d597623badaf91b703e0bf836d81135654907ab033d0d40199267114b19552c1d5b46f74f8138d911f0cd012e3680a2
4888d6d5d95d62f2c89ad7d5f8f9e779d2c1c827704fa4deed7ececc923f090766a9acfc91b3b27135fbd53f6b9006a0fbb2274007a8ec5f0218f19a48c8c1488970ced82a78fb1de04792fff8694c22d13179e101d09f7b30dc36e2960f348de7f24cbf2bc4fc30ca11a2db6b661a232c14b59e668abff48a352f927cc66fdf80017244939e0dfe6e0a57239bbda070aaa813d41e88e252173832227335aa5d1f6b23c86df53414e002a430a826c937186bd06fec0223348ff9894d6985e7ef7e6c41c9870528b157ca6fca6ab5bc61546b179ceffbff1bf049968358f3fc46081076b21448a5f0b629834228bb772bbf1b17b3cea1af80221a292020eb95052f084843ef4b97866a9e035f6eafff628d2932ba995ee27c86cb15234423125ecfcc22eff4f7679cc1f968fc6b8c06aeb42079681a5ac95fc9e7fe2dd676296159f397b14ba3f4b53bc1f8d750163d29917f019428ae1d594f00cecdddc880128023a08fb92443754b290ee31bc056044d65fea024eed10bb068b629a34bfa4e05ecbcb2023ed8e66173f3e58952aeb30270b9b520f959526b49350d07b9e569dc6955f5724e0822f641b618f5c734f2046bdb1c24fc20a179f16b7bca08f9d6a88695dde85a8582cce250efe84566e300ee565fd19aeefbfc1966b4b28e805eb5b6daeda8c8e45730cf467a14d105895e66ac82cc5aeb8ad7d4cd58aea136aa360f9bd8270821354b40bc874647074b61fbe6e312d8e11a05f53353eed06eb87e46a95197272316195c3a7fb632f972c00a58a90f9916873c2ce17c94f3019a8e4a0ad699cc8414a5ee82c7073c43c124bda31bfead21b13b85bb8ff97caa3872f9b5d660fadfcf7c9b68b1070c163ec9c9d52907e0112225c99c6ba7900ab1a4dba39c4d5ee52487c9f7b3e75f96241f7b57480ebbec503aae70a1dbe677686064ec626bf09573b1adf6e34c921743a0d89734bdeccbde53b689e047a56af676b1f51091d0164c0c2617e10ed26603ba34e682edbaa4fddf5292f8a2237987a1bd58068a3f866792abaf6f866ab5a3b4d1c1ba301283e9c90df8062d1b5ad70f1a3f9418e1ce1323d49e67372745d1dc5714e68dcd5919213cab3d8671646013a496042311860c4f5267d2bbddd69fc5d4190dc5ac968beaec7367202391a30bdff4c07d0804a2a60e210ef4d63af5e566d6115e5e73bbe86f6cc6d21e558a397b4ecaae195385409f299aa987654b6588daf9933cb234db3ae27a7735a6dc0b819dd1c455d218eee3eae75d86abcd5a565a3fca51f5262920759573e5605385a3a4d04b74e6135945c89a765c3a8462dcd43d6307dbf393a1c8269c358b9f3ab67a814f62121d79d6244bd58d1bee0c0ceac0151966c5e02fe84752dbb94486ccc50b35e0bba3aa121b18a542106706a5a45e1e1bbb4757a0f22c63df3aebe585e70a00d846265ec3294a870c03a23763ded81e1fc06c94479fae00b652af2e50ef0d685fda3f2788f873d0788e589146cd27bed58de0827db8d156544cfaa8844c149a83e487f3b3eed1eea7979e75dec0f8e53cc8d53383cfaa74f831cd24bd8f770ed24923c65f6fecdbdc72c3983dd34027c707f94b59439fa4641e44629da7a32604eb1f7da94ed05a2b4cdf11d0fe27f190daddc0b5a2831514ec8a6b6717f2214f7b1a060c4025a20babd889f2f2c39bf54462cc1a3a648f5594f487f82848182ece367a5bef7c04b867a6b36cefbd68dcd14c79561cfacf5a48c60f20ca5789f3216c39a61354c0b86cbecec77035e05dd0f321189b9f1742d6cfbfb2ffa66ed60364a4867bf03d1f05301ae327f5dd0011d3f24ec049dbf6f985d768b6b5d68f1068e0dd1294483e75719134a740dc89722bfe3a2cf2a13de7ef20382e647acf5f023b8216f527e854ad3f8ae45f5f1cc22b6ffaa8a5ac0c90007ebe46420a174e2b939ee6aa973450938ee916e072c1626ee10bb744ed4e9704259365364ab2bbcf92904871c4bff61a056b9a11cec45d48b0f5000ec455c5511489333d88bc596ded7baf5a5d4db13258b52b771c5f06830781b8b5cad33df090fa4dc1ac014038ffbaba5a2966ba562cc333aa8c80cf64de1185f9f4dc896ecf55a885bb9edd27d30bfa129dbab9c7f74079f323763b06e64a2372ef02af356ac2bab7060d8acff0b3df2fddb3d380fa50035f387ce18f0d85249b5756f9f4c30fd0045b59a9eb0eb6d802f0d1a504f6a13c6ff638089de3c605c53cca9441254a1f10ea2f9e3f14a3c1db6d8942248af5fa3a6823fa4833fc140594de92786d7e3cae926a9786cbae74e797ff144c45b18d73ed19a0f6dc8646b17ac6663be6d0190897e32dbb7a71af5fa467f9cea8313524cbee9ab927ca6fb082fa53695935cb4a17e95458677fbe3e7416d7c9ad09dd2d8cecc31934f2d8e0e217b9f35d12a943562dc1fa4994697f8653f36930cf126d818d89cbf29c7a5983bf5114e4ad9903cf38ae7c9ed5048b2abda7c46d7
2b4813b16a438725bf267f22b014896fb2c867bdd8817ceaefe92e4749cf9b87bef6033774b1bd3308ce252c1fdaf201d3c5875f758145a4f428e604d25b705823053863d1a1f90edd84d14c4f946a0a75cb858bd760be0c72f27162584afd48f7ac3548a42c62dda68918a9cc1fc6e9ffd0edc20c198a10e6f61aeae8276f09af04c6167ee4233f4a024a2cc2e7306b8a0c159436458a6c21877cd59cc9a81694d7830ab6c2a5f5fb15d14b455444d168048fb22a0863bb81dff9fe49f12d8525aa09b80231a789b9de5857e70e59da58b11dcf34a2ab68ccc8046b4ee5b73945b1a5282c419b9f51ad5ab9943f78d0553042e8eaae3a40fe1202dd4f585946ad25458b003e0c5b9535d191db4201153e9b71cbb70784a57568e76dc533be5e8a5ca267959df29490fa13b740037e3986e5922948f8b627589ef9a986bd3984fb9c2b64441910aa015b72f6655ed66213d242b2bd1c80f30802ff6cc68ca9c2fcb2647e95c326eb5d6aaad52da84f56fa309a570c804501ad160081a324c28efac0314bab57f4f1cab86be57d3ee272176749bd93442f690774463f2869cddd49c5127edc586460829ede0accce6db8959ea084cf589bf60ca16c2bb25b6b016f78b1851a95a965c710f048cb8f29bf6c8a0baf7aa46da2300c34384caae4d21176326acce6cac8fd8e82135556966616620154ba261d3806328393e764f40f17585a01965fc2d939260e1f084541382f57ee5ee0c5379fc8d09763008158e5c1ef5fa2dba09f19f0ef82a77cde3d654ebea2bde7d936adbcab0182f2d4b3922ed03312e7665dbdc481e96f72fea856690c6776d8a99da687d15f24194ab9ccf47ef173ad4580e368061e2e8f559186816bef0537e5a9fa891c27201751fc1fcaa69f2317f776da4701faa6b57eb24ab7f8ba9b61a4361039d990fd07d4cb7e3f3d8ee1cee0ed32efb85cca6018bcf288ddfb8eb7fcc074db2b70d9d6d2ccc707f3c7f380d7581a5358fc344fe1971498f3348787cef15e12d80ee9d75a3d763d6ff39c487ac6034d2460aecaf406219f5a38e01b5d8d522bad0693181dd21b4237aa0a9120a58b23764bc05baea09a81bb1438b4a48e335a51b3a3c7a471c7e266cb0d353a5fc7746795db908466f775f5dd49e18b9f7d81767859117451857a81ffa812021a1c6d80e04aa007d9102432f5cda60fa9ae4a41cc32731f862e63525a36818e658638f0fb0d79454d8066975b07b8a6ceb81b8327b438fb5e1c9377ad1fc6318c5093f216ecc92c2458dae3eb41151cb888652edea2da719181ebdf23a99b424ed36ead26f493ed61f3a283d1ab79c126732a301979486956a391a6e47a789eaef9c275b50fa369888ee881cb10697dd6349d837a7a58bc98596731aafce171e78d9c0680c6764eb8e2b3bd96c009a7d0cc1a9f81b9d15c86d8b66b391ab84e8bd93995caa0eda5e00defe3847cbb08382154dc6026f24121589c4b541d7845f78826b2dc2350089cd84aea4286ceb04ff472bfd8b72e366d2eb6d7d5083c2a25cd235de42fd7dad31b815872c8290ecc1366b2f94912bce9a8247e243da8f41d042183b5760aeee24be520b7436a78d11ee03390ea7a49911aa46ca4f60a1f805e6a2807914cf6a3842865c7933538a079423acfd5a40733024d4d539b07ff1e71e37ef6a5f2a4d2280db48295962a72cfc9205bcf4535f91657726a2445895055581a670c4044709bea6800bd9a671aa6119f10a36a0df70d918040dd2664244a146032b198e6152c39dc1527090bef695974e8276a5a052c5bd08521463b51865a70f8dae742951535777a6894586af3382cd65911c3b923cc00a2b64166af76ac8f9d08549cf35d5ba8fd8ed4982b8d8b48a34023792a4b55692445a95429312da3327bce1fd2cfb47ccd4491176642cba7da7388b36c98c0d650f1a78b7db26b880031a9dd6bc7972447c0aa7e1b87ef0190630548b4474c479ac86ec0123b17f09c4dd6fd9cd99d151a2ae29b9df7994dbbef9203a1cd1d0832101d929f532351e4bf1ff69faee9cc2844dbee1f965f3c5bb6a2ad2eec21a5841d64f85e70095ab535f90987e365e22a15598162244d9ad1828bde2ebe9057f0a5ec982a153d4237f5b56c3734a12848c22653f292466f567e9bbe3012630fefbd1948c63998c5567178c30314ce75e2c8b8aa204fe754eb6184a6abd6b52056f12350fc010c4f7f94ac6f329bb3ac20a764020d353ba05c1574354b097baded4a9704f01cb78a1bbfcdc9aeb76f6106a5ce095d376756e33cc9dafe7c2036cd2fcd90535d41c62fd0c4c2f7987e32d012602bc2c9c582e334c41a036ddbe948499aa142b8927414395e07e26f9f298919d550b8e40c4f482eb7766533d988d401dd6b07cebe50e99752148bff3abac8d01d76ad2245f2386adcbaa5e628ff7fd02522b89dbaf6ec300c5c239cc21c5e060c2726bcf677586047f0b3820e0b7db7a5854cf40ca5487cdff625d8418
984d754180fa09f22b1a2bb238dc765a93ee4145ab884d33ea5e972f5bc6377ac6975111109ae09194ee857d5936fe4410ab5ba479537d1334a3d457b6c711e4d797cd83646ce826bbb60efabcd8a9ef965c86edd9848ab3e5731717c6cfbe17212a7f234737a3acf0f0b19a8e7c82c886341c30915a5f2e6d1a5ca0259f9c051e451a7f2e9e93c7327a70744fdd27d1c7f062b481a66ab6a0e1217084fbed28b3cbbda567a2253a9a6c89838eeaedf7105f6660e5838470225c1ec0dbc9f73a02df2b9eed172d75f28dc7212cba58b5cd4bf02d5b35a6ec4e520eaf42c4ebdb114ca31a7f973434021ec966334285a4097b31f2678faf87ec91fb0dcd79af7b3fc9154dc750e7004969434d875c3a8d639b01f26001cca378b46d7f206f7c1bac9f4006513c958d2902a54123badcd85a8bcb96dcabf63372f64f4b01686940f58ef10521b5fa5b3be111dd0d44b3037fa5661ac35becf38d260502ade33aa6c1cc7760941e1747004603514addc22c028355a29b87e1a38ca7f384062d750933562471008890f0a2b2c60037453fc8d65bb993fc16f1515c932b9fe3c48ba2460e02f5ccb523fa223e6d8dd14df214f8d793a77648d12a339a403fc2b9a9f286e78b9a32c840aa1d220179f2e539852b0e96061fb701def37220d66b5c2c7eccd31936106b3d8a95748785a03007d4249c5d8a2b5cd5432f4d4146090f3273b3bfa1b73ab871138a277800f51f8e85af69518980eafd6119309387a150915664bdfeb6802d7994388653fdbaa0725ba869c84a10785705bebe9eddfe41cf8118d8691b488d6d80550784b5babe66adcb5734efcb41755e12d3af1fbe46958c7e09e686f9b54a728f0044e39e6da4ef1d7e91af8b5632d3ecaca005e0a1a942b4f7355c3ab2afea6aee26dc7dcf42ec01aa87bde2283fe029aaacc9315ee5b622bf92d46c4c8f3adcf36b3cb09348710ad4dff0211dbef4ac2dc1e61fa376afdfd9db335b602deace9682be4e36a54daddaef542223690e6cdf244ec40345457788487a0b4285b2092bbd3e30b8d1d24ede41c184e6fc5572118d4481e3c94d3612515f1d896584f3de4e47c2c8faadb43d43c6ca63948654c32cdb3ad8e24e097219a91a728df6b55ad0162f5d285f226128775942c4e128b26dec4441c7b91bfd4051912d5f0bbd0a2b7507fae295e3c5cb4fdfff498e87bbb5679e0982788ddbdbab384c0d529fa475ebb49a604340c0aea894614b65ae8f89e38539b2a801bc0fb652723b59320a5aeae58e9ae92f9df5533ca3570f7d620c530644e5958e54e809f89bda6fb76924846736b635b550832580c754ec40636f0752954a9891f8d7db22f47f9e80d27b12ad177a8ffc83cc3188d0fabb0d86684560d6785102371285278a5d440308389c9c6e9d3ebad3970f51d6958be4f5d570af8e9de9198ad5094bbe9a2fd24aedf7e5f8105ac344c76d4765185044832dce2ca93577a8e0dff031ded8686b8e8a753a074c3b0c9c991fdea404057a578f0187997c3d0ca3bac8caacb6991d0b6c0f8d7fa4d557b633b0be189b35c3715677644a61e796646d34e8065268cf19f2c9a5a11f1deb67b535769c40c7b3cdf5408c48626ff935eb1519b4bead4e94570657338878d7aa722c11353f70f3b762e04d8ac03914af092e3f534af8df7f74e4700861500757356071dd7b8ecb952e88b940e46d8151f45a5c36d195a8b6001166187949415c5f264cadbdeea00f8bbd6a3e4a5e7d15742a340c4d16bbc9999be2ef35e16a5be18b82c924ac7a46b936ec7df6f7a5385284bbe8605562be462b0f575ef88606312229d0fb8efa10c560162903a98f4a03b985a0e726cb83a1a072382a0b01316d6e9a8361825443b9e90e2c83c266773f147fb434d43f54fa862f62d3720219969040ecc49a9b1545200e04c66c202f905918fbd0234e009643cebca92f98f5504ccdb47423559ed87d9871fcc050a79cbe3d958a94f52b491098ec9532c9f859735e4f3d3a783e6525927d5625ee47f2686bf640527f47159e7074a0dc0784d5fe8c87f3ad0c3989615eb43b497f01a0dfb2c82de1cbdd5f34e342d87d60666038395d04d4bc4e690152bb5105a8da52c44d02d8ae4f855ccfb0f562b53d8a821607aeb34a47df6bac1b09ca19715cbee8f2ea0b6d917070af41c0db339235866a4a6ca28b7becdb454a4982acf09aef94ec077b708d80ed22624082cac36aaa1d6877850ddab16894a98e41aab58b8d2219d2d99312039e2385e3e28a7469729579d72a0a035e1a6f7e451761050671880e3544d754bc58451585f2cdd206b815e944665a43db5983204dd16831650d6d39a2131a7eab227a5caf6df3907e44655f58a3db168ed4be6e15a2c1d5c3bc32b233c82434b188f8a47f6469de0c62d5d111fbbc802fae181e571f803dc816da3c2ad6bfc2af34dcf8825cdb0aab0583c154ff58c722e10a380577dfa0ae22792f46ac1dfd975fe2e77ebbe7c0e
768b8f0e9a75327d9924d1735a3da6ab683b908e64636ba74048e183c5c4f0ee73986e2d48d540c6513aca02f9aed9582a9616906ef7a3a14b42c9d89165a614506ab65d92122e3066bbf50b5cca158ea80b989a0464a40ba6a148c60252a9dcaf5ee0f60e208ae83cfe74f8151c746c4c642e63d23864fbe94828e89d5585bcea4f437ca08b4a087d3d9679f100206270d80f77e142f4c1426a1f264c8918cc4636f21dc3c012d84d59855079446828cf2e23b0bbd7cf8c718cbe8e135cf302a78664d8e20b881a01980df1209417897de716ecaf387d42889af1e6744584573c374c955779bb871e9b131295eaa6e6356e774fe1c5c5161d082a046dca1194dfd30d662efaceb4c4e782609bf29b9e47cef4bf39f9f18c59dbe86a1622fd135ada457cbfdf4780aba1cb402191adf675f221577fe7e8f5b1b3efe54efd1a0f4c0b104bd214c1f34b6e11b42e982630d7a4b983e68ecc33741e90dc71ef426a61b3476f0ada174712c5555cdb27f2fb8bff61ce0a41ee7b73897d86def16161289681d973d1118106020ee11ca0723dc063da22f7b76b2080de807f34480619fa0ed581e77c53c6280f842c303fdcf8bd6e3c74cc624944bb7974c1d4d2c2de2888289b7b6dbefc265040d176896014ac54ebb2ca4956322c044d99f35ef130cc34613582708c061ad5187aed7a82b2057d806f69bd5df396462b98e4fa0bad8537e7be92c3a37341fd82a1f2c4d12090571d8723ba8f68050c498026477ee2849c4e7894f01779fb9f687436bbb5f15eab1b6282e920a2256c9a7083a1d45882ef5da5e1dc5194635cebd5869cb5ffd2706e18e033291656d1f4a79a97bc0227abab1472692acfdeb5d4926eab4dc810a0fe112019a72a4d817f161153017da23f7e13aa357d24facfce5ba354dd4012548a8bf8efb0d74f64884bdb24da3fb476437eb6e5c6da08e849124bd58958b84eedd05213f3b7a75b267974b87a4abab6e0be67233b13f5b02a2a959eb4902187f079f770f92a17337f857f1b7256e2c47becea686b1095db95b72f6d3054488a1563c4af7ccb86d414bb70b4d31758bfbc1d5d1edcf8367e8d72d7e5b3ca56c39cdf5edfedca0c34a3bbb1e8428e1c4fb8fd85dad95bbdf8c996002cd6fb895f6eb48f52bd1104b94dad48fc1fbd60c618f1da180b725a1dfe710043ecdcc46baf3dfa7d73ce7192172b016fdec8101635aa91237d4aa1bd6b2cd74b8dcb75aea19faef3f42d0411a72700cda56b828ceb6af8ddd2542049d65725907baa0ea70826835add125e864ef087bcc3023e60391db909f1d193a9a189f809adb27bb3b15e425e512d43220479cfa3b5885b78e2fd149439f89edd42db20c6bb54bde0100d66a82890e071841638281c39505513f8bac30fea393d2ac23e32b181ce2403ec3f16800e742ab3a11170a713ce451960a8c5ed409f3ae43e3e6edaebbde49ea2f2ed6ed9b6bc9d260c033ee14c0b85d430eddaea42db0daf8e876c506bc66aaaf5e59d11eeb87195fb62a7e68e5c8fc632d656201031d9f140ea361c2e11e6c1f86192000b1030e0f717651b8ed9a9c21fb23183505d765ff2952424bb9a0779d9172e848bcfba559f8c06ef14e38943241e6dd99c0a89fbf07e39058ea56b55a7d1aae72f724822cfc0e3a5f6795aa08c9ea8dba33f1916e3fd891a36355c659d01c8ec839b1e51ef75873919501f748f291cfa02ddf76b04dea4e2069380bc2eb68193ae22104b36ed3afbdb85362bd02ad21483b25a1f0e01eb744cdf481a1580ea7da32b556d8485f186b1dbacabab8f2ea65221e140b2484788bf2a96456ce9df5c4c174b906a5b0df801f20d5682b3592f1c971bd17a171de9aae25e31be92ac1bad2214947366d89b2c4f81c90749aa8b0fda87f735a8fbed918c7f1b96c631d1916682ebf86a478742428195c24a51d85372156b137608093db3e5d7a447da4ce4a3dda8976527aef943c045ea2e3b0382454d831f90ae864ff92e8b45bed8fe4cf159a5a36cc88ed1892d8d68c2dd96c552c1f6de06be8802d9b7dfe69bf5283530158d7f635e129eb4a02026b2171784934e46e015b8a09e52cb16766357b024d85382165b3bf4edffb1378b7f14bde3b141fd1c477392c3cc8da3d257167a1bf6cb3cb9cf257bf6b48cb2b60447cd46cca0d225b91d6b4f5b68a940e4a1ef7394a8c760bf2fa31641b94a70ee22921d1a7135b60153270a69b1175761a677c4ea563353e311d2d806e0ee50d5f734a8f03a2a553cfa9b15ba1dcfc6dc2ee4d9035802fb5974a67a10d8cb286b3b30c4f2b289d9dd5593a12d72103440f726ea186f3df08b11f493d83487acdd254654e93a75d2d4bc1a405bb9258edc574a1604558865e19e11098bfe2830ca768a5218cb942caa64257cd00177e812ad38e82c78f7808a441fba3d25ce2178cdca2a6cea161bf60ee009af673b5f2a44caea16f40784755f45273ade457a199577567d47b611812efcb1e
0c097c41e5b89839782a845b2e3344c66374da87b62ae671f6722873a4b26254903e56ba86f925e6fce9da05671976a09b3f10641a3e5154c698bad0ee462a1191bd3a263a8802b07c481798f30f025eedd26b33a64abd0e3081d3909bc5fd4deb15a044ae985a1a8f9419f380ee23e71fa230102de25bf25c57a615252520e7cad843be453d71652e4c85c0fa56e600f70b2ae4f3257d99c1a47e67f9321ae704d6ed2aa0990569ad1c071640574da4ed630103a3ad3205f7bb34b6e53e18575e5830c52ac220fabf3be5792c25212c89511f7fd9638ef1fd4cd23fc2ff6c7deaf0be02edae1f3d6d02182300989aaa1fff2b59df019842c55754f61bcef0e8f468f34f5629f948b6c179d31bdcb8c4334aa51d84cee1f683a272cd760a1cd43e00c0528774291739eb87e55d0966995116e119273465bd7be3e79b766c1eb3c52e299e9b35f2416dc758dbcf62f31504fa7668dcd5309b29639f3a453da778966a2430e426c906a0250b4a094a41c1c20747080972c1cbdc72185b68ce74dd5e586e7f179b2f85e9536647ca35ae96b2d8008bb19c709db6998b2b6326d5857d65f72f036b923ab0fcd004a930026bb1e7e5f701f2ed295969864238943cd00e5f7a2238422d8403ef2fdd36c060344f043e3fbe336c0cb26ed78cc21e7f8576e7a6241616fa9401ce22cdeca27843cd9575248e408b052937f9eab33f8349c316397f6140ec607711a3091261e13fc5af6ca9ffc8ba5ac23426fa53d6e34c9f6818c6b2bb71016bb891677fe73b2f1b0168171c8ed50c4c7b41b6d1cff5161c1fd6efb7f76b5c7020400c5edaad3722ab30aa4f61a572827142e7ab15820b9a71567c7e0ad21daa2bbd532d1a7b882b0c131cc662b8cf868881a7dd6e9e3b4fa0407f83087c22fc4997ea3080150b5ffb7e762049e5797f2d3ea52f32de8b8c76b5316631616eaa421bf596c49b7a363627ef80fe46dfe2ba51fcc84c6ed66e7833d6ba761bb017299768216745a4b60936da821f0c7fd8366ef52ca672a7416a99dd1836935ab2e8d25de1a795cd7e324a71da4a60f611526bba1c1f0c9499341fd030d0e03735425e342093a6c62318038d123235427d11653b2813bc73c2a8c4ad9a0fde969041858f72814847e639342930f0a89c6a19b5745f6058a52e9a277e8115f7d4df53c73fd264743c2bd04bbe82c98044bba1de408398f6ea7c08eb17b5eb30f85cc3bddf24d39ee840866b63faf9578f9fe1ef24d689f146289c7753a1c0260703785d6d6bbc59e642aa3f0f8eb81f7a2f7da25f4e30ef3a1eaf3eb86d8df04ec99915c41da85a1856cbef0e3293c88e65009b1d14473efa09d4a318e418034af4c55e5a50d815a503c51db83d42ba5da4ffbec1a971d05a2098ec8073f205c4463230fb0db4299697f010b25a8fa93ebe6a731606807e13d1efc8cabe2cee50b53fe94038f499cdd0a3978ba0df066280df1249c8de7b6f29a54c292519dd05be05e0053cc4262a38536b9b21de830ea55f3733aae04c29e52597885486982a382a433895219c8a1073d7b99f1816036fa049f42a437c583e7fe383f560114d1008e542d9dddddd14d04c4949494999014fd5516d069eaa91b82931d8be5260a385922aaef75a50456eafb0001083b99f07e38b07630bef3d185c783735cfd160c28beaa86822499da13a2a96d8a2ce9b10ccaa87ead443eddfec73722d3d7c6a5c4b75a30713a6250d60a67daea8fd311bb23dd47e95eed7bec61180c97c35f43d73957d6701bb6939e810362f2c52976ec9f207a3a464a6b9389d256d3b7c738a326a2e4e6749bbedf08d3f1821b38bd359d2b6031fb38bd359d2e6cf7596c46284e6a2c3ccecc262c4dc676666e79c9218331e94d5e59919f5c669c769c769c769c769c769c769c759c13c13251483be80d38ed38ed38ed38ed38ed38e93128a2152a12f78e3b4e3b4e3b4e3b4e3b4e38866acd0a02fe0b4e3b4e3b4e3b4e3b0d0b0415ff0c669c769c769c7b1f114a71da7bdc2c9a8374e7b85435fc07921012e70ffc0dc47a2bbbbbbbbbb542a954aa552a9542a954aa552a9542a954aa552a9542a954aa552a9542a95bc4072c4723776b79e420895df4f53448fb179e3d6fddc6fa27b10945354f3524e8250476ab1a3c5e5d7d5bc974e55992e2653547e6cf330993e67f34ed307c441547e14128fb86b7aec665a23a9fca123301c068ce685213d2b3fa8babf75a956a43ada35a6d6216d49a6e63b3164b443a9c237bc844dbc046f9cb3f3a4060969cb3935d169534de3e09a9a9a9a1a6ea2e364279a4c2693c9545353535353634d945068e7490d929b6d734e4d74b66ddbb68d44229148a4d631f18dc96432f112d312f079b7a94d6d6a539b40a118a2946d73b6898e936ddbb68d44229148a4d6d9d1d17952c34898f4b66ddbb68d44229148a4d6699dd6691d9d1832665c2895246d6e76c99a826cdbb66ddb46229148a4cd3935d1e96ddbb66d239
aed3b16d9a251a448e4ae54e8b7f155a670fc0186311bf7d68437fb85f47341db2e563d2ddfac5093f26a561eb0a1e66b3c0d35c29b1aa1076ad050f335420cfed484777e35ab252f09fafc9ce0e6fa2de5b76cf1d01c9a43736886a42423d7c32555f7978bd9b71797c211ab5e25da9bc7fc964481288c06d198ec99415240597e17816d3121fb3e64df65f69dc84d4d738498bd196badb5f7de8b461554ce48e5f9dc1d6365082fcfb7773e9e36dc4a797ec541154336d03b1f5bc1e5cbf33d8905a63cbfefc5d87bb56af1ce39a9e8a29639bf2f8899b32115351caf925b0e8208024a901369bca60dd9c351953b1c7f47b750f94e530b3c9cc7f1e4a494d2d9d46950a6efc91eeaa1212b3e87845de65dc8166d224598bb4aa1d7de3e775ffe9081f77e187b2ad54d183c36ad83700e9b41b8d06b13480eb2c786fe45ead00f294cead09218ecdf34c959f4efc558a5faa4d96c369bcd66b17b5595e7525b79eec5aabf62e690f0e83559cd764ba24014267b644b5ef4bdc85df405f4a2ef47f413e3f98b33888747c213a44effe823f711109016a2972c8d907f8cdf171e91a5d01af18255dc1579ba678001950596799437ea030ceef2171199676478147fa4b3c6478900dff873ce71dc75ee4be18d87255aa5489d5abf5bd504512b0a084c77847e98dc90b649d5622582b990362cdd307dff3ca42b7696ccbf4c19840358a440ca36596291e228072965f97206913af6c7c6138875577757d859f668867978b254b94b74964dc15f5df8780bdb4f5aaa01e6c26f0baeaf6a71ca9fee87b4e5acb39c355f82a52270373503a99dd8ffe6501e55398c61869159c9ec812d5ad10485b9d1158839b3e6c1288c2cd73ceaa8756bcc9911756bbc379b2bdd721d744b1ad980b2bf0e30a38e313650647faf8227d874d4dc255ffedefd86b49719a106a314bb0923281c738e8254a8af9c99b36e6ab87fd451138364cf7cff1b35ee6344dc4a07cd596270dc6c8ac69c990e20a99333cb9939cb75d4747891ae4ef64c1d64b2eb1093fd471d61b277a18eab52e9a88535283524945d7c12d3511bca2e06499d2af0a8a3967d6523ab39b35ae9e7cc72b6e41ce524e560d2ae2a94c30509a6a3960326c79673cb299343cbfe954e1db5225bb7461b990e303a90e6aa3fa748f6ccec8fc38f64b2a77bc49bb757c63a918f25fb8fd4892c6bfe5db6657f69c59c81ad818becfe58649f734ec999461ab2921cb9dbc08cc1864d63312942331ba02964d8ae10156d19b2dd90c6d8108491358ac8100262c758c331b07cf9376ad08cd4f1f799fd6b1559b76a7037ff7aa55bd2480c2ed96bcd22460e32583446cc2b51f6d935d6ad71a64a711ee4b14ec97e83c7153c237bfaab119edc21e77bbdf2a48e7f2781f8749f23140808082877d849d12748573e32f78a03cd358882d79e6eea6fff7189cfcc32d37066ffb4c0b1826305c70a8e151c2b385670acd8a0a0221c8e151c2b19c70a9e2faf3723b30489d070b440b667a484aa117386637d92fbe5d71a64ab0a55a0b9aac9f4e92a06223eeeee1d6dfaccafd1041e4db5dc3f57375df0d87d8f265aee57327b30a4e7a5d23d950bd2ae8bdc5fe7ac2035d53a5aa864f6a03bba49e2afd2cf4c251ca33be2bebf06c91e8bc3bfba5b9fc896c9d6627f9bcc1829962377d1dc8b314d776433998185494ceeee56232b516badb5873cce1409d1fcab54183b8d981490ec11d28891fd299aa37b31a639721f2aa54fe2af1225aab02a001a8edd915c5da9333f258481f044809212c4e996e9ec2ca4cb8466d1377509c3c50bbd21c3fe93ba001eb9ab3e15c2e32c5a9429750fc73e3a7297f474a35738cefb2b4ee4509426d02c8ee3b83199dbf12d72e384f9aa6d30ee39aff11b32987b1976970c2e7dfdf17e9bac89852b25ede688b5bf2483aa1b2e59d0c79b73ce69ed6a49f6aff45d62a1452e3c92cab2c57973ee5ea798c34103d38a671c870d34d3b009f30e9ab00663e33a8ee3be6fb7061a23e676bbdd6e37dadb6c76dff13bbae5a62366c029d8b6970ae62aa66b7faddfabae7d645446422fea6652a733c263cfc020e5fab2d3d2bd4f9d2f4125271491b9d6ef2f1ad8a3de146c1f8381bb0bfefdd26a3ae8e399c21b23329bbe88cca65f927b357e6e21bb694557df4b105c2dc9be92a1e62f7cc00472bf5734e45bffaeb82d7256dddbef164599aea611157d8e95a955cd14f55055f0fc0912a91fb6512961ad4a05b8c85780ea5aabd35a6badb5d65a6bad95565bb97a6badb5d65a6badb5d65a6bfd4a69fd5bb95adfd6ea4d81bb4bd71b25f04fadb5d65a6b9da17eadf56bfd5abfd65a6badb5d65a6badb5d65a6b450169d1b2d934fe79fe852a2fbc97a6ac60fa23cd7f9b566a2fbdb4a3253a6bde9782beef82c0bf41a6e782acaa62224c220f5a08dff56294d249e9dfd7a5d3f6c9d6ecd2c9d6e4e1d9d911e99d77d6e4118db68c073f6123
423e4f282f449f41ce9a41b267d2a40e9427b235bb90a23fcf8f3bee2d7400fce2cd3ef2e4034c7c0a81c78fd1b904237b0cb28b3c503b219c785a74d29ce5535af4a1b085fc3d1944eb9681cd25e1d13e65d40915de64f05d669cc914e9e6ee9a42b7642bfef9dc1c63b2e873df851e188e6f0a477126149a791c4ea131a6ab6524bc291c5b5f38e60ce1e08c9c3c4a1a6d862d49eba1e930071abaa859439d9e8ed02e1a9a45658f7d2ab3081132c20838d7691b923a4de40006c85623216955750a5153f00c9d2cd0f620b03d209e887e679935142d524ae4b1805fbee8c4337f0a5938bd69fad8d4f0293c999ec90db307af139cfe8517a8c0e6eb8626a2827b88d6dcb5436dcc1a7d7a7187b4e94741da45cea28f7a301c49f868d39af76889769496a046e491da09216ccd9104f0a70a157b68d672ec4c293da66f2d52a943ffb46a272416232b726b912c5b0fbc64faf46752e70cece1148a813a60ffee955844aea40e9df9a50e7d22518ab5527f4a805330f7f7e2c8f75dc6ddbfdcbdf7fe5ce1b46802dc3b909dd46564cf7c158e3c7612de215fff40beefcdd59c92c571f7729fe440b22727df7726382dae544d78b5c6d24b1d285f8fbb96b60d3b28bcb7afd75d0e9452475461bee8537c1fec01cf58033590b33a49ea045dd9ea3252bcad2623c5fb976747bc49aa69b3c9fba3e5e92467dddb7d77d1bff7beb88312ea56e0c91d52ceccfccc8f63a7b5c3096506a657a8740c196c72d229220100004000e315000030100c088562a12c0f933cd36a0f14800d769e4a6a5899ccc35110e420649021861800000080004360866a4a1b00a52bfc0ae355071c08c03c4842e2fd3f6fd982c04150d95de1f8e592df0ffad3948cdc69483c300d979698a56e6ee7fd1107f4ab03ae709fdaaca246dcaa45287290b536f41b64b9fe91f076e9f7017e4badbc7dc9bc0d5cb1d8ed1740eeca15b96624ecf8c11b3275662fef299a218efda80d821fea72b34ce201a446428803cc01e39361ba00912cdbe168e48c4ec4210fb4d6109a2c3dd699aa0af569f6d33258106e5b8fe042ca4bd3c008cba0876942d63129c14c15a9f8bec41de2ab4e60c8d9a41c4faa4de03ac58e546486fe57fc4b815ecc408e1d6887c2769326cbf8b85880ce599e25c247046c6c6a91be89b99bc87468b2e6d711223583fc43975c6117c1de222245720a3ffd695cf0841110a2ab2a41732de6a1fa995529ea8991f821c598ef9530c36a8f569aa0fc335b38362806259c5adcd4220da58ecd53d6c03f804b9792faa896bef1e9cbae2dc021bdfe668260c16a7ea63a362c4e8821570eaf7aaf3967810092e9e185ae3b2c9eb2b9c0ddb918a8405b569870a9d361b73990f892e3b93369e3b5744569977342db9737f1437fa82b8d89423861d0459e5f2ce19cc56848fd0561f58560c39233be6ed4dc1b37ddd1fde5d072c4da4b5baee039395658ad53897ab31e9b7bc70147332647bbb9794610261e5ff68666ba77c0e0269da6d3698f4d7d35d128d5e395b11629a087e624ae0c59d8ca085b40f6b83c6cbc6f41ce27715925508a840e23051b1e95aa584a9442cedaa3b0b597e115c3b2f73384a2e4ee24bf08afb748da54e98cc551933bc478049a5edc4af108ba4f6540ee8d460f1cdcb28c589c65c4a6419e57d2452e2bb93dee17344ddf4f1358574109a18622c98ee2f52d0acf85e3ea0efbc291128f20267647228d3e55d8b56e9307afa7f81680a49e61b3880dd0c8702b129fa099feee548590a78dc21289f75e5fc3b47902ef21c158f222cbcb19e12f302e0ae071d0390e80da9247261fbea3a78ac31268228cdeeadb9e482b68467e8853c8a4b3ac57eb3893533fb2574dfd3d4f8ae8dd07a0abafa946ddcfa45bd2f2f567605974b016d0a9556d2aa75e3f96cdbf663ca8e9532e59d09ad11bfb849df3f73cb6b7ef8a86d3b0824b4ab9ca19c6c1a7fa9e1de2f16ad8d415baddac043b224d38f2f0a715f2ef24172d499770d3ef6a8dc46be9d5638ae2a3909d4da40d09d8621d54ebdb46cf1bda1049041064fba4a184c6cc0af35071d32a88f5c94e7086ea1bcbabf740d22dc3d9799df2d0b6ffb32cf590d32d3c1d6a5312e05b5d9468000ea6a3553d49b3ca5dabc46c55e87a33432d8b150f94b904083eb8e90275aeaa350edd53e28635cc9ae6522c26eac8c00b82e1784af10f64cf36f9b1ecaeaf842100b81aae3b4c88438792e2136ab014db3e8b602de6be58ca1e795ff1faed8b887933300b34f0d729bd6e550382dbf729367a8619751ff5f92b3b45b3823b832097d4d9ac015e92db6295550041b080c90be8f2bea6a85f6abc13190e217352be390d4b5ba4260fa9b54734ea9964830e75b3aca1ffae2b297563b208307a04c5a94222dc60403c233662bd721929ed55134524de9825b6c36789f5f227c4ebcf3cb51fde524f3100a9006967db472915ffd8f01172369450b021695557d
cc019ec36a5bd51a840c0d492db2b9f692a6eb3a6d522ac79bf5b0cd34af1ee05f5f86df80405bdd93a05b39d951ea5272a8015e2a235020ec004783e8b4549d72f902b410db6eaf4eea3942d15aa6c88aece9bf956019f32b386a56faf7d5ec203443cbd9a41d89d0e1f6af9f2b7b64b020caddc610cf03c68d4ee91b7e10517cda527129660e3931fa9321283abd32998409c4a54420db8c2f9ae9893b46f85ccdbc459f7b0e4d5d187fad310f30544bfde6c33fd9c829340041e9d2da287296f65eb39128208e6625aac81ada5d8255721eaaff717e8d28f63068469cf0b37180dd1b593093b5f80458e010f46a5cd7c204a10c7b06e10b33029f5ef462b5fd2162e3a3cfd4b2990a140fd443795183f90b16e89773efdb29d4817a841c848e553ab9f69c3d2a070b88b32068978301cc3f3cff577520a20c056d028d096689d2fa84d51c46a8636f9bc5959805c490a0f4b997159796c7f0a1b6f9a4bd1ad9b0b54813eb2d1071cf7d8221ffcac029c4e0398e003e821229b82aaa31719e6b20e114c5bc470470fc7f7ff03441320578b8103f50f0c208d1f52fbb26de78c395318e0a40039ec86cc89ff71b2a20e3f47a91bc92d08f9431afe628c43ed108eeff4e31aa5373e9ade4416cd86784a71cad902bf02c8fa86abd810183aae3a420b690342588d4aed63dd244cb97177b173891e1c23fc009fd5f8ffd979ce770e79c43360f0eccdc1a69cce084fab330c5f1ffab9b614dd36a91356484dadb3acf1bae74f09761a5de6711c5ca43ec219c58f0268056559a298a607091ed5108243163ff5cf95d6902996e64b173bb0d1aca8fa5869f60eda80f2d8945630473975d9eeb960261d543097c0ed2f3bd5e9b363e4487f8a4d4bfb889e349cfcd72019cae1be4721cbde682e2be2ec73b5f6530bcb2a84ecd26ab0b36b3175bd3bd4e441a0321fb3429b7a6d868978f0f439134a5f551566cd6ee0e885d9b331aa0d2bc4a95a78f9ef2a233e3644d80cdfe6633f1e4c45bb5b18f505225c4328f3959674ae87aba2c94cc963d2f69c40ee9ee1265ecf5114652a18eb4731540eeaa2133712c1a0112532d52c4f10f42491c6182c20c68bbe7642cdae63c56f2b9a8326235d353dd7c363c18c2dae6f048837eb107f471098aae0e1b8fa41d95ef64a82bbc4a9a1828f1df2bad12c606c6acbf76b0940fec98e309b74b866b662f07d0a0e203a4a97b4d9ca16fc492e52a4c67a415d6d07b2a09dcfca4279983be60d9186ba2890d8080974732bd52e5f909283c254e7c88033d9a351b238ac7c1b00b85696920c5513fc7112806df0623b53fcfe5e77a717119b34f51cca28b15e03a62d14bd5e9aecd2f16efb912f119905f23f57ccac73815e3015bd678d750460ef461cc647bd7c9141890ab81ed6ef1a6dbd20db4b6d8b09762a73b3d7bc3c13189881b700942589cdd083642d233873abe8602de47d379206380c0703b81e715787f3798e102466503c7d083c976059360b04d864cd9543caa0f7d44a97a95bc5ad95543c005d1104d5357b725b6aa3688b00b532b75faefbe8c60f15510aa570f078a50d0598081f7df01e11a0eaac89860295836e31d82c21159edbb60d0c95de6bf34d1ac53d2fba50b5bc845bde77120eff0698aef6503ebf1014c936d5af8cb0c29a151cb33ca8956bf77e59f0567748a2c42fc9aed9902ad2d2c04b672ae9d0ce90ad17dd10c8bae5ea07988a5faed64f4fb61d5af633fc070ec65c01b2572d11b27f4168b9fd6b22c0a4c83802d0c84b62033b419069840721f1a7da5fa8220d3c94540733be6be6426ef15d43ee69487632d9880bac970dda8e85dfba7f1b358dfa0bb5a23c2cc71b2a3347868d8f49b7cbed6ea8b5a8f0517003514b6ab1b8ffd4432db23584e83fb8122a287175acc4413c97f5a812e4500bf3e555a5625f5e05518b834ab4a98d1d10b2fd3948d9f6638ac429d9fb6819413612cd35b9fb8b15d3c14a3bc0ef835e17f0ae142d5232e4a85cb6dbe21f539dfb40d85a9980f75f0cf13a240aa67a2124e329ca6abeee694a51dc1324f3ea04739b82040c7ea6293bfef85d0bd9cc198127d399720d9be02f491837cf34c29f5d721d29e98ee48a50dbdae0cd81d7a7b3904f2671735b5d408b2b7871dfd7e162796e8e1c37c1933a28d4475be4362e0d01f6c20f5380d50edebd1a792c852aa7883514b41813ae6768fd1efe3fd86fe4cdc2351710302abf7131e47df2c382e00412e8c60b3cf2f50ba13adb862eff581a5b2fcb10b7f6beb5715c53dc9fddf352d2f3f12792bb97341fd324894c71808350c0ef0f44c0b732e788c7b2eb98e7dcc9778cd2df7e3ba06fbc2fd7de7790366118a2789c607b300c99169111d0816a2629e5655914add96420c04c412fc45a9df983b60a8ac93770fd9036d62f0ca9dc0ed7db11dffdfca6b9bfb5538612fcec6f4143cd5a0d814c3f5fc81abc551ca5119bc432d38622201270e1b438f475
4bad179e4b53afca5388c7dd60b5934358ae0107636a6b544c7ef83f9cdddeb512020b0bd646b96f413fc7e0e9cb0604b93e2d0be3a8c6186cbe6be23d1553554100d52f96464828ce8a05de011b2b663e58603be0920d89ba98c4c90b9b596b571fb4d159ca828595bac85d163aaf53cbec5cca8b35ef8a40efb658d0cb5c621fe5f2fd9d14fac52699415744212aa2a0b27900c3a0c179dcfcd1c1532166629c85c3c81798bdc92173e8a1210365e55a3892f9636e34cd103d157efb8a0ef5a2b7f2f6a71dc78c6297000c877f391406241cdb133bcb28b24dce2fc943655b80233ea6c2feb54517dc5a011c720c47df296e538a699705e045f2167f06ce6a722859058264f65924409bb213a24681dc52893c4f55393ebc1799d0bea605000c1f33cf000223cf8f06c8057ccc81ffe388f7f58e68ab96b8d915c61fb7ed638612dda35c597a9119f4833124c97392cbeacdfbeffd241b2568fe2f055da0fac6902f829d37b75c2a6083b7dd4414b74c5deee1bbd2e145199da48023573aa3254956cf28b0037e5c761c7ec35201d7382f559c38e6977e291f27dd04712f4f7789a4a199442b44c6250839eb6112fb8226c10ff9adfe828badc9d6a0f75aa688a5e32d80ba869fd4f8073d3e89459d6d129165090d5251c7f610fec597786b6b38dcb4f44e238983e922a4fc9bb5ee2ca70a205bdcdddd6a691f4399d40d263e5479503840f7d7b0255794c2e612ee9fe76910ce4936157dbabcd6440220c0f138a6237d2c300e6243690fbb573c8bb331b8fe10cecd448993653a4760248b2b5642e388c0df70aec192c7fb893f89a3d2fdb4f73af2e2b1ba38660bd8cd1b5cff4103b4061a262e407ef5c6865673a5eb05dd2791c1560a8ceb02c8398b75b6884460bed17ff4d35edd9e3ffd756f5960c4caaf8b6da984e7db487d9d1544ed560a75d81eae0ff2e7bef25c0305c98dca2f3904c4271f555e1585c28e937b9899c93ff9c4a9d5a5b882982ec7015e35ccf07f1c5d27ecf172f53e8d6479dc76a97ca73d036627854fa1f558c5c455acb99d7500d99e86f757f89c0bb0db5ca9e59128c63be3d464d5c51023453b3ad2f7da8d508f8143a6285d09633ac105836d2dac1c4df8625ddc9a01738bbb8e376cef29901c0102c59587bff8ebe467262480e12ef9d8f5b3ec22ebc5351663c07d1434a950bbaf439d246390d87b18d4764e3f68328cf1301e024005ca9511d6a39cb3bb44ce87a4f066b67432259d0b423fb1fa4eb4f8103130acd928857fef6587d7be5d3495c5be9f511764dc0ee05e3609802aadab439596dca91a9714d867c5db9cd249c5bb6108091809212bde8d5189cdbd6e33711358608b6b6d9edb06c93add419d8e8500d2b06eec9a7452af6832c9110c0935d2a097ab6e540abb2a3f2526be0089173c57ef19ac6061941aed50e0b344e4fd8a5e02a63f3ffa466913b46b0da0c9dfcafee80deee0b0706f9a64046f136f6ae8ead5e461770a330a31ac027f8c7e0e470d41e95598ec3a60390eb30d5ded207b59286318150b3fceb6808d0dd96e2b59ed92b441d3d5a19d198ebbacf4c5883a44c498c6cc752cbf47b6f0186ad466ae45fe0ecfa1b5a5a2205b2808c1d248c512a4b7ed91f188e0e6984e43e70189821041b4f986c13cf1236cc0175052bde824649e51e35eba04761a6437ddfc649d805a1fd82c019ea32dce89b29cebe4493ff100a89933608f723fede9821e05e39ce3962519fa07db26659f4faba6c5d5b46954b092c80775d690fe87a8f12469dd26d2e890cbf589b8fb4b4f63ac1ac61979fd9245a66173d038b834c708949a82480459b41e2994f6c972eefd80df54524c8c56fe2bff9cbfe49b9e949ac717d8fe9db55dbd1d074be059ddea17a9c9bcdbaf3bf237eb0f6e4205eb0c6f276e0d6554ba116a2fd207f0cc1120a1657573b7832899aaba2ff60c309c9a187ff41f27039b9af101168277c9b04278a79472e4658b228af00330870aa5180ee21f6d9cc2740ea76efb7178ae7fcc12ac0a935eaf1675d5c8d51eb7ca5518d6702d3e06a9084928f33e0d4b8be3146db3e6e84e9771a87d159442bd4baa60aec273bb16b6052cb9301840a2af72c32257d21a83ef7b0c710878640ea055dd541ce024e560bace0b6d0e4bc42c96340db2f1054b729b7d8caa7af40240956cf027ab9ef928b8641274111a7959ecd82654560c6c738ec677b4e266d658c1fa7dbc0a9546065214e53f23251b3e52b5288f11a4e33007233d9d39c0323ac3ec7046f7ee1716646e1344f468729b474bc009a94e675858e9d2fc16ac605261dad752eaea4910be660d080401c0d31f134b0361958afff753b27213fb586a86959fdac5462b0924117eda2437c34d9ca363d9676333f93ee66ec272b25eabf078c0f49c551960d46886c108662621eef82bf83b2cd15cf804fb6ede030cfeb59cce9b79d77cc0cd8abf545ede19a21f4623ef712c489339d60b556
a5cbb3279c6de0b6448090c4820f02cda870d90b9b83bd7e56eea806c62e754db5cb3886c5c649f0748fd786d2a6b770cec71535960f4a5a0379b62332650a239d14cbf2b8342fdaf7707cda71aa432a72d6a95c931a5694b7570a62043b25c42bc607258b453fa02e2ad939575e2fdccd68f75337f602522d93833c224dc20f7c418c9827a98601befe02ae0ba3d30d2d62e2136c295854df7d59c377a5c6df367fdf2e7309597495f7e8385ac065cac7ec6504a6e950734e0982be3ddb76e7d3c987387704bc8cbb712192c4f6fe39d910d4b6812e200fc56128eddb777a137c3a789ff67009a8fa7a29b48d09e1ac4e0d54829f3665e43fd5cb8adff3471777d11ea4a8daa142166093701aec84b53630956a139c2cdd6c8d749b0db9ad58639b47a3cd512b871f4f731cb33d3f5f20b0751964a1ad33d05ecdd61bae4b3a4e0d0f9343a3723cdf1201b74f9821376dec3451429ed4ea6f71d3866f71042154fd818cb12f87b9a375661a1446eb33a87eb805be5557530fd5a2ac7b449905feb02ab44f6f8295ff85e8b091aa6b9bff99ecc284efbd7c404d66f504f7a140d7581806e866c59288bd6649c3672356026c94d0b2ead4eca5d35ee8ce7eddc5193cd43b359150089ec11da1f82d373ee9c987d6ce2381bdddce6910792c4c6e1edf5832d147a06d956f59d3face9a98d27657e508624cbd37813d7c07c4f2a0f90df3ab570062c05f414a981787efba6b973c62f0623dc493be0de5e8780b01ed76ca99506a21cea933f65a9ff5a86cf7a7d04c6a395710dc25b1d91df6179c2e6f84b4ef1ea49b81f387d26ea3be84dc433e0116461aa73fe25557ef495bc4026805f744e0dbe35b8b39048744fd1445f58e3df835dff2d47163fd6e963cd50ee73933986387329f9a83abafd0cdf66d4a40f65bc73675c0a8c1b68d5ce251885423b19740443a1dab4ff18d9081d63f40f7699124b42bbac703db99dd0deae645a009bb7f3deecb6448ca2b1df80e4b2b988d10f944a99221edc9f7adc481d2346052834e21d373f612a0258ac4cbb27198354dbbb0057251cc0751ceda4be21209d67b8caf22d1f181c71af882877ad4dbb0a927033df36e1cbbf1c9c1cacc0c2067abe79df1bf4ca98712f5bd36e090ee8d58534597996cadf4558a5d47b28a6149444c7f62917a755a4504d9eebfe1b144e71d95b08baecd65cafecae7fb041f6ad236ecb50e90c6465ae1ad11958eec3e832218c2fedf9940b4ad3d2a91787051858ed07bef8cefed98e9e3186f2ee28804d3cc1e17c1aa059227fa7a5595b6b39a212f51e05823fd8eb913b774be448bc1cfe660db6641140948fee413fce040c9a3659308ee50e250b76592e48c2173c7345173923985b5b691cd419d2890e28fd3bae34f17a35d902aed7416078fe988ca6dcb17dba28033a03494d9477a1117225c4d5f13852e4ced69f4e4f7989ac8301319f412e3becf85fbc666e65c882f6865e75e0e8fc4e0642c806159f37f05b206e8a156801f8aa0aef7e3264dfcb1111f9195c8a0e02a54e9c2237feecd9c9bfa5a38a8869fe03c4e61bf1120d5c76923b67f6cd212bf8072f8f6023377bc351505a41ce2f6838af168c3b24b0b2267cc0144069c00559934eeb4f162e05ef4d103d6961684e8e98cdf2b98dccaba7880be8c48f817640e2f5b937039c3cd7235e5bf319a57bcb362b07965a633b57d6be522665b47c4835cb2f74eacc4a9530dee024bd09649ad6468655dfee60780b7ba2703b1ab3a3a9aa02d16845b8bc390fb5ba1fec9754283d8477d5286d793daa00c72234856b10936c3b0a439416fa6afd94c4c88e466f1d60d3250c82b1f408deb93e833c3355847ae850d675c877fb4c4355aa027546cdc2e731a767e8d30cbe039286de58d6ba35fc1cd30e49d9e158961b34d678c5929e0a28354a71ad751f815ef2e1c75ee8e3b9aee5e66c8de1f85581789dc1a8b71180e8a4bc51e00a7838f445d13035958b8bde06e04b90432a05b638c64702b0b05b0d0492492de4b9e1ae3a506b7d5df2834e79eb337f15d041138e6c4f2743de43f48116ccd4105e738781658a40996183a581a50220c8514b05bc96a9216bf94e01c068084c3a8d71523724a789f516afeeac78e3496cd548a7e6c09576058a5eda5078647e4e3c1795631a64ad3ac8131a5eed70eafe9922cd782af7bcce3e24cc1c5289a0984cc189db655a313939156e00362802e84827e8b9ce6a04dd3841cc54e7e812670a8f7229cd4a52b2e02ba83eef1659f6e23392c7954ca743006f824f38ecda34cebbc4b88931ea2e9fca5ed5aa35fdcab5642904cb9d6c1fe442b438f492ce0c2339b1858742f6d0ea8072f1153e482d95756c5161b865cb751090d2e6961a739cb669425292b36944f2c07bb54334878aefa5b04da4dfac1ac5f5fe2e0fdc8ccf29db445faadec65c1dca39a9e4048e837c387201b73b9c570548c5cbef378f91c60b221cc441c7f840
908fb0e870c6d30e8fc559e4a2371ee46f091e594cb9175da78f8fc99c0197092206be0ed56fdf175062d9a118674035298eae9905847935c161c4b42c11a44749c311f07037fe8f8ea6bd8a7626604719ea8371153f5b923d46fadc7285b433a3eb3d32eccad6041cec7490e684f102cfa797fb7fa9ba1136d3a20f3caed1fc0c0b7e2e95cca9b24ed518a64c42963dd9cde47daf7080b94ea5db0f16138855a675db0391a8cc6a078e8597cfb7421c78ebae845b3eabdcf33a5aec0f75c19271527e005845aeba3a42e5b53c2cd94d190b48deb9a396f81a293e899c91cbe7920817a2c5a2e87106cded6c718eb3a6b1cf7c96c7bbea6eed50fdd12627bc99c1ee6bcf2e9e70c0ec5aa3323b217816b4b67fbbff5cbe1f803082c71f764aa44b45dd01db33847a1070484b326593a53fc4ef4fe267e8b348b25cd0c96ce943ec46574a988271692577cce9fa9704bcd17bf7ec8c20b07308fb9847e581cecc437489817f7b87dfc6afb87fea244c206133e9120e0f951a7bdb7b8e5e700bc9c1bf9e01ce2113772739108d089db15dfd8f252f39eeddd50ebc94b243992563a16ebc4677a833cb733ad43534c1b7356553d9bcd5fe519fedeacdf8fd68a06e6a790a2cb0ab81231d066f20609fd9fe755b54a68bfcc355933d83e910c69f4b6108844d287a0ee3b47b3b4a64470f3c4bc64874c7671472e1a18ffe9563d135f3db9ad708e7195f6c81bcc50621e01c44f310e21689aa3168afb349e6e4d58128a23dfc86e8d4c45854268818be651c85c0ad9608be965f65dfe1521a471d740d43b0cfcec86e7009ed047cb6a4e8fc937a5fa15d4a46ad6d53d274bb3d944a121eda95734d4045513361dfe82819aa21aeec79dd34331ecd5cc80c34ac8b31c14338f9464f539c5d29ef545d5f4341bc0800a80ea4cc5f340bab19d9fa16901f140de7b8aab587da944e428e7718658e886387008606c63477d36e95370a8a8d7e6190763f290aaba94f47caa4e5aa83e5e9b5da84232ab11a92b7429e386c30a304f71ce66a89ba8131061b826f687f292093a78dc128d40bfdca2e7da179d6084a56794251492b194cde427c4548cf20ec8118ae8ad863f8a0bc0e76099c23121d47771ba66e06580345bf144d08163327230386b9b913270220c01cb5c3fd08e95d0e9de564940bba0be7c1d1e6318e6dc088b9fe7285ebd8dbc8c9e8cbf58a9e4165fab59c5ac30cf5e637091930acf1103e173e911117d037ac52c1d3895eec6b5f44f87ad855a14a2d384b1036a72a98e34b63379cebb9585776cdbe8c22801b80a0d3e6b1dac9e30532bc2f294ee80e709368e55d269b96044244a50ee9e597e6e46bfe6b92ce7bf2a827b54c64553a5a1dfb00756859ab8be975969e8940d2a65abdb0eed843b9d4e6cf3ebc7c3a4c78283618f367137e15e69e1c8ef10ec6b3ed0a86c487e8ecf026b1470d2b7a72c6171d969aba18f94969873f59f63caf3f77a7fd05e2cf6c91fe4ca590e4da9855182371e63d709429c07601944eaf645132f62d9212184543b03a1c2a01526ad9a63f81a4a712e4c7815a47d003ed4524d28f6ded2baa7ca4fb179dd1dffb1e9a0574e3d0bb9aecdd713a0bd4e2d75b7508d2bcdeca5d32850a0b531f29c21afab9c7105eabf3b3b4392962eb7312fce4e7ba7e36584da3907e5ef296f7038a055108625f2c4dfc0f538b9559599a968615a794f74497b3d27defc87d71bed7afc55a1bbf28edc24dbf48d81dfbebda4d893831e5727f421b92dcff287f7ef7f8d54b301853c0d1c07e8c981799538286341546571d73951359988ea21abc15578df307514fc4523e5cd15c736079f652a7df9a57b8208a793dc8699e7e993fb15fb7f492dd7f763c6ecf7dc71ec8b490be011eb8e32bb66c2c29883f5cd492cfc3b51451690db76be1e4cff3dc8c0afeada78559d1fd8d5afb689ad0ba290e7e1598f521c0e079090955328bbbfdb8b3e4526b806ff7af71e47d5c6492005218591fe160842740d856b79688c4903047da39ee51bb70935a8a73a91d780e097444d0fd993bb580488e07cbfc24fa705bd7ab21554147a040fc5f8f2c4476e5950cd41af368b9274eea04d57149f5c3de4cacda7a841460a0230a4bb4c97855baddc0a06058fa4d5b1a8af64056910b62851d16b14fd6a98326e434ef7074e62ea301e1af5f7faf6dd5b6e100cbedefbb435bb39e41cf7894fd0740de3c5e67a94b827df00e47d83c522fe6b86f9ba22969ad974c0316ba5eefa2ad354ce6a66ffb3e5e955093570daf21ef0f7a778cd6f0365746405534b99374834ad7b9efaf1ec5173eecced9d594371a5746027b08e9a794a34cf3c80bc753905f495446dad60345545c7e14d9ac12a5e25b6810dd5949021bb41da961251d3bb6d8e5775105c73b646d2cc294e4530cb98f26a3d32a6b4a596987a31336a208b0105dbda625d8c1fb33fc58c195fb0ded6aedf0d40b04c068c348a625b8695f703
26cbc6519ffa0570c6dfc10c3ebbbc774f865190ee32d2c75c829ae87c2ff9b87441c2ec21c28d95d3bfa4bbd57830755a3c81c5735b0c8328752e48cadabd2912b6c3b1a7700021e5a2fe5ef1b581814c01c58cb15267c8a7b7ebf66acb0a3458329889181ad9339d32699dabd8599255e030040c02dbf0fa3e55680551d331682d15fd90220969b0962f996f397776b753b85be03bb461f35462e13bd3ab4d2dcc828e5144b1465a31db1de0a9063799c24658e2eaa97200d9a3c9354f4073c58b812bddace8fa4f8084ff9b711c93888f8c09be6e80f477bf8806dc9a8a2604c865ed73c72b46a87f49875eb2e8a59517d5fe2e3ac22ff6dd71907b14241e8234dd85ffa403c719b2b475fc4c1ac1026cec67ede7da4da2f59208e168be5332d8ea6b8e5decbd1badfd7db125adab2c1144b25b3dea12072b4a620d2c0de18137aea2f47a3773530a787fae25e777c373707969c92a140ecf90e6c19464c392c6cde8460d9f51bcef88b2de509b49d6b1d4e77efac54ff445f97922a77ae5d02d7fd817ec9dc85df3f186f23aeafec1450bd1eb945f311fd48138beb0b567bcbee282cd01503132a28b4018defef2d35e1df021805fd044162072bc0d4c8ff025b7116948578de5400602edc5bbb42e3624237991589abb84fe82d3dd1ce16b1578e0804ca9af87496b0990c70fb8276cfd4fbcc1948a9d06f61aa56772a6320555beaea9d5b0d36049ae3f505aaa0075a816120655ba81349cca4e0ae011ebc2c2607050b780efdb2d79937f30ac9855dd9b198cd9fd7b5e12dd1d3caa3f00dda80f86a628046a20fb40dbaff538f87281c2df02d242a2687c2f0baafd8210a2559d476acbb2950270d456ff147ba38caa43f552ac5f790ff2dafaa08fc2021de3de38af3122f66f2c8cc086addd71aa461863dc574cf8a89bfc8d692d8f4ab27778311d5a018d27b1792cc26f5f3aafe4bfa8636349e872b9c9a349e07610fcc3d088e60078d17982bdb1ba398d372e5a4a3f1793663bbe9299a700e904da34696f9948695cebe78a6da0985e5d709d97c6adf6001effbb9d2a8a2ebf6591887afac2a9fcfc4a84ffcb2bf2c8b66dfe1f4177a02ffd6632765af503eb897ae4d12787885b41ef94277c1e2b7a8315a556257f3a74a4e109a078a429c306dc83e763156c9f2b5bf7d2ef848898a2f5a50c7b6a1dd476dc8a46c0e9f48ddf4d80b28c8194a174f58623f94e092f185763c3e7dcec7f2679ddbec964e9aea558c591748d1ec00540d09e269b85d6aa36d323e32c3a46f047589373b102d4cd78a01c0361799fe3e0ec051a58fa7ce386224da25037f38a9d99753840754e29b4975b38474c1570e7b2fed984fb3d6b81bef4e90a616aac20fddc746ba452dfd379e8c9cce141a9539e55ca6c47b4e995a13494716c0c2b817f258bf23626e18965645abab432148e305ca34406acfda7320cc5d192fefb5a547fa492c4b04e15789bd5e8432939ab3b1842f1b96681ea8a10b40b2ecf6353eb4f0a3b5835dfef670a73ad46a65ceaa6e8246e48814507b58bfc41fce6393e6ecd0fb5dfd2083376cc7bdcc4b5c45a0234690ffb5e71322cba6ebc8611ec15af7f3f7eb0dc9c5e180d379f107c31d05835ea1c4338c404a8c407bce2b526d015c3d2c0569f3b0ede542d49a74acecdf2ee05018f1b75f014993f0917a7b7577dd17cdcc83b98d4c99f03f4e1c4c848416935df0ee9046a74d7f2e95d2ae4f62754ece0651ea2925bc5ec008e124cd16eb21dfc061b9d15fd06e77163d18abddaafc2c7af154bbd4df10be6a6139c1b12349287ab66001cb39c9c502a41cc903d0164588b89fb53d88626f4d7ff748029a6c3f72b3af12ba5e05ad6c2ec3865eb42da966887ad2ca0c0e0cee63696159e6a6e99d84863932d48edf1960566959be0b14915c8cbef075bd427e7ce6ebbf9ee060081ac327f80ebb403039fd9ebfd2ba27d54a3047dfb5b83c4f54f3a3f330387ecb536e7db6f5e8cce9aa3af8e288fbb7cec215cbc05774f1597ea6b76ed08355f5676305904022f4d5ce4cc45f0390c413714757257baea325829b51041b349b101ecc9b615c8ce5cbe864df5fd4ec5c50ef10cac34f92eabbc1e227854e11486f19730f3d02dffbe1e7c6d1c0c8a4b6230e2efe03f6bd2aa8c925c6fb5595aff341960fce379b546882275c2210d53f2faac91e794f857f9489ca644c8227815c4d6194a1a4440740608d5b754dcbb82a82918e07d0243d8ff05e3c2020a1a4c03492f4b8f770bb8cd4311d7a0803f449391c1c608bd977ffec4d3dbe4012d7ec9d45b0e790d377c3d698da676137d206c5469dbf29f14cc3d22a0c9887f1383edc716c4c488f917ee8f5e02050f10c3fba63d961f7954bb47b3574cba08b93c471c9a8ba6c963410e341c8bd34b5689f38cadc174cbd51a78376848c3d4ac0e465f234172a9d607d8837c82b77c5248189b1d47a6e214c00a3d5a6a7432d14c05b4d2c9d
6c27b8c8e32eb474660bdd3e54656ea08076409f52110105097075ade3e1a03dbf54b026947fceb9fa29c70931d3933b0c06cde335897e413644ebda78b85fb0d1d2b173c440198de487e05b6b1900d842b0dff04f878713556392da5db50757766f1179b3c52c3ec5f57c4620303871f48b258fd04796feb6979c2abd0b25ebb0854c24991b090fed836b0721da7eba9fb9eb47880eeca3e0de456eb180d7f645a8f826c50fbcff9b9af36556802ce11c24d49cd5bd0743d077147aa115d60b0f30a7396fd437fe85804b0b6292f89a0fb27e13b295007dc523dc052684983d99c836259533991fd44532cf5ce62e6e3fdb4094275ddde2fef0a3f332bba670022203af6307327b8ddd9f2058b0dee2638fa83d70b8189c7a2f4aae49b1d2af4697edf996f6b248b673c1d7dab6f31aabb2d455aac52fbe7589f62c1c7bd86258da1d59a774dc42396f0a1267e0e1e3add7221b22b9bbc5beae020da2634078a8dee030d98c79f04f0c36a2274f7bd551d05547bd54a4b5946eea1f8eb3a03c464e3a64b02f22ba83e3c49a196585da5b7a1e93cc5ef40a416ab5ac1511cbd1898819438346c92d9bf1acc49d8cf7c97cd72d7f3e00eba34459291c4151daea98a4bb7631c1d5cc5ef28283f788793cc14bc433c6789204c5dd7ccccd777682fa9573563fb2c60d49b45ed5e83bdd9598af15761bff4d2a68918cefeacdb02a023ff87502a524762152a50811f0f25a485d90eb13b967cc82315aeb855f33d71a6832d370c4f985c1ea30fc0a907caead3e85a4c5ec28e5df35e00b498fe8922f151540985da429f4c0f102bb838b08b44493b547a5dbe0a730a2836e675bf38c2d6ebe2105fcc2579fd8e58c9bd1e75d26af8c8678f39edc68733e23a81823df44c5a918f36aff7689a56984d0f93b45ab36b8505c7a6a0c0d606760dbc2490d1ff864877b09943c76d6472e9ac0e8c82dc32a46a208c040d9ad110062a051f3d65043103cae2d13dd813340cf768c7ac952400dcd88182905947cc64be1612c6d674bc6d012d86fc8ca891aa0e5007f378391fba8cde6b65573bc489f47db4389776027a8b413800fd9c4039671834cc5120c1277b9d16a4fe2c71aa0b100c1da7e665173d555803b390f9124bf63548503e83b47217152b8674d39edf46199a7b12425c249eac3a005c0bc9f94c9f3ca807fdb7991710f4f0a83362200bb978a174147e8f47d23a12d7fc0772f88a939141b49f31580a1026038ecb280c0f4973e04ab4eb4533820c4ee778c404f040a98b4cf77260f4df4f0c51179168a2366d411421066cc4e2838724afb8862b6d37ce9297a9975f725ee565e36fc2886ecee78e6b4397f7891d42f34769f87e015a960ba2916bb17f684685d6c88ffb65f509ab7098d5191c60158c574d7224a7e217a514e444cb5362393f8b080f3a7c468b752e688c008bbc37e68e3e010aade8ce8636ddbc91b91068cbe38f4422b9d751bcd4ddbe28aa3a45fb72b0f029e223c766504ac8bb825c14596d758f62aeb2b9389d8d8dc5e11844f07f330f338a1c0d596e03a84ed28fe1078defeb511ac680fd4fd08d4882d809b2045fcdfe7e4ddc03b50026edda6515a96c10dc5670b01f9b05b7ccde935277a9fcea4bdc8f8e76282e20e71fc444eadc24bafdc5e039970624e6dd6e077ca9ff5995ee17040718e017dff744d231b025a5723af1aecf2b1dcc2fb46282f9c213883a895ae508c733cb08f67f4737c9d02f1d14d53dc6211937d26ffca1b2bb5d5cf8b5cb9228afad203f746ee8576ac298059bee5d6707e8b3e9999101ea49dcd3e2a113d49d49125884f0c52b39d00ee2dd994fabed2a18562ccdde7474747f1beb66c975abe903d19306eb598caa7a262d66bd151a606b37aa708da5458e469c6f39643732e87ae2302087edbb751073819454ccbd05f186474d81542ff8c2395d90f1d4a440b08a1c2991a40d11fb7b983e045fb07997642f868b2cb7f6085b5fd18ff130bafd8e984afd1cdc67b29952a43dedae21873151ce98104fb386909cdbedc1413c103bef74db6dba90205f24877e9a6c2fce0ba84dd403b10d48662f887f080a06179aff63c1d46a2f79d9fbb759bd9701660cede427f772abebd749d02cc0cc2a78143f638551f7d98dd3698c8b0c3b072a3f88e1e4dfd4d5faa9969fe583cd40ab85df62781f53c7571535a1a66cc3148235615e7d786153c58124dbf62fee05a235974f660fdb4fade1e2ff356371ee7a78dd5fba508e7a6548d14081d86a9bb0f80914973655f430897e5ae28b076f13722e507d5ca4ddd5f41676f7fb897066ee10a655f3350e3cec1e9cd22f84ca4ea046e113527e42aeda1e68a6c02a2a12cc8e8b52142a6678f4a61d6109f29031a6a5e3451f8a8b0b27bbde4a46d308ed23c51d3b389ec7706d6044958d64366c69082dcd35114d79f6ffc495f0411e152b790a1a145964af4fa5e390aeba2c129185981d9b42f4ff10283cfbcc
e81feb8127ef5e1e7b83c6adce51fa15de670581fe038ae01d241386c15adfbf9a9e23a1ca8ba69735426761328ff3207974122eadeace310c355fc7baf994188035db1b6cc42e83c475506b6be892d73683957007ade48150c070f717191c9399756f8554055e698ee273f0859fdd971cd5c58bdb5e36b4aafeb589ac6dbab54cebfbb7a0d9b5de110e106ab8711e04ba8e12ea0f30f5860c0fc870fa0d75f8ba7480808c45e0d0ccbf769e503a48cb628510062de7b50c7a94e0b7049e0c3d2b96f78f2310a91302ed3346e02108db90b4822a5f7f0426f7fc41b6fef1eb8cfa3800090059bb987dc3bffdb05791503fe65f00b3b8d2d27d5bea88021698ace612313d3c0d5f8f4e48db89405838fbdc1e61d922f3bb62e01bf92aff38184b1687f15119b17c930854d0292097066edc168d270038f4b5015335c88a40cc3a3fbb1d6bf9671090789f3f838a450ad6474c70611face23555baa4f97563dc87594f1934a15718e98dda6978bf2b0dab28e24844e19a92426e1562e3214c0f3d265e645c5d289b065f0e3f4b0048f520fb7537abb8c6e7093299cf6186071ab966d5b5914437d921a91e3de28d648f535607d71d35972cc5f17b2e332a0eb54a1acd74d128cc110e777731d851ae8ed510334a8872237a11fd685873dd54c2d9c762204520ca07a82bcfc328d15ed52a33714bd49378fe68baf1c4f79b1e89fa7820c7e1c265dca61b051033149e29a578033bd5c6f27f266d640e4c488fa5ef9a21f894425091596e9a0afc0c36bed2c0af624404174d5151b39fb388e0ced719b0100e9aabd76a62e8231070993090f05448deeb84d80d75396d3b9265faf34c3404d91fab1e635108afb34c26da36ab78920228b7210db3d0bd0c055a76b78299e3f4ea0f1147d77318242146015feeb140c3f0beb2e7064e24eb823b19c0ec07cd16643e2c9848e837c3e33baf5483fe21e0631427a526c10ef097eeea51bf073416329797167f2a038ae0c7c56e505f02112af0e8dac1ae39ab5313a07ab221354c0837beec0a7bc0eda80cb1f738bc56eaa4a9085434ca9a1ac633fe3d197109f48c675ffc4bb67d33212e8ff8c462affdc3d99fc1d9662bbc24d28797473104fa43f8fb8538dbabb19bb34f186db8674fe8d5e22f3d294f86111d379a359c40d617556f65834160f29cfb40034c0dcd67e51f903c4ae9268271a4811c608010734aeb2e838b4f87c4b47fa3205cf42078ef44e7e04ce0d765a1382ebfb7ce70b7dbc180821493c702a8fb269661f389c5da3bde6baa388f54729629fa38bc1f34428dd602a8fb9ee0229d12e260648e3b2324db39634d63f166cfb0aab70c90da0a52efa04584fdf91514e7dfe02b3a3247ceafabb7fcee81dfa19747184f5906d7303ee1676dd510b86f2aaa2299c79a71a065be96e7f83ce47964279f7d6acbe6b75278e4a57f675e7047d4d963c6926012e903717bd96150d4249a05d4c53daa4b3b35aed190cd68e80fd175f89c2956bcbf5d9a3d0d48695d44f8f438257d499d2d9517fb3e114a50e0f1b1c263887e841db585afa5e54505fd047484606a88c29dfecb126d45070e9a7830e2e7f90197b3e6b699e976e84cef068f44a111c097e4d39b9f46f452f3d8a55c80745ec5a03bfdb920669b706b9045abcae5cfedfa6091a8a2ef943391aeedb7343d2edd6f06f11d7ed145f0cc8edd7846b746a4494219b44660301371a76aba6ad51e4ca4499f8153d0346c94ee00717a4e467c49ecf02ccb12db5aee73329ea5980d759906c64a5dd9f71f98859a34a42547914e0fd271954f69a87fd0f34396d69fd12190da93d4c7226f3cdb1f54713bdd76a729458857cc79d406c1aa37264b5908617ea451005203d844471cc8d616d66a01d5d15bb5bd2b34c3de855f2dd86764b2a50c863bb9e479a18b709b5c264fb0288b1fb75fce50750f908d245eb03687197612fefc44294b2808ea53e55856379d87d48342662db5a495127c0172d061f4d863647038d4247e8240d2ede8b76924639d505ab1effda2a6fcf35e79d2f17dc947e0c5b83dcec1a03a4d09ba41e6de9938dc03666c827c486b3cdf610718a503c3c69de30b6f904491565c4a153dfbd37416414032592bfb8cf72b03ab5c7fd6703beecfc71f8280e3f8c9e95e501cac31b44392021d1086a14a94c783ced48eeacf8ed4ce5b9dc059b6748169b6f44dbb042e4bfa31b384101fc460b30a83d8004dd8bac54902e27b101ad317b160ae080ac44b8159ba00c326335634f5dadede2b8a542e2ff455802a79001b083fdff75ce93481f4052eebf9de38bfa35390406312d8bae1059de42e86e811d70b9057039c863ddf722d7878115261ee19a7e724259064e25827602c9b60e48c831ee388eaf7b34a33b843309bca779a292454b1157c0414074ee8e0fe6b6b11deec05c0afe9c6c8e72f8c85c72061d7e9a5927c0c7e3e569e59ed3ae0a00a6887a9f321b03ec2baf15e0d33c
8cf9766cb13c15151fc791e533a7414b2cdd36f7b340878a7f8139a49cdee5c1e87f626a10636aebba67dc8452207ed9a800136654f26b0417153cfcff0bc60e70cefb9b7e74c3917f01f9ed08efc26e1a1e65ac5cdf404f7428532121ae5923a9b3015d5c685810cf90fffd1c89e7a66820078ce0d036434e87df3436331cb431352c614a9b470d92873b27cf837f6e7404b1d7502566f59eac97098d125f06c7120b20b2a3321e06f8917b345cf9e93f2d943e5db40eac70ce08dab437df7fad2eacc99b8af4c1a4c6c17ded64f599d368f984637c54de441266d4ee799bbefc2c9abcc35c61e6f1ea60281768364a9d4775efc569cfc2ec840f6e688e5ad495811fb183f1b98c1fb8e8c49a6db6ed20e5c5a7bd9a8832e04b0233c924c92c9edc045adb7e9abd4d6e681dd4316ec234f2fd4a721d586e65d1ef5033a73197cc672493d60c71a335915b089c239c5b81f591cd1488dad9bca900bcdbc0ac80e55d08b618c0af0f99481d7865ed24d94f3b22e06438694c5dd82a8b6533bc7df41092702922c1cbb04db9f21b429ae98cf9b924410e0d39bf89052efaa3e703770fd3d179fdcc6884f576a15ee943512abefcbf110b01f86052e2c02c36b3d7d779df238199365fc8b94ed6eef4264817fc2dbd53c7364c17353572934b1966c3198d321ce308d8ef149b6abfc2c3c709df57900f4239d944c8fa1254aea0d916278099609c309eb640a8ddefb3435aea3cfe317b1d5c1da517f40974793017b2a3cf301a746e2344fe4db9941d83e191b05dcce3161b5b1775fa7fc5a78f50ac526243ea070bb4cfc1b4702188ff9c6fea30caf76a895be0a8de1c6eba260e98d5db451ab882ba4c2556259d7154b5410d7debea227040269ccd162c22b51682887f71ae6d6cdc6441b3cb1e89958a87489b3749bb540faecf753dbfb2c13460df39924deb439789747d5b9d2e1f46c35fdd92017751721f5e2fb2e0770ca5ebd0525d4722f9888329fa5d5cebda5e85953943063a1921fb05e0ee35fd1e735caae937045c801bbaba05a11a0de8f9903b72322567994427e59540b61c520117dd3012a06fe7b9212484136948527c2bbe587e4ba2e496687af85fbbd04dd63e513c0ef4677af26750f58d2b112bd1b7b8c05104313aea0d8dc01d87221473b9a97727777103af59607d2b162c6ea29d9759e7f60d0fb70cdaa787de324a0e46371fc583c7a5e9633c7688fd785cb2149d74b965170595f29bcf5db083fd6e504b4ee57ea0ba0bf336a82dd0c9f8c08ab446a9d1ea1377d35503206e657915b88e1ad3dfec0845a55c2faa520804f4477d6ffbe52bd37ad8499624e19076703ee6844cc9d09b16f2e3d68a5d584449a7d94ab2e232b397571cde83ca032f017d976692549c16366c99604c77904de8af4b0f20430bf31481f0d17216a090f0b5480f7e885e887a283ef8102692fa2a90a9916dc3b3a29e2c8e502b5b7d7c13799569d0c1a8194d5ff6e9e5864ad385915fe2da5ee46714006b34a22a9b833eb494793219e3f19371df2ca35c33ed77d4eefde1539c596340f6b3be7fb41c15ee4052a7b7d6b297a4a418cb0cce557b6f11de1cd578c7482e3241504e60adf1bd4c197188feffc64250c7577c58d760eb12caac0468646d0cbf0bd6a14e6ca47c839cb65d13987c419d5442609ad7fb076a00661cd291e863b669c76f05e9b96813af911c1936bbbeb79b69d0c121aaaadf6081883c1e6ec7df9fb7f66ff0b09e923f10c14a9ba3dda62927c718f3fa0162f66cf9469e9805135f2f2777dacfe331d852c4584e6227db841fecd19fa5f751260096659f2deeda435d961b8a24a90a0a809fdfe5e311ad277a0ff85a41cb6c30087088cab3651c0532fac3b976ec9cecf3ed2ba57c3bea23fe58873cc43396f3c348fa8f32b7070eba3697e0ec777ef0c5e20cb17361fc865d2746252f2cb1892ef90691d7a172c516b3c0b6e27b8729b03366b75dd818e50ed7105aff7f06ec13d9c17a72412cf7f06f32198f6522fc1bef69a66b6021df9a42a59253b8b13b3cfb9f0bd0edf438ba7ff280fd154790652ed64c0fbd29ccac1c21c76dcd5cefd11690c3c8f31fe3cff5ada3d48a51fb7d4d58b630941c92cee564b6e3135f23926a3f14bb0f85cda60f612a3939d1818c1fa28a279c7ca4cb5ebdfbb4ffe85b0940c70e71caefa5900f6fa2bcccfe1b95566f7ac8da85c9ced9abddcedf93667eb9d5d42a0bffab88d3e3c64d0e6c4523744083810f8fef2473a4e7a70784994a137c5851f667a2fe9f53e09d7b5740f8def97105ede83b662cd1623bc6cfdc2991e9bbeeef6a342adba27e9cc8eaf841035a10a3465a450696a48e02cad4c9192808f8852f5e99cfae23cfe0b87b948cce611f9ef57587e80c0c23a30ef5cc36271a4b12645a408cfaf9b1fca1c3c266e68fad27eed897b1bb33b0c5c1af441d53caf86e0f738c2d8cf019cb4527c614a68f9c9d91654711cae70d698ab
00cbdc2a40f986f53504fbcf8608409b1c18d1ede25622b39dbe389515d67ea887c6eff91b1bfc3be7d9bbb356ee6ea311d78d04bfc8bdf52f0e9203c4844d67563fc50002d251bd7aacc38798c498a1338b1fe4b4ae747440f6c5a0f8c25380fee02a886dc938fa3a847db6364fa0ea4667d14d67af27e3d8de8671343f5d37c564c9acfe6c2fbe9de7f250c8a7b86139f4c7fdd064e71258af8947438755858cd1da4c9938846de12abf8073a21153f8fc6874b731dd9e3d67cbcddf622edc701ece5c6d80784ca94c2712938d1c369839a1f53f2f10a1a89b388b50a73c6b3006c28cc8a1284836581cdd4c482ffe0e41292a66b199d7aba0348e9398db5fd0f0fda528e234f8b644ba0f1770aeda6586af18f6f3c2dc829b3f092938b67f884bfc40d9e3941c26c235adf4e3db32a2a16a53b50003f27c88798e51c8a813bd56eb2bf53ef0ef4e2b3d6a6c980e72674f21d0bdf477f302d38cd22840e24c6e18d0c98cc6a710c8cc2a633a7b59a90bd893a073ea1b87bf15830e1e384749b25679f3ce10e0f9ab26010404a8df627f4db4205dbc7edd13620740e93e700f20975447a18b1454428311383ed5cd4eb6fdb520b08b842e2c419d786a2a92136eba3081763795cee17d4a5746406de0c599266c50455c71bbc3878b3399119a68f65c00f4a3f00b3c149c9c6111972e88123fa56d54ced86cdf807a5525e46f2178fda42d813b48161bfb40e499cd429fe502add5f80511aab5ff998e0a242a1a476014bab4805e337c3146397d366dc2a82206163d44061443f2e8b2cf125029e0454e58aa28b4c9018617409688acefdd067b7312c36a2755ab0b75828d9f692017c722c8d2cdc2bb313d68a4e4a9b03212d82f74f85169300749c63028f0f29bceeff7a542a90a272fbb5fe1f091f70b88aefbdfe8304a47c18b8fe40b25cb549f3c4b84871e521a2fa2cd5b2e026d70c474a1f3dd288c4ceda3022130e10c6c79f6f6f671ce6dd47e8c21c8d1d166919307bc63977a0c672f73f52df0527d9c079a9486cf295ad2a44527e321ecc03e9d4cd3f4ea1970e12fe18fed685358124512fdf5165bd3c5cc0e3e6373a49e299fb1a4e604c100e981a19428bd4e117966214b60d5ef0340de153e69c6b135e512f19c5565458a0dee4d1d4cebd55e8721c7020071c8c63d784f06abd5ee6ef12d0e9e89ce07cb732f954c41b53b8e829de4eff77ee45134103a2fd7a2dfb42c1a2e533e25c093e9abcc1d6c6204dc6e059614c0ea6cffe8e718acbe9c1e937751df9fcdb7b7865653c377a8bd604dbd336054636bef8e5af6e03a70e6c4d8577c084c8a6123fd88c75cc013301366e430e8892c5f86edea7b7a38da7a3dbe12b38952fd54652c940e5dc38d7eaabc4e6e621bf78bf72bf8eed930d0ba08bc613eef4a1bada0c4100fe64ea4b1197f0fefe4a10928b0ebbd1f846922d9da780383bb0dacb1c228d8fd722de3a63a51ad69a69c93f26832f4502981a3c53f58cb3bd77240d73c2324a3e6357b17c198cad87bab4c9879c8e8ff3774d706dc003c7df622522c60ebe14e077ee41c34027f2d907ab9899cf5451cf98e9a92a71aa649da2c0711b3a2117d77982e51284ed203358308ae2c0825e77916290eff356c84ba308dc48fd395e06d39f4382b70e3577f7330816e14a9966545ff4d8eb0e91373f0451c509a068e4070aeb92eeca09a7fb508d8ba284d396400e18f5d76719b3e455ba8c6b4d8880baff76b08e782df31e179ad65b283d89796bcec24aec2d4264f4092a85b3e3b990e8b6b807c9e66faf050f1d21d45908a6dd38ef9a6f0bf875404adf35d09429ef82bf7e118eaae455cdf97fa88870c4dbcfe8c8880fe0707e0a5b0283bd3e5071b6b0428dc854ac7c8f24432ba2e8aa6e8c2489b7428d7c682e44b858ed3ed53bf796c16a8c974e793eeb6f75441eb1a8b171d7bf97b09afa00e0d48a0fa76a9b9c63c287cb3ef58f0036f4922bbe019683ed3c610f013a5f6ef60533f422184053652ba13478373993ff65fa4aca60625267642ad658084a7aa2c6e0972aab8af7055e9681738c624666617f60db09cef296e390bd0ceb6bb10ce24ac1dc5a1728aa2a9dc9e77513618fdbddbe623d80356947d7f4c76c67ec0905e9aede5538014a8f704c23981b5562f6c9e80b1c5cef57c097a46039c19327bc77c93cc494e6eb936fbb21ec8f4c400368294ca5d5e01c25b7c62775b4943e5861c1539e1dfc4cce1e860f4f936023bee9eb77bc921bbef14dd92811dc93de775a4d2c46ce90667db3c758a296f754a8ef31e3a56496b0d53f3970ee25e482c1f53a1af8c1b45184d9f7dcaa70ead4afeae48a3fdac81192d6f927d4818d13a17ae8693af4c3640f082446b9e16d7f9287223ca562ab430b8533102f2479dab407524501f32cdba74c7c0f68c61a4db77c2dd87e52363bc66864ebefc5d6ab824d67bb68e2b0064e9e7c54405bc2eca8c3d1ef714691c17fddb4
93e6ed744d8dc69d9676566f22b087c9c5f5274435115357509f7cb6afe05f901491e15c5c2b829fa2281a6aed613f8778c94964632e15eeef1a37f2b704432946cdf3fde9521143ba9159f237d8471e644471a313ccbbfb12eae2d304ffcbe73d21976aef9b81869e8a2a4eccbfcb0495c08a40d4aa5800276252a1d4fc45555aa8f5d2c4fa7558d5aba3aab0629aa584b00d56c907dc1aae336b70d3f72175de393c5cfa92c431c30b01a1303186d23d359ce414c36eedb037a6409d778c29228b66eae808d10a797157611dab50af4b2dab47ab4b917436fce3d5eb408ba4b15469b39a74b108d6cd05a201713140a9d31b143df3836c3f07ceedd69b7b4023224f6fa327a15059ebdda5deeda5600254bbe9a5ad29365748e3b82ec2a35d1f43af408940a361907f0575b640b3e36a8215a12e71a692f31d191d8459e31679c6227a7ed2b66a00f58a100437d436661b280f137adc99ee5d57747627bd7e5630fcc4652525a57343ce543c077e08db61c931ff18e5f41f4c0acba9eaf119d1380324376fa2e71a121e75fe456004ee9580eab093d0d35550ffd9080929055654721f9f56a26eb8137549d6d8ad1ce97349be734f74bd342b3fa362554e5e874d83ee0b536ac989e267fa0f68716a962279c9edfa37778815763530601eadac2b7f06f4b7c0d2b681c39bbe4c91af0fea4f7a31a50927cc8c2fed0c24064e1b1db581b851c6c1461368738d032ea654e29ced460d4c2b831b15184be0b33351143ef0a5b6587f264fd44bf577ea4825a9fe8cbdfcf562b2c43135aa084329ea720d21b8da30dab28474d7b473418a60d5f0a5bb8690ca76d01ba6fd01e0d1f0e30ec298e76b0719f2a8d07d9498591f07d22dceb31f7f3190b03ef1182155cc8d83f8360bac32e9e1ae9f2dd52f75dfe41e8301804ddec2216a35578eed29c9c0b927f0edf5b647a63ab775e01a151e9edf14185a1d96fb74ac8d0c2d8031f6c13138087ed194159ecdac3cfe0a3aa8e4346dc4a396283f0401bd8017b5654d7c365bf701cfb03ec77691beb2b089b50afc2430d07ad18ab7516f415d24e00c61b5d99d4f83a9282afdb90caefc48f07d2908149eb31177dbd7b86bd99d0d7ff52e4ea8df5f3e0b1b34e2752787acc6edfb8d0c97b0ebe5f1ddd0733bb0d6056769901435ec1f1c0896201ad9c03094ac2f5335b814427764c05fd679595eee07bb6cc84fd732eadcf9ef091d28fafe1e26227ae22048b01dcd406c0235140237064e130b6d657ad083673801d81bd506a525069dfa59e6587183d92822821c0e5e2344808a53be34eb17dd31d05ef80750ca6ea47790033514874732fb76076eb799df4305d7914460e2993b082510ecb7ea4c1a08b2a2f5ac3bf6d0a226e0d203cfa1f4aa54d96d9920a972c6557fae0f3b0f5a25284d49db82a08b7082af9768b34283e899ae6c77ab6134bbcc9ed6de6f5029b03f2c2e4833cce8685812fa07d9417aaa7f8ca8dea12096827492d3786f60aa0c560110db958bed3a3227d8e5261201a7e21c3c757f376e9008851372108a2562f5e4e735546b58dfa7463d2492c32d1b025b56b9f16a85d96efe5a9adf5397d7ba3295020401fd79d17d97d4aee5a215cd56dd87f877b3ae4312be155dabdd2d885b5a03d4c83a23056398e1142796eb32bbfdf83f3907f6093ca4d916656a2bd6c30db1bbb68e525be582468c490aa05b0677340952fad1e32001f3c89f8ee0a559de7ec2d7ed84c1df48186f19ac6862797203f562cf05438802b4d6c039f1e68263c727e4b51ce7c581f0c6f39362e6f580e022568fd0d4604930bd2ba0110da9b63831c63b9ddb4ed4f103405d218f36e50e73dc42459143c0538fcd625907eae5a16c698c8ceaad09b49231c0242cd64f823b5b2a10f012b9106c3c2495b10315afe9ec54df939b2c9714372b3a13a8bbfe5384cc9d7c0f0d6977a408e290d1a00f3d1b25f723d2a7f5804477e959d03cd4ceaf536bcf4bd1be892ce7dd2c99cfe6d44bdd18aa09a5fd04898a89f58f14faaddf8e7c4a57d54998fea27f9fd34f51cf641ee226fc892795f1cf1c4b3705e91db17fb9282cf12a1c79401a50bd21a7afe3532be80119a6ffe9b3740f472972969836e811c8f62f8eb21884e12965528bd611b0437d02b614103e360b82d9cab46ccea08327b035c1c777ffc15c9a00d61eaea7831283742f907ac5364b6091c6e0f7f3b436f7c40d9c9444dbd82e4ffa0a62906cfe132acd30dec5fc59b2bf17474ee6e33ee3b3c8322b53c1aede611c98e458f24c0171fbc3677208e4230d81533ae6667be5c277e1a6224a64bfdae64ad9967baa81bc75ac8eab99c667547a288c727d5838ba85c1d70afc4f5370f59c009eacf6ddaa79981e57a97ecd98665a38019c065d54101ebdd3ac5983c3855863c37d962f29db13748b2ab83c61007af56881e784c397d21b04a4660ddaafe8aaa0ba5bbc924362efd94cd8f63b0e0660a2f3c12
1ee131efdf2d0535d809c1aa3820ab0a1273c68da6de38c346557d2cb11f8f73f1d5eb701f46e6518e0e9538c89f166f36eca21f827d044bde8c22ebd3c080ca6aeaa9dd57d07703f36e01dbac76897b0ef5049834a5abb7205f63a36e028f9a19500b5ad0ca1a35d80cbe873a9965615529053973e15e13ec56c172fe57ceb5da71ed459dd01a4ace6c40ae99a824b5d5098ce207d8c8893436fa3cfc0f712c2aa42a9eeabf373d3a7fa4e0773d41e6db1a3b1f0f98df490302a46c54988e8ea402aeaecd049a0021e01398e027620321acdd1cdc044d8e5551124fca4d71604e777d247d1d01d12980ca84993d689d0d554dbc4bdf26652c989c1c5da1af17dc70aa68b09b4648493c8eac4ace1f1fd6b5a51091c7dd265b5d118a8da19b86d5d55daf92f1c235736a9e5f8914b0ec6072a6e7e05027b220143c6968b1607fe7e9740d71a51e513f69c5254cb1f0044117705856e42c75423cd5e814b504b8ef46bd7d2a406081cf23a3b06918366aed92310210305670649693dbd3a9682ca2e40f26b77eb718673d92fea1093650b733f93da8ce4283af72d33ea56744bcbea60d11c311876417b0ef56e717f7445ab57166b9d8198865a8a459d58deacb26b35690da39b4aabd5d9441bccad4933833936d839c251301820ddcf02d44855779dd992c28a9514d1dea1a9946d3771fbfdf395623bcb4a6f4fe0ca8390dd1e9c91d6e7d6746c33ff7d59006b861511142d922f5f1c9abd9a106344e47e73bfe626d2da14c8741c1fac06d394eca297c72c48f21fb0cee319b1a0c4446790f9bb78a0ab59f0c22fc973e90b84bc9ea0c5095ef3387455511bb47c66f1d570ce12970e1b4849d04d7430dbe25c6d79dc59b2bd63e9cba0cb6c489e9af450cce5d0b432453a404fbcd6dc65483d58b4620c323020fe3a821ce2650927b5b6e20c2a2aafccc35bc9c70af75758fef44316fca58478a80d03ced20671e75bf7c8884b39d0edcf09060be19ba59dbabb8564aa685d216c39a05de58dac23cc28e4346f9e9e040701e8786d108cea77e06fcdb08b3ad798895404be66f115217e22c711bc6e7b27598da40c9c65e438fda1ef578af854ac61a8f8eefbbcc7591c821010298f9d87d9c7583bd8d3f8d994e0403bf688c1d65bded64059c60f91558d84e7e97d90749c9eb386eae605d8ff36952bb0ea883f712c9085c5567027f060311857640c74283e332c6e8d3ff85bb14f95e4bc12b3c84ffc1707599b4e24a646a30f3a4e7f38ef34abc7dedfd4f3def6df8bfd9078bca138c7119a82d29db078cebdf9bab3885b5931a09bde445b2953027db98e177ca336b1d9413a37cb44d7582490fee544040beb909ea39ab6b6e13b734975a901a6988fb3c80a27b5ced598584649aae9c889696f0db1293bbb4fa3bfd41bed2eb159a2e2baa103667439a7cf4e6ed0c2e6db5ff06379b8b526e99a771dbd6b42c7dba2e48c8f3b8738d7588e7d9dac2b3eb75b2687785fcf75558e738d46ddc6f1297933565bb067570b0c8671831b5acab52dc1150738681358a9e85c260fe9b78e130ac13f8c9564577fc81b9c66a35e53fa6dd1d8dff4b47327950d390af8a09c1cd6b89144ea67b4d21ac8f576d0c357c4d8383c5fdc32b5fbc1af03979012bc6806b4c01c84727c261dc0997d1ed825fc350d3d0121d9b29e06e5fb2e7c98f6dfa9d7af09c8884c088160e31cf1e8a2cf1372c75d3c8944e31370076292ba0186304a7cdcf8ebd1ce0db9a7fb0a9ca4f3991d7a7c949996e8cfa5260722fe2b2168e2b7839dd219deab839ffb24f97cff5fc3ce566862529376d784630243d4264be771f25b7c6f138e7e60e9d77e9d5ea6c6de3b56cccbf7a9540b366ade53dee86a7c03bc7ee41a35480d88ecddb5e2f29cbe173dec0214a34fed76157839b7b120f149be3c1e240b41818bc0ba9d82f153ed8fa0366d92848a7709fdd47bd02cac48ffcee3f790b4ea1b84edd73e85446748a03d9f0c9653ae5df24dab83c306699e401976c539b871a215b8e2be99ee252782f0cc406a9880d0ed2f7503412e15ec76439d997c61a7ca365613cfc9353635ee5f03f1ef83b48cf7bdaa8ebf3358c5f8a26f9f6d16471f7407ec88df5da0dc6d4dd0894dbdcab8e328ae6a35c63b89bd8c468439ea6a1729bf4d333a665b5798ec7a822b7ac5b477dc52320f163e819442813dfafd70b4c7a22995d8a157ccef7ee962a562b453f1cb51e8d023b81442959876ad6d693a127e84d32ed73cf6cd95981d5d65b00bde120a3de408bd16a00b187639c525bd607c57f54b2928583f86955a059116997730c83276624fc8940469b0d50631f0c8553dd8ef6ae4e7862ce250b7cfd29c5f2e77a7bdd922b1a95518ef38b5abbace6027b19651886147fdfa22edb16946456c7bc5f0ce972e978aa11d452f59c8d0a3797169b92c4dd2896d2f32ef62842b5a85f1ae442fab91218eb4d265caddd2bc4ec146a6a06b2ed811f272
31ddb56d3a24a6d35692f6bd5d5cf7ad4d1282b1731b2574cb0bcbd161c8938ddd0ac69a16af0c7196a8d8c56fe4ff683afa87d6e749e326804f20030ac4e8573ea0d2704f7959d3e19f123965404c30593b1cc8505ff18063920a3b191c600eca286e85f1b5277067d20417b8a7213fc32243a6d934c8282e95dc20ee254d478ca11d0327589d60f84bd6c104b018d67be947a8aa6beabca0bac1ddb3a8d9f5f018798c93a2edf7e44ef8805107be6e72406540eb3b88efc2caa534a0a9ba8994ebf0daab5948583a0f17c1fb91c861031da061c799ab6fd9ffc5910bf9c48880f87982c284eab7f019b70e81b8be7a6fbe851a22a37ad7d679e36350cc1a26ca28fb3e5d5965c6e7464ef495f608fe3b3863150fab9f432c6c4f8f6c171ae161945e18f6bc4c0f6290263e87f5e7ba533e1788329ad786caabb2e2e94763da0bf61b6dcf11023476171cd4bc17d67aba0d43270aa79e2166289dd78943a242e351bfc5a663e1f629131f2f9d4b30cabaacfe7d3a745cbd85d7341d2cfe3efc370a4a3b0aa6a4a7ed4a0ecae9f5047d6d626cf6c091914dda3b05a408758c654161e0ac562c2bfcd35cdf127467e5913fc64d318425e098777fdbab400161fd2fb9b993432d6b1ec0622ee703694e5128ce0d17f8105e549c7c12ab00b0742fc289bcbb87dc3745a3cd02f7bc840b9bbff1618d5a5a341d57f1cd541ed9d092cdc7779a9ef6a8f76674110808b8fee5cf6ffcf6ff13a2bb700d8e43c10d872e020717ebb24179d1fad4a0e414344cbedd59a4ec38dbae1c1d3efe851695a818455f8819d53d250418ce94b6bee8c10444f586131a76f94018c8292d456aab5c8d03e90398de1a4e373c778d3599c3e083205881c2977b3c55807cbcf4aec2d9b556b63df0431b047af3e8250e41a4a38470dae56e304b408ec0b2f39a4c4bde4dba1b43bfee86000ea8160b04031e12c9ef871484b7264c47bff06bc0f8950b64b2f78885297133df0db98023d10a590c894359ba4d2a35ee6f6f4b52cd7722a2eb79661000b99e925bae7a530792be15a7d156dccbc8b61717fe387062676aeeac0186d14cdcc874d6784bb7bf35910e22b4c3e35e47c2c5e985cc0efe99bd68dffe515d8c287f1b79ba2c561ea37909b75e0a2c177fa31f730ccbf138f44b0f7c64f25812092300aaba5a136f26707c927b3f0e665f278b0d009146b7b08e2ac2a6f911b7d2b88bb74c5260faa55217f91eceeae6447519d57706e07bc47c1890ab9981709380362251214b6cb532f7919722b9e60e3890b7c0d8b05835c842b357b25a0b1ca55ee0cfacdc49b0dab64fafc792a693f3bb31eb8b889bf640593cd55902f2b0bc9dbe08f1e3a4b4d933dc9ed2565b5337b600b833b99b0c45d10bd3c6c9ebc4e5ae9d9ebc7aaab62a73d13cd8ebdd229d56aed99fc1879e5d5078eb7233eca87595f3135c68e7aaebfa26107ca4c1a2330b83a71e7d9b95b7843066829fe0bfaf4033c0d614f7777199831090a6071e8db4532f33fb3074c3ddbccdf2465345b449639ba43608818e3a1b4a46cfd2147b1c4da9327cdc563d70f6714d84b14bcdd119fb20c953feeca2716895901e502b6e1c3cd9b6178be049f08747857444495a53e790cfd8410d9cf690102024a03d2c784f5e5e517164c4a8848d0b2f4982770937ece3d0efc3a0bf7acbcac1cc70bc2ad0c03b03d2cb7c501a719584100aa9b24dc03da0145eb76dd4422d0212ef2ecd6bcc8770dda5954960b1276adf0d3a26c1d7bc3f7c1d8d6a05843ebcdf9b6f30e8eadeafa2d7280ab0970a4754fb09000b0c81c120fce087ec88a1dce137b769e4df5588e074212bb372e1a83af89e21caa72a1cfa3d61e7a2135d542aa8d4aacbe07dca3b0114eea270f2d3396c8ca7f91aba1499ab4dfa9577a09d79b9067dc8e089ba4b4312c37ab0eac6687889d8b94a70ef3ddf40d6a9e986b3cdf9688a5971bdeb8b08080ab87c7207ef46b44162d5cddc9f7f389b68dd5580a7c7fa75aee9bad66ab515a4bb6b58ba032f98c155b8cd1064e7e1215dedd29322966da2d90bf98be7a06c9ce6ad593a1cd2bc2829d0c5b2d469bdb2c63881c043a0c840f575c59ca1a64e0fcd6a77f798192471b03b101240628a1200550dcb148981ff337d871924d019215761a742e3cdcf204f8ca5e4654fe68f754ad9ed8eeeef9a81d971a7d8a87af00da91a64388bd3e4ff8e223b8bd445121e4d9da23439baa9ea4294e08b942eff676d6f2b31c91f67d3e6d9aca85055013f4b2b1341326840f2251396511322ffeac542b0f2db43a1e2740db5073dad65d2ffa649685cf93b4dc19d38080ba43d4a2ab75cad74cb469e0f61cc878c77f9400a119fb158eb298bb2876c560bbaab2195ef5a235fd5faf03091b75629193d16556c66e2c3a4241654ee39be5db4f87836558fe50242bb644768dcf8299d91eed188ff3ecf631fa0c28d68ac90c87b07532d4f7f75b2801b5fdea9f3204df
c4db4a59e290d3b1c6e9ccecb684b572de819f8bef3bc6177b423d15d1c4fe8f07dc803d111419af74d097d2d39e82dbf2a6f5880fabd490844e38b53e89e6a0bdb0fbda8dfa315410012441ebbd25bd4c4d1c0541318382ac92c0f9f84ce51afe6bb69d556b619499acd6fe0d17a963ba4b79a616826caf4a2a1a8b64a1d505decfe8bcc0e44cd44e160f9021529161835052c84a5f4c50d750ae49ffd3dadd073284c1ce93816c968a22896b6a35dbc97d5886027f094d60563eb3633461fb2bdd44ffbd2b0ccf0f411848342de1675c124996ae65be0fe0e6d0a94ae10a02469b847de205f5c18a9cc665ea142e8cc01e78430241778d75faec6b8d2941655729e604a8169515c8855dac1c4f5e5832d0c3b1ad886ab3627429dffeaf5b2b2e36ffcfeaa955daa0b74357db697328e3950bbb577d986d693984f71d684d7a817af0968bf59ebc963dad1d2158460a0736798caad6c96fc93a75cf1b3b7527f3c4f8bb9ca02298ead92f848c350ffdc0344a30795d87dad3bd37aa52808c33fe24a391f73df55278e13c43f87d31054e8e69b705f62a57d488009e649a85f1f08e5034c1909fbcac256049d1000e9ecf7e31250e5bfa38eb4fe6f4e2cd204a1598736d2e2c084d4bd30a9538710ffb0f4a738e8fc687c63a144572803ead2b35725a1706112fc63287900199431f616941579dc3cbfc8cd8d9bce4522c4e741654cd11d89756ecee993009e840746541d2850c95eff7e3f417a3ea90ae79ad4eece06240256d40a108d3039fae799f06d9ce2d1852dc00f5312124e92b88b70ac125813e3e5b237c2d7880ae63431ad99338cdb3de9ce2a90fd02847238cb20d1d489a5b7a73f87425649b89b9e62cdea00fa47c4fe6fb1409b166a676b63926e8933959fde0f50f34b96ea0166dcab78faac84431960a24840a53e0f34a4e0e81534678924ae950fa8b0899eaea000b74364c79e95adc32a94da6c00206a663759f718189939bf2e15697ce4170783c03b58c413e7406f1d807d07dd3ddd2d0f8e027d0f01368d64f78122c9436ff86603bb0fb4a396d3f581dab3ef93883005e6e1ffc422049bad0da16e043a9ac6aad6d717afe8e991d05b0d21daeeca8f844b620747399d53127b1729a47ce41b7eb782755b15ab1b8032da95656093e11a0830dd3109e91b04f856b52f7b1e17d1bc4c67da521c3ad3f123c043a453e068d2e606273252a0cbd024074823e03e6bbab4ab43e343074061121c0e2cba1202b4e668b02d7fe4ff37335ed10592f9ae63e2ff51656a8f83a8350644ef8d19f1e465f05fd8ba6ff7fd4b2adf512f8d26af41034873bc4d91e2bfb17f29409508ef2f30b94268b67f87056bd54311f62286bd1695f12582e27ebf3dd7a7e40af98e830b0bb3f8febf834c6f4c84371f9b9cf323cb116ea539b96f34f6433ab8c3c18615eb303548cc521683f3df056c5a43ad2ce89e0caedd2c751193937ec4fb7501d3d7aa0b24da444dd76002240eb309d9166c36169fa042ab484b0810d88a283ac198838136eecf79e7ea0818568681d84925387f90cc25a0f3393aeddba0fa7909ed160fbd551f1fdbbd1dd2ab6cd7a8c0858c3747d171e812f770288d3e88b9a6da15eb0eb5cf73e702b4c923c885b8a5359d2ee0b256fc40d32e62ba02274262d88d0dd152269ab6fada873155aaade5e68b401cb8ad7284e7976bcaf2ad49d7f458847db902048b7cc20ef64c24b3d23cf829238685422231213752d263865aaea29c6359e219c11ef58bf34fefde3ee0c69ef6e6fd7bcc800bfba5d85ee3b4d5cafb8db7cb61aa7c3cc89e54db1b15df298e671c0b2289b3ecb0dfc8820c2936385247bfc543321f5ad5f588e1335fa9dafac46e618ee905108440ef77a344cd0518d991428761e8047a0174054b190a14595dbc660137a7eee5c20c786b6b8be85796f6e700020f5470641a6e9eb1040557a38d41053449696911e23c7a1bd5f6c689358f9141a5af0dd5879c0198950a8a7e8cf22c516e40a17ea6775966643d36558a6dd9b7d15b52195e09bd8a2e6db57c39dc01ed84963d478bf30a430132cb734fc617df2ff3c91f2edb705f124ce80beae5872043c2f112223cbc0b1fc82159fecfe51c1d09254e8bf6973d814e3ad05f00e274267c9d6cf404c46ca534c090c2a07720249523523beae83dd4f2e0a37e380d1a54fd37591a0f76267e638e9de983781b7e5a9994fe316f48cdd2d4170e053fa14b5642b2422ee997f1061b87028abaef048465a9b33dee6ce35e2af4495e568dbd72f253b6b9959983a7b0dfe3759f0a3fb8617e679ce3b2fefdebb901e7f8f3835182e8928245a83a5be341c4985c1ff21546b5315752fc036dcf2871cae8b483aa0efb1927796e3ac3a9fb95e5165401a554231db328bddc73c7a29e3ebac09e75e33e55bd1184a45f304ec842cf26c8855432656976e945f29fe52d7ea20638305be88ac2fb8a2d87e06e6cca74c32eeac50382
a63ed5e844bb0cb08f6b15a76703abb0c6e3a06eaab7641596f0ce291e68069f01ef1cec81a19243c61f338c6a6e59759594464adfbdba88a27477a7f6c38b92c2c5fa149602280be7ce294aee9f95869af4b80ea09bedd5a51b5628dc9da948cdc56be659ada762513d5c6c5d1bcf67cd60ca9ea045a05f35a51cf1da3068e9612e40b7e6d1e9363b3a636bef7fea125caf4117705594a31321fc0f34df1732520028c3f6818abfc1633b37e35066d857dc1c526d5e98def9ed33422dda035f41d860882fd607c90985897f5013118ce6f3c491e127c195b7911aff3a35c78add701e3a809172939e15a544c35bdb51def1b1b0c6955a85e562560812193f015085607088145614a74a60e062613223c503e6cd70319e03578ca5cfc7eab195373441302f5462846b31d85049d7195c0119d07b165e7cf027df9c04a8fec6c656272d224620c539fd9b77152390ef09e3fd4b6bef748abe67583de884d5d54127ec0406a5b4b49caf4284b27eb721e62abf3e90898bf832e72142cd42cd0c97ca221f25e4a8278d48e00c3658d209954095f4ade710aac907ccf3d6490c7a58ab5a1a1554db744f6e245ff69e15a5c5a35982b69581cad2bbc59207e963d293ee52fe72792509780052f21724b717d51fda47bfb998ef5e87d60c9dd9a74a7b14388d6a0389f8570a017afb0e0e6866147077ebdbad190b52808ecbf1dd6830eaeabce7cdeaf8b66de7f403846f288f10e0dbb29d10cdf07941274e796c6ea6b4cd82d8e5f10b2b0cc084b79ad8c549ee5b8b1f2150784244d1b421f0d185295997ca64abd0c56eb092e6c14f446618e43a1cce6dcc02172dc34747a9a80602d1606b9e79594be42c19c82315673232467fa971093c27bbe8a8995e64efbb177b028c216daccdb5c6cad7bf77ce24c561bf5aaa92a1fb47fdce82a6d03940cbfab6b3739f6ae19542b0ce43622cc94e0da1c885fb9359d02686fbe4ff3f1a74c1472506855efbb623a854a9d23d255a634c381137cb37d2a454f3a07d490b20f0abfdb7437c0eb5a919c839c02330a099fe87d8e4b490ef595976a50ff7c4392e199804558393b401adfcbfa95c60198e43d45583de4624c5afb208d9dc4caa993644057a582faca0810228f5699d0817e0ed035212ae30e56df041c7501a939ba9e94ba9719886e4c53c910a4e31eeb0504495f304001e9c3130a280a824998545bd67dadf41d189a70bfbc92934c04a362892769374b2ec06cdb56808cc88bc8507efa1a5d80573052ee230ae820f67e2ed66884f0dda1fa52954439aab931d47aab90cc423090c2d0cd531ee105881a20fa5007e33490d31d70a0e41c78a70d810876682265602c98ac7fe45ca2c04376e455bca3a44f1b1530cea4d700987153a75a8ac183adebe9b04e3a645f7c684aedd2a8690f708efac70ac00ed16e70a2a2ee4d9397e5971aaff3c91a0f9d7cb8a2a5fb60a901b652b64e98d5351f75f58416d3b60b942650333ba221625de755064eea799772e0346e26c2612c66eecabd5100da898327305aee267e4099165d8fccd4d882967be7f3a2f44450b4a3f9af1a18c1651bb10d44ed7d3097b3fb47e8a0efadf4dc3bd97963a8c2e939259800220a08adeda6a0d2f59d474f6705c4461badebbace76bbbbb00669f136f42d3acfac5264251759381f4cfe1fc70373cc3396eefe11b596cb6874c4d5938d032425546c0226958dc299f978b5146e633da1f73456cb62ac3eb1c2eb95bbeaec5c898d072680851711896d1190496e3fcef6015902672ec665d10703df6cb190c62affc8d01be4893200cb52174a5372bd428102119dba72bf454b1282b6b5261daadaaaed2bbe902a003c537d87779ed255ac50019f78c3a708815ad2a22e522a57e00bd53d342ba7c1f8db7a091ae2962bc3e12027725dc64af9e0306c2809078f6c961c6b13e0022125c9f0edb1842ec020da06592ec229641b925cf3e183282f2a583e6466ead46047480d3121b4e556e907235ba0c08c14b32c87acb81f601de6b17ffd223c2e2167e0a8f5970443d4e63e384f9f919c2ed2d6ba640de1e11b6f8b1d7548ada3a541e87211197c2a4913665903c10fc191669341a3c74bb57cb74f6223828912b52c439ce175432170e1f25e5ad4c02fc27eb06b5f8860422d942f8927e70f348601d7b6565264cf2453d992c00911c10910f8ea424cf6025ec0ddfc6dd6129f7cccf667f625d7b103574742df314de24bbd176d88ab25097d6112ff2893123726b4f7d3705672f965335dc0a998d6de4687a4d2f9a15fab9b1152a238b15e8f7c91cb2b0a653cc503ac778394a461933efb3019413416b3ad57bf601872b9ea80a75b3b9e7d6cb683fea97c481b0a5c6856cf5d651296b1117e30a5b81f73804febf3ba50f147419f01493bc66745e1b0c13e0b933d2607c6fdf80cf546028144dc07bd8dffd495972b0d076d800116c5344cdf920033430785d038b1a936abee2c681635c7
f1a3e8463beca640a60a4f57f96a4852fa2ad1e4df432598627674724fb36ebbbf6d798c3eeca8f711ba4fffd779ab5adb5b4c59e7a3663a67ffe4142c3faa071a6d161e20282203b7d9a285f70928b36df9e8f054a97e84ef0779431af3f2342adade2f59f78aa5ee4967515d96e7d1a8d54aadb6958b47f3bb7aeba2418f06541652ea3eeea60bf58cbba46197aa75b77599a3760104e5cf2d34703fe2b1e2b146622b1f3ed6f98e6416c9a16de0bf47f31c58fa9898393b69753bf2f33dcfdd52c2c0ae0f33562f6f8f4292b2f399248201fb9926dc4e25773a12d89e3157d3319efd10d2267cfc344f6bf2ca0b50c8b18939fb33ede20528245fd18bf9c0265d1d153e471e926355bfe4cc8e23c8f070d0e5da37800f71bb1ddbbe842f6816814c72968a07cf3eec1d037284bd766b94b7f85362c8b66004e5463253616526a8e24b5295218818a409c51d0b16c8436c3eb0a26201829ee3f82769088ecd249910220230d6e60042a81d18a7c4a361041d02badccf7c833868205e2d9ee7daaa00d33b3c66d10ee16370f83f946c60bc25ed75963e512670722843bdb3d82004c92823612e533fc882f015842ba8cdb0cb07fdbd1980642fff1a3dd19419fb0206d1ee3f3bf47bcb22706388499e00a17945cc6e4271615a61d5e130fcb446dcace0bc752c9ea64f28bded7f91e2729f9d0f89aba1c5371d5c0261f3ec6c62fde30c2ba20f27d68d4879337fa5cefdca18a0685e228a55e07ad153f82ca7e92b1a344df1c659951bcb88ae909cfa5e21ecf8e7980476ead867ee60b0547d45569bb68f2088c3adaeb66b44dbca690e90194c6cc988d01ad172f2188e753b40025677a504f0c83ad83010e4bc18c4cc15086f66bcb37d7eb96abe5e9fb785e8c855e41d1c2ec0874129efee5d65f475c63a75915287682ee3c9d5c11164f4a261f7a6b96218e3524fbf8c1aec9b88a71fc0a248f58986a7b9dcf7d563bb1dc1481e26a0bd4521e8005f7ed60a89b1ccc7e7381b87d6c761a09edbaa83bef7fba5765428cdd57ee67a4b0016f71eb9c063a905350b615fdbe913234142c72bbbd2e6eca704d9e88b7a49963d8deec571e2697a888d48b0e34740b7b7bc139dec2828f201952497cea9ec2944a9861ab523e9019cd0dd10c7f8d22142c9a6937a2e660327cccd627bbb7f5b0051f6877ab24e6d0f1f8e34814b2c2ee70b1c86865f6010da6f2331d72b2cda7ba515c5f67be1f01cb26da7ab21dbc10607571271df67f4be584e1629e13dfc1f790dc4414e06830f272314ffdb5f02f58b510702945ea858faeed48b60f547c2074a001be5864adee422bbb17a9c6c12beb80191e1f77b4cb002cbf3f81598b7eac088917e9e6fc74b9a019b35db889831e6d8bd08c5320626ee97d31b4be69a3b5d6bbed6582b0cca977b565408d02ac90322979e4904d8654924aeb91e3e56ab2e8e454eb77d26ec31479d78d5f906f415eae7278dd8acb5fd42c9aa8138ecf01ed168ab33c0e2abaeb37ae10b6226a2ed78c8ac0a6a85eaeccc0eb26e8c4212afcf5698a13798745f6b63f4ca3f015287fe88f813032244205582f327b86eea2def4fdca92d5a4b9893736413b4a82b72e61434eb8013c5af8098a66ab01fad2f5e285929c889d9480d9fff47aade0e8ff891e41480c08db4a95d0eb8a98a569582182009cd4415cd39bcbff694c71e990866142a655a443804a78f2bb66188ccc5f7ac6f397b5be82b125c0aa0a3fccb278bfb8619cf8067f5e1c8f72552f09026a3a0a995ba7b619afe16694479f2357ad5a3ee0f455b74be85244999b39ab9c89bbbc0f10af3b6b971d0bd29b053a5b413ca3a5d6408c39c628eafecb5068e71dc4bd7760157673797d467b00160714fe14dc98193affef14efef4391dfd256b9d608ccb2e7e5528746aaf9effdc9a0ff1f3fbc777ec313afdcfae993d7203cceb1f218479364b3ef76599c78bfbae2e0f4e9639988b11f09e44a9a90023848473015eda18f8b8bb3daee50bcce9f85042c4286dddc57450ede9a7d47045735f20ed5221e4e5b81ec10317e1133b91c6ce8c504a06e3eae32906021c4ed4372632c2382211c89cd1d2f8cf611e2ddb823d3be645a76ee0a26c3b3d9203572bf18fb14f8fbee520fae971a96fec166944e0625135e6ffb2841dc981258df4e1281f317e1342193505f4cb5239331841ddf3109920094197047602004abc9c68a3316893b0208be9cb99735761c3077aec546e54f1aa9702aa2bec89bdabad420472cae32e42210c2f63ee28089e6298b07414cda6cdb0c9d3f95349b7392cf41dc1574427b17efa6658f65f1a9a0d8a5dabae09a70c0f6cc174487c08262600b4935cebe5c38b8baf61453a6a0cdd4ae758bc7bb7b0d0aaff77cc7a297b7d11d2afa99dc8c3ee894e8ebe3b75d6579562668a4814a0a78e5c500137e2f6f2a9022db569d5afb669d5e5a6fb9916bf5a05322f74242e599e9b762bed43ca4eba8caf4
b167614fb80553cc4dc4552a1c15c1e77daecff408bda84134f81b0282b83a422e7b3722c94d59f2a49fc81c1ce82f78ade449a06c81621d38c011fd6dcabe15b84bc997ad83807254c3b842b5ef23506c19c4170a3e0d271cd67ec76f661152e33c7544d22d09260c77f1d2e47804129bc7e81a93a0580370dbd86ae4a3ab1a11ee888b0075981185eeff98e9c109d563ec86ecb23400841b1b882a6241401e6bc57d205c296f76555311170da33d320f6108059174820b5cd40dfd7c0dcd9fbea16a1fe18f5e1a709af34bdba5153149351c0c4bc290f322ca868b053a71ce5824cb3587879db50adcf4bc3d54468f938a986fbebe598d7a286d989bac9eac87768f508888ab9a76a49fd0f742ac9f54b7502a058a436f68d6d31c7fac65b49b8b16c5680990e0698647bb3d3ef363de5a84a212af04b05d8a639d196a033ec2e3353e0c965bf3dd93254c78f8488138777cf337603ef60dd8acd5884f87426bbfa765fd7365bbc48f49014d073bae018722352545f561ad1b12b6b96c66aea0e13cdd5fa5f5ac0d47b10e52fb63e85f4788250d2c6a2215c8cf59f29d5391072b1dee6585c65e5bdcaa5aa86cc1202bc0b7d8f02ba7f505aed67c77cd079a7ede91ea5e5aeb45a69f6111285c3d8886e238ba03b509686ec44c71d2c035a3da9999acc6a34f8cac2cdea9a72e22ce0d5329bd2c545a1a52821730bfea50bd060d3ce5dcf607f9f222778b8600dd900af418678feb2c9d123b68ca810154c68ad56960c4ad44c4eea430eada43361952c82f1f399206a4754e512383190abc704f2955e19baab069ddc18859d73955014dd4fcb4bdbca35c04dea7a7baf092cc8ff9035db4bce7e40c1b05d336e319e661e95ef9a8d10ff408692d31a6bff0e22a14858f8d68fb13006028abc6e3e8cf4f7521d1a05546ca480347453c5115e121cf4649507bac901ea237ae8952c1469e5dd91a37f560f382e2f365fb3c8d8b886df7a13db768deec34938196da69bfc4c52c493478236766352b7af3677006a70e465d902139a42d23155c664de962c13a17bc3ed4195b60517a0c71763ac9750b386d71a8229f6e3b40f6831087db5605d78f2f829dfe3a140afbb5067bc035303a9f5e4ef139f36f097034e9895d1d964277070950533ddc265d1ac0fabafb6aa9542eee8602b9034e09f013343d0445786a146fe073be3f710de9ce819fd4a0c8b48d1fa147753b833cae663106563424bab2afe4d4260a689a059de14b3b39940a12868c136e1c1247c15b78700ac1159506932d4049d103dd1d7a9e832a61fca8f98bfc28b9a9471c85e6bb16f2dbb3ae01287e7be53146eb4854662a0e1a181259666318692353c212ae73c86168e05b81bc56ed86859c4f39b0fee0272dc9f5aa895250fd6c12bfe6244f57c33379e8868274a1d767d64ad456fcd5a1d860b637c6c5ecb484a596509ae8683740f45a9d630da16602d40e270dd5f09527b355c7659b10a11c0cb4130a17ebba69e1a9e561fb44a1348c559449e00bf9247e994a671dad93b4a4d4fb49f8af22ed463e34676796fae72827bfa2925f8bcbd855d0c35e17f1ceaa6ee693adabe523147c9785d7b3fd09b4015c7fdd4dff6f268e05226f4924c8205d69a4cae112c20253048cb34b04fccdbdd2a074a7542c304505b4e5937be5d596e595d81638aeda4ddc55af7be50184c6a473f49379581959e2ab16864bb75d7d3db01f7139293b9ce9053118b9b76355e4322358e10fcaf68617b025f949b587f656a314b05d19161b27ba7d59bea4cd9061eb796035e9f6e68dd5fe8d1a56d541f19ec6dabf015622202867eb10245eefe4d4dfbafad193566cff07dca12aaafec0531642904bcc03e5e0046bdf293b9db6132735f02ac515161c212790a9994dfba5669e4525859de2243486480915cf42cd353150274a4e5434f6a3840ecd1e53aa0dd7e949623d44ef49264b91ab510221c9f2f5733593944406e28c2bf322595e39af025454ba417291bd53d557750be8db5fc1559cc8b4a9524204b185707f0ef93013efa38d3c6e0a78a95c03838e4e93696ec2adc56eb97f1014470448d81a7f1a5c4b0d8b81e35f406290a8b59fc3ed093990dbad45ae638781d110ebf47b23de83a026bf4303c8283ee75b88478b66ee04a44cbde13cf06089cdc6983a7c1bf8b7922846cb03f03c94bcbaa427275b4097279abbc5434a96b2e24f0913964e95e323db6b872f471b736331f6e0e6d72502a8ffdbd45df689c8a4ff9e1ab47ce8a01115730a06a8296f7abe38eb34ed2e0282d9896a5d97098dffd7c8f6dccbfa4181ceb482a62252fd40c8e65155943d63d6a10701ee98a54dd76b076a5208b5c7f777078771b2d53cfa97da15c376146ea4d01c8f637b091e2f5c938188328a8ab7a0316a6a20318f292fe0f9c16d0f1ccf29f4d1e4acd23b2962ecc6f2cbf77264a356b64a13fd4945e58dc6d3662f628d5ce6aaa3846c7eb52c331b581e5
e9736bb8b264471e7640839f95b411a772a3df226e4f6b4ff4a90f7d018df138b961305491574ae1ccdbd830bdce15d73366251e9d7aa3e1f49358adec034c16cee19e4ebad5716c234c233f5be7d6142ab2f87f74cf4e268299cba865dc60b682b2d51acaac2564e8c5701430b0d9966326ae61bb282829d9c25bf7332b0d432855b9abb9d65810c6f715932ebc91aeb51f76a540f6ef6891244a3fae460b7af2856d3cd832e6009a8738522782580df41acec7703aaacc28b5b20103a9b3689b8cf54ebc9c335d70e2eaa0d5511c8f7b67830ce31d5f6535f0997ab2e17f451cc542a63bd0e09fedc9480066a0184458517e2044214133295e56be268b5c4c0988f15572acfd743f118b1a559420811a195dddd3bd508aa088c08353d39ac3db24836a956ddfce788a28fb546f54bada2e27725f6f2f1d9416b54b5cd1dfab34645faddae5fa2eacc1a511c3a75d42dc65a55a72c942a55bba83697d6b1635a5bab55b5aa56d5aa5a05e61d34b5d6c4ac128bd6c8c1c517d360189824d8da0be38ae1c11503c3ff310d7f8580011160586cb91b9be11ac1da6b6d0f18c32c5672c1b4a6c130f662181fb0d2b518b09fbdf8fb5e7e5c62c182055f0b232187857980d1e4e8ac2e161a6c2fc68205df991a3b609ea9363c1bdd8d5cc2b4365b0d0e13f0c55213334176310f316c2f86e10f0617db8bf1adc262a3c9e1336f369bcd66b3f6add454d681592fa671b9762771661470758757a833d9f5305ec2d8e28e852c509a8b7fc5a1af71d0301afe0a21849b18b1d78d0830441061860cc7e119ae114600470881ee8b3136cf226a42a0aa038ec1603cb8605a733fdeb85eba5e2e3e6dd3b6642be2366d3c80532bd1d0336956995534a8bbcc28453b3d5b3e2573a787480c81562f7e34c01c0db2fe87d48179981903d3fa3e50cf2ae05759ab10fc88f07891122f3d445b2e8c841c16d6008c264767759d68727272747678ee4c4a4a8a2a9a3bf499f4f4d86c5051b6dbb95c3475cc2172129744848ceaf8b431e9993ae6107c9c5bb4c82a227a666aec80333b3c1f8f0dcf4677e3462ea1041cdccd66b3d5e030015fa79a9809b28b351083c966e0cde50405c2ae14418fa8f67b6a0e1f7a137331993bf42bc771401cb77bdea8584452a5730822a40e7d5965bb34397ce6cd66b3d96c361a1a188d140cb5b144ad25d1cae1abc116d3e808e90929e6e4d2c56a52093127386738e7d49cab3959734281c39c7a4e2f36a0eaad6fbd0e815afa592190163bebd659bf0a81587db50a9f6c40430fc359eb20f8bdfdb83cf52f8bfe7d9ee7754f4f40e95d9773e69e6ae043e7388cf185a2df6bad7d72a2865affa1ad09f290a542c0c560616247a7ef7542f45a3f8740f593e2062eea29ca08c768d9c10d9568220538c22668607b6a82034f33c0f244031078c104155239ca1502d1a456cc0e1a86042c9b5b29b06145a5b17420fc614512ab06482055d26a07337c532195e7043ec9808bee33ca3f7840e5692087199476b0c08f2e4e3e5069c28054d20ab03c6d898a22df2e9fa2a8d1f040bf643ca1b3cd264a77b71a1664b141bbba7c72c114312af44c974f2e00420e3ac6066c88d02b1f42a0c12a4fb71a0ce8b0cba7db125f831b979a1f6e56d4c840eb2e9f6e5f625b7486124546169ae583cd0cba25448d14dac2a046078d9190a185eea03e043a3bf14a682b45cd94b6389841735d3e59d1410d34d8e59315aa1568dce593952f41505441f1c31314542e8c2e9fae0cd16d974f504ce9a3ee5d3e79d3a42d93b6aba5c961f667983426d87d22fc34fa3f4e2e67613e7e22274c9613c9e7dec97cfc4e982a27e2b813cc67b6e89747c50b07424495e0881c9880026abe94ff16f8bebee7795ef5ba9f735a99f34b2ba50e19c057f2f6a70ecb83cc9dfbf7b9f4995c48eadce7bae851420529aab327b4fb46ea1124499430f92165998af7f30e2af3fefe4f0a3e3e29de5f6d49052d7c1bbe94f25e1624cefd7943a0908641a4ce7d2d033d2565a5b5d3aec6279d4b9f1f6edb4149a3610f195d2a39926423498011f9a55f0848f1fe85622275605398f360d672133fe247fc02fc02db164acd299e23b01ce831b4d1a6d08602cf91ac5f36e91376fbd35667e0f728292599ca72ca72504a3c073a1c42cec2a48a63933ef35f4ce82ca1956ec3a43acb394c6aeaf673d6faf57a9183ca7516660a6e1c74e641ead81fbda93477ec7f53c70372cf3f66e9935f7e0d79e818e8517291d18b2a2d730f0b5d9f86b63a53e2a0ea6cda2a8e7daa1e0e4aec799e97a839a84af3f884e1e0453167ad5fafffccc73fa2f52b64496184952b2fa815c78e7a3956d65e2b0c7fec1efcef0bbf0fffeb42a030fc3cafc525f56ea8387958ef25f73cafeb3afd79acd5117a3e058ba0f983fdf80e015a3e7961d4e5d3172f9d842e9fbe38f50c46f5eff2c90b5bff2c58c37be9e6de0e40f6fb7700b2d38e9b4070fa9d
b0cc9fd24eff010a189ff4fe43432058863fe8c380390cff7d8ec3300f12fb7db15f2eb57c4f5e50e9ad2e9fbc88d2394edad925cce20db32f1f6639fb536bc8837efeb9474b7fc23891c3b8173b7ecb25d705891d7379f94fb93ba0619822efbd38731c4841282a93faf2969dd2db95e9b4c5d55da1d27e1de7cdd5c779db50b4c551673bb4c82b9f162e776e815222a1860aaa366fd672f846ecf7ef9813eb1c06383e9da613152f9f372ac632b0c3d54110ef980e821bc6e10de3780c3508b5cee1ad3b18a3831edeaf0e62fc20f6fec378831dc41ba683e0867ddfe30dfbded53bbcbd0e82fb7bbcb9dd43c53b4349cb60b1549d4d259c45ca6613549dada6fabd38ea366d544481944d22a478ff2add6c94864904fe8bef8ae39ca8e7eee857deea8a86abfce0b797c2f7bacdf10efb0c816666a0650f7ff51fc428fcd5cfda9c809c4d2366ed8b18913ed3ca01e46c1a91e74d2a06fd4e24c279933af78ba8787f853fc4f876ff525d8c1f6f222ade79a3b22b542643f585ce6248538b54bf2fb95c2f50fd7ee6f0955ca6d18432a3e03d9fa8a8c8df8832597cf425156fa48ea414a094524a29a5391407dd712371e453ca5fc6133a6bd24355d500436a5a18564e289afed437d0f56dcd566bebc33096562606aaee000ad2f78a035f8b7474a1dc2857875ef7fc7b74a150d95402824e4aef124d1cb64452a577822e2efa28b7d468d0b3368f6ceca8eb96e9de5befadf72af5fbf48554bff5de4b61f4de692475eccf5bc3e06b869296c1f6f165a4e2d0206549f01146b2b60152b43f6228dd5a23964903a42c02566ad69844c0dadbc36a42eae1ac51d1ce4e7bfe0974399b14983744718089898159db316bb3866bb7a967d6a691cc2c82ce682694a6dbd4612a51a932917a76a4a0e5418d9b5823b8457752259cf64d89a65a2f9dafd84b2dda4e396728a262ec02393729d8409762673169235b747edb237d5ac8216704e801d40eb2482b4546681cddbe4815e75e1915758f7864545111996e7f1ebd9a12a083a31e2f55b386ef8863c7fcd8957a4a626acb402501057503277c4d0b426ef8929c7be4d4c484d8c47ee4f0255a88007ad8f0255ce7a3cdc7843c6b2fe1fa920b3543b811d3e863a4f133f8929c33a326068b9543838f393f832fd14368c4f80883fbf13184dcbc00f8922c24068c8f0989c5584262ac3762fac1c725213f63c88d908f3164c6cd1b31c1e063ec61b03ec6d7f0255c5f6a41c647ee4b4372f2901eae971962c34799afe14bc0ce2d60c46463c4e4838f4b43be470b397c5c3a7ada90a7c197e86e0123a61c1a6fc4e4e2a3cdbb64f858f332ff537900a510005aff01000a27ecd0e1b345f81c3b84efd933d93661d208f038b6085fc20ee16f6c1b3c7b87d208f035b608afb343f89c4d028d8dd3a4d208c08d98449ea5a85847e0990a2ad69fb145e04b2dfccd0ea1059ef986fdde34ff7be665ecd7db6cd8d76c9a8fed19d8879be6c13df3df7ebdb7613cdf280dcf57f60ccf4e3c6ba162fda23df444b6d00fd9432f640b3dd11ee239680bf11c5571eac76cafc7d8d8890920e1a5c3d8dcd3510d4ed06576bd61024e5774d7c619b04285831e53b987d93c9b8be17bef49ef6cd783f6cfbbb65bcbf52bbbc73d8981ebdc8e617e612d1734b9cf362acb372abb929db2960c4565e0d7cf5b721495adbe7ee64265acaf9fa5ea54af5febb20f21c5faddc62c90d88a14eb7f1b1f21c5fae0c659a4583fdc380929d65f6dcc8414ebb3369e8114ab4d7e5b766fcbde6d190a702190fd56d8f85671ea0f6063201b43559cfa413696aa38f559d8780a5751b1b2b06dad460bb22512b728906a34209bd206b0256d858d6b50a2c9102808b6b9aaf44b28157e68d096341710a248e9564603fa2d1fb153afd82613e3890f0ac4824fdeec265ddf042e9ffc4991cc2927940a37a9a507879883a3cf8f5171f2a07ba48f91bc6ea712c9ebb1a43ce876798fc8c11756875930326858f7abf77e76dfb765cf5bda9d73bea25bdfa3c5e3be12da5b998ff96908b47a4bc51687e54c45bd67ff747e90ce2c2df3f1fbf6fcd6962f31c65540e9f24b974f5448f59c41993f24afdec22150ce79cbcf2fe77361506ebdfca1dfb7f60d81705f7cf185173dff6d75cf91bc7aebbf1fb8e75f81fbe28b2fbe74c997623e871278d198bcef421edeb75e863f70f782bc6ffd0314f0a473610f98f7f92597ff75606bcbd71be67dc7a3c561dedf9e5f7adfc1bcd54f6e61b611cccff03418ce949fa6f52c1665b558ac47f2fa02a9cba72024f174f9e4c55467cd607e3cf282b57e056bf1a04bc3eff3666d58fed9f3c36c58e63f54c6082d66fd7118cb52b183b15adfb71b307bc8c5a8a48ff991805dff97392c3feb3587651e747bfecc412acadf30a8e82f833a7bf931c6e30b747d8f3f4665acff3e29d67f0f6aadbf87ec9ac35a0f83fefc3a7fabdb23d85b7a8fb977e1cb70f54
359fb495fed0f8ab9f37df7537f28b8f1e7ed1955baee6dee32e7304857469440a4e3057586f4b9f3866ea70df6a907f4087e379dadb5a07d2b85764207f1418fb28a4913a5d7eb17a864afd0aed86b17a5b55a1b02d94aefbc925b8d31748c451ff38ca939f93cf2418fb6e7095231e7f778a6627e1c642675cfdaa4628af4c19f9ff6207d64cf2385d2338dd27306bd1f67979ebbcbbff43985caa3fb2f2eddd45204ad4281b86f952c27e59337a3de415e99ffeaf927d45b790dd173ce398725d0e34c9a20903326152753990ed267fe4b08cd943393599b47337a9ee9f9e7149bb3075ef6cf561a0af1f46761434165c922ade4c8218a7bcfccb45af9bb93d4c934d480ced2ea97be7519c29339b91ba0faa5d4de67c20bad32819cd1da8c7e9f365deac42b8bb6521ba2dfafdbd2a6c929150f80353aa5dfa753a86ca44a7d532ab4898a4ab52a2dce59eb170b3405a2e9bb69209a28ad512a773a61af0de71654e64f277e90330a0595258bb492238728ee3d33d3a2971ed15aa8015aeb41eadccfd2a707a9a31e2177eedf298552b90f04156fd3bd0fb3a4ef7d6929f580306b341dc73cbec1061a408cf1c37cd1ad2f5fb4d68f5b8f297e2a054a1abc97b27b2927973da34ea0432034b54d9bcc3ae4e3cbb56585d9b4d65f1f0b6e9a7291a232ac33c8f510889a60737d4a51244a6d98524ca929c53f4ef79ee9784a4da94937ed610d7a9c5c3ae6c29a85d4c1df0198a51945655ca62d67ad6d78d630aecd98982f32675d6162608c4849aa29298a3d30738ff4a1344aeae0cfd1c9191454962cd24a8e1ca2b8f7cc4cab953bae183fc5d84a5b9f80144649e61812da9b818d12373842460e6e300411371041ec49d374f974031a6474d17a8b9a240d5ab1c31432a44c2103124ce12483298288d140cb74f934050e2b9882862a22a6b6b08192144ca4c0ca91142690010412d4604b0e36567ad8417f5d3e4981c39314519ea4c88013525fc840aa4a8a4aaae60587e79c416ab59546ca5aabee81c3a27dd51816c695c1f5bab85aabb43119a6abb6a5bf7a2d0b5c85368570656bb51564596badbdf7de04d84f4a2965ad55ca4eca18970c8c18af191a2b613a2af06996b6b6d66aeb586badd3565a6dbd2f6a635418564a995b6fad55ca186ed65aabb5d6c26469efb5b5d65aeb4d4ccee05eedd55aad4642cfcf99c5e10f5c8136d44820818405b3adb56a243e962765d7aab556cbc5d85aab46c25e7b6b95b11546b560adb7d65a6dad55d35aef9db54a296ba5a94145776d772d87f306659db5248da25f132e51a89a1a1ec24d2b46de3accb6a1c2014aa5863a51a79b17d541043a852a51251166c4a051a8386304285414abed6e6769ad26ba70707042a0ee69c4dcda4da03ac5668a060935503112708e4e8d9dd6b7bafb6af15a55abea975a45655ea1229d37fd819b16274a6b9497ea05fcb6d42d4935a9ca4e5555945a65eed0aa2896060d1a39356c805f0f0d7a01bfca5a85750bf8d9ea96a42a494955a2d02a5a55a34c9b4da706abc6c66aecac749849539466deb171a3d6a8280ed7249badda2acea555b656d52a5bab495347dd22aa26d96c944ea1d8661424241e1be084326d586a6755091d121c47a69424c1918b9860c4041957ab554441a064337cb3a42023fcb9a9e0433acd7a2eee000e9b9027d65657104feaac4fd46b33369a9576b5a3d90b2a5caf1742781c06bdce1a1d4d8f725d5399e78541f93d8cc320fce3e4611fe38c45c73d7e3b4e1edf9297cf28558f46a9d3ff26ada087247db2cb49fa749555449fcf1a42caaaf59056532c9bf4b9ef59e9f4c7950efafc90e67b36c8d9459ade0d7d7a3bf4f9439f770252e64991fef4fa7ceeca2267accc8476a24b29ba64c91bcb8905c58a624531bbccf59cb36785e2efbb48f49341a76cdb22d5d9cc161287fe04926850aa36d0240571c49ae23ac767aaa40efd981eb8aa9c333b5075ca67945c543ab3ebe19337578cf84153cfd1a5113fb8f528166889eba692dbef532034175fa2a2032fe8d60b5d27de14387a1a4734b3d05f717ee4fef198d4c5c3165a4a29a19aaed0b2494a293d20a5acf726d97fa9239f7316db6b2d37a70c0cbc0a81a68ea17a7d56174dbf42f5f9b94b3dedd39f96523a47db44adf8a41c5937e972c2b5d63ab26c9eadc75ca1ebbc4795f390ba2b085dbfda7d6b52c74ad1d65a4b29f049798f5c41dc5a6bad7452b124f1b592d2b7750acba66b6c6e6dcecfeb3247b7d4a4cffdf9148a4b1004c1a736e9537fcc330ea91ed9a79ced0bd7e485674e69cc9d4b1a73a7d40a073ab5a15324a963fb422569ae6e5287be4c171e524de790ea0cec92047a51eaf44b68d4290da1d83a45ba75fad47290d659656521c9e072cba938f3a55472bce26987f8a4ecb3cf266aaaaaf84246942aac9001a50aa94b4301292c5fac076b2de5b1d64eb90423a234fa94467913271298d426715d958602
d38baf7ed0cb8ac45a1890b83ac72da9f02d2930b8ba8e8f489074efed38bb37bbdda30b86fb5fa5e1e5a37d0c03f7b31f012177b4f75d0b4abfffa3e33d541cda429ec81ef26405972ebbecdfece9e1a1624ff7230f0e2a52fa437e8642f385fc1c7aa197319fefb7127af64eea7cffcd3028bff744bb09d10b7dd06e12f443ff854f3323055a49ed5e6cdbb4f03f14db3c251dbe77a96cf446c5ef1e4709f93f59d1a587cf9f8b2a7da6cb272ea0baf717bc51970b90aeafdedbf757fb7211fa27b4a10d231a7a17de05de64e89fd0be896d365da7d717be167917a8f876d02f742fe34665e115fef5ebacc9a4bdc061ab97b4173e53b1fe0b5bc6023564a9a2f9af3ec67f39048ac1df15aaf0a4f397e10a371ffe0f8db167c76150f89cc382c2e7cf390c37fc72a162fd6818c455d163547c096d1891d0bff02ff02642ff84e6824175b542185f3dde525d548471ce83761e1ad52e3ac8a72e3ae01b8eaab349eb3293514df80a207a7decc457213e82529798825467978a2a2c2a7a175e8643887c919fe11016de859fe190205f84dfaa8a53df858d6b15a74e79576391bd847632f445ef44e889fcad530a4a4751b17afb3e6c8810212f69451b3664c89097b48f0387c2cbc5c3b61b1a55c71bd5ebffd48f036f54fdcb2583340cc317a272d6550cb5affe72a1b290d7bf515466f3f5439b0df37e76ef5bd8308f631a8e4271eabbbad0f88647fc012d8377ab6da54eecbf70684b9ad096b4cd63df3719c00fe14d88bcd00bd9432fc321f39bacf043bcc9d0d70af04e867e0aed5a01de09111624ad002f4320dc5b16e81379217bfe904d7f288606cceebdfe300c3f535152d98a7f4e54ac5a3af7e0a7c556d161bccc635b9d09d5d4d4d4fc931545f4ee3152f8f5f51a75ffdcffdf86efedd92595ad3c6fb5e279255b714f48d284fc0c87509a10de440234a10fb7b72f14bd5b58b83421fe53712a6c0a4db14dfa7cbd7b0c84a340ba10080e8906f0b02134e8e94b1a0e895678d890f9422fc49d10fd7c49fba977684b1a91b1eba31c71fa58b3c74e668f38bf6598ddb5475d80dd64d280fcc72da9f05f0689b6a40575bce75bbb6595d0ee3d98bf21900be47eebed4ec2bdfd6c8ba13de6476ceb3d541099224445d014215c8ab8212e5436ea4065e396a8222c45246948e8edd72522292a1b97823e89cac6a5217f7986a2a212958d4b42def21c65e3f1f57111405f1fdf5208c0d7c7fcebe3fdf5f17f7d2ce3ebe3292a1bc5eff1f571172a1b47781f545436fa7c9ef24265638ecf5da85845fbfaf84baffd1801e0c883d221ddafcf45a1b2f1392a5436cebee7eb68c2cb9a705429c1864211277c7deeb6e3eb7357747c7dce69f5f5392de1d7e7a0c0afcf6da1b2f1c54d51d968e36f7cdd799eafcf51d5f8fa9c179dafcf5551d9e8fafadc172a1b738dca46185f3f1b51d9988fa86c84bdcdd7cf50a86ca4f99aaf9f91a86c9cf9d8d7cf51a86c7c3d152a1b49f89caf9f81a0b211e7697cfddc4465a3083fe3ebe72a543686f0374d2c8d005fdfc7eeb1fb27c3183e3ee931ff9b1fe0eff60cf0e05e15e063db662fe15cf8bd9fc8fe1c444422cf03888f97c86af311e89f488e8ff1259a88185b190189bc8c03ec226ec494c7d5fb8c90021fff7700648cc097685de4f3464ccfc71138203e06e07d88e4c844c411fe89fcf391bf0c222fe38d9802c0c72522ef539483c88b453e39de8869043efea7c0f928e303f0db48d12a17c540ef79d1e64bc022be978a3e666455f4a01171843762f2f8c89fc7e5e37ea0ef762754c218fb1ebe240badde888926d4f9d8fd15f2ae8d10ed574225f4d57b2f747cf4fe0abd354037c48d98c69e97f12579b4791c387e0060adec71f0257ae88d982e1f7bdef225dcb53f8f832fc9343ee2c84200e8f92b742f1fc1b742d7be11d30f1f97841e00433884be04be44e328e1fe8f7d007cf70228e363cff7219b3c248bad86bc21af7b23a61e3e2e0dbdec059ba18fbdd0237b23a6151fbda7817cecbe7fb8c3206ec434f61875fe065f92836cde88098513f818aec01b413a7cf4f136413a27d8fcaa48c8c7d58341ff152024e2464c230a234fbeb1838fe0873abeef061f6f7c0d9e376202f9a883ebf135f892ec838f3572d00e1d0f0681e017047e6fc4d4838f4b41bf83a846d0eb10d5d07923261d7c047fc7f73a3e2c82c29f407423139d40b4e263ec43a255f8464c28f07189e84f287283e8798adce07923261b3eae1e053e861f4de4d0c520875a6b51cc30e70c6e98d3bbc00d2608f1add3cc1b6f62cb70e34fe811a4a202bdd0b7e12619d682d0d42fd7ef05ecd316557ad8e5d3164b4f7491c30e6ed0e1061e50fa50f03d0cfca4413e958f9be84ceb053c9d795ff74c576f17a138b588fec8d32907730e7ae4e9b5b35d77bb6b654837dd6312ba8174fb45515195663ead9df2625f86db2d0943a756b7badd2303ba8ee9f5e7519404d42
f5235a14c7951ea6c377c5f18638c2fbe574e4d7ac48684caf26c925f6ab30a68829c2fe6479ed0e011206e482273994a17df7be57c65f1e2a95473d63d2f45245e40263d932039d271a1c1a9a63995ec6cea330817d36c928d33082abb1f44d63a499228febf8e28c1b1a0aad0f393820f2acb16df231587ce7cc4a2441e52694eca9bbfebba2e772f770c32cfa3398f66eda85f2654a4f49b50f0fb3698430e3b2f8a761de9a949a54759b544ead07791408ff368945acca3b9a4dbb1d78d14f3fba02f16f5fbd130a876eee751c8419e8bf49bf744a222bd1c0a7d024e2f03534a2ba7e530596953be932b39ac7216ece327b24e2c9df6ef9c734e7b61d2eefb44973761929ce83ee64dee37794293586259ef4bbee8abe2c849b32f2d975095561a50f025b7716cf41c5dbe0f593da82a89f32371260f79eb31189974f6e0e93124199374fab35b6aed8cd1449536457da9515f7aad950aa41fbc76611c729164a45fdaa5f505b47ec9e8f2698ba3feeaf2a98a8a867995a65091d63ae794f35edcdd2899e69b7491eeede674b5cc8e5b5a39bd405cd8911294258194321d30c6b5be72cd1547dffcb552ea1ee87d580f6aad557c69dd43ad55722bd80faea84b2ae60ae4596bd53ed45aab94574a7b6bad1573f395c7273d37618e9382a5608e9382a5608e9382a5608cb96b6deebc8fcb1ccefdefb3f46ac115e47256e294c070c5e5acc429ad585cce4a9c124b63d64ab7ee2a94360461625c32b953ca1cf8859f4ce7e58eab326977c48814932454b4daeca3e2d09fa220f6e2efe1e1c11873d7525d67f8a5e4308ecd4cbaf047fe642d032f2caa6670420b2f3615d002072ad0820b951631f880164ac8e0420b2c1998fd792cd0d55aab0d5556600864271410b4e8741ef1d490a5d65aabb7fb08028106597ce9f4bb2b2a8c3ec6bc3461a5054659246591031b2cc001171dec6002287ae0850d515fab073d7e9de11a9468f3e707a9b3a7cf5a8d368a7d7c2f1ce8f42d8d396207bd7e7d9a3f206207a8664a29a5f2053f4851c1e40310566044a35f3aa5309d524a29a55b6237bdbe1c73b080a2d7267aa512a207343cf5200a219e7cb045af5fc32d5824f5faf5a9d3ff931faac0814b0e5774e8810b5aeda25729adefb25adafc41a76f5744bc68950d5039bd806a6a063360f10414aada155540c1022a8a3ceaced13c6533c595256caa74accba72b543528b72344f08315c040bfba7c3a62895815475451938123ba4802064850bc76974f5880609a33533a73753ad07494407ffc309ef41c42ce588f6d7ad26ccacf44168b5fac33169f4797cb8a7324f33ef4fee3f6e43c5bd2c02d69af8f83dcce38dcf3a8cea6d8bdcdf36d968cb1c57e37b88d264ddac00fce12f0676a027ecda0ef54d43cca1c1715a1b3b9da9f8c9138f8c10d9338f8e70ea1739b46af5b86d93da9833fcf1dfc9fb411823481fb1c8a833f9a4345cc7134497a3ec779268d153dcea328dca3e01d4da423d247569cfa1faceb8e261295cd747c85ce641254d88a2ae2389b3ac6f825ee3297452ac338ac50b70b74fd31eb26cf0acf7cc5d4f4c8faa80fc723f74bd7ca43651247776225c759fcd5bd94ff5adbf5f3b0a40f169befef260dd37aeff1f6fec31f66b1581eeb5b5e4b8af72ccdda7abb90d02f2adeef46497b8f7f9c50f07e918af7c30fe8d57b7f04c9d7c5d6f1b75aacff58fc07cd7a182ab2f808f6d6687b8b8f31afffa13304b295d0d2c7f5d3f534fcc13de62eee8710e87b560af45399d74d59571dc6a3cffe43c13dce0e7ee0ffd4ef827877add66be9237b8bebd67e8d40b77ed4bd25adf4091f7f320cc2df7ad686015b16ccb3382c88f5def770f13166c4ff80d959af6b6bb3feb5fafeebe49c751572706f4fc3f017e9dc86611ea4a8cb4fbbe80c09c5b97febccd2303f42c5cbd3af9e9e9197a41e42972ff801959e5b20a099a2614002d68781fbc961e0fe52259dfb8aedd8753ee60a83fdfbbcceb9333ae9f5e5dc19232083943af549af3f763d3fe6fee356af5930cd79d0aec3d73d3332857924158b92435f87acad821620289e3aa60ed9c11e86e1cbcd9574c8c3672dc1bcddb055ddb015e7d1bfc9b296c567af2ba017d75494303ad0e07f307cf5df86b17e76d67f1bc67275a1bfd0065d1ffc51f7186a0cf58b2fbafd3ecb3aeb4ab64ea0412e3b3bf2c02fb9fc5e127a941daff62737f153fcb01ff361964b9aed32515e4b853cf8e40dedb22e71f3e5bb6237a95ea429f42c7f85cdbf7d520965e98cd2e47f34e471bb05b1ad53fa987fbe9db7c9222541822e6f49f4d0c7aee33e1d20672e7944fad8f49c4e5f89f4997ccca9a1d31b0b743ac6be497da840c56f81407f9f414024496cd0f9b95d548406fc717ed1dc46ba9f1dff995f848ad48891d14851119e3a6652877ede78eecc9f61d8c4cd3a7dda43651864e7cefcccf1dce9f2a5dfe8f2a98b537e1f54a441a86cf220
54443277e81f9105e124d09e7704c9d4213bfd514997bb4302f6ef3f6f2f85ffed5781cec20f77118a436df75167b68ba2919af076ce1f8d921677ac303f052a9b1c3481e67ecc3d3f3f3d3d477e7e7e7a7e6639cb6c2167ceac2b9060146857922463922347b86b593cb090eebd5f9fe64f8a765db121aa4b27babcb2011488935fa12db5b61e759d940a4d3cb0a18f720a890755fa28bd40d94ac516e8711ec9db7c2ace2bf4f8485a7cf266a418638ee3400eeb3cefcb9734f943b897efe40e999f3f7327f227875dce632aa145ccda3c6292848a98f3a21a2448c5a90f8343ab46a1c7dc6f95562bbe982ba8a575ce0f9f68a874ec986a7a5422443402000000c315000020100e88044271388e645108d17b14000f7c9040705419cac24912c4208a821432c6180200000818205343334312042af0a1262a12fb6497af725470e5d9185a1eb61a02e46a01e8cfa01fb0e90d0470ef7f7336fdcbcb7f24aa596e3950070100d3f5d70685865e660afe0109cbd5eb10885366611685226fbc3b1ca0224fb9a6ab2f850dbd3c8d1a57d786fe525b750118379c008f87f05499eccf1b40318da3a8410a2494836b708875e2c3e65a88a51c6c801f61f21f68a1d35dcbee2f0a80a1a7070e994bc5c128d776026d670609a29e13a37ffd1320f89c0706ff57c8431e53199490cc204d0b00eee3cabcf304c84ca0b32fd1f484db498c42b1454f5da3fbd0ffeb64337769f14eee1aa25940db0892ae49a9c63c0b47cca6964ba9c723b30153aad247e6103c400b702f58eedec60b3eb72f3b0bc96ed017d2cec7748fa043c376a477eb495349c5a6b256f2140f2a14ca447b48bdc2d73cb24a449ace270575d984d201b14e5bb45594e50b3fadc144ef42f4c9288c89de6393700a5355d49532dceedf92cb7203f515ce89ff321852e8e0ea7e91fad10f0a729d38f929ee1c0b8c160c112fec9bb9fd3ca8cf35d3a76880166731852f5313e406aed98b2b0f82c05afc585b5cdf408e8e2a1a87239e1714787fd4d6c7a42da8ac5bee122d6583d30e7f04bfaf8e8a023fe3c62a85bf8be9698ffee5946349ad90cc180ce43be28b7a6dd4604c2fcd2aa05b3d9a8cacc3a7d4a8310479f970c8a42c7d9581ce6a44129bbf5c9d9b520cb23e13ca346c61f470becf842b5f80304930d889fbabf8b139cb0be8c7c1913188d002c1223c75ea1c9cfacd6df3c035ab9cf1abdb8c15dcf79a9e546674314c0175a957f2571848754de8808d5f646946d38627b9750d08423f29e198ac6a56271d205db33d72c7fbd151b9c73260d4081b2850d89f853cda0bf22ce065f242d92dadd97adbe7dcba6e22b54b8f41f9f8326e2cdcc5a92fbef6df11bca25061d4cad102c704b43094d707f93c6c4f6c7391046e286e15870a19d1eb2c900d00e23e426024051f00af9a104a20e5d79f9760b728cd1d5385d0fb0c49146a2beb114b736b9e27b0996da5c090d91cfb7b37db361b424033757afda4df4788b801e33f512111aa3c7a3af1cac5daaef5dcc3857448a4b58c9e1ee502a7c17e4b8100fb427e2206603c9c391f8e6974358bb3301ce58b21046c3922b7233719afe85789a778a4610bae94476585320b15877192fb33db606b429600b0583f5215f53b4b4bc85919df3784964e2fe6f113fa5ca94068fc17b30a5d95c2d4a5764acc29c87a4a0847fdc59783a5fd08d0a06ff3b523143c733144d094ed92046f76a900db2ea04b3ed9d086648d6b1ab092232eac0cd6c8d3365c1127486cf73af4d2c645209dac4d37e377c26ec0a2f5ad16fc0e22aa92c87d519f96d59b905d17992f57533284367aa5b50d6439b9e91c4302beb447a4f529b1045b506478edd5afc9d3f0e63385348ed1a75a302c8a07bd8e6447eceadc5556b7b3bbb9cb4d6190d0ad4a913b3dcb1121dc12cca1efb41a2a1417af324f38d7ff78cd1855e4403af4f9641e004d2b285a3e4e38daeda64a1398e202c02a9ae4565a6ae2c37d7ab82b85aa80e9443a05c8f77147cf29fdf53020124d7071c0848cabe11a27340a2d57391f48b6c6d578a0092d1ab99b74530ea8901911dec678b7c4f850d02120dc5efbf18fde33fe8e7ecf1e90875ff365a70a6a332aafe9c252a761e169734f0cb94b14a4d57b7b70240aa212b8e91c51d0497b3e9d3ba0ca199267641d8a20412c9a8d63948e82a2c45d9eef02a2121a02be591f310fc9c5f2d5bc88251c9da0c175b7a25637fa26b29d1ec7375cc64c5167aeb05746fb54262be22db84fe2c17fd4ed58772ad7147cd763965b62253ec014484462f26b931f7513d1e0b5d14d0c1263e0c8093e96747a679b585dec64fdb72b8445c2fa40e9776ceca693c8e9e37bb6b38da6f775ff1d4d92643038ebb675c86578aa4d1c9006ff215a53fd5571ccd2a1792bf0c5a501546f4be6d325002c56780dbb0309ff47e09daefc730ef66cac1f36b1bbf6a86fa739a42d4f5c3cf344c0cf22631e0c8a020b7
6bdade0c2c92ab895a179e69f1920fb063105503a1f2f5dc1c705ae424127426201ca259ccc8d7fdc68b1807e0afe31a9b1806ec207b0d1887cb1e280e7a5a86f8a5b3cb42f9fc8bd925314c7b37c9f84f79b1adfc739fd5bdc09ab66c3ac1509c9a45f7778b8991935fb2a3501da98e63ea665e6df0fbe27f72339e63ed254a72602aeac252c9cbe5c03e5a69e43ce0d7f1e8af5a5ad7c4a374c9a7643fb1f97aa354a67cd199554a0e45b187668830ad869f3cf8e212ecab0c611dc91af265a256783593e0b7cfb0879de8e8ccb28e3c7c1d7ec90a00423a1ef1a5d6347e2d67cf5cc96cd34f805a2c7152ad168fe8bf316d0e05062cbd45592689e1da1b52b491fdf1a13d57a3e197990b5efe6dfcb560437e5b3542cab7e902f523f7857650cf4c6b0e16b60199f4384974541ce9daf1327ce8b088af9c16f53d9ecf3646b219c3c0cb17b8ebe00fadbdbb4e986fb28fa3fc599c8fddf4268f132aa4f7d64e11a3b41e98256f2039edb65374b4ecc0f7221307c6a853ea143d3ca020a610258b39bd3dfb92a631f80bd00ca6e663a26d771c35895f40b4659c4823aeba29377cfa763fbc1d7019daaf8b93f79486a87a6c59d9d3a5b80d8fff58e25618fdc36f00895e341e0f89fc7e839dafe60d739c7affa6014a085b3fffb04f4b8da0f5b20267f5142b5ae2f0c92ad584e006a101b60c146046f96ed79e961f0106a04514f00612597ee300bf2430e0e0868fa8687eb6bdc47ec9e2f3f5a8f518042b1f7e7bbba7f6c949cfa100e49355bccc868fe71c0f1e80307cd81dd56f157315fe6e8b9f52a9e41f833612221cbf8f17c6fadda3261e5d07cbc01fb312c771eef811bbea7048a518fee7bfde6f2cfbd1e5761ccf4faaf392cb4fbe0ed17319707d933a6e4966b3081493ade0c822ce9a1f22cd92b45572eafb74abde7d1c7d232a549e483599c75410b9afdc431a0e95b2741499c1df2f7e1a4cbabd07081f9e1706951b9eab497d37c8d4c52bb31e4e739bc5bac084cc4a0c242b917527dca4409f8799a51bf2d7528e237cfbd327bb12a8a73bcf10594c78b0f01a296668cc3b0a7b932e3bac945e91e524a0b65c6e7152c58d4ad9e7453e85728175bf6e3928adb79f4abf69e00d9f95f04ac3e999dcb961217e7e844b5b08cd4eb298d511811dd0f10378f80d8c17e72109744121fb5705370238c38dc918a60fa0c4b68bb8e680b4b867823b74b486ba3939ba663f00bbc2508be3dd2cb348205a2f38bdc28b5ab24917f7fc3095add02775d302dda9c1dfa15336d1c7f4cb4f7c9d8b88cb85ab9a4684787cede17d926da7b7ac16aa6b2e813fff233c6781570a49ff02b09af111dc97bf5a1bcc69f1ac7a522ed21a76448632f93ef1e27001062b77b800f765837ee371bfeb1cd70548777e9c283ad5d5b5c5bc28eb1bdd60681b79daceef3a5c3f5b6db8da49592ebee77b3a0891e2b0e5e3e66b53e73e3e88b6aa38a12deabc7393a24363cbe00e54e9afa90232614c2a2257312d2ecee41f140b4d66c3a72f91228c10efa08c3826fc6b9e873ff6ea9316cc00cbd808320fabc51215a4ca59e735abbaac028e3e3609c9a2262ff92b4997dcd103327c90d0f9309f3fac783dfbe3eee0dbca1c3886b088bca75694cdd597be69b98f9011237a8594914ca47a37387de65906f12335dbbcc322feb14163a3d7acbd0d58e7684b71557345f3ef8a03d3ef249e57b35ecb2edfb1070e956441bb3b01c4940d2a9aa5c3fca80471ce3dc50f9e2735cabeef1901038484b0c9281781406f81e65f7037962691ee7038568da1bb98f83ed7528119da91b0502c83b1393d1af165a00604720807b446f8062488e4b6f3c7abdb498d7fb9f9f3daf9a71afab09e4f9f25ea81de07fbb5a77f0ebb92cef59eb6e709a3af46738bc8637b78fc75cac372f3883425781b3e77cca1ff4d567a503c5c1c4b7a5e98a41fb92b8719892b403e4f9731fd67e3c83b34bf5f0858e8695b36caf243f09242a86d64662f738c4e0aa1cb5ea38734f12946df7e6ef2b86f3a2c8365c4c09aad3ed5e1d54f6f51c8a15d6fa342d723141d62a699f83d2729942b092ae7c18af13dba40ab382079f8c5c8f5fbc993550a8725abae055a8bd07e3b29f7f031d21dba2ba45e8e07411048ef11d64cb9ff7ade4d0cd5f630f689140ddb79267bd741dbd2f244d15cfb6b0afb476a830ead6d46a7957cc0c2f11c50f159ba9d147ee84d149af63133e8ea1ce84ce9e5661c6a3f7637a862db7125207662cd23721fcdf3e58677cb0c5120acfaac3012129fc404ed82b002370751ece72941dbee95e298e95e007a5bfacc2cac6e3dfe91880e22a7646b08c38bef8142763eb6719135c9b5d2ced50b83ef05a7d30c7744dc140cadb6a3b82c26717399498a061c60ff96580209b76a7ee1b4da6efe801f3800d31668c9ebb37a9ba76e63bdc17f632cefaf8263566e7dc98498fd1ee674df485c72647bf963d60322ec9bf34c9
94e608f8032b1176f3770c26696f772d56be28a74b1e30106832f5b3438477a1496ee1a7f066b35a2c03d6e63a4c475ca494412a8a95faffcfddc6084b7ddb16e820027c337a2dd3825c40deb43f2668cd69930eca1718dc92efcf9fda1791def389dd3029973351e92de808ba514d6a9094f349a6ac2a970514c676fb3084d491cc45bb95a6e1a0c15264142476883204f6d3b9456a30a386e2e9c68f321f50357ce54cfd9aa8a30689ed4296cbda1efc44b1237455af5b6f32c3a12094eb06d9e2cfb97a0518529e89b769570f7224add5194cf0cce3ab66a3f11692e9ba593ab7488c8c7a3710f746734202ebd86250f58a42192d2d30067451eaba90f7503801c784fe6e3f07a6d5e6eb58130cca2182e77a65a41be8681c2c9f09d17e9114e820c75b74b022bf536c30552b3cb79298da81e27b194398b46f4bd9aef3bcee77e7cca7d044f417216174bf42b0e47a5aa92b48902e6e9d5c8ae2a85919568e28e104f66aea1d3e0e0440222ca651432866e033ebea0e1af90bf128c97cf045ff57ae7f8192c0189978c561d452091a0f111480688c311ec2dfa2d790ec3ed0bd0b3fce2f2c93d96a963255ed54313d37f486f7bf6be3e84382f157981610e07e827048863d028788178baf150b4182593117d7986a7db0f222805aaa775932805c8cfbe685802776053ffcfad892a6f157a84a7a5d9d87da08f8a81d351e523c9acc996441c1d708dc218cd5d944e0b05c5d77ae0d790f66e51556c8ce2141ea6a1d11c0eeb29efe4b7ce1aa4f238ece789f473c7ea424f4428bc9f1946f6524485543cc4d645d3452374151c87513f21235c63bef1c3d6f3764bffac6c3ba045a5ad5824ff69a478d6067896d9ce8c2ccf096c376ba99f374f48a5c5016c85afbb239baed006c00b478b8afcf1a376e7bb1da20320577e9b5d94305ea10be9c124196c66c964aae95ce51d343d0e6c20036b03a939f161ff70d5561901acf81578a3bde143ce9223c0785b5046a3233647e2cc45fac8d7684ec9fe2cc4a3fab0dd5efce575c77e56c47bcc142f9e45bad683364dce83a110595f68d1605a4fd594e3764d85489c360603f1e91c2ab0222561671a8550620ff9504e8cfa813abbb1088260a4f83c55bdccb191431424b21c146f18e5e81672080474ae987adc2f485d16bf4de53d10ec00ad3a5f7c52d9370d6d8b23fbbc3c0f43505d42c470a27e37f6476e60687dac9a3f7eef53fa086f172d76734c79bc305fec14a55d09167a25873c676582d24c906d741e2f04c02557646b4f31dc54d8d9924d0991638d890800323d3ee5f2fb5dbf1aea94e340f07584e0f9ca50753f95312771f8a07a23c4ef26f807e42f90d78bf47bc2f30f4e49f17873deee999698f77ad48cbc2a5d313d12c6580cfdb71bd04573d9c0d6bced810e28024d7f60b3635194aee7a77f94a5805393039da139009c8c5e3b409bce5e4e94f86ee76b3eb557af54e5eb60507e01d78baecf2f902fc6a2b71e0bed3b064de28fb1efd30d6ae1f3b0fb84f18302d74ccb4f1855fa1667f501ed04d049cfbd4862850e082addd7ae2423418aafaee42ab30c07f089e70a332fea6a9ee94af330b079177c115f199cd6ad32c1c85c08c9607a00093def4f521d31ec69a9093be5a37291b1a4a98962c12042603a1a713c2475ab2202ca0009f69f3d16a1319f3e58c77fb98656de0fc9c2e18ff5d327e97212d756a2405944a87f93907048d18f15db062f1131a1a69fbcfa65ec23a909dadd0fecf619b0bb018f3318371da8f8e202366e8b67ec70faae942690f0119ddd241d670f57a7040b91ea1b86653d03106905d91e247d9673b60dc95d497c29ab74659880105891dec3fee5e9cbdbd209d5f81c89e7522a14d6a16c2b4e3a91f0d01999832b7924969d91ae46957d7eda25cb0da11644b45b0b09df5c0be7f3ddc474c2a7b120f7ddf94dec093fa87ce23ffa45c7b5a9391b511223f0573572828ee9d388559ddece78d7c6a96a991fe9f5be165b3df9942fd79936965b57fdbafa2ce65e16bec33d4781ea7a69ba7105b6f45eddc23778cc2567a167dddd1921027712835e74d09199be85a3a96cc0d7029a31f6fc6f7828a3e5d8496eea512c6fdec2da5b755d371ad76508e19165c36ac0b82c3d550acf9b4825b7c4bfd784964d4e1e2f3a908e087b4366accba11eb8baa916554b1e862c389da88a2f353645d0f99a7a149f2ee7b24af2137740fbc81a8a2dfb9f9b87a2a135f4bc6a9f74a9200b7a3a5350cfdd8f584b5b2a0c1c0243fbc45e9a092b0dc985876633374ac368a0137b50053d859c5fa5c6b7540e87056647d580c798bbbae7ea07821b453c694e82729d133bbb7b0da9da5a0b679950cbdf8431c4e162a55c7f3a45f7ea3b58a1d639b3826d11796ac41d7c83584f25fdcaa2af933c4cb00aab2e682b74c6fd8d5ce914416740442aecaf09184d1db1f10a1715c849a52ad08c1f4be9e0dda5529c66c01211c5
f7bc1c9cc3776974f1b81a2901bd4f6bf94ed2ca17e0103d1ed5610185b40b5f49ceddc569e0af6736fa550f6b643b6be79a78e208a309aa34072a1056dbde9393c2ae3adda0899e78e983bb3ee7694b15c404ca77adfde709daa5f0e5dd0e003cfc9dca1229c72cbb11647d1f9c84abc77625a32951143a27e28a796f1489df776dbc8bf23dd8544d9a320f4d2dd62ac6e9df6568476371e85a7e55345ca0371e187172afd1c091a71325ae72ff9e0f2d87417b86f637d423cb1004be6016bf860babdf3990648c04914d0842ff7c6eac7c2b7338d30734225cda8852f20f58e03cae39b982e98255082e75ba3312341d41360ef56b92413f792b164d3a1770622934a26c8e1723e98045a5e0f0cc2ccc41d5c9d69693d126a823f54beea87db18273cc2819774522a4dc150852c6b855eb5212cae530d0999c18a5131925c2f54426f1bcf60baa99a4e93d020cc55921e2651ced4f7b3fb783492cd25f2ca47ab199288cfc17d7131367d64ba03023f13e978cec380ff087eaade0279538d91afc5a7acbc0eb57c08e135d4d66a9f32da24353c5443f7a5b2b3d32be6db96f053e34014cde144a71aa4d5b02b20bfcf14a409151fb89a21d953633582c1c617f09393f5a6b22a60c64b360b28fdd78997fdee6355e841ee518ec9245efb6715b8070596982ae0a6ac1650607a00b12a20af58027e2b993e4b12801b13aa6151d4251e2772cb3ba0445c1709282221ff01bcc93a47fff2fbd25c00b04e210bc2965f5864a146a757ed91bf28f21e06affc3a03c60e276ac3a79da44efb1ec94cf5e52bf6202707d46220dcebb816401366abe92ebdbb21048ccfcd210f4f073e3c6e189fe521a1fe4f5e13a3e0696df30ff6ab01324aa58a7072026d3662cb5615ed732b9225b18dbabe52fdd2a3ccd880ba97b51b53b1049eadbf93e186a5ec19f7a77dac125f31d6f6f975384c38159babd998ad81e183f3bf0e4327f0f670244e0534bed9cc049b93d5e9345e8389fa87b488c00ceee5b994007cf0374e5a1206a8ecc6aab0c677c69ce3d9420838e06ba74b536e90df4a6b4cf3a74c2844bb6707b1a46595e89d9913d1a8b1591777d5939a944c9d35a51e116e4dc652578ea5613a3d56c1b8b5ca58dfdadb64ac92bac91043004f73875668c111a69b7e13ba16ca79437cecc280ce663e23549b5a373dda7633a84afbc94483ac5b632a1fd7bbfad81a7a824ce57a2c5f930d28dd5d128a3eba35a510e0ba16cf3cf6681820e761a9eda484b4b0cd68b295a399b4b23ef593cd710ce363410915e8e1d908b8004221b2b941248bc4709ee0f9a84357ccb022906aa64e32b1b274ab53a61661ad0d706ea2b329b06641ff4d7c2a59075cb081e6bfd79b146039039f9166d981c8cd160cbb00ee0a9c6bc79be670dce06a4a90bfe2cc70b88a3ec73b44d30d5029a17755503c0ad221a1ba10d1803c89da4ee8a354b94dcd0cd0c7a80630a9dbd61e96bb7b0bee217cf62482b9384657f6f35de5bb848f5c985cdff94eaf285bf638e148c2991ee2b07af2305a580b15c816a7f6c42a85085243ba2ef503315581bdf4ac195957c09bd27a1b0a52abaf61ec4d77d4a2428056377927be78ad1ad9df219d340df2d6cdf8cea2f6bc210fbd108fe5a0eae6ae7e0c8014dc146faf5fc8acb102b96fe0a29386ff3030f5e4b747db4f70ac6f6da779e5142048ec227f67310e7cf661c5ce1800dc76d6f9d8274ba787fd6920f68bba706b24afd754ba5e784c759d4a2c8c90e85a5866901e4d050ae6c66e91004f635be94a7c650b7fcca6f47e2fa2726608f7c52398ab47a9cf06ace126a2475e81ebf9ce0f3d2a780878813d34b2f0494c33802a529871451ffa55ad4229cdab7b06b8655b73e2fe873b6026fd3310892e7583c4a3dbe5f9c61810234bca70fa6d0cb809b99eae5074d828a0726cce2675d47e081b6ed59effbac43abe735f224175ff9da91fae73dfe946cdfef4a459802b744d8a20dc3b04fa3fa98feed179cef58c83050ddd1bbfcbecf4e6aa28b2cb8a944592296b2dfd7d490d6d28a2c311eded8b64de57438c06abffaa9ae48f767e9dce942a9579e844bd51adae441788199934f887f9fd646233111f12d529f842f4540b9244ea4fb09559d845e05c3dcc8f9c69e81c792218ddda7acde6f47a549522175f8f5c444caa4a6cc68edb40fc362a34a508bd42c1671bba8f76a74a78e7a751a6d93239ad9c0a8db1be7f766d99c6e028e8cf9ec99bcb725a520649d7f87b32a125343af25502d1a2790b0379e78f624c9f2815a0af17f5cbc19043da0e0686140f116da8a74ae23ade2f5544a5ddcca893ba0c8fe7d94cb083128c68962fef707d609f158c5e5f0d1cba3442c5c31cdd489799d43f4507458d3e8d8d9d25044d229dae1648b7e24e133ee729aa2233bc13e53a17b4b416572c4a269f188fb0186cfb629446bdcd1bb4d5c25e92bb833c2f535e3eabc82bb54c3ffc8d7918f501
b984004582df1594ce3a7d8944cfd320803d0ef5626110a06fa9d0c0d412ed24926f82a60025da1b2443682396253780f86a6efccb8c8d6260d3d0a937c6841f69d94e401ee68ca2f600b8e3833c86c0ec5407ccda19b8ec0080155a95d1680295ca47dd0f225ffa62c21cedf87c44222a2c387a03960f85e11e41d3ed5973757c5787c3f61f82ec9a79aae6d2d9b41911d569fd6ecc2caea79709585cf44b9a67263c067babcaa18e40bb3f909667ef3ad96e4e3a5bfc295ddba11eaf0e6cd7e067cf63c62a846fb12a95fe6e5521553f24ade8860bc1646358ccc345405c10a83f28420e7641694e752fb2924d9b4fcb16ef3447a740e462c39e17ede09d381b9accaf9ac192f3296da86a1db7fad4139dd41d8d97522c1ac328ef1f88e9e18555a8838adff08d89e8681c1e54fbc750c0e950a0a1724be114d7834c31047399ea5f2e5b306ce720ed6ba2be4eeb393e4fc7facb2558c317ed25c4d7f68e121927bdc2c723192875d416be4b373734fd463a122e3d32d1d4c3f6498bb2003f2a28c5ee4d9799ab1782c56bd81eed589c89a7f03d002613c8a9f50d7412434d08c200ebdaf5df1059786e493c281d0d9c9197949cd29d7d12d1daeec160ed3c171fed5b92acb586ea06955bb666ca7aa0f4178d7607b7a61f557e567efa5fbe04d933bb8fe7ea1e62b4a3f79a32faced7026b0477b818084af97ba082d94bd867f407efc0838887e5fbe78df84f440d79175187dd8eb6f759abdb633a4d0f4a91229914fc3c4ed16fac1cf2244f43e5e91aabfb78e69fc56c865c91cf2ac2e0a3dc6fdcd10b464b87b2d772aa5308df4b94d07bb50aadba8fe8b7fc78b2dcfc537334d1846eae0e6f6e39a987da076bc145012b7942812fadde6d509848b8387d0b0ccdbdb594c3f0ed122f6c61e86272679df6d6f7c25ffe4799f158e1cde0c862996235cc338f40ecf0be683373321705679c846bc4a2a875da499f8c9ca57f4afb89d5a89343aabb4805d24c72e510453f2f7ed3ffd64468d03b83ecafcbee0298fce55215ca0fc1eb01d3c754613c8b06541e49dbb18e6aa15fa98b215284278c5be07b753f266c1670bef8a47b464ad0a6e3cb14e12d96652213976802a852b0ec2ae7cbe3950a12e03907503e3d732e4834c5e13044dc4f309891a919f94ff825085b03f57358d423a6ff7325cdf1360aa60186baf2abb0c14e3bd57f875d16b50dad1adba30fd40a8293873ce29ecc7450d28e5634b935745ea59421389c0966ea21e8ad401cf03a48ce76c429faacb1b0f1b1b6a3edce9d9783951510d0037fec728f6d4a667cc8aa8db31bb6fc2e747f2d6e2a95b513c4fa86b0789eb830fb7ff30d54681e5cb04e694eace024e18d69e31b0d1872bcad68f8b41d387c3644a9dd8c5de433521bdf40739f591a895513d1e2c9447d1165e28c62afedc414b206bc12eb635fa7f0a8148d4fa1d06be64ab88f4b06b7ce0abec27738d30dfea8682002548584dcdad8869a4400f053249de6e844054e3c9f914b115bf2831bae4a44c09f87ed9d492e561f0aca33a15c4d629a0fa41e4616ee5724d1d0061e06b3ee3afd025574ffd11bd7b1307221d0c2409d12a3751b4abb41bd6bf8196b6e5c9f42c80055b7737b0b7de2a74b660af66fe592fcf01cb322ca6fefb0511d280e7e3d6347a44caa3c334a849b822bbfbb60ce6aa328fac02daba5a2dc0fb284095115549bf468e8769b570bdf0d43ddffc3ddbb05d2cf24b0a1d5b5765aa4525444e672e17c64d616bd9d8efae34bd407a405247ca2501492bc53b84058b875d9074b76870318ff936e4953764b738ed4554e16ec35aea1fe09880fd8100e2f824743ce8d86208142e06355a3fcc2e0f4479d7d936aecc8394aeac5864cb88650b2f5ba1041bef504182ea96594a792a8582e04248934bbd16082716ac9d039689e39574886e87f4744addfaf0ad3046a880828a46e42ebd7829568243b88ac75ae70c10d39815551aed74ef3350d62171deefc0d4367ca33173a9976cb7ef15647c8b023106847d8cbf8f645790bedeff1cbe6617b4ec0c0a2bbc736a35773a176193e306daf09e78603567a4ee5c319fe87123e8aee5346b31838677022437e50c48813968f416d52404aaad52b81e0e2afcdb811face271db0a2bb18d821a191e95d20b418c8a4221860947f399377ebe2c2f4e6c316fa2dc503b61c2f382421bd273356cc4523803a0c29e7976de3b0d5bc18a25f727b2f88a89de19494c11cb1dd241d343b64ca338816ee7fe8a8700077a8db4b6cdc4c01e6fd3353a3c470cb9865c11da74928b1a06184b4d2da8d8be651278e22a8b369eaf5e1d38bc9fef45d16dd324a8a58d52cb466d8c55802427bc003540478e727939ef0fce2e3a25bcdd06849919929385b50e558b41f5d2bc73d9f0ec3c692e4edb1bc5d382858d242f5258cd676ebbb36d801796dca8d36b75de0c0b661076adddc075f9c9468878daaa9574dbfc838a401a
0fc08c096df690f63d03e77ed7357523a69cb37c14edf016cf9ab3d3d164c475758dad09f75ce8ebaa8d9326646d5c0f6300b71dcbdb5863bd46548dbbf36d9fe1669d70ba4c4458130c618638c31c6f8047bbb5af3bd57944f58dab67555e3bada755df57c4ca6eec864ea6a57419595bab2e274ae9838ade2b156569458ba0a9e5686b63f77f25827aa643b05dafe9c8b8743815ee8cb4bf5f912454989ea2829cd8e8c90624646afe93381c6d9337b664b3e519ac9249984c209d3c39146db41f184ed6f4fd8f5fedcb78612c4dab679acd9799be7399d9ed771def799be4e6f269595159695eeb4b1a05a5a5c5abad4cbf6f2e274beb8bcc0c0c4c0743233dbcc8cd3391333a342c50a15dd831b083a9d6056d1d0b468d1551000abba5a399dabb07b0fa7822c3cd6dc5f4195c762d1d178acb9c5d9826bd1c2e96c91bf9819bbd2e42f26ef1b8a2fecfb379c2f253500120a8408734e4a8148f1c9517d786415c2c95808f4dd39260a3af512f600ca00ea4294494101285017f24fb02dcac8a8b698812cd9e2093a37293d8077a2feea556f95a8a103a9c6105cfdbaa2c90e4190868044521131ac0e1daa0ea2f155efe1576bad8e1a4d3210288030b1c5bfd16ce028cd3ba834c01140ac5031231303f3927269419d585654eed20d0e5fe268e8f974952a3a9a3f9a544d669a23a809cebdb2aca868d3e775dca6657c6d750a43f292306490ae4dd024afa931783a296532e42d38a2e95921f110119ae2871a45178663e7470f291d17920ea620a2093b9708d88f17c07ef8f88123c995c0bd116fe44dca6aaf3e5914d13ee89ea31bb8055a89c6a16709dde52003fd835ea27fd42062812ed23ad0e20ba17b0461867f558e628b7fe32ea83c58506badb5faa023db8006cc638707120e1e3db8c1e3c98d5a6b652144b3b881ee31ea2471b784f500daa27eed09e9d53d94b9eae0d0dceb0e1c19093d97d8b841d890d9bd1ed1a05396c788ed32bb9d0ee339c2dd4f29163ae898be3c886c1f337052e52fe491a7fc5db81082bcabfd9e7adda76602380d94db82727b15da49c5f0fe03b5d003a54400f6aecabe9905a035e2023675316be81f60e24c5904f640fe7ff166674fc60577de40fc1e68231503497eceba8753b706da29ad7f96cb9d90f7817a735b674d3b8355dacc6f67191c9572d2fad9ed446b64b667adf4ea87ff6d8c0ddcb683deeec0ec351a286b8c4cb36fbabd7ef7b7ed81da6be67720b72b3882bc4d19ffdc37747065c7b28b9394ea38cf8e9d429d72c837d82541f01c19c173844f94e429c2e6f1c109c5d384a669dac6e5c042702ca2cc9959603f1f2eb601ceb0cc21693dd829ba9a3c9e608c73e661846dceedb7ae7370bef4648fd68e77b3c77871702ca4a0f54e4f4ad9b303eb2105ee295202448b683e13ec39da29588fd2d640eee101ac87897c5a6badd6fad0e9898231c639f7247d3db3f301f3a32e078f071f42f898901feac423656f09eb29b235a025cc47918f156c7d4249183278c4231e4391c6f038f317741c9ab96645b3d7ec6836675172765aafd6fca31c6bcd26b725bbad27b3357d46d91c92b96445b227ae992b8a6be6b8e60e0af9a58f52b19c9616ed43ed6f9e20e7ba340e9c0821e90814484f72d04eb4f84568a019d01290127d8e9b1d65a2eb5b193502ae2242fb4ec1bd86754e202427fda78dc178a0be7d4fc544df5711a1ebd3e7a694726a29f81693993114c65f6aea94b3fdbefc84f9f9732661c4fc949130667eaaf8997fea8debbccf14b4b75701ba4c05587f063ce924c8ec19b0be0c887251318a59e9b1fd65c0fa31604b0f18efc51527db3f06acefb2e2e4e5fb1730f5302c4952a67fe146976c632d3be5d202ce99b4998f02e94bdacc3f81de42a9bc6865d862d2c6575e9c5b8eb45119a1456e64c1dcc8b2028a35890a286e4cc49a44944cc459644f23889406ceb3e76fa0e84f546b63dc688a4918991bdd9e642697039be3b2974b4e369b60cb766aa5acfe84cba4cd7c994dc1987d3b0ac2ec979dda2e9ba3a1d4725831b39fb7f5de03ab2b02f55b98533a8fd997e173499b2d2786daa61c550eb4783ac5240c0fb43033bbb2616630a3b4c9d9226adc38d022cc6cdc2b4618943ff7a71c09c33e6a77a0f4df40b126f1d74011a6c8f6cfa79858936c71cb41d915b3da92b171cee8ab25ba6c068a4efa892650716df54fa4b182dbb45c3ff104afa24d9f673f110560caa5057562b99f9882f057a898918981c19f08038b9053cdaf4755c6678ccc298cfb8bd7d4cc8e217585d285dab2ba6e8c9394934fa594524ad90d0a293440050bc09098a8ae93ee729d7352eaa4dea0904203548021c94e7aacb503d5da0c70e008859bbc012d5f3a5a5bfc0c7400031c48ca9dde3425a31ac0096f0414f3d64977192c41ea04057dc9a44e2009524f663229098e404608739491a008a0705c8db4a1cf79acce9b2f3b700514dba294a9
6a7e2561c080a2c080049a6d5f02a73aca7455250b26467420b5111eb8bbfb3092b48d44a18d4c4103a09d27403d408072580284a402c900a80787810e12ed456ddf7be9795ab57557b0db1ea8235f48b4e7a60e928a1f49cd21d875daf434cea774ff720e85bab0520f637e6bb266fe965760a6b63d10d440a12eccdbc1296de6ffddae9433f40e9c35be398dd364d63e1186bb3d1005afe164473f11867d9febee0d5dcdefe691fd44b06fc8edbf49b8945fe57ba514d3f8684c97d4a1f9321b610233128319d9c1c88fb611c6c8a7610b978e99315ffe0cc318e68c0144cf694b98911b5be29795c6072d6aed96d2b7f7d200a12cb8eb83809c6091620643b4c8565a2e5cea48fc53db5e43aefc459741ba704919521aa1f884159cd9b6876dc964b4f358a87ddff668512b9359d90e75e9c2b6ac8cd6902e300e99435de01dbada71ff9cb54ea51ec3703bd48595415a9f2e835c4d4a690be3fcadedef7fc10ee3f078e44ae9a1a492f4865b9b7f2a24ada26232994ca69cdfcafc854a12adf1672227692829a55f6d7faa22a32b7f16516823be978481df5ddc78eafb7749f485a45eabf3faa2d7b35dca2ba5a0d08dc670e8392ec356b540cfbf2ae8afb4740d36f851a40639cca0c7054556002bf2dada95a32a5215a98a5445aa225591aa4855a42a5215a98a544ab40cf764b45af6f55229512999cd5e2f95123aef0f17db273a8e2cb3bdf2da2ab3d5d25266baf9b67c950b62d48597f1cd2f6c1aa3abf91569b270328bca49dd9fb2249d7308479fa3200e67d22e013f9d6464b43eed6ac2a926157466a899219b64d4a97a813ed5e9e14899b5cf3e8e8715cab49d5377afe149a39bd220b40b75536ec3b450e25eddab7b75afaea27908457478111ed5b6285d6bedbdb9481147c2fee8714cc2919e5284861d384987e490387ce69c53c29c071eb62da58d140245c8d29096bb7bad4378e85896789f11dfd6846908107437c1214850192265e5da60f9729cb81b4e5bc27e7480fdc0c0e507960af75740955044ddf06d0a3f503481a27575a0689a89a851a411db2c58ec76948f9f3e0becf8b5d0b6640dfe16504a8c3fd3ecd02e5d86a35c27cad560057ffde9e9d851da1cd2b6e48beead4f02e6872e8440effbf6458b63cb37b564a6960b2dcc29ee1b989fe36ad76d2dd1478e7b1943cbdf7519dc32f49dce8787cae69c4e3b0cce6c2b6be8e7fcf7c5ad55eb579e4dbf574b2549452665d249279da6d6a66f6a997a4c3ed4457dfa92c63c67a5a5d11c8371577c447b499bfa34445adc7c44dbfaaa0dc56f885d7d76fd2369535f05d374f3318d3d9837d4e7f4ba5ad115b4ecfe4e59dbf66dfbe6b1a646bdea2b78c5bb3e95d65a6b27cd5f7d4af369ab98a0c56ecf1dc3969e0cbbe50c4b9f3e95362c336c583d968af6d97a790bb3f5587ac60c6738a5cdf5582614b246bd066c9c4757c0f7cbde0a54f3aaf4eacdcda6de04b4cf17d042c9c2b2d65aabc7aa1f818abd1758dbaecfdaf6145a2998a58d6762cd97af859ac7e2f686ef96e5c6b22116c1f670caf6629cac59cb67c92d437b63cf4f8ce167b430bdef7e06eeb79f5e8c04bcef7e052ff4d9f257e0c267dd5d9f75e7f55a9853fc394a433929ddd9dd77aeb5d69ddf5a6bedce7fefbd177bacbbf373b8b320c618638c777e151164b2d92c5411414f079c367e6ecbdfbd21fe1c7226edf30c2db439c49991f102ceccc63f7371789f8217c4b95376956f77eee6a454ca557066abe02f6c8eaca1d97362effea2cd2a3417f59c1d9c5489efbfe59680e9c2633debdbe7b0d61e832dc090a3813206bebf8135e69418db73f75efcb580b74f5980f915bc7b03b5771dea93a310ca91c2c5848901d25d166db1765e6a89932fe46ca1a43d5f4f39b433e423a57419ce29f2a58792536826242454c4e690253824646d702a1fd43bbdfaec98735277e1a15aabbd47afdc618c71d67490e9d084ebb66ddb384e071d66413b571302a77a82253b4fbecc19b42428c99c93521da204c1a413310c15a167c9ce1344600214316cd2b69ec3071cae53faa4b5b5d6d2d024c38288080171c2277d9913aa239cb851a98ec8d9d1e969e5ece8d89c698deccbba6c9155625d947baee5d30334b67c7a5a4746f775676e396badbd168fa07499c4135b6e419bca2c9fbea891bc2fbd47912d7f8eee84c9e69e6ad5ca2eb633cd266d75db9ccecdcaac87d3f2e9b940776cf9f4b464e5ee6c1c0252e21a878046dc755dd7755dd7dd9b2fd2a6afddd83dba467445595807e7dcd98d7295ee8c85f2c6212025ae710868ec388ee3382b62ccfddc1ccee306a464d3e75c1dd057bfcfe9fc46cdc3e9923a5917eb9a74485d8c9338266bad5ce52438374ec2321cc34d30129643405949768d434054d474726a0d3397679ecea6ffe5987294ac94672cd4d69e93da8bba16d38e36d58ce88a6aafae73d94638991b7726bff8d493519b0bb545dc7cb69
eedb4b536d926db649b6c936db24e7b790dfdbab552fb7f6b6d2dbdb55e9b832fd4287567333c9b567b69afacbdb497f6e266cb6736ce8666ae59d12c168bc562139c3038d7d9cc5f60ad53a9ffd56acf6b5d961bb7fcd271a76d9ec73ce6318f63cc6c06ad4c166a678c33c6b8e23ce6d15f743a959a79f48bb5a7f0ccd75eeda1a8532bed95e79c8e63b1582c26b3e546cd0a4a79e79c33e358c6b1996fd8eb76bdde0edb2d7bbd99735bb98cfd5a4e2693c964a88e73c92464415b6b26417b51a3598f3cba93d4cb04e7e640c9cd3d77983d166a7badb5569f77eb625dac8b7d5dcceb625d287ee85d6cd3dff2e775e166da5a15d38cfff33a1c6e7e6fe7f986d8cb336cad775bb621cee3a63fb91bbee41e737fb9d0865ad8b198428b9ccbc5b9b40df11830242d9a1f9a1f9a1f9a1f9a1f9a1f9a1f9a1f9a1fd76abbb57588178a2556e7dfbfff85e2dcdfeb4d65dbb66dfbdef340b9849efbfbfa5da7a2efbba993fe8173bbcb169996e786d47e865ec76d2b9ea882e80fb86b7bc0fe7d0bded7a47d49fb92f6355f5c615f5315418b7a9b6d8ab4ede75f1935abc74d9f459bc27b6d53ce6e0ab55631a1e73659d004ce7d67b93b6d6dbef679afed4695e5b267fe3bab234d0f5adcc6210963daf7ec77d6fe665fdbc66dc5155bfb0ee440518b796ba16822123523d3c4407364a252f1a05695aa09c72191a0c025620256897c4d01fbe268c46aadef218c072540301e7a603ce8d088c598429c58cce572b954482829d188599775b1b47ecff35e7b1c8580b6b94f81fafc0db643d2dededf148a316c93f734623462d64523a682625dd625b2605d20988960a411a34b2a688275d125152cd957d29126ec30c30e3aee112486b4a9091fe80be5083a521c017284e8c80f47823832c491228884202a828809221f100d81480aa22947768ed870848748c8861321d9a13710bde0871a44406c513fd290520d1f52c20da29c1651cb46251a897ebc38571a5202510e516bce20a59425d8f245bd7d82323a3931f64f8461db0f80f55a984d60c2941d82fa73a740e7ac41ddebb77ef6af8a7d41793f7b3801d82b60f64a4a232d6abfd7b77dba6d285660d730c76ca9a216881d82f6d14349434345434843b56e296d94b8322d4407e921a11aaf1a3af259ca257690c413b11e7a10820c62d41fecfa3a881d42475bd42f44941fe81cc51029845a3628098e19044ba2ea93694b58500fb6a8df8d2c0c7410da220111d235d07828077a077a053a68e75f15c4435bb256ce3427a59e33a55402390c1757eb9b23d98b5dcecf7352ea5eab755cf3c6558e733a39abdd0d73b9ab98cb9de66d9fe66d9f9ecf993aeda97c2b2c9585c5e964e954563c9648812438a95214a599d1911152cce8c8e835a9d24c26d5271838814ef1041668a787377252a75eedbd4ee7edaabd177739e3ace170dce6e1705de7e178ddf7799fc9e4e1a8a8b4782c169794c742752f2d2b9d8a8703b278acb9358b8773ea50f51d942da89616a7b325dbe99fe801b05ace8262ad75bb751c974d631b7e0d638c378c2bc618632de3fa1ab5b586b97acb05129b4abbe3dae29f6fa9b6042bc37624dfc199fd45f19a6ff6cdbe99b31459c2a6f59b790d651967b34dbfc588b6bca88bdb0285b624d11626680b5228b6c0a46cfa2c2c2c51a80bbf16d7ad811b8cd6d0ed88aee8b6c1e88ac6a40dfd6f66caa12e44fbdfec53faa27c52622c977dc33cbb40b4bc7636ed618b72894ddfb644ae852738b3b796bff85edfabf57ddfebde17bfd735020d36fd5e7365c6f2ea69695117cef26279b1bc585e2c2f16162396a38c2be83bd266b66c0f698cae5aec0daebe5459fb72be9c2efd3bba12fbf46f0816accb34ee5049514da15a822a089c4986a2756debe1dc5db445ab82a2ef37406eefa747a9e7eeded75aab075a6badf5eebdf75eef39dc75a04a496f8fb5962cbfbc35db789cc5e628de1cdc852a257d9f72de6bcff2bacfbbe35ed33e6f2d7f0c7b46f7dab3bacd25a58d16d3bdf731dac7b0bdf0852ec499d95c286a38333b87629df46737b52038d5929eef4b4b4e97967c892e2df9d2125d5af2a5a525bab4e44b4b4b4b7469c99796969696e8d2d212751acad8d3c37959439c994d43cc0214576dcb5fb8fc7cd1e608e79c93db3697d7d0570de44af3b72e2957f75a9105ebda4a6817b005945e73f4744559dc90ab09a06ec81afa262564cd7dd37d930c15746668f919502ecfd949bd4a14eaa50b18e32d608c8731126879975fa1e55ddedf254401120f5770f954888297777914a4bee56388f102a25aa72368fbe216bbdb1f15b630a7a076240c67792957545e4a7bc3822d9468ef4d4d4c327fe1d2e997d285495c6d31bb730245948fb862079b0ea884ae3517a9543402000034007316000020140e898462491084619cc9d80714000a65763e665c349b0885b124ca61180652902186184310008600830c8d0d191000
79122883d1cc168113c1f2811d0aba3ca64d0a05d64e12bc6b1ff05d48a12af4ac3770637c94ca5f409c4f7e1e0d0f4ddab2f34ab934ece8daaf123991633243107a24e8d5103283d70615934a050d19ef7f1456024007870fee05c32abf4c92b30efa65baa3f7d24d78d0c483914c5b05a20d9d6bea3034b3309a0a96635c228dc522f7d097db05aa2d41349c30efa2cf27306c4655fd445ab220278c44ac44f0960f3b1b8a0e4ab6047129b00a227897aea47b25ad63b0281c16039ecf01cc8e42be403be0b075b31b580486ac8e2c2b272458067f7e56dbd5bfa08b236cbaf8c0c978d085580085b7477884c1fb9979ed8496e092d0b5ab60076b8a3f80a0b4ec30203d9c104f1d83d4bc1491b5ab0d905f289600f4c0a108e8e4fd18514720a7ef2941175c31729ff5f3673205b62bff41b7fe66fbb040818ab0045415067c14a09497146ead9a9d7849389db470ebe777402946760ecfe34840d134b5ac10e90ac4a1cf20e14677afec6061ea76787220737f2d2a1881e10e8837292153746a354fca49b421b9580410c0fbff8acc5f1103c1be9040f0abfb0dc3efea84fe523c8c918081de5e384f2701cde25c2b8039e50b04ac2070800961bf8ad5e9af83193adb5f20187a08de04153316b2284e6c250df28542a35debb0ffa248fca4f3a4d66f563049679fc18f2fd590c6558bfcbffb9ad38fb41e08513515126931a87ed5e7b640fc9e87735efb0bed388ca7de9540ba26fd47fb58bf5cef9916e9bc5f83bb9950c555943db92feb1230901e542f96bbcafc452377d25cdc74181b5e283e447a432826794932db45f564bed49319c1b8701d7a4b951925c50cb180466353dd4b0e25cbcac861b8f1fa211cbb74bd4b6f81021dc1d783b5a1dd16a1da081e1d076c6bd38a547a35acd5d5d62d35ada5ab01b60a5f53c6824e4175f874c0184076b5394823f5316b0513a887785e3058c2bf02f570d836c779ef01fe4aa1c2c24f43db54dc66580b817a30c2c78737ae310a8496a66a5e9093a5257d6644531d2009212beda2a83d162f073805eaa16ddfe404b555bacf83f50f02f540c989912fce49da0497e9c63abd26414263e4e7ed15c615a281188f41e77622fbb342cf5ca509d4c3adb7714025118dd138e711a887c22b4cfe4e8f9a3e0cde7ee17e21c7eabfc065458f280aba249c0cf030f7aeb598941d8a2818a9b7b883e71512d317724c869f5b74bb54564b402313cdc8d1b509d4435f6f82d98ecdd5a422e5c479c2640880b294832e9138c9ffcf1ee532e2b0d86abbf77b6cbb632f7dead12f500ff355cbf49f99d6e1ac2c902b730509da54c13abcb605c335f5e0169cf273175bba17dc51421b52c3b8e10ceece527dd74ce6a4a0dfeef91e9db57bdf8a2aeeff2bafdc62f20533b51c9eb33c14b9f8695454413ef514cac1554e2602fa16f3abc4c74532a39327815db9370f0d513ec6c532ef994edb55a5e8d0c2c4acefc8dcb7d7271ba47d3c53dc3c7336a51cc1e93aef2997947826008920af80f375720f866621fc1a338365ef22b2bd8b57c132f6643ca2efa64caf4d228970f63f676ad533fb5575e475f35e41a5bd1402c4704a31b2acdd84b52b31a2d9b3209c656f5fac7724f5b53c4fb0a35bccf531be0907fe882df53c6ed401e2d5e9a9b0e6b2c58390f0a04b98a18b5891abb35aa9bd3a2f7ac60a71abd6013baf52505c200819855daad35b7ecdab02fb46b64e725395ef9d56eed1592e89620e314206036957a6476891aa004de53f4dae8ac4439c4dab7526057fc06fd594717d41d466426c7ae9282a8423c1dc4952ebc34acce79383c34476ad2ae8d632aa931d013cb5db4b8268850d834b629c5dfd508adc129374c33067c150efa1548eb94924d89178b2ab223ddee67d66ea12301e9fda1e8adbab9b7dc09aa50d9d1c95588bd464725d7e38c2e0d07e6459456b1f88973d6c04b416def7d58386547f77d45d2e6591ba34a27297ce5daefcd09db7f32e62eb30ad39ed0ff53bd4a0ee1fed6c6e6008263c0be1effd0eb8da475190a07c3fe7412ce87c5b8882fbc8678442e5e22d5fcadbe6804177c0c1a3744d230489bb69e194d10f8633c3a50ad2b9a728da893bf5b701442a877f8ad459bf82e0d8916d4d0b3f781739052362412be06a040bc00cd7991e815e5a96394cae9553a65df7986a1615a4b371d92b508419b2bc8ae4e0f61dd5d34e775dc7289ae5ddd781236423593df887a7dbf9c685bcbacd6ebca822dda49db834d5a3b2d902a58750c4eb4a87533bbe9c85ff01303782d44d8f1976bcdddd406f9c71bc6218b833828058a7dfeaf96bbffd0b986c8012107603c0d53e070c1885c98b6520d5df0ffbd707bd324a3f6ab01131aa856eeb80b14be36518d9f0736578c0d1ac2c6809370c14948f056f4a865fcd75947aaa0fdc4b7c6bcbd258bf081f3ace41898e1a36ac532385498315033b7a01e6a951ed754b7296192e4abfc1ba81f
4aa669c20c419326ea19130798c46183e98178ec57e1240c2afcbab2024dfc60ab52bae68e3f131b4996b8928606ad8091045cbbcc3e059e135dc704c3daf1631014825476071aa5dbde1dd4c267857ae112e5a8888095c26b3736d242963e4e082ab91e3a9c387730b4df08cbebddb0ef9f8acb00f77348d2217dd0caba0fa5ab7acfd6fd39674e26fb526550e55616beef0df75c6902a98ea6c9541ef3f9857002492e8a5ec61b1a286bc2914329cb0b97c3f219ea885271378fca8217f4eac9ee0587092560e3dd30b4f8abab175671a6e5c52d22a5eba90b92f2f77d1f00107c15a7ca57a8c344268c76346c8c27388637eb5d4f5a7f28401b38c29d3dddafa5efe603b2e7fc1ad38c6e866f185c485546f1489b0b0b5036cb47777b37e9bf0a3f068b21bfa12c4c4a1a6905370241f7538afe58ba19f406160bfbb81709d66270158c9c11cbec9b85494dd155a911ce0438a3200b614f49162ffbe1f6233f97557a0e1ec6e2470ae2c668719aad0505971501327a4603a7a09d14c379f8f28eab0b9b83731390b6e8fce4ffb3370da806e03704635b4ce59dcb9a89da783547609773e8c404f783f8e6da54b7be422176e2cbbb6f9341eaa2374262dd5d9fdf7c7a771a81be0fa1fcf409edbcc0774e4a8b3d8a0cbd6e6549c58d772c7c759db953e6b00206c6484b2bea2b4a2a10b73a8dcc8f7527588d5ef6f8d62d823dc5cd69e42fedaee4f843d4329374dafe114ba4969959ba8dc6b9e4cba00d15dd071c8aff4cb451da339720e07d10dcb4630b355a8400d97bd95706486b763684829d6d79ca2acb8e50dd8b5eea4e58f08eaa33105d29df31ce0523b10b0ad7cf619f034e16460daf0a140c93cb9cb64edf7880786651b1edbd96f628eb9d9a482d3bd5b5fa010e2d8819e965b8a5b5cc0523cbdd8d8d8b0e04a5a2ae7efcc97c685df1610276c6ddc9e81407f4743dd0b3936206ba1fc9050f1afc2a28b2515aceba4059840c8a5496f615fe0a63d4a9a4ca4e92cf7e36552b61e43e6dce4d68fb4f5f840adaaa8c644d281fbabd42df6b76629d56ae6f6f0b773032dc63cbbc625dd47c8086afbd45d9c981cc9ce4da22570ecbd9354b973ead1177a8d5a391015aa3837c78285b0afd583250ff1c5922ed42996f90ec6b2cc7233a16596bcd65a6600c81cc3c88558eb3b3a4527f060593439408316a4ceeef0a8eaa7db6468f33f7bc9b4987b38fa6cd560fd200b4f68a1f2334130a888690e7ae32e663f7ee6277bf1922c65a1546a6d9d331b0501edd13f4ce7c1f751d500b91f69c10cc4f96cc89d5a549b4d2ac8e17bec99a622aa2e73db4f623627224d89b79b1f57fc55e81953c888d6789a9cd6d10ca4737f56bdecd1628ae6b381105878d125a5003e936efda6141376b4636df0694a97b03850bf31579d74d86c6662d8990eab7172da27da981af067533a4023d0e9c8e8b106cc7d7795aa8c8b3f0f1b018ce429cc23745439a62950ef55dce8c078b8ad1f074f0c346cd529abe44cf2a7630c8c4711c48ab8699a21266882cd5f88b6dcd1287e49d3966619235ddc084b9a0114d49148b31575eb76ba175b4cd37b78728d3aa501e182e202313aa96ea2399abc95bde7b5578a9bdc8b7db2309021b50dbd8ac099303849af2fae2df8b2e4f7d0a6f886ae88e8015042fa31221d4a5108b02bf6b565987234b608222907292c1bc241bf36265046e0f246406eb3e153eb0c732e0a0545ff58d11f14a5145470eeb8f89ae67127696308ebc183120a2670afa8e839a1bf6f2d24306038917acbc8fd26b720810121b11b84f66d921b26c44f6056afb02ffc2f0c732f52801627709f56269e1c7b574976bbcaaf026012d8e2217d2124a14a6640df3fec18d91b782dfff85d862cef10c1ec478a60f9fae3e1aed6537ad67014743342369f229ef68fef76e9360c8c08e7585f9cdcc0e90570b828f4023d66d7492667408052a06ade0ca2b732745004b3511406f22d57410a478ed89b92c1eb1ca388e0d514236328b1dcc75aa4aa0657805b413c21d31a10d4cf7e42d785dbe26112bcac0b6cc641257a5f89041ee6c705a62ad9cd122f859768b77574a31cc4b106aefba3993777af58e41f1af8d0787741b82f4d90f1c691a2970bb6c1c21c446fb4d5f741a769ce116ca4c0cba72dab87a70e2cb3dc8f21c064b3f06c5a059a0e4281aeef37ab607e351e3116e42bbbbc0ca783e860dc740e54b7b62c8db156f1b54d9f92d85a6d0b4e8c7d1669ef9bc0d8a0c82da2b969a113d6c2cc32394b9772b6bba986c43382a8244823a5dfac2dd2a5f0bba1f7273214b33c1f145120da2cac0677c395401bc4a4eb3ef225ce624d61f3cc679e497a55b96aef78ecaa1688632ed599e06b5845ae786c05bbaa61ec164c75d438a59538fd74d5ca5fd057b1b2cb00632584a569e14da59df4c2ded0e968e5b95bb4746916a359b99dcc815a981081fade4aa9729b97474bee941c18c34f153a
4b889d5e3080900f02dc63d511e4cb1f4f16f74d7a5a61d03f8caf48903ff8fde1b21239f4f24c797ef421961f505c2ffa0fd6094cb9837dcc474fb568c68d118cc845303009229858702f5c9d2f88881d40f8e130c070ddf47a485195c1cfb786c115bfef6da4742d22267386cada57f0226904c184886ff7d6b0db32f4f4567ff4dc9d29e34e256bc8733236f473f391ff8e3d7b7adbbf24b07407dc7f88727f20719e0fd909faa08fe829f0e9c68a98943a3e3bc3f3a2310f0aa4ea08f9c92ac406d71a5cc57b35d7fc3c6894ed34f791fe0bdfe8d8dc4b67f92903aea9956548bae2ec8045262dfb1514ff59e9467a2d14d389b01491bedc1230f10adc801efc6f836122f501f30d7751c588ffe6c063358206436792d92e3b1cb478441a743cfc0d9aea891de00c341d0a8cf1efb47568e1b03688aa8b78c480169f27fafd940f655da01d0ceaf4436308209a2b6bff9f19686d5324e274c64ab8462916a0b26396b322d57e9b1853874ebafb85c5229bdad85a7d5efd92b8eb441a39cc8ea3c35341c501890cf349a93e8014aa5bbc4d130a7531cc5fdc922a175e6861b765bf77a59e5a638d97f0bdf1dffe14a5c68c18b1f01ab27dd66ae405acbf2d08b345ab58bdb23a61fc66120b0efe3156b297d04e9f4f940468db25a8d3f9dce088e6a22740e32050ea0598ac12ac074f0db0f7d34709fb85a58791b4dd682d9ab38e1e4cba52e60e9d2057892e0995f08bc3504281c5435daf794bfcf0102040de620c1ed220aea8272644f977080438bc8cf1fee272ec5d3c63fd19242cf5d3eaef2d921846a0518c870490b6802e6bdd8a665dc34b5e4601e54fb8d859084dd04b2a85db284aec0695847ddd321ebed3aa5017641b721c908653a138f6f82585b984faff85b04cea61c340ae632d0bb6bafdc5f3d9f5200fc0b10810e496c46c41b1f3e36331426b3727151a91ed440755d320b46aadd203aac70d82bb7853f0f9ce5d8cc04b93bc70e3b9683bffd35662f63ab6f9bf6be45c1852e68e5d14828d9b135ec3c293768e80f3dc0cd7b04cf91d00d0eb313436cc17dd84ab513fb84bb471da971dd8d725f93eb26a7188be1601f0b47348f2e55da74cfb87a4223d11e8ae3685faf39dfc82f318e4719e7a25ae9f1317fcceb6da6794daa5bcf58070349207c7a9b0dc09c06814a7fa1db962a00b344b8e287d0880c8a953b472bdb2f354126348c31674f20ed0d29a5c9a194c0c10f7eff17964132016fc23619ee2743736d029b72cd116241bf6fb16323e00e2c84a2ac8a5d4815de67040c587d3031183cb046ecb554341c65004889545184a1cbceedf3b00e9c312948b518e640658553a05e9afb3029de76aea2ff1104782c6d47667876a4a1fb053f8f4424b8960f7c332424eb52f724d849c00c88bc9350c817ac0298246ebc44511033e62b3e15b6a1d862dc7365bb4e08af6d7b82f0154b09e9f48326a7b880edce4c12937174fffcf3d880ea80ad6c8b51d25896831fc830e347bbebda15f1cb2195f24d2ae8487c7f65516228c0842f8774e43ebca5f8300e0d111c701e140198191eba88414b8fec58470180b1a3a06a7af406807657d2b0c8b4c0a86c58652e930d1015830688993fe6835d669475008961faeb1fdaa477f70a285d6c2d20dcc571c7823a240cd4bd0a0a0ad9c57c480be56bcc0e7304ec08d37c28a0723bd3d5dfec6b81105261f68d2ca7690867303b091a92cef1a9989109f5466e5dea227e5cc68b5787d31cf9708caf9f1f27793359ca58cd2de18a61c39b9224ebece540b7a91d1883308f5160f66031a6a9595c0176c5bc27a445584fc0f87015e1b8c43ec87aefc1f30e7dc9b4a64b63782e48b241c4bef5598bc21c448095ef6a95e6c921c3da4c3a28c1dd452d5feeb48d284dd8456b183de0ad9238bd55af2fa3424618bd2591a41dde6d1c7449fb215ceb41221d73fe423ecef61805692599d77624c98ffe1a9ac13b16f3251702995126ec46ef907817816e22a1d855cfca565acc3e6b42a925edab29328a34903fb36c26d5bbb8b509825dcad9fea6916993dca15bda8145c4cb94a9d98c7ca0e04ac4377ac31d86835a74b50465ded0835e8bd595128fc0571b0d88a562bb99e8f0cc6f5279d4aa205e54595c73050ddaf29000385b24e646e06e8b95828b3fb8ed201ff164295e32f0b71a66d0454964632fa324092c6120544a5817aaa194a2fbe9328a0287131d6d1d4a5e77f527865264f87067b8b9e7af83e69bdd078c321d21d262a22893e46f1d403f0e8f47c136d9c68ba2d24844e74443a65580e82634e5aa435012a9959488c06d08d6b7d2104ff6ae41d8b58513eff46c5dfad314389bd6b1db9132b48ce2dfb43001d9d227c236d541f567066d21b8b049b91660cb83691bf76f79845e5d092742e92a902a79136bc87419e505e2e0d167d26a05ef4f4f7081c0b68730774a6ba758ea0212ecfb90c597797ea63b5abefb489fe9f122
89f44d7bd50872304485099b352ee1edf1d5016d7867fd4e4ef5116f41825404908706b8ba63e121ad3379e1c4ad648295d6eba316f16ed7689943f052436dbeea661af23768b2052ecd042a2d3dd52fc2279fbbb883905f580262f882e714785e3ab174e8bd0c03266ecad24fc8bf881f4717f77968041426b349ab739e2e18ea09e74c666d26257468bc931ea000160d35578539558e62745356ab6ed4b9d41157fc1cfcc07ea72542940b528e5c93cbd38722ebf1a2d1e797828b49f75b04da903a9fa0a6ac01c9a4dd699bd71a418e55d1de86baede78741928155fd52a0e69b4d9bc904419531e56ce2038d60f145da63a80cde2fd222bbe8ba80193316f5d06b976466408ee4c5c4bfb7cbbcd59f0e1a15088dc29453b16754369c558f55a84e30c159c1221d3a5aa79db93c74535ba301bd666177b3db92a62d148e7049499dec56279497e4f81d9f3e0ca72f84e3c6294ee258e86e6b6009eac09050148310af583b26e1a2a3f8b2a276bbb0448359e4df1c2eee57e8348fe09bc33780ee0003291c6c45a7b68a941763a1acf014d00523ef4c76fc9d970327782fab25c81e9e1db91922721da59f7c23bb841fff8b96684391f5c43cdef3675f359151ddc2b633a74fce09b58ab78ada28d0ff216d8a0a76ee9ea7d84e9b177612a52dc79a82131b5619e712b54bc6fa8f7018c8b232ad9194cf3799a8350ad5e6a9bfdc2fa4e17656dc2d697bc631de0021afb9626c764aee77eea14ccfc53669200dc1b1b478a0e5df9b62be09999635db2d464cd8a6c661e697cb3a920973ae7e146f295a27e31863021f3ff140d961a7d3d2c7c2e27808f70c85002c39bdbfe91b385d9abd43550fb221fa9b75f87dce7931c35dc2a9f49c66d42050a2bb3ed324644ab9dcb7f48420223c4197260e7c0dc1b8c1f3ca92d001dc0e3f41463a6ed1ed62ded950bd6a65b1dc72ddc46a7696378bc7572346bc7797b13970993982d27af32fd9bf613a9cb5ddb644990f678ac0d58ec6d7504f435deb96da8f75419681753baa5fc238f8a91f1dc402688ba687b2f890e890a00e1e8e9a2c02f30409ca3090b1e9ea2d0b46a1c07f4d6183b24b62b6edba7b6c0e93806f8e689782260eeb31d803864f566905a5a1d2c64b6ec6811a00ccafbc5aa328c8a367125a237dcff751b9dee74a7e50b2cf183e617d8c77525d3d9817abcda5a38ee8101f04406bc7a67ef792b8541ad5d7405d12398a697c25e34ae591a5924b6f19061bcde1adef1e4ac04d6ac0b3d509cdefed8c72bad082422ce72a78417be58a9bdebb28d77711bd9a1c652a9d3d98b5085b4afe8b20e0754aa85d6a4042a83fa1cd27e2547d2d337b28d8038856ef60c83395b2b96e0fc9a9d39265de270b2ce592259855e49e3fbde7d46f6c87963b836181079aa6d9a2e8c0e9ff36e691c71090fb6e3dbd9874ac00c1bf89c19b7c0d6c0143550452d9debf4c30dd9220cbfe41fdb15b1de5eabb9b5f43e4155dd9931020128d2bceffb47ad6d600e0771f858878266709c24cf858b346e5c33c3d59b27d5b66c9d2cda994ca408be239b9726a08208c177b24b49651325aeb87d279690d8abc4c79325a37c1d095216ecf9855f971e8e60a0b5097d2ce80300cfd8b5e08550f45434ad86c42a702e423b30a511508c14200df59fffd042f0cab565379af6d62a45ad8d754533c3aa82fb83d654aa17a653c34fe576cb1252ef4d9a2ba2facb5064d8adbebc8d53f60b33b52e25d89616d270d78b76b7ac7a3f58bd8e85b7d43286c13021199431cb613871d10381820511f59ef8b888ca3ef58ea4ae203daf52f2017caace1a57e2a7f358cee6ad03d96fe4641952db775a8216c4ac5d10abba8650ec0316c7a273ef73fb1fe0e069eef434a8f0d3b9163dd35d88437d48ac32b0e1c161c663c2e297ec1890f8ef1f863263ddf73db0c13b4a79d3bcf59fb7efc97b5a1d9cff03fd7dfb822f71b07095a20a8fb762c3108f0743e982dc01248b99de12dd2b31ff3b9d14f6c8e8b3ae5222ab3bb27b80c306a8eaf506c6c14cf11fd4e8a7ccb1e017f3a62802d543b0e379758559381feb7e1f4b6ad9c13350a958f2fa62115e1a1c50393c308bf8f7e1c32c7b28681dff98cd3f2941dffb802b2e6081265bc99cfa8664a1ec89e615725b00f5175f2d38b036c48be76ae4c4c2692d216b839b3e30c3a3236f337030c3146cfa9f6df2208f3bfd7c50f34d2d80e53a35e373bdf112df7565f1cd84097f167c00bce68faed596259d94acf968f6d8e117afb3257416d3cfb004f1dcccdd19300431c94e2ced40d52cd72a88ba71595690f8e38a64bcb919ddd7ef9e1eacd3663f4bbecc9a63b51fed30f61235556a24360917c72e05dea403347e15654d875eeb2cc3c188259efe51e302fe17b981a1a8f53f62bd386ef01ff3e190594289ef279b1b27926bb4eb6d87e001d39adb5899196e5b242354035cc69ef7b44234d767e354ce87c85e5763bc4eef
f354848231221fa3b735b2f3422d1ce59dddc4cd007aa416964bcdd29c2360702d1f295d4bb4294d61b99650f0e551e7521ba4677e19e916ac1de401b7ce1af09b490428a5c0977fb406d06e0d05282aeb64b12f6ba94318e2c23eaebdb0c5547084b4a3f9c672d0b135904b08960c8594eefb3335d425545ebe24e5194108a97ba7d248cbc1ceea856a687e66c5ded328c201925a78343b1f1a2b6745772fc66c14e6b64dc309d1f5f49b394202c65e39e04e5cdd45655cf4c577090cb8fe7985233eebeda51e36807bfe44ad14dbe1f147eb5997bf7b61a74589916f76ab193ad689345b8f73922da6f9108249d4b096431f1adf635619f8107b30a5d40cbafa9e6770c24e3d08407b8867c5c063e58816bbbfbfff9dba50a8c6b4a44c7303dcd8836b3dc7ec020f4ec0b3f5b58d2e159be854f3c2c80e6a651e92b2b9dfb2b6ebeaa29270d6a0978e6aa20c4ebbc4cb62c05e36080718229c3ad4b8031edf41d20da4a29a6e94d8a4a1ce0817c4f1d153a003886400fb1f07ee2d486849399e0bfb707920d6138066d68ec85a25b9d6d4544e33d87ba223ce194b955061c2bf17d44b1e7523e9d12fa123d6643af406ad6446a6aae1fdb72b02d7cb9ec9f66ae23cdb36c7ed9a043080e2941336acf7a4fe0de267fd09ef80b26792e58702f4b28052cfc877e4b943634a169f7332efe433da934b10019f21a6a54f3420093fb80fc1620d87e75ef654e0285d8d6ceea5b032fde76b93a9e0e32afdc18e48669f7057241c40c3d625752a2f111a13ef67eac8d78bb8f6840dcd49d7950a24a9f78dc11ac5b514f0539cd72fd045ac6c0303aff8b2cdb74592a8440e457c6852cae90d3cef7e119523233b4f6d27a8d76522294b38536946b72c895752231125e2fedff8aa578e8fd10df527fb45e68db5f1983818621cc9c244b4514bb40d5ca2d40826766632b1874d6e83929109f6b3a10452ed9280b112121c667a840259cfa2755af1218b39acadd63a2d7cc8662e502668cbb3ed42a06b2a269ad0108c7cefd415299b944e94f1d403db3431e11a89b08324738c6043dec604e0af294a652264bf88905b4909c6d941afb17df8cb799f11573ca743beeda8963f2ebad8fde45f967dc61fa334f8c71faadb8346c9d2a5819c45e0c51a1edb4e1e1422e7262c138504afe0bdf27c9a3866989bd38858c933d8989c81585b78beee04ce064f8b1b82bfc0d930da5f93acf3ebed80195b7af40493827232f6216beaaa7e31090fc0988134308611198c317024bd5cb07a53180cb0612a2b020bcb656a198a2b3b6f8bd27d76baddc49a24177660f7ba899dc878a08351bb7bc114a397cc50b6e8468197193312655c82e20137720e8ece66a5a1ed48eea02f22e5814ed74b5ec3a9578cade144e309fc48be7f8a60270d5818e9b4f793952c3d89df0c3967741b8dd4ec2dad1c71a41dd16f097ac6bdece69e40471ccc2738c1866ca8eaba74d8fd46e9dd7141682dc382df739528f2e2c3386d9f25643731a206a07e91e3281682c41a77175e8788fb831fa7cc5bcc8fe258b36c91dc3b9ec85a376ae0a93a0d4181dce148b6b63a184f6046153e451b711718b1cdbd37c2b805ba97a463dc023cf8151a7f818e66e87ee515d7c3679662dc3ae37e781b6bee97a3befe2026bd1336d3460e36bd72f240874493c747929358bfd5baad29b43da9e75aa30453e286b2d31cc136f01e4b5cb68d525cd053a14f8ad7ed56848521fe9bb6f5a1af8035f5a1a1d21beefec1cf797352355620d6faf64fa3babe1fcc09251b6a4404e2791ed9f09501fc10b56dd6e61c78c42d165f81be4861296bf36d2379438ae3f7a6645ffd1013df0a9abab41011b26ead993527e31e352a1d3ae018d2dca24505b508f407f35f5eae3f58c9ca3b08d235dc4229863bc94d57e1ea0719cb293da61cabe3d71878b2a21425ee18ffeb9aaacb918951188a754cd5549f636886c449948931b7e9d66870244906460ebde6a9ba8ca8a029119a0a0bbd2d9ec6452345a75eb70be1de31d0b5377c7286254770883eb54c0b8e3f48c4574806c9cdd370564822206282995f51923b39ca6a62a553782fbf8284e07c6e582c4da358f5e9c07da6405b9946a6f294d537c3fd9b5889a18a48a44f3e173974c25184d5e5114944dbf64819f178cb644c2239acc17f5c4fbb15c28a0e3090016974da97eb6a87ad26d4a63c52b8ade782d667b15c8fa288274deea9b9f6287f3dee429ee5a45e2a694456727c2148b508add1b68639bb477428193c9672b290084354d305b91a5953af8c4b9a5e4f74907869505274db9bb80e4440d1a2d032a3c33b060d1016ed2264791a5bc8417d7ac2c36a6956b25934911c4c9a1d0576d8f841805a41478b1652d326501190479b3cdbecaf35d7f19a3620feecd378a6caff1dc7bef0383fd543afe095e857d6f097bacfad6d0e874048a1c448ab9e9c551e44281e70e5e02baa2a81f0bd4245c700eb043
25c2769db68c6218ead59611c927deea7988a0c51adc8cd4543dfa08b107da372c13ebc17ad535d9c8942ef0f47515467c69a1bbc229bffbbfacfaa62ed61659374fcdb2949b7f791dc39ebbe45916885bdaffc4e4ac9984643e589ba06cf9e26d39aa5f457b9cc5bc1c081b6ce8ecbb4785311c59d2b17c80cf04d5ca93d0781810e79b24c2499951f309353b091b4c6478486654a1df52e83e6283f1fe0014c42c8bb12e8a517f82d71d7fe3e977135f5bb9010e0fd831be2036924a2894d8c8fd2f05d8008d2619a27fa1ee738cf7c999cdfb8e3d26ff4232fdced97d73023038ca9f711363048a19d7a73d4e279dbbc7fca890ffd3f38abf0ea827e2ecc158f552ef80f83ec116d74432975a2de61746e93fdf0ff6703128eaf10964aded6ce293320142b342da80c43e0ffc25505ebf6ec17562aa0ce83773649b4a0d0856dcc6742a8d5ca11db971b7a6f16ff0c56ddfb6b276bbca3974c7cd54b019ed3449c42cc0a894ac40a5ef01263c6d35cf92545664d6cc33c5479a42a62c1a67ce1819850cfcf10c7fcd049372f62c15274c1b5cb700d2756b1664541b40b839fab1725152165eab9de38dbf0a6421eb95f87f48f4034f8c7fe444d9c42536e1873b40d68dab43858e5cba2c02cf34db63a77a04b92415475af6e60822539e59a9308ad31b64b689b2c2465f1bdf1cdc49a25e9f09df815f436a5263f3b27cdfd43ecfb5daa1ce2d58a890fea8ecb0a03f4a4d75e003a8e29877f3e3d02a743d6d78241534da51c481f98355288517a1e35495753b0d59201431cf300a626467c3ebdc5e7eb657cbeffc7e7f38f7cbe921fc77f10f84e04c476a390de029854d186ea137fddb46e5e89f43bf80dede165a9be999463c78d7aa6a924956c9b18b46ae387177030eccfa5ca9fcc299ed834f180b92c246f03864b932f15cdfc54a27110e41baa7e092f82dea80d3d3e24f561bcc2bae3cc2538220c89489ce0e94783749d30c95276ec3d34620087ad15dae4211137ccef9d3b935a8f6ac3833c1b0564fd1481047813faf2d932389f2d54c0a8461c8a69457bc499aaff45657cd802f196ef8376d7a400655f341b81e92aa8b44316e4c1987721eefd259028c812de0146216012adfdecd697e260865445de6732cd147bfe372f9e313a1ae9bd938d28af7d1071a3d543fff29eac9a57d8bd999d6d6573fc765a03279692a2f00ee68cdb84d26a177c204325e5dff61f5c46615ca99e01dc6e406ce9be3ea1fb2dc11c758499edd4b8cd7324363c7e98a37c8a17408b9f01749f04d24424d658f2d96fe4de3f2ea91df980ea0642060b939002572aa0a5caff89198db8eee6567d5169f6aae46db10395fca5fef067345ae7028c64c8f515cd621f251bd6ca9c27228f82f42b7cff42495269cf20b426445a9005b1483ae66ce14bfe0e61528e905cfa15d1e4548701d96e1829aafa52f886864aaba6dd1311ad59a52192f2c73bbed903af12647f85173ce0829aec12d035617daa256df4b9217f62c1afc038560c53679a01516923c6e5e9e3d2de372a43186876f1460c937236c6e171b4775de3ad5397f73fdc42fbd2e08ccae92f0bcc7356baa1074a8e27e18c03cdfa8d79460397cc8bbbb1da93f21662a91d8f810f6a69877c88bf3fc6fd5a73d73687eb585463e970bb9ed25bf06e53cb2fc81b3a981a27db3b005216b77671ee702819d0d7c80bf2bcb4989868a80b746cb274b0213ce7d33955f13419aca05be1adfd05df12e84a792bcd5f6c38476030f9c2c0384ea5a49f17ace0d3c3a8b69a88f7a44f1aeaf72050957d3542379ed589c90eabf897820f60ae46c66ada9c79828f1be9dd25fc41c4db9a04a0a562fbbdecb2f15ba7bff9c0c80b1a4f9f7c8ddc2edc670a93065c45ad9b8a78a2dc2d9939e995c9a1a2165ac733c4605d55acf9eba2937ca2cb0722c3eb3cda3b5d3302db3fe015895112b273d95d98e01fc09f96b8be8e4de60e830a02d352baef6fd9b9262dbc6e63fa3e22fd376d1d9622b59e0615e6833454c417f4c4627bea9663544df69b8562e818052dc66742d8a93d18ed436cf86d46f6fe234e88fde268dcd86121add067a6cbb6f6d691442b329fa69e0cc4d4b6090a57dfca59f3437d9cea51824f38e3c628beb2634d69b8b13814ddc0b77f00a344cdda0c93492dce092dc1b7d5bd5859a37d13a9865bfd83506f9a1efbfe7813c4f036baab5a4942c124e6788855e70022556e13b71c5dec3ef2c3f7525b4ade310f952467ab18e8ae43a1769a3543c384f05846eafde047f411f4d70764d8c99c2b44ad8a83ed622a704932e18c1f862ac56e8b445e74547e545b6f8f041582a01ef1edd8b2fe708bd4250309d0c04a85745b91e15c6e6b388e759b84f252cdfe3224579d38b7013178a712ac1eb1c89f54757cc429477d93f59a5e2136b75900b0a5d2a327edadab4385985d7d7efa2fac34dd6d2236e501edd8c74a6f2d816adbadf48b2b2a94dc7d30
72613dd8a4c379c834fb6f1f9fe9882e79440eb34c89967b238dd81f3bb2a9f57e6427e243498ad78191d9843e559a0e9e373b949f09ce10a43e99cea442099a853e4d4496133545cd596d527e580b74ab352f60301e330dd5150a1b74f6d5e2cc736d4544ce6a160dedbf18c2dcdaf54695f14ecb3edca5ff9f19929d6278b90d2c3471a3a5baa80e72bf8aea2022c53a578da60df5355b00989a52ecc18ce246c203561c561e5a969f7e35e809a403713deb9a9cd25b6cf6892eccc4d662645ef96d6c3935aa063afa3d11675bba0db21d82166dac8e62e38b3cd8c4ca45a9eab41e58c367f5a1c1568473db72a66071c2d30a82092802c509bc4cba46cb2b2b26f0ef23d6e043e0e61e5e2b33ddad4bdef5d12308fae346ac7013cf17ad5dcc5dd563959bb73b37e88019f2b9a260e1baa4611bd3d8af1616edbc2988697c6a94866da33af0fea44b2591b50c0b5df42fa3d9c2b603423b37e62572135c16b3011724003cb7ebf70e6abb7f7a654f71096754b7ddbf15e60e1c9df406808e9b652787466edf5270b5a7d67205040a89fb4abd567d8fe1f04c36b00c6dba9a9fc430f2051c177f0eaa558c732ce8f8b7b4a81bc6508ccc6ec324fa29356fe4cfed07ed2b2569c9bc7f98141013d1a89f01d0136065de8f1a6a6ceb53a102a4bc8841239c70d7de4fa1d1a6ed2395d821fbc0bd39db7e1f155c2fed0a364403728e2c46750c404cf019fb418f6944c7bbcab90c38c7227202fb13cf87bd1a78acf019c09ed7de0a2da3579f1f735073e938aed5164ee202656c191679a7f37d205f594869d84989b2ffeb0791143f56c3b358df2d904eef892fc10ba63204ac63e44ab241b3bdd4163a3ddd5e9f24e24c7ad0170a2e490bfe6a4f6a80adc00115ee0f0d7bd7156e5a2562e32c7ebdd40d77a6ac133464949076e74793c4b8e6d7f7f151e02746e87b12554cc032d1eb4025826db91044070a13fb2e2f33cad45581383e4bd2bc423126ab28ed9126958dc5fda4940d5541bf506f0328ae9a0991822d0d20148ddecb7ded3f165a858c94461975469760bb5db6f7ddae16dea70ccb5532cb64583d604a2511ecf193ca669f3d22dfb859a81b171ee78e80c24db39e3d7967b3d4ffe8d1dfb1e84feeac7fe43c86bd0498ae05210bb39dd3bcf5b9c821c2864ea6532849b5b2ad47c46b0d693d9fc0302fbb7795d02e61421cd8ebdead1004c7b2841ab9fcad826e3527f1cb505ef44a81547eda38a94f1e0e78d848130e1bcca3d7ef8742be65a06e80eeeb659903a209906e1c5ab3389a95eade620f34a516f799ebb0a717c6db0b118a55585f4a1eed1fbead2d5221b1f425619ec048570fe02131211a678d37eb674148ce566244dbc389698754e50970f78a41a000bacfc536e6b6877a132546675e6c38f61f5d74d346b39d709efe48da4f9ed32d9d4618e6332fab4914c90896caf1628a4fdbcbb8cd47e04d8be9eb157495f319e6df1903602edc0cdadaaede08af2a61c65a6a5174c32d56c3d193e7d4868bd3f3608804d22a928c0fcfa9e9d98bafd8e7255ea942f033426c2e1575cf36d8ce57e922cf14fc95fd57e6997ed6b682a81c22347b02ab131592944a075510a09dae20e53fb59d8592add93b656642bf1552219fb22af19e363dd8e4516d5bab8942bac7c3e1459ba8a170d674a4830a8cb2a04342e144b244680a1177e6f49cd61f84d2a44a3e61766cf09ceddcf94ffee655779096935f290ccde5727ab4dcc36f38aa09317231cb1b86be47aca77c2fd7a647236bb5875f2461eae9daab5513e646d9737d985d696becafc86bda5d7d4272b33eb0b209d0c00b36bbdb87aedd92bc230d8da6ecb29d438913542b950f4dd7c1a0b499e98309405e62ecd7ae02268f6a5ec5cdd9e6fe19a9a1fcd9ae7f2ec1095dfbaf86f15378392ff084758a7e50a6273ea6d9228c82309e9b76d2235fda40f58c32dd3c47f057880931ecdd0c4d202758ff1fc2413ca5cb78ce0f5cc06cec8872fa605dcb04e3eb46197f7965d0072e016ee1378f20f3e366da74e21550378d132b197a822fd8d49d40392540db25e9749969b20076072149f3b95d97811b708606e7d838ad8a3db3653465fc8ac672fdd5ca403f3c9add82ba6d6b017aec5538ddd464e744e06baa3b63a6538c532fc74247368ed59d14d904643019f6a58ffa22366d151e61e57619cd70325505adba968124c1f71a00e44d17eb529f4033f085415c6c9f344b2e15d7958aba575d69f00103edf68bd8ac7b9f402e007981a730f016a04f3fcea6a615d5047dd089554acb70e3a233303378f400d68da91373c0f52d33e70171192c00cc8a0eec790183db6405b75f5cf32d5ef079c902ab2cad3a507618e8233a8462dc5b87ae66083bb50e0e7f87d8d0c9e8d58325c3f6c3dbfb2f4b1251ed801cac0e1a434a5bffe345a01ca8a90b1f54a6d25eb0147373437749cca8d942bbd0f265d4f51fc44378b13e999304647e4
decb4319e38ac6cfc9c36f7f7a5847c284e6ec556486ef3caec93ed1bef6654c1e4b187bce26a17e55ac136b709442660bcd52ae6ef767acd66610de1d5b2b151c24b6ebf4512b99abf49a2bf244eca89db10e80f89c1d5c9db04c153c25432bf99a09f4ac88509b729da83a4b58735360d5565440bd09c32b136333596cc09da36b0bb72618e4a04826647643da3eef72e8b8053c07880a2e8169a5091b2641681fc35a87c4d49f052f81bd6a42ab92878b5e142048415cc6f2fd05f257617736e4db4078978657ed3887c92c854276e0de80f897975fe467d9c7a812ab94f5b27aa06ccbdb375aa92fbb475509d7bb3779c6a2e35f7c1d671d56029e72cf5549dcdcbd92242d4041c5bf995c4bc8dec1ad80d6eb15cc6b4ec34a7efa2f78d8a73b1e27a52ff22af9cd7e6f919a67f2df63238e91664faba8be786fd9d9b4ead36163778d550b346235fdf296c63af2bf673aaa910ad0551ee4f374f596a2876e85684ab0d5bb3db47287a848c23d91fc0f34252878e9948e5a6d054149cfa0e96dc2c33fadc0f6f55604ba2591aa79cd4d61eb1f1cf58e559592e5454d41a80d3f0726a9eece4ae0dd4db6d6ec8926de7004ba1a85a449c348c55803eb5cc43f52af94fccde6cc069f56bbb7b4c26795c47f2c5091d96698ab9e2aaf1932c47f39f9398136fddb179c3926e55b8a44ca472b676c639c2bb4e6b2ac5d98ac40a3d840002acfb0b206387ca1ed10b3d40c87667870040296383aa6e8110932140cc727f29b2a2e9636aae781f88d8871e509f0552cf8babfccdb11f8220e067542509e4aa3c914982057f9f358d516b60ad442152469cf007b5b763c6df1eeb0bec24ba215dbee21179a9094a39433d7f524eaf89932772a9ad30eecee03283e63a2cf63da91ee409fb5b9a3a3f189d13ad8c4a892be08f979f20308d1071693ce1de8ce6c338cd91384bea30c80ce7d2fa8edac974e1059248b424482cb508ab36505643ee31bc102f2a7b625b8ec07c0d9c9230a1b2e006ab15b9c66c179eada7779384b56198428457227c74cad6e804e26b1ce193bc6ed68346025fd4ab99ef00584fd865f580c55b94304c4a34775e0b4573a0f0b60a03fbf7226bfc358d4e38db73ba58319bfa011e7525f2f821107ce8551f0f1d36f47f9000f30f49502f8df888c059eecebd80b7dc0be8867b006ec03e8f7ef3ee93a6a39e4b618f78cc4ec4993f4fc669c8d349775dc6746bdbef86a71f8b0eeb62350f2fc41a89a428ebae8baf010b506b2b432905d2ec1b7c9076f37674c565845bcee05cf208129228982ff26edd2804b41b5201132cf3943dacc1530d21df85de4b4bc6a1c95f83ad21e1a210505381a86c63252a70c5659b0116e70e46bfd9ec0d73986c5a3eb911283d8741823cc68e30387bd3510f931d52ce4208c7954075ec953ba09d772809a427d5e6415fd3a6df1a6814540918ac7ab8383bf9e1233f5ac00257c7817d548d53c041223e73dd24de9a5fa902508d7698cac321c1831adb3e5568caf5fe997dfb299c74804b9a3837150068cf232d6aa2393a8ccef88750f76d187fc8fb96bf9eb990b98949c50af03f8c030243afbd50052efc8f0c9a33fd5071c31345d268696561775224453d041e14793b5501cf18aa9d9eaa596e276dc5900d70b4e69a2e069f1797583f81a96238937e3187641602a50c29aed0d831dfa25838ca6551c32f5e00b379cd03296ab27c59c0568eb35ae596a09a5df82e8da7c019b3127aeb6585fb19d6599777072b6ed0870e22597b80be5fc8c6659b53821d120847a17a916e98ee4ada24067919d30f4675ccf1e20eb7ae7a7ce8bea5e8819b584ef504f87512163849391ea146c19f4c4a8f1a12cc865937d67608492d597e32b6d7422ff77a8d1a07c3ead294ae0f1eb13d4cd24e80163cb0cc914acd25c601260625981690ed07e46d01f130447fddaeebcf4a092c4ba8f051407d9f334fb9ceeecd52e2ef9cab00c868e2da5dcf855208f213f60a259909b83b29e1502b8874d0def0f42292465119e40d45dab18f86de583311f559631c299320bb85c8cad47357893d913aca57ca7782eac5da0d1e0019214ef1797205132b885211c94c94c324af325f66c2dd679d28a68358bbaa974fbb6726b54f019527089e71e15464e3a3f5fc23ba201a01fc0a03b725e5e0a12046db51af0e8d2acaa03fd07386e808f8ae7217ce332589d1ca7018d2f499a91fff4fd49740f4d2920998dd73468ed29826a0d628741960187ae858e56b5888d53480d7c8dd7f2ddff61602aeed014544c495863ea52b472cf80f29b1171e4df383a04ba8e3d3d531607738eb9f93adbebc380401b4a94cee025fb43babbab4e06271b0e87da621c21e186dfbead9372455586c2bba8c0f25742a5769ff95feb042f84fa82f2e27e4fec9c2a22467d72d2b348e6fc1f429dd3e5219a7ad6edd520b2db76a458bad5a6fb1f556dcaa675b6e5eb2ecaf0f373f101a250ca5
5ea54a7b432bfd6a17e47faa18b40b7552de0aa158ed9ea909b9ba3782f5838d456e5190ad2125bf8b88202bbf02c677ac84f7b7b73facb58a33511403bdc2bd60b3cb1c3a66770bdf598b8be592cea7aaf9793645cbadb56aade5165b6ba1e556ad68d95acbadb68efb56439f4267e7902c861584d4345d957fbd3c931d7f416757052c34a79f5ab9e1e2d6f1dbfe6da1f0f4b8b41781046d308c899f601664f4de887579cfeb2ce43737c73a16c85962c18644d16345d18aaf32a11dc0d2530a571a1f51ca4567f97fbf0c3b90587544293c938830b36832c86d3bc6f89bf2be71d524d3368a4ad006b0ce08766ec25851759dde5b21e9a69b70682fa9bb203ea9f2553aa77a60da9b2210b51136a3cecadef40ac066d0a4b8d9425b6f2e47bc495bb77ff0e3abb2f19e079853208e311fb7ed9004afdcd47bfc1b683a69bb4da6c03c56da75972c1f729a1ecb6cbc2ab25f1747d64c05386136c26e4d8bec2dcf76ada7f5ba385e4edce23c9dbcb39c11e46a6d4ab02c834b7a3d566ea9063e2995b0af924e67f8fcf2bcd5fa4eea6e8d27772a9d06771796bf7121ce932cc958852f5e74f90ee28baa08608be28b90f81bbc997a1c9906c3ea87750c6934d7faf58c0cec4120f4c1c664104e9dd496d41c4ff572f709eecb3a0101436e88ac8debc10483b83ac0ecfe35ca9776c0c83c5d8ba36bcf3daa61981ead577dd47d27b4dc3d94be8a700096c787587453e67429cbfdf79ae5c77fe888d61708f0a0a31411982263b30029732d071caa8b6f12dc8e8db264530acc9eb95a32341e15e7fb1ebf73165f2e33dad7545d2edce1f9344a0b1b4377b7d75cc908307384df542bf661f9ffa3428d556bce07cffe68290332314907d718f913d1cc250131b72842e943725e40a613a78fc8af63bb9867260136e12c5d0649f14dbaf618d9ca18107502f7cb41195960bd6d18854a50500970acb473131d3a5af04334978725e3fcc9e286a5d43de660870a21de8a2747ec65ad911b417c274a63376b535c51788bd77e38d1724b121304f8ec06ead6f86c64298970309f237b4682ff3bba507c0e8986821d5fa20111d75b2da69816043500a4ec94fa365bf55a33ddba2411c61fd15623c97fd3ffcf4c13e91b0ddee61ea1e208f66378e8ff2d0c85f7549caf3d1420bb388b884cb48bba61827306e52dfcb4887bcce15e08e7f863f8f332e9ed58295712fd2d8763f015a6270450f51d5576ba3226b4219aeb8cdc45fa56739a0c67cfed2dc194689e6839faf773bead594feab4d8edda0cd70493e7fca424dd6c5506e5c6a1e97e4b48390989cbeeb63d7122860b380c7b482e8f03e35905fff0feace9e22e7051451581afa4f31d094339ac6dd7aff5515fc8db3c970c0d070e861bc34cb60769034c72503a4d730c8103b0c7840582e926da86d23d71fb8b1c5d3b73432fbd5c3967ee673e8094f62ad854c691ed30ea9003198d6095b9430a363939b5ebd45b1533620de9d0a9f38e67d5b671921d24c5ef0f118eefefa5bf6735c8329b4f283f96f2b6067b32dadc64524da82eb080a166eaa8cb7439ea207b3ea6a01d98b44f179cd7c75f5d1cbd50903bc0f44e2c9d9f64dd78ea4c7cb0010de26bde9eae3e7b4d48812afcbc35760caeca3a4b348710113d2fa6cff90883ec82ad8a0a68c2f365593213fba0d9880e6e603ac9c93292f5342a6d2e8ae9dbd219ad916235b9c265fff424f88ed6a417361eac25153160e8db3647db285fe141344735f005eeba12050fb87ec0d39ac8c5b0cfa451db483d977319cb0c14a5f9d1a0b193d36f16dfad9704d225f23ae4f879d1a43817457811f7b09b4e9a25967c9b3bdec9a4af3450e1c7e40757a07b00eb46fd1ec38b3a710dc8b47dbfdbcfc150d57752dcf3b7051b1e5cbc2a69d096bfb7ae4a4c87aa656e854690b39f265c51d0f51249753b311810d00f59425a9a4c7dca77796ac713dc816df98368d4f259310a85bda719595baea8ab70a3f0d45adf6f7761a0ed19d9d614e51d473f12e998c95193faa035d9bbd62a101524c478b304b2ed8c92f0dd7d5dbb02733462a4c85fe73cecb269581c17947e996a9ca06a2859558a354a8d5ee7f5d0a2e0939eb6e0705e64c90301dc57040e24c4ff0d581050e76c739b4e17df07c77d7d078e9d16f17b98d61d28a2ca35a72beea0b734daed3ebdd9a9cf8e04ee9a688d1b3df4c3d41e762a6e2f1afaa599d383570d366e3fc40b7a99efc8224dbf1a9009b25e5a5ab2a594524a19540d6d0d6e0e4e7b8461e25bdce59bcea5a0bd854198ce7bfdfbf5f9c333b9e771ee3de726883dcf83e9f845f7c39c728bbf678a786fbc373f7764f486546aaa287b8f42f12b39fb36f70ecd9f09e57c9be5d3c704558739ad4a979717977205d3c19cc074b0273027d57ba6ea5c99604f60ba0b7b027382e9604fb2a7624d699cfd9835a55da294c6d9ff79ac29d81398cec64a15d614ec098c2696161839981
3eb0a16163096b0a6604f74e5f836bafdd74587bfecb940bdbc28a55d32516b59fe3a234ac964c9a1fbe5b4897273769476395719e5b9ceac64a2543251e896792d3767bf8b4efc2dbe77ae32f2b9ceace5c52e3a74b3a6a0d0fd354b285953e816a7bc12d0fde1ca9ab229628a1355a088020a74fda4a4604eb027505154acecb7b1525ef90f5c813e7aab30110515af984725a70f8d964c4258287fde7fdfdfc5c53b7f0879e76762fcdf87f1b7437b66f97de2cdad57ee5eb4ffab7f1dbddad5e2a5affe3b6d9828f70e158b10ea8f674ca17f05fa02bd5546700515f158d38232e97bce511a2bb9f53454726b3fcd95dcead9099a2ab905054d546ee1df364094e3db00b1ffaeca245c72ab5804ba8e4d33a45418fc35a68919b99bb3c35f657432a8689cfd2fe6d538a4f86b99448ca99bb3695c72e8fe1a2cca9546abcc754616743f69ae32aca0fb47731597a0fbc5281957d6193fa0fbc525f963223a41b7380494293b07ed174525e7f59898ae93dee59ef9195363133679962bf2572bf2572bf2572bf257258957ab8d921e093e69ae2f272bcc5fcaf2952bffc5a47c71294bf25f56ff5296bf23bb942fff42f25f5f5c5cbeb5ddfff2302fdfc2bc1ae7e55c5b92ef5c99a3c9e55e4d5e4d5e4d5e4db68785098a83406f95295aeefa9b2449d2db66520e9d2437f8de477670dda837924fbafc6aef15b97f875e91dfe624d5dfd5cb6a45becbea5dc8fda252fd6abbac5c48af3c5796e9cb15f93b7cef5526399aa30b221a9e6b7ff059f93fefe4bbce04adb55e71923ff96ab222c9578efc171372a522c9d5bf98ac9c14d12a72f5ab73a57279d5ea57ab736db7eb4a2e97cbd1e472af26af26af26af263a0fca08a8a029a0b7ca088640c5d59da1b5d649abf79bbf77729ef9e73d7adfd3d848f15e831f8e6388c7108f211ec3f177e8d15c6150c6579949e2df711cc3578daf0abfcd4534bae68026913fc41f7f47a6b161a21c4f5548ee98d6f149733ccfeff3077b8277c2f33b576f469f1d4aec1aa6833981418139810d017332bf1eeec4f03c5726cf833d99299130beee5ad45a9f5ed75d77ddf5f7edec79f95f394fffc639e7df5ac83b8b6814ef294f680f7aab84a08a2959bc2957a6ec3c6f6db7f7ddb461a2a4b191b28aaaefbf26e573c5603b8aa2289e6bab3ac1ef4f92a3b80acf76a3e0e9cd941f1329d4833971cd614e20cc69f55eef7661e881a0863d813d813d91da4ad0a1e2d6df66149bf8eaacb5d666521125fc8b68349f77a36d5ef1d9eaaf02f5416f15104085208a104841454de60f01141405f456010116a4507e4c8e66f9f95cbd07c573157fe5927239cf91e75095ea5cc7732d5736524aef6c37ea8de06bd3464a299e349e43bdfe2b535278d2d8305182a0deaf357f7d7a2e88bfb58ecc5d47fe7ebe9f4bf17e83bff7fdde61544498132a86abbf5feffc7be7dfe62418b9f78f17d1e2e3fcfcdb6cae1985f19debaebbeebdc38001c37e18bf23635314fa1ef39d0cccdf9792ef549beff6b93fc3743027fb330c0abab6593493c2cf01dd1746ce3011bfb8c28610619c49e28730604f6836eb078c9375ae1915614e68dfcf8958b9ccf31f58bfcf50fcf057ec42d84f56f8dff3177f7cd5cffc0b1ffb613a98130cdbdff7de6b9b579813fa0df5df67d27e181ff6cf45ccccb778f12efbf97f3d0ff54fda3f46b47fa97b31a31af7b97e1e155414391aee2d9adbdc3b84b1236f9813b4bff7dcdc2c937c1a537cfc9ef9fd0b7334759e7961ba7cc34317e7fac99ceb1773c29c6bd9e27c39d71997b3d4a238f21ce79d73bcfa4c0e75544abb4addfacd8452620e5d25ed73ffc0ab93e5bdf8fc75fabfaf1ccfb5d4a1ba84caf5e72f9a3b547cfee09326cfdd1cf193c61f232afea002214a28543fec0986890f3e4c975b17e6847ee6f5f6defbbd7ec29ee01dfec5d0601744904247f4e5a3401005bd81de2a4d4c5569220abd416f95269cd026aca0228affa543c515577d7fee945be1f7e743f0270e8547e1b91e9ae0878f3dfe8316f14966fcaf1cfefcfd0efdb5993493c6cf01c5e6aad16f955fe4f764cf2b9757130ccbdf93c81fbfc72f3f5cce35a3e3bf9c6b46c97fe9d0fe18e75f71c718e73389fcf1fb6b74ede7bab1c7a42407fcad38a0fdbf737d0d81aedeb739297f0e683799d06fdda8f721fef07c394157af8846578c923fd68ce69517e68ce2ffbe737de950efefa75dfabfffbeef57efd3dff7e5c72e308cfdc9c7663f59e387ef8541f01f7ce0ef3ff0f7f0bf7f31c1b0fe5fee3652ca36a37d6df34b87862539f4f2fd4c7a09fb4bf885e18bea3b574c7ebfae84f8be3e46a122c93b3a9a172549b36f94343bdaf3fa62827edfc921fce07b2ed87bd00c5f9b2d7e9b30af5cbef1568cf2f263f02c28e78f31f8abcb9505cd9804c95f5f4d30ccfbf25c5d575c52683fd776d3d83051aeed0e1f277d8861f8dea3e1c7a40f7cb2c01fdffb
7e450894afa6b27cef6d14ffea57d7159446bef7e2c3982dccf15c9dabeb0a8a3f893c57ec913f461493af2273eb6a0ae5ff6a8261f85c5926f4fbf1c57fe572eba2e27752487ce9d0d0bc1c05cd8b76144c82f6150128df2828a2dff96a8277bcd3fc609e62b488351c30b1de576bfdf4b4f7d69c73ae7befbd6bec799e5793022da575396a6a0a3fe99a14945afcf83e316fba39fac4fa6a8df914a97752f9a98a9426a89aaa78a0021adf75f724a0190941d4ec9084140c1421a5082949f42b528040a538910281242ce18225ae486962da9d402a0328d8a1033a3c1d6189231c518a2e30c11551ca882598e0c01223582205bd054a00015562084a1c41092c96d86189dc120cf89fb1c4138d031e8c31c63a9c22ac22bca20cb1981197bcc8e486c74a283ae9d0ac45bc55f70909510709cff3d3381906a62cb9d6d8d34c2c19c2079fe87befc393bfb8d249fbf5633d14a2fc4530d47b46fbf7df3fd4b9d6fcf333f5be7f489fdd876d8a7cbce450efff799e287a9ee7f5ff6aa494eb875eb9354a94df63d58bbf3a6d76a5ea8442bd176fbe8cb599f34a6bad32f7de7bf59c73ce57bdf7defbea456f5499354d94e38356f2137f9a9a8a8af27ebd52ab96f254674d13e5f71ef96228ae5efca195ea3f5475ae2c4abe78b27c7ca8f81f2a9e2b8b86cf845e5225fe90eadc79cfa43a59aa5f3d4b7c267475b2aa93470c4a9ebd09f55e3cb39555eff6efd07d556205a569f149fe194e9cf7ce58c5a8bd77bec0bdd9da29bbba4ee3eca77ca3cb2e10898b737174fa29b77a947e81dbea1cd82ff22717077e7f8fb21f478fa25d3aa771f647c92d8dc4e6bf00ef5c35da4f0dc47581bf571eba791419677f14514c314515555cb1f73ef5c9afd4eccab547c9e56a42a0ffce58358a5fbc49a898c71a24caf523f9ebcd398f8ae23a5def5d9b3d2acaf33c4f9bdff77ddaacf1c05dbf1a0fdcf543a7a6a2a2b00eebae94d4ca835eb0c603a48f4f9bf859219b5d897f4837e5489647cc95dcda6f258af543bf6f9fd94a888598ff0eed20190606f7c71806467b68d737e7e67c687f1e5832881f3e911882e707f29a292528cd21fefb41d2071735278746b4bfb82acb12f8e8af3f7f1b7fd7f34ecc63a0b5d65a6badb5d62cadf15facb5d69ee7791ecbfba4fef9bdfef97bce5aefbd4dce39efbdf7ee799ee77956ac4c4d4545d52c316a1e31319ea7bd310871b766f2cda77f983f1a27f7c8acdb72e1de883fda35a371f073feeb6aeca1730fed7a8d93bfc78feb4261c5bbaa8887e295877a49572a05e1949ea922434103cdf8b71983ae5c3c93f4ffa3fa5c85f6f3a95b042b8c70c5119090042d4a580213b068c2169e45930e3b2460090f3df890fb41010b60c2809b9385f0d99bdc9cfc42f8ec40dc1c3e353535c59dc838f9ab7c50cc549169e42bb2967cc3013084cca7388fa90f07170b114e6e9073810a72a0c3150be01b83e0cab7c405960882123ebc311ffa9a26000325a040cf95099df14419365da0f173268736c92fa9bad138fb6f340e46552fe6d12383a854e4ab48d5efc89b3c874254f5a371f6dfee795fe736bb12e692e487c92079f5e4afc8df91fbeafc6ece470aedcf1b4172bf7773f6df9bf3bdf88d61280ec1e49bfb2339748373b8c773cc3743f7def07b73ff924364480eed7d73449024c11fc92038a148ca508e771cc771c4f953914f861787a16af5e5796fcefe15f91825cf1587aa4ef23f941ccf242d722b3c2b88deff5104eb933785a1111d98426f9523acd4406f9523a4c2150cca272f9ce18232e6e902795f8ad9333908963121cbc7daff47f6de8b4157566e4e085ef043cf95eb80661fb9095dfbb35cf0fefbef4cfadefb8bae2c17be071f3c933c960fd44373ce53bd09e2cd57ebab84134a3481aa59092594c0006f42cbff6074aea123d1e480174838c087992824228084110e408204489c00892790c80112424082093b1f7641ec22b0dbed50b0abc1ce073b2624914b0202493c452009100491440b929822092c94f84189274a04f1a2069d08d709225ca0a5092b1ce0440d22e00acf9ac134aca77922d6d3d03c0b06eb61641898bf2e7ed5684ccc11a58c8fc17222b7d62bc58262b1a272ebc56f2968a45ca06476b935fe7e192932503253722bfc7d450a746be9cf7aa6a0dc4f960c34cf7a229a67c9789a9388e659cf3a95c4789a9329a89fabeb8a7ef935e60a8dd4cdd92d7ebd5a647632524c19a87cb33f3465a6e49bad319025323ec68b364cca3d009a3fe107c07a18381c4114e3613c0e324e56ff139ee64ff88c438c139ee659fd243ae1a461113deb619c20089afd2f396aad7f95d9f1dc9a1f7c32bc68488243f2cd064f19167737673fcb14f502cab547117717084abb58bf3ddd9571f1ba8be2d61e2944e66a7050f65f7b941bb4803a8faf4d2634ab40f10a78b36abd01cd34e6ea5d98ab4665ce1e853fa131e6ca7f803165766bf96
(hex-encoded binary file contents omitted — not reproducible as text)
98a2cba5bf02cdd31f8184b2dc280a28c29a208832fe7c4aba954d582a0f55ef83ebd57c24c1c80b5cff75a96b9deabb6be364071fdc4a44a5ff931fefcca8f67b5b0d7e8e5c758beee674c65b9d2d727c9ffdeab76c8fbb08529fefa3120795f7369c1f23d4e5d9be5f759ef87044431048a2c50882905297dc0061c1a98e18931cd796e878aa0e152adf737f46967f9411f61250bd01c29cda1812f5425149cd8c3e4c41add754ec36881852c2bd710110d2c23a22bee4529897777277434f40a8a61d56a0509bd808c565b8a8e5e5c56429d0f59d1c416f33f2a638e5d56a94ac5441335a5154a28dc4965d1bf959a8beb3e26fac3b4d8a218bd9fe436cfcf5e4ab39486fd69ddd357b59d57eaf573a6d7ffca4a5d3afea4fb2d0f2538fffb8b711197576e54bd6ca1ddfdb58909236ced2ea9d05d4aa1bb839450e82e05a0bb7442c984384c1798a3913c097bbd383cdf3afe3e296122063fb3c0cfac229a29f2e65ad04a9c5f8f96582048b71881072cc1e58423598ab49506e8411384c687013994fef8c18d369a7335e7cf5fb16806f53ce9ba6e86e595ce515de755ae389b27381e0dc75c43bdeb5c7e7646fbeab286fe30b7797eea419a8b6a5288c34f63a008d29a0463d6a6866be2393a1d92ccddb5d99356ae88c2a2f8c9d4db6c41ef3e1c5d9dc76cc6958a5fe8393af37ef85856e1bcb3a989d9bf81e2ee068a9dce796d0c536ef462164b98cdf848353efe2c4bd0e7f7b3fbb3fc329b1df530ff2b3f4bd97f3f1dcb5928859c7b9cedd07f30598f2878bfc80ecd7a34d6ec2d8a7ddfb243afd7eb3586dfb4607ea954b85a8a55aa218ca0311551c655fe0310455020862290e84e92c43ff9d8a591b8aae59493542dbf72ed53f28a32d7fe25af287f262fcd444417a1bc34b759fa570adecf3fe783547e3f5816e17f2bf5683492b9250395567e5cf891a4ebf1fdf9749cb21ee5faadd5106874f71059ba63960811224570971f2244881479cdafe2f802bd1a7e7d9f677d2c040bdc08bb1f7aae925e99847f5cdf277fc629df4856e9ac5f34de1765f83cffafc4ce8d557a8c3aea653263c74b32bd96ded025161dd3f9f5b14bcff27e97a32376feb38ccad2a35f28023947b31509897e4f732e661dfc59f6f94f735cdffbeb195bd0b174d475eca0bdb13affeff7ad9cc3572639cd5624ff2cd6c3764673fcb3eca818cbb99815fae1e236bb7336df2733f518751ccf58aca146de4cf183347bccfe2c3b77a90cdda6c39dcf47d53ce1c9f19c47921f6c7c2ccee1f031c64f65955a59b5e37dd106f78c3fcb8feb9274734a49c3553ee8e3f749118b9e6456d1c6dc0e1dfd153f2aadccfaadf9f7f347ff83c97af41f8ce6fb499595ed10d0984330e8da9957e51b615965ca2a76683e8c5e0fc3c416cd66c1489ecd5265f4ef27d956b643f985b21dc2ff566615ad956c87f05165815eed35eb633056c0c0e0285765fc1300afcfb602e1c555423f5c661ee8d59c8c219d61270de0e9b6d51570c41c5e2de504f46a4e7460373a39de74d2f339f2779e6742c989f5e8dceca4523f3cc0f44368fac1c90f35dd75ee394ef2ae4b5522e76c3e8bded7de9930a9710ce2bff2f37eba83c922064a143187cf83b2fa944fa25926d1fbf385e5f7c91796332f4bd8ebbd6b63b34c694e18c06a578d73ca113fadd91bfe15c3d7f751a45c1fccded7f077be57455bab7c8d357b431f884a301edce081890494a9d25d001f0c90809ad20b4a30e8520a4a5c94ba40c00e4152c080ee66b509012274d7710f01f470002e4d6408d00e70e0cb6511bcc10e33310c9043dba08424890a44e9ee239c74770f0a50c0f5c95448007fbf56ea5272fc05af52ec374e2589d7ff784c3a1cd1a5559756257497363da8d8408a0db050812a1468d2dd61bac17451b71601a060ca8184ff60300a039af2efce1311079c93f452a970887158c184c38643dbb8ca51fe8afdad71d1ec4f887b44efb5601aae300da5acd26b6759945f08d21b4a42dd30c8da34cee4df8cafa8f1d11d43fbd46ce39fb6be9ede4c33d3411a34d9f872f7338d597bc31776e56fa9a64a955481561531c40b6495fac043c986bff3a3332fc994f5683c5c107ec6489efdb0c76236b4aeca5a7d4864879ecb8fffcaf191fcda9857c518a6339773f971aed6739ca9a388102142a44895fed7c6be8a8f8fe463be74facfb2749bfd33bd345f8560c25e384ce505dae28aee29babbc80e6dba47e0cbe577825f36df3aa69eb1fc8814a9d6e230b47756eb19cbff3c4beaa3fcee2711795d7549853e6c6042824b6861430bdd611d9f28630fd4dd2fe0e00204c29e571f60871c70b04074cb8cc194190e78f0f1507a2fdad7124fc94acddd7dc7901b1c0e27e57dd2f74910bb0092d15dc369139843f77d4c6d777f69d3a786094cf5143faff657ce2af1e566de958f87d0be7670a443f5824251f1a1e236b5496b72ecb3a8547926a3469728c7
3e8bbb7bf58283d7267dbd6cfe0bc993373b0dc408a3832a78a0e213fbef353fdb4102a84cf1265eff7bbde667856edc2748094c544cffb4d5c71c824f7368400f3437ccc67478f4c96fdda229efab542a95bc413c3c3c3c3c3c3c3b3b3b3b3b3b3b3b3b3b3a3a3a3a3a3a3a3a3a3a3939393939393939393938383838383838383838373937393739373937393739373937393739373951a2448912254a942851a2f4f4f4f4f4f4f4f4f4f4408102050a142850a04081f2e4c993274f9e3c79f2e4090f0f0f0f0f0f0f0f0fcfcecececececececece8e8e8e8e8e8e8e8e8e8e4e4e4e4e4e4e4e4e4e4e0e0e0e0e0e0e0e0ececdcdcdcdcdcdcdcdcd0d4e942851a2448912254a94283d3d3d3d3d3d3d3d3d3d50a0408102050a142850a03c79f2e4c993274f9e3c79c2c3c3c3c3c3c3c3c3c3b3b3b3b3b3b3b3b3b3b3a3a3a3a3a3a3a3a3a3a393939393939393939393838383838383838383738313a507ca139e1d9d1c1ca14fe66b4190d5dda8ee1ada440032f2be138fdeaf2f1bbf4a048a20ad548866bfaf528fe667affcfc0793b37e580bca6d64219a2c302ab24378ac52c8dad1dd7574371d41d5d223a34c419ae5fe28b3d8a1ee9ea3bbe5e86e32124f2f28e8c7fa945540f71f7c411157ebe11c8af2a3324956d1ffca87e55945c714bf688357eaaefa37d81bcb22ccad8c30c7f3b333afd6effc2cff7afe307f3af14fc7f42b4d86d8eb3f0c8f55826eadebfe283fffe48a0622ba3b8eee564177c3d1261ac237de66afd47f96afad62a531cfb6fe549aad48f5c886663f774ec67d8de3ff983f68b18f32749b7dccf4a33059597fc52cc62af5efa3b4f959ce2c4a98cff2a530975ea56339036dc6a2c7519f2f1f263f6e62298ea963fc77ce2aa3f893a20defd95614b8d1dd6d7cb5430fda56779fa0bbd9e8ee354ce0fa124403abf5020a0222fa190a7a1911191905bd82b21c011109bdb8ac8454aafa48d6f59f38ad508f0ff89995f1cb2890cc7e93f595ca6ad08f16216c6ababbe5f87dc9892326f0e546d80b63197a3296a2628a5c65a520bd1ea6d341b13efe4993ec0d1dc49286ed1559a5f95ad8ca4539abbd1fa14a38635a0baa4077237184acd269bd077aa34f6669a5bb8d28a25be2e9fdad32da9aff6587e7c9ce1993f5e1e81a1f8bcde1783d952a4767b256935a2d7f71b61fcef66bed3c115b435e6128552a7945f9c222ae8fabf55e21f6e807138b54aa28383c3c39171bc1d0ab237b9f86b14a5d5dd1f32ccdeb8a2651a09e1c9ca3f9356a650812457874ab54af10cb300391eeb04d4540f87299c27a72705ef87ef8fdac270747246283e8a7bb251081eeefde20cf7edf8aa885a11eda0126a13c846ce396ac528ca9507743000c8b872c7c64b9d29f05087cb95996a193318701537f52cf5834729ceb83b2c73ecb32f4ef5ba954dd2ddc202824e980880e80e86ea23661b1028b0e8b8b2f393a2ea547ecc2b279c29373654890d7153157c474d76e1660ba72d3b9521bbee6d768cca3f9c159a6573ab962a4bb47987f087ece32062762e88901855c1fff2d1233b63b492a95cf0ccb2b323ccaef67a5d9f959ebbdc43b9b55be40b1ce2b69a15893604d82393a62d007ad230225a05b30fb559941a0c765beac9fa557edcf64a9542bc0daf4b3c30b3f2574370da175e5fa4941f872b9f6af0ff1d39b49286a575d3aa3390d926d0431bacbd6de6efac3ced83cf0d94698f259cc744bc5e16e0028a3db5595662b169afdbe9f9677ad159a9cb940f9aef97fe7fcd94408da4200c11602c8727d9fb43070c47ead3f282bf749dad73c56b95f24da9113ede8363b379f4ad0a318fcccc28f244121e7baae26859cbb7496b1a380584e6739e36ef4542a57d7728e3a873d1aa3de87d8abd31e1fd529abd8fb34542a24abcc7af41408dba17ac52af313ed96249ae59368966c87bccaca7608c993b0ef933fdb9127ddfde2674309aa541b0864ba7b7c24d567f9b5181b08f25a2745415b90309b911a74588f32f5e817d68296f466a427685259f47dd26709ad8b02db0722e87ed0b65e325ac35f9772e33e2e656e1f5ce96efcc190de8a8cd1f767732b62ea16e75fafd4a3329b6523a20668af15fc65ba558eb10cbb1b490bbd0db1621b72b721ae6d4808db1023aff768d27d8c2f87f126c48c10383621676c42aa6857bd6a125f54a6f3efcf2349d7260409e3f9ca9582f58ab04fa29f7c85ff482f9a9ce5184dce72486718949f5ff9f195ff799f9c334fb4b952f0951f6f41bc70104488090f26da84879836e101469bf040a34d789869d30b82b4e90545daf40215ba7b84108c14314d61469ba620d3a629f668131545da44850a6da282499ba850b5890aa0365181a54d547419e2c1c9c81e3b7ddae3a74f7b1cf5698f2e7dda43d6dd48908c00c2b6248fde9604d25bcd91de6a52e8ad86a7b71a2cbdd574bdd580201cd9909051a24d6436d026325cb4894c1a6
d22a382369129d3263279b4890c1f6d92e38336c9e16d9223a74d72fcb4490e579be4f0bafbc807206c320ce94d86117a938185de64c0e94d8629bdc9d0ea4d86ae37196e6f3228a037198a28128210103a2872ba83883eddb1449fee80a24f775cd1a73bc6e8d31d29e8d31d66fa74471e7dbac38f3e9909d2273347fa6426007d3293a44f6694f4c9cc4d9fcca8fa6426863e99e16242228484378adaf4c60c6d7ae3d5a637be36bd91002444402812421121dd46b6235f6f476ee8ed48027a3b1281de8e54a0b7234df476444c6f473cd0db9110f4766404bd1d71a3b72374f476c406bd2109d21b9223bd2109a1372426f486c47b4382ea0d094f08a7cd84c4c807438ca8a14d46886d326287361901449b8c10a24d4628d126239c68931162badb4808276c518cf416a584dea22ce92d8aaab7285b7a8b6243779f4c38726488112444b60f361308221441b26d9e18bd796cf4e6d1d19b77a6b709426f3300bd4d25bd4d9ddee6aab73980de2697dea6acb7697b9b0ae86d42a0b799446f338adee616bdcd347a9b6ef436e7e86dd2a0b7e7a0b73fd2db9bd0db33e9eda3f4f6acde5ef6f6b8b777406fbf446f0f456fdf81defe8bdefe8cdebe8ddede05bdfd99de62427a8b997a8badd05b0cd55b8c4a777750c48891234540e8e0f4810d6d0293d426300868131820da042689368181a24d603ad0263060b4094c09da04864c9bc0b8a04d606ad0a630346d0a83a44d615268531816da1446d5a63059da14c6d5a630af29fdba010add2640ce0052c7f7498f0c40a2e8ffd8cbe6172061ba7bbefe6380e4d00d4877e5061014ba1b9020af337a74f71938bafbbd6aeb98334a74371e1fe9b30f7d1fe95adacb74e673357e9de9697cc64d6786f4f885a1fc038feec67fa8d159befe18f387137f84e9ee3fc2fec34be3a7cd81923f44e86e0a66fc343fce74f77c65fb61931f54f831816e6ecc21188b89307c3f3f68ab4a0c2bf529aba4310234ce1041c98c253e7081a90f115099e293825850a1e257f46a363548f6852967833d079b97a3708d921c1c856bc6477a8db0d78be3e04baedf9a577a9802adac702ad5b79ceb3a8f59bc72eeabbdef02b23f5ca8ec6795222dffe8d9d6efa319fb5f79e47f9f86258de6454246579c0b9d93550a5aaf529f9e633a3f49629f554afb4d98f55ba9cc30eb58d264c67626afc7acbb748b3f994709a26495a22ea6f763d2f3e39fd19947ef833f0bef2cdf4b41978e29fdfca5ebba3c685b1d9ed7d22acb39ec7856eb75e59cdb5c2dbdbfc5b9a73763eab3de0fc7af267de9ba2efe6edd568f519a7c8f594f7db9be688347f363779573a157eb3dc6ef7d1ffd120444f4a352bdbeef695dbe743fada7eaba7c1f457d3265658e3f89ca56cecda75f184a7fd8b4787e4f6328c72eb1f4bcb21ccb2a0dc3a2b1cafc4c6174666f91cc126895edd027ce9f79ad6c87ecd0ac4772745d77a98cee1219dda531ba4b21e82e81a0244677298cee1218dda531dda52fba4b5e7497ba286d51f24077298bee1216a50e94aae82e51d15d9aa2bb24a6bb2445778903dda50d7497a2e82e41d15d7aa2bba4815213dda50c749730b0c475410ecdc92a9d2e28ea1996af2e66da6833aeee4e22800ebff2cbbe4f7a34664ac0b9a30eafc6c31d52a44270dec1a53f09b35f0c40f80dd31d225c0e670996a940991df25fafda1f6b05b35a5ab084127ca954aff96ab4208f24e3fda4f74a3df487d13051c4b9578ba96867a68e92575cf9b5569c7b90e6fad82545ba9676bfbeccbd3eb633cfaf6867700e6331cb2bc350ba38ed0d9d09932b695fc5393c2dbd1f3a28004cf3cb9c0239a78103acb25421c68e0ff47bd5da11841d353b5c8d73fd6a43d01e491b0a3c20076903f3bf3a78e8d0a2268538ec995691b3998e72544d4dcc36f19dcff2bbd61f7bfdd0bae60f613b345629342f518c288b44f8e9b599d5ba5f6944d90ec9aabd5b1ea42e9acd62ad6b4b4d0acdbf3677219728b5c8c82c81569385e4d92c5a7a84e8a18f58417777c9007184e98ee33b288e23dcf8484dbceb1e49e2ee556d77e57f9ee3fc32cf2cf7f1ca4a617246a35d8b3d878fe47dd27c8fe2f191f092873dc6bee5da49670f7359a51e056db5def8517ff1ba9c730ec7acbd45e36399787c2c766848c91b41ba3bdb4b63240513e8f9f3671563f3fb2badf7eaaf4c25106281920daa20811311684c508308125de386125deac38d1d7c3918e85d9c02bddafd17e8d5da98d924da3872823c4e20c5099c50728229affcf865a788e52b7bb2fe176b98ba6dae35542aaf0cbd5a283a3a38148fd8c40650f784d91bb22182898d0fba1b851d5f0ee7584d52a9708ed535a8690d9557eb4edcab685454735d39fbb9c9c9d1a149a3c9aafdac128c0950e86e262e7003c5b01b28c61d16a5f730bf32744cbde895b2b14a5f24c0407e2ca318e7f592e64f045f58bee69825bc4a1620327e37a1cc18
a4797ca4f1915e2fbb6238823c46f0c60872709202bd1aedda17e8d54c686c818614ae7a85b57f599b1d852fe7610a3af130054d6738815fc4983a99799f3dcbeba14b997f5f26a337ff153f89e2fab19f2ecf28ea2eddfec386b04bf628ac85abfcadfbdfacf2cab7b0f885576c198d62952b99550b02baf2bf1f59a5d87e3f397ccf5bd50f89683f1482f7cabb68f2ce59a65baaa5acef933949a0c74588195c329659262bd77e28cb7dfa85ff5672ed8780f087e0bdf293defb494421e871a99f05e3f0085ffbc124a640f7ff0be975c9a4ffde28f646393a47d635e5077a5c3e265bf8317ecfdea210f4b8205d09decf724b08de2b332cafd0ec10c6e36399e3fd24eb610a74ad95697ff2b72605aa967e325bd727b304c22ff3aa4ca2d99395f5d3feccbfe1a7fda1fdd06a94dfcfa34981022975e92ed1d05d9a81a64a0ebae0f0e7ff7eb2846460494ac8164e56eda579ae3fcb0fda96731826e29957e5ad22240c0d63f3b128a48b0a1c9c3a981d181184083d82d0c0a6e68569120e0209721424280894d296211e6419abf42cda99a95beca88cc5017497b474978e4aaeee52ab341485490c54378cf64349849147a55fa5fe30bf9ff45a98c220a244022fa5d504c4b823553f8ac55976299fa43757eb3050bc9e6331749b2b7dcdfa1efe595ae2f307a318dbd0c7af62f04aff240b93723eeba71daa02f4f4ba7c56a952c140f1fe34807e54c62cf62e8b60f7b14f5af2b4710c1036cbfb999664280d7d9143a9a8bb443434851745ed755107eb4a175e17417aa8bb278b0b1dba7b5631f6d70917266c9187ab4611a652dd9ff2ba32bed4ca234997f7dac28aee9601681c4c8b20b89755af3296bcf28a520b026831a5dbabe2cd8eca987eb9f695c70367bacbf66b756e3fec2128337e90ba304da2d3e52c0df1c051cb2b4a0f8020cb02882c4c2d2c98e89e7959824e74929c78170b8105162d5c714437075eae0002bd9a04c10e98e0fbe4cfb4b1caaab6cae80dd19ef64356bcc00a1bacb8d2fe49162c7da03400d62771a6b22b3ff79654e9b987127cc592e8f87d96743e08521f7f96bdeb7c8b73d21fb49ecdce7de96a5248a50a47bd41804c777ff1e228d4a4dec5cbcf32950e012bba2150838077739e0c913cf9b9c4a19cdd2f72ce867b8ed6f816c68da6b30ba344c39684a985a9e16c6e587b5a03a6cc186e0333e3aa9d98d27cb4f93e59e39566c1c07025185373b107b8c1e5077cb13d60d6ddf67accdaea1285256dfe7d400958a4983a60ccdb2c69a693ca706e0e987577eaefe600d3587fe6da8070a337207ce0e6e7bf61e702e2a6fb3e16e7bbb606b88083ad7c7cbe4ffef88c2dc7d25d53a6801e9729355bc507fbf864fcd3c7c7a6660ae871a9d92a3f68f9610887478984570cb881012b30a0880f56f830830f443d9ce9818ceee6983c683193ffc66f6249fb9149b69509a69f185b713f2568abf7b36c9f7676881bbf4f5eebe327c360b7e2e6a33e29f4c3a587560f29f47064015c3487bfce53dccf98bdb3fc7d36d7821620bb9ba8b7059c1640b329404c734ff3f1f36cf6ce539cd00f171feb95b2bf8ec5fbb26a3fcfd28807301b0f2e6c0920334a211fbdef6e197a43c00d106003042881802bcd3d9633ef8aa1ffc73c95e2b2a433996843d1510700e300f500ddb643203bb060db81871d380388c000073080ab0067badbd55b019c284002b602dc703eca5095e2fe6332ac9fe27274fc615dcd57aeea3a4f7135385cb2dbd8705c0daa26874be70ff3efa35b9cc3aa2e25ab1eb3a1f4b0fc3e899a0f7ea61feb3a4cfddf0a27f4c3455e8bfd939e0e596c3a24a14312dd9dea4d87dbdd5a7ad301856eae3a4ae8874bd7798acbd61aa9bace539c4df89ecc72561393b8f5590bfb17a63ef7e9057d6c7a4447f58835531e364af06629da07696e53979b367589b5a98b03dad4a589367519a34d5dc8b4c90b095e7adad4a583ee7e7282099d04004d9a30b1a959a2e405175a602189afd04d23044d1012e8a68940370d04c2e0e0020e2578e0c31030dd340fe8a67140370d10db0d6b6c379041f343370d037c3072bb24021fba5aca61fa85a0ecbaf958c23c0445988fb6e55c17d28cefd399149b262ec5997813ff8fbdaaa5b8c6a97848f32bff27c526a4b9c6fba4f7a2e29eff73e2c4ffda2aa3a88cc5db759d73d44798a36c429a6b3c357e577e14a4d8a557f1a672b5b7eb5ce26aa92ca4d9a5f4ff58d78d9d5ffa33cb4248b37f52cc9726b1e09f459bbb4f76997e22e8ff315f795219cfa24dfa40771fd1dd493c6c4961bbcace68495c927e50af2a3db10e49b5e0cbddcfa19cf285639ba84454416c5781f6c6e48c96cfe8ee1e23ffe416b2095fce894a95ef27d5913d2ca67374300f3d8457f872d57a2a15e8d5ae19574cb7ab66a5488e0ac5165e793494a880b0465766002410951ea745506aa153502a75ab154a5a2bd76f85d4956732d156013
fb3dea32c22dc32cadf6ae1ef5b55246a0aa018dd1cc64b6216fbf7c99ffb45dcf749af2685383cdd43eaaa49216ed6c7f7b1e8d2714833fe2439c3dcf858b8f191ae751658a0efa82aaf8dd1b073dd6439f899e52be7ea879fbdee97be579fcecff23e28d6a7592923e7d6671e458d554e56b543d90ecd073fb3f0f749b065ad2b94b4d67fb27569368bb5ae49abccbf34ac05014d3bcb2f94eb675c5976c81acd1a942b15925902d5a450b6432fa10d360446cb95825784fdc5df47913cfb613a7e3509be5eeffd6b146138a4f935ca309434fc02a9fcbc5c29885f7fc317ce8f5733a4d1a61960d0261a9c5859410909514c5c4e6de262d3262eb64d5c9e68139732dac485066d9ac18436cdd0d3a61988da3443529b669022080a08e0870a4b9460259bd258ea9a4b6f0420d2dd3d38dc4d2a5b7bb72f3e740d5f5018425303921724f0e55a43442cd7cfd0d1005e5c5642f4dab07949a13bc9bc2e7c391e0e87bb49a5b62e0be802e3f05fffd07ad7e696f7492f24e9bd1624f95be6cf4a3dfc46b952a1af75b30cdfa81ecd8c6d968c45a0f9343b6b100d2fcc80811984704d0a499fd5a450ad49210ea419bfc7beda4c674d9231971efb1e03b9ea2bfd1cb439db2aced9fb3750bfa2a54972f6b05182f8bdbf2eda916b5920bfb60609baf450f4faa15521617abdd26c966eb1db793fe969f68acf7ffd15bdc795765ea587627299c4589e84c82888061f5a5d71b951906b1564148307557157eb48c839e7aa75500e3927ffc67c56946c7d3ed69573588e5fb55d87453c5e7973b5de4f8a65d57ed2bbcef1e358ccfe94f7c12ace75f893a29dc1bbae731ddc7d7ca6c82c811c35552a47f94c51a962dfb7a68074d60aa5d0cb241577771cdcffa321683d5bbbae0bf2169cfb4250d6deab24d1ecc99f32e3cc399681063238d1dddc9225ae9af485c2b1bf7e6d752c67f76f48d559a678cc7452d1068a2b8be6fa322db24ae503e12f0b17ddaad76b8888d5320ad2f20a8a616544e46abd5e2a5790515190ee8d2805ddf8bf3046747f7ef6ca7d11e35ca950ae542894b4d657fa4df940b952a1bf4593057e6685d4057e66e54a85402abf1feb9a8f8f26eb7ea54d96278766996e11f26296e5cfd7dff0e7ebfbe42bcb946e2c69a1bdf59164fe095225c894203d4e926c583a1a187a04d04d13250a0d952952545168a03ce9a6e169a2c444a549544b81682cbdb1e0e8f92b1b83a32a753bd4835b594987d20a343b5daa8175b35da901506abbd275572a65d27115bf4a71b98fd72478e5c9159b2b226c31b0e12a9baf8d01278610ace0d1ed2a57b9eaf57904dd43bd59e1a11b7f92fa4c196167581121080e0e56100426f11c72f0f94ec5c773c8c16d6aecfd1b5e9872ff37c64119ddfda537200b7c4b6ec049e2d3e4d44344f363f47d140bed87a6fd69fd7cff5069055591a38b7a785a2dae662409febcd62bf51a8d65d1c2a47363be1fda1b3a0b3d3c18b3e0c92ffceb3d3cad96cfcf935b1283738f64932ac536e34bc51b7ac63f3f5be3e13f92db2ce41c87677525eedc2cf49f757f6dac669b1a7ffc492bce75b4ceb98cede70fabd87a348fd1ce06a552b96a3afcf773cc82985639f31e7459a5f893fe4be5998c7e8e65f5a77b3463fa7dab2931fb492e6d5c7f6bdca3a1cd2e33a69f773eae291df8f26fb50f5a4c633625f36c6bcd5671ee6fa03855a94b2db608539ae7997533f899aa825c4443ce61e7eea342e7baaee59c8dcc72daec78090e2558e31c0ea5102a8be1276d2aadc9d63920d6959fe851f97d622c943f533d2887bc0f5b2cebc2f37f287bd69529ae76c8dea7c1caf7c99f0e2e14aebb25cdce0c94188b6ef3b443aeadc707a7c7490f0a5b0f929e211b9440a0988152064a0bba43eb362fd4b813b779c1512fb82ab48e7ac16bdc5528ec5fba6c50ae80724477bbcaa6a6fb090b90ae94c9f015ca9f4debbd68351afbfb42ba125452820d342fb8f07dd2cbd882af2c5f342d548af4ba368f57528fc6be3eec93b94ada2751190bf83f961fc9a31997d428a131294d121a5fe171a5af1aaef7a0acdd2511d0a850caf103f46add342974d3a050c5ee124ea60900ed95690ddd3427f41f26d0944043c208746c397974778e33999f0da54773b3e5d8a6018008219c36130d129b1a7cb9da0ba7c6551e0dad47433bad10feb792bff5489ecd92afbdf2bf9fa02b56d149dda03b95b3a548e89c24b779564baf8ddd0fc58c6596369674d39c604311a0e79c2b3c9b0831418349b831d169d4fc3bebb7706ab3f1c3260dd0ab39ea0be87151a95c5df0ed6c2af51a97a0c42216956ab290aea5e51f729f94e3c0c42b445c25e382bceb06e093c2a0734c9864d13a13267e6dcce7635c93429f2df84923eb2aa2114273448b6d8991ae8ff1bf4eb029b182735592f9559430e12a25a3928a6b52488f35b617d6e86ee9d77ae98ced05256658ceb0bd3039d896ed051764
70a10e20960b39896e2ef4747373c88c138cd956d0a3bfdaa1f955465bd95a239a34da580862636187eeeeba6940a0f98086486f2c4cd958b8a1f18046080ba676951191915190274992644b228224662411b32571a2672f941357cd77e2a8d7968407a742854a77a90034bd25094a72d3aeea2ed5cda9d83c039b03d1dd957a3547da9ccbe6425e63f3faa4ab72ed51ae5a61896e49c37fcd29b1a49f7c8953ba5af04d851eece4451b7ebebc4f1a4333c68b0ebc604b14344f341edddc63fb795ecb18bfe789f8c55ce59532f7d0da2d448a70f8f3fc7a96568814e1b0cc72baa45d09cebcfab01af34f8ed2a6c441a968ca082f480109427497f4a041a51f95b1a0d20cba4b32e8ee2079246d279cb19d3044f7d39baf8d79fef4e69a149aad1a942578ad15d6cb6468071c4944adfc387febd682c6c782e767af608ce4652cb3bc572511c6f2c3489ecd1223c2f24a8ce8f3689ec55856a9a20a982a5ebab9ae8a32e9ab1519ddbd42c10c18c410e0048391eee630f77d02b07212408e4f0b82f059f98440a50f2a719ca888e96e0d0dd12a769e1abf599fded883b27e8efd37658f29664cb9537ea4b8408a17526652a41424523c5059d1376667959d8f307969aeff7f6d3ea94e51bc8872459457140f4e3d25e831408f0d141340e100141fa0003de9e3c919ddcdc56c2a635be452329d4832cfbf5984d1f1c3fe77fc669523cc75455bed937f7284470f9e0f788ce5aaae5e11f6a2d9efb319ab343af1c0c0edecb0c344e78f93ce057468d0f1b6c2fddf59e5fc9c03cb31410c9c2970bae0707974634c479b2ba5e37743c38d9193404e4ea03839718013282727cd7131c74b26cd7e1f0a771d77e246e088a4b44851917aa58aa0b0407928119a80716a324413dbc44a13cf9d93f9d586ff37d859a6d92b4bd9c7df5f0b14e455d678771f14c59b3b9b9de59966693f98cd9da71e87d413275ec2f28c4570ac327f2b26484c54367bd86471b2a9c1e60af7528a5c857f5a218ce577aa2973aa09c1a946889a2b35a6e6bafa37d86b674b92382df9d2dd47fab48448091f642821a2bb679e685d8f24a5ac387c52a2e4853e4e2fa8717a414cf7e985009c5c70c1077d72818c0b4bb8405d48d2c21939b35decca25b5127debd4020f2d44690185e626ed4e2c6c39b1b042123492fc9024764a729424c8c963e0489cfcca0a3538adf0447f9f84b3271d533ac25c3529486dd879a5d9076dbed643d133c6b1f08a61d29de56b69dee1bf22ae4155be30f59912542d960fcafac2147f12a4decf4fba5ed77ea84298eeee930a590501a830a4b93cc256a714c858a510a6b9f15ba530aba24da126579b51df7742a10cadd9160a447c969ff4f083288428388797c810b4a700a8c1e153009ee89acf1f9f9aadf27dd29b6179254a9f4ea0e174c20e9993096a98309af0c2c9040f4ab0e25442772a21a804212464d114d53d1633166b10ede87f978876ac47ced9740f4ad48ad6fe8b7d52c6f693ffa2cca5579a0c5bdddd419f484022a1841150308210a71180648ac300e80200e009003074b5d363d4678e1a61df27c21622d8e09ab1de10c20a4218e214c2d7dcc4ddad47270d9cbc53924d055b10a78dcac974a69bc3f7b1acd2751eb392fb6abb90ce46f971297c3205f1714202c5098944a2c48d233f9c8efc1124a7232710c49c40f0a1ebac749ee264e7a99baec8392c4329ca280c9498769eb27996af1876288e56ca840997b1fd983071550c43ac21a3d5166eac5d31f41308424e46ca3819d9c148cde98333ddcde5d1a606d32c6d6658f381014e1f2cf92000a72264cc2ac75aed5f5df77dd2eb4e45724e44627022c20511312722349c88f834d77df26f17b31efbaafde4eb25dd8538e99f9c61399bcdb0bca252b96658ce6cacb26cacd2d769889850fe5d9d86ecd0dded0de9ba1b853e0de9e9eeae44434c1eac4e1e10e1270fb8cc3c0802ca9310322721617e9e840079a93aa947c5d6a90333a70ea4e0babff6a70be27e7c2c3ee363191fc9e7b14cf15ecbce13f1a7f54e1d70a72065bafbc7ce539031a720439c82e490e24e418a9c38e0e2c441adbbafe87930115b6b74fdb73813264c9870d23f2c270e60c814fc479242270e4af8d689468d6e2e943f73bc04063ec62bd86f39d1c04e3447279a29ddddabcaaed5751eb3dd4fe72d6fe23f35449da7c64c6b3fde420bdc4feb39ed6bd063392bf2c118637983542ad7fdef7b6cf3fd57ae93be72ed2b686f8cf6f5da53abd17d6a3192864fdd9dba860b848fee40bc08a4c6e12c5781a0ba0fc4c80dc674372747587783909b37f01b00e2864c5640c63417d20c880fddcd0132833996344060709de1a387ce647126cc99a2ee96a3cd673cf5071fddfd871a7f38d12dbd87fdb1c3fda3eb6e4ed2feb8f9a3577e9021e44798f6236c3f7a84fc1842fba13ed6f0acccf1f7adfa80a20f2e5ce7bfeae3a65d7c94e1643436e56cc58
798eee6f8f0a1393eb874371fdecd01b1f6e0e3fb567b8ce9ce4baadd838866ed71a5bbb9af362fd9a304ce06656ca0840d6acd511bc030c26c60ea1a94e16a9045770d66dd5c0d7aba399b1a9f8f690d4cdda3ad7a90e9d6238c1eb3fc343d4cdddc5f0b8e55521a64d1ddb53408d3dd1c8b066177d3e0a6bbdb69606a6efeac3f9b811bae1938d13d83d99533e8e1647506260a93411e2d1988b1433228eaaf264119d4743707c4e2a3b93cc8e8fba1e7c73f7910813d8f8e93558a252d0fef8fc6c08dee1880e9ee9eee23b39c3ea822289fc9c2de6bc13e36e74a856496f35a2be067ea333e92cf64b97c7c7ca6f8f4882d2295cae58343ea42f9b01ce59a12230a290e69f651a95c2f95cae5f3fa3ed319cdc7352566274ba57299c9c38c1a668830a3831925664ccd75b03b4228d3461930ca7c65885ad0470b9e68410d2d80a1dba6fec735f1f1916a62d625c5622c6929298f3b4f4919bf1aa3d3822955fe160b8e60c10e2c80b120880529b00000769461870f76c86007143b4cdd9df43a4f751c15073b4fa9feda486fe29e3771d0c3d5b25cd5cd1bce63b68e21ea20aa83053a6c40c7982a661156633f3ba3613a4a98638c39c239beccd1811c61c871013992e45042068f59ce4baaa571e357edc762d2bbceb1aa43c2644032362b58630547c4b1471c248803873852710851810a5440850abeeeae732cca686c94e0fd24eb18836faba3baae1e7170e49182a30738aac0819af91b42bc71f48691147491021ebaede7558b023a50c001140481022b28a0e2c61d6e44c08dae25cde6ab1d6b3c533ba3390aa3326d038e36aa68a3d6c66c633bc19913dcb0a102368060a3870d13e72fb799f455d339a6b95250f589d7bb6e5c238735b2ace126988109ae3001122660994004aea37d75da57f9d9a6c6667b4798ab3edba15925ea96608912e012542981cb68ac92800a12742450410d39d418d35c37d2beaac1a551260d2cd2a0401a4169843002322378c0088ab611981ecf3cd939777fc9cf5cd9fb1a60d6bb2b7e3457d151e0679afa1bb6d038001a5bd058e18c3dcef8e20c26ce103ac388d7d9fcca95d2beaec059bed66d76ebe54a854440452064c61e66a0c00c1eccc029838e32882863a80c131962907144a6f7bd5e31b3b8f772a678090afc59c6f26fecaf634b63706e4a195e7b6d2cf6a98c1fe6d2f15ff193499f7a23326ec8a019438de6ba6aeb156398028da1658c202178230461bae75cad407003105c018219089c80e0041004f9801b8d6d4a4af7810f1c11830b3192c4a012061e61d811d45b184a704ee493aeea42e7c67c65989a97cec63002b0815107185f808100306a603401631b338689be2bce064bafc6a5a454f76799fe2759a1313b638a7c41822f10f005973b5ff61f4da237aca2a7bc58c38b0978519ba38b26baa8a18b255c04b27101022e94e8eefcad5184b9aabb3f8a196359e4298e8bde228e2d80d862478b3db4e0428b0868a1450b0e3cf0010f844f04c35aee2b577912958ddf8ab3c98f3d466b1c779dcf0f250d77ddb531afb25654162fc882892c7668f95894c1620358cc8045962b6a70851b15ca155b07c674a0031df8a1032b2bfcb0228beee63a507ec64b1efc283865e7295511d775dcf8852e63d68a2755dca00a36aa20a20a5715a6f959ceba1587f1129b7dfca8004385162a50db1466a618639b0288295a537420268f6a65d2c78ff652c1f295e1084349c7a1a46131338811224518522421054c0a231cf8830319e0000fdd753ec25c66393b4f7163953657dad5ff3826dea453c94f04af8d79f2ca98f562d6fd6779e380120e6c1bf8a30e2edf2b5dd5e190e6ce531c0e69de40ddc09640a280220a22a280018a33449f745b7d52c754de2e46bbf06df2e31acfdf82e20505ea09173c81c413f5891834d0c7a60138341005d7e56f3dfea4aac328cae2521a00d2c0113a9cc8c2891b9c28eae6301e65f89a345749f354e7ff491c8a8e978ce2d6c4184d70a009209a906d4db890813532c044068ac8c04f0648e8eece06a786a87315d7cdef1c670a068d8f343e52e729cef11255a7baf23fc74bc0cfb4f314779760ea635d713fcb4430bfd6bb8ebbd67d260bfc4c7daa8f8da37c6aa678afe5abd667b21ce5289f1c1df14ed667eae333e5c331fbccfa1ef6f171d4fc7293336b3feb971c9dcd02372052a97c7a44d0e3829ff0e45c6159e08905842c9105e8d12c419b4559972852a2054a484089295fbc384a89139228d39d34ab4c62892428abb7243c7fa24dbd1656e39972b9d65505dea8c0022a70d35c16adcf6b1d355d66ea28293724c040c207247a8ef0a4a33e11bf6c8ab09ce727da237e38c21e7182111fe8fee205556d284a9b51571ae142776f45c0d1cd15d144113d1471632b92cc1d92cce394451421d35fbc649102435000060ecb19058c1031a6bf7841c9d0bb78e16c3ad0e332ebb7bacf315b5325feb74284
0288e0d2b4af44b830013dbabf784175f1d2da26f0c30476305d7199563b9b0d71660826be78f18cade7a82e5e388c45c7a1cdd887f819628a102c10420242fc04914710477cf1e2933aaa7e52bd9f6465fe49b14a21e7462cdafbb22aa7cd3ee5df1aae5209fa741b8282786d415cf12e5eb85c25e0870498904051739d8da326ab4b9233714ae9be4ffe74b38a2e812d023388c0115fbca08058a82e5eac9d72b6da12019eee0eab06a804f248f7a116328468460000000000031100204024180dc72362e178d26e011400015fb06cae529e0bc43cca29640c31c41000000000200020902106004b093cc84a4d5305c4eb7f5bf2761535e8d142836ed9241c418f1022738aeee60b399142a60806fef7d45aa6fa74880e72a4dfd5981eee60e1f679eb765b8811e367899f57c798c1265921fa3b00e369afe61a3b1a4e7bc49a1373601c8824a4a140c1a5a5e67e22a64f6240550f933d83796496623c29e71224986b68ede731d149e60b8706de8ade46f0cf418899d3312ab01d380ddc7baa431598ca46729aa93d3e421e6b9cdefa65bd9bf05e1b6f8535e96924f73050d8bc49d28551af05c87d2e7462a15f117b53d99aed6b469c54b7f30641d62deda01ff2078c2ca384af8a03eab29c71a981ef090dd627e14ee6ec633f8fd2c27e34329aef33fd4e20063169ec8b1d3e0e5a97b0d8b16eb5728972b33e8e67d45c7197232c9bde9b05011cc649651690249e6fdf2847a3af53de1d955aaff3103b1c6723995a99fc51f47cefb36f7fffa9f7bd67e82e55f5a459f2298f5f1f3455d17327f74d3895fe88ea24b47158d5e7feb918778f6f93353599499ebb038adb55069f7677642d9a24ca462df1a275209883fde0c87012b2a4603785ec1feebdd4de9f46e04c0dcb04ec90c97140ca89edb27f514d5c69b65dbc233a1780623848a81ed775b89d938bf0db2384d9f246c60243b06122518f2c3912674a1e29b9e0e5f671f5e4e5462ba476c0bdb09c93786d935178b44621969e07733ad6b8cd29d373dcc6c5be4b0157dd88ebf9ac22ff54e77ece4f05a9bfc7327db8b84f9d92fd1e40a01e72143d7f039c3c13ab4fe255ffa052295d4aabeb15247e23f0a4e318e95eb99d26cb4fb67d46aee1e0bf512f539840eace4b992bdc76e5c7dd1dc9ab1954d7e35e6e814200090d87c92928e46cdcc56f4cb16c010a276852714196dd9a31bea9b5c88a85a4fc9fd3637abe78b581261189d31c8c19ad650109dc804f0c4676867d2cae003edaf0d95e1eeed007b29d9c7d9e6d48d55515f31b2aa752ab1e8d518bb04cbf06a44ac3ba9ac7266d59b3286fed795dc3e303a3d84e1d2ec56ce2ea8a3ddadfcec68e670f6efd0fff715b8e9d4dcae6dcc2b5241b6728c50b2289ae59597b49090c1e72485a94249eed9a466ef121f06c24fdb5f60d258bc61d5d72b1eb208dce3f221d8fc421cff1511ec84c7cfe345f9b69f4d67c9669c4c583e289ac15ed56e7c7516f5cf9f9dad9e05d1895c3199bf3e37459aae5fc98a7868a363f8b2f185ca078b5b0b4106063cd7cb35ba6d8196802d95ba09ec91e542034d0eef765cbfc55aa178b1df31f7a67a0f4702a342f698b5ff7785cf438ce952542f20818389b2aa71562b3731ac5f22527887dca6962373239e9d14025959303e1cdecc79cf6bce7c9d02664df0259a30d7d7c6ed07c2f33bbaacc64eb654458c8082ffdab92a08c609025b5dc0f22e4df647e5a06027c396c8e3d9ccab65bc27a132998f0692fdadddffeb1df14e21e7161a49292545bc5fbdb9e6b19155ed9117a67710e9757ec07d703efeb3b1f57a541570a33e8a88b1a7550848d50a6384f0d22a910a9ed918fa50d9946df651a1befcef0daad831b66e560262e10bb33cebc3d2eb30ff746907231d6667f1252dc9e420c7a1ba16bb45556d9b01b4477614e3972064fb113aa4303693471d6faf5a0bea858fdcfe641419433670487a792c6e1d24da4be4703370c6eaa0109456c15c1f2fcc3a058a3147d6338b54ab48a0a60747f37f4577308dd83030466ddc48a66d45bea01ab06a70105f026448a64e7800dabde3ef0402ff3e9dae8032a851c2ef3f3c06ad4e3447196a6220bdeb0755841939b6002c4dbcab7091293247bd067a911d8a1947d4833b3dd1624e19d065227e9a11a3021aac8153839dd53ddb672307b9e989aaaf8c6cf59ce98f829464e9b64ffb41a4812570ba365d942fa5b0ae29b7a90d4419cf2360e50ef81b3bdeedaabe922578d42eaba2f5b8365d0fcea4ace17ee40ffe41ae4699707397ddda1800f6ae2e8a5b71b3a40516bedea0928c71d2dd8e588e92b7f665c7e0a52a903558275768d0065ac7ad781133d09e1309edd6102fa3b9d36a7bbca884fcaa36eef511de5120e48cce5de66dbf7269e99e94b24a1701f5bae359acedfe8e85b7dbaaa87e0308a138fe7c54a9e0cc9dd4f0b9e996b5b2f38277e72439f44bce86819897a26996858b1a
e23ef34287a05c5aa838a8422d1f1ada4ac668d0c81dd2922857fc62637e3493c14f6e6ff638654d0f0e03b74128b392c7c699eb354670601bcae08cb52cb643481e38ebeab200e2c7c7d00c8f2df2f6773a5c84610777bb74e42f23fb6f4269593828a9d69516ada90cb1b84496ac305a42f40c3f5cf122ec9d87f8b64d4d903ac0cd2310bfe0ad392e68efbcf74cfcb4fa6ad5777ce7caf927bb2d5103f0629befaff56e9dc41641fdf3f254b73f8805bb907edc7608045930089da7e4fedbcb555fdffbb061434d1689d59fa9119fb373b30384d0c11dfba2c7e2691d1d6f8297c9e00b3027045447e4ec6cb3c512f29661c8be26af53053682a60d2104e3ffa631e8143e8462a3811ddd3934b432761c4ab04fc3cadf0f1f8ee0a437583f013065f0b107a7c77d07d1abd278335071e7eeb790e06fd239f5f8d357ec6f41a91214e07316767a1af8b1c7dafa15917f52330e1e0edb2c8cb75e5e611c7b84e367c9715114e53de4d6aa61bf7d7ef66bc7f83622fc9aea305e1f281646c8d0b4f452b20fcd85222ee7ea34ebf7a1912c78c0b701c2ee818363b59e94a351b2ab67840f735ab6db715dd5b7448bbc5e568be25195e6d05ae1060fc95b63e7aa3417ccd3e2edd6ef0a307f6d083861e986267ca2eec8a7750a530f91b01a0a3ea91bb87b0c6a10872b44a10b5d7b3100582e6d534a20aa14648850bb48a8e8c2ac299ea3f50c56f413ab266ca6a140e92339f0acb5bd77b0863c16bf065f7f3cf85f139b7322b0d9401b33fccddf50bc7193011c2b2e0b09a7cfd496b86ea9a6df066f6707e945d5811667f28cd8eedef2fcdd8fbb0e67aa0ff6190e019769fcd745c0dccd5b402dade0e0644e509076f49084034dbea0d1c7fb18600cc4d755b566ccc10f4e888b6c99b20e2e3ef0447b087c0b2894c2cb74953e8ea0798c11f50e0e22a2e325a7ac5e1776ee261384547f31665efd2306b638e847ec78499bca069d32e6aa633031efa6942f1425f6f5464080682a0051f374784525d5679aaf73c5260e1fbf2f8892e9d884c0530a0c3ebade73e4912d7066a4fa1269e05d3145218b8beece749f0a114a78d122d60ecad11922c62a01d7e73e9310f30c27b13472c217474870a43d5271bcfb89a8a279b665e643c3e781b415471337627fbe5206ac49c9b154e60bb7e1d50644d769216d8386d0fb55865bb127dc9b20bb7db9bfbdda6be7dc4d1f077e6e46fffe3cd62df49f2e38807c65bcf7b8d0798f2afb0edcc1bf9e98f17548f3f125ef329fb69e169ceb99ed6e60789846c8b6bd1a6a9e374fab3db8375a01c3f8a4b489fafeb615faf4e9ce918f71cac85481126d6f3a10ba773dd4d89231264fc3b093d317cbe1a2a929fbeab87c8d977088244e6e491e8a469a1f4d0e3a37a8dbf7e0711189552b264b3d771f518f896c1e1b256691b2989a654550a28131d7b2040fb5583ec939775252569ce152b625327a4d71934cf91fdb6c0e203725df98fae79a29c1933d3f5487cfdedb8ffa04de3168c220efe529f68421ebebd29ada2c1f6b0ae8d08e48fe60f1cf285aedbf3d0b2d0215a563af5ce19d16eb1d11e82d6802fc854d146bedb3d209bbeb81ee62fe53e51f7839d005504e6c8f5441a4d0eccdfbad86f46765d8003d863095c5ecebfd71c86cde0b996da39c071808141c6e00aedcb6569e01f64f6902808c2c216a594db3778a82e58e95ea9900bdd46da1920c2ed027ca536a2f101dec65870cdafa3ace67429716af6b9c9712306bd7b44d033846c96946c8dd138f7d6402f2466dc66c5868e432b28cfb291a1f75d4604dc89f40c3e12d8e2ce5e4b86b52334ef12fc03b076d015543371b37573142fa102a8e21e445d9a2eb1a7c03c3bd9e3dd739ed769d8aaf41b425aca53e1688874ecd474516fa801e0611170e3930389cb88ce60fd0a85f8094b7a10f943f87417d150a3ff123d8c18d0f5a0c08888183f5c286729e7b4e6c33d1fa617c2e3967b7698989c744a6e19ad78ce4c4d91041389809e90e82fd5d491c91965768b54f9fe5101e9f9c52addacb2473e072cd937cb07c9b7c7ca6ea1bdd187f0a031eab9b119db98ce09d6f873ac32f3fcd101f2360b67544ff7fa4e11b07f3be27f4891feb0af038a88bb9f75d47b2523587d48a3097439f5358fb40a33c57590fb7464a92216e40b8e287b94530b5b0cac06743590f9f6c0a3c841dbe747da2ce27498656a5cd1ac0d4cadc9a9a5296c66ff5b56c592bfcd4cbb2fe54792fc131380fab8398d1ad73415e65f8ffb826fe0b07b18c8e305c8500feac0ec3186c92faa3e12c3d36692b59fd1390f1f5522c2131d25770e9d736d1c359b906e7161f06d8cf25562ef4950508a278ab07298c410e86bb2a8855a645b26e7d938b0f3f008aab8a3a58ce5911a24a150209b527daa1f55c3456587399a3f8f4b3f46bd07883c1538c0c3e4c664bbe897b836563e13d7f0dd3f2a92cda270b56663b8370c30fec0
adc98009ee5675d38b148a0123956ec007082c4e5f9f8d4325a9ab433bdc49e5d6db32ed30b6156e24001d8d3f7256b571f530386a1a83717dc6e08a571eea97d6ef0972ec477b0d5872b082724ddab3252230cb6f4f225ad63a62e78f8a15429aa4f46df0e8bbc8df04c9033b36d2e55c36e418ca75b65dcc3c5c534f6d4fcb0f52369e1eb265401915e509ca583c7b20873d1c3831c858d4e7d689f9cf357f5b4ddeb6008f4a973a23766d259677652a9c7b773c99b46f52a2ca2281baa0c3f87209f9c6ddbea8fdc20f76c4517dac3cf313fb66eaf5e59f74920ce840588d0cafd22a154955876071290b20e8d0beaac7e898563295204f8bed979a3b756b7749574328aab9068b057aad6878fb076b51ffb9675a7649e93b02552d32d1028517128923e367b6e50dbd9335904ce9a4756700b2f89bc8c6684f5083684446f4e8a16432165ee831f9aae129dc4506cc7dbb922ae552cde870b849fd2617298002a8968b7ac267b199e8d78a1db64defb581a3056fe7360ce32215bd3e8c5b908e79cf53cd06af2f0cc30be6bb147655922799d1135cfefb81accca01acf41e0fdb9436fd0597a7ee5e86d4d40a7c1dc914bac0da0b5c51e8cdb1b90287a2aec8e8ffcb7336aeae6443c818ba6558a2ef6c57acad5d5247cfd9edf23ffa54bc69f99a7093594e85a521305e28174e438716c6729e24328909f500d41f4988f60ec23d22f5da93ed2552dcd26893e6900c57377cd965b2276c62c132ec3b4999c2206c17a80805c1c9617bfdd98208abcd2b0e1ca5776cef532e87c66252dcd789950cd1f12d3ddc4cc51e57e3a96fb139b3d4fd9fb7385a94309182937bf2eca2b31dcfc0e6a1314678f1045bc6ffe050dbea06809ca84b04fa0f758702280fccdd165f44de9304edcdf8173253688ec695fa42f844223ed500580cb490243093729d75c5fa6ec330b7d966cd1a78f1df3ec1c4f481e72371b079021dff544f771a66df32b63cce95676139dda6a32f87a96832535663fd20566ad7eeecb646fd9d36ef75e0280e2defca5fd7d0fc79252646b4782642c18fff7d0935f96d39d2dd1283078cde019f4d996426c82d580edc62c21618380c8a2c0cd3138cfea3d869a1702dddc2cb6fc88465bd0c4a659bfb1589a1d8f0efd92e937aab34a9de170c5a36be78f22ee1fafd09a974ddd8c71475749bbd1b8a9a406a463c45b19a4cfdcd9c49663ae7ca5c113f0fe49359eed863dcd5afe10c0bdf7f8ef87637ea43815cbfa4272b7604a488827556c2bf1f644accd1c21d19269e088c1e41f0e3377ebec0ea3f0b8deab4bce9e8d174acce76f56ecd0f7c742b0a3fe8c134b2e38d4c98985ec2fc7404f83403e73fe1526b46be24f2dc28bc948779c4f61557b98e4dba195dc732906d04a3e43bb98c266f81d890fba3a7c13e692b1ec410e397c5a15b38b089ba49dcb6831370d00db2d8dd64cd10ae20b09ba017f07f19acbecea3a0faab5e8727c43d626a522c6651fdb546481968b7dd05c8b55b437a749f39e186edf5db98b2503e22e8fe25bc03d11ecb07061a51b652f41840821345e73cc4745c133e7e2647d7d43bf3da9d8dd0a50fb74df9353e573566b880625d515e1b54f8b6b52b588eb59638f2f3158431ba4d5a46b51dbbd8199f53a5946a902a17297af803d050be3512f273f6eeefcbc8f52228c52ad8c2dccc9835e3d8fcedcfba1292e05259cca1e7f7918fedbad2d178f7a470a26be5b429e024833917bf451a7f2f572c4bb8ddd7e9020ce16562563836e887486fa753cdb00f53843e534f38ebb55ffc36f4e7ecedf44de69deec85b265994ae9a687ed38273f79f919e880f007fa1ec01c2dcb29da457151f6d0c5873514202a49486455bce9727793a833a53fa8805dc18cbf8dbb57f9e7ad6971b82e593d7d42e7c074a67c170b14491e2c499c7539906f36d1c080ca2714b404c7e0725f578c0adff004885e76635b83686f1c9d1ac5388caef07ea917a4357a5ed832d009120611b7052002bb367f6350eefce6fe36ad9c99f75d4d485721c9206054d45a61080915d577b9d5d73f91a9f12af2a98106a9abe813cc714892abdcbc659197cc23f8fcb33b5ec18252d2d58850114dd39111e33f220ccb82cd0e4439ff41787e4e4ae31846a3a75ba76639330a0d688d7403ae995e3ed18914492ae654f1a0fb8b0cd25388a3a5c877e3b35ef8d52ace82f8463edaa202c054b3557da1c86324ccd35765a855bd50596351ac3d1ac50f332e4c7a144e4c402a31ad27cdcfa183489914b5c2b4dbdcb056a222ecc08d38765bc22619007e4b44332be469b0c39b5c1b88474e42d69cee538d35ebb1c55227553c5e1ef6dcc75bde097bfdb84051be94c1e01f33686e53a4cd43b7c03510cba28d86274316b023695e5b18428656f1ec9da82a0d2209b0210610426a83b30ab85cf79401e717ad99e80b051305f8cc8dbd3b2cf8350283b8c872fc2eafac94ef05809b2050a07
e15036aca3fab318773859d0cbe0f5ecbcbf789494f33b4fbf59164f81a55aac9acda674df8b5e71c4f18c823acda94838cac02e387929165a70661e1bca068cdd52e98039b678b0031d581c4c73d94b2e869126b77c8b8603755df79f697c36b76978e7f9a8070a543fdafef17b7fdecabd5fec046447213e0a14d2ad8678c750ae3dcd30f3f96ea8cf7e46fcefef7f6e5dab75038326c01dd0dc792f56b2ffbf9a267796820de3c5817964e2b16c267c4b94ff5be2d8e93b0188a8058b7ed60d3cec0233d80ea20b76a12ae9f3feedd49e834ea0028ad477fe8e4829b6f9cdc802401aa26e6d591a5e4b199ba2e8f00cccac08836721afc722a7852d12752f784989d3f3c8265667f845a3a7f14ababab043949a13e0196a55c9f25876bdce66be91bcf4842a33fc084bb24f2d939dca004021652f6da963d7a125138ff6bfbc2608087e82b15891d3de6389a7b812e13573ee23327b3a56d3197483e4d5ab72cb6d5d3040bf34c68c1871923b596540460e58c41a869dc52dbdca252a4a44a295e5ad6a2996cdc41c0ba0b1e04c870767173c53a8033e49bc33239e4ffd9250ae43560b7118c1d7dc1a246e2a5b9bb72d6a8eef501e46d68153c7282f4a7e11d66505a8314de7d38418fefb9a3f432af722019c3fe5c6f20565100e287546ec0fb290f3da4d038bc179a58f74f233585d26305e79dbf0b912042c5be2f899dd171486e7095dc6f35e12520d36b3204889235bd1bb7f6f899fc554da76c80f7670b256270b6eed34b9e9163ccdd1e2c263e7a80d5612094e885f70cc67a16b2a4fbb63f15849ea79ddf48690d4117d27458136f4df68b647813cede0ffe00a303d4e68d94c2170baa1c6d0e1fb4ac204b3f2238e3cc7899902440a99bfe95ec0852918c4f21f94d1f5d40351c2195500ffdbf54717de0cde06ac26dbe84a3e037724e32f65d4820e7ccecbe830fa87885358704fd00a740007222f4876cdd4f8ce8999fc9e5ed5b3f5c8046f688ae127e3679d27eddfbae1fc1b9c191c17473bf94d75e23e091cfd89fc80ed1d9893548a2da97401021ed10d80f813a418140c905e82bd0f37fd364047741151b0b6d1443c8e6d145895e6b3d51ccbfe98f97e20a67ffadc44c9870e55fe9d50b6afce10dc52932c8a17345de65a312c5edafb15541c6bfeae19feaa62e6f074a7acf2d37c374f0b87cd7859a03e0f08cc57591c732acb3e5214853ccee98ac0d8f4ebb9eaef5d2cae862e2c5813a25e73f63567b1007b7b2d321901c91113b51d333ef692092d7c22748dc9f827788fda69d5751a5ebfa68e0588d940a7b447b5701c6c8a48f903e2f14fb1d4873ee1c02cdc27adb4ae913f27998e25c22e04d39210aba1e8d5694a5eff159c9d5348a62e4b10106fc0ed188bb4aaba01c5591a7dc15c6d98cec77b80e1bed1786747d60715627793f9e0a80de2e505dd0b000b2cd451c1594ae8f6182d82caa0234aa1f0c66737dfa25b2016b7df91057b045f69180ce393a670cf20897d5e5d9dc014b700dfa7bdf07a344e2cef9d6867818afd7aa636b413545c8339f31ca5d19fe6b2e8ecc0145b4a5444fd77dd564f963f4f1e0f45c088fa6473dfdeba653447b22c577cee686e5d4df8297754d65795b1fb14b5f4b1000a80bce936502aabb1ffb5ebdad814e028e8d8461668e0fb96aec5d3336d280f099da47dc7c29a4b9c371cc401da8c23d5462ffd1e3cdb224a6d45c9381dca8082b7751eba1c503eb1621baa739cbc779852d9d73b5b9be7aff4a11e95038231b1f29ef81a3499ff5160a1e49c23fb88e1f8f5a89184d9a087c0c5ebab5e252e6daee2a57bc660d739740cb7b755a80993cf3ba056e85f1e608b697a55a9fd0370f9ed6eb7759266a2dbef56148e9ae5f262797492a126ec613cb2a922cb20dc3e383539fbb140751aec327ebaed3a670f4344b5002371c3f108b0df75ac2cff5a1f3a84eb04f9d1ee7393eee23c9e7c2fd5914a657c19f0e55c87c81bca5f0e492328af84da73400aa2e331188509d75035ee8279d15ab5b3085241b8397151d8186e8d78a6a35c2068c5966cb14b2227fe7d3230382c77373067d8a7b9722b96daaaa8bdfec74086da764ed14e3cca927d49cea740740ce2be1d8272d9df328e54852b2fa739a8ea0043989fee207f9d137581422b9cc0dd94d0c6b42227e0919e6177e47720c9dffe6bb5f449daec1f1083d10e79efb2b76f97c558b3a2a949d7c988155bbd81c6e95d7ad0c0a58dabf18d551ddad942884b482f98c443b97ca976f236afd03a1bed1231daceafc9c4255ba20a0fcec057da803c8dce48f89d4e70c474786774c413bda8092c8659d45ff613e07417de6449076e5cbae3f7f17ddc4be5a64d7017592f915905da6d858addc4ea27660f5afeb33b57958a05384dcb52200c787caf2981f52d0ab2b5b014b9e73a0d635fe467c02d8b88af43d2636cb40c59de05be1026dfd97f8b180b4bd2cf7f3246daaf68fbf87
25ca605c6b55e1b4ff4de7b08b34b3773fdbc40e36893d7dfc43dfa9033a3f26981365d1a2806f7365b4867d83208b16d2e8aecaf6251b9c988a31fc09e8f93970ce909c4dae8bc1bff691a7b9656e73d42fa270731d562c3189560ed8b3f5a07a47ca3c5421d60425960265b92514d530264bd9be60cf0df55a69aab6b7112c93b9885e8fca6f08b9806fe324c8e30615d4ed7908d0d369394ff0757f23f49742e6a8c80f8cbc12eac380eb7b539925ff830d0d59a384e0de84ba346f4f5bf6533f8bbd4a57a2124cd03d3a5e22cbf8f0943521d25f106f72ed826a1a21e6e7eda46cae5e3d0851e08bfdf92e50afb9b038a8577f87345211c1d9fb6c5b32760569489e1a09856263aad81f4e2ebd97394c8fc4c498c7eab877378e5438c02216e3c845e76617bf54df33d854265347ddb643766f6323e5a7bb38693c2d9ac0aa9aded9be4cbcf5e7637af211675236ff646528f3ff45df438f643a5d4fcea14a050807e92b3ef8188e0e282445b851eaf69cc4ab98ab0befb0252451b330792ab2fde9f726f2401e0fb9fd20fb4bec345b5abb5f0571830eed1b5f9cd8a9130d73e50550fc2d62e1f17067db0ca54eab3f957614989561b8b6f93f87476ee9f2b4bd70b0856f45f0f30c3e2bcb0dd9ef1ffdec2633c20a47e1c7ebdfbc8e07f74f8f7f44e9b050aee642ac2fc24ca85cb5f3aba58d3f8641226cbc2e65ae12ccb070644b6b9ca6be7d9ac6eb523ec37d4dc78191b146a76a51a7808138851a3189eb07baee639a8a817feee3651db51d4cc472963bf4db6a4eb4689c056f9e348a7612d2cfa1a8a04f33faa962849992e868874c65c07eed2ef4075820ea47bee9850c9f2c134aff4e22adfc0c62617f684ef9b2ac74fe8d61f8e75215f13ba7ab5d6a8d6014f255f259a048faec9408654a0c65dc9ab8fce48f5c9ddfbccc3713ec270e63f5a0079f0e9bd65a6ede7f9caf20e42ff65ffe2fc77e47b17f173492331f44302829f31e827db0aec189db8d2973c6eb532b4d3833595f0561a761641aba4f08ae477b71ca447e7cd7d7a58c1e4429760034f96fbc2199af32cb1a8cf9f8bba627c27844c74bddcc58366cb23fd906746172b2bfcf164f39c384593364d5455500ba1841550c05b7e922133f2e1c22ea80bb9b7a579337c74f6d08104b247bab82f74598f0551aff0b831f7f0c67cfc2c0e812428e92b5585d67db0ecb6166c79966d7fc1fdb780577f70cb127b4b2deccdc37e3246bcbf3d893caaa52c260b13c12428971b7dd452311ae10a5fc403814750759008a9d263244c9f5f403b09ca1e840596ace9c59206161f1eb65c0936b877ec56bee6c6d107aef566fdbd2d877cb07b2db269d9d0abc45332569b9a558c00fa18bfe8b8ca1ff4579c5c88ee5c929feb0b4e734fe3d7b93227f39584ef4733632d734687ec043281ed0fa4a96a895b7125e7a08f834f271e25e8fe4e824551dacfdd1a9785af97518d320ccbacb5fff125428bbda03a1f6a17d2729c1fab6939a490e458cda6202028e1dfdcfb827bc3da93506e59163aa09a4d675687852a14704f915e90b76dfbb7993e6ed0e4b391d624808b6c6e273d896e78155989c20dbc20e03e2255a58cb3995091009e02664e410f6ef4c85620018482881b1239416752d1c16e9572c701c7cbdafde0154b37d6edc268c875df382efeeba240f0ebf0b9a3d6990ed3a2efda2ee67d465b35fa7016117cdeb0eed18d73f466f3db6efcc2cdaa79c4f3c7c48099d563430e091c490d4c26827faa3496f5755cfaf54d546191753aa8baf91129e95eb36f555ffa285fe633a57845bcbcae9211a5a61308860a9a0d86a5a8e8cc209e84a546e79c8c163c47431aad5429e229c827393f99699e3eb74e36be71cc24e1b38cd65ed5a1a5d1e7f65979145cf766debb384fd2beeceb35af998d340ba4a6f70a6c60870b78c930be382830ec09866ed442a880c586d3b2926619db208fa07b30ab32622d8b2dd402952b8b58c15c9c568b938ecc65751d3cc947bfa97706c1742ac5a01d5f8af7237b2e143aea861a3ea4cfe042c6777b40eea3658556d241d4aa1bd5e1ebce6c11dedbf58691ab32abb330ab0cba88d322c6c7da346b4dba9ba9caade8cc4c02bd43f08ba89f65128841d8bb8ea19b04fd919e56706d0d746797cc9e78e035b0e0b98ed44f0fa92d979b0bd2944c76119cbf2b013d57513cf55917bdd6729ccce4bf899c36d4a8eb2dfd63f94dd00bd70c271b6d666a0b34fbcc901df0bbae4b45b5cb237d9ebcf370c2b30df3f1ac9f85ca78e1cfd9297b6bf889c5066c0c3d57e00149a97d742c7bac261bd32712cc66137889f963ec70a318c1f8a41c3a64797fe598cc0eec5ebd2246de12add8e20149324c030a14611ce8dfc395e60d8b8f978974d04d81e7e0b5d50d1bfdeae3c2fff0e650b0de3ebe4dfa912c26f5a326949abcd318e6a80d4060e5188c6abbe57f0d6752bcac0af44c03d255c57ec726ca0b
1068d61bc2eb3f819c6bba78ed8adce5a99b4095fd263ac26dc727c2c2227c94c4ed3e3bd532eeb617fb747bf7d64356fbcd29ce1aec243a36a27f89f206fdda20e0932e5f2526c08fe50120a6741db5c266dd03e53a58157960368d2fb9584c4590aa8635e02a784b18e6c8cd6f4be3d0a809ba9577e213c6111e3f8550c7d569439fe4d8cd8ca2ffa3948ccca0c9b19e5311b78b4dc6a46a8a9c9edb3744c0f08ae23d4f5941753809e1c3d5b6fdc03cf4a50dac4f953fc55718ead7c59055234ce67014ea2cc5ba158d6c4e6ad451f2f4c5271c36011caf04bd030fde81a11ae53116a9a426ea0191677cb282a1834ca9dd22e32a28bf04ef2bc6b584aab1db55a039d2adecda25a718b10fde7f744ac856e56eaa3eaf465fd88e71a9f41c26f35ac32ece64fcb239653e01b945bc1e5b0fa6ca0c53cfe686b606a010f92a4ea65872bbaef2a47cf9b794895e31f91489bb3124ad69383b66491e60a33abe19a92d9378c7f3d688748e848bf728efdd18effd42ae41dfcffc59f98c66c759e208975658ba49137c238948576154d8968db1402e160331cc765576eb8f6dd5784833970e069fb452966dcdb7aad3b98e5f8c730be8a2311e807534ed0d508987addea7d8b51d6e1329bd7976896f2eee1c54722db8bfc635a9f07a637d40cc81185cd25fa695a08462ef55429e08ad21f033a638eacbab7b77edb2a177c0d746576c40e22f41cfe0cfe4fdcc9f4021b06c340767cd4abd882fc9d26d6e895e431ffd5987906a7342eed68359d4175cc6536b1f2864e3e131be49f50e8dd3107b9a65703960b5579bd1eff61671bbb0e72380dda9dfe18874f1d20d119d70ec570deb3879575f7bbdca6bda3d85534c964b95f94aa7e638778d1b9132883725921a1584790c8a621ea98dfd7a73f5fc21d1df52e07968f9a3db703264a3b778d537855cca1533738416478768b679403b2e80b9c6669c6a962392392ede68b7d49bef2bb2b9c8552ca55d5ef9f8f3482fc0e54746d0efef18f4ad7e05df6798e886e239bc657c16ecd848e11caa4eb799bd35d3dea8d68a3cdb8de17ff805765b366b9c67617978c6f9f9c53441c3643c48fd5f081524f04945587eb57c2ae6ae17aee1e1ef213ffc8349a383f421989b04e4bcffca87e62b18839cff6342b5df1c486aded58253130d9355851755e1059420db76c0d24c89ede19757b27d0e4abd1bb9902ad9d4605479029bd25e7fc55edd3280e8596cd8f3164e1376fdeaf6f4bb1766f87b502ff58856b13d2ae0a3a384c899183f611b82a81d3f48439ae02087d1eaed1657c6b595287419280ad87a4231e144709deb39b5b2bee1f163f6c7cc72202e076cee2dfd958b2c5f4b52a00161e3b802ff48064176a0cafa1e1cc247b01ec6d753635a45373314380e5689265b02653a2d0e131c209d6bb124a06e64fd0dc8aabba4292259c4bff87c2efd6c051e29cb963de012d449f563adfbdd7bdd534f5a64668e95455aabf7094a0646bfa27e6f4d8f28616dfc04a38166cdb302b74ac1331beb73a0340e4492577e638f5b883b1f1f5dd6b98a5ca56c1bb92ea93a5fab08ac82b8679d83aebc2e6e2136730a51bd3dddec7877be936637c74b182656e5879db81d96eddace4f06b47f01b416a2e7d8d813105fb1afcd8b595e312d67237c9ea874cafab63fb07ec4b226d2ca460be0ff2cf25cdf63b4fc1b66d6c16b7122305f51dea1d8324851a925127d0253c4ec89ff4c999d7533df9cbf4135e45ed3386ca73fcb95617dea49851e26ccf1a549d9f81b13209ba45a6e6f6f0b2deb6cd47e6499469bf47eeb7dc7d61dcddafc9e13e61aab0f20c8fd870f0f658809c5a4ee49b24b81c3d4d6120fde71449a80551b18fa6cfc5a3375b6420848a9f1f049912ef215f48afaa97944a9a4e2cbb84e41f58d406b783e6d4907f33f10528412d768d8885e89bca05adede6a422aa2e08291f4500ac5158044c5c57a1ea9d8bf323249a58523c17105accad3ebdde949b4b89c19f3f7df7733e333e94bc161793fa44aa2289b60d1a3ff98d9886f7d1536417bb528463cb55de19a23946ef0c7f8994d54a38c7509fc9a7695102c59e52ff1bd86ec4ac3b1702ee1f935b38d2dc7d0a1db53594be9b5372da8b5a137d8af4cd369babaf6a638b277c3dde0331cdb7934ce96b9f1ca1a8302a024826512c726bf4a8e73b926e1bc855460b7754c47387c3a2a467f896b47301e80b743d1bbb5beb38de7c823f45db3aeeb7e1c379973985464814b52ab4b3a8775b24f3596d7dee7085d586ce267cdf985410733bb7a356ce34b069e6535e08d04ca696e01cfcb3b86d33a7f70fc8d548592448fac239ea83ce4de6fea04fa23d8d38ba98cfe366a1ae3fe7f200322d719653a8e7d4dddd6938e1ce4a66402febcd03dcba10c48c423f82d4ec9a2c737962e5be2bef7a0899715005652fcdcf529aea8bbf5238b9ee4e1c53556507f0643a1a9f01c3d510ee215d3518
c302eee940e5e1a0ef351efca9e9c4439836c954e3b723b36859062249e66d64e96ed8ed316b64d3aef3f4e2d934ad114e680ea1d1091815d5673ad3e23d0f3781b4f69a7cf56438c2e9f93a29d01844689884729021300b8883604a2d97860779c6f125443d11a55f1a6bff9ce73baef69481f08f66398dcf41cea1c2d2aec7402d490f71af427d15862738c95bd8c9edde36d3301d54f33419e44fdfa9301c54e23f846f5d5a94d03814e12d1c74d1a85b61844bddf28c60986a87e3eb0775e288025665e30d952e8b100e1c3b3e54ef1c6acf20955b861fa5baedb3582d80c488636e309e6dbe95f334da443d83bc8d42c610a733f91a3cf1784f090b44155c39656f1d15039f74cf8c1e257b6dd89884baffc563c7fce8ae7df4b315885f3adb92d5af4eacf336ba275420824e26462e6af57d8c55698e9f1ea0492e4f5852025fa69fbb9011bd652f2554fd0d5801188303b9c4919f98701e8d99342226f9f1993d78202719546810a2ca87e3a4e7f57f88ac4371bc22e270b6df3b99f57abfddcd028125548410e9260e5a3e33d12c21b0184a65953bf4de6befa0a039bdb1a078bc0036c547558af6420a79e57530643cf70638cfdc5c36f6f7a5ac33da473b6c8f66aea5f2fdde353b1e7a95104e07ecdf35d1a66b96ca53e7f050c3e187684a03d2f754b29c07f6ded5ad9451f81c41d32fde6c9e01e5c51363ced97157cd58ddf92578bdc8d43f214e6e7fbaa1bccd579730b39ab7db66b1baae902935ceae1e60940efaae2855caf0d446d70d75a58ae803019af90dc0839a1ba6f4cf75d0d8adb1e1fdca4121fd5789e75c2f4d1133374d3a24c202b11d6666b4d2f089c6a24fe9585a8db899691b68c7cafcfb7171c402840d548ac42d14ab9d26c6533266a17035ef9d41d356bd678d1174af5e66c9985fd895a3cdf89d52b063c95b2218bc50a8db47f557ce13f70188c7241e15eecabedeef31e500c217d51c0636d87909dd34e79afd171ed44d462592ba106510c2dd79d3cdba83878872418d77ae930da0ff42df47f5a1cce59d454b4129161de201d292d09181fd18be010de18bd09e97da1cef6480480d1d6343db36e86fea147fe6013c2b5d5ce2a2afb8c3c5316386785999cb51fb95797e7c4bd83b92ce9d42750dedb10b3eb2b07207578ff15aeb6fd1f6c51b15cf22a5409a221b2194ce103198ece25b8ccd2113b4f16fdbd9fd9a06c14920f89fc624b78c6348ba8f2ea5e50c5d106e56719ebb6061c4b8696bd94698c15e902f7520e7000d279acf2e8ed93dc4062217fd0e0e4f512114c7efea979bd35edd61613ccca0e205961fed996403cc319e0e294f8126fa61181093aa4c208f58493cc2a42f88eff72a880a62cb903600ac6744be7fae2b726e03ee8c5beac5397c4de1d9dec5fa4f22e25e8574029c56cbb2083f7937d458b9515fe2e39bae91acab8283e12ff09c6a42d4300ccdecd679c575f35e4fa35ee5b5765e8a2f6dc71af8ccdda356dfc6fb73fa558308f4452fb3f916f7dde063e119c76a30f492eeb3ffe81aa6a622e26bbff866443089cd4302ea76872a67779a1135eb1d91c206c5b51e987d1d528ea2a70cacbea4423edd65847c80c69bbabd3f61c8af1028bb518e2cdacdd4bca54817b86936be09d91e5fa4013387a77e308ad339c88936c6487caed3c5f830738386be792fca62586ea2cd7d65feaf67d5b787e19010840945fad40498bcdb6cd44f7180648ef7c521de73f87fde30a6a36a5659a982ead4dd91cfd70d419723cf9d2878e4f9e7b649dd97c15ccbc4895f3e49ceead1b52761d417ab73712bcdafeea2bbe1fa5c446a7d2599b003ca57d374b337771c9c5bb0f66e518b6b9c7cc673475d2b72626cf4cefc3b325bfc725d0df99345852b890e47840828b5dbdfd14aabeb1c52b3c15715b85eab49c92c8e07f96a4d2766916af152c3cc3d669cc4cacc09bc57dc189c1a19d12e381d9845fea123268cea73ab62bdf6ceabb51284ac84ef41a315b8797767f3dc588a640e0eb5527a75a11fda51fd32c38747e945203c873f8de57086cfc327bbe99bc4fa015c7ce3e3ab0530932b93238be7c75b1e77a3319cc85908e47172ed69148f52423346f10c4c2ccce90504b9c6927c3622ae608da329f51aa3ea540b464c74258945f6d6ccce628c77594f7e44ff07498e29d8fd54bd86f77af9093531f85462c6ea6f04027ea985ae1ae0db02327a1f0bf0e0f16d6e456d3bef5270f6ff0390253dc7fb4530764761c1751358c14f85fd322b6194aa67fd12ab2068d8efaa8d0ee458e5f7772f365c88f78e4926601318947096449250350e224c402af2cba5854c692dae3b93a7dc80f887e1e5169a3882c5e5daaa8e05d092c601627cc349bd10f6e4e617e221cf5e2026544b962d821b69c47d139f75a65e1af89f309d52a8a3b0ff2e39a25247bc6566dd2589d3b9669003c2aaae9041ea7f19a1bb92cd42c1e3d1c753806c0bc2e367b
cf13108629a4412352f1fd2a28e5e6191de0d14f19f5729f8496ab67d9b066cb69b782818cbadebd60aca3a26440d17f35631bb581ff272053965e4148dfc6e15811bae26584272a755cca23bba1377a20bd35ae07de440c46351f55fe06818668a30059bed40d20ea2aec936bfea950a1de2fdbfbee5d1bd8ec833f97621abbb890761a65361da83de09efcf14bb07ea352ce18ffe4b50323d61e08758c7863ad6ef00bdc71c91d63c6bafd99f921538beac9662332d3cc7e766b3c470b1d31980027a115b8ee4ab0930fb73da71d75ad9d9918a83e2ccd35c0a9f7600878277af59c5e52ba80852c80f2d920fde46f412ea4ff3dfb0cc6e8a5008a7dd5dc4124e2731aa06030436f16d7fe7abf6c3f6fac57ebc7297f96c3251f9810fbd94ae7674d551ee5018afc32f53d7b4314d376eec62d6d49323c20c2a4391c8ffbd02d9879130402d70d51e0aca0a3a086f929a265a89e8b091929505057c8664cb755e4da926839cc2149c6dbb6d412bed24cabc72f9371f632b6eb26653442df3808db19811b2d60003121460ffa0412c878c03d40f21135ae46027e5bc889fe537de8d5b5332d8a40535e8ce16a8d59e839da2ddba1f888803141676b86a0c899702c087d7b5a7b28dd3e2a5d4f98da5eba1a64854ead33ef745aede5cbab6a45f7aced600f44d0a80044d949932d9af53432e54ba33e66db11aed9be41ff1d7dfd9db4dcb79531a845c6635666f1f740a837f2876f07d098f413420a87e6788fa97046e8507620bdafd534e0e345ddc95a92c0b2b01a717930292f3e1ba1eb9e2576f0157919fb1706d781db2205b3c059e9888e799c81224c5526e31cd2885d6c25866c76d0c5a3f9bfdc1e7f78169ea15f38928d37415c26970253676de8eb33ebb62dc44336680392d1078daeff8c85ff0537f9ed7f4edeb8e8ed8d70c447ef5477c643e79bae8407d1fe45585827888b67064c2a74dc40359a35c40c7c660d97def845cf0c122316c3aad26aa13f912066cbab369c865430d4db8ebb23af30ef8ba1287aa881899cbb13e040731b22f3ff3a27850680641e2d96bb9ccc4bc5dc5ec6e08255c1becb6025d81158d4c487df3827dd28a4c18033a5acc42ebecdc8c35c157edf9d8e040b932cf563fef43425f9816e7702deaff1703382d869e554f67d6a0eb6eba17dcd2d630ae60a7599a0e8f86855873753f0e9289a812098559fe08b69db10361f3704b8e3c5f2cf21a7042d98cb04fb71b56bd465382cd840cc72ea1752ce37d16773ae340a1ee8e5503eec633c5dfbe98521b84cd32a10af8ad0c486505fb5db3028e77eb5033e26ab1d35c6b3902055d5bd5908921464f37ff61d0431f97742cdf8e584e58fd9c6a0f05368d26eae4f5c21a638e5119ababac981c42ebb675e71914d591b150a4583a7f90595faa0f9ee1f1570c0b855cf008df821a3bfadc328a10e986ac68c9d7d99bac9b23f09c9338bfb042cbf503c6568128b7ceb4e1a41c7a6427830eed07787a58c5f9bd5031ce205d44c3fe0e0f3ed1551cc0bec648cf10a6cc25644817e98aec4a5f37a965ad58d40a35ebeb38119e4b08705b42160c00cc6e8ecb0051e8d81d2c8d8897aa09f3101a17553bd4637007b000d78947fe94043b2e81c994bef07a4d9d302325fcbd34fd1a9e64eba91b4a630dd6e4272f3a8f792d3b8a3953fee9b000b38565d7d5b33ef523c0d8f2dbdda9c368af819adf2601149d33d48c1690e754b85cea41ae8c33b410a769d7c5d98d75d2a7d75280f6ad2bbba5a5b8f8bc0335ef0c6da524feaeba26b18fc453b71e6e0d1851391d7cbcb0a4736dd55246ff8a52cc8fc0a4b022b4982c945d5133a461e600d82b98bb8565383335befd078cdcff6aac0f4ebbda71057c3a1dd43e05d134b5ea3f82dbc479d5a10d2bb2b37510d26cbd20f73b6484640ee14bce97ba92ea02e570195506d9b34604319d23cc1224dfb0964af155b03ff750cfe834b4aa84c164b6d36939c2f37f1b32d0f845ac420ce20e9869fbeb900c9f9fb65fd5d8b68f6d0a8fd9d6d4f675603a8be170a4d0ed12889597467f5e346aae25165d002b55b55b5c7ab0e44c9bdd1fd699556c10daab12b046ae74934193f1eddfd39257a1449a131108f8bf41e0a0c587795cb2c170a59d9618501f2168e395a7ef093138dc510c6ef7b73fd6e61a2bf70025b02dbc24b5ae857cc696d6469edaeb9f026e84bf73677053437240992bfc21d238c29178c10232f7e87352c553ee38e46a419cfcce0f5f2a9a08f0096f2c54678f6e4a074f6bc3ddb44e6e58f9bfa2e4ededcdce47b6be8d397dbc52886d73ddb17c67226f269d41e961929b2cca200afcd29ff3b0dba1233e931288513685e2ecf0bb71be333cd876a877ed2b65be0780a5d54117d1838a566a223adc7f76c53caa9877021e67d9be47f14824464ee76ff43e7082033abf0f81c917cd529df8c4c16863d64b194006aab7d1a0360771dd4b7d02a5ac177af6d2
fadbea4d80cb6742bb5d7ccd887c0f41733b74fd2140fef0f8c563e238fb2cc01ce663a45764c83dfe658f4761c2ea535e714b2ef00c86a97227ca07a73bd5a802987ede6d9b96ab761ee4d71b312d87b0a8323ce8935bd9538424484b0c685cb254220f21a44705a969ae557bffd1b65ce5db52774c376acdebcd7e2f8e17884c1d63a55b74e39488d5b2a4321395c70dc514544b3834fd141b06793179f2d54834b52d4ee16a65f8dc5d9dba3fb4c83f9efed1320e5f44192bd6e74b261cb6f7c735c0e3ecf0be617b399196013e37e204d73a20721d768ea39004968b32cfbcc2448fad48595181767e7644d04629b61a16ca7026ff06e15b18279f83c5fd8ef5f49cbe75ea62d65331faac9c58cb9c615d3e3fc491bbecb9770371e4f61d19861efa900b73fd25883841724c0e9d6294bebfb8d94621b8f5db3cd36c2f3ece89cf7ca5063226756fb52c637fcbec0e63ca19a21821459dfe073096e6fec5ccf130243a67a7dc2226f8ccf5989c25f18f1216e06654db6648ef0719436a81864270a7ee3e3487ca03a5e0a1845c17af7831d6de86858931e870a470a7fecc48d3e456694dfd09466012c52c448d139b5d54a67c61cb8f84242e8362d921122862b0da21d0e0fe12f5e0f76976f57d52ef32a2b3806fe196aac320d5b2f3310be299f6d20b68a98dff84e4e1d70358b0031d5311520693fed02e7820c014fa39bd3a40a3b3f4d55d306114f7310293502fcf72020700d54f6aecfd23108016a4146268febc3ba88aef8ae2fc8cc7116cb0df1b29febe787c3acd1480f9d14e14409f37bdc1254d3c61044eb57d2c5be7a607bd561a07cda5bc21e06f358c9046c375ae85694a9249a45dc4dc4c82d7a3eee044f9c1c78b90e375b98a22a9ef20717f826b5f78183e92ed252c243306f8afb5fe72dc1db2f3b284e52cf54d496a4ff4f3f5297c95851896fcc3ff7b003cb0a3a449c036275c2b9cad78877655e8a8a600b15b30e0185c4c1897b51fadc0577d2ba0ea4aa976f50853062c226e614c747cb5da4b414b88e32488b6be38638305721050a0fe4ce5672952f65eb960b4b5f190e510609d6acfe3e6091b5db150edfdb2085392d6b152a83e4cf0e8e44a957deb483c7544b6c682dd6330e6ad03731c717406291776982d900dab8596a3738244ca5f84e54c06bc6dedaf02e2decd0fd74c5beac14d3c751e0e7c87558fb72a6ff3b7aa9bcc27f69670b0b623524119eb718c527b205cf59f7f32fb0e4fea969ee7a6dde04c7964bcce49ad23c74fc546d5216f2ba9ddcf483e9f734f09c27f3c4bf07b3042397420366fcef0981246c71c1edf6f7096db51fa8529e95a337cf4d3e1ef4e1cdfef0f8ebbeac37042a4a296cf2eecbf01c3ae85ad66481f98ada53c8fc8832be6329357665faeb2e0f5a45d28f8c4f93c3d548afb1b0b2d855fb3a2f3e6759b36c41c6382880117f00f7be7efe00c06bc6d15984f972ed8641c8e7414b8266626c476d92a6b22962a71f4d200279b21f37246dbcde03d3b4810853cc765dafa6b28356cf06e4a144210397e6a3ec1fee9bc6e8c187f577c7e9e0ad1fad93bfd53e31f75ceb8dc77c33a5f6e86401fc0e5adf636bf64c84430b3a13446e39d8440b451c75c9dfa007b12c6eecf0e1e3f2c084e9c6a009b073506684c7be61b1245b2e48fa00fe21889652076d6112043455f8cd3e4b6d953496ec3e256d91f737baf2aff3f936c7b20e4a22a4f0a01be6239b48a5d813257a705ba97a2cadeea8683803c0f2b7d36c015a702cba604c83ec89147ba4521a78395ffd98b48631af434626b3b53cd15c7fc382085549825e822f8c45b39dc08d438792725e0e05edc1d1c910b73ca5ed49d8ec0eb7cdc3ec6b8b2b36778548d4afb59d7036b8ebd0e4e753c918d4ccd24987cee44a0a4cd55433765d11aaecb409bb74f56542fb18181e3488073889740b875628608d45984a511935d849e2be742fbb2abec5c104fe3ebbbbfe650b2a02ba1cbe950a69da179fa32265c6a3d5d8181a3d719c4a56291573576660de44237a2ce71acd743d9a7115fa836edb65c8735244433d816cff91105b3c709dc283f28d36ef32030528666d6a023df1c21d696cd0b1eaa27ca62b12b0c58c00b156d859e83ff73c2fec03f6e86c28922a385432d8e43c1a90e8e213f7ec58a51f02b000edff34864931aca89b663c759830bc6925d4b4b97fec86cd88815b917caa727acbec2b27781ccd67ed02c40f2168f15d3c6920690e0800bd04222b0158a7751c133d659908ba588bb76ea7c891864be83581c941e0a48d03546fd0fa30ce4376057ac090a0ff922aa354df387847b4780db24229a3a5e2030de986ad2624ce2e12a2b10021e10e5111787d852bb9a0d869443c32e4f04534131df816be1f372040ebe3515c26ce6aaf5beaabd7eb754c2eb8e949b0c00633462db451699e365bc5f78d41e80becace37d89ea68c347c7ec7fb1c3462f6927259c569b
16322f963b5713ab846df642f18bc09c327b4704debcfd770e7c8de20a972242d8c4723ea5ce8ba2a22d7c75fbeea8ddf922a602accaa02c1efc16ee26f7c697d3291d51e4044eca9e33c2dbe58b405c07eec0cc792a09ff70dd3e9be5c4a647267da50c11a062941a0313b48489a9e8c5a370124ca1f8809ccd020f30366f8c0c2fd34b61a76c9e73850edc236212280f50e2d83c8836180a5c5b1486d269e41cb6cd9994a029013d41d174c1e845b143daf516e19819951617d6c2158ac9f02c06330077371e11c105d6ac4d0d70986319407529c751562404d0a96329f54e40caf3828e835782a3d0abfdda192f4c034620703b0061da52547ee55c8d92805a4246e4414886824c5542c5ce4e4085ee4a63dd552207c0e08ed08c53dc011c8fd528968453c08f7764308bb3a4674d07c1fbfded93443e8f6f437d609842970e85870dc29dcef3d86535f97a807eb4061e3a9b04c0c58a27c002d87c00e0bffd42d5d1cbbae1de7f30382794d96df6cdfef0c7cf367e214be0ae9f91b4555d0033926c5d16c38021fbf78777440773251007f38e65f33eed6a4b989214c177f0f5a3753fcfb8271af3a26b8e3336c959101844ebf9e93e2cb64594767fd009820d6b4ad3137f8d345fa17f76952fc7cd2dc6345b9328132747bd8c22bdd9f94208f9e87bc7c8ba3b6070f871dda3fc1377f9cc73bdafa01ce36ddef82d5d8fd450048f85ea4d67b332c0817857c9a42a1452f50c4bfdaae5f28741f7dbc2a86afa45decd5fdddb00cb6eb8f2ba112eb8dad23905b3b7a6eb09cbca5bbdf5787af74c7eb8135f4b5f4ccdd1c1f16e65c2d8213fa41c23522b12fba916d245ede2485a6f663426f39d0bb05ce89effbecc1550aa03c634150bc8ccf467717518fa6d0895f04f1ada8e14eff58900ae76bf727b341f2e570ea9243f04688f9e542d52e51bfabb51eac7c1a3349bd0b133aa242e30d76fc075d04f38cc32210412adda87fa5777217d3c708cef973d034ddf406e4a1c87f36c60a1dbf97cfbfdac593865fdd76d550292b040d527872b40e609cf42d12e7b0d3305b8234a9a1c85848b81ff97e8bc0c365bdbcf98a5c451b6c5c7c7a9a6f2b5c2f92c0d4bb630ee50be669a04b3fa499d0dd15c798641639b17ba4ea90992415ee71381fcb3b0a0211d2125b244366d459d963af1b4125a3a3289a6484eb6088525943cc524cd75c50e47b1e0079dbc37c62ae18f868201d5393fe3cf5d2b80a17337a4b86a47ee14c3b7adfab9bfe3599d5c83c4125116737ae8447962410fbbf986290525589ede667cfce59d5c04003bbd378c01246fc41cb139649ad0b21f72f4483d6bbc0070c05a0b4c6e47d3fcef7f25e38b472c32d633ad16c48574960e1873ded5f56df13bddbfc0dfd94e8fdf1a37863028eae2c4716dd6a06ae7072cfabcf11ee94d0ab55a12ff1b451e93a3887ef8fb75f934a06ea15ef00531ba3d642576521836ef85f3a86762fe1e8eda103bc488145bba7e0efae376790a22cd4489391d546c06c3b44513aa97a390e1abb624860572e72f284676ee478cd52b0679063b8a0de77f84c32110e66ba915e227e80a99c6bd4e01f7f83c93aef7133087aaf86f6a7d9164e59502d836f4eefc62f9b7283d2e071cdedecf9087580f521682f17a7bce844c0daced391b92f33ba7508169fdf3db0ac8200c9d7be72c9925888260e2890bbdd3e6008501f5850f28020cf2f13ca7ba0b12512baa4bc400941aca070c90214a640e82d1256f3cc7abeb6aa87a1630cfebff9390f655a6aa408b376abe96fbf63e03f287b74620ccbdf6a0b0c164f790fc307c7af488949aad0d51095f27ecb85d96223d8c0885b16060dfa8aebd7e553f0a5da20b2d423d5833b277422b5b1b37d300e9bf43019d66d5a362b4e9a1ac8578089281a0634d52816f2807ece223dc4548d851c2ed251fe8ea93315b0e79b0b2f738a01bc8c112ec24dd5e82a78dee13e3de273571d361df70cbf9e9bae30a5e56164bee36a76225b235235a5357aaa840727c621f39f2bb9bdafa799ac707224e91cf8aa1344e335388038cf6436b856d74cbccba69f1e6e6e270e36156a051c9e5c5285fcf76ff0240b7eddba9dc993087579c363cf762902cd165800129687413c3412223e6ebf27f4ed9cce709cdc65e87680be5ca7db498a3d2735c5406dcc466f18e61dbd269949932203908985ab8b1923fa5652197c49e63ae7fa75ee23106bc479c23014466c7625b036d3621083661a6b0ebc6a9c4e0ff4939a534642ab03a4f45931057de1c8a0b7c80c1bcf4f5c60c1c5c4faf2c076717623dfa9e104d7365b202ad05e04570ed2d5c6011e5b617f723607830ae0e173f6a04f2ae3b063a6e22d3787632ca659a9de8494c57c533ad572a9faeb395ea0322c46c55e039807d4dccac349cee6f0042a793fa5ad5145541c3f2550e4793db2366108655a0707dd1bb4a95720acf28470f3a4e8b389f8206039f297e3583e
a3cfadb3c4c8ece3c151fb60ca05236377682b7c12153849612adf9cee155899073083d1507d5f6f444b247e2718a5eb072d51ce4221bbd88aad13e2fc735ae85fc9d3b95ad2cadebea20a1be42767405a134b7bafed124eb912fdfa283359c2f292c7e20d9217f2543503ddf88188bb3f54144c4ebc1d4668cfb96e512af10cc8dbab11f075059435c0847732bec5c37ce4d3df5d91417892c34d4888d68257c247dc761e454223302a805fc16a6192059ed4a3741678a90d9de2cca785d6a94e7ef2fc0a5169439312c130f8737559453e5f9bb1ef8934240758d9a7d824d81aff770d00e80b75b4a6cc1d1349330ca8e8eeed591475cae7b528eea0eafca5a5d8142f4ff5b49e2f02cb5b97b1153b2e7e994f5699c7236ff7d74c87cc2ee990ff91cbc6ef6e5ff8601f640e9191634cf7cfc34900dd1f326b710e6fb05c80e81089718dc4672bf6db5a615e705247d2d14d2bff19711b28a2ca61359cba62911f8a75547fc95f2ef0596f0516b5425bd364c616ef5c022e845f5722cadd93160901c77c1066ea2f163aa537ad7d52aceaa89ce4610cf533aa0f0aa7becd9fdad6994f75a23d9dee921737da9a5ff6bf907b3d44f956cb015661ac3a3cb1b3d34d00772c434ceb7fa8e0ec64f67669f3fc0154b3d1bbcb513d696c03f05e7fa30fe1d5d8a98d0e18f269ccc43809dbd3d462bb16f5f63963ddd8ca970ddbc533908a2cdaef42795e97d8e3d949bc67971a022edd7a742c292791419de353da1dc0fb3a46ee0538126b4ea82730c9b858a36a81ab858bbecba76ea9bfcddc6fef940f5d8b37c1416c8b49fa84dc009d3c94c35a7a420fabe6da89bb0a1745af9fd3194ae65e394f3c4afd4754e6947603d134e3604b8fd693eeba6b727e46ae7f071fcb4a839164fa7bcdaff20166fdc9809ba870a40c0ac8a101cbd83b04b554de2149714d2ecf9d64a9c4dd7e0d6ce3be3875551548042c286edc9ef3eb6ba0a7bac6c08bf7cd52ebd9ebfa9e72a333026b081321846bd292ec471e56e8effd7773b553cc78423ccf543da2b19032c341a73091024d9e50964da077827f45cb17cd58b7079980f1abbd193eeb28e5f3920375730df98320b315c33326b0e68f4d8aa2bb0a1963a0f0d2aac42301a862fb0cbcc07749e698323b4ffec5b4cf688c3345ec74a81d4ef81fc8d7f26e762fd1fc73968636b50fe28c8e2ba22e042e921204ba9e4c26e7718f99fa55bcc37ea5459e09fbfc08e80f6435c63e743dd46a3f26747ef68c232efb00d71efa8a787aa42dc1ad21b08b445dee75e46472fd127d0fbeafa4c1f64a2b5c889e73b0b7ad43cb1d114c3841b2b44e60eebbb586f5552c4dd8ab043a7f9419379ef7a7dd4a4982dce79b38da5a9e9d64152ccb83e2391cd8e349106be97849a03e16587d84471fad128156bd9c27d1951e3acf14cb595cafe654236d97e6262374dae523f48c95a3c48c53f902fe616618c1c121273e49b9662d97afa23ca2d4caebcc5beef74f41d2bc9db5baad601248473b453bcd1cd3dd9f4c457b7a106aac8787a3beb80807574b4766faeda986502874469f9e669a40afe1764a77de64f0de5a4d6c9cab10c5965226a47f79a0aedff1f9a6c839664d85e3359833de2500b443c921e18143162a9af2d2079abfec5e6bd6db1ecd8ac49fae1e2749511f70b68e999797c5da87e63971e4219f319b8ddf96029c7f25caf6946d96b90b72f28be5711ce13228cda7fb1a8af486d3127ba1eaa9ad7be8026be0f76dc24d856f21388f73221e7565eb757a7be29db50273c0b0790fe015f41bf967f6f3e5784b6a161c96af1dc817fb31e7a875d8fba2e2bfc617bdb5c9ccf124c6a73bd87429de4de58f470f6a8789049f03abf0cc882524fcfcf6d497024c8cea894bda0f7106a4b16b26ea5f7b367c77e647aff23ee66e7e88f64e2dfe42072acfdb3e869932601bd7647f92e7d30b8338d38f9f8f8000dcf09de07eb60ff4f6446baed4d92edaa858c8cfd0f7bb18736c6fbce6742fdaffa04bfe816b38fa7b91cdd3fb4ee392f8ecb978f812e0908a7720296218322f87a193f7eb45f5adefe1d91648dea2cc0f41ba9be15bfed6730a6b0de39653bb3fdcc5a51559fdde955f5cbe4a5b410f22fd0b723f0abb604580b93fcff5a176bfa23b2354dae5433353f93df8899d1df2bdf8ead0dce347f01697dd7993a7620d5d99f681c1021f572aa6d201b28f51a5db9addbc3261b75157b11028a0a4eba2eb39ea1e4ee66beb202382fc273f08a9c036d213342ce9cc7e4cb6e45b180518abd7bb648da8136dcaa32cc385f01ab19738aefc2e650810fe85ccfb96718394c2ba6b477620d09481793a72d0a25cdfd7d7a221b445a3df37a16231fa28a53496d8199209173b704ac435a95cbea8fbd8af137ad44fe97cb2dcc64f53102e779aa55dc50c31c5e137ea1b143fc69668fdb0cff5f58df2898590a63255625c35a0087e77cafa4b985fa57cc1ecb31a28e
fc4cecec891887daef5b87030ca2b98bcde30f1033be0b742f0c5dbce5aed555f96f7f73e588ff99e0687e0011ad4e19ff03995e1de0fdf5d6a8125b72f3725e23b0d74d0791e6577f170e2b766fc0b1c21af2cef24496afdb6161693a0beec234c167e387b413039496bd6ca4eb639249b388b8dc1e9643b2528269a5a72dab96789183642b85021331f8ca2e5dbea8a0408403ff21867010f2adc5861dd3748d9ec6b0dc12c936637cb76467c88b3096320bdee1103127e62c174d2b3e278e64add0b224a57f282b710ad5cb3751b12d97a0fd63fd2e946c3b0283033f667ce58dbcba251dc09d852b779ec11982f65623ec2c292f94e1215467a0ec5a9ce43a47856f87f4687f7ece0fda833fe9675bbe0aaf6d29c27154cad730e65d143f04a094b1b8a49857ab476f47c21bec2665e97b6ae1b3705840358c561c5d957ffadb25178b7ca445bb8908fedb571bb7a68b3cba8bbaaf6dd296ab97a8d360f1e488c3370594dab8cf44bf140253479265da518c2fa3edb68226ff00ecc47e0b09f8f9a4f3c9114f81842dede81b35713b0e4017d5e25aa78f3877dbccca97e7dc782cf5461a6501af7d642dcd80e5a137bccab2cf856c2806dbf92843c9802a34787c40750c17e3c0ad566d762684208abeeae429ac1798bb7ddccc58625508410e32d52996d9077193dca99f971314557d83048080bd8985b8451f872efd90520d47dd51065457881e464b5e46aa38d66158d29d6330609fd809504800ae3a6e2758d37ee91bda0e5c4392b39c8df97b0103db71bb6736301a3f99cd7ca0fa80eee1ab45518096da39488b3d6effc8cde55e1c1283dd602397ca8c47c6363c1025f62439b48e528399101f3919114331a44cba1f2d28994d5e938bb33b1a0c7c7af51aee39db374fc813bf125c5eb28e449c3c1a7ba69e8791055151f067327ae98dc9ee619ecce4ee0e61937564061dd3d828ef233c31206ebfe627145d717a14040e46527de106bc0d1aba04f652b4bdcd0815b2813bd0a4bec65c37c0fe95877a140b5377c606fd1c01ed1944785879e4b52d4a0cbc0fc2f94ddef750636eb4d95c56db9723c2091196dd4c729777c15bf2cf3b210846972f351539dc58c79b69e0828b6c87a742824be34025ae6fc3991a5daf4b50c5faceb4685711cf1a837de51e290ed08ecf77bedf9926ae14b223b95958cb597dbf6b3952dff1c5bbfd1e38daf13cd28e28e049f241de18fd01dfb919a4e603109e0c1e50418e02ceea386d0976e946043432edda3d961433027a9f4e6fcf64e42c99bea8e6b8b8757f23652f2a28a74ddf0d38ae285a9eb9af738cc153397af6db375dbc5de685a81eceec17e63233d24f62c2f025ee421e7351076170eedeb199579597d652030980f1b63bca8fe3c7b6d4e04584e9789868cb0734e41ada04709c661f02e26fa07c3a6d18d4ac4a567faddc60cc2d036f31e4d8f78f18267cb472c48d1181adbb12c21e9da08e41090cdf1abe5c6f1aed92149e72bdf2a961f0af983506cf8796a48b44a3924b65904cc8a43394b0cbab42490eedebbd22675d04c4cdf028ef6ddbfddf2085a573b9e92d82c017fa2eb5f2961b647a41b25fb572a2eae99fd2c686465bfb2078e5f04ca7349623153df895962242981e5a4f63fa4311f4448b71be1f14f1a43dfe415a4c704228915d2c647a6556ca80fb0154e2e34ae1bef641fbe7d426e2ebb9543c5edab5234f9453fc92ed4a5cd058215095b0f8813e1d49c00d262e9e76defc4fca94891e165c3737b96b0db224891b3d4e859253885506084bc35b98a6bca59ea3b979ebd575c436df440f628e03df713c5034877f389c55963e9ae60edb2b0a887abb4462f65fc4db6820dbd681657b48d0d521731d95c212dbe32c2a6b8a2e9a0220dacd3c2a901b3375d55f729a78a3194d8e9fc5273975f86937043e14cc629e8b92aad274811d7daf87674627f8b6212186e564099c9b798b9c45bfe5a4190baa3c9fc5dfae9124fc6d55f79e17b47a8a5c256da4d40508683394174c9ef8b85571c92e4fcdf5efe38e8a4ce45cdfa2a39746e195a8073225d89a893bba3e86801d9b1b7d8eea626862081e3c868fa0e6a1001d6c9e58fc3669aeda4b3b6c289f6ef590d69fc29bdf7ffa3dd2688fc4b78caa091709c22fc487d8d98fec64df00e1207581f946398c5a020fa28b001056a050b247014811847ce93a201caecb3e96c803901106bb3fdd2a44b71b33e330f1bf82217b2fdb7fcbfc0700270e4aca008aaa3371801207ca0812b04f9a2b901b67d336355e8a5d65a46799daac8d3fe497e769a8d2c12f81efe3b29c30cb9cad73c85172f45f90f925aed04521483a62382bccc1f002ad2bfd407ebe08107bd661e86fa7e609b011c82878607b1d345f98b213d2898149442ad76a14923370f9b946a24c0ff67203b03def18f0b19069227d0a2eb31a173eb5fc5b2b868fd536d2648fc525e9fbe660d287ab3de9782ab7e3cd3
a517735bdd99bed287a733724192eef29af00ad8e8382ec99f8e8bd03f364d71f9322525249bc810d646871315c1fe9522826749d77d3f220252394fd86eee78da263a74f368be03ceb2efd63bd38e40d137c4eb8b2ab7b161e494777fa6f5d38b79f244232722fa616aa1a57b6f6765a93d93d21e63219c3028b013e6958c5b2356d53992b62f0ef84d9e978142623b4df6b5d3960671e2ed05f564645b77c75bfca019c7debe6e918caab8e8d5c7ba622b8c6d659071199dd9dc553adeeb44294508514315a2448278ab20c48235469110153f3f18ebb5a57a5d655d452917ee4f0d66c0e154da88799361dc8f2ed0c2ec67f0166b8b2928214b946a7b109b375519f4b568c1309a3089aff17c13a46540eb38e77ab1b0c23496308744f1331560fe7de1030975b47f7841a94ee1e45fc5fd3a0ba6c6f4f5accbff57e11d7cfeff5cc0f66a37be75cf5888919233dbf8598f269801d99ec10669359a8500bf750def74387287855f58049b7cdf492fa772abff48041d21d04886ce1ec52d3629b10cd13d3835e85d5238ea16079b67990f1f9b45601f46a28b1bf4af6b6e816fa4bb34d254dc3f50efc0ca1a44a80815ba3613e91d7861f674d19e36cdea646e7eeed5a81b608ae5d9d00bb4a86519e218ba78914ab1d1264a8d8aa0732ec014c64c2b2e690d1ddeb1f84f0d99b6914165dfd5ed421367a588cf0d9b86a59b16cf39e63df9b081a1ceac0c4f0b2247b7483baf33e33a7afc802122219578b02041524cfcbae4b51b835d43f8c74ff1522f67aa5dded3a2b16e01987c4d13d40a7eb4fd893eca59b199b7d07f447e4a35b830a57afc50fd7070ae389b5ab17e5f85510d4250ac7c1fe9cf9cc717c47da41f33ff8319c5ea74d6a4995f05109f7d36972803ff052986a5758cf69626adfde776ee5b7cd408e148f198696f452fc84324f0d422c077f2d3a8270e384a6392b584dd331a679a00d826e84739ce1c23341bbd81deeb131182002856db86a713d56cc3d2dccda0b1681a9efa70e4c5ec8c4e6be8a3e2ca5c9b1758540c1461dbca7546d4571eca2ac1ca6bf9472ad376bc1028302d3ca8502592291b7e56471a46562f6313057ac5d88cb1c3143c373e89dabbe99297f0d5c83fc72c0226a97b0abec6f606b872b150d9c72d92e10cdd1c32e132c02d73c9e131d3999da88ef1b5b60b3a52a04ddee9ef624ba1114538a4b62d68f47099951033089a8444fcdc762a44851428bd4cb4ed0ed53378ded9965073f6cd3041263642e6352591b8240511d69f7b36912d25f97d8383f05881284ee7ec3f0ce56736800eab9ba1f7a6734b8bc9947eaee0279784be1cf9a0dbdab2f4a1bb3ae3317c74cf2983cdd1b317d4828ca93bd8e8a1e98a942f49c7910438df933720e9146f217022ccd238f7ac41e65779f6d4b943aee613b1c80f220194d622e03f201d71cb63b19de0c3ffdbd8ea5b32d24b80b88a93f871982fc4d793c35211d69191367f46184d48693b4be13eda1704a9232acddf70e1fd2847722b6f70358517d27d544a6ec6c59a8bc1ba2bad7d9b99287de6adfe51dc98171be2ce880efd5a2b6b98e36ff2d29c07b22bdafe4df303557a00f233f7e52170cba83a9dafb95f8438b6f66b3140fab3e4fef3c7ab7c49c9cfafca28dd675d7f6796042196981b0d3dad98a512c8d6e2c7b212dc98276389719c1ce6110917f82fe1d9bf9d8dbcc0b6984f1a19d64d4e7b6fe48fa04925335bbf093e97e046e77bbd3546584a336f99ff870246f77c1965b893fd166d55c874a9746e46f78f6f4b42f67b10fabaa4979962bef8f42936ace06687df3a69272ca14b528023b422960cf3a4eda3fc9c0ad0ab33fa99c06730f1a67dc59d8f27ef70c1dc7fe4eb8b1db08df75c639f767d592b83db4c74fdec1b5471d225ba76d1a304c8a89aec907a260ad77ca16b7675600eb4397b1e624fca070b8f4c9d314f0013be2349c27e9b6421e3982dbc5a3a616f1a5b492fd91193ffe421ba935044ff007a67c7bc37521c7c9ee6c5f1fd48aa08ae892ab8287c93429f2453587f22e99eea19df7663eee8bd8990a08f1b33c320e3988f0c7dc6424f7e44b68cc8a726312492bfd3e5e03b5bb2eb7961ea698dc04e2ade31e54ddf344e7c1708a79fe8ca087f261b30875b60ddc398187c18649a21c00f9d8be02c041068080272411fdbace58b7f7f147e05c62f52af80171be1177b8e5a23f685e5b9e60b63a51dfc7aa3e9568ac02ea959e5f437b8b7f1cb63c0b8b16bc93c6b599ae576a2db1e52c043fc59002e20d665de33f46cb87b76c526a9d1f2940480b747b681a7ff0038fb3f1ee75a41e868eaddfa3a7edb4a7ab0ed22afc714e156cb083620742d2639bc911ff6949943bf7b33334e82d89daedd1c4733ddd97bc527f8c435d451fa9161e5cdf0f6d0e2f55fc5a63e1fee85d17a42b3fc4601f0e333a3fd4e6644a9f080e103fd4c5cbbc7f98a366d3a37afe724fb051f0b497
4e63ed46667e8d18756c566dcf059201e575a103119dbce2c0827e64df1c4af4d98d0e63627ec4afcb3c455555d93d499237ebd228ab37fd66afba9eaec7950b9f8e2aac030107edfc0dd58bb02151034002d611f5441beb63c856d00433358781cabdf0cf02820adcb3974c01da78a9abe558fba05d1bafa70ca80c9ddb9894636a48503ac4ba48f03f4902ba1a3d290d8233e8f0b2c1b0d0053cb8b6642d59d05abb41f291f67e5958c6698ab2a9b34247f3441be1b3af925d8e43cf8eb960c656f2151c5aeb30c0f0f8bb56b40c3d9d6987568600c7406a28bb5c33a27e463011ba7a121bea49b6f82658cd41f257cc5ba4f3a2868d577419e560c824c34f817f9dfcab906a2378b2f22824e349fb3d76efb369b92838afd315783621b339af912725e710ed73f8701c0f4fef18846a5bde34a883d87424e38f7d8173b2007bb3a3e05170e39306d70283cf1d1c4816e76a1b4739ab466dcdde55a3c162ef658024fc6de82aae6632b9113981f5f16d370c759ba3c258de418bb18dfb82a92d1592bf50c9e8f1c5c1e9b1a6ffd7493ec8c564c9730a364c1b95d2041646872d576a6c438cf9b3f16e3b6fcf9f9b535fa84549e5ad14b4f1727e5e728491d3a014142dd6e0bb5806f27616fc4a149348654debae9d66d1f929f8001c2e3ac1f2e4876ff39a27a6073eabd7ea3f12959cd1513aa655deb76e30349df57c04e0e31beb41e5db6bc5b9e2ddc1c7a88944555f4d1b2182ef6f01764d62deed553ce1daa2a2081e0e403c270b08e5ec2f751461301c6ef902dc287db06b5bfcfdcd11422fd28037f06f6abcb95eb57439264f90350d1a3c5bfc7ce087a5a463e9c7b4c06e76443806f6e4eea81764813a2575712a06d9400ab9cf9c40fcd38680c2e2708d3099dab4f38dc02ef92f3bafd7399e81cd37f312d0fea5e2cbb8c7b372c19ff832179f7e4e82cb1809315183557f6416bdd00c260446d0788e3a476e6c9f3fa0987ed7f422db88dd641129c7ab3d346bdd1e6578e3bd2c6919e3f8ea8ee9804fe7498ddb2c219604c6f84423bf44414845f6e3291e7ec07a9bce420b2c599b6581ee478781e05999ade154fba600b09ac56ac58f02bf557b031d96c80399e385fe4a20faf1097bdf11ddda2af2a03c05c79da881967638b86fa726db9d45c6de52641fb23af8f5e8e03835307569120aab6ec20dbc0502b4e888dfe82bb1603760d3abdac1b880c87401274b2ea3d07ba9d5d1090a6518190e6ff8268ea6544f6170268e712b76be086a1cac3781765b699a37609e7c38764af313678c6336a9b679c1f9e30b47d55464698901663e5dbc8c07e748d785c9eff94ce2f7ce41822af2b543637bb7b8852a09a10df03e72a6fe051fcf92328d2885fffe27158014b8f61a3b445a2ae11fa6af93afae05314cbf1c0bc8eb3bd417a7c5ed4ac6318ef9f6f6db0e0d2efff4cb1e6bc47b4a501eb2b6b589f264bd7a29c21fdb937a01d4631f00e6d26f83506df79101eea636a766fd2e689b3a86b53a6744cb6d3548f827a9096189a4e18a9a6603d4dcf4f5912f8681425f4952b15a44eb6465936a1aa539bd90039177f41b7b62a94f0c527a3a5663916a125aff21c812a29c5dddcce1baa05b52fb5d0b74127fd54887dc3c91eb99401a248f085bd2dcd226ab5f633835213cb880a092ca9011ad362d0c33e6fd3f62a138f3cc10ccd2bbec89113d52313c4135fcab39e7cfd805ae0cd8336cd0ee669cba85c71e9b2dcf4cedac9ae8464e4f79210d197be61c2b0f8596cf98e74ef57468fba320143937844f529f59f29530600ce8abaaee23c945ac486c22a72b2879609763ebc2fd183ac8b442c0c3416cb7d3727a87acc68581814abf771f1407e3e80a03bdea9731375d93bc0b08f00f0f248819b3e9003fb800ff09cf6e7644f6f70c026109d7e2b77cb0dcaf43b4c25f32284ba817a5b96d8e96368675ab3d37184bdc828036de67572f43ba3dac6be6234405ee27d5983ea5df0ced3f0b36bdd90de3202284bfe879ad04e3acab738a7c117af7a50ee130fe31238937f46277014c03f9352797850bc86147c91861b40e7b3d42050b45a1157445835273092b532b8edf49e14af2f6bb0e8b376cbdd4ba684c38dc7248334c1c028145f4068c2aafdf632ef2bf3c224fe06937cb40d7b472d221576636b707aa4ff33f718f7e7c4a1f8c26d685728274cdd483be3d3700df2f3150db086823d7206d153abd7ffd269a8ce7ddd397ec9894d64f98f9a6b287be30eabda0f0cd57dfba01690ddf63c4b77da309f9137611870c870c1105cb5be92b595e4987cc3c7201be160a38070e7fb6afba4484585916e6219a33b06d6adf4b2febbd1b9388e30c638f9531598b522d649b724182bd1eb27f49a3a8ab14cfe923a6ee2862180ca0b9366bb52b4bcd27b64b26410e117e07e60d8ecc9a3c4f9d614a3995a3cda077aa8e89dd00bbd85db9a116ae9cf94bfd769d5e99773c342780cfe6fbe
e3e51a02cf846406e5c7f60363b7716d3cae2549614a22429e2f1762d8111a913a2494b4bd677c1f102e731b9874d8599366d61f5c819e4a5e03063ab1f0b1fa092068e70d9a5317e6f6485dd15649af64f0d8ddd2f0f0a3db95819e2fe341d487885aea735d97b70070c30250399a4925b80cc090dcb3ee5b9e636a8ab27379f99be53efccaa30dc9164a34e154ed936c44ccb3611284a817dc49239c0d56a794e3d0165b374f274445235c388a479ffe75c722a8971e549e136152ab59174d8c3ecc6a95ddc6e23c5a2683f2a0f170831da296761f2845aa480dd0b23b7115c8eafe73f5d6d3e321f491214b20942d5a6288bc03f5d850948a6bf6b2b526c3dd792dc6594d2bbd8d7780aa55e18ff40f1dcd7ed92473b126bd38b91a44ad27bcfff583395cc402013467c0d168f04b5d8e54c94cd4532b432f56f61489a71c6e7220ff0a2bd588be016f26fecaa769dbdddd02ef666170a348b4fea3bccd29e06393e8dfa8b79abb881b0e59a4c7c99c8764b0fec8b1031146306257b81deeed0e1e04380631c7fba3532bd4a2c5d081156ae6d9e0e433e27f6b08284af9aec7b435fd5d28f661c7bdb52fc8ed1bf5b5af878272a0bcddd14344fea5a766a79268e5de0eabf7777491ab62503f9edd3d1d1a06b0df0dc3cc5ba2bc4a71ebb8ff8121961dd3288db10c418ac463f1cac558cee93d4ea8ed74a5de4d7a58850df6db02b08e1be68f9b6dfe0be2b8bb37a659c223a7d43097c2b79d959469c2951931dc036964fe797824f41878b98873e41af884171efcf9ee08b7fc7a201b4f8c2e836372b0b7de1ceef1eb19ed65c9d9b44e6a07ac4fd532971a1b0a9f2cf6adcc84f7ea3a61e3ff87daf6bf4818c1d080371a579e953dbd79246ed20d18f16b911c7b64764d77894dd7528cedbe0a7daf22fe57d001fe7faf93cec17c35370ecd51c026a822f6a9f70d3538b4e37225b3b2cc092db2b515a357d433f69f7998f967cd13152af1338028c6d6eddaf6b0f864894c50f8b734e7fade6710e37bcf16082a389fde6d4053ff1f32bca773f9e920f883bfd8c5eb9dcbcb6780fb1762ff699c2bee2094e2c12b349e2f796ec954e76eaee1de567b318c98146ffe4ee55361a80371448e16cdb355737cbc78786d97b2a6294e5070baf9f9610b2e739f02de75de943d34920072deec09ee430fe6dc8f388ab8fad05bcee1e97684c1a1ff9808b775e5b03fdef5c851ef0e4e397f81a7ae7cfb5b122e78a68f211fbf7ae3513f7bab37fb226cb4a66403932f2a72f874551f325a94625a495fa46bdcac13e0c3b96a616f6ea7dc33ba8a6acd5a3f9ba804c01a2155a88d30d104fafa35b6f5c520c5836e9b8d33bb6b90276ff734eb8bbc0b09cafe70b17f1c5aa6122c86ca5112139ca4280b49fe74e4ab47ca8b84cc6eb2d37172cd3a6a0ff40bdc34ad073e941d8f3e6a94688cc9d78cd6fc7a78e2056f5515bce46eeb1cf12d33b68285e304361156785f98175133b6fb61711d758504936157d07d96330e532e9efc2bf060de61a074b88f71c5a70ce3e9ba5d50f91d8de77ead800f95f99e0f5864fa73ff7b9b549652c01d4e4b9a618095d5043929be7c5c5e15629096e7f171fc69a1e25ad75b0077ade8a30092817ceae90b1270160b6bdaaddd45d263a020a7c3fb3035da5d757f5245788ab8f02776352e9d152e4beddf5e5915a2383326ee658d026cf55544cecee60ad87fbdf649a75d873948409d41e8720cf4d7dd8e7c5c508e86195c4635c867a8febc37c87558148f9af3d4a30d3367c617e0d67b0c8ae075829d086f9ef19eabf57ee27f5e7a7fc7a182953d851861cd933f19caaefb56d25f556c582bec3328d489b8e24b4cac0471d5e700bc49fc7dcc869d0df631f2415e4d45339c5530a6c3eb0385eaccf67c82c67c2c333a8b41f1a9c6839b4b1b9da52375971d7f1d6b3a6b518511b27e88e04ef208a3e58fc0d8bb9f6a414e6fb2bb83da90f55c3749f446f9781cfba4313a2e505d565cdf023f64e8ff285fb18cb4cbe4bc1d709671b5ad58d15380616d7fb02695d1992efdcb4f5c5a4ec7685e5ac20a994761b392bad8834450f6886dba6c97ada29ff627407812e8fa54496c559a69c259bd64bcf4da29420b643587b67c30bb15dc1f77dc95fcabbc01fea8b525b23954217710b8cc97ee6e09c4e4a542626ad31278fec81bfbdf6d7e18ceb2fd24bdddbcf92fdd6ce7b3b06480ef51c0b790f8f7bcca1baab27dbb630c0dcc407d328538dbfb4f752b5ecb642e67bd587d81b92df18dcdb0d20b927f134ee419fe5cff8e53a098d9ac17ab23a0de9b68b81ecab0f7dc40837d72411cdea15d8a7d0f97a239f19a3150ff5eb751b3d34804b29d71387f6a7d6233b32686187245b7c6286709956279d386807f39c0a3c792081957a4e1030d8a284c5c91860f3428a23071451a3ed0a088c2c41569f84083220a1357a4e1030d8a284c5c91860f3428a23071451a7ebcf10
a4e2c71c41dcc18c78b3f5c5ca1c38d1e258668f1068d3c6cbc88c2041434f2b0f1220a1350d0c8c3c68b284c4041230f1b2fa23001058d3c6cbc88c2041434f2b0f1220a1350d0c8c3c68b284c4041230f1b2fa23001058d3c6cbc88c2041434f2b0f1220a1350d0c8c3c68b284c40c10e834b5d2d5fc2cdcd36c6b42dcca639b6a7aff9b7c651b555d414dd9184ea043b1411e2082fb491c61f6e6811c5135668238f375ca822892bdcd0471a2facd044146ff8a18d145738a18a34fe70438b289eb0421b79bce14215495ce1863ed278618526a278c30f6da4b8c20955a4f1871b5a44f18415dac8e30d17aa48e20a77c27a69d2083f68ac076b9c891afc32e32f581d86091f23406441e38d1ee0aad100697bf743d40c5aaa128535d29f4c54dfd7e8e56517a1911e29bb291342bd872b3a8a9280ce131df26410a6aa3c00fd23a0bd9686404fe455bf80b19fc189c27604f477531943ff4ef06b64a1da79b98c3d29b5afbd8cfba4a83fdf4e947c95e19ab40c751080adcf441b9eb28c0217eb4cc4fbe602f0c65fca92149cee96e75a58b95587f06b74165442f9ac4d315cfbdf6c61c7e598daa6dbe0eccb8999c45d5da912b33593b0892e984bc1fd3ed4d8e8c51bd7c985c25dd679fa0e5fa7e77aa2654b5f073c925aa35e5f65621148132b3d0a165dd9323a88d6bf9884289d634cb126d9a8b6d5f32f9877b146d49cf3f26b694b3a82b7d05d9bd3a882ba60cbb34cfa010900ee46a4be3f5bc074a816a092561b6e7b9cd3df0299e552c95b08cd9bfd4dff88190dbe4b839d709ab37ae3d985e0f32606dc3db2f507c73e77fa68640085ba4c0f01e4463183560abd9349419a4dadc8f80e6a7298231afa6ee07919f10a40293c97ddfbc382198270120cd1936e167c70771b12ade1312f546fe6ad2e65eac1455c77882fb82f186f92d09d3f583fe0349cd68d46203b8c267453c2faf5365b597a84f3cac759ce73af4b0d915eb8ec364f9357f72bf113cde8341ae1f78e689f39798e3293dae07ca993539c1b59a2b277fefce4e9a3aa7d6d50904764584c5387b06bca49e46a18a66319969698ddb94b7d40549388c38311c9d3a3e43b64cf23620ea9a31cc5841aeb3ba64df8a405861c9cbbd64a6d24a6d750b77a0188fadb81eb8deabd0713a7eb8d0844605ba07f817b9fd1cfbf0360a0aaac6129af08af6cc4e7942e8bcae849d7cb28f47f502652216fad42699feb53d5a8fb45ced9e21666afd154a3d7752aa46e0e27aaa36fcf69f44d9b4d256fba9289cd564b520fb966d55a18d8b0b25bf88d3c5c72b3fa52bfe8feb74cb82e7c2521230c66c3ef36291544aa758cfaa34953079fecfebfda3ffe77d4b941aed9a2b089101d83f80cdb7f71587965f21fe3784858abc72d8097f90226875aa657590b3eaa3c1539fa8a599ff0eeea66876c42fe315df53e31d13e36d4de56ff86c8901340ebbf171f0cb8922ce1d11750017e92f6efcedebe3a1be14aa70cca437994f50eacfd71de50dc21d1ed1775f8e3b638e09c4de4fa89c417659cc6f70fed7b2baa4464880ad19aa2d252eb517bad1879eeafc980d48d46d3dea00c1475659f7f31c0a41b6e12c15c7e7dc73d69231c742b89e14d96af67b1b2a79840c5fec05b7e4ae53e58946cf0a10381b542b3b3b1e2ccdb989132cd55683fd586f577e67d9b97bd53d0dbd0952ce69177d6f36649cd67e752acfc904f6be5ca2a73b87d97af61bd6c78a164d2c4231c3c59a26c2113b64b1d043969b3c555e786a2714f360fa163176261c34c8c05c2678f936565124ca583568913c8c3a1b37a4b6825be468b2e5798fe63ac337d20933140254d6002c2811e9123803c348e41607189bdf4758b0b2e22bc2a4f5fd09bcb34a502e3fcf193ac123f1b957badbfe88c171294037beeec91ecf489cc96a7d7a1c412e95adcd965a05da47d37ecb2bed66dffac99ede7b0a5a07003fb002e021c3804aeedc4b00e441098459336401606060606060aa13fcf61bc32e134f553e603b56d6d66a95964baade4caee7b7418cf42ca274ae15805307399dc5cd93b4b315d089e3df47241a7ffff77d512680d270d3ff28498ac1f79425b48d248e64ad287a62a9a5bb0aca66119d1c870967349c2d962878c9352efbf96f2ea156bc799e5a50a1648c312c4080bcb0b11160ec8e31d1acdb00f4db5b924e17c40878826c4e1446003ccd9800620908108b0408181085c80021680c0e6890ae8a0c005262081279e90804c0498f3a1cb094da3f9040438cff57b4223281ed01870007332d08096992ae7431c0e2b79ae67ea8b9c01359cc8ceb57d0e67d4e431b0006e6f02399a4c4c46145879ae370a68e6f42b991fca8901793aa2d9d924e0020880774433810330e73b1c4e1579649e304093295381023067fa7062409e9898e90301026c06700101b0e584680301e8d13401c0ce0fe9c0c14365f4311b4861111428cd5927cc014d9c5193dfa134
a0050c40a112272f33412a188882139422804a031bd2e89182026506550645063506250615060506f505e505d505c505b505a5059505850575056505e586aa82a2829a8292828a826a4339413541b1a198a096a094a092a0d6504850475046506a106108692480454225a0d94f3983003932d00395030381c891812732b08049e46049420e1618e460194329a550625aa0943a192307060aa0948a018da608c4118524518a176d2895d2821c17f06208392cc0881416a4ac80a382142855d344bd6ca860c9510127e47802e6784225870484fcb04342528e08e488400fa544764837f8b8199d3e3ed89296961491ea99197ac9e1000554114ae96226470380a81c0ce081a806d4c3becc7469998a848b1f79409e0bfa78b6f87c7ad8f7a1548a8c7d2c3e9f1e4aa57c444aa52801d15ca5524e603d4946b58752291e90e7be15a2944af9d13a0ac63c562aa5c755aa59cd0c857a88944aa9541d79110dc9910045b41632b5891dc2b4f80f9d7534b980863c280b50169f0f4d900f9da3c9c57e26218f02cf11a552e62834cd4c215016329ef9995e44ff412617394d2f4aa5503133432f2ffacc201ffba199b668223ff3814c2e4aea446404ea3f748e682617224aea1c1d1149a55292f48001c251293e4ca0944a2981522aa54747343b292468f2a32c944a19410a1529493e1f1a17a552b810a520014d1ea552b6d022258b7f99ee4b8a8f2361b840054a2902a81c04e822070164945238540e0274548a1111289552040ba5525cae50475eea2f69668778da4b0941ca90d5356f45552a45c81c4dab544a15356f85fd783a3f52828c427346a914693d3523cf6786683e45944a01f2d5336921ba221464249a4f64d4a30868765e3e345d61877840592895f223f44a3ca0e90aa5527cc8cfac1e50a7fecb47e48979229fe9a91e4b1382075213a211cd0fbdd4f8502aa58752129a82d4f8b0a01f897e0b23539deab485522954804029191af9a1a9239a3f409ea98e3c3e441f7a218ca49130240d8943f290381288a4a5a5978844dd6c494b4bb3252d2dbd842d9962baa021a2e982864c017a229026344d017a222f335d9acc2be668da293c1c9067869e0e696999406f6526904824b21fcf165348515f0b10cdb525780c4bd046a159a4c64e3e46a1396387d4d8c9c714533493a25916231168fab1e10169a480cd3490a6e695b447633dcd68a6c8818302d633696124241a5d17998ae4c7a8899c219a1e3344d3a30768f2882aa0540a0526a054ca134aa54820254704944a8180130f502ac5010d502a85014a299594052825aa01f5c8a1dac0e28652ea24872203f57c60720420707008a5d4c9247ca0c4471a6edc600f4aa9142e668c2c410e0bd8042494522a6b102109457260a10cae943a29f208186ca10019e8810c94522720b8c108b028430640c88252ea640c445460470887144b504aa110912010c4181b1883124aa9140690d18645ec008d2cbc412905801ee4e08d9338b468820aa5144a202461860a30b10340b8524ac5073001e041070d27e08352ea240eb4c540821a74a10725aa792b54cc904359a552722865a70236a163850f532965084d2829a441a93ef86125133468d18452eaa4109810c4ccc4b4682d38a2903402fd91e9e3e39941a4d104bae2071249643793e8d36323896060ecd7cc4cd1775e364da64cf5c84c508dad53241a792693d13451298ae9f1c0e64a8e39f7984ca1988d4886668b8d0423a281014dd1d789c9683359d09d3a1db043423f43474342313173b339225391d8e9631298500aa5092a654a9129402671c50f245114b9e2079222285ca020c07e17a2c9c403298090841e941403f2cc7c3ca28d8482491492945253ad48e2899548bc11892e2834945291184542044aa99548b0ac4402012b90f8c113a8a0d45879441e4820b5984cf0c0c649017a00b12b8dc0b1c2083056180185525267feadd848452697d1df2ec800420d6a10274a0d4840610d4aa912a8153ea880a6fb403aa22984106f10a2116f509c1542c480109e4d0b21344008012835a2999c502b831083524a14925a5a04a18815410042106968598152aae6ad8809f5b04229b5a294a81541304014926a5e49072cf0a0491492020195529bd1dcd4bc9240a828a5763c4dcd2bd1a1529aa0d40a20c400c4095600b1031017500a3479381d58f1031b56fc50861f9aa89a57a292e20714c5647646d7250b511640642a9219998ae4c8911f1b1e47a4084d413e9ff9e3337fb4b40cc07a262d5a5a5a5a38a0b7d6d3e17866266746246a1e32c5147566a6c7f4d2d2c2b211cdcd0cd1f490628668e60834997ca64c4562a71fb4665e315dd090e9823c1c1ecda610fddfd0e411bd92ea19813c56fc124f8c4c6d62259029d812998aa4a5a59758191a10a70b19eb8634dcd044f5700394dca0949236239a3674a10d6228a55468a2338a9736e8982b6c68031bd2504aadb0c1056c581288262
4f2d0e943376b68845292677259431a4aa209ad810a251aadc10925d957c3153e9e0e934d0f35443518600a6930c14a1a90a441493c8e782c688a36303f776e6060ec7b602634e8000d2e6888c019de38c31a671043492f33d5191adbe30c1b38c300cc100533d498610365788392be33ead49f01d13c0fd00e1d9b8f99329e8e6876ca8003253599321634d195321060850c4c20434729a96632097d8cfdcd68f69036224f8ca7230a8946a309a463f3d633863e8c01086310a2241890e70a989929049a36bf64fee7818881116258430c4894924430309b69049a7cfc80f9d01d54c72f999d1d53872486b5094324940a4d3a36d6c3b1a08986a10c150618a5800cd984810aa5442f617880da80c0f08652223080c10d30a00186264a94051838ea993ce80b8d50138f2fb080046ae50b17f0421d9492607e636ba6cc2481bc0086179e179c50eac78866ea00a84a458c74418d2e7456baa002e3919199a6cc688eb8a0038f5ae142145bc803f7d848a0c93312cd1b7a26d367d343ead43923da02932d944029f56a450b6f2805c3440b67ac6881052b5a78805230d368633fd3a8533d9d983993052c64210859200223cdd10402c3e441d36464661a89369e1016b680852a6081060b2c5750c315c670852faee072050a5821134a29098b4d0b8c24a281d944317d642a126966cac0c0b4c04c1fd29481811185a650e847738a21561043a21ed106d4c30a3392158a58c10a4e28a5944c6eb421b9b1e2869d3edc70a248151aa1a42ad461faa8c213ec8b68387688870a6f485470433499ac50a1a3946441930a0f5053e882524a7d56a640869a021647a4200929084149a01acae3ff6e60a6b043663ca39168aa9c8e680a91a9483aa2d991c292421476d80ffd12cfe7edcb44410c1505181b05a844d3f3991b26ef12051728f4c10714dc508ac9bbfc5fce14524c214593c907678a29ded27c3e34c399461cd11390b4d108a5148ca80d15c088be8dd746069c90871618980d1435af0466c786870e988d13de09185869421cfeef86039b4d88c6da1e2b6c4c820d44b02105a5a060c3b2c1030a181111db432925865a61c26785090258421b6076ec586bf10e1d3a1ac986477b6436a20d6c5aa050129a986c441ed1280a29667a96ccc02c218c25185902039840f14c5aa0906038164624648a9729b4d8f0b0420a2580a18417252c40d1242110d2153f908c402f335dd1d2f2c264022581034990c01a605883c91a3ed66824fc01096c200109127e4830a31771361b2573d4d2a26323e9d8d0c8f0268a1d303b9882b0d0b181818181a9a17f6d0cc81303f21ca1115d5047e8718422343147d82849b4e343938c8e4d0d681aa10c1b8fc86261843494149a36fd62842246d8484528c44a11d298403d8a00a3a4cf5750119ce8a1861b4ad1cd6803200f6cd468b2a206154a7d98fc5d51c30922486132e1b1995ca6480312692c218d1da401930601843005214c42d011044c7ca268922409169fcfdc7060c3c3a3040b2c36120cc36c6a3e211e59040106c10140a804108800842e80f08464375180a60d8f1890e707a21fc81f60c00780f0c1193ea04229f523c986039b243ab090fe33977868ba3032e311d1443137437ae0040fa6c08324f040090f1e0f02800615d08841a3a1c1811dec61074bd8c1921d5c400769d0c11374307550801c6c22076de4e073b08133dea014149b3bfdf57c36a291a7258a1f316f5ab0e8c0904d14d28e2331d3470716303a36a3d0df49039bcf9c666a689536473aa3896e78f00061b189428a02346571c4634173a363e31179866c463b746c44517842134c10110c0f984d8b04c381e14d14d763e79c420ad0779884a60d0fce193ad4194ee0c00d25ed004d1e100e6070f08452d28e69740338dcc08b999a91e86f204429c98c4658fa9687c78c36a41df6ea30a30c3360cc28831265aca1a6cf8866e2b129a389928a2855437b94a1431d21a30fd28e90c823b3094df7081965bc4787888c193284281f643cc0a3a40336a8832766c9a6870dd2504a82b1c18c82b101154a493030a21abc51833228c9d3a9818f1a8c60fe872e87c9836ae0040dda504a493c60447336912b34f828698c338c311ac3023340c30cd49881680675062b32c0820caa0c0c20461a144d68a463f34b3c9b69b4d9a163d60d0fd1ec6c3e1f0e68fe679a24a98e7ec383470d68d6d0990fcd78946c66886e78ec80d980fe958838ffa17323868b181788c11e6260460c48a0a4d0c443e4f90f058189014a185008a30461c4309e08438142349dfaf17c3cdf192d99bf858ecde2cf67fedfc9c48271061849c0f00183444818f8c88c3a92b469a1996a3210e3041acd128c60c2c00d18d040492f5860616453437f485f290cda178590449d116892d930194f93285e66f280f962f485902f0cf0023794827951161b0b00912f3d7e4823452410988dc833459e39442412c5cccffd0fdd1708
6132454bcb8b144d261f1cd1acaf850b4ae082014cc10b8f170b9060603677c8e679d887f9ce0834373c5e36924886a6863c75f3f94c2558c06c5ab0c54a0b10c082386c2c680ab160c4821d2b784329a9e63ba397563082156c54a00615b440053a528089149021056bd01f3b7eec8079d1f4d1d232676aa847e4e9bce8d8483534459e3b44daa1630303b3691981e668339a409f59e38111d1144c2998220505504adac13ab0d844217146d364e4f3594141cc0a0a84d444a2e6073556d43cd1840c4dca9060288d88637fc234435164fe4814e311c1ec108d26509d1e11cd34dad04c35348acf674e90f4a129d580264bf3a2998dfd234d609a40268550aa0c4cd060d2844991256fa8256948a2d99996ccac2c29a2d4121d4ad1bc41c306a594045aa171c0e810a3348c9a8c88280966878e9799441c91471a6dbe33a2d9c0c0ac88b620a2c18a480a991fbdac844c1082d7481771e8628d2e46d0058b9a20310d623a636253004065983262c8b47c06f1c9820e3ea08f90cf0494bc21f3a3d0743d4a9e3841203cb00323adc47821468c98224aa90a9af5c6e1fe60fa41fc10be7e0260ec083447a1694666c85318eb02db01c16ca26891d9706003aa57a8b762800682a6a19464ef047ac933dba0d448b432210c0b604c00c31e1071fe43975387c985b3860232c4852386c481a97925f63909504a12d5bc92cd3492f1bc15268082094860021d25f04309d650a2a7a317896a5ec94a092c40823790800d127041029411b0318215ac8cc00a25e910590fa7e69558d16323895e09c8b3d93112e9d8d47c36a169b3e333473a800cd93c1dd5bff7434c1e643fc411b991448c243049742895840bebe1c06cb848a3c70a1748242e7470a194928e20a98392824242032454483523d1648224015b9441a92dc4e8b105922d12a0b47063870e2dcad0a288524a6551892c8e908588c9df074d51dccf8ba2004d20d1069445162f6d7859432969e5c5e58868762a8d6823fa2c36927d110d68ae1c7143d54ca1d112cfe7081a4a3ad239b2568e38a18cd4611a414329c90812a5404696521268c588134a29490475504aa98d08bc58e219899684685a5a5eec90969617eb298286222a90608a14008b252b5840e102079734e6157990ae98572c267385c80e88fc20b2002bb460c55ba180101c220467483c42208010a80c69c2102e86e8504a59d0ec6c6024212f42282084031b9909d2b181a20a19841241d45809c24510968f58915b905dc88d92a2780f9599926c46a26966a381cd878a6c3c00337598f8b8626e3a95466623ba93911520330022c50f41acfc08c3e7c70224514bcb4f29c1c0d44ca30d10092606e411493b5a5a5e746c9a4c4f6fed7a7010d23fb64bbd4e8e70dee68e7367e461d29ce3670a391184a4cc369cf3ffb1cb56e48e73206464b690bd565eeecdf92020e433eaa637a597724378ff20a3b3d8a275eb5606dde32b4efc2067fce8d6b783975ae62f9ef4413ebb6b1f27337f773e88c9c4b173fa48d2e8632489e5a50a0ecb0fd642922489dae73e38e183e4e891d2e9ba31ee76eb876844d3a772607f26283443c351324d1f6f96972a5844c0f2838508cb0b1196b807d9bf784e46ab7d74dea807e938be775b2f771bd7a21d6716eb342c329e8e485ac2429d8665b524277990eeb9fb966d7cb3365090e6133cc8bafad1d775316bf5417607f9e885d39bf575cdffdd0e927a5c3c5fbbb5e875e69c9f787152075999417aa18dd33d76b3759c41ffe1d45f12d9417649c20972428718db6206bdb5061bbff6e7e082d4215b9741b7d6aadfa9f3c77b422f9f59272292c4f2a3e6ade888a6901f2ccff5c4e4e595cc8f0f49927c00b9c239962284933948d6757a9d3d217bdbe06c434ee420dd7a476ff3f3dfa6efc641ceea9effec576bf5081f1c24a396f9d64aeb648e55fa0649e98b7445eb1ebfe7d65a619cb8415e5ff4b5c7d031576b7b21d2832e49384b4eda20db8cad7f35dafed5cb5cc7f951e0610ebd9ddf617636c8c5d6edbbdcd042d7e87b4fe8a5e93859838c94def80ffadb4be3ad1f510cf568aea01a516b3951037cf0bd86afb1836fb116b9c576b77e65cc4c297bafa541b6bef3a98d6f5d7ff3d5b72e7131b5ee928423c5091a6475ededb69f718d6df90c72326c2dc2efbf7da3d38e33eb2e4ecc20a77363d4dfb3d6bac52e5bc14919e4b767f0bd377aaf6590c9201f745e77b6e7eeeaa5750c92bd729dd1bddf37793531489f4c6d74acd67ff6e3890b4ec22021e51a9d9b0fadd75b1b052760908f6f73bcb6459eb13a157d412e1a9d8b96455bd98bd17a41b67b08d9639d8f638b5d90cd5dd9b2ed192de39f1d67ae39e18274eb51cadc73d31febfa3ace2c05275b906bf2a3d3fa727addaab4e3bc5893132d48389bb79d956765f35edb71e69e4eb220fd369c73c1e56dc59eb6e38c0539e7db58df7bde1a593fd7a2932b48c79a75cb6b356abb8ea
6c90ab29d2ba3931f5bb3d9ee8e33643724b38f6fb5e9f8759ca3cc4915a4af3819bfe59c6b73ef182ac876ddb6daac45b60e998ade094ea620af6d6a5f37e630dee8ea874453c759c98914a4a3cebedd5dd0b9fbb63bceddb8313397249cd04914e48cb132cf7e6f59f7ce4241d248fddd6823d34b6bab4f90ec6074fce2738d52365b1baf99f1748ef3ce3a9b3b01fabdadae05bd1775f19b209b9b73ef459eed7975c706730d3a6bab3973fc60847ddbf9b28cb649ddad183bfe5c0b912411cb0b114962ffe28409b25f5b48d76de8f0d7ea8eb33bb6fab14b12cee7640972bae8cf0bb2cb1d6778851325c80ae15a9351cbff1d7b769c471fc384f800d2114d2192442da7239a9d1f2c3f5884b0fc6039c2f2428445fa784fe84592a4299c244136ed5fede67c2c42381f4b11232c3e587eb0f460294284c52d7649c2a1395943c6e5b545e6d6b18670d28e73939119d2100e274890df569c31b66eb7f6f4d971fe0f8d38cc9f090a61f133352f8ddf891c4e8e20d97291b56b67cd55d7abe31c03f2884234f5431cfe182be713d182e5078b152c3f584634574852e89584a6ccdbc94592de4e204992a4c5fd760235c78cf2831323480aebacb76bbd6f75645d0469e7b3d6b1ca3fdbb17d6ac8c5ead79fab17ac11c64604692f9bd6396763ed8e9487201f8c94f2b3f3d26e783d0de97dbf4e67fb6e841cbbe30cf2707e3459088bc5d66d3d6ecdb5860d1b5b147665bff47ea416da20c8d9dc2e3f6bb13afbd6aec8831320c8d837de4b2bf438f936ef384f17244990c3ad5d92704e7470f203195b746fb953389d7d1a7122cb4b152c9254dfde372249a32a49955333f284e64c5b3438f1817cbb28b3df9a37ae6c6b0fe4e4d51ae4db68fcdbcff1a0f93d66e7d690db7bed71639f91f9f3cbd869eb38c30e8d66b6384143425e1d2db4b5c2fe8ed30834e407cb8bcc140a527fe68824bd307649c23961e3640712de4523b765ebb566d64f74205df57eeea6dd1a437b7310996baf2de7625c8b316cccd95b5cf36f37f6fb904fce90d6c5382347d8166def210ee4dfc8dcf87d7cbedeb5edb924e194e0e40694beebda9aff9cd5fb9a19b09b3ec65e5b6e5ba57c528684ec9b56e8fcadc6c8d60919b231767e7135c8bed17fed9dd840b2dfc8d8b5cb96b3ff8ee7490df841f606239b2ec27a3bceedf98c8c879dd04046c86cbd062d74beee84d0d3e485849ec944a44d703286a4cdfd0dfb19beb7d8cb42024d204992a1013d97241c7a3203c91ceb75b94156dd5b2eba76220339176dedce77d39fadc5f1440ce960abf5b50a9da73bbb1d67260f62a13f121a1d61f6d048c4e2490c64b75b477bd5c69a3665190ee83fce353353756c1d0dcb0b0b75342cb1ff240cc9aeb2f9bd96bb46bd46f7e3040c69bd2933fd660dd97f6c2a4e6020dfbebb9a32d8d3c5585d9e7c21295b18e973ff4d19a3cdc7c90ba453d66c17ff826e31d73acea310279eb8403a5fafffc1c6a87db71ff41f4e3b20275ec8b7a6fbaab3c54bef9d8d096981a4ced275ad7b3fdfabce678705921f7decae8deddacde81de7c7a1916805d2d98e8cd278bbc5c77c3bce9ee6a802d9cdba75f773b91a6fd38ef3e759ce0cc5386ef610a4404eea204fcbdef573573934122d8902f9e2ac5caf63ec5ffcca9f69a2426ae45d0d67e34927adeffa5d13e9ad46e72e6bfbb6d9638d89fccbeaf3e81efeb3ffcb9c5c221b6d6f3d686d6b0a63affecf0ccdc8491ac9fa41eef6cb99abddae6824ed2f372fe5065f8cf363c799c338af45b2d57eb131d7feba6baf779ced9c615248526eadbd5b1fd9ac5ebde3dc41ba901032ca9cc58ffc9e3fef388b26dbd88546a23723afebe64c9fc6b938b6cd629210c60a699df71fbc2fdeb50548366bef72ad42b6d4deb664e4fa66e357fee6fb66cf8f643fe37b71cd587db979b75022a38dd731daedf55cfc99a1183b679a433f8a2399cbe00922bdabbec3c758a3ff3aceb099863392b9b00a4fcf67b72a74ff33d6be1de7f668382f0acd99c6323420e6d259eef9653f5d7bd63eff8eb30b3d0834248b18e920730e3a4767bb33ebfe3343f3b2ae6cdcdaabebdba22e32b51d67fb79cf754798e148e632295eb23b1bd2b72ae5f6fc388d439d2af332cf452be9b4cd8db57eb16b5dac879d452449147af85c927062aaa4ebdfeb59db82fc7d1de3f7a1a93695b523d74ad9a5eb5e38a1283a97241c2493c5be4de1a3fdec7b65dd5c9270268c9cb77eadb42d7b5f8c8e46f600c75dfe3ae76cabddc1898509a48d7e6ff5b6566bd659d871e688209740da79ddb27cdd574be75b1de79af961ec4820ff466b7fc55bbdc2756fc7d9f11881ac8bbeb3b13eaff16d27610bfbce089bb79fae2b17b2df5666fa7f6d7bacf68c8c875124b2a363962e6eeb6cdbb52d646476e3f4fb60d7766f1ba2857cb33aa7d6b676273b5b35f33323f24c1734a47a40b38824b94b12cecd42365cebb6162d
3357abd7189047495c2f92af7dfab13dbbff9429e3d74724f3f456abf31bfb1b6bf68c4806977bcedfdb366dff94a4cfe7eb7a2e49385c8840be7eefda6cf628658c7dc7d97a383f53337296972a586a403d24a906345f1e7349c2914564b345bd61ed76f860b38eb31bc95c0d60211f5ccf9c3dfa8c3e6a5fb3450217d97841c668bbcb556e7c9bd8211f039a4624e96340332449a29989c3ee928463af80bebac1f53a3a8e0d1946c6bca66bec4606d9ed6763c739f4a3213f24c9be6766aa922492a4d08f3c418cb0f4e2d068e6440d22b2596e949bfbbdddffdd71fe0a9ae2571016a0200f8a66a6162b64a4155ae6ec8b6bad05ffbe117f3134f4ad1bc95c361302f9ddde735b2f630b5ad61d670c0ca9bcda5db5adeb58b594798890b8fad9bbe6f77fe3ea38332baa80cc9f9bed0e79b5f5607308824c6fabb2fdd78dbef8338a40b6eb145ebf96d99bb3b4e36ca72340287b6cb2db9a36c3ca6ded64e4c7f5e7f9ea6d7f962b5b467cc8eaccfe2c8cb5196cdc1de7cf8c8c87633f343b22e83d7cbdca7d9fe37a6fa4b0fb850a19a77b4effba635fefad8ec8f98120b2d9627f84b5fd649527992934c37d640a79993bf7f8587b36461a3bce3353c8f2fa80a4ab32ebbd5a377bd9731d9f468ca590ecd8cdb8d8bd78a37bef383f68fa544ef58ce648d4cc1e9093dd55d85e37da3f9d3bce3253e83649aabf84e585088bcc140a22493e2429ba24e1a0a045c6c92e6d6ef3398cacb58e33f411bf3e66eb9ab1ade6fdace3922449cc85463333d02d77ab2d171b328efd166392cc92cf4792d65e87b6576c5adb6486028f882634dd181afa403e33329e763234203692b94c5c4ea7b1b647465bec4ad7bd5324f224f5efd8cf9d9bdf3be6a46333425f8daee8d68bb7e3cc61fc1ed75f32a449ea5eb3efea2f1b29dfd871e634bf91cc658fc94c132726c4e138d1d221d369e9a3ee689d2f769c47214805cbeb5673bbbec6bf6cd1da71e6385cf2418ecf9ba30c5e68d7ed3843f60ec858afe367bfcc3d99c78e33932914c32d496eb71c5ed7dc5757d9eb388746a22864bf7596c24899b3af671de791cc8d59f090adb2a3add616a3f3e9b7e36c419333596ee78203f23547d7d7eb342edacd759c3b229a194e68ba9cd783e507cb0bcb0b1196fa5a30994622bb7349c22102857cae32bbfd5a635ea3731de7d0ec544e6990143a766dbdfbd1c15ad120ab6dee083fb6e55d193c836cecedf4b6e5de3308971964c7c5b1ad48275d91322f03e48ecc9e2f66d65a7408dd7a8cdd466f7eedda8264906f5e5ae92f6ae3f36a2f49afd14819835c8dab5be8a8f5e7d8a1182474ec3e3a6153e668bc2c0ccddc7b31f81adec7d6e1a5bd268460906e2dd8fc1a64dfef5ef805799deb79edfcb726e3d8ab7941c60539dee791cdf76acf2ec8fbec9a8db35ae69ad5c605e9dd6afb823d79f9b4bf05f9288cdf17b2f876797b2d48db266c0efedf7ecc3666c17db16befc1d66df9e3d6b83e6799ed6bdf2fdbbc6241dad766bd6c6737ea7feb152473d3ed4ff7ef45e81ab382ac96d67a23b3492785cd53dc900fd2a71f3fbabb4b12ce94520549ffd9becadf11527ef63319a1825ccc4d6eb063ab70b275a09429c8cafac6ebaefbd9db738d06295290d4d1c9dc5c1552bed72394414a14a475facfb66b9bb7e9b13f2950900ec6c8209d0e4ec7263b7c829ccebc9832a53c579d5408296d48a7ec5ab66eb208dd8b35ba13645d6f2db4f55d9b347a8465a43441b286f4b1667e775eea8e0de98c1da3ee2e64963aa717e4b806294c90b636a45eaf6d06dbedb8041959bbd59dd95fb42b3f25c869d77b3db93bb68fd425417e6b375256a98df52dca6bf8f3d5cc3d326c112e87ab17acacf1746ccd67612341be777f1965c8f3d93193a4ee33528e205b8dcf9141dbb5e75c168c1423485ae372dc984f665f3f9b520449ef7f3bb3ba6e6d6d5e35a4c3ba68fd56db9aec412782e4ca1f5f9bf3f99df3dd21c8c7de471af93aa4f799a621dfbb3be163ce416e3fa710a4753aefbbf53acbe26b2f08ecde8bd11d5c8ff9b53bd89c7ea55e6f7ba79602417263a6d62163b4b2d9d69991f203f962cfe7265cedabbb6fb5f3817c343a56ed7a577b20e785de1ef36f0b36ebcd03e9627318ebf772ec359d68c809ed3bf64fbd3d3fb603e9a675d775de161d63d77420ef6c75c2e5f86ffd383907f12de871bdd70e32a47ddf7d91becb1e73ef0c495b64ed1abbe6a8abcc1c4bc1816c7432f6ac83b031fb6e536e20dd3fdb9eaf5f17df55e622c50cd9f8565edee037f8cbb19432628b8d9bafe69c63cc1a3a072b8dd751366bbb2dc6994286bcb59b63fe8bb9e618753fc3e1b824e1482936904ea7ffa2d4c5dbb3727fae0b414a0d64b48b36af2fc2d6718e993e2c850692d90779dad98badc7ed3acef66326224452c660ee0fb26b2b7ae4e80b3ee6ac27b3a3969f36ff989889c8cce4224931d3c7b1941948a
6efb5763c5dab919d9d418a0ce4ba15ded6e8ff7a77f5730ed7a48821a18bb3d6e8f056daaadb8ef3874686bd1061f9d05c21499d9127342549f4f978a88813fa19497249c209a5c440def9a0b7db77adfad559c7f9b1bc5481c4a584213b3684175276dcbebe088674d15eebd4b9fffafe5fe8d54881819ccfddc8965dd39d6bb45f48caf04ebeefae1a27ff4b79819c8b2e6691f5830e69ab4d468a0be47bb0d6bf0c699cd636853748f14232c820b37b1fa32e49381d944dc836bbfdbdb33c9ff76590736203144d48ae8f1b755fed3bd866776226a437d8b3ade8e2fabf5f3bce50d4fc1a132c325f1e975bacc5151bb3c60fb2cbec8a773657173bce924892502e21a9bb914e5ad964d339fa5eb484e477df8dcfab2164f85f09c91ab43cddbaff766b636fc8c87d17f36759338dcf9490dfecaee6eaff24a47b963ecad67cdfaed1270969db9dfd73bfd970dd180979ed37d7d442c614d2cb90f0b63ac6e6ed203fefe61ab4d6467ffb1de36aeb1192c2d9de74d54268e3726ed02509a70b1447c8b8983f7bdad8ed499d3f9e02a51192ba9dd5e3e3172f6b334a92c4e12af2c8b4f718219d3268e35f7b2f6c8dd545487eac753b6f9659369d2b42bef6d5f6645ed5369e2f1192cd661f63fcf4bdb5672f445824e921425676567bb9edf5dc3a070f21dbbf7fc6188c17c26b6bd4018a21e4b313c6e7f15ed7b81d8a6a66114992240e4356039442c8e57045e7f65b5d94b6450809dddd4f9eb4d2679dd58ef3c803f270388f480f1f5810a9e2715e109441c878fb715dfdda51b7d1fe4ccd0b84038a2064b476bde6607c8eff286b6c4009849caee9ed6b9bfd45a77b2343244992a416f270984fd480028815ddf3e58cb9c8fdda7aebdd4869bb4febbbcbea6247f983f4fb7e56775e5dbcf1d98e7368e28c3ea631647e902f5eafff6abc91aee608631f64a51ca7b38ebea6fd6ef241d2d978ce7ef83e42eeba07d9d6537ef66aa43d29ed217a90b0be8bd559fbef8f17e6413e785fa4ee7d32b76e737890ddcdd8d1f6188dd6fabd83ec7facb1e69ab2c5b59b1de46aaef321eceb2a9cb4eb202d5c5cd9cf4be9c358e920b9d5f6ae515b1d4ec77c0e30b26bbcb841d7a28baedd6cf7fdd9e91e738f9543802207d9e873b5996bcfe7a4ef0c250ef24df8ea3f8e76d93bfbe1205bc78f71cdd56cd1bbea1be48bcecdd87cf78d5138dd20db7b4dab4feafcd6dab18181d20659997b0c6bdb3ba97b2f4e23d010fb45c84061838cf5e16a6adfe3d5ef9a19286b90b7c1e8f5758cd471acee676a5ed4202ba42cbae80d5e6a1b52c76990d6d2b7ea5b6f46fae0ba6c08286890f5fdbdd86cbee55ccfb0d0544492ce20db74cc39f7b6afbd8cc1c65afc403183acb49f2d8cefd9be313a5c6590cff1b3b7bd8f8ca1c3d123835cebc2b678f1635a595dee8d41726bb73cdf3bc6af7de32592d478096b4c401183e405ddcec573ad67f3ce114a1824f3b70d238ceb417b2d0383747e6f9bad78bb755cbcdc2509c7a27c41c2db5cece675416bdfaaf6f39e2092c461d1ab19351150bc20d94e58eb64fbcc51e898244952cd6831a766d44740e98274b6d9693fcf069d73970bf2fd75f63bce151f9b75a26c41b2d5cfb6f7e8f7f367a70509b927756f2e4ecaef19250b727dcf9fb77dc6c5902ba4020a1624fbfb9a747deb37fd5a9617222ccc1c6e0d05942b485ebe9e8bff957953487db9816205d960642cb2391f63ebaa243173d8b380e286a46d19c3da93cd65aded2a48763821658bb5adece3eb38337e549096c6bae8fdd7b12ff33905396b5b7eb99f4e9fedbd45285290ccaeeadeb4cbadbbae7317961f92d49648123387db928edc049428c8faf3bec38e6e41a72b42415a57ef7ab052b766bbf8160701e509b2d9652183b659182ff5da865cd42d06996bb6383a779d20dfad86df8eb57de7da04b9da4d08fb6d331b324e385d77b3491ba3fc4c90ac4e1b5d2fb816f49ebf84179bdba13be7cc357ecdb15d70317d97b266eb3e455182a4d6b29b1edf5f9eb739020125097255473d4ea7cdee5b3817af215f5cdeae75d02ba4f0b2a8060a12e45cacb265ff2dfafcabb30f508e205d63ee5217e7827cfdb124b91ea018415e372dadcdfadef5586345908bb15b71397d57392e5743badbc9ae279bd01b32874204b9acf5a7ccbe6a9d6dacbd4019826c0e276d35faaa73b28d6948ae0bfeaaf5cef6d5b185201b3fcbf5eb73f0d2493f08b2ddf53a7a741c19af07027cd01b8b8cc5d5cb19b218e98d35aee82cbdcc8bc21f48d87345487bbad616b32849920439fc646a139803141fc866ae2bdbfb5c7597b9ee382ff117222c222d9c81a0f440429f153ec7bb2863b641141e486f9eef5af4d5de51eab84041433238b9f99c0f5ac66ea53b90f1798df0e3ff85ddd65174d0627774081bbbe55c6cf6d8b37c6fb7f5b531e88d59819203193bbe561fb2e83f21db09c4f8ede4d220ca19d23d
fb90b5d73e5fa36d4e23b7b8070a0e24e59fcc3a8fd331b618eb784d68049223d0ac530bd833538671506ec09219f22d8feb9b5a77ede9e58e3fa68c864592409367441dcd626c049432a4a3d3729b37ba8636b22a449220b39343a09021292fda4ed9bdc78cb226e448102b587eb008691c05141b48eab336effe7ead2d8f3590fccf3ec28ecc9adfe51d6796972a5866a61010499a9942b6c70b1196185d804203e98f7da40dc6e7fce9729431e4fcfaefbd9e5f5f6437a2cc40c6d93e3e56e18d96315765c01e14316475d59dff57cb7cf1accd3f283190b09d7bfb189bfefd36a7cba9098d20cb4b152c5210961f2c43e615923433657ac900250cb97e42a673bee89284c3021430e45d8eba8594199ded623b224992049b39ac071418c8fbcd9fb7d80ce1b7b3ccc040f942bee7d7d3b947eb3ff86ac799c3996238b05f80f202d96baeea1c5d76faeaca1f1108c505f2d9e4e52c7dd710c2b71de7d92c0c142f64336f93616df3b5a597759c39b1bf38d9845c76fe5dd6d108d937dbd59d134dc84979b1bbd66d65313a9809f9b5bd2edad7fddbd7dae2d598903ddd8470dab75e6dfd65874892489258821861892e349ab98464d7deda5a5badced91e5a42d65eb645ead739fbee7f6ba1d1cc4925642fb6e66568dbaab0fa7d435ef8dc3fb42f3eb86a5b4ac84a9bb6770abf4d7e5f27216be4b6d8e30899abcfab2464bf355d7c0b19840c5e1a0919995b0a693ba7966fa390908d7eb7ebb771a417fe23e4c7866e3517595dd07d3a42b2caf5525b619b8d90d74ef7d5baf8ea72ae272364b5d551eada83edb3ab2f423a363d3a3a2955848c94bd5dbb48dda18b9e08e9f0fb19af66eff5ca0e11925d0b2b65f79da5d5c64348c6cc62ec1badc7399b1942ba59ffd26adb7bcc9b7721e4e535d7857de7bccb5542487fcd3d2e3703765ddbd352cfbd9c214182485273d60e7054815e57a9d7d8ae2f2ac8fbecc7161bd2762fbf29480b637396f9d1eb9c3566714841d2e52d76a3f3fa9b90b28e732b82230a92f2bdf6f18c96cedbbafa101c50909752e8d69a73b9e63cc227c80997aff6eaa48edf7baedb90d6f9d177ebd72f6f8bdd091236377b35cbacb5cc51d77176dc04e9d5bd77abff75cfefb0e31c021c6cc8e6d5bdff4166fbf6d18e731482830932d3e4048e2558908709888603f2c43c55000e25f08bfefea6fdc6d45fc7f9459fe78426cecc94617623c09104872fb6c51ebddbe38e91df75bb1c3a86fe8b5f345f5c43c6d67adae9eabc6bb1879f8fc70a49428264b05147ab7f6bce2873c7395a81e308d27fd9461d8b6cb169efebb8c46104d9af3ee7cdbdae57e02882bc975db76ab4d6d235bf311638d490ce965977e3bc31be9bed514204f9987b554a19afea6cbda221486ff7ed75bb1f2773d869c849699cb5ce6717218b2d04b9a25bef5d27ffb76527e402471064ed6ebf8f1985acb5fb0b0e20c8d86c73afc966afc870be1fc8b61e6b9fd4353b1fb3db6c021c3e90b5d278a3bdf636e735f7e4e1cc885c0fe4b5d0c277ad6d961d6b93079223b4ecbd7b6edb37753464bdf4c2689ba3b55cab15e1d8816cd3cef8eeaf182fbba5387420e9f55a7fd55a6b6cd0298e1cc8badc6b73510a6f53eb0c04c719b25e4ae37a16dae51cf38b66440f070ea4abed39aeb0eb8a165207210d6c4670dc405e77b0216b103a7baf5333646dfc7ed5d6fa351abbcb90fc60a55f7b5efff9621538c870cd7964ddd8b5f3b81833efdb97bdc5efed391c3690f7b5a5af19cef64bf9fdcc54458c3f088e1a4857233ffae6659ef641e7b06b010e1a48c8ab72fb638f3aaccdc790d7defaea7293b266d9332f70cc40fa65dbd65f66d9bef9fe882449d2879cc3fe050e1948efe5a085af32f8b5290e3164656c5156e16cd04d475d0ca463eddcdf7dee2a6bb7b181230c399f8decbfb5e5fc5e6b38c090cb9d597b909f2d43f83090977e64d89752c89637fd42b6b3bfec9b5dcb3dd7e60be46d3ba17fd3e538b2d570b840d29ef5c21b9fb7afb361cd5be12ec0e185bcf632b7cb5d073f320825691acdd484eafbd0172a9b90f332640bb6fbd662ffea99b490a40f71f843539551d184b4eea29d93e15ab0276d9990efe05cedd9399973cc8d09795bf35db12973d61d7d97889063e3c6bc9fc3069dad35d76c66d53e66efb3e696908db54bfbd5ebe66ad3d6cf1149e2b068347d4410062a9590f6da181df515d9ff6cf10d597fb6f6ef356c46d96294906ec217dba4f0abad96b9492ce898c1e81e37477676feeed87de6c9ac4b42d2079d1f84b6cdc5ecb5922449921439ccecee04954848e86f457e0ed256e1b70609f96e84cbcd66b71d738e3e423a5c8fdd666dd7ff4659c511d05d6cdd1a3be366d618b76fd4d56861658c5f3742ce3563b3f1e983eeff5d4648b6feae6336ba46b9b65f84ecfeba228cffee397d56921421bda757d7f8b2d56ef9cd429224492511727265faeba16dd
fcb741e3e8672cca123eb1c5bf5c388a19c33db7aa4e8084339b3db36fa6b301436471abedfa2b3ed5f287b68d939bc93bd50c8214c6bfeec314ec65dd075ff62b2783821174aa99ab5e3badc822962a4c38fa331a95a28659a4ca5db7b4a0e9285f255d7f6f9d46478d60516ca13f7e315c6e8c20a85b5d08917f3a1a18b2a14f7c1c6b439248f0ae410e97d750a85cfede1adf53eee183743175228f427b90fb7a5b43ba250eef073b6ef687d5bf250e83a25781cebedff84e38fcc277e1ce6a64e386cb325a7d9040fcb26942c36c723a71126d3cc04f683659def48f41c59822bbd21dfc945ab84438fc97a2aa27b104ac2bfd5daf9038f3979474239a60f5fb374e47154720443b6588d1adf6346602a5bdc67638ea4638a50ac0cf2316796c72d1a118a5efaff9ad3c7949b1a42f725f91b33ea070ba16497ae913ece25375119ba0842517ab2947af05196dd40d83e73d0f6f96a3ae4f841f9e31cfa83920e2587491f7c1fe849a7499922377485d0450fca61a7eef6341375aed1a10b1e944a4259e7207e6a67cc0e8afe935b3de6101cbad0812a9d246afcfe103a9e83427547f591a448f5fbc101ee79f5fbb30721f4bf41a1fef40315c9eff83ebab0c17963744c88e4ce6e17aac711425febd95dd0a0a092a38ff7d78899b3b3586cd445726451560f1e6feaf4518488c6e29eebd6f4e83ed86e6051cc3aa221226aa476c92b0a1f43c7a33fb0ed30eb8a3f871c8dcea64dd5722bbc702b0f1ef2e7509515a5ccc8fa1c7b27f9385b85711f7393e5ee795451b68bd5ff707318a265dec0452a4c8d1c878dca0afd282a1ea9da9bcc9c22138b0f74d5d4a57343ebcb006301e68037c141082e4c717866b3c152a26fac52f8d2b91ebfb121e4b8a1c5a428caef9bc72182ce848f5194c3f051d3594896cfb628ca915e44bad75a99c9a1e032b64fd984ac11a2a0288711cddbce2b4e42f813e5aeeee439637dbe2c3dd1476af4fc1c3eb69a3b510ce1ab634db0ec60279c28a64a0776daa13eda4d14b284471f868ce123829a284f7f660df9f3de3dce44c1b535a64c97fa108389a2daa7c9b1ad511aac4b14c2b463bbceb12769c485250ed732ce548992c47bdefd538f03ef28a1bc07ede1878edb9f44614c738ba8e71cbe749228761ccf85688cd0fd238184a65b6c8c94c86b9a86986b16ee21879a63820e038cf3c51a43a29872f2e626b6efd16556955428e8137c416de0e21185a8d6f318bc3ea6268e28475c4ae7499146943c8ceefe0e255ca58611a4d0be4eeaff594421f787a8ce1d62ce2ba2bc1fc4ec419de71cf713512c515373d9f4f4104494639578521d3fa87ae61065d71c4fe43a0e325287218a2ac93c6895bf901c2e44c9e3dcd1be35a4ad9310a5df2e9d10b5bb1f64104ec8f3f87ad5d611b810c471549ae137211788527d04eb7febb439612b4094ec6387ff9f733b66ea0fc51c1ec34b65c881e64ce5062efc50cc13592dfea551aa5b4170d18782b706f510e5716add17f0451d1db8e04339e6cf71078f50d7667f512d2823f9c0c51ecaf1ffe394d936d691d643b13d9a66f97bca8e340f45d3f650114befaf030fc5bdd80bd1ed099e2a4e0217772886ea1c0766dbd933fa5cd8a1ec1d6c346fca915911c0451d0a66df7b95994387625c4feff16b8608d348818b3994477dd33fde905cf76427702187626aa6869043c9aa31270ee51c67c779fe9169580f877284999eb4d23561e26f28e4b8e2b93dee77ec0f3714463fae62bfd37f1cdc8662d8a879ba573f492b1b4cc9fd14b39f24c305ef803514acfc3f5ea37c7262d450fc385c86b8217af0aab848835962c4ff1c648c8672dc1fc72ab31eade4bc09438c08bc09438c306a0f5c9ca1d89e1225668d8f23586a86b2eb4cda99d89a31836528b947a9079a6b7316990cc5bfd0fc25b9d6abf9188af1f19757fb770eeb134339e618fcbdeda322d261284afb473f396623f904864278145ff3b8ffae922f14dc63e9b58bdbd891e6c20b65eda8c3b0f9e3a8f4f8093cc00b31cae82b3070d18582e76464bd88fa4ba45c28e750571f4287e7d14f82c0c5168aa799d424c7e221fb69a1b8795e63daf3e0220be5c82607e9ace3f8b7d5175f8618615421c00b03e010b8c042c922d56a88ecf17de4af50eeb03a6543cc29ceb35628871f69dce8791929b31d705185a28a7d640d0bc91d422a14f3c318d446bd6a6b9b4251b2c24d42dceaf6490ae53eef183c8e5fa7ae230a05f5bbf530ee840ca1864231ec841892ace2e209054fa969254f08ea93b61c70e184d2f594d668e647e7d001174d285c69a7cb5af3a831b342e08209c5ccb08f3f48fd786d0c042e965088292dd33449dc3bf147a055c62f80b950427147722469537f0e639c84727c125367fb206b914442517e73a8137396b03971718442ebea948788e13f3e4628f707f33af910a77753846207132a9ecc53fa3e88504c13db6d09de933bf3808b
219472539348ca1c49b20b6fc085104a192d7d2e6972508fbb61c2e8310e1a701184f20739d2cdae3da541e5020885d78928d5e122c9a61f14e6a523c9904232e38ecb850f4adb2737124a22dbe6700cb8e8416144fd83ae0ec77f3e7850ce1ed33cb3f62ab18b51c6171ae06207c5dc5b699bc6e37fe675012e7450120f356a72eed9fc7095c0741916182301f9012e7250ee99d395fffca943a32a5ce0003b7509113c288c36010a9ac0c50dcab5966b191f4e75f488800b1b143acc614886bfd5ed1c37b44c6082be51060ade5bc0450d0a6962cc1b7122b47c9a0b1a14438ecb4432f665d0c82cca4147a36ba73d0cb1d59045596de36d9e1bf1400c4ca0462c0a1d3bc7be0ed12f393947500316e43e6df1483e53578d5714824d78a0db217fa4f78616d11aaea0d443ff4dbb8e1bba8c831aad287ec7883e93436d345107355851922ccff49c2bbbaec62a0a223944b4c4bf862a4ad21966efd7a537c335525108d26213322e19728af545a1a234b9a221dd8cda5ee7043dc60bc038011865a04002366c7c81891aa728a55f798e99bcbb29b3851aa6285adfdf7526fdd06e3b4b438d52145b3ace8f4bcd53749a9c45a0035f74a006298a59b4a25eab4362d88ca2d47136d69b77948fb7612316354451f07f4ffb7a7f539f138ae269fabab1fac8d23a40518ae4c1a564bc43f1b84f143c44bbee09517165bf021a60e589c279af84cee7965d45801a9d2807b39ef05976ade773a2202a92ed3fda9b2827ff124d95b98626b050af9f2c21234b32510ef13de3c3a3efc31f13c590830e7764643b8afd1285f70e6432a37e869cb64431e74da9b92341a7939528cd4ef4f0e769f1b39b000c151c616ca20625ca91d4c68efdb64e9173432bbdb8c002522b021df8428c1a9328ee4ff9f96584d1232076811a922884099f97daa621c7ed17653c193f8619056a44a2f0f944efc30ea2a9e0e8036a40a2e03ba332771e3d79d00dad2fbc7f8c74408d4794c3189183aa7a5851a9961a8e28aea68d1fcb90265f65216a34a2d471d2630e7360ff1979718107d46044c12652d6e4c8fcb299c0c1f8220c31d4868d0bbc09bc01596311e52023a60f377fdcd04a4514337958d9816ecefe9a88721c075125d95da406224a8b99a8b4ad050392401c0c0643a150280c08a52faf0063140000000c1610c782c1589ce96add011480033e3a2a3c32321e2018121818160e8843a1400010088801413018100685c2807030481107a1f601f86d3a39e541463532ba8bb6a3e72641b0b98152acd00c66a24ec6016502ffed1c54180fde1f235ab140a8038b2651468200682213de4fcbcb5f63641b6600d3ca95a3e77d333578f26375471941e2a1e678d5ba52b6b51f181e914b478459643fb716e4d4f115a9548ff0364186934461a84b1485184bb6b8ecadf5543f838ca1ab96b84a87237bf3d37cc603cf0715fd999d22eac809f6b6a9e8e43d91704fb4131de66650fd9cf6749493366c4424c1fb0e35892ba70666cfc1a0f7a0dc903259fb81c7528e8ef59feb0361bf613f1cff41fad998fd13aad74a39ea5b9af503c75bcc04d78fd157fd7e37bfbafd86fc072afd8ab4a86c46e3932a848604495123044047c87c5044e238dc756c3655d29a2a7dc7135f827a0380ca81a41dff3e205e9c2198353576184658db8c9128f831885ee14f027a20240d37dd36f74ccfa3a0af0782da824e308b740cbd2c3312327c8ee2b68ceccc343aeedfd751447d480a8a03c6de2eafcaa55561a00b5f60da87227a5128106e4509bafed3d43512f91566e0945465fa1659c5350fd6af3afdf966b42ae08b3c996427c02f0f93c8f3ea01a779f633d33ce39cca40d0f9206bc597a28d06e1da0231f2f3074ce8f7d705727cdf5a9e683756d9d4efb026129a2ce71ced86ef5dae1d2b0d6a6419451fca26f3b12cb318cbf359b91617c4aa1107236b2338a1d1b884d5dc7e309aef0845a43cc56867e5442fae65a467d2acc82b0341a429f4519a84b49b5a0762e846a3b9086f4bcb3c62f6483b547e6a3747a3a7258efc8616577e2b341e4d9559f78c02a33ef490d2b89da5119093fbf340aa257e815a2c8c6f04f3320997c53ac79c3873bed680d6ca8d26eac309b9bde90eb220dc287e5229946dc98cc734542220adf727605f7c2d84a3682037e9a61f95b592e70418aa383ea3abd233204810c0fca045a3a22394c05eb247871b8a4c2b893c606d001788db49ef13e41f693d2a76134bc6bf6401e1700852024a0bad0b157733c8aa48088cc7278ec6407fe9a30b463abdb2483dc19f0ea29f875b8889c7fe1387c918580dcb6ebac5dcbb7f0d182df61a8eaa628dbb860c224eb52a541ac70629ab9457b9b3794db823a7ca306f3760555023eaf1b49ce1e25419586dac85b21a35dc21b0119a7895b95ff08251c1315e6a50b1935f6f0d876ae2982ade5750357eb0926a2e795415f442f9b09c2047e8ea88338120090
6ef5a42f5497075af2b27e2c0b03c2ec6b75280779686753fa75fa56b217464d79303263a6254858141f37f82f512dbb4db1ecc22ee6aad3aed825a14e897299e44c31fc966a0033cfbfd6d6d2572a0125343a504c5218b0c1c50a7867ac0f402d9002665d4c0490185099501ace9f8483a4fe4a4fece067047341a120fd14911088026f725465e0d43ca46d42580337e61e01a0fe05614906d4b49910f99018e5815fc064c0fd94e377e7855a283f75ccda980368c1a2350c52170b4f806d4481aeddc29b4a4135acbca545356beb43bcfe29e997c7943a1adc30a771392393d1905784252e97399b954e003cb360e83da444406bac3a9ee7d6253dab3d84e78cfb67d436061af2682a0113416c4331024c84c706541ea0ef830d0b4be55a15cdbadf852b01d1222f16dbf38dee789da0121ce3586b0d3215891b35b6c40dd29d7322e2b1b66febc70763aa15191b01c9de0ce519218cef8be3e0fec0100a946d9f75d39a086cc063b1783002897bc63a04455c7d65ae990a8594628b0321e2d705285dd0e254c922bd5a4f171921d1dc0eec6c99013df6306954d7deed7e3d44f330ec50f66ea067bdf0ee02945797d520f4fb7f902d38346e4e1fc43eb082b80a0fbe44c86906902940a9c55c2af033cabd8bc9369e7315387c5548700e4392ed0acd57b1f02ed8abd0a39c1730d05295ebef3521397d6abe825dcfb1a805e03e7f43bd0df299783cb96a77f4c5fa3445604c26ed50f734993120e706776ebe7268c70388bd62d7bb45111f4ec2f6d9abea4bfa6cba17962490177c84099635744987ee7022dd9f27cd90a1ade456498fab077ac5955b843bd0e9a3c35ed9a261f824ebf2b3d1674ef2ca4a9fb6dde781593ee922390c06a8de314111e7c9941561b1c14f88b3a3f172cb20d1e216caa69f7d3069ca80d0e2e9f249ec1882e7488d26666b7b0a5b1bf367bd7719322d09ccc7e5197435f529881ccb1b95eeedac8caf717893aebcae798244f674af95fabd7acab288f826828ca4fdccfdf81dd06ab89d7c9d0a6aaa138edd415632929e9e572142daa6f5794465905707552dd8fcb3fca1d344219ea7f6567d19185b59cada36644c75ad2598a1faa04153b9528744bd8bcc101d520ad8d38362b1f2f9e85ecdb191c1790de4607794a21dd48192aef75374c4a479bd48a31d567361a8fae3e1181cd9a0b3a3c955b67054ed2546f38da3b493e477ba40fe04cc2efd1d90e034989ceda213f63ebe2308960a35a02630d62e40138f3edd04152b7c33538f711d357ef1795c5d8e1b0111c90c2ff8759670618ac1aa8857bb2b94c2e6bbdacbed4c74eac5d35c2f0110f0300d75cb9dc95b0ed7b44e3f122b0e182d99b547fd5c35bc9a36dfe6125fef1a76d11e0deb7198e4837d19af565b1e62e9d6a44ecb0d6f154b9fb21b2b728966901cbaa67f9981458e0e5ae33801b63cd77074e7c1d7f990fd23b576262cc81214baff5a2587d20502f196bddbcec3bdc5a46efdd273cc20fbb5cecdb52f20162d7cc0449ec8f1c181c7c2239cda307b3ed448f2e0613ef3b3f5ef33059c8289f6887c14e79124ce9f9c4a6c98180f72adbab6d94a6e18ba6c2a40d19dcd7a5ebf2f49d33d618faaf6157d58287820d31f2501cb2d7e54e514e7f765caaad17404f8d57a58d851adddb059a7cb62797c699ebd1875b1d3eb492b9bd4c9df74e3049d8bad4e23dc037114ef932295144639c041457a7fb9a65e040f4f60e3caed3ddccd5bfb2f9f6cbb8aa7ed25e7f2a2c450968906ff0e66a091bb5a64749ac41802683d0db0534ea3cba515b3aa930a198f41086b9b3ceb52bba5b8e8d561727a2392e4ed03556952d402a9ba0a13286145336936a7b346b31a14ac0c7ac128eb717e3a184269f5a624c16863b17578a8faadbf9753d8239f3f8c6d032e4ed35658872a76457f217ef04dfb0f20fd017eef35e144925bc643cb34a767fea10d0adfc881a76a75ca04a671330dfeeee46a52152479bad4cff43656e9b1c41691c156b5a0f04d0b28cd7561f3a12935beece4b27025d2fcb793b64f1919f32e3185188a16fdfb891d8461e1e204bf6d1a61c32c661de261ea1eab7451c96a16c062e30ec75a328a5ed5e67c07f1ad67bb3e85995b0f2660d9756f7ef379aa4b2cd9ac6330e3bd2d23b67afaccfd57aee00fc88c2a2482c6845825f622254c653896ccc95d1dd6be3f26608b5c102b12d10796814445b9bc04f5ac005b83c92a8db2c29495ce91b3e67bfd2f310db34fd2292a2b95d8b1373a9f66232228a7d444cb23cbb2040c1fb257a5bf902dc8f5ef2b8ebcd9479e5ab4cb3764da496800728a48f729ddbfc422278e956b9ef48dc8abbc80121289d17c855be4045b20e4570aac1af7ab69439e4992343514836b60b5704cb130027afc566b845f606cb2b01e075d85992958cfc847c4bf5f5009340f78a2d1bd9d7212320f5c697c81385a1c22ca2a8903abbd9a8e7698f2c5e95a794c8
5a8199ee2fb8bc928e20c22d4ff52159a82c1aa5807882771bae0e2f8eb74be11bcc91a28414ce751cfe9f74c7ac08e353d7a2d3c749eee2a3c50e092861948d79172f63379c91e0374f98ab0e1d8e9232bd6c59c67bc39638d19260f805470c1c2687aecb2f245b20d95282d6f7b7d7b5bcc6da001c068f0988e8cd3720becc2693913d24d03eaedea678fc289a8deea1732ad75c954a52f5020407f89dc97892de1bc759dbaf0788e064aaa9c2c881dec8811f1a91fc827a644f3328cf079afc75828e7ea7e49127c7f01829685504d1cd9b674e93986ab176fe3a7272875470f011c04d862e7832d0455082465c1a2557587b7794b256734c7445ed8db6d76d5fb27267a3a8b9bb15113fa7f872e9fe10f3231d7f6844b287c77ebefdfdfc912bcd10e424e048cf0c6cf629a9cc772427e86889819274bf9c69ceb5eb19d1f7468635fefbf6e186b464e0735fba197ff0c322d0461a34ed883e6e5a227c7e75c45581b919478619f293124535ceceddc23de2dab2fbf85ed1f14822c7cab98a3378743e83e3789e46f19e57699451c62eb59eb47d2c04250cc802608ac050e52ec11578f557c0fc53d348e10af0dd68b290f0cb64ede247f7c3dc21352f93a59c7f13fccd7f544f324120635292d8d14508b7f1acef5fdee63f0507b77f26527b82639f9282f03557006045526d625e182838683c36b4ac9ca88839638367bc9e90bb99061309772909faa4aee4b33183d8bbd23fe3feedb0f4b393384c0353fabdc66569ff90256186e51566ba66f0d58e47f70669bb987aea0d9fb19ded6ce64e962543304ddd647886d8706732ceee3d1deed0ef410b8cb5b209a892af292201eced2542b5da18edb83fe75422eb69ff7a3b6a92ed16c6762a4d894c171f383c9e397d53ba496cb2e260a7cbe8d838681af09079c28bf9d97c5bc01429e62d11727e249bd6a36ae57ea46cd1f1922c387103c66f0a7dd41112e8d04591be7915840991f6e56b68128aa286f0df50e256eedced61347adc552597b48314a9e17eb5fed4a36a20288d2e85a69f4c7ba6ec056025f8abaf0f43b1d2eced9fdc1535da63c428c43bb89b8932e85e37f4c2c27681286bf1e77cbf1a23f35c6a99278e15a76f4a628e0e52c4845a7084f9b532e7abba9300be8dc8c03da18ff48bd385102ec84995ba34f00505933433e84f47a42979e34fa052ce0fd2213f09c9c8070bc47c70d0ade0ba8f5f6646152514167a8e2dd9887e4ad8299879510c0d59904cb57861bb8281e54ee8a780fc429dfd1898db919e8cc1d35ac6233806bf993c9df8fc5f9aab4f677e21166786ada5772ccd9bce9a97e04f2c164a7c8d41e8b341bc75b6fcedcb973186cd2f17e1bf9a841f4e4d24281b655a89829b7c9f878ae7c00faf0698a12346bf5fd04bef85b30fc24084edc1981c9dc3e8b022cc6bd965309a9ebc9d54ebb4a7b8378dafcb732185a820990bb101cb077d11f6b5631b0f75917c3e4d9f3a54bab5286b7e4c30b922852d1147f3da02040dd914769786f732aef158ecf45c0a3973bfcc04441320bab5532232410925364811cf297357f530b3d65ba0e9c4aa246822741b371d9409100d998f02906b2ab5a6ff97d559886bd1beb22332009e0cccae749454b48dad3ea96bf80bf096cd13122b7a4268377bd4cfae8a88e395d2ce6226d883c99e93e69fe717fc2cbb7b8cf09213926055e2282f5191981e684fbb3c9ad896f99eb26e2b3b7d39352d8c453e485f57aedfff4fb506e5507c5571332c5c16e21ec3cc42cc8d23a5b80488e72c3207b5a52ee4eeb3eb92e6e2caa4ad3d52937341f4153ae7b22f643129501a204da658b90b87b88e948743cc3eb1ab39e1309b18b3a3744cc15731eb1d6cbc787b3c08b88d1ad38db1303e30a4b72d6dfbc1c035cfc0c65bc43c417e89c67efc18be5c311e535cfa45718e7d856a9799cbe03234d18cb7948916515f246a4139a0424ddf24112e6710e5ce73b9e0f7a85e6ae255e172b281b53397e8f02e51343fd4c4494ae956c55a8d1217c0dd99b4a57312000de1b78aabf8fdcd2cb92692e65b592c3fbb6bacf4cb3dac99b96cde6c669894f583b511367e756f1d243c640c74cb084b4bdf48873ef15d768b9ef967c17a0a1ab569b98fa7a64ae0b403de70fcded52c1709e95dd6a392186bbaed36372fb5cca17316a614b12364b2b40d3c5c1d6c66886dcde43648eb4dd00f5713603fdabb04a30412c0b45d986f4100843c024406229640b8005b25a4bb1ad3bc005e757fd231c25b7cdc792ebe9ec3a6847ed74aabdff365684e274d7489cbf527d5735e3e0766437a0c72806e9c5c2ed79e116c813b8bc8c534e4a2ae68518221ecec0169b8f7c811d0f5f85b97a507cd5a2acde2664dfc2e43569d8fce6b60a7bb96a74b57b36af6992f306bea15b9c4795940bc7a8cc7db436050c709d4bf5459748fa0bf93096bd03815dcce3ed350c410b975f2c9436d8230dc7b0ef1f4dc7
270015e233d6bd843c0e6dd957712e1da53a1695569c2300f00b5490f89a3fc855910ddf25281b071488260f2af2be39d796c78f68b99149d142f75e39bf9a3fc2c26620e9521d7f21b6247bd613ae492c951b929d350e141424c59bf8fab7969bfd66c78b411cf17a381001ff894194a0e8262063d46be7726420c73bedd6ed41ab4b27264c1230bb45b8c46c0cfd897202cda8418598a1dd45543e518c439c5e034148a620eafeb34f3c6105429bc8ae1b077d5c7d72dab263004160add9a30be16a2d1e207cf5efe2753040c6b392fc77986f9fb556bd7c06a2e315cd47242268f5b285c9d1852da0666c5a5d1aafc5989384eba7ca39a66ca2d1a4186a486a44a019321703e4acaa4e00f655c11c92c8452bbaad432d93ad06de3dfc4e801486c673ae8f1b94102b51c3673e69c62e1bb71c9e86bf624bb0fc47c60c0109f87447436040ebaaf65357b0845074e0927f075a439bfb8e5f3d773260c7e150dd654f931a64b27d4808e571196d439903621cbdd9f0b2de9739a2d5b6db129f324f7b8209f3a52110d1b73fe3b41059b1c82548318ebdbbc2d30687f4268a1ae0cb5fc8b6119ad23050fb3d4f22b8b30d892947dd320a291e63016238d596f1c97aa3668a35f3037504a5e44ac7609f55376967158e6defc7ac5280a088414068c55cc990096550c8c8b8483a2938582712c4118e1cd5c87de7e0ca21d75108c2f34de51802eb32f3e29ab45c3d861b6bcb9ad48dba344038b172b92a36266efcabe7a4dce9b9ce9383e342b6c88d68e60f1fb320e0a9ae4a73255ce6f7606140937016a8d158aa8612127120f59db5e9c13ab7f539d813a69039a1f469acf3586cc957a59b822d227882cdeb2c664bc9d4c2e62be5126878b318bf6854669301a4521557c87400f9322092172e53984f06e9c92f5a024460f93a64b82ae0182d692c7f7c216f1d01e78a8e8f9ce7a6c474c5309ce44ffca052b672b535746ed3297cd5131f77f72c81b335afbe6042e78d65194d8a963bdcaee9306ea4feb470a5d5715782173955da3bd3d89248d558b8a85349425ea1a423d0a2c8855fe91e90d5679f1c7c2a1d59345491398277530d8c3023d5bd8c3023d5bb887857ab7400f2e400f0c23defb0a1045753b36304a24d2a6a176db7e361a756d749eb43a50c75be5b2151dd937c6a2461283eec830862dab501e4111b282f76b8186891f73f3b6601d900e9a0517c88cd0e6e7b5548df7591e19f256788cd4c15855fc81793b8aabc3112891061121d6be96e3787fe9d122b703838f414a6988aae75d1adf120326a3d1e5c1bac90419969fb395ad2012a4b89d2020174d908ed95ab72a5ed29cf4e56099ef59bbf31c5a89185cfdd7a5bff4044c8ac62356af448b88eee05538220e3b756734c2ccb3b05a1da6f889c14fe9d898c8e2faf35fc10259f156ab4935418a774ebe7b435aaf3bac8c2b12abaaad8d0cc8469e7af7196e2773e36656f034041af41ac00131cd3ab1aeb6dd7d5c8bebe9bc04b8ddc0ef65f2e64e14e350571867c78c00fae8b7de4d6e4c47bada5036a0e2f9891885da83e33dc7fcae03fff49ed5c0fe65dfadfc6c437f1bcd28a6f83ba85929847adb8833dcf1f26ab1cdcd3e37849481b9c43bceaff837a6f1ddf95aa4d98e29905f61bfc8e6863e5b1d936cdcaff4d1e4c8180258714116d28f54e304761a154f58231229a98171cd5836f169130161a2c31796fb4a22069986a4b3328d9ea17513aa23efecdc37d43847fb99fc1f08fe56c208739306835589fb88fbd8c109c5e7d88b15ae881b1c569ae6ca0215209ca228a161b20cec5dbaeaeb2e72a11ce530411f9f6e1fa69248d0bc9fe369ee7b27d471f367624a31e2255befd68c2b4bd4e437ee7dfd9a51a302df6ec9c5dd4da00ecda15a5f947074f24a4044158fecca5879f87e31b49bfc4630e808a0cb0926ffe518d35cca95e1b8e5f35f316d5fa286870788364de120e0b2862c72b0f226e77b8ed7d8dd4a7ab7fb89ee9bdc57306db6ba4ffa324ac40f6040c801ed7b2bafee2adadc02dd872bdc120c9284288260c8c60cd89d8e0d2ee4b5d07255dcbc4a0c024ffb010bc535f1f951bd8b5576aa54101dda999053a7cb14bb48f7d2783cd5dcea665e9a55f03dc3f4ea79974138cb399b8c9cbb72ce53aa1f2ae13c14bf2963925539abc4b763592fa2a066280e8710d74faedfdb4ac561a6dd4b16c0accd6c4c749860adc9eb2ac599779195bc7632074dfd2c69093b2d4419ae8fc92b388d0b19a8b0a6c6e007c5494f72019b0a8401d78ab384b2217ab0ff7da50a6b12e1fdd3ee5c52bc3237407bd115209a8915f3f2b9de48c5507121cfc29879302a2bf891dd83e678fda8004cecdc1eeb520bfd6a0e875094f90a2f4ed2c21e041448ee88b72a39b4bc00a92d3cbb38bcfddbbdabe03cf79affb1d4205655c2ff585d0487afdb68bbe254251380b153531c6d4651b66d792ec0cbeb42a3628e32a7529e
190eaee817a82a218d2aa50a9952915d3a700b29302be2851096b0a7e14f86d46b83b241d45cef5f20fc310561bc63a63652606ed94e7e96452c403369a89170ee48cd6bb2ad9242e2b9c88f7483b4204a8f44962230df1a03bc4e74f9ce3458c244847ac2c15205be09240d96785d49398d065e1e418115b7e5ad7f4dc8bfc9b509e17ce760c08188180e777e0e8087d0152d6ea4078873da5f6810f1d848a65757d206294ea0118f0555503fba8b5775587214bfef929c30f2e5e5443603965c014954af90c350a5d7135f9dc659b83fd2f934a364d7b0d4317dac008267e659c47c28bd4b5365ad5ba21267e5feee4e415462927cf43c1f41b14b5a8b9023b6b6ab5669b7dfe8884cd1f7abdd36edcca9331482ebe249a5ccb72620713ec61105890174926fd172ac78e2101f5ba363806157c95dedeae13419d1d415e579b5b49f64e82304de60f5caf1d7407b001555630666874885d5fa73185b5550555c0bbb3fbb5c32253ac99535a4387ad76378180942780e83667484a4a0b154624b8675da9e147f7c6e209f14c7a6e7135326d11956e1cd2a133fc92543406be65063a595c8ab2dd6dab3dba99439d97b8e94f95f3491763519004dc977194aaa057bb7c8ca03004b4dba80fcee1f4603b8d085ad5368d69a3cdbed209dc47b18b34227e1a7a2daf72c2d501fd6f26156078c2e99edef7fd4d3a7bae88ddf86b6366760b8d09ab13f8255f0995ca54db47c45aecc435632574a4a6268132c7ed795880c789585768c2ef0b1c44598ad5fd276db1113719c392c3c733c54a03fc25c184352a14eb01739ce2bfa04e6176890f0755c5214352859a062a715b85025deba8e8fbce8acbee0eca2199b34897553cca3bb8e557d3eb1e7e19562d0482a06b16414c7ac1134f1561c39223a8baee0362580d381df23fc45d30b0cdcb22037ecc2c8dc48c01c72cd30c27f6501f215fc3ce212033d4b0fb9d19f0a7608592804a97d72a71024b49c307dbea98b7009930ced10dc0d6102904c570e5e268302facdf9da44ca4cb7f49e13fae80d0bb4efbe32eac21cf5420fba06b6b3908b04341bf3dd7a4671f16a6d5abc78a3157c5cf525f8c2493697818b1424d10d404e750720d55cbb5878bc914e98fd863f7f66626b868e550e4a8f153e34f6ff7e5f45530c0b24b773c2694592386c97d8cd7907885205061770f4b8132b10e028c2855772be81458242353a58dd9d123ba9dfaa44b594bc9389a8d6b2b4094f515add11e59ad06d1929e42ae2b6995f66b90c2866444d15de69c0a1b1de24afccc0a6b48277207d5c232999ea5d6e9bdeda802b6e8a3985929259e6acd30d3d22eb51d5c7a223be78d88a4279d5a47d4db54880ab680fb3c42d69a57c2d33e7dbd4424c51e9a6466520501e08dbd28add8e3a1dc941d7bef90c3ef51e971cd91c84666a8212ca6d9027855bdc31726447940bf65899a5d0be6394b5a0b06029b1236a00064c63587d8622d22482fc5b319758ae6ef73d9820152d320eb753cfe41442bcbaf0ca76d3e434bee90386a6f91ac733c460236c934eb91d2b716689815cd7b26e0bbcd3b52c2cfb7599565ab8961dcbde8cbdf20a6699d8f042afc5ac5bc0682c50ae998c68b533d43922198e7b694e05a58d9740fc68a8cd0b5e870647fc3ac62428c2d14e2c3ac0ea2a548ff1919da897189a25bd5570860b7acb57ebd86f6605f9ebae7ea767f9a6966d454f5e6e9db19b0ac9451dcdabb46f66145ec2f3c9be6ff777467c57cd4a7798ff42e7fc365ee6b18c82005a10871694bb0551bc7b4a931892200d1596d585bfc5aa094f805d2cd4982ed68a5909481be2d38aea844f0031d04f44756945627241d42d8c88f1d53fcf73b93ea75a8d4e29a9581e3818d63b200118bca383a648ecb4adfc30877220b31317ed54afb01fe0b1bccfd1ec4ac5842118473bc678a3ff504f2ee4a74aa772a5ee586796a5c4b336bad19b5c64372a2e4ea025740859f06f219abfd2237c5fe36996f244263605f09aa3739b2fead81fcfa3dcce91beb56b7718c7321a8a084c370a5ed047ef005e909919963caa899f0aba268b5a01559ecb41f4b11c98db3e6bcb11ae9adfc7ab1c9d237c58777f38fb53cd62c4450a6cab6570ad8c79bec9fa0bceeb3f0997319d1a344039511216417427fa42102f13eebf92da4a9bf1e1025acde293bafef770497c91392cbcc070486fd899097e0e04a1f219c65b426456feacd33ed4010ecdfc839afbe40f400cc4a791a8d4d9b2c453b1d0ed143e2f7a8895fa68120d1d29434cc2258e9604ae062f37190b43e36db16840e83afb9df06aec4c261f2bc39f9cb64930368db9968a43fe1ea7a729047e8fd8a8789ddfa586d3a8336cee6b69e314fa4e925a4159c0524612ad3d65c1822cc3fcf7b1cd3f9100aaa66b5d6523360d3e3ee7ddd4bad148ef5ee9f4cc9b322657982be58e5a5000d23bb64356edc1574558814349509a0e22cdf352c81
0255d4519e62b0d758362081695665eefee64228408e72b6312443312079ef5ba8432decd329553759bd07cc529a5eae6481b2c20f69fb3b665fdeaec2bc36a7400508ee6889ca9cb1726cad9f82594eb5913a78eb768e72aa5dfe998886bcc8c520d8bd8a88bf0d8d20c25dc5a733ace52d5505afdf676318716cd44459bb0ef3a70b061bc46cdeb79bd01e0d9866f0b39281b5cbcad795d94514f5ca228c948f70db02bfe9c67b81b95f240a8825c51b5df42a2d36d223ba3f9eafed274a536f49d024e54443d0b3640b0a6d0013c0c3c0c3c0c3c0c3c8cdfda7db3334b424a724b0ecf352e2371f45c4a49a62453ea01e7d8dfc65d23f6139dd87750d105120b4d0b3b0b979f58dc9432a23434714c62ea793f3739a491895390b376496e92826cee6860e2a0929ee79a762929c9d9e280c6258e7f2aaf9b99f989ad29cb6147c312c7682bf236c99c32b312072b2989419b98d24eec2971363fb1cbf4f5bdc8486312278b414933ee61f2844912e712c37cbe2889c4f146bffe49d950332f240e26ae4663f8082d571f713eb1945c89160d471cde04e9252f591adb984623dcd4e55e9aa667c431c68492bbaef4d87c110769d59b478813451cab7466840593449cc643a5709659ee2242c4415474eb89365e7ff621b858bf979ae4d81027cd948491637b5fa22bc4498610c2a452ca849bac123408718ef1ba744998123d4c833899586a6c5cb4823866af959b462daf7c204eda366ce92d1310c7f4d7ba5779e2768f79a0f187538ca14ac64942658a314fa0e187d389d1944cc2af82b07c1f8e6b26e575b58a679bc28763860aa129477b38596bfadde87de2f689c13ca0a187835e74cf19b50c17f23c1c4f0cf69fa5b615b6e3e12077835cb2cd1d8e7f59bf7362e869cded7036e9368d8a4993682ed5e12435b364f0f65f9d133a1cb4e952e2f958dee77012453643d706c9e16c7179c3791c879318ad2e6fd01bb4b586c349beed9fc9fd460fbde118449d92dacab42425bbe1f45bd29affbf88d56c1b4ee2bbeec93fa37f31ca86a39d98477e943a21d3b986b3d86a75a6b9a8e160a3fd44ec2a6938496ef516636349e6a3e168725eae5f49f90fcf7092f7cdd39643d85a678653eadca6d8e695e4caca70d4a0c56be42a329c4ed0fc975df6319ce44c4a2835cda2254562382949933ed5379d29bb1cd008c341ee9c64725a1e0c27e99f7382924a4c17fe2f1c6c83aefeeede0ba710427ec591ab0e3fe9c2612eef588ae11566e5c2e90479f94e3e4972db942d9ccdb73ef3c347f4bf32c8d0c2299560925ea8f1b752928553599718424923c46966031a58386df48faf245738e6d7ecdb125182f4da0ac760322b76255872135385e3570972b25e655e0b8ea851e1389671e4da9794dbf5b21b616820d30734a660383a40430a27d91d19639bbd9b5665018d289ca412dd922cbaef4db95038870ca5e468067172deecf1830c1a4f388e127facffa432193c16030d271cf62a64ee6c968a7ad28463ceb649b265ba0ff531e1fc1a3349c24bc7efaae440e211060e1c377019a481c6124ee276fa2f2a5352df2be1b8499d7fdfea08697b120e4ace2446a833932c9320e1a831e9aa135350ba24f511ce257d26d164c83eb1c44638e5e649b316939e49e1221c2dcc4932c42d98184e229cafe54491196ade34398463fe36cb266713c2e97236def2e95b511284b387f8d094a374a70a8483d0d46725c88911df0f0e3e9724953ba3e183d309ea3bfdc47c559a62e006183b98d18bbc34c59d686ef1e218b27ef3a2b1c410c1b801c60d66ece23442647c89b1f717e21066e8e2585a752fe6d5b8a736337271aa133bdd52b198470517e7b50a5a830c3d262d734498718b53d0173c33c5d509d1dbe2287ff94dce74e7dea3f7400ba3065d02bb1a748fc22158418e0fcca885f2b33a3a67333976f040333d98418b73bbd58976bf5a424fb338e5107929a7a5ae2596c5712b777f668d17e3f4589c64cc26dd246b499292e43a7a8481b0389c3ea9c49ce65fd6e82b4eea4ed7e5a9f8889a9d8f19ae388ca83357bde07341dd8ac3e912f584f293e52565c70c569cae42c90e0bf67f5959c549ce944a926933236aaa388c7cb14d82b852a6a6e2ac15b337bc7f8a9d507130c16393f8cbab1cfd290eeb15bd8412839292f403678a534a16c32e979284df121be8782ec57943739aeaa928f73d294ef1e545b7b5bae41c85b12fc98bf93413c5e14a5ccbbf74cbb47e284e42086b9337285d721e3974043340c1fa499374c95675750eee199f38c8f42deb660f1e3b74886086278ca752ac2495954cea44698390e1f7199c386df586d2e8a58493db91199bb82b8612c4590a8b6b2251b521c4a82d1307f950213575d3659838dab95e36cd277659b9c43188d7fd7dd11035269638698dfba2bf412e2e5f89c37d493fd394dc24497044cdc7ba6006254eb3bf974eb62b59d2a86c1247ad0d9e319826
9d412389937cb92e9bbe93fc4a1889c3b5a674217a29ea0924cea15164967ec9be6df8889394344c9756e8d3e43be2fc268467be0d6ac4d92f337626691d23ce1a73c53b938b385ac6bd8d15fd5afd55c42543a9c5aa3549c46983cf6faed589e984117114bfb7112bf1d486cc214ee944bee532dd0a726488938ac9d4d2955988e3fa98ee55ddc6dfde83470edfa1438c1d3a9010873729175e94580671be132a2374f6eeb762411ccf94c50813be84f92810c74b62570c3ad46b6e538e1d3ef223c7cd00c4d9b469aeb4db8ea8d5a07b1433c18c3f1cbf922484f01f3dcbc0dd07fae198b29968fe1b9fe9ef0dcce8c3514ef49eaa1294d49234a296a306bee36ad025305782197c388aaa60b28c3ec1b265c61ece2952bc7545ce46fc33f47012524bc593dd16efe433f2701293be9d86ae2c1bf5197838a705ddfb3f1a4de5b747ce3830e30e871d1573995ff74e663e00831976389ae6de3d79a2da359f518773891ad7e46a5e9ea6eb6e84a1816fff40961f33e870329d79e49cb4952a26cd98c35183f44c616230d1c2b5a3cc90c329a824f2f64ed2d6a3b21e3c7eecf6f01f3a300333e270cdd8b7b670389912574388cd1a66bce1dcefdb59394ed2e3a53d61861bce176cdfb3726511178da855d98c369c2c7f946a57362defd970cebeb4f944680d8663861a4e326ab615eb509683e13a7a84813687196938c9a541c5cd5a29ab190d27e525e6fbc7a4f605679ce118e4555349f9554bccce30c349098bd14dad9fe612ce2843e95ab4284177f74f06e3ad578468d124fba1638c20cc188369b32a975b0c8b8a8173dfcd07560733c270aa929bfeede563c2196038891e4de65d172f5a72c617ce27a950a559e385e3574939494ed6d3c38c2e9cb2426c08f37ecf1b6770e1a0514e50497783aa351951cb513c31630be78c31a937ff6644ad52f70823ed28e3bd0437c0b801c68d1eee19b0195a38584ce2e51325c9b0d5ccc8c249b869851f1d9a195838254b622e9afc47d4c6e0d103a51c37e81186084a8003c75916665ce1784209e659629edc57ba47191d8219563878e5c93ef91b1d5667156654e16852927f62ab269a34e2c0910685195470a38c16214fe9499a423a546e2a8dffad91023af3af5850d69b3e2bcc8882617eb38896fbda2843a1df20fab5549c493561c613acd2703188a5149a5953a8de69b6962588194e3849929f8ca741dd2309339a70f82f25f8a85132e1a8ad279cb293da5ab7259cc5c4944eafd204a1d2cebfc47d5ff624092769f46dd2994ecdc68c84c325e937c54b594908a1231cf496983fb615ae1b34c2c1c49d5f0d2ff1e4ae453828b14c4fc3888970cc12afdfcdd4284d0fe1943a4ab42427936b2684a35dd8b13bd3a5ad0ec2498c3a352a5c09ff6006100e8d29fb6ab696e0a31fac1eb2f19a61860f2a4986e56ed40c05e8c549c985d7ea2b7b5126bc389d987459ecb4d899510176713a3156aaf04bf927bdba38c6ccc8865993a6f4a963a406397ef8d8a1c32e51010870d4a000b938f569baebad5efec87171f0be8c9e5d792c8bd22dce2347a9b3bc0b6dc26a8b830891fd55b2415fb702d4e228226b4e53d25af7cd0a0a400b3ced62524af212cd32dbe307195228c02c4edb6f29289da5a465e934060590c5262c66a8646eb39758a8ab23b533bb8776444d4d0f0a000bc3c1a300af3829e9b5620c931e1b972b0c0713a100ad28002b8e2987c566523ddb1eade2b4716d4d2e2e89ddb12a4ef1e226399ba3e4089531c61ea100a93866d70a977f458e3231a8389a50533954b632cdc929ce973bb64c9062ed5f39a100a638b7bb68c915e36a4aca48978408e2247f5bc5e6eb1d3a60d0a3471203c60e0edc00c33d033cc2b801c60d1f1670cf009610ac2087009410128853484d7296b4101773e41942007112b3dc9c85df60716232e40f87d731ddae3f77ba8446d472e428a50c217e38a694dc2ff76817f3b410d287832ab952e4e94deb22b6e3c3d9b5bd534b4596b43ac71e0e72662ddbfc2f88103d1c73ce4c729bab9246cfc36984f6bff892848793be7c714426794734bdf687903b1c94fdf6695c7ac68d1d4ed9a1beef8230a40e079992f0a75dfbf575a943081db6202c36aeb6b8a63715217338756508575306090f319085c8e16cf2fc6430b9c4768590389cf5cc524aa325e56317110287935ca34de593c3b2c738a266831ee3876bc0072242de703c61f51b5c93d01fb78709c01b42dc7092d56d2c653e493c31d68683166917fe64f7d831b94184b0e16ca3f6e29da0adb91d00cc10b286d3fa9c8ceecdcdf6550944881ace275c4c5a4d7e3d258f1f068c1b1cc081a3478b5146084400061837788491af0849c349badd11fbd39524137d313c4b081a0c070a39c3a95d2e2cf8260bd973f8701f3e72240ef80fef91fcf01c56a4106286938cf7a5c41c27ee3df621a40cc7ecfc8edd8b3bf94c0ee0c08103478e588490e11
8fc44ad8ec6bd30a5319c629de8a2f6f5ffee440c07b129fb82b818bf360ac3d98431ad1bd54f545843c070f6ffcd3b69c5f594d0178ea3e5449384fb5971f9bc70ce603a43cc25bb70ca50e29b98294e5bfd215c38a8db9482923516b285a3c5d25a954aaafc6f8521440b0721cb84fccf88e675b370caa0c4495a5a26aa5823f04208164e55569684d332f7357121e40ac7d42688d8dfd60ac768a94b8c6ab40a274ff72f59dde493926495e00618378a043746f0811b629031860e63215438a6e68cd9215f52844ce19cfd269a65b84011228583ca21644afea6c22a33a26696810b89c28540e1b0592658653ae924429e7090717467bf690b1b5b10e284732949df429c24138434e1b82a9a4a7a0b9dd9472b8430e1b849845566bc24f534860859c2495295f425e9f4c4a8b7e4184294707ecb4aaaefdd6a991292849324b37d5a3619241c568445519996239cb2ce850affde17428c70b6602364bad8b020a40827f9db4c924ada5421840827a1c4ca262d49c28413154286700a27871a1b6d4adbe9236abc234408214138d70927a364654d551523040807bd7137282b9919c2726f08f9c179e6c5c48a59c94e0a317910e283533e53e7e3276df9264dce01a417e7cf7f0d2adaab415b9217a72a251f328a7a2f2905db00b28b93ccacbc2ba784c9751c407471f612fd33a924a1de3a482e0e7a3f9a549e982cc606828bb36c9fc88fd8b09c1709406e7196d1385719d4d9670c882d8e6be5a9765a07a9c5296f854da29a7d93201d51cbc123951980d022310899e2040fe935cee21c4abeb86f72eb5b09fe304b1e20b2384932a878446f84a10131c468306e84f181911f3a5200128b93a8311b4cca098b639f893f66c13b1ff60690579c45ac64a94a21636d8c2b8e9ac34dd5494997c9122b80b4e2a084baa765bb9c60312b4ec2377f6564688c6f0f026415c7db978d76416cd42f2d7f0051c5b136e8deec88555271944d767625a395829211b56230e86181911f3a52b043070c74f41001af00041527156b64c6744d5aba8fe8097688e02ced28630c1d1c0390539c54e55035c944173b494c7158b5ab91b1f4864a651a404a712c49657ca816cdb6162505bff143e50b130d183c7e0707ba00328ae36b4e8b5352a8cfbb9c0144143a0009c5292cb8c5906695f48907028af389498bd698aaf7892499d51a26698962a91f3e76e828f703104f1477a2a2e5a12bb8236a3f748ca1430c3510dc00230537c2f8c08d528c8174e2e4256d595e1bad0184134755b91835172753af5e00d9c4295cca7a7a6397d476324d9cbd72fbe8c96413403271520d63e2090d260e3298c9aaa3bc33d606728973a6d09542cba8c918648973665e3a517493142aacc4955258b2d8943ae6cd434bfe36f165553e54cef86a004289f365cbdf716f2a9bc429cd5aecdd51aa9298d3c0f146181ad031068f1d371083c708dc33b0e7230320923849f2c49b58b934cf71a043bf001289e3ca5d12db6e47b4c5769cc07f74074020714a59369d25495b4619e5b07cc4d194fa2cd9d64c501a88234e9f392b9e9a3709208d38a5aeb10fa94950529a11c73e498bcc9976110771319d1a97d308208a389dbaaf919333efde8938bdba8b18134c0ad53f214010712cfbaccbf7df2a227488a37ea5de5ce63d635a431c4b54cbba71a62475152c810029c4492e799794521f21ce9a9a4fb8112a437316a369c0a30719641435800ce294562d2cc58908e5b7208e9ba133a2977ec7440371da6b8b333742fccdc6069e769cdd010410a760328556db3517d383c17a00f9c349b09c7117b2b44ae47600f1c3d173a4965d9e3c4a2999e006183870e0c07196438d0f01d287e3f965b4244fc6acc83870a08f1feec347973a80f0e19c21665183b8a0641161a0c3475601640f273f498834a924d92d623d1cde74c55872e96a13d3121c1d40f270de4dd7123345450c207838f67be8f82d490cd3271840ee70d6d9a4a2572e9b30b69501c40ee70d966d2da874fae44f05903a1c2e366a65e5cdfacb8da8e5c82780d0e1e8154cc9a4f5659210871340e67092044bb9d08d8da8d140c71140e470fe362549312396bb4f4b0089c3299e94cb73e9672b1240e0704a5a339d4a92fc1b4e61d46bd7bac363476e38cca971ff525b1356d486f34835414e5f98177db3e13c1bc65daec33493bd8673e5d5a5384956c3e1376bd2d0db27a846d370aa9355b7d35a56e4060d472f55756a41e68c3acf709271a28b491bcd7050139e65cafb329494e1e0a271ef44d1797f92e1a4bccda42acba7db311cbfe48cabf29db6b1a30188184e96e7d5ae73b35be6c370f8caa65b326949ff1d0c271d26b309c24ef469fc0ba7542dfa7aae57fde25e38fc96e8eac68b652675e194d794b4997da3ae252e1cb4c589f15a42c9f8c916ce9b525af432afee4ca285d3bfd9c8b552926c1c65e1f0756dcae384b0709e134aca994992e53b7485c32515
7ed3b292f8695be198b4e7e4cb262ec8acab70b4dcfe37df91e1aba6c249c8ec8e6bcafe25e9148e3e52d36a848a495251291cb36b53926bd3289c949e55f3711f179942e1682974896661a455de3ce1a06e9485a668bd76a6134e2a65f253e363134e95c45d7ff9ad561a99703c75269c6ecbc91bc4251c4459d0352555ab9d14251c93f80e55b97ca7c59284c3aaa6a0728957d738124ec9bafacced231ccd4b1052b3093ab3c508678d233c46f684915b114e3267cc8f2ab55c2923c2a95438d96eb3408670d2fb53fa4ba6a494aa8c01228493ec6c3bd19a830148104e3a476c63ec53265534a20d0284a38990ad416ec818def4838376b90deacfa4ec1381f8e0a46c55833c396f4e6869f4e2b866b71beb6450371e0d5e1c3ff4f66a7d75ee24dda0471829a0b18b2c9c5965aa7bacb6a9c566404317870f214d9d4ca67efc948ba3a6da2fd512c6c5f94b8cac5cf919748c346e71d0a3d48c8d88aa1e255b9c4bd693ac2451b97227b538d90922a3496baa2f515a9c82094a94bfb057fe178d591cbbed629dfa8a6b5a16c7e033aa4ee566aa6a62719e93fa2c73f86e8f098b93587b1aafe3bc55c5571c6dfeb269cc6ae9bb42c31567df686eb56efed79a561cc5ba439332bbfceab2e2941feab4c59fd9bc8aade234dac48af797bc638252a0a18aa3a9b1d798e4e465a9a4e2e4e773e25f0926dc8c8a837e91d14564e8f5499ee2acf9a21a2a8eb7756f8a9352e96a5a3d830aaa2fc531d3bbee9536bbe41529cea1e69a54adc995ee8ee29ce92d73a15772f5258a8312134753a8249b7472a138d6d8b949e99294428b40713eef2b99d64433d1449f3897feb656d9ef2b95d5010d4f1c6453fafea89243df6c041a9d38a5b025a951b17f42c938712e614aafc8743293a83771ccbeba09a9e6edabc87912e74aab623d324a29314be2f49ba792f5fba9d27124ce967262d21b935082742171da1d4be2eb9aba5bf611e77d6b97396d5ad3461d71d89a9197c433d327d4469ce43025a86bb6d6b01b461cc5e413ec4b83594a6216710a9644f88d65147150f1daf256626e93c9441cac2437f13443fbc524228ec1525f10b1ec8ad1fa18448d439ce4b2af5c552936c91067d1f551393119370b71ee102a9f4979d2ff9884385bc888cc2afddb3d0ee2202f575dac25b1510ae2e0e255a2ae64184d2981385cbecc68e13b56d400c429732c5e6cd396b9b1d6f8c3793ca4c5ad94b2cafb236a39f28cfdb0819715d4f0c3b943b78ffa7635fa70ee3929689162a142cfa3c7b5a0061f8e27c336ec98ead7ba20a8b187631284e54969c34e09170fd4d0c369db4e124d95ce64328b066ae4e178e9d428d962497a4e6944ade0e1a45d4e8abe1da63f7bb11a7738accdc6132f95cb34281135ec70d66c799358d91a753885ffbfb2ca1153d1840ee75f8b17b61a459fca8e329e6d50630ee7924bd8aa144c72388c9ed871b741edf72c83470d6ac4e118cdc26a1a5dc2e12493e472925552eadb1d3ac678c34197e9cd08559933ff6e38c5b8da9755cf67d335a2b6a3471928078fa434a8d18693c52e6562dc68a2b565439d39b332b2526b384951297d983e37d5ac1a8ec1b64b34316f92ad13428d349cdf2dad8932351acea5d93793b0164d9ee1a4ff37d4697eeb9d6e869398aa35dafa5acc8c1951cbc12371106a94e1945f6d94523bfdb241644057988de638078fa43fa83186f30925aafa92789eb9440c27fd5695e4a93a499b65442d078f1486ceaa3559dc7032a8c0908fc85841563458e30bc718da9d73e1bc440d79e160f1cd92e59f6c92d4236a5d38c86c93f5d4496a35642e9cbd92924a4d670955d5885a0e1e297b50630b67ddaf3c7a37a9b44d1951d3410d2d1ce3854bf1f963164e27a5afdc5c57b7760e6a60e1a02e9f50d22d778593768bc95e7194d49d8e647c50c30a07619b67c7ba1b51cb4186f720a37050a30a47d3b651132e448593852c4989269bc249f64a347529164a2e49e1b431059d9f73b7d6658ea14614ceaf6175bebaf4c68fa0705059d31a5434a9b2e8c450e30927fdd5e5be49cce65772c2c937f454aa7f089371130efa37428be8bb2499cb84c385fc56bc5439ceca259c74fba594f2a478a156c2d1c4726f893125e15c2c359270d02674c3f4e4cca2922ea88184939b7f2559724e4f291de164a743c6d126e5306a18e124be79dd8b10b727adab428d229c524d4c47e992d67f27c2d14444a970c1c4f593b11a4338d89c1e9b2bf100193d46a9218463a9adb2350d67b5e11a4148ec5ab12bcd2540386cc56ff9af669297fec1c1eb368414939fe1dc1a3e38650e9984cc6ad937687a71cc7015194446ba8cc68be3aca8c99c73f2c478ede25256a2bb7bd797ba38e90aea4dbf94942ff6b938c64bd16252169a6f755c9c749b50f25527b740d3c2523c0b6d9f235b9c2fdd8965a2788813bb166793e5b4fd69ada512438b73aa8f96bb70334d29b338c93
521d79915277593450ebe88c541a85829546a3a2127068bc3d58f0aba9b4df6b45e918812ea62860ca57d7345257c6d2593fb3209ca56a4f67475c887669a15e8924ae782b2789372abe0b32dc8e8134f945561c5984c351ba6928ab734495d4a2a2b11159524a5ca1593a4ea1455dd9694265a668d294e426bf5dd52558a539a3f7fb564fff73a299075763d323a8a649e5071fe4b5414c54992c28fd8de84e2a0a1534f6a95676c0914e736252a68d77cd247f489a39b8951975eb24c903d718a5196a24b2cbd7fb29d380933b7ebdf52b696e5c4f182126e35bde4d7db4d1cc4e43596c628a91ad5c417974c9538eadfcac449f89c6c396f76f127260e6e7258d5e9f112a7ef0d1d1b2ffaf3522c71301551736a2cf867a51227a9da323a739bbe3c4a64da624ac7c618fb491c752ea39dd85dddd02471b098c72bb65b086b8b0b5f4402a573b3cd0789746cc59c4d23db233fe298ebf26405cd2d6a57471cf544153de1ecce47e50a5f34c214a7d154d7653c694664e26c64b42f16718c593746eee9aed35f11a78a37d24f584961e54fc449ae8bbc2c3e22dead2db5551de2282e27962ce233d47b439cf4a4d69cfebea54fbc1087f99c911bf93ee226c4f9c3f624715256690771aa3b3d57d3a197a28238daca58de132fe6aa1488c3893f5994204d554c9200e25cf5df3da296362bc91f6e3d2fa5de27ae2ce487f349b2963e312b9949faa0ae9598f6663e1ce498a07ab9234509427bd04ac98c17849be0ebe114fc04dd1a6f4d4911e5e1a0824a524f9ccd1356c1c3c9e337c8935bdfe178fa15542ead14b5b5039b2bd6af4e9694bdafc331578b899f4950b62b7f41878352e26d53933c879398b09bb9733bb6633914e6f409224b1451e37034d13d37b668e170aa72132c35ba6ff852bf84954cf25609b9e16849ca19279914dda1b6e1244c9d6cbd49f3051bce9b16f4db74ec7857c5b0f0c51a4e4affea884bf15ad7fe420d673f1d993339a59468d270c5d12498a445ec97341a4e82878f1825ac9b6cc9331c2eb9888b41bcc80fa1190c61e95edfbb416538cbaa785c2c0b194e6b314993b7b4f9a6319cb2626b8effba7dcd9bf085180c6d498edc8d26a5240cc70b2156dc525e4f76603888c67ca9499029c9da7de1f2dd93ab3754b495bd7032499338512fe5d1b375e1342a96b75ccd5c3809e15e32eea6b233d942627b267a2869e1207696ed628ae1fe962d193d46f9220b0791f1f22b98e82577341083870fefa169b1700ead69196d2dc9d9e80aa74c1be553d72f0595f263878e31ac70364b2273cda9fed574154a2563668450a1c231bc4628f95d392b86a670b0b496417ba752386ed21fa14ac62c6aad11b51c62f40f1e5b852fa2f0cfdf5b1295d79437f8020ac74c514c767d450ca4232923790ffdc1174f385838edfe118b79591951dbd1a30cc429f8c2095fae6dd3156dc27959c39e1c21365e3321794f869d91abe32ea1de1c26e6bf9f14957036e997b48b52a1996512ae2c25f7cdd98504334c5013db44b9bd18bb1186062e006df8e2086fbe4c934992ae67f7889a11d898efad2ca593f5f61e89a71c3b3af1c154f8a208274d6a829a8dbd6517044651f00511cef95f4aeeb194a16733a28666021c5f0ce1bc2af36bd2f6236a4987183c7cece0014617be10c271e35b123749503d2abd2f82704a82bebebb4d621e19b72b39be00c2f9945712fed54cbbf7236a37e8f1835ff0c50f4e258e12bff05b329e94e165cc80055ff8e05c61f44952e63451213482013284f4e278bd6b166a46d789bb0143082f4e31cdeefce98a6550efe214cfc47449c9b8112286e842d750cb4eb39439b3773dd4fd6fa8890e1f3b7424244272713a93afa1617c5c1cb34f0a172cc6c42bbd272bb738882e93e42825ca51d783c70f135ce0870d4ed9e224a35988e550a24df4b53809db9a36349992d6a4a010420b1e3ac08840c82c1c10228b584820041615f20a31c468305cd10a0884b0e20121abe0f13b72e820830121aa4840482a50718a901053f4304048297af88e1af8f8f1212840082972e820c3029eece0a18090512c2044143e6ee03268404828749061811e2e831e3c52d08010502487010c3a8c08847cc287277c3020a4130808e1448e04846cc2c70142349180904c848460c246c825be08b10401422ae1c375fca81142891c3ac8b040f728c3023642267192f3f449a5ae6652b6e4700d8448e29ce157b157424817f5b709227138c1eddc04d9909e494d08818499444b05a5df0c79c42928515a5b2cf44c923be2a439964f851d8534e2b4692d9a4f967872e58c38be661d37e9a4a994c4459c3ddb2f5664869bd11471cc75628497102156de449c2d66895e0b15f3531d2304111772880b3184e128049bc475d150b29de4885a08215239393d4a4ed54e46d43250821b7b2a0819c4416c5a5bbc38997e8c388c9da871599b378ecc0e361671cc64a249375af389ce
28e21445da9daf8929cb96d0602311c7104b254533316d2ad1da40c479f48d12251adb38c4499cb678a2643ef1eb2bc586214ef2651121e245c58f4921ce3b96c4d45f4c09360871eccd67996166846c4503360671bcb0d911fda92283cc86208e962788106bed18360271ded4d061299f302966c4b0010846d8f8c3e1ae3685504b0736fc700c32a87f496ef7e1a0b5dd64c9371f0eef5ea53f5efd43a6d70636f6704aeac5046de9f461430f474bb2b75cc89e8763ec934606252831bd573d6ce0618b765afd95541eabcc1cc1c61d8e414fe69d89f2954c9a7318156cd8e114bd66ac4da62418e8f091c3f260a30ee751b24a92a6b2873e250e1c5fb04187737de974958da284ebce60630ea7aed392df24664ce5700ae17fe2a799ecfa31661c4eaa4992f642fe6a3ed9061c50392b6fdc282a2c031e3c46c4483aee0461e30da7bc76a61b0e4a7ec92445cb9a62fe236a660c36da700c71f9c345058c911f3a5220861816b801c68d1b60dcf86103ff71b02c6db0e1a05d94fefa4b82acc86cace1bc2bfaada4bee660430da75793c2d6e8be932ffe061b6938a96821faaa44532e97b1c1061a4e42e4d644681fa1b5bac1c6194e97e3677596d6a04b7062b06186c3c9ae6962a89db20c2ac341882ccdedffbab153186c90e1e4262c54f20afb4c63056c8ce1146477652879e33fbf180e62416bdca5d91b6987e1a0a5672efa520996c460387c965bd2a1c4ed49ef178e15e7d6dc4f975c4dbd70bedcb0be219ac208958d2e9c6c376f6e91f976e5c285e39ccc0b55252897932d9cfb4325ab4cc94d685e0bc7643ff657c2cfc2495daa2429f977f2081f0b07bb186cbd466c25bd5fe1242d84d66d3eb1c2413688d1d2ae4b219454e124b5da9aa8a1840ac716b92e76f2c3db640ae7ce946294116b92c2c952e3c7943ef597465138c8396d628d1a0ae78cfbef0ff5134eca4a5d52a26925df76c2498e0df72b41998e8e9b703afd264ebc8a19e363269c941493e34adf8ee6770907d5247d6788a897bd4a386fc9258637bf8aa2d9249c4ff8d47491e14767160907592293d0131ee1709adda3fd442b25b746385eb8f46df395b52ec2415c9297c4e2466511114e9954464bf2af7b1ac2595d7ded5d2fb9c74238ee5b3a559a47f4448370b0d8f5515fcae49c4038956e9d96d899b13ffdc1495025d637bba63caf0d1f9cccbcc226d1ecc539c734c689ba3711f2e294c4a44c459129cd5d767152c25a6a9697a42c4517870d8ba92fc86fd1cee4e29846d6dba48a312931838b83f9a5097397ebfd32b738697606edb72783db698bb37db8e5a573dd18cb5a9cff24b74ab2093d494b5a9c6b735b8ce9620997348b53ceaa46313954d819657138493a69779fb94e0c63710efdd9142d85b03805d1ec1d2a2353f45f7110cd62624f3583dab8e2b061b6a413d38ac399e5f9a6aab86461c5a92b7b9d7649a7eb2bab38f9f7655092d2a9e26cb1fb3ebf74a938ee28db1437e6f6660e1507934eb444469b6ded4e71d02a3a7231dbe44d628a63bca4497ba388cf6ba538fe6b98785dcd925a23c549e62f51d3368a63d6e952b1ffd27d2f8ad3853b15beb2f7c892509cec459d247d56a0386cb8dbe833ad749616f9c4b9c432e9ae37f8efd8229e386b4cff4d7e72892e428c8348274ee76f7ea94aef5728e5f0d1653e2c50da20c2895309a595b4a2a56f4639600232ca488e0132ca489e74132771cc4c543579a2a58906114d9cc63609215e34c693fd1944327152b13108a1e1ffeb3f260e5ef94a5692cf041d3233885ce22896c5b3e4e6d13c6f8f8343c412e72c157bfd130f4304229538267191a9f7e8e1a7042c4289499c543c292c2209c3110391488840e2441e71161fdfd465ca54d88afbc023e28863e5ba88a705cd273870300f441a7130b1d346d52875b308238e9574e38fcc380f327c780fdd80c8220efadce4d092414f7c8031802b8828e2246dae50620c26e5104a228938679fcb4932f4799738a2c6271041c4d94ecac6294934d49a50193e700422873888bf32f1aaaa1aad8fa85d08440c71d81857a9647dd7a8231d3c7e14e254fa6b2ce9498893f7d7ae8885d256ea0f880ce23866db5f493a11c4d137b4892d7fa1fbe2409c92d25a313aeb4ba60788538c3bf6793a664ea9f60f0731339b318da87e2b89f8e1984f972c1ec2ee83e1e0c389ece1647f297bb8864d440fc7b372cb241afa1a6f913c1cadeed384d91ce284d188daf3c831e2a337802a8287f3b98931954d9bec93bdc3492a5d72a53ca49f9df440c40ea7fbd7ce2cb1ea707ad7f495c424d992b0a1c349d95e36f537933775581099835136a3566a05d79896c238c6a0811862c8e168e9d2be9bbf011f37700fc4e1949d9552aed6cd90aba982081c0e6e29e54a3abbb69a7ec3593ec6b2896174c3513d44c7975e4bcb8a481bce99df62ced718af8a89b0e12408eb16d57b5ac3b162dedd3a29ac8663da492
5941edd1b53af0591341c356979eca59c2af193418fc1131041c3b164daedb7653e419a233e7a037b64f4181d1039c349db645072e84ba5af34a26686f3895768b5e9de3291a1a30628528683b88ed025efaa81ef702b531021c331595752490c71b3de160a22633898e09afc3f93c60922623896bc66c1ac92c270d253825eee250b13180e266f5fe946bb19113741e40b278bc154126374f589122f1c947585fcd9ec9420d285f38dd21fbd244d64a8b8700ca7753299ac41fa7f754810d9c2317ea9be9a50f11144b470d8e82698f8b61c0b276288a143065938f6089949dc0a9555d458389a459dddab1246db39a2a603912b9ce2afea553875eac46d8563bc544a6534b19a665d238854e16ce2ea58ae8a51e1ac96dbe7478e7cec4f0491291c6f6b468653adce3c91c2495d9924ea496a49ce140a41240a470f296795d6dbc3741f50386869f6e656cd39568fa8390c9e702c31336553192689b28938e1e4966e94e46a526e4c36e1a4649b5b5c90266e4c62c2d94c7d36469df8891808224b385f9f94f9749d5896f4818812ce59b9f9cf828673209284c369ead167654ad46518092755ba466345b30df511ce7b996452a5a3ecb26c84a3eb9dac58e262dc707d14e1b027353d344f8d5022c241e993b444ce87ba4c1a511bc3b506224338a8e97b134e8a5db2288908e12826364be6b4a0747961f81009c2f92b2ca68d975485460111209c64ef8cff9cb064527644ad0c911f9caccee204add388dacd40c407a77c23bf24d37c2f4e266bf4b33b99c2cb548317c7fcd1a5279ce977d8bb388d8a8a7b69837e06378c92821aba38a8afb15dd924698f528d5c9c0c6ae0a2478d5bcca0862d4ef228f1532a0521d7e223d6e3071988a8518b53d8df684a30ab8ca225a1062dce662136b2e29aee9db11338ba4183a0ee878e1d3a769491c3d10d7a8cb3317c8c1ab338b66834a9aa6d2784892cce96abfaf554ea9b4c6371b638fe1d4a75c364ab018b538beb6831517fc53149566283ccb2385a75c541a80c7361764d6d50a9d18a831675e1d27465b62449561c83922565fe8be145a85ca8b18ab35acccb5ccc772a34559c7654ca55eedaaf50928a930575cabcb65276555071922df5242989e5250695531cc3dc49e16c839f0c15d730c5f9a4594679ddd83fb2b292851aa5386b8c7ca93f19a2f48651508314a73799776b6575dab4511c537be88fa6cdb6297e75a8218ad3269942653b31e692261d6a84e22c1aa6164bd2fea21b501c77d34952a9b6fc1aed11b51a9f38f87de55c8a3b32cb7b861a9e38e8adf8d6fb6c861a9d38eb9624eb9f60d51b4f9c38a5b497442ce9b6b6a04d9cbf7d4c8aa9356b6734711c9fd760e278795c3e13c7a094709dc124499fe531716e3fa99494943c3fd65fe2e0ab6be94556895a274b9c529998b86adf9f49528963ed8efd498bdb6e25943849d269038d6aa8849085325924120883a15020068370c4b90063130820182c2a0cc622e18038d3b73d1480035030284c38341c242012161889c4035120180a0402824028100605c2c02028440ec5909ed303fcb3fde05fb915d990e90c854c6d3639e84c3e53604bad21586865d61b0ac94842f207c8a5d85fcc8db819e4cc40c8c57574fb27442ae4f040f11ab540490cb1eb0e21691e7c24cddb1820dd678333a0012468fd1a9e45c9b217a1b5ab5da525d3f6f48b6b8d49ea58fc92244aaf973df999e661e09fb23942227075e8081cfebc502622ce6d6b5f034b8fa70e7b6a7a940b7fd2f96d3e77f4307c7a0ff4f7295fc2b129a9dac89b8533acbabff895e1d6b0fd085b2ca05568fce9a956524b2b9ee47db431fc6263c03cc733a4449463d9bd45c75bbdc441150df471896b59d672c6f980a03f38214cb679137c4e72e053937f0820dc9bbcede520d8d379e8b32a73d008321780877d01c9ba950acd41c6593b0c30271acb6a87723d982c42e7ab614d3532370f461776ceca258a90b43789a90680766a7fb93e99e64c636a3e57ca75e356979f118f0ecb96194ab49f840dca1bbffa4ae641aefe95bb2804a4242705844c6f5aaee6c54bdafe88627b162c42e31656066b80d2d10193f5a0636abb8aae096ac1fbea2404bd12efe1682b3e62aecb2e4c4ad157bf654c8b02d9622c9f6271df21c70edde6855f033d3131de0b08dde11b48ab6ca10ed03b51a601b4936ff0cae6f612fc0ac47f0362f8dc2fca7b97b8f5847abf66d996cc0feabc5f33a673dace3d89be5645cd2a119656f3dc9a366693c31b2d5a8dfc7e7d23a335b08121fbe5315f7304cc986887f3cba500c360a7201d71377559a084fe21f22de555a6b752944bc0c011f6318a977e445c4cc268bec3885d463e505525fbb2be1fc7b491487044a902882fb5dc6a9d42c4e7323924c33cf33b605b62a410534122390a268ffc6d3fe82aa79bab3d227798d74d5878bde0dba2bd651c095ef6231e6137b6758ab7b727eb99959c
3c62bca8c7a1606049360dabf9e6209b1cf5455c8d29f004ae8a29f08458eecdc6a7936e731584305f5824c737bf9ed992453259cd5781b23eeaec98aa1a45a412cc66881c912fb2d50c91ad9147725599aeb7a03dda3ae054a028ba3db769203bdc3bcd3cb1024e5c1f93aaea0eaceefc7c25be8ad38d4770c59853afb0ee89973805d511b406cbfd007083da70a9e6d9fd2cb3980d9bf9d4ca7e41dbfd526de18989d3c0d3756b5418a681f985d46bd9051a3bfd4b649a7ddd213d9f1e41409f06cc2f80c246562345f5075058036603d63749fd1128f0a448497d16b0b2e4d8bcb778dd615abc72bcb040830b061658e5cc6c03f7618554bc2c65a4a5e0edc24ca69751c97a5014d63328e0b5fb9e67ab0da8faa5335585c80a49b0079ec6b1274b286e4eab0af77cd008b80d6e83e2b54794549dfca7222006de33b45f11ab79bf92fe5b5e04bf64174378261035e89f7355cf7b338e805371705c69efe89e4b1254528234663c315b34fa32a46e972c0625921299ad8c4acb1596d5e1b47eabfbd76432a0146dcb8b037c322528249476401255344d63459e22ea4153242cec96751ac7780491a5ebeb5e1fe194c898b5c4ca7a9a9736b858ba3775d51a58751be1ae19e379174514e622af2cdeb98865724280a897c48ba2ce50cf6e4a54930cc2d78ca8fb3ccdf09ebc1b025500be596860d0e124a5cab2235e7a5ce95a9762ffbc2da9265f0637efa11269625feb7a1948741949d4a15b6cf5523bede8e8616dcda18d25d1c72c37800dbc26420ad56fd23b3ec01f7052e21bad9d5361a158451d072aa5e7f8846aa631404ab4c92d65d051d1226690e3a3bc463d20ac21e80bcd2e4a5193ac513ea3f101f95e2296240fd362a79e0b7b9eb280358b913d59b04cf60d7990f2cbd04672d39308ec2cf4029637936cc2518c63c90d3f9801474416be20c38ca22416a205aa484d790e2af39ce392937a065ccbc74187384e55723a16f515be7a7fbd33fbc2d25c1d98da906f191291be8b48621571dad05fd5d00459e03c24c84762f10a4a4d882f5c3e1eaf735f1cfff786abebf8e436e1e6dfc77883db337006478756ad203dc1d0e2b95905eda40ed447237d2e08d30d44a014119dd24a804b2264fbc073749601905682280d636a0a9e753bf45cb3cefce43d52cc3fc6751317f897773588a24e7e573ccc573d4e44ca3188e617351ce3dcb4660c7504e88249cb87b25dfb22ee8f96d4e12653a8d329973ae0dc402d9d2600a7a40ed2b8bb1034aee8c3d96d6f3c622d462da7193c8716a3da915fe97b159f8d420701780ab0157d0bad98b605e472d649001a439932e1e3576fdd20b4c586a4b75e441d4285644344bd047c9a406b1e91a402bf6c624a2f99fea1eb94684071a07c887246496d08af41c6cbe299c43e94a4059cfc2fbec46e37ba68d773937d4bc6fb66c23b5eb1f73d65b93915d78f9c61352acb45d5a9475838bbd8c5b7f909fb3d7571bcc5844e7664791f4b6145c718be32b22cfd87efe01f6b217889f05e63e59072084cfddb7809a2e0dbf32afe4100cd071948e2c1637a40bcbffbdc3b1c5ca033dda5d600891ae6808f157b41efcf3bcbfc0d5c86008fcaf515ec118d52a31625837e51e0927591a584898580cf31c4f2e4f4a1ff710a2a14af5015582e8e34eb84e260598d3419a2aeae8335b854a2d516db15b03d88481087b2efe3ffe064df949875bf53a50662dbe693d49cf21b1038d3cca9e6b0a964a5abadb2be84a2d41ec2eba2dba8872ce69ef13e8eebca34f26aeb4099e6da891d1ec35e905f853973afe7b4d83c903a1e21f4db2c3e454a8f248bfa9aaaf917eb8a930c3838d33aaaf9a356e50d4e9c3a6650750791cfa646effc29a8ad6fb840e8170f6550ca38350aaf950d1e073f55cec2f8a05e5c6e6caeea1e0fd763b3c69f60f0296e61ec82deb0bcd87ed2e243fce21859eff9a32e99fc0b76e032a1771b3ad09297c73d6a05d3ca8b7e104bd6c201a7e9415221889a07b3d3f438ff9178e0acdc091e5c8fb63a170c657c260a1edf76651555416b0369451dd20c522b7ad2e065834dceaa39ef49d8c319ef3178c70fb0e0d8c3cb2422aea5a2037279c4526f568f809558068339402402904621140054286e246f7e6c7388b56ac8a1a957762915934586dcd80163c7c1079778cbb795bd84cf949cc0dace8ae12079955a8daba162f2c353df3c7be4fb80be1f9655b4ce977c761a79175c35b9f2a8774f353865b180584c9625eb3011c9f47249232f9071bee772047240862ddd8a153c049c3918dbeeafb275081ac1cae0d0f3ce1b3cce8e49b48c7cb4b218cf57be23dba4dcbbb944f0089517357dd114efe5635043a2a881a13199729ab000c9ab6add337571eba2aa5ba72edbba5c7521ea164e97aa2e99baf874abaa8b5317f7ea66dba3695d69d6c5a34b3674f1bc8349edff48395dea38ca31044ce530f1fe527b406dcf9dc3fb2d12ef4d5
de2daa5cf5b5a77ebb3f5e2ab98bf84484cd082088ec599b30366a4c0b4b2a3bc7d585f81e52b0760c55bdb230c925aa33dc8c129fb5d73eaf6b7b6b887008c0af527f0984f408e0536e7b15e94250b98a179084fad1b8536b9c71fd1469403821860c7d93c3e309ef87e28013b1339caa526587916231756d87d3329db624bab9216c27648e1b748eeced10621ddd0df1a741345270f7e2c46f05f0239a2253297d2f95a450777f0dd543eeab1471f85c5b923ac048eb7bad835b3d8e4daa6d9b8d130ad0d51b58cc6ef34d3172d12d080abb87ee0f5333fadf43e0a64a591438326eff881b26bba8aa7ee76fd4123b271863e5921c2b7f44e443d2cecc54f88428b0dad1d6d36c50d7db2c2ebcde6c24e17b355c642753a934105a9c75cfb5ee12efb083a2643610b8b383f874a059fded699c7909b86880a82f631b6b3b1b6d9992c89c91bfc1d8c3567ac1c9d712116523ea1219f2534fe85b33aa523b30045ef0705d897fa3fa7e3adb0472e2155aa230ab8238b1afd59c08201e18ccc46760ab8d814f67b5820a617a2c2ca981220b07d8785bad44961963dcaa58eb38054c40634d7f806a88fb006dbef94cab85f15f7d0bd63b6193cec6f1d853a6af5889e10165c058565339580d1fd9c668d71e041a264b27a05ac2a92fff669a255a1837006691784a3214d1141311c92262c77ef7953145e57baf98bb378e0eacbd48f8bec435198b307e736c771d110e0573eebce11d6874272b4d19128267ad95bc40025952e33c46a844287771f99eae04c613d538a62037806bcb94089dbd74cad9d1a76dbfca9427467ab22def00990d798c6f566fe9f74f42276ec2a5d6ba3fc0b5719fd83aa97d13790a1153cb000dd003405da2690e15dd8440cafc27b500713be0e1249409404320f0d4cb66fa0826f53c6ba3e2904324947600876019707f86deba972459237dd92830432cbe4ffdcc3c9e6a029700dd160a0ab4050ac9208f74d0180f5de0904219a4fab8c30f438c5d1bd5757428f3e4e4f00339cdda29beebf458401503b6fdac5c00f928e3c9ba9d1c60f16db8032ff282d21df7fae488bfffdd893788a3570bc57a3549b1a4becac015523480d6a1acd6de06285cf1a6e5fa7ce11344a2940e092031a25d5d52883a686b6061701daebe3e0eb61626994cc3d14783b2d66cc191988a252cedfa66c67fabf2543508d03d121402480259aae947bccc33d1c7b8ddeac8fcc06d52b349aa75a0bb5a76505df16b4130f4089592a4f86a767032f6065be94cf86b85157753585447aa469f49cc1953674cb811d237a771885c113e0196aad3094990aee46a2830611358500e264ac3cf50c35247f68d26cc1d422336cd2f4f627a5f92b25374073f07140230da5aad1092ca917fae079cfd0d50091cc017733f9bb16ad7473847d1a6af5b7e765da30ebb27dda269236781854cd406f0364374f370c5ccbf8ba033d841e879a8114fea902924b07dacc4e960dcc6c941875a319be324032373fc9d1845d3ff618b6dc96060f7e3ab43a1f106450ad05dbc557c8504b9e21ee69fbf4852a15030044f87f6ef90e0f133b6838686e2a89625f7a0d3376b6ce8d9cff3e260b635d88650121623bfc732219fd253f7a7a92430726c9b47785f283e790eda8817f80e35d2e6b4d3104f33cdea8dc595e7952e39d65aca8d58a2adc84aae3d5353b3195cd508c70ab5866d0dd1a6abc931230a19ef526a9a2bcedd7cec070471ae96f6b9b637002fe442042d03f5fdaafbdb3051d4269e02e63b921d91721590e2ae0c33e4cb6c46d7a101aae9f580b6b26b5d5c89d2d9fb55353c6bafef38264b6c76533599d22598b997803af9f9ec8589673adf533d36cd8c1c0085df39def9f8a540f748cdbb39b8c2ae2fddb296e4b7b012c0119e3c82dddec1ce279c690d7a3b1ccd9af136896672e2c817df61f2783c841ffa73ff10767ea642f801e676e9b27b72389e63154933b87eaaa0ee35f17f74ebce7f1a3fbf978a6108478459fbaa4c0790e08ae791efacd77f91c169e2e3d0ce87cb3b360f012cef74adf32226df9d5b6257cd7c601df06912cd5bfaffd36746e2376d80e2a36c285c046435d77a70abdfdc2e934ba1040e098ee64debebb35941bee3a154e9ac23f48aa28aa48959d920a779d8c28cbeeda52228ea789becc28ecbe9971508d0f44a4214ba3f0ba255003603e70883ee40096044a8771be677c1f408f7fc34205ba3bbe9545c9597a8b67c1ac4cd84063a80fb165f28e5a706144b4ba5e762366c5731e27c6b3ddc49123340c7220ea6ac679e64d72d669888af729ec6394fa202855335b8966b7f391106924d800016ff9d1084a09b14c7323479959eed6acf5445b5824ce2b3e047b0b67bc5dbb6c722e700ac7e5ed2eeabd40c61b989068ddfbb87e499bb4c3699f5039b82c89466e49b58dd879d388bc92f41e231eee416a4df634dd05843606ccbabec96dc538293ae6f25539376ca64a
4641acc93715bdc00717055f85824aba56aa551d50d2c54a9b7a2a61072f9a22189849f1d04543699ce782a294340ca6f87fb027d19fa48c8def2cbef297d8b4b44542b2ce433c28eeb8b9283d82aca9a30cb95c91d47b7762ea88a94b7377eb0639b8122350bc51f23fd6eb664ab68390f946c7a999479ea388cf1af6473da4ace638f240d35b192810c4c5a0e4e218a1129cd7380033a926672a7d674a9afb662b8f10db9fe42248ef4b09757671a1c6f089b4b3e7b7537ce440417419f939f57e429cd66312787f20404a4904dc3a73a22899c138375de426d97160eee8bea49608f69117a726d4ac5ecb712b83f119b110fdb3a66532f7a490849856e9c6746db4f61a7a71197a0b38ff8d78cb6162d0ebcbb4815e1d0f7e1313c0e1deef67de598db894563312c5c9dd616f29cd665df604eb3279596432455c03e61600795c82d8d9e5a00915384097dde15f20023ffebe9cb980bcb10bc72f207d85717afb04a238c480c0c9026291d2d43a3ccd6705f82d236ec327107ec2a87d06dc7e6f0e939f6955064dbbc53662a01b3ef286aba5a98152a4888c1fa2236c3a56aaa4ac314019c249c7e87024c39687e3759142a665d0e3701c1a15dc435049939f12652a204ed041459039d33b2c5841934b65acc451f6a0f6cb8c87e06b0804eb76952b10aca1d4dc8ce6990987645f386ae5ad683f00f64927d4103c712c6542435fb9512e2c918361e651e97aa2d6c108ed4aa25920a681c8b4dd149856c2496159316463962667045900bafa21039c184451d363e2a74a957d39b067ec42ce5406433edea60b3cc190e6046411ec7d9fcb5a92c9c8748ea87ab8ab92088946cebe03d1301bb8f8c0967a60d853669772ef2dc281998690f543fe4d800fc8c19593d06b9242157d41a424f964055dd15c391f871beaae121493b9a5888b61739daa6bc5ecba227123246d5f368172597cce72b526ca9769e0753e35da3ad8e48fb2eb274144d7df8d1cfb003a4fa7523262789f9aa88eba74668875d2d9b99487d28c6979e980b6a4d87c95471553f85501a8278de71f8192db85f00241246e34ed69939ff15310f98a5381c5e60b2061126218fe7d8ac08d593ef0545689cc97d17e4751bbd99cd2486d4a923bdfc70e019bc9740f41b51ed21e3a184e416b10d1160ab7f188e5ef59ef646a84e5b67dd87a89787cb0c7761410dccc08cccaaa7205cabaa07500c8c2ba9678d080089dfbfd360828012958a6001dd0e591ca44cc0698589532d1b1baf9d4f8918a48faa45a122705b75ab6a008cfac6a3e7da3c2cc2a56076d61a40eae50b8a22692f5fb69c25465ade76827611354c00977d0f941e40d7d985b6749aa402a5ac198c4037fcfe0e54f96ef2fdba1e8f14abac1f8e68349ad1d909a61d72c9bd1b9cc09ebd301201f952799d3a506e071c25f6f246b921180b172c0fd40f5129eef1ded59d4dd62fe7aae54960701fd33d72edd39f5ed604884cdd1386e190e1076352afcf5ae627c4f52a7b545f50ca404e47d82947f76717e4dfe6af60769ff1d09ac518e6247658403117c58f544ec8031fb0b3a08574b01f38982045154b802f2f81a495d1708d906cb53237c7700a1c4931d35046608a5fa80e532ca618478ab68a708ba8debc763f9ec69164d4a12b96d9b952f4be541c960bb1a874bf738cd8f433977785e9f758e1094894a5352ea32a0dc274291b1e56053290a51adc9db529242542baa62608a8d6d988b95a5bd8f0af7f3a7e983f572076b93c2d4c3b1707c9a2e14686b49fd1f2511038df269c585b8ea38d8b25fb435529bf775cd61c072507520dd03b6344675ce38ae69d3efb39216c43808bf6efaef6ae08e4b58f2bc2988217a837f2621b008accc145709788527ce055621d23e0fd9272d09953b3f4d488dbc86e9ae91c55c0ee01e2da904510ada9cdf00ea507893544fd2b93185653544e4ae38b5ba43a12d29b8c145c3149daff54abe87c25937bb384992222d4019a220a8338b80421240501e0a5f318fbbb728d2281745078ac3f56914c8a797d9b06c09e95dc63050bd4f52d5a61988cb45e76f73c5cb0c833f51a55c5b84897a65ccc1a667507df53b320472dfd3be17168b6599bbe7d1de90193a490811949d6060f13d666ed09adcb8bae4109e6e36b024002e77204105da85397a98560aa71c78619df08634ce982c23fdd6682ef97c20256c83e371a1ab6b3bb92f1181bfb65630e10808810bf4f064480f231aeac752aa87704686134699c6f22e19450a05687df431d970b35d36987d4e4622455f560747e2507d2fc3095c1c123c0125b8655206078a719e82d6143121b737eaf897c702f075e99ce7647fb01e4e1b413548b1c09bc923b09ed305faedec0f2e4c950d8be87900b0de453fb723ddc0427814ce7e5ff1ff60a1127154f01f252527fe1b21c3b47c23230b61861b1657d0484a09f57ee03d8e69246f98d587e02d99ea52b3e432eca13a9ff0b09
ae1d66ba4d38a9e8736628a201bc4d9cec3250939fe4908353e01f283fed7ff6450abd24af4d500e1c606a9dceb95c0eb530ae79cc2322e0e9148f2a4204005903e66659a33cbd839d040fbb15babe49e5dc8e705372a29c6b49bbd2323c9ac258a6cdfed2c520e61577791788d71a2102cdac66296076625f41e9e13ed8831b4aec4aa6128a396ee55b951239952e0cc51f6eaf987575cc92f93b06b125a8afae0327cff19a8ccadcb7991e3bf0194b379723aa7bb4516026971e4fc840843c691905294a41360200a8428a9441c69d0c6b9232e4e8053dd40d0867635dc770fd922f6a82827b259f8b42b889765e786823e59f20e7477dca51c572e2703d4701a38d5a75c4cd40208e7e4e03fff3c2b609545141a84111c363d8c526a0765c9f7ebc11b4e2d0e4f822e667ed2353821db9739ebf3a978d9dbbbcfb0de9b8bb85cb44024351f08274338783f93b2833e575aa0b33c63f93dc6d2a30432e03d772c1915d89b567080994e28b3d3c24463a42344951a6bc320cf38c2ba080e6b69bb294ab0def48f4a7a69111099802922e82aa27896b667671625e6a11cefb73dbc2b4f7a5d31cc7ebe0da60eb4d00e568674d41c7be70b65b034eb9d4212830aef623ee48a91fb290ff44a195f9f573221c52de620282981205fa54acd1a1a0c393c0a96bedd57567017d94deb93152c19afccd645e33390a8a3714125c36ad49cb64439a1a2f601bbecc5d9f2f3df167ebc79c4d27615440953d05dd0e1a2a1924884924e18a6c98aa50f69596caf6fea21920145be58adcda58daff27d392f562fe1f04d58eba216385dce20aa1850f6412d27b1abeceab241d54236cc7b3ea3d1c40ec43b4a813bcc8c92812cf295028aead996958a959e311bcee8c425bb4fb2dcbce58c9b1c28184dbb0821b699e680adf1b0310618a162eee1b59275dc7188a7fd48f30047eaa05c625ce6ae2c6102b12dd71f68cc3f76d751d1705efdbd39a6e659298ddfaa1d6fdb1055a66a307119aaf1e321c75f80dd7d78b10a660275f73fc6040244f56b2547cdeaa22346a634adb7e3b3898903857e1143c78f3e543b8a35de70a4ef7ea473b25a2653ee9d5be2bfe628dcb5ce52f1078baf223d8dfb92780de9f516bf216259551459266fd9cfddce6eef0209d5f999bf34414045fb3b8a0c367adbff0595a46a19b5262b479d48605effff4cc4b391f2f0e53c39bdc837e3c396b136175ddaf5c441d88117c716f2c08e6cce5a94c9153a930badcc7074cce2e8db95011c4752a2438d99f9e3afe9ac8d97e9ff58120e7050fa2e19429fabe7a2eef4aae1d75110ea21a0c8657dd30a0cdc1d9e25e31bc5b35e5de6347409b8131cacd7e0e828b3089b650d222e59c3a1dd36a76827ca36450494888a0203abd104f5a62dd2028152fc857d6ff459a48ef84e2cf1a045da416955b4cb0c802130ca41210c62f0bd851e18280c6145b77a1184a8a8ca362b7fb8a9100547120d7b37cfc70a9c315d70a56cc6f859b28230814c2043844de83e5b38115edcb9c95ec2fea4cf0df88d4349bc1ea1b5b80f9581e3094a9cc214b6e9d1f9e8d32362ec796dd8aee1d66231bcf0e8204313624c27225095be05f0f55238b1255e63828474cfaede3c38fc4bce2a3978d5faa94adf8bb388fabb3ca4502a4057aa58993248e5fe1e2ab3b0985798f76208d41005392338cb4a5822475b60226187779125af8ff5ceea4162589a455824721c0520914802b1db03326747f12944c1c15e12e6669da067a9bb3631a46decc245c44823321176485a8428999c3fa1f45b71845b0425826ae45cc1fa8e3298e41d0b9a14a266f8e52c3fd8849d5a802bb014d00955092405720220a11e157abe0f9dc0aaa0843044e02e6023d42fa0a5856da6f6fc36840e023642f01f8e45e6627b0de193f051a09db0eb01cd29b6757eadddadf840912d41fc14f2694027d9c75c70e6b29f2d1cc899924e097ab422386a158000b01d9c36fcabc11aa489badcdea9625df3f27f999d50523f6a30457c64130de2b9b4c205341d78d83ffc0d5e862f11d2c8e2cf4abe0565dce793f1acf8e2979795af0b9e6688f70e5294751ab7a8135103180d03c436dd3d3748db6bd3a9b28ca41a2a9351c30847557f925c41894e1e30e308d6596dd07aad92e664b135c85b10a092517388174b18d93b5b321492f79c49ac4e5137a386de350ceb3451838d1006b26cd2d5c9f8b62ae3d1840cecddc9db6afe65f843df9025408247b7aad603a8c91280478b3dca920b110a33005019e2ca674f4f84d35a693f67aadb270e7ffaa49e4828cfc95ac35bef42d40ac3558c87aaee0039728e85e65a1cbeb362d7579ac0b309820323015d67798103471a301909d8166126f5d8b0c8b1c6b06d3b0004eb2620d3126bbbb288996d347573a94b3bb342d2e914b7cd25276a2e26a50b16bb583e230ec35ca4ba25cdae923c9dd0654248a92811926876ba1d885a87b5d297e9c1d7
c878e7327a4f377a18b46ea4b4c32e025033a2c8487cc6dc0f097f642a465264f4ce77dcc1a13c43bfccf8d92e4cbdc858196430adcc8ad7d65a2549db022d5560f3559c227a15977625dc610eb1eaa984ede63fd13d045a59e75269580780c282325eeb5c15055810146456fe12292748118e0154e8779f1a0834e41e361130c2742bf1964323741acde202a0c06ff54e121e96f2f4bb15a1b6a8d3249212e0487d4816a0632bb7a570a9ae65d1ed0bb5e6896aca84445a0a20d15929404f50cc84ca9407c95932f098893a2a34143989534f81428977388819805ef017fa5e62ac48944981258ca915c49defc24c8768e5eae501d2d5782e8172995bb8d5653edcd0d6d1cf60ba4c006130feac5292e7236d69b61dd68de0776f6cfa0e7488138e270e66742e0549ef50388d38feab386ca96b0a265d62999f7a9e43ea7d1222dd162db9858207e1809f610774448060989563fad04a0b10b184b054a4a555c6c258203e96e53823bcca574da09bc56a11b5449751921fb1a521fc6b36a9421669976dc461b4bed1cc4423cb7613253c9e29aa5c7a64405c89e8c5762539d3174297b5324c2a831d6d1a358b02443ba3d8a275a26b3b3dd170d9616d3afdd68a8d2bf9ab32d8eafca099fe493204c32a83f9c44845c0100168912745286c0511027a940ee692df9473d76c450bc3de12bbf3c833e272289e7a0ab552b17a2d6c40a3a8368c7c5f8ca58242a7652b3f874a899a98d70cf1434982762c09c90e3ebe1de6056ea6e45a4353a4a434aecae2c9579c3c4803953ae4a5acc2a7085a5210232f0d3d511114366656c246b37a90c9b85dc6e7905272095d29769536ff99c7deaecb2adc4bd3122cdd2bb9d224221fc68d64350895cd0c5f0eea5d10a76c0e8257a077d656ed577cd64bc1e4b31b0bb831a647bad1c5be294f415c9fc56dc29594998235733e35d56e53a1b27572dfdfb7bb4270a32d89f75c721dd572f471414f047799e25eb3eb56a2cf5f112865cce57c7439ee6d4834b5fcd667dd8e5bd8871f9230cf876fe999845269696550d25ce5f9a53f5265088f01abb2967c33ae4736609673dc31f03835f7ebe83d1faf0a3d78400b7450214f846c7590e35712067b9a9479b05472b2b6dc63f3aa00225f9a772399d50c51254cf5dda54575b28306ce431d79ed70ac5addec386a70ca204a1b7c08b76b35167c716bf7da45bab520ea2fc684e4c8154482745b2c9ece650255763dc2d1d86db03a00236543ce9fc2964d26b8e9443f389c23405ea74e5a2156820f2f41d698d4bfdc39e82e2bb5075080b7cade99a75142b61f003f9bc02784faedd63042d51ff24b691c0d5206ed422f3a4918cc562b96b9f12b6a279d9c5141adf85d3cd941988c5aedc74d1c6786682ec7f8cb01fe36b5894c5ae03707267f20c0db9444ddabc297c0ac6a56c614d5005833254470a7869920108c49260ef314b95ec9ba1ced05d6335727398f85397e4007ba67e8c099f16e23d2634c1c7d2b5abc0e2bd16e932011d1fb5345147503ede590569e1c132c1e4c2b3d636897ed3ebb93ca54e8db08246603c31d273ed2bb90299bbbc3a4f081c60c8633456eab3e9b607abee5e4552b3d98b992674d39020cec3122fa3eab04df0e3217726e2d1ed075c893843824447202a28ffed809897f2f5eeb90429717a3584a76a00ec3697f43c1808fa547b2b8105b6135a82b562c819ca93b08c368fce0fa0c2ec54ee6174325d43df8e241dc48fe04a4e780303e27eb11553429110ca5556a6b4a3c7ca83b2a075f05d2fb45e1c10d7adbe0b9ce7671da5f29d473a3e160c01dcad54f71201cf1c7fc78a247722dea65d786d947472b9fe6b8587b7062de8f8857468549a1d7cf998013fc0f700c0e7c2cf0023a691a47f1eb501a27dc4060ee4f58da27cbf4115e01a408e0c0ffdc3fd08b9b767e81d376bf3cc9d26fd069aeab9a2da5775d85f7cd045d11e37b56c3b4f41b0f11a00f8d196997c2919b7d6ceee06a5c18e76994393180681e1a2db85d850afd6c1ef976431287a545afa2375d1cdf8d07fdb7aecf339c15086ff8691473983b125d2c84fd77e93a60be4f90a6577849f321467dd4275ab317924feec822d3268e7918ae58c410913ca64cb23bec3dc4dd277b231f7ef38689adf3d2c10016e09fbd0b68bff00733a4514e87c1ae154c1f6ae0fe35ba82f3301b7fb42aab967cdf6b2b524c1c90cf60e014906901342bec4e72f7660324fc305571c2a2fd23ba1f914adfcac8186b0817faca2aa5bc1f396e577841d08a5ca775d0aea640a77de9aa8e00b951747a45cf29edc3a09e32fae9ef5d8affe8046eba353b3332f48c20d8622ed18b6a4910d043ae1c33773436e75c6cba48322a8adcd29bc6a20a374f4159d9706ac133850a70beb434c4cf17a5ebd19665c2d40fda212d42b0d8bf6b15a6c4ce8bbe05f51ecf6689e1986e9a8970b4cda5660062025b01a581c0126c107220a78de64
2c55527429893dad01c31bef9aa2c1011439f235b7f410f6a84a0be0d3f071364dd5f6c818898cb1ce42e9a1718ac4bc895ec27a247b8c1d037b76c0742c126f45287184e71a8f4b45572f614c84efeee1bddc038d129182d770491b24e087e1f91bf933fac43996d8c5805a613dd451c220b87384dbe615abfb2c697bced58ce9a86fd9ba6a27701e2207addfaf49743280204da36b282ba6156980274d41a6230fcf800876f3db1e6e42ab06260d92b9983c58b2a6a3485483e00d8b6434b5a4faca3722a4deb5604d2c36040efcd19288bb80513e642218405da47d0f880e4813841804012603562ac64df3c45837d0ba7ec1832542d41f39aadcdf9034174ab27762638557e7905ff54e2b8c00b16a0b9c985b60a011b449ea25001a61fbe1d3d45fc2b4cdf4384a19dde61f721c481027ec7c4fa0b3160b25472ff000000000000000000046370df7e93105baf4813110e36252525d96e263ec8cc5e6ccdb6259d0e3e9d19bf20e90bff0b420ca5cbafde1d4f92982ba9602a8760daf1f3e4f02aa860f24b8516afdb6bab9c8289a75d9762554cc1599cbedce213eaa6520a362489793a4d8e762385147cccabcc1d7a1c0d7d14dce5d042ae9cfa971fa2e073149a1d4393a5a943c18d85eab837344ba6808213abae38216fe9a89fe07ee2e5903b5bf62e3dc1f644771072f8962ab89de0734d7d68ed793159cb09d6034b2fdad15e63ed2618fdfbd0153c695a5c134c0a3147c93d875b926782f7cc9d9327ec3a3c30c1486d894b7f48e19b4bb029452c6974dda03196e044226e5fd27a3f4d259890fa7274d89aa36e29c1e74eea41ca97122c3a09deaf42720965c9722709367d3fdec544828b993a079ef2a74f1b2498343169b65efdc8da2338936ca9233815bd48964356314b1ba1c7714a31542246f0bf95d1b5de52aa882e824b1e63baf193d0eb2b82eb387c435b697647223811f598e31fd19603117cf07812f3e3e475cd21b8fb0f1d274f8ed16308feee2387a4ec1e954270516a92c4a810820f731a0dae0f82abedb1cc7ead398e1404275125f87d8dc7a90c04235afb2963ce9fa20404535692e3ac1f9d5afc037fde9e35e2463bebfcc0adc715cb73ecb99d3eb0a963d3f29827bece075672e8e8d3b9072e73f81dd671eca1a4d403fb9f62ce13dc2d6b9479f83c7d2358f2e0816bfbe8771b3ea13beec07414711d4fedc044fdb3b6b0646a963af09af62cb96ebae8b174e0dad63f7a5ca92c45e7c0674844de7f88e9a71c58b74baf606b9b2b8871e0d6e2e670cb21644f100e8cc7514821f737f01944524eb7a71c7edcc059b4a8e6f6b481ffa8537a65dccbc961031f296977d68df6c95f03ebee295da6cba981ef298f34fb274e0ebf34306696fad3c4deca1f87064eaa2d7890c9638b39ee0c5ce6ccd0e1ebc731749c19b8d033ffc97721d9e6cac0e43c16c77defedd623031fab793869699289e4c6c0c6943c342b72626073cee8b6aeea38e8c3c054ea9e9c9e8e2ef460e02dd98414524537a7bfc0068fbaca33ec746b2fb0214786f2c8569b2c5de0eaf2e6c9b1c4b16db9c0e6b7f484f886ca6f0b4c9acc789d43aa18322d702185a97f6bb2ba290b7c5ce95e9b65e351322c7093bd230d39885d810b614937c72b46cb5b81bd16b3a9bc5781f73859a2e546c9be53818f2746bd983c1caf7c0adcfa9696a64cd2da2105c683444f1d34d2831c51602b6f640b968602d323551e68a60d19f3043ecca9c310d5d99346cb096cec71e9ec673581ff1c7b85609b12751613f8a06bfdd364b50b622d814b7b4bdd7a5eddb152029b3be4efcdded17227094c9494bcf63912d8641a52e4d0bee3cd8fc0a69cf08cca5ef51f46e04eb23424d3f00a1e45e0f3e728a6d289c07e943d86ac94afd36608bc84e42972d4fb1ca708810fd7bc12553343cf20b093f3fa6ff47a4b0884ffdf743ae5fc80eb8a7a59cc3b8eb6a301f880b71ca6ce2eeaffaa512ff81c1d776765d1f310e2051ff7e50b493cbb98a10b464d4342e6c8fdd4e782fdde1ad590596b37b860379996ba78a5d4985b3049630ead3c8a2db80b79371a254fceb5169c697a2c92435ae02c78efd6c9d17f103e9e2c78f70c6953a8cc7190c582cdff49eadda15dbe080e4c0b56033360c18620d963ab174b41ef57b03927f3bd9ca4ff1e734329a905c6cc156c55478de539da7bc86d61462bd860b6fbff515670a61efee48fad55f06925f65a14cbb020c1c20c55f09ddc3d456a4e2af86cd1abf42fe9c20c54b059bdd582479bdab11098710a36d35a8648c5362b1f30c3147ceb4f9ada95ca0b3f04334ac10739ec63d0efd2dbea033348c19e48508df62f5775146cca963b7a742c7ba744c15e9a46e6e42031a4f486d259604628b8af5e4ba79f4b2de46680821b4dc13d3bc7d1558813667c8217cd15628a2126695160862738df10a39b86dcdf4de90497a6bb2ab9449c6034d57b4e37f90da54dc18c4d18e5
7b21b222e7861219a789cb047b317e0e1dc5147c52850956a432e4477868a7df0d25f4c08c4b70abbfee361d4f08f5df5022c312dc7d7488fb95bafbe60da5a24af09342588f27f592d00a66508257c914d4bc364d7af424d894c8ba09b963f2cc4a828fb077498dda49a98d04af3ef991866f8eec15127cdcd53d82cdb1c2831c0fb225441dc15555879c36fcdc6f049bee3ccf2a454cc9638ce0cb7763688bd47d478be08269f41cb784e63854115b7b8b748798bb6b4a9cf26b8fa637aae306ef2fa8c11356136624820bd6eb714ed162c8d31500dc300311fcb75fccb14990900ebba13446192ff819cc80060739c38c43705f6f9ad2a3a3d01f6f08b603fd0cfa1ea5c398a3106cae4f1d8adf8b754c1382d58f982ffb843c557b107c47312415095a1ffb46105c87751d07c9624e936b20f88b94e3ad8fe300c1badec7815ec721fc627f6037e670b53d781cfe931f986ce9ad92c82de51b9dd107ce33ef87dcad7faa61ea60061ff828ad62679a9c57a27403dec18c3d30e6f127cdd13d6378f450ccd3360fa71fc28c3cf0b6933b8e7c73747b7130030f5c7a54c922645f91dc67dc81f7acefd0993d240633ecc0855ccf9c25adc3cc56995107bee394b4b1d355851ce5cca0036f2b3a39838aee757203628871cc600e468978b559df2dddf73490c13fe0f80f501515cc9003d7713c65eae9c3b2f3184055c693d1811971e0bb3d2b228d67fc28724309073328e305450cc000b11b33e0c0e69852634a12227e79dec05ac7fb1a6ad4add2ed0636071e46d210fbca1da50dec84f947d00fb2818d597c2f6392d6c048b290b73fc8c8219a1af8eef8b79a37c52e8f66a481519d5cf9236d0e62796e289512cc40839639476b3fee68396e30e30cfc7f143b9a8c3c29c79a19666024f5d5e3205207590c018e604619b88891e9c65a634c77cf20036b6e1a34baeba487df8c31b01f37847cd3ce10838981309811862ecc00c3169d93e6bf26e60d252235ccf8022f39faa58df956c7bdc0bee65069213a4e1e7aba5066ac7ecf53361778cfa194ae44d97bb4d90293db3f7f90f51f479ea28573bc989d267dc8e156b2c07a1cb5c4ab52bf0815b7726a677848f70abcb6b7e6ba8edc535eadc075f2d4da0fbad93e5641dffc4093c7c14fa402bb7e49345dce1418c9955d5277fe384e9f14f88e1d9bb44b150557439a140a664081b18fd22159129fc079ea3421a5a871f2e3042ef8975f0a510d9591194de052ba97d2958ace60021fd3c6f4bd694a5263dc208306312044465982e10c25cc4802b71963efbe5a688efd8f400b66208133b798bc25d847607310b929ff83724dee0da592c00c2310da43ee798eeb39c8985184fd2576e67cb9e0b801910b8e103c49c00c2230495b3fc4b052cbf50c814b9f0e5f2a7746c96184c08b7be59090bea36f4a10d8e8172564fab032c7a140e06adc7c3d57f207dcfdbea9f774ff87551e61860fd82469bab683fed8e3da0b26880721e7a05f2f39ce0bae2eb548468e79457c177c1cdcc47e4f3d3334ba60cfa3bc1ab96f3ce71427082017fca69cccfb8d229157120401e082b59d4aa97a39b7e0627d98e2648df51de56dc15b4a3efff8f18bc6bc167c547e4ae9f503cd69d3820ded1b3dabe628e70fce82fb4a2ac93b7b14da9f2cf8cf22d67a1d4da3b3fd810062c1fb78fe20b349070b2e42b64e25192e11be82bd9c5294b290f23b775cc14ab4a6fff1fb5ac1bddbe4c6cd9d156c2a73dd8b49dc8368ad8253f5eaf8b5c12644c44c00aae04d2a841c9a5aac07024805df493c9967fb1cf999a8603b2d734ec15b4a13bb26e42049b098821f0d6e21ba2b3be59482a9172db38f3cf47fac0f04400ac6ce3bc4f1e439b23e2d07021805db356d9ea38fcab3a645c18f6faafa8f261b0820146cc4d3241ee5b02e84d00a144c49e42887f0c0836c767d82cbff8165e9304a35280b08c013ace7cbdb88f9a3645b192a98010c543083188c51c6a7a0c82c5d009d6025c74a1529e528965538c167abc47cbd34a138086013eca847213dfe701080269894fb73070fb399e0268f68471d295d0e4c98002e416208c012ec7a90215da784d8966a83002ac15ece2e091d66da135b009460f3a94e90bed36c394e5e6004638c400093e0d346879b9ec70dfd32ae8c0024c1c6af08d9f26a7f8840009160234ed478392144d28f4282c934a96c9d293fec1fc1c741e26592b8bf2e410784200c077cc0012e28830c1aacc019f00f017f52af108023b894f5438ae5901ef1d208ce5a276fa5048db9bf0fbedf490a1500860fd84a39e4affeb6b6f4e905db9a43a954f3f082ad5d750f2c638c31f55df01e079a36dba40b3ecae1a78e75baa38e5c30ed1b2219dbb6266b70c159b88510dcce934ef6166c47cd39bcace9c34faa2d98cc71c7d051d73ca7b316ecc4aca91a9a2ed887b46027f5e430c6eff02a78b3e03287cc151d86081d8468c8826d8fc49307314
7afd8890537fea15b94ec1bcd745830112b273543fa0a36c7fa1bad7b2febd8345cc19ee565aef87b1596d756f0f541c8123f2c68facf0ace2a357ad86f5f1b7f5c05133ac61433d05005bb295e8e60a7b7a9752af8492675ede6916746a8e07284270b11346e69088d5370a939b8588a6f1e7352766898825bed09b93999a84bc8a0818fb1d5344ac1bae730a6e87b19830629f8fddeb78f8c9999e90fd01805e31ee5204aca2c7f0c698882cbfad4f57149c8aeed42c1849ad0538b1dc7131550f0314fb37ff4654a018015687c82cdedce9b1aeb402f58a0e1097e3f5cfb9f8a549ff41b4a63946162f03328df82363598c1d718687482c9fa28b4472e6a2327d8ead0937550d1b69b0a028d4df0414e1ba1e33d6f283d818626d89bfc912162a86482edeaf0d861c70ed3a22f80810a1c2081301c10c6053040031375ed491437c92fc184741c7894a886e6571ca061092e52f3f965f7a7b1a49914685482bfcf2429f7c7d714685082df0e4a55825bd6033426c1bdc71ce425296f28a5e0052aa0315a100317a8c008004aa02109563f4233a4e04682951ee9ec5972ec5ca132041a90e0d26e945d871a935afc114c8f65d647d1fb729d8ee0f55325cd39881ac1c77c7fa98e634f738f1b4a465724a0c1085e3cb7bc4573241adb19c0608c42008d4570a51e69ae8fedc30ab512682882b3dc610829da27826b0db30d1632c58f7a0ea3031508430c31b04003117cde8a9da1dc3c2f723b1a87e036969ec7bec91de6704370df52962b357ea5595e4054340ac149088b794c2f74749c109c564ea929ea791ccc63105cd0a950a6111a3a2e083ed3955587ec696f2d10ac65fa7bd4dc552e2940f0917b3dc596fec0faa7fc96a9631ed564d340c30f7c08e95eacf2e747fff4810971738a78eab9a3c6f0819b8ec53b078fcc32e5ae40630f4ce5b44cc9e1a1f3327ae04282f87be45144c87179e02fd43f07ab5481061e584939c4d4f83bba1f5981c61d58090997aceba142e8143b7011c98358879f42791849d0a8039763e4eba9a7d7ca381db86076f934accc2bca39707ae911b12a8fa02107bea3a4177375c4051a71602b85def3abf65cd963040d3830399214f3a3b3dfc047d6aa0e16573ca7981bb85037ad9e383a9ea34b048d36b0d191b6f35675ceefdb8003aa081a6c60534a0f558f9151081a6be043fff49d5163bba71c6a60ec638ca5810ff9c972e41c64c8c92a41d04003f71d7de8adebe4641242d038031369effd245be78e4a3a3370e9f34f4bc851206894810b0d5172903f27227a32709d3cfc9ee0711cbb8d63e0aaefd22efdf3030d31707f11c9ba3e870a2cf033c880fd814618f86c1fd77d7a8ddabbe9810618388f13b284cc41d6d6040f34bec0c6f78ebf3f1d739c1e59a0e105462562d0168d9efa4a078451c60b5460013ad0e802976b931b22753f083a7281d71c6bc71fc7b499b2a3e240630bbcd5ede5455bcbd9ad16b8a82f9d9bf2e7bc8fe040230baca5bbc8fb96cbee3d37d0c002ffddf519b463dae8ff0a57dc0ed1c2232d56e07a42355a5eb09b0c4b15f838565bd1e07160eff7b6810615f8d83de1252999027fd92dd4475aa4555d030d2930f173749dec1e561525a18146149868179d1cf14ef2268fe640030a4c7e9c5557dd5278ee9c81c613f8e49ebac3f441ced35771027b41828d64f6b1f3550c349ac05ae81ceb64d8c71c7a1708030d2630395965f1fe3eda8ef3028d25b03b9ab3c7d5fcacada34043097cf411f432789015638707ca302d80801862bce0cb28c3b4e0d04802e3d12bad86da8a791f3828430580a081043652c5d7cc99c390f912430c1c94a1023cd03802377e3972b38d516d3302eba1a318c273f67cc91781b5fa96accf715cb73a11f8b571f588cecf761d43e0a29f4e07a996257e84c07be0fdff1bd563fa08029ba277d22c16f37dd440603c96d0d3fd2ad19c1fb07597f5274325ed4ec3074c6994c70ca5e9051f6a9ac75e7e2e1e04c370800bbe8c09dce10b5e705143caceddb9eba2d9056b211d4535778f27d7051f63a835174cced1ad564278ce61c105b7259993dab3b760ed3ea8d8f9f9879cb505af3147c15a30fe7f79620af338cc2e2df8fbb4f3c8983c0e8dce82c929ada7477f360f4359f01f3d3a5b08dbfc10c682cb3313bff50f21f383c516d2e5c8b51cafe06e2a5a86ce925cdc7305bf97f63cdc9ed0742bb818ddc373f1d528bab18295e8d0e187d329665fabe026071e72de73b5181255301e858eafea3c52c1ae7706b5ac57314d1a157cce1e6f5aae7979879e820f3cc7518a5ae96f7b5a1c7c610a4e23a9599e4eb0f294820f838bd6646fa9a79c147c6c93424d779b46cb310a265ee5f45f3956144ce5a78fd3ce0d05173aee77f97ad6b8495030b5a956d15f73906dfb0413ffaa6d237f1ce7c87fe10936b6c58e18d3a4134c7ac4ea15bd39c1c450eeb6e2ef179be032628e6356ab9aa0fe7e3f
7a1c72647c91896f27471d52321c10430c31c420e3085f60a2f4f8dfd973d091c2179760f34ff434e9225f5882ffcbe0ef6f13257c51094e274348bb8e2e2265a4055f5082b5fc3925cdc9241dd9f4c524b80ed6e1e4a02a92e032ddb23f47ea305850055f44821be98e427690a865794830b919835bf2d69ddc3f822b4b393b76ec1dc167e7dcd21cc999fa46b03173e5e8418598a3723082e91cafdd320fbe9163115cb21cea56f49c1b2cab0826e4f3942ff3da78a02682cd651d4c53e44afe8808b6ca4c9387123b04e3b17658aafa3885b40cc1c7cc0829b45ff0a013189041e80f15b4800c3cc01785e0a3f4ce347d1b73bc1382cb2d4fa331453da6ce20b80ea23f66573d8de60b82f5482594c594b60e1308f603ad8afe51fa4110104ce8a01e85d04cf6a1f2076ef7e3e8ee3c46f128e9072eef46f7c0ddc3cb8ffac0c5081a214a5aebb37ce0b234a4742947bed8c37da1872ff2c06aba6ffae8abcbe3e418442cf8020f8c4f5ecc71fcd47ea532686081174060bfb8036b1f47eed891cc33331340267c6107ae7224b1cb336491f0451df878c5e3a92431e5075fd0813defed583b42bffa44a48967458a8b2fe6c0e6ee642d31338b4a565545444444446535f14c85f0851cb8df5649a9b3a5836ff28b38b02f9dbd5fafd7d68703db5171624bc7bf21be810dc1dfa2ed468a6f6ee0a38e7437aa473d5d6de05daa35c7219a9a5a6ce026bb3d5748759da635709da30821ac420e6f3a6ae06f424a79bb59734f4e1a78bd8b9756220799f26860b7f5c3d114ad39e567604a2c640be2d152889b818be25ee31632b88729036771723ce9b5103d0a19383bcba1246d7a9bca18f830c6ebc8d1affb5831f01e591d74c68899cb30b06983f5b4f948f0110c6ccc1bcc3df6cadab9ff029b5cf42bf6876731e4bdc0d98ae618f32fbded029bc646344876aee55c6023c498fb2ced0712df0213b2e438f6e03a95365a60275d43538cf61e6a1698b452d915f3051d110b5c9e1cb24bfe10b1b42b306d3976cc1b722b7099b7aa77a3794bc4abc0ed6f8e57c7538151dd0ea2633947f353e02c789c7c6dc1bd7c29b09ff2d8c7f13da5cf11052e5f64c971fc010536c6e812d29656c71358cb243da9c427f576029f922a62214731e634810916b1ec3cf460021f5f42646aba0436bb4d8eaac7228712b8bde492979db6444c02771e4fb35fb6ad1443025f1ed55f8ebe6b5b4760ecd2f253b53c8e268711d8e8d8d378daab857c119810f5d13cce93d77222705b151d9a495dd487c07648a93df2fa384e0e21b0a1cd0e02bf1727b7c7162070aad3515beeca55fd80cdb8f53aed173ee053a6cb5cb1272fb2bde0437fba1c5fb556649b175cec3513dbac77c124f5fe280d213bae5a17ec7444dbbfd8f5489d0b6e4bfd3f050f51f76370c1bf67b8dae6dc82a98b24390e7a752fb7059f42dc831cfe69d3a3165cecae9354936f3a4d0bbe36777e94762b2f330bae634eaa21455295909105973c32d29e85d94f4c2c98dc9e1c4775f5907358b05b3eb1a7a30f7378bf82ddb5f4d8394ecb61bb829f8c5696bb3678c6b4828b90e37db81556b09edc935be6ad5cd155f015e37d94392daae066bdc404369ee0021b4e287448a89c1251a2755495b1dc31fecf185f060d62e002181c28061b4de072794cc9bdc3e2c79f1b4a6983094c88b9c394a3d42559646163097caa34d92d47da719d1b0b1b4a603c0eadfecb9724f0fb9a7d39aa70cbd2ca0612b824f5415c1f09a1c63e021f52d40e543c9cca5d0f17368cc0e5a012a4d5b42d864711f89bf441274446fa58379408ec7a640e18d82d6c0c81bfcc1ed279316d2f86182978810ac270401864d8c2861058ed0c217348aed5096c04818bc1de36fb6eb69ae80f3680c006fbd318123f9aaae7077c64aebeb776d98d6fc3075cbbbfe6f07d3dc58ae9059f2b86eb94a4bc602f772febffb80b268596ec28847c518f2ef89cbf4df4349248b072c1abe455665fcad131840b6e354b6a4e2e7e16f15bb07b51ee22e56187d9b605631f335c3a900e2b2db5e02a640d8b13a20aa8418b3ffe7966c18ea4e6405c6279c4b42c78f7a8edc3aa634ad3c5824d953ed9c69c64591158f06d9be249dba66ba8c62bf8c8b5b25727c6155c5caf3c29872934456d051bd3936af28929f99bace0377d9021620c39cc8ed558051fef7e5ff9e5fa49d40161100c84e18030c805c2704018c4026138200c5281301c1006d9430d5570ffeed1bd749a1e4bdf504a05379e2ea45e6d8f2b846e2879a2062ad8582a398eff9782e4e886528d53b07193f5c5e8b76717baa13430051f478c1af61ea68ce942458d5230b1a2c38fbc2a7fea6a908289103763a8a45b992432ca50816619458d51701beee9697fde715c51b0d933e9ee583089135c50d408051b8389774e53279a1a0656133540c17dce2c4b8d94ab43a599a8f109bea3cdafc896c5724c0794612e40060370605
af01bb0440d4f70e751b49798fab24d8e6627d8ac1e43c813aaf3e300190cc840c143d4e00417fd3ba88f2ddd2823052ff80a941a9b28316a6882fdc8f2c5bff330b4d41a99e0a67537771ce858a881092e95d8e5b0b28698347943894c0a352ec1d46647e5a1e696e072e36d5c4be50da55209c624bec4f5f8410e2d093528c16fdecbdb10bc1b4a93e0cc7bd2e5ae9cf99704b7ea512c63f2b8e9b38d0497ae93fdf8f9c71a9290603aa456ba1191d8315948a8f108de62f0d698e358d555c10c62503ea89105dd18826578941b4a8de053f61cef886e9c4e0b5af0648c81031a3423785d4b265e21b4523d460c68008319a4a06a116cdf98a4103ac49ceda08622588fe021493bf286d21830704b0417f9fad15254886073734a8a915cb266c10183161c106a1c82b3ab0889f2ce285adf50ba32041ff73e56cd607bf79542304962aaa62eeb0f5d23d420043ff93fe559d08e320383198c8183ae16c060063808ce24e63052d2da9e985bc6b7e07d05c178f8e7babd1f44b21e0826c4747bf943ed30ffe3885003107ccc9c18f25c3c75d4831a7f60b3ae3ee438523fdcce0d2517a8c0d8a10217c0a00c437e603bd3fbbabe52ca1c7d43c9ae50a30f5ce794b2cf834d6ec9583ef0b1a3727091ceca7fb9a1944fa8b1076ed56ff2d2e38c1d74500f7c4eb79c53e40e72fbb262428d3cb01fa5497845cd7156aa6ae081fde8317310347743690c18f81db8ce51cfb81ee4dd98dd501a830c156c0d3bf095e6df41eff2febb1b4a64103a5b841a7560436bb4595c4dc990a203d3efa1c70f2bbd624bf6851a73e023f56439da29c50ed25ea821075622ffa7ee3257879e38f071276acbfc730a518503fb17530e25920e704018653420f406763d0e733bbacaf1476c0b35dcc09b5977dc1de4c89bd20da53670b6fd71de1c3347be1a12d46003fb51aaefca49b3811a6be06b243286e4ba8fc1afa1062e87ff41534388532108851a69609258e420f3a97b649e26d4400363c1c3cfe1497d64963f035f1d96a7df38a237793370d12c7328fa52997d978117eb4f97ca9182743419f8c820995b3a66ddd18c813731f53d49193170d92d4b26f14e2fc112066e7fb26bc851eb3dc782815defd58b4ab9adfefc0237eaa17a9e2647fbbdc0571e8fdb253ffac8a30b5c4eae261dc7c92da5b9c0455a8dfd51accc926a0b9c8798235bc22af6aa053e3a0f92c5ec965d22ca0217bcfd424decdba039a10616d8eda053e57852e520735c813b49b97356678ec8991b64180984d1810a8821c61849871a56605dd368d61c2cac51054e3a95e78f37256f4f161994a84105d62b3744e5541ed8f81108c3016140c0013b05be430f11a1366a45bba5c04e0e7b3aac8e7bf3e651a8c2f33e8e5ea1c045bc1cc7d173d41223fd04f623861c9ad27202df99632fb54c7a49cc267096ed4378909edcc3d87e0d480e3598c07db08eb672a5ded0713596c08203062de800d4500213d3a526c6addc50320aa3031100c1ff11438c1a49e02c5dce71e0afa139a6379460f08231aa4a0d245c96a3a6c761991994412e3082310c50e308bc86afdddb471bef9425d430021f471a2c6dbb799ace29029be1e739d64b9dca1f22f0b1a43c144987c09d69cc6331e5c98a8e10f82e8f24a706d38b3e08ac4e9654d9a15aca3610d8fed32c8da75993f91f30595ee22966bdac8cd7f00113725b99e7b8da3da7174c5b471eff5e36b3d0c10b26794509d1b4a76369175c65da8bae90fefc2b37b9e0c633e4586c93e4f471c1ee4607b99537784cf7166c56648ee320affea56a0b3ecc414efcbacfa2762db816f1dcb1b36fbc4ad2824dfe511c17cd6c5bc15930214da268a46e09a92cd8dcde7ce5d1a284d4c582d75ae90e1e794adf010bc634f98bf8792c9eda2bb8881052a3460a0c57f0414fe3dfc6cb6e293204305ac146d48d07fa153058c19e7734a162049fa89655f0b5163d22ab5405dfc1273c57d705462ab80a0faa7932dbfba79600062a18e938ed871bb92c76fc29b834f992fb96746df6300563397eaa55f12805eb81657aba0b91c1d2a460cf2ae45d4ef038d2370a3e3c891b37c71c42494c147c0e2749ec5cd17aa450f0d939479d1f42a06043d3d8d9d4762425d14ff039ac90d7d367d3247982f768d327adde0e3ef14e30b5717a4452cab62c9c60e3ef7fb43f7aba9ddd049fabb375eee86322a526f8f8f32f42b398092e73457f895a4132476182e9b053ea146a5d8289af1ba5c3cefbd4174bb09ede2f845c31727c6725384d8fcb538a581e3d5282d3c81343d7470b7dd924f89a1c684841752a3c24c16ad7dee46de4eb084582efd471d4dbae8f1d4182b51e0901e3112446e5018623981edbbe9cde413dcce50dc06804d7f6913c65fbde473e23d8d8a697a7438f45b062a79d62e6e8d773ad08467cdfc35add93ba35115cddf98687db8281083e72d9c79f3e128c43b01d47973779c2c3b4794330fd71
e5383cdc8e3d8e2e04dbd16dce41a67775c91382fdbd98badd3774faf82098982ef6498eccc9122308cea64cfa329b7f90974070a31edd2a254d1e270404df1d9a29d93430fec0f5c616d71c727e8e3330fcc09d6e88f6e4ae5396f7810dbaa9af8310bd223df8c0b7e458d874925053b9a1f4021598327e0660ec814d0f5a39423a891600861e18dd4bbe7123d5aeff3c305ee29179877e34e9c6037b1e7b948387664e46c0b80313390a4f71246b0776d2c358299d11f2ae75e0dfb378468e3ad150d2810d0fa2e668de1cf8f3208867c8fc561901430eec45851c78f889031f7675cedc6ecd963e70604caa52ef428ed637e70dac6ffa1cadeafe4479dcc0c49822bf8f6f44efdd06d6fb6d3f160f36f0d1e7903eda6bd3cb7a0d6cec98eba1b55fe2460d4cecdfe9307d5093b14d031fada52153670f3d93686023c7d1e34535cb693c03f7f6bd9dad3ab2c8998189e641074619f8f81ea656cbeade412a043806186460fabdd57ea2671badc01803bfd1a7c4fc73dcd0316650460c080c66906088810d1edac7993b8af2fc5906c0088381a1f604607ca11bff4f2194a93400c30b5d60528588f8c71d648871e3c289813000630b8c6632b7e82e185a28168091859b0118587801185738a52da8984a749924538fccee26460ca1424e91008615b8c99f357d9c22b3728501605481c438021854e0abbbb752e8f81962ce14d85193be1c1ebf3bf4a5c0757ff42629ef47da61a2c0a4ecd4f17b103ce48f02054e3b0617cbf102e3097cc762dada9d72471fef0426e59f546d6bcf314d13f838d773664c60229ee7e40e5f2d6a57810dcc2600c61298ac4be3c9f3c4df1f0c25302e390e738e7673b23cb5004612b8241d7ae0c9a348605c2d791c5ced7fd53f02971ab2e598584afc14237051153442f4acf60e2f02abf67120a931c729912957008308dce68fe3de0d4dd7be72018c21f055bf51fbd155d20e4a40802104b6434549cb92536fccc10802df7953c6a09b0308f6eb66338f42f903de72b47a62836073b2febe781dd68582e05390c89763cef7ef02c105ed38c72871f5a31820b8dca2924f2aa528da1ff8dce9db41a71492333f9c2e0593a4397d603487ea460f63d09d0ffc5f568a740c1e52b5076e7d637b054b0f5cab699d455ef3c0fff779f01c925908513cf091458f1afdb903d37f9fd3d573fe1cc70e7c0ebf63c7f9af03eb9bdde7913c7df0e9c0a86436bb3c3ae9e373e0c49374981f7dafe572e02ba59a1c72448ec38f3870621bb2bf0e073e5ea49f776d0e62dec04b7a6787ccd107ede106c623fef9e6c4f8ead1066e837fdc7f9dee410e36b0dab591a5f6b57eaf818f228668c72e313f450d5cdeff4a8be419bb34309a21ee45c793975a34f0b14d7d4ebd1bb2da33f0317e0e53ca93e868cdc08ee61c6f927755a96560d3bf3b8a7b1fe5a564e092e78fc394628af6730c6cc7f9aa3785ea78a7189812fd28a5eb5092ca30b061932fa45fca1c0703bf962b72c5feca397e810f435f8dd8e805ce2d74554551a910bbc07810d10fbf24e87ec9056ea3c7ff2fb7052ecdfe73e8cffbb86981edfd0fa3721c7143cb029fa3ce919249d0945258e0c6d3b348ce61dc4a5d8197bc132f657a2c9dacc0a7eb3389163c658cb20a9cfbd4644f1c0f36492a30492ce57037e5503153e03c543bf82b052ee767d6bc5d6d46a14ee39b7de981029bf7d2fd47fe098c5fc7da69bdbe7177021fb3e3b8ae52a5b2de045e3c8e2ba60d13b8c81d47a631e97f97c0849468e9a343097cdeec99636692c05afe4c2d4bc1022081bf0c2ad1999b3e2c588023f0932d2a7987943c65b1004620dc8394325d5b04eec7f6a2f344e03dff9b7bf687971d021773a81963fd441285c0977560f69a726d14042ed86fb0dca14f0b044ec326c43a5847447fc0a7501fd9ea967a9402f880c9e157112cff52b4177c54fcde2cbb9558f282b12a158dff9382bbe03a3d481f724e175c84851c62f472c147bd16a4cc92e7c0c3053b693de4f1289aa2760b7eda3f4e53d59662660bb64ce266fae530f5aa051b8377b449bf532c8b166cbe7854c1727de669165c5aab0e21757a782159f09eaf34a94e98552816fcc70bda99c3942d42080bf66a6f638a7ff539045fc16b65ed38aafa784582aee0d65eaa2fc5cc20116c0577e7e679b27e381182ace0738e589fb4b30ace3bb22cf1fa62c8bd2af8380e3ab458c8d2da53c1c6d7ed28799cb6428e0a26e8e5ac11cddac4e3147c78971b661da6e03a94587b9183fc38a5603c6809f1f385144cc7ee5929a68c821d9d249973f471f3150593c1ef2e5f4377e886820be12d28d8a8e3e9715ca123aa9f60efd2443ada9c424e7a828f93aa7dfed8092643a8cbfc5873e59013ec48e4283fcc951fc34d3052bb316b7bc48f424d70ea15ed73f4d498bd4c70a19364cb90539cec30c1fb54bc1cc790af357609c65677bfcf35278d9d25b8f01c64d6e822b5da5
582fdb8626ecd5795923a4a20c9d6fc833cdd24f8f20939a6525f4fd149828f4bc783cafeb50f7291e034a34e6615fd387320c1a9a7a81c2b7ae0d779041321fa974c2bed3d1cc1edd54799b9a3117c18a35d247fae440e46309d2f739c73d89b3b16c149e7ec211545b07ea51da40d21789b08f6453b47b446043b21636fc7610e414cde95bf1a43b039cc25a93efa95650ac1dd74a839b014bcf642087e5c5af36e0e4ba73208a62af3e5384ddb478a20f82a3d4d679625424a20b8d431ab532ae5ff0b08fe23654d195375d2fd0367a925a634bdb935ea072edde71c7e4c1e249a7d606af523a7d8b1a2997c603c468f791bfd72977be0ad5483af957a60bc34a57a550fc4cf03a379bd37b35cdd82786037686ebbb2123bf10e6ca9744d8bc59446b403d39d4ea5d5ab03b7d1227fb2774d0f3a702153eaea184952720e8c9dafea5dcce4c0c4f61c9264e3c0767c7a2167eae54038f01a29c450163569546f6072dc1d7f7db4e4c70d4cddd4469deec5b40deca5bbe74817a3e5880d5cd250125912633689ad81e9de6829efb857871a98ae9c69d35d2bba69e045356ddc5c6a2e7934309ed473fc62a926e9676075744434a47c361d6660d3b34d3ba79a98ee3270da1173bd84bee864602a99a98694bdd43206265245aa25a5889d18984ee983848e52c894090397463d59927c75e90206a6f256ddf8fb052e5864bdc0f48876debca1834c17f88f7cc3d3947b642617b8c91042e8cac99fe41698e81bcc2ed846fbd002d757517c5d5a43c52cb09bbf22425a2b4288055eaa4d7f2a7f32dbaec0447e0f6c93b9477959a1f0345215b8f7ce8eb4d73dde890a7ce09e1b9da39a021bd5fdbd77d19e839414d88f3f9a8958a58f211505dec30f69c173a0c078e86d133ffcbdfb3c8173d3880ef59f983e4e60d36a8acc21a7095c72ebe83d5aa3ea6702978246be91aef0f597c0df8478e8214bd4f595c06f57a53c9aa24cfc24302d314858cc0bd97b24703988695b9d276bce7104aec3a2aa648dc0c6cd0f215dc74cd751043eeeea38979d25d31081d35497d26fc463b487c087648b88f614ff324260f2583cd7ca418020b09d3fa19d3bf4c6420020b07124e7fb4731067d09f0033e969cc27a34be472e017cc05ffadb47befa385e7bc17b65a7fc0e7e1ea3bc606c6248a943e90d95ee82b7d01a4143db063b75c15ddcd0fdd9c2db82b9e0538b550e3dd2f788b8603fece89edd5a72fcdd828bd289a57d3992fcd982f1d8a343a7091ee5d58291fc122fed3ee575440b269b95e66e8d939da259f09a35a26f8fc6c88a64c17adf7ed6e8fff14a28165cf69f87155cdced030b3e3755c2b2e7159cb8557a7b4a65d371051772581dc7f4b9fe7f2bd8ea1c996bbd5e2e9f15dc7ff6ab607f7b93e9864696bc2ad8f48fc183ec11557b2ab81789d5573974aa382ad8aff6f455f7f13dc729d8abf330c433514d394cc127778f3d664bfb93a314dc7bea4bd2f1a4602565f6c6318bc1a347c196048f1f5958a44b8b82cffc496269219fd5a16023e78cca29d8861a14ec7887e3e1e96651f309d6a3c7d6287a828d1e639b4b0ebf9ce904939fedbe597a62c47082bf7bb3fcbfb516b309be3645f75234c1771cf94359a499b499e0d24b3afa1f13ac7d4cf9f1a997e0ae376e4f260f54534bb0e92a59094e738e7c2292e5b19460a723dbb68ec81b9d049363092d0ba224f8d441524b1f16093687b667453cd3fb20c1458f8fe0ef265e6cb7ac12cd116ccab86649ccf76a045b2ffa31248b1dd48ce03d43c899838e39de22b8eacd91df9dda56041f58584445bfd02c117ca56c77f51ecc24426c208255bfef899acccce2c5c621b871df4d6e1d593f87f5c08621b8d0d93bbeffb2c997d2818d42709df3a5f871b6ad98b1fb810d42b0ef597573070f3ca47b105cdc9073d0992fb7871a41f45e1dc3235d2f105c084931dbaed546bf00c18dc45cd9b1849c5a2a7fe0e39452e8bedacf76ae0d3ff0b9a232bc36c7f187ba0fec6877248f434a2b666df08153f7b0e3eb45dba54f07f6d0978e6f0e82e7fcea81ff8f2faae7bcafeef50636f2c08418bfcd73b87125ef36f0a0bbdea7a9abdedb8007d8b803636ad143b6903506a9edc09b4e760d76d571c89a3a309e13343c4dde182ba503d7593a7bb493755142a8d2c61cf89c935d78669ed871b41c98f81d33ff85b60b4fb9a1348629a305ef486cc4a1da061cf81c071e87ee59dec0c7ffb03daeccda7003173aaf3ceb6aa30d4cca5779a9f7d2fd48850736d88036d68036d4e0eec609c99ca543bba1fe8789010770f01450ba4dd848830d34d41958b18eb610f4e37de430035b27adde967b228bc446195837efb0fe754d447b316c90810f724ffa527f6c126c8c81f716354fbe1127d81003933fa5a47d1fe4f829250c7cc8902da92347fb1f30701b747bfa43626e56bb041b5f60a3e4e68e1f6f0ef3dc0bfc4f4b7ea9575536bac064ae9cdd
827ab4de41aa6c70818d1e564a77e9a38d1d5265630b6c59aa7674316d6841c961be59e042a744defdcb96dec102d7e579ec2d5dda7ecf15b80a6e2a5a1a4b35e956301b55e082c468952b6d06cf49950d2a70dee953d33e07c9828d2970d6ebf1e307f95a1f471e6c4881b1507ef93e654e694f36d8880263693446a7e4aa95bb63030a7ce5cb6d8b8aa8b4adc5e2b1481c1087c30151200c06ba331fd3140800182c268d8502411427a2b03e14800351221c3a242620201a12141a0e0e140e8502815028140c04c1c0201810080683a1a08bac52c60f693771eb0f7c761f762dd2ba4e513bbb8529e25ae5eecb45fed7f3854cf14f117b026c4be28ca599a5dc1dc0c0013ad5d11cdb6701a58d773b5c5c9ea737e85837754f4c9cc607e8b00e73ef2ff53ccfd5b6f2190dfbad99da6277613d41820a29bd227d2f8e7a9fd7f5ac4c4b6fe9a519f9475677a107eea48a018d6968e3615f8f1279d529bab95c24a712b61bc39382ca81e23d4fe4995293df6f1af514e9ef453933c61350c08535841b479fea59538be9f88dfe42cd856bac8e943f75f5a18a35759d22f3caabc5eaa0e5889ed0d59cfede5ea2f33a97ddd22dd94895d89bccf4795744747ceeeb6c0e3d8be8f82cc56a6303a93c429a625e18f6944e6d8c9c961b8d61d38a6097bc392aff695d315f329f4e1673f4e8cff4066fe8b9cec668892d99aad6fb99f68b482404aaed9b7c9c6c58c9b0c302f62418d46044c7f34b7b64715279bd207f62f4947296f1c05cbb5a82117ebf7d0af7bd4e4775ab7373c8c77d4e7775d57c66b3b29c17adb5a7ec1dba871cf69c05dfca0d03656f2a350dfefbbe29207c3d5b4ac48fd4b768b4ce45eb467ec8c8f97b7de4f36c3c8883887c7b55ed147b9a4c940d27a82a3f587dcea736cc726eae8521e7fffc1cdd61b108ac6b31bea17de473756bd8ee3d0dc999ee69fd13d66a4321625ded250da36fbf3916c2dfc1851ff0bdcff2b16633d31990e56cb2a9343906c394da1bed23d2a60be2d0aa3b7db21338fc833eeb5b7c3de62136cdd2fb62e9fbf08353abdddfc19ddfa019bd9518cc63e6c3ecafcc895dc6e1efe955da6b9d209886eaa99ce73e6c3799ed7df86c347e78d6dd0313dc8c04b579d96179124ff98043b0a7c719b9580937ddb1fab0bcf097cdc836609996c7a40d2a454f3b81a95b24c602b2eacd72f41c38b1bac783799cb66a4d4d178904276a1154d81cdab24ec495c4d88fa1d89e018e89678946ab7e802514cee592a7d1c47c1840f8a223dadf85a1a5a65d511921bc10aa7b5342c4e9353c15f621013f21233cbcbfcd95931f2ef531855eeaed730965257c52b380d3e508df63daf70dde5e7e89b80ef1a857c8a9170e62b8feaaa3309e43bd6fc075377c0bb2fc40dcc3f1a8fcef7fa761e76098300c6a2c4a6bced9a087d0ee931d3791eb89edad1cff5f5a84de0eec40e78ed02412e6dd2cc1111aa84be12f80346e2414d264542e117da0400fe81c090157c30bf9b06674e97b619d6205143cc30fcbab63f0775f18ec8eb7a71508b313e352506dd041bf3de7de8bdbe8b2bbe9076589b021e3dadfd1955a2c2c84875a6173227724d2515e36183e61b793499a70e46044e4c29e76248166d80603e9c48ad0d5027f1135a128685351a2550d1670051400d24ec54b00dcf22e7d86fc904011a4fd2c0a20d41dca1c129edbc3948878bba4b95834d83d5ebb2e08afee5913f69ed843b1e568b4e12891241eca75e2538d8413021705383a13bd6bdb265a8d5b80b0c90dee0e7045722b08ae45b7b4c8edf2a0e914c3f9b434bad1f5a2e047c06f0a7608aab15e73a925abb7d70e36ecf8cc0af9f67a43b7163d7ff0d6cfce8318c61a0896df32dcac802808ff16711600c936ac483070fc0f2ba4cc241694520081a4b81102a0d02b301b3a04522ce02708f9e8cc4732a00f58db0dd94092fe0b49a20d443b860f9afedeba6cdbe4036bf296fa20990c509777a830b34b25ef19fb6f045ab4b5a482027d953fd7dacea712d40ce35d531ad051abf55504607a37672b0500bb2fd91c93d32d7b635538dc0e7f14765583c467463b937992cf1a9def480dbbe6b4a3815ea8398c9c10e42bbb34103db7748cc554a8ef470ad2be2631a73b345f2e7ae651c8ed519da1a2546edc3f130a57fca122b037e50a6442805a3bd7d6086ca64197ced77140e90076504cebaea691b06a960d0d30251329f5a226eb850500a27c5c2580652e6619de2d4093ea3fd4f9e8e27147244503a14131a27224440589fe40fb588c8cd0a05421070afcca09aa2e901cd139cb4bd06633e045d6956957d79d9233ba1201ef8a368e4d2c5966f3c8d5901a1a04d1caf751189aee788709a732c7648c9e50725fbff751e60dad3b752548b0d33bff5f33520c151bc11b8d5804448c768aab198c1fcf68c5f6743d05776433d23418edb2c56877d3902a4e530c56401dddc8837fe9f130b0157d92a1d
faade9bbfd29ba2b036455e78db6fcb45a41abea30f10f323b5241e75b0f34e6ff68b52624eedde8dd5be4fe18ca28e2944eb38c1c4ffe07d5e29c9bf018e8f22eebc7c5fb6374f85770c846a4d3c0f1616b0fd1decb28f0941ee9212be3d83d499c6106952ed67c8185b42558c1fcacb98fd7bd6d5986399dba68dedac2d4a71ecb70c3e8752b7ee14f71b7832766f86bdd42d7a7fbe6efa5444faf4042c9361ef512d8126924bf9322171b484ded19e5274cdda5807a7aae0fc51d364eb529711368734bd947558ec9418ea579be4184895d030936144fe93697e774b777ac81a701f75e6ba471777d31bff7e1b84f228784d3a9dce208b022f4f329359397ac93203a16b69376b1d988d11ce5091aeb66ada142141234fd93661b5a2178f6646dddde07a46af10649844b097e5865cd5dab2fc680b5df777fb486848b38b2670e242daf32420e91f6c1a6388159e4952a2dcc41f46a19c950e281124fb3952df1a4add9e09af345001a00b9c31c1725ff1f844964100d5c292e2f43d1966263276b9d20b0bcb15949739b4170b7a55aad1cefd38aad681f8bd50402247fcc88ad3ab52440872c236b87140ea3857e28be36dc85b9302168d219b4f45321aab5aeab7955b9fa812511e9925a74f291586a8d25ae0b44cd7765ec1e87848a06377b02f0116392f326604e8a95cba36391552c1658d02d4d9874da46e6093c71a0fc3a3f4d4098fbe167ead54b576253b08845d2483f0a824eac4a8a02a880249ab082708b51dc7927a0de308ad21500c355a12980f6c9c062e1c508320ab7038d5ff7a19619c61ed9946942c6b42419533682462b2f3decd3796039c960728c85aae134e65af3d61ed10839a4fe822366266b105d4a085adcac859ed144b8be8cb86b1ba230f34b0fcf7dd4a8c8391a7f5776891fcb2e2486f0fdc6bef6b48462aef285678700ad4598c9883cd7aa9c5ad8552750a39f39f502917b169b42430a5ace7431b952c9e410d941eaece4a17ee213848b9355ad57afe593abc9c4fea348ac2d10ea7452009a5343a13c184c6dc102f1ea8511dc4125a089066937b4d6cef512da9d512f417fd38cf422675110c519c04dd2d6adf3080cb87e3989f6ca5ce9ba80116064a404bbdc3b68007d2ab1c2d9f7ff25cf3da98bf4cdee10cf02ebba3de4482509bda1154c601a1fd1ed79da392d1c93bd0af2749dd0eab8ded4d7005dc739c4e344e17b2cbda28094d1d5f26963472d032a2d9c9a646bb03952105b70e133dbd996b5d48d3ba79443c87cc5321a3480c92a49a7103160b690a50383c5124f8aaa8306b7cca237446fe3d7fafc559bc611572b1c38d5c0b4d0b6874693610e8519129f9bec37afdd856cf7872cd194ff3c345062319c22012e637d636454fb58f2f04f63585d4990884a559808e2c0f5327e4962130343b0c2b7b7588af0c16a9a057830a350aa19605d52a10bfc5f8fd12c98cee5006f041588caf1e8acaa74f924fcd2fe85bafbf1cd2d912c4c0bffabdfbf0ffa004c8093803a0ccdf7e0058fe8ba6a96a36c9d2029f7050cdc54ce84fe8a90ca3fdbdecbd375eafd4639cfb59ffb9beef6334805e1a4800fe15208284a75f29284ffa60a872d69a3e9cc0c148b996588899c58a102498c5ee8f3ab1244ea954acf69fac11560d4deddf871b029641e65be06250b95c724740d090642f332b866295b4f530b34c9b9f210295cc63b76445246e6c2232b0494e86eacc71c7ba78880431320c706c799915c3672154c3bb8eb665e03271f942f669f798d126d70397303ab331d3348afb4ef945add708b5648af7057a34e79e9048dd9d7646c31888717b69c1c06120cdc5c2d893de72735811d06470bb213d45b471600b7c28f0deeb4d1c68fc0f6bfc7c84de4a847269a41a4108081b4f86e3bcf01f2c90a196cf9d097c4d256b120aab11252f5bb84291685c9a1882f5eb30cabbd39c052790ea557b9ece5a21a977d1754d9e2a75ca58a88caf638dbe44cd635694b1212bc845c8d4bd0a13b59812a1e2446dd31fd498a9c82a7aa40cd59d0b37fe93e30e48c177e0eecd984d57f24e8a6825000ec43a744f25a0dbf4c99d6cc28ec58f502cea64b45e1815699f012e48b6022326ee5e59a178a65c0f6fb7349f022f0267d64e4565189e701bf369328efa001de136981a9c7a8c9adc2a6b5a24f03955364c732ec8d4f96de735c91ae66e1d42d09bc69d994bc84fc84f3ab7a24a04af28c31b51ff0c245bbb693a552a404b504ba025b865a885b05d80f606aa490f892f371fc9ce5ef7aae04c42c531cc85607065ee93e1b6e04fbbe91af5e353f4137c71b6824c3e5d5bc4aaf2c42679a28c0a1127c49775268de8d06073f3462898606f53dd46bdda814ce06dff56d209d9babebbfa682902103ec403b7b3a67eda646593954db636b6c566a6f6b6d99b35193d3a7e7bd414578a0f9ebf6380ffc6c75134265d07d9eddf9f0715012ded40ef389323e21ba894
09c5054188d963d9274eed6aa843aa89ab93c70b0fb5c1aafbcb9acdffd5c00624c0636449084677d14e9a4f9fe077ac608e377bdcd09ee77fb4695fef5f262f0f5a739d8b1186505968df2a709ac8b6fe6f9648ebb912d3148dce2df166fcbcff5fb8906b6e55e76c16440fa2dc46ba4b07dcbdf053671181b14b2867730ded89352f6762d2131897373e5c1d2d7bd17311540319179def5269eff093968da077ae2b3ed8ca8caae1e7cae7a7f2deaa2b4e71e61b6414d3a69fab1318c0e66e87b2f12a57ba9218f72be4682939e883f6eba3c084b8927078b24775ec1c439e8003c8f2f345592a2112d355234281e3f4f1102311f0f72a88dbfc3cfdec5049b074607b95bcf1567d1bb1d6d366dcc96ab88e4319e1cefcc0d01f2b4e3437a1c18a62b17e8417ef1750d1327d132a97f2f4906a535b9024e0cfa1ca426357ba51c2fe7e2b98a37f44ccd907720877eef03ae6d17464e110991647f5bb9a6fec32f3bb62625b6bf4055ffe483885ec0f27fdecc1c423b2c98104a42020be21e269c80b01bc1a42e7fe51cd9155ec1990923da1083112d481a5c15083853c152603bbca66fd55e490038d8431f95b27d32913c2b14b2b97091d4e121a224d4056954a8b4fd7ce060786c19d864bc5374de2b0d95518b3b636353b3e7a81c91efa6b3ab0ec149d7f63805ceae2aad12b7da114b59736fca562438d149c648548d7f152625388ee3231a8c2ba1e607efcf044f41813d5c35238ac945594250fed10506be73716612c949c3961355029f1925adf7b612e55e41f091b1d148560c96896e67adaa1a63abf2ea2ac871a4213b23680ed1242990214562a717b9a0db90af49430775dd16bf014d87e72d2164933db038a8e5fae4c038cb34e2bd2896624a1d5d14a93fb4b8584764579f9a855d8c619904aab0857e3b01d906eb6e4a3a1d9c4c384e162cb4eedc9af57205978ce37df4ae5ae90ab408c2c4b87dff06e99e1711baecc5cb3076237b71e4adcd93e7e59100328b45e873c3a8e6f55728f6d1d22e3c87837dcf5c0c7516361ad67492b9ca8a1a08a4e2e810755a849ffbdf9d46dda6c50ae75635490e1167b043c323ece0a7d8caa7fc0903178ef9eb52bb89bee3db2042d1f62807131cdc9841f78280d50f542b382378ba84fd4b9f5cc3891c6d64aa86dc0c953d65402a32858facb6645f2fb6a7ebfbed22063544059636fa1bf80896031a21a7f81598c22964620823af91f0b090b7312a0555f96441199481bdc5847c30d57991a8e12230923c0c89b9b0e7b1ed11fedfc66e435a98e921c4381de0dd271e98e6413feb35ad233651db414678078d3f895ef03bc76a8e9955a588cc3b0596befeaf8d07620f9335c122477eda59962649a91354d21069a1b657bdce1a05587550d0bcea2bda8b4c3a3460b0daa9b92793800606031cdb544a267292f1f2143a5b47f2039febd2f004b36a08c382f438c14f4891d475c768174785d0402c9380e2d4a25708aa57c0f859a040e6a56f8007daacada936749dda53ca450157d142d6934763b6d98a48375820cbd0ddabc250ac69923a8c9c32de27a2fc292db36a5fd46f0bcc3d5e81fbd1214b864a4e0625de6ea405f8c83e49060575cbbb98c71430691cd0317a6682a1dcab873bdb8c60dadfc3393b0702365b6f62ad2c68081870974b858d42c4a70d933682e154c02110253837f7ed03c6ecdb0f98ef169aa09cbb0fde0a6622013bbce2498ec9f57641406a16322c49a6e3488d42c6aa3380d44482353d1833ccc271ad9c5001c64cb186f4689fc49b648b63ba67c834811081088109827a7a3dcd6a963c3fdfa11f30c459ec1a368c0f022373af45e496a279d20e2708b2988ec00cd3feeaa7015376c553193e329a329beab58c5391797066842eb1cf7c66175c94840a22210bb04fe043d40bdb0cf98ac77428bd8ee27c1940ce19d0e95c7804bfdcc66dd845408cca814390920d50cc832aae55947429feda4863a740343275d70ff280a5b14fe2276eb0027763441275266fceac95c0a8d50204a992690fa476a817de7042f07a559465ce92370944fd4af0119f1f19c4facd912b97c04dc282f3163b66cb872cadcfb04b4d074a9268294e7f251911e8a52fec35dc6c214eeb28645b01a917965eb92ea72378c11570fc94b17d1c6cbf2f73a3a9343d96760cc3e6b1a51250512d84f6705a38366c852b7d745c3dc8d0f7724997d2c91b40a0103d6509501ad6d01527087dab45ddf12090e9db0b0e8848e9396601947a40bbf8422b2280a5babf82922655129eeb7fc370f432400e801a9e8f2bc172549b9423dee89cfa8610f4d230888a122a91b3ae9d0ac1994e761358ec4ff8e0dcde94c36ec9acf3328e580435c58c1bdd2e1021dc771a86ddabff4447c30f7e5b9c11f129f2a5b767302c4b50ede857929d40a6f2441ca29e37205b19293660411e6e6b6867e9221792fcafad3cc23a5e50c66f177e66e5f69d8dcf2cd006ba
2b4b670366b9c70eb6372e3ad8e255fb017d18ce5a8bed9feaf199318daa919a41fcfbdd94350b29ecade1ec2d9b28158f5068bb513b692c0daf74a2c40d4f8ed8d913ec4e8e69723718ca38fb624851a9c516ab8bf26916f54efa9bb55b8dc77842985fa18f7a8c9b9ec1e92f1963a8e2bcc67cfa2fadc6b874d519981714c42383e87b7cb806c7cd8c1a94ac9a21d327822caabf8c3e281160e14b9e71e4cb82af363fda44241d806d7437a3235f6c6e5f8b4042ce9a7b34b7e4f344dbadaec390d9ffe5af9a9ca96709ac331c1a326ad5af87a12af109f4c73b278e99bcc4a823808fadbd8e0a67836f6fb02430327fd5ea3446e70812044aaae5546ffcf18fdf76067304016ef91f7dd871a1a08d2fed01cdae33b282149dc73ee21d34eeda0ade565985dc774ca80df218fb9c323dceaf4c2fb248e1973d06b115ba80b181465eedde309e8aae127b7820bfa082ac6fdf13de1bd44bf05e5a6a5f4a0772a781b375282f4612724f498795d7aa8dea15eedbe4b420f95f4018feee98aae3d3c7b07cde0fb0da1608511edf4ebc8aaf337ea5f9b4b1be6178814e3d471ec253b416bf5c71c155c1b391b26d4d8df35fea25bb7f300e926b1bdf1238e0a976faa3dc4234af696ac104e6980026220171a72994c05ed54bf1a38cf5d6befda0b9d0d750499877ec99ed607a52f89dcda1938a7da65eb506a2b31e5fe3adaa7c247ce11ac93c46fdaf464fc6a8e03db77594bf609fdd3266420da26402a00f1d89a42495c03c0592f1b5d1d61cdc76910287abf9e1f22d0db65adb5a922dab45972aa763df2b50907f39b94194021c9c0deb811bf99f91e48a0aeb9bc424dc22dc4ed1855d24931a936b04d8006554fa9971e957e1edddc8f90708e4684813756aaefa33575150db8761c2a2d9e76192f853eed6951afd4f11b9db4183fb7f402300926a3a6d6e8466496150805b3ccb7d6a9c0e908daa0bd9b0c0043382f8248e694a19461b8cbf9d59a773e193e8d8f32e470820ede8c5ae5b32e604d2bdd294e6d62f6f374740ee7e31abdc508255a46453f6358a90afe9a1221eef95d0c6c15d612837eea073748919fa67c271a04bd921306b93744cdfe2dfde40912e20a995ec1da3581d28085e6f4c7c3a9943741177812fca5ac282c3f624e481436d7950f5f86e709d6633c01c65f55f422ac533f179ba7bcbde3eeed755a7f9f9c97502d288f0591d57eadda9954434379a9138836e15bdae9f2df166b600521300209fa207501e38047763572d1deb63522ebf7eb98e2fb99050d0ff40a17428f77886481f2d32922e6c69023180ed1c173780155dceeaf14ce84f9001a2403ae8150c084ffa01557cbe9e3635834e08280001fd01bb042ece2114c7b8df6a9399f568a15de84ad603382a90c8cdf3bd1c292920116b1b8581428c4a8ed4b8b759b88f5c1a5e00904421ae8f7580323d03a760a70f5671198cebe093a12560e57e36e0315515cc3ad4db450fb2b90a13ea5c3eb8ec4f2f49d881f21ff7348b88460857483550813ece04aafeb2d04d930293c08ac2eb05753112a209832b6f7259bbc2eb7cc53097eee0a6b825ae3a1f8480f1d24055e8125a887a5202a77cc8dfc73181a6c82ae4024c007deb050284af037e2f9023eea874c0a2b20b548dd88d803f82ea83e8a10b7648208fc795ef066ec0b02405e306fe2aac418aa6f091885a7392a010776081d20879ea00440088426fe206102bc4a10e500fb9580115f4c0fe33ff044c1ce0462081adc04ae7004ccea14dd57e2c4bf016f8422f04fd241056fc292501583510e4f7a8f1d7d5fc409b9028e8823e426b67f76024f8bfc7f21980fd201384c0e7b824890087c012ef52bb8b6aa5cc4334b70da1a040d9306ef4169880c061dfe9aa0ee4d1f0e80c8e64fe5f157385eb7c82f72a57f2ac15452f463831c0204d7004ee0f412be070030768523613ec0375c7a545f85a3e20aab037304ebbd1b5b2d4c88508f0cb9e98fa4d016eba2cc82f840365889f1c2b9e00e42042e78834c52b14012d8075366a9c049b006cae6d21c5d5620c68b224b4d479423fdda86203db8146866f60049759ce9c0bc153c7d2e33e4a4fa688cbc4e286bc68eaf57b0d567896af7438c1b8aded1849a348e479c1689507c341cfe2511ca051a237b91279dec54c67dae6347ef1d679818cb39118060bc98d541826eb2a7e48aac198f09d3e2936fde877a9b15efe1da2e41748eb2b2684ae8579ced6395a899f05c3cc7ab048f103c6bc52747feb00d06e43a99a03f8bd47a3d7d3a5a9f610979429ff4715a9f5bd52cb8118a731301ba2ad4662a91dd485beecbbaeb9d44d29beb1c497837447a7d981032d6326ac9dd6cdc4450b31757f7b878b03ed2273464032bbf5e425dc5e6f8ca9f731f5bafee3ebf31dcc1ad4ae867a4b7eafbc39eb72f6638fdf9c3516dca752f42706d21076f75e561ed79e8f83cd9217a8bd36514b6e3f34bb35f9926ca2106
c59092cdbdab7dd65e43be26c14984edb8501045df750f700902b872ae4f46949f15aab521d1d861e8d185912ed1ca720c84779d8c521fdee6c3de7c48bdf938907b6efb643d12141640e8c147370aa1171fada71f377e6b10e5e977ecd1dc9d3bb6d3a9cfa0744497de57ca5b043ab8da683634e1142cf5b6fed2e9c4066e277e129824181509532e1ed12ba48a51580d59db4107ca4f4cb2eea965f9ee38ffa3a8c6c039b0727feeb97c05d3618eab14b73ecba0f25053b7148483a880405912c0366af146fe2e642c2d8fac883d2570890c2213912b119a682188955778690e838820668898e87d89ea27314e8e1a770a508cea39f347c6abd5a2429685be5e0088d1605a8811d43ae663568011081f27f78dade4f31a4990292416ea18c83ee5276f58f243efc3dbafb33246ac316cbb3a6c0736e8098fed1ed4143d84cf192cf9e35d0c46417cc6713e998e176208777aa4814d6678209a1193e92f7b9683b5141f1b8462c2b1a3240880540ba94e4f845e0c601e3559371b370b1c382613171e3d6ff9e90b58c63e0bd98d21c8b493a9347c8446e9b588d849239f5b3116aa2054932ca153ef8bd07ef19e610c12ce8e4af538aee9dfa7fcfc5b527fc6fc199949a35f5ff818eb70b0bfda904eebbafc963ce0dc461f0392cd68e908ea1248861929c36b55e5cd7ddf08ae6975c132d15149deb13c95d5f2362b3bc6b3e4d89e132799bf7b5e588c6b45e64f2daf01d3e08bee2c72b54635ff122bfce32f2ee82b7d03e449a909f97c953b9eaf5ede9cf1cf7c811e010ff1f3a8b659c2ff47b8e14a1da72710fc6b9fee2ac918a88ed2760a254963f5f24881552f0ff4a93cef25ca312ee50db3b06d9e151e2887ab7f2adad5a4e0d24a2dcb9e7ed32ed8e52d361b0b3730151225deb29a049951171828a2880643684ceb96daf1d9553d1c286109c907b9d1b0bb9b9754a29f1495949cb41239b53018ecc168c7b1e2cf0db1b1b13e8fe6658a65d7dad03d52112bec9b0280ad7e624b9f6339e88599da5240d84bd0a74cdde824f74c8a8d8111a5f6a69b0699949016e88bc1ec2e5fa753ff3c44d7da185071c0e3e1ddb02cc410bb5952129fc390d844d9a631787a22bdd2f96d4858feae911e213a0241efa29070020d6f48813a74b671f6f16d17bb255a1d9ad0da7b539bedb28182d741d1e00eb03620c23d8a97f202e81941cd394c397c295eb4873caa16080ff160d3904e1a301a8bbbd6004c599bbc88b24ec33e24487b2493294196ba7c30a52d2590df5560a590d61d49dde8bd6d46daabcc9df6349cee79a0586b01cf6e124628dc570081d03dfa8ba543081d7fe8422208d56840c1504401caa2ecbf2552880e65158771ee3e632573526870a597c1c665865c11755cb61c08901f4b23383e42624152bd171e7ff41ed7fc7686dadf7f3e5581cc6343e939e409c9eec06b64229c9f566750ea0e84d5bc43273439c71b60f519585645ff2956d548c797388b4fce850d80eccb5872674a41d14b73b70d5304fffdf1d87e51906baf9c2099f09542bd9e7813af7f1d179f9ee78acf27437656814e9888ac7121f945245e0e01b32188364a2c4474649888b7f17424fda91410543c0830a0f7f8717b6f9d05ce493df29d96b2a0df4cd4fb078e8fe5144ee53796e23e67b61b4ad91c432e17347f30abd7b18ef2b82aff8d6e83d93ddffbddc5db210f4cad1a529bd1b58e9c2e942daac23fb0c9c543f4447d40ec939b2bcf6c6d81be864912a48a5adf7aab529a4bece41b419becc040bdecb6afbc6d9b49973ba9a74b17c76b018558027e2e84b10a3440b2a4d001ffffffffffffffbf4ebd655bdbb5cd3203da97526e69011492984b29c994524aea17e608218d106cfbc2f6b31bbc04330ad90a850a9aa913ce652d1955314d16214e7873295a75e89b0ba14df8a7a4249432419af004bb2073ac9d4c33ca84ffe225a2359a4a3984092fca499babc4d09c4a2ee187bbe79439c4949558c297912966372be17a77753e0be79f32947074ca65c9ac840af7241c214bfe4defc9362c09c7d2c42c25fb4878325909234b90f04449d1d6848b1b3d3fc20b3209224b562e96ec08d7cc34644c677f1d37c235b93dac8bd889f965845bc2781242447d70cb22bc777feda0a38a70dd3f663231f8bfa989f0433e965ad6f0d929229ccd5497b493877074884eb23ca6a8e519c253825ab414657ea715c2ab110d0b57c266aa1221bcd41de4c3c4ad579506e1e6d774316d1284f35e9224a609f5244c8170b4a6d0594b8070edef73498207994f7ff05b94d81827730cd50f7ed812ddcc9347114f1ffa94bb930c3a091a830f7e0a95e63d59cab424688c3df43147b7943d7a7036dd9470f62699d6e4c12f5f9393a8223a28f1e09760620e7626aedddfc13755bfb299243171dbc13599494e66413609aa835722f3952455be74990e9e24fcc6953057979d83b3496a7abf4d5e522b074f5d859edbfa3
3370edec9e1b249651bf47070fb64273bbde16f7c8327b8492699aab8c15f3fe1e4f9ab0d5ebe8de2b3f229a4c9064fba2e2999983b0829aec12b557fba04e52969a8064ff2183589258489280d9e798c6e4aa4978a1d1a3ca1447bd26b31a6ebcee055e5b4d983d9ac6d66f0a28938d9c4142a83dbdb236a7d36cc58880cdee953728c2234064f96fe9483f2123c460c5e4e954cd9785c304f189c4b972f9885b42f0f187ccd2948cd64d2c4a0e40b9e9cffb593d41c2ded057f8332150bd973ca2e78969596e73d973a9f0b9e4e299b50c1df827fc27470bfec6e522df8b32925995d67c151c265e7fc3316fc8cd9bc640cab70ba82df29b7d6bb67a58c159c8dd92adb08ad4a962a3832a81813b6ddb3880a5e4a8292846cc779760a7e0eeba4477aecb2540a7eca966e3165143c55e1777229a1e00715d7498ca67349e7139c515e962d78c868824e70ece3e234668bb162131c8fddbe9a66c3db638223469cdaaa35c9547796e046134d76910bd24d76d2f0f2e9ad12ad29c113a299c4aa609da19d24389fe173c5122b9a641d24f8a7c36bc57497e49c7404ffc772650fea2f83690c2338fa2a89366a4529256314c12d496888cc6818ae5d30b1a414240c57e6be748e3941a8c1f0c4543a3e338949c5030cdf4a2841a9365525fa85efb1eb4c83cacc24bef0356906ddafae79d30bb724f9cbc25829994c78e16b5f88c987b85777e1e8cf4e1f224d50daebc2ab4ed27a9998498572e15beacc953165499ec685a331b7874b2774e5dcc2f35c39297daf2dfcb48f31cdd6c24b19eaa364cbd27469e1db7b0e2a484bd2e667e128f9b88ea954d62eb2704da62409bfc4422d25366a4e85859f5cc62aa5f9156e6ae5d195dbec4eae706389316a92b173feb215be9885880b9e148315ae95da2093ec156307133156e1b6f76950ff9d5dba440c5560a5e45c2a5ccd15834c5d366264092a7c4dda7395a0738c6bc9291c25c518643e33dfde14ae9aec2393d8d9424da570acba52fa0b6e279848e1770e756a55e44b7a46e186becb1beb52949a88c293c436dd1674c52026a1f0733af97afc34df0714ce5a07ab14a3a689e6136e6629dbd0bc6d9af1841f54badc21ed4ef87177f2c65449cb0973c24f52948d6f36e17c5736b12f65b58c35e179a7a86e82b05333e1a8c77c90a1196407139e74de3978d6eca5da4bb89d338ad4a8a50425967035a5f9f63541e5be125e5be64ba984094a4a29671a25a8bb4cc299b9a492ee7c2927497876820ee2445a345b23e1fddf5dbb6d063541c23917df5a4b3aa6271fe1793ee5b33d2ab64347784a86ef0d67c1abd4084fb68c2f1965c43f6abed286b6084798694f7248119e1c93ba3f71b3a192084f6cd26ebfbe4184a72b47ad987308c7629aec6792aaa4254338769a19b2a4f9a510be7aba1eb751254d84f0d2834ca26c4a39e507e1f7a88e66c24810fe49e263a5cde3994281f094a48250da6349383980703d9558635f2a68fefcc1f512356b7312934c397ef073a59c3be59c66eca40f8e6be8ec2dd97262c2076f3685109e354927c9ecc1ebecd0fa908d59367a70448652795dd4dbcf831b16f3a551e3c1af784da9cc4464eee08789dbf69578f4de0e6eb6646e26a65f8c3a38a6d6265a2c0bb2ed17830e7ea5a98fd9ad366bfd62ccc1b34c2928c1945225c32f861caa7c29944ca21c0747e96ead36f10da18583a772eca4a4a437b8966bd6832ee1834e718363ca332bc6943638f25146c64d52ba2d6cf0748d9c201a624ad53538aa2e25fb3bd5e08aea92fb6419f39a064f6706a1bd349c8d8806bfd73cd7b87ccad2197c13623bb9e62ff5318323837bcae8af0c6e0a256325f92783df266decb7caf1e78dc1d3a3e77a63b68f1d138357952aaa2d56416c1f06b79208334bedc1e0da064b3176fbcc67ff05dfba36556352d288ebbde099f429a9bf183c675217bc9cda4a92432e78e3e127ca4b8991a52d78f9744e7d92ac900dd282db416a725192054fb4f0a144c9c1826b711a73ff27db3c57f024cbe53985cc0a9e2ca2a29bbc65094aaae0acaaa57747934b9750c119cfb3e9c65328a99982676d1acb45d7c592143c4912a1442dad29fa1f053ff797892e792878c1eedf92ecf594c34f705468cf615927f8397b4e1532de5ddb2678720639499bfc965398e0774ea2ac76094a46cb12dc4e5f7239a534a22a69f8196e54eebc2ac11f3567929804b153ab1849f094ec71e9528e4168550c2478399f8eb96b333d45c53882f329a59829ff099352318ce028b9356538955489528c22f8bed95e164d6a50e6303cd173d06292123a9d290c4f635013cddce3c50c86339a949c994db6c912186e275d2a7b3899dbe47ee16af24f51daf9c20b67d14ae5d43d5aeb85274c3061654e3da579e16b4a194ab4945b2ebbf0aac4dea8f5f619d5851b4e8e954e562d815cf8a7d2880ded69de4f2c015cf826a7923f73e8168ea65149c97da90f
1f5bf8f29e3dfb492dfc7462284b62c96535a185b3a5f47a2c99851766729fbcb93d648c2c3c69724c5e426c508262e1561016e4bbe5f305165e78699d8f1093a2ff0a2f86ba9c63ada90eb9c2cfa08488d7dd0a47a70be9612bac70cbd4c5f6d32abcd7ac12930ea56aa3aa70947a7cf9e5646e662adc4a1b9d2425a8f0d4682a29fd770aeff3988d7b8550da99c24d42b6560595c259f70ea7f348f50b29dcba92c48c16e351f829f666a70a1334662c0a4f305162327571283c71153c27e1ccad4ca0f0b46c66bb3ce227973ed1673bcd9441e3093736945213a34e38b275aa624a4d7f8813fe8cc98775499f6268136e2865f2b78a34e19bbc59664b27a13e27139e785fcc667a4dde8309e78499fa311984303197f093d69ed395419ba8b184a7dc2d9a8b327993a6128e8b6c4c25bf554a794af81ec4af67e8684a924978490861d274cab1e19684ff96e96fe548f856b263569324412e0d0937f72541c767f7af1ff195981446cd8e7094ae249c5fbc126884574989a91a7412f36557028c70ebec83952acb4e979540f2c75451849fe9315c90616412a46604fac6201fc2f734aa4d8e495fd2e21ac2cd59548cb1f94ba64b0be128157d534b9884f02a36a3c5668ce12f3808ff3297a64fc2d5c74e108e0c4a4c499c52fe1ebb403827bdc807a9314038327f4a685bf6074fa7cd14bde9dafac40fde6813fcd4f57d70d5469c2449ca3e25990fce7b769d9694e46055cd00640f6eecb56462ac44b992a407c7635a134c45797073ca96941219840764ec641bcd4e74074ffd8ac574da9bb972ece09df8d59dac491d3c59aad478d02a6a64123af8612edbbb0425da5a660e7ea866f43ed1eccb7f39b826a594b22de5e3e0774e6b8f6ea1c9a20f072fc87f50c289312699fb0d8e8ab94b53c7da0d9ecce3a3949828ea326df0bcebfb533eefa0ff65839b6257578965ba73d7e0c9dded5183e31996efb22c69703df7860dd75e524c1534b8bd6a3f4a5bcc90969ec1519b4f0c972976cb976670c4c56eafd3b00c8eba583906cfe1e97b32782183c86c9faf82cc8ec1dfa86de25a46655c4e0c9e567509156a3b8c4a183c79fd4343068373f2f9cbc8d240bee05df4d818e35d97d602f1829b3d35abf29d1cdb49902e14acda6a6444d45be32d798f4aff188370c18b9916d4c4f419c4a57bc005902d70a5ce54bedf44455f00d102972984eeecb4ab0dcab1c3055700c982eb76628bfe12362de6981c6490f1c98c1c5501102c7852c78507798fdab071054f9d12e26ef36cd967a0b1031d3ac8b8b143c78d1efc0e2bfc9f438334bf2c8ea4074a666ae5032055f06a438651e2540e2364260042054f9237f668a68a6af5c1b17643478fb3a443c70e1578e048003205afc49483492728c97d0c714108508e1d2e501029788212bc4dfe2a49b8d3a60d902878d9b13d4851bf6216c2b166860e1d3dcc18410281829b041953986c213e53480390273897b4da289951ed31c5099e546e8134a18030a1a401b2844d9034ac24e98ca19992b8ce6c478f33ea00a20447b5dab3db647e024812dcac50eaec52121024b826a9d81cddba52c936470072044f69d5e0f2be22a6e4418ce078bce731d93363e98314c1cd1821deb4a4c3f04e0625c774b17d08c393821653626aed2cf1110cd773ce9d4cc78a10d91fe29d599515e203185ea813726616369d2c4ac2c72ffc933c49d945d7dbc5ce175ea9646e9793a4c2d332c2472fbc6c4174060f9daa9eed237cf0c22f51dd27766576e1dabce6243ba7e60a7a5db86292ccc154a658d6f823176e0cafbed9b7a9eac2b8f043fd05e57de92d9cf18b8f1a4aae1915e1c316cec88a3de6315b0b3f4d129364626d52f8a085dfd7f561b39b86cf69168eec51525292b6c79f46168e67534273ee8d353f167e67d216252aa77cb28685dfe2dd693e557ba9f02bfc998fa136a497ceb2ddade0c3159e9463b6f28f492d876d0b1fadf04d658931560a4ac57358e1fd28d9c46493abf03644e73575f2a71e57157e6bd87ccd784eb29b0abfb2cf656730e1b15250e1f927a154954cd3ca487c80a77044db4eb3c969aacf8438803f7c98c2db9cb1a2ad9af29847a5f0749dbd6cf9068d4187148e38ffec5e2a69666b1715f1310a3ffdb6758a5a27c9413902c4831c3d5e070805366c84b820042cb061c3860d162010e0e8912306643cfa400ee34314de56e774375525b557189161100b0e171fa1f0ab74c718ecc48e2dbd0d1ba0f0a492cbb0ec58be1efd847fd144f66635514ace74b0830325e0f122b833f340d50e8483c7abc0042188021adc10c0293e3ce1bd8675fa203b7c9a0ff6e1a3139e687ffac4ee5c7a5ce4c30727bcb4f0d14ffee826fc3731d9dd434d7852f986a8992a138e380bad3f9382096f6d7d4cb6eb745362838f4bb8fe553d9fb297927c04870f4b782a284d9b212c95acc14ab8692634a72ccb991df441093f6a34138eb54978d1ea4
47f366582cc8d6306e261a6840f49b8196a4e9d2445114a1254c7c3860d497c44c20f2f76bac23e37d00ebec75d0b121930e20312defa26f570317c4e613dc23bcda13e9979cd28318ef04f7d93b2bb4cd2e824eaf164dcd0d1e3060a3e1ae1678d26f5c5497d6a1d843c58d023c7413b3cc0821d243ac8e8800e17f0c02d7c30c2f1d0edaa2527a93cb4086fb68275584c49640c29c295d53e113b4f223ce1a430517b3ce33f46849f498caaa828d17b3bdba183073b9019668c20c4033b74e0e8810607cad0b1030466e400012c3e0ee18f4a4a7d3e7b4ee55a031f867046a792c3a9e814155121fcec1ead644ac933460f211c1b390b7eb21e84f79d2e092b23355a6705e19c64d278cefaad0fe34720dc3ceb318c7af558f50108d77492948a4952f5f39a3ff8d92f17e353f40925f583e39943589b0c1be5c33e7cf0c16d4b1dbd4e4a0debcac71edc92c4a87ce8c119e99abbb2736c868f3cb831aaf6670ee517333c860f3cf8b18493655af3f9a6e70e9e9429cf78a960191ff4860f3b38a3e92cf5ff7a096e0ac3471d3cf5d755dd314247190a1f747083922bd7683a392e5b7b898f39b8be5a659eb2a652bb85c4871cbca0954b69860e3e2756b6838f38f86141a7cbb1c2c834baa3a38cd279f880835f32879d503993428a3ef87843c1b2b3e6d25cb2697cb8c14f9b2fdf694f0de1a30ddeba9f98b7f213222638d6d8e0ca5892bf2fad6f92f226848f3538b2e131bb7d85ca68aac1cf50afb1de744a1afce0a5347cf6cef02dd1e02741893d3255ea2d6fcfe06a12d7577217aea46a06e7547ab7ab3229da7567f828831b8370ffcc61c1c10719fc7f91df4a49306144e7051f6370c62429a8bf9283e764f91083179aae9498b29669e1c247189cef2487f4d19a938efd07183c4f1e4b264bf132968371f8f8c287175cefd4ec6e0da6550eb8e1a30b9cb418272e38ea2dbcf2956492e67a0b9e6675bf242bac054fc99be24688daf759b2e0a59424f94bca549b073fb0e0092227fb7cd07f5cc14b3a093b7df7224de93eace0657977aabb4f4a663faae0a6e497bee76bf39d0e203ea8e0ba2813ae930a16de3f1e3ea6e067253179104a8dd0e1a40c1f52f05693eea0927b7638930090e1230a7e124a3aababd31a3ea0e08f8e4950a93d5449fb9fe089afd9f01ee44795886af8708297b46bcae4a2d7347c34c1199334df876d18e526267851a4e6924cda2cc1cfa153ce5a56417fe7d3f0ed735f76a694d2824509aec9b2aa129fe4f7124c82bfd173bc9c1494ecd943829b4a26d1d446578f123b829f5cfb4cce18c14d51353d6ab24b9fae3b317c14215957cfd1e4a0d23a7aec618c61f8d79acd544ef2765d1843189eab9b4962c8f8a44c29186eee93fc5db47d6f1a18aef6b88997d459a7a15f3822fc2e0977196ed3892fbc4afab6c6af62ded3bdf0ed64cd9e6154e62ef1c29199b34d8c7225279b77e169a99049ca319f42b3baf0ca8216916977d25632178e8cf6ff397887ab18c4859fc65c4b63da6ee1a75cc1bf3ea69613ab0cc402335210e201b38573339ad32afcdcdc7486316ae1473fe12da9fbf9ab092ddc14a1bcbf676dd326b1c218b3f0ccebb2879eef9853c3853164e1c9336fb6b10493c4fe6251a6b459f3c8060b37c9d5264db43a1edce84116c678853ff225c9418f27b1de76852383b074aa495a775863b4c2ef94426953a28415be6576d3a92fe7fbba57e1fcc69c3d46735b8caa2abcf2ba20c59494100f84e4302347421f98c318a970fc4b8cee2c17156e0af1bf13eab5bf621b366ed81ac63885d73998f06ec289a6968c610a3f8427a541c7602a87394629dc382997545e496390c26f194d16b49cf8f56514feff95e5bae06b92b8e007a8046388a28bdd3116c6b3b44eee33eed48f565b7f804a30863142e19f7f78c969adc600859b3a93a6c8de123ee9136ede3a9d2d494f954f9e702b5dd2ee2525a9d65fde20743079143208d7bc664eb6f12c2934c4830707b4478820bc241a52b3d2a970312924108e5853255b9b88af753a0801842bdb569a95e1b72af607573ce5ac4566d60f4108e903229b103e14930efe623a5b65101142f670217af046ed4bfcfde6380fae9f70632aa36892ee5e081e2ee40eb89adc659a8ecf983c7532b93c1c6b1962072f7410cd1a3dc70621757045093f29f58df84c2f840e6ee93225beaa846c97819039b841cc6387d614775b9283e7fdb1c47b13073fe8a4ae9d55a632cb70703c89362a991c0a7983db62f549b67f0a9d5337785292848e17150bcf671b3cf541491354de32f98f0df9fc474d529235683fade152f944c6104ac757480d85d7c9b9e4f24d2a2b869034b8d95b2667acaac8592168d8b292ba95a5d7ca6a6227badc8832adb5d96629849cc155add2544f9331055d88193ca1a5ccd365cbb9b7331e2165f072f41cc38bd081410819dccaaa26dea43c064fbb6913a36752ed139e1122064759a6
de58f299a76f43c2e05da6ec260f42c0e088e8f0532ac542bee09cb0d52aaa15ab6ca4215e303c68eda0426751e2d81f8474217bcd639704c71a32810ffe048c06085810e286102e1cd755d6a97121227ee2ee2c14b285668327cd9a162d781617935f095a5469cddeb01d2159f0d4e4d8fc29b7b66f88633f040baec69769cfe104f353052157707390354a1eb1b9b7dc8e1e676020c40a6623072155f0c3eebc52da890a7e7609cb67494db40c9a827397fc93a9d50c6a355270e304f1d5274933db7714fcbff093c28d160afeb8e78b7556266b2a4f70940cba35492ad8a993e2042f877b8ef38e1f2b65131c33416de62b77fa2699e067a6cd256ce7aafa7109de565c2e95849786331f63d75cc792699b125c932f971cfbdfc4d61592043fa975d159b935cff748f0ca2e5cf4ab20af261572044f1294e8963f4c428ce0274b3f49f44aaac269438ae08928f5281aedf2a5576318ae299d820eedb030fce021a47889f5bed9e31ac170e4337f12d4f698ce39a506309c73f7d3fc15c524d4f88527c8e0294a8ad0da3d1e146af8221d835a12c497985783357ae16fe512343d798aa95583179e58963309176376e1c5562531a627d97febd5a0862e8e13934c1a935afcb9f042a3896ad255ceee142edc4ea33953a74a11678d5b38dff26a5faaac610bd4d252d53cc6523ebe374929949c6d3ded1ab570c3c8eed3b1e7844c0d4ea1062d3ca9d15a4349f206dd658d5938a624e92d9af4b11ab2704eec6cb484881aa51ab170649bb8943c5f98a49260e15772ed93b47798bba4573863154b9556932bbc24956ce752526c38532bfc7cd299c99a84f6a4c40ad734a891ebd09bdeb40ae7e2db7365af0d995785b31ee33dc4b69ab053e18ce7f6b97ca5aa2e0815cea893abfb4cc807119dc22ff93ef7669329fc1c7397f624052b53520a7f54ca1a27a6a706293c19a3ab9f761ed3f2d41885a7740ee5e15ab49fa786281c193c574a3108efced4088517a3fd491b99eddaad018af2e7203376ed27fc8d226318b5fa32a527fcb693a4d0e17527bcf2fc229360c209bf242d6ae9275594ba093705eb92c474a5de649af083d8f632a994269d3299703526d9efa231891613a9f956369d82bec6257c19a1b93c98df75f66b58c2f5eb1c224bf4876abf46253c95440bca934f094f95e7d3d698fb363a09c7be93e8fca731d32809379d4cf25dcc45a20e9dda3d4dea21e169cca8a8f03cc2111393a798fc7365ba1de19ca7cf5482fd67efba117ee73221c6ce92682746389f744aed5db2b7a88bf093d860490a4a9570a222dc54229b7e6249878c13e16dc93ab13144bb578c082fcfc77d921683aae8107e8e133c542d9ab8134378995e72d0496558490be1e852d26494bb95a0238457ab3177855857ea06e16679eb7bd904e16d50cb59b172095f6d205c53965aeb3f80c0e7939c248bf9836f7762cc24d39431fbc13b25b37cdeb4f421e57f62e1839772694fa3376992abb2072f43ac9f9f1c353c470f8e7b2ee93f4919c476340f8e1aede459d4a92e59c5835f6249a89693b47cab77702d78fea4c4143bf81b5e9b654cf28c8bd6c1ab7abff9381b659bd2c1eb542fca34837a14cdc153cf92e54b64cc3629074fd2317cd05faf99c4320e6e8cb4ac3f1d38b8296d3b3da98ff6e70d7e494274f5c9bbc1db204aac147c94acba0d8e18d3a933792a2b61367892ce29f8895057425d83b7224e9368a11a1c55e7a2d4ba3478f2894dea3e06254e0cd1e06fbfe958159ec113b495aaea246706cffdac6c947c19bc4ad34eaa6458fafec8e069b32498649e9450d21b83ffa56e179e93925c3b31f8395beaa06310174d1406573b780795b764d3ad81c115352526a95436576b5f70546b589149b5fb52cc0bfe59b68fcd5092d6a02e6c822e496df411173c314e35a75611a2926cc193d3adc499242995365af0c48ef92cf839b7fac9b294b1bfaa81052f5d4cc9546318c75a8e336c7301b6a8710547c64411954ffb75f975b3a861054f94a7f74b22c4b1d606031e3d921939cef81bbb8053d4a882bf1ed38926a54d810119093a860a80440d2a787974cb6c545b11969f829b71b24d366d1fbd53ac420d29382a664a26e524cd53d328f84135cba938eb8a4d0205df359c4a9935f304578430355127f59cdd56a8e104bf929c6d9427ab09bee934234657123e1e66420d2678c9724aef27d59ce85766a1c612bcb70f9d4f32318da5a620a1461a8fb8f67c2895378538d636063594e0cd6b8d099e3ac69c1a4970649204316e33e2d81a4870d475081d63eb1a47f054f455b86d92e43219c1134e565dd0a31a45f036b5d34a5e0f2b771a86a366ccb744cdd5410609c3cfdeb85ce2130cbf5492564b101f6078176335a7b9f42ffc5297d926dfa619d5f185efb5694e79280f0d26bdf03b668b6d397607f131bcf037e8fb8e4136bb7073a89cff4be7a03b47175e9c45f1de2498a532177edeb0a13
5658d0b3fc368ebe499bd85a745aaa5c5a5a5ad8b2d1cd5996d32f7c90aa3ac85a39efb2dafe834b19316aea651525a979d7432b370d409cb7565f2f5f6b2d867f409c2843b167e34613d97ca1883c51016bafc976726e92b9cd336a16e35bc24a132de8c0402882bbca03fa724e6f094a40ba202482bfc543a989884509926a78c0920ac784dd9cdc917c82a3cb198045f93257231d33a5880a8a2aa9439f9d779123ba9382bcf5dcca620372abc284a926278ebade0f7297c4ff2765e9a63b978a67074f0527a46e9ecac1a8eb552b8499019cb394eac7c6a258090c2919a934af2d9aea6d31f808c42cf529220bba445e19c50cb12735072d2aa79c0033f304307395af08394630782c1091460089050b8266809af29568fc98f63cd860db4338080c20d774a7fa853326f7caa00f289f5c47622198413209b607e2ec890a24fb461a36f478f3304800a104dfc692b9306158eb53274ec48366c9041c67b200b29403251b6e94c3741e32a83897ed6e6f35fb9c95fc274a59498ee4c92f2692d913693e6e4645d9f5209ad2fb99a0e3a96ec9c2fcbf13b5c0008104a9c4f48ef52f94b290f90495c96b3267d722294dc0a40247158caca97d96d8938b1a38538a951048984df9e1a640a52030209e7ab2ea9ac8658007984339629be42a6d15ab205c411ce89164e9ae9d4f0556a845f16d3090b1931c23769fdf55bf6156db308ef3af3a56c49d252a65684f76325e61c3e9d08c7f2a64a7acd92c8204684d77f3526671f1428b3809505f0eac698c01866a3141b1681a4070f34761420003f301638880100e8c1030d1610200048078f860ecc78321020001e3b12700200000000000040031c903c181080360080070f63830002f81e7f6c142000dfe38f8e1d360e0080014020013bd00e92efc1e3c60d070020000f3800e6e8b103070f3200d103f1f883a7b35cda54a1254941ea074f76b0f8d9e83d26667df08210dda333a5b78cf0c18bbb3ba9e424d9270fe3582b65a03274640f88073aca2803957180113d608e1e3cc871f2f08333f080397a1c1d65a071e3460246eee0ff5c29250899c3b17c86c6591968c70d1ee438374e8e049dc1a3ad871939908719397aec4064e438a30c1dc90a46ec80376e2060a40e6794919481ca30c0081d30470f3270f0f81b37123032077d4e58ae0e79e5e06ccc966eb3d6913878b29da04987abc820038d54e62370703b5b8926e7134db4a037781e266cae5c991137f82656bac7a49949187ba40dfea6888ea5a249c86e47d8802cb7bcc14bd2c81a8e5a23ae327047d4e02555f297bab524530c1a3c6c03236948d014ad53e3743682066fbb2f79d0a0ef64f8f5032367f0525ceebe4ed9cb548f636d2f306206ef55c3e23a2ea6fe348fc2c3032365f0b235437848cb4c2ae9841132944e7b3e694aaabf6a640c411811038e84c16ce81801038e7c0147bcc06c46ba80235c68d3e53b2cb5ddd59855a6ec4a82ac93c4890ec71adac1bbe08c912d782ff7266282aa05cf7a849674266ebd732359f0c4746ad0a7a247ac8605ff4657261393365c5d2357f05cfc820a3a4647acd0d5d869bda554d1b2adb24997be3ed748155cb1fe7a732f51c151d154614912b5644a6d8491297c9d3d44c9999d4fa4c09dd858c77c5a9c9dba944e6a240aae9647cd3146050a8e52a7f306dbeebf1c3ec193cb1c3dbd5a57ca718233eaacb53bc42fd5db044705cbd94c32d99b3f650242e5ec9f434339b2046fee248deea6bbc1481abe5d7d7609db297c14e130a204cf746534fd75d66b9f91243826c5e612ba0499c7d711243826dc9755b83872042fbf9aba4a52beec9f1b3182e7d6eaabb9c5926807478ae049721af5be1e2c839a458681c2c860f8ee276d4d8a689b8ac0f8857f99b67bbdf201324375b0039d006f07ea91c3178da5abaf9b8bfbac54963d6374b5a42f498af4c2ef7c16d761ae1e6de585ff9bc3ffc489fea9c3bb703c98ca77a5492c10d185f7a2f557a6745c524a72e1c6972c4f92d4ff1b1a2e1cb1a426e749a56eb1b6f03d861293d128b23a5b0b63ab57543b51a48523ccc6848e258df8ed66e19aece1572dd876c84c16d596b859ccd59d875ac86aac0f3317b3665c9f51468222b1705318a193dda5d461926c1081c589bcc273f1e0171e6acb6458c415d78a1361c5ad025521920abf3a8926e9d2a2c2f9326131575092880f9de2444c514ae1891ec5b49494e5d16344486136467122a2f0dca4f9594b9d434dec1222a170eeda63d558470414b956675d5da98b55dcc58ecd944294f040e4137e6837ad14cae4094f8e9afed7a429d209ef73526ac4278f08277c0daa4426b56a726e130144426413ce68b4f429d4ff29318926bcffa825894958d29e2a2299f0d53d26e5625a4c38dba33ecbbd0625d60ed423c7a52172093757ac7faf882de1af7528d9fb531d2295f04e1263ccb499bfb3654a38ea7fb3f5077712cea8c7a80da619b3
7f24e1c628316dd53466cdfd8e924422e1e5cf1af449efab588220a198669273346123f2087fd4eabc334511174d8ef0c263ee7c625552926b9146784273bf7f0a97968497117e1ea1c4093626a9fa908d165984a222fc79d14a7e29633a2713e19c1232b52631846dd2df21c24f231e9fe7447ab6cbdd219cada4ea4dbf7a6708ef4b8ed99238b710ce79ce575a25c3a8247f4708dfdc04bb3341733708dfe45ce1bf32a396ecde8908c279f3981593e8196d0a84a773c858c272a628a0c18d158800c26cc840e40f5f5d7a0aa33378227ef02a690e1dd3db81481fbcb0399f7c9d63dbd927c2074fe6a0739a5ce9e63e06f7e0c6e8a54b32ff51440f66e344f2e089a6e65cdaca3f2e8c638d470e3d3c38a3477a75bf8f2a4f9ec81d3cfdd7bd72ae881dfc55cd49a78b8cef31c113a983e396ec73e5e0be5d693c113a58b6766a39e334e6bd535082ce492c3207b75249f27627084f66de89c8c1df10dbe43e423488c4c1334925d1d2213e9f5b441138b8c13c2c7f524a30cbf937b832dae3b73fe90f1d22e38c3272f06017718327a894fa4dc8b5e38c2381481bf0d0b2682f372e1b1fe7c640840d9e49ba4663ae18f3d1640d9e8e661f43079d1abc12176c5465280dcea5b510ea27a6f2b9d1e0da98baa4677426a5b367f04db0f69c3d9beca44d3378a54ac7dc5c922acc2852063f9d8a91ae41e54e840cde6fd0d817f6a9fe9268888ca194e7e647dca6c5802261f0c3fa4f7f776a44c0e0da6b124df0e0f9821f84ff28ed1e2c27f18a78c1fffc314931e3f29c94d205376d28595c94bc696352840b6dbcc875857d67d85968225bf03e0923c63e5c98f24a440bce69c624e98f75b97354240bde5c677efb64b162c2388860c1cfa4d29625133c2ce65cc11b1d3de4ae7e2b78a9566387f6dc4949b10a5eda7a964a57279e3a112af89ac4565b26494e5a4e44a6e09fdc08db7cf29d8814fc1c63a6a376e9f858120547f9d69694363d1128b87e974b8e0b13b4fa2e66883cc1f1e097c5fab36b947837ec7870224e7054d2ca36ab8ccbcc399126785abe6f35fcf4faaa66e428c18930c1fbdc1fd7596d3c9125787139c7aaf0c15d3da682481a8e925d42f75889abcfa8031125f8e997bb44395bf90f169124f8a7cadd2e954604096ed7e6afd4ec27854a8a1cc133612a626db3c9304a96206204b7840a139476f12ba51529825faadd3d43936ca3510c1986274bde60dad714228c0c86d90086d9c01b84fcc26cec20c41766c38c905ef871c1b39a9a92bb537a8d7208e18517976427e194a0c2ac786408d9856361a207cd18ef563a3084e8c297f99c73c838132ade40482ecc062edcaecb8a3b215ea283ce8d63c697a16347e2b35bf855258ab79b106fd374b185b75d97aaefcdc4905a78fd665f9ece3708ad68e1e7ce96549b204d45ed67e1e5faa8d96252c951ab0e84c82224165e79b6a9d431f66c92c2c24b258931fea285f50dee2b9cedad5ca592f8b7fb425ce176e53bb53f93ec3b15d20a4e561373acb63c261c8bacf0c3924a259fc5ecb0cc2afcbf58e5eadaabc28f9e93f2183e93688553e1052d4265f42851e1a9bf7d095b5f1f4df7146a65134484923453f89e325c9f7621a5f04c5d5b7c168d145c2a8fafe066c20a198527a63abb8d4ee6561d46278488c2b55c41869728cd3a49a17054ac650e93724c1a43a0f063844713f2999459cc2736694dd46d5838c4139e86133227f1944e38d2653429b73984137e30952b99124c4999cb904d3817ec52eb5dd49d52856802f1b220a79666ecd3e6347a674ad7ee92d33a24138ede4a25d3c3c7e5394330e169cb49dcbc09254e8ca6117209bf4dfe31c1336b644b2010422ae155fee8aa6a62fae8f80619680725fc607723b5efa44d2dedc8914c032193f0bf62c9e6717d1e2b8f634d0737ccc881a3478e1e4842334b692cbbc96a5b5ab9a79989cf7539e57a98a16347ba71d046c2114ff2ffa84eeadec8811c0881846bd1a3bae6ec0b61bad39047f855ff224598609bfae4f164dce891838c33788438c26b8d4fdebea2be01010b42421ae146abadca781393a91fc2084f3b9a3ce22e54abe82cc291725f9b42e7ef4214e16f2879ab11a5e562851792086e4e74cf10b2c7b176e3073b7a98f18620c2ebfe98d76e2d0625ad9043783ac6128d696f88213ca56e93d41c5eb17336a4106e8bdc86932f2e688410c2038b41a824ae6582d25820188a43027128100a2cdc1e00031408001834268d0482e17014a6d2f6011480044d2212362e2420261416141a1818180c0642a16028140683826150180c0a0442a1f0759c237b013bd977dee17e4116ba8898ceee02846ec6cfb5912d1c5793b6e8b2e934af00b9d57830d9b6bc2eee70fa4906a4170f9321dc06fc002dee56d2c3c83b83c481b5fdc4ddb1bd978205f1357a0a6e3695266b7b4ae15595c79955ab534bd1f800b81046f8b726249377978efed9e71478c071a
bfedb9de803c542d383f7f74cee3c3de4d65506971860aff08484f3254eee1fa4c751ca2b4ba48dd8d02c0cade7905209e9546419c6193fa3a944bb6bc0a47e3871ed4f12229561893b939ce6eb22d141093556b3fe772625f249934a0e6cdc8300b84d96f86323305118c1c0b7f4a13145523dd1b9340ad03e9e593c0d8a3c9fc7c7900f9dbe79d689374de94fcb529678d5c7235b16c40920dbc075b5142448f630004e8806fc62b0e4a90764a13c9d4fdc60356c5ff0444957eea424332eb56faaf8518cba5f4e4eb06f13a0c88ac47ebcf7df3d2713d73721b92883205cc62545c4303ee26b5e89f74a5a664f1e98e390a9e92ddd04438ef4abb557aceeab9968fad88dc0484dcdec6ba6a0c0f71cf8db5424bfaff1c078211f88b812f5a5446cf68a2f01b6245a2141bb01b4b1fe1a15582527620c2d366cb3683bbc3bb4cd4a4b5c8a737eab65b9c26fc13183737aab25f7c04f4863b5c499262ae488aff9d7f2d21acdb27e23869fc5fa627068f293e81b5b1c022a163efe7d558c65fb727fadc93ece3024312239907f0790224cc51d3ccee47a463793cde24c9e111e1df5e4b25bc4cf0b73ebe08101da595e5d5454f8178c8fd92f6be94a19247484f39f3835c03009d7e6c98b97c64aeb3d8472d0b75d632f4b6ae6901943e89dba02879e7f732f33710412ffb045ff8342ba51c5492570e259fe60cb8b7a0ea3750eb8a89fb09e574a8a9b7ce4c762530e42c5ce9e1a25a774d4e681a9d413c4de1e8941f49706380a372c342849008e39ec05a517cac6ea61daa47b45c0e2e326b0eefeab565e263bb5f6b776e21eea2fc793eef39a7dd3d80ad4b3d5eb8573a852d0e129088de7c08809c9c04304a72394961a99474f08e525a211c748aa3608d3c0d965217a2536c25dac75d50e0d016fb150971b050246442579cedffd17dded06467ebabe5a4d8306fd21a82bc9f50a6f5a44c4dc7341d3f0cad7917ea9ad1f101feca68b96c27b4314118aa5c1c0bcbeb0aa24563a44d368af25d106bb06c91f917c477be39016d95e1be895fbc8c8e4bc636bd0bc58abad37c4d27384523b4f5b43c7c59277987567caf9c34d28bbe60f60772b23954ae29ca4ca57c8bf9ee29a9a752d1191bbcb297bbcf3becd7ffa320efa6b8b4ba1ecf196ec087bbc11b1d3d2c6ad78af8e69b5b356cac400f15aca9c35a06661e47aaed0e72c90141301602f45fe1144dea918863b057e1518dc0e15b2a06922c522f6af434d081662a09786eab66909a0c04449ccb3d08460a693de38a725fdc7c82ea3c6aa07fcd397fbc3c32301dd0d74897f462e97e8ae808e3ba888b6f1c6a3a1c8db5a6f4ec4de343bc08c24585a7da30597d70717adb705767e6abb507890d6aee044138275183941b6eab1373a9bc6323b3d428c95e6121318bf63e9c1c49c7b432cd623012f4ad77a867cb5cc4b20a5da4cde299dc93194dbaa0101e5c239e0f3265ac74c8395d641710be3fdc4dbae8e84ee64e34861aa52a0eb4e3cd6a6098c8b9ad61b41a15d799564ec809ad5d337fc522cf78a7e4feab2c6a102c1d11a029a60341a274984a0118b4e7278d3f2becc0e6faf3284e13a2c24e9522a19ac521532eec5f5c9aa2006555079764b874b50f1bfe5401ad542ada8d82d026bd3010054995b09b1f2d727a78b59e1351f23ae9e50d694f103c50bb1fd8bb59375a854e601d4b35b6720399c29b7cc4253d5cdd2aae9878376724cad20b7d886410e81665ac80bb22c82ef182358a4b066c4086bf30253fcba87526e0453d908b85a3302b12706bafce02a963f09701268c195bb70dd9afc2dbe0b4d17ff0ed1c1d6ee35413d26bd45b3ae3446e85ed8a7d55b16596433231491e6fbc8b83daf55599359c8cd76dbe5cdec4a0c20990c16df8112b9a813f310e21d525567e82a134693ac5ce24f3dda1e15cd59d3c0e0632d5b82df283cad60af80786f2a3b65ffe04b6a0818ef667524f444ca5b0b6b8916248708483a38c09ecbc322eda60d6326bdad9372e2b7d6aa2b9e0ebfdbb2b5ca90e340d201693ce3bb9a0507c7daa96821a655afeab3a0a00513acb424b3828bc20983a40fa2c84d96664c2f9fb8f94de1a2810f0e35a56b5b724a80e8df9b5c38b0f27c7bd6f0f5972e6f7b3a4179ca444422b6e25b8941bdddc951ccf51fa1d242d494e375204f77d50c65cc44d4e7a00c5dadc0dc428e561d7733e2f8737ecb1ab864aff07e55ca80c7c44cebfb1548b440bcc6004cdbb8308d9b17d41305c5a569f1ebab1d6dc96e2778a7b1e0562f88deeac8688fc66953f759526850853a2995d96b825739bd374bc3a36a1901462ebb9f4e5c417a3d2dbf410f8ecc8eb5aa2966aa2f09ea153b1a0675b5ec822813bcb3ce5a4abfe0c5262bc3eb3c481afa3efad45699651ba2614ec77fd7bef6dd0d0944f8599e9df295b5f9e27c2f10113c8b71213f78bb8e37e02ab1138580a4088f2a208f63de936a65f9ffdccc4c466acf
f12ee716a10690113ecfe226770effa95677ad257e9b6e6bb4e69cc961133a6d5f06b368998468b4b5e8f446801e8944210589c020a5b25899a078abe3d364f970a1bd868b416fe2d02ef91d58ba3b509265c39e2e23b68c539c34ba8aa5423ecb1424f68307036f9bd8bee3e6262400cec4f40c918013702a76fac06294ec0e0107dd41fdba4c36a87a2d18cdf196b914604bb694e94e486a65d673c34b75b9176da71047b3426c1bd9979ea9029a25360aff8e89208ab2e29ac5dd74e4d2f499eb5d0ca45e2c7400352877e6bc57cbf89a9a314716bdbd23543afff4f45faaa5d5f2c27595248d05d3feb8b10b6462a192886321074b49b67e58e0fa29972e4b225a95627f0bacf65014058c9d4a8e21711f975e2887da63da874df70ea8595262d96d8630908e2432f60d8c621d6af8cdbb28a91e282a422886d3e4a7085af0ede094131a85f59168a26828ea08d8d4be7e39d9ef84d1545d8810da19a9a26e06384432ed0c6c7c322f39a33e5fb49342c5ee679283d99bf41ec9de908d665d29f87c9a93a0f7a8fb90f10d19c9c44ed978f07b50f49154ca0c2a85ba6ce2f31cbe26f2454db300cbc99a18b3831c0c3e2506f62b3bad0afbba0de61591b38e1fd9dab642ab632c222a130ecb33b543ae9405a8f5e570da7338b9dff40a29321afff8ff74966115216104e80eae833cc9bb31173d13ac1108f9cf125af7929362685f39974017a90a3708bc19035c2bc657b779e06f33895035c2e211f3b77f15a88a0be7752105ef2415ec95ae429fb3ccd6c448fceea02f833cf6ed5084550a026bc2acfdb907fe20335fd2e23d1b771171a4e4ae723d4c10be06d8c45c0759a0f2f05f72669aa668d83f56fc380737629dfd47c76782230e9a130810a45397e34506bfafc59f7d1c9e153ad615aada186eb50198cc2f9ced546674c34f853228181db2f1f804c61db4ce547cabc9367b1e1fce6c940df630c2fbf7958deea405dfa1b1448b93c419b4c315043c183462f651034c68488b6300d46aee8a57902bccd09f124144712f1a3ea11714b70b9b0a8321b21349c00427d4f9be67279c05c6446a6016c05db2129012c6a319d1b87c82b597d2777a25d4ebaf546e2d883a3663b367e89a8ae3fe5435b4615a466438e7d126aeb268494d480093e7e05fd7bb71da26c7c51556dceed88cb8cbe150480bfb107351bf6b3e3e71b5222b2108c7b41c936032097d73ee4aee705cea528290e617b3c3af14d6fe647a264eac3c089bc8846d70199d455cb20dc57247861236a324f0323659e36e48587df280d4297c80ea9a04a26be252034c26b7a5de16e25aa9a2124b6b738c69822fd8953ebf84869810f9c4b2dcfd1e492c51d5ffc4928d51c273114013f840f43b3057f6bb6a7323f42a455e89299992a54676d980de6051d57b2b0f8ff00f05981488107e40aa43209ac5ae69a39571572098dfb1ff8a86b3372dc762350d75a1500a7023b29841deb2411b9db3a2a7c3c84d52f92bb2a5023b42b7e9b76e3f85fd2b18493ad4b8d8b1a8f78d09cd170c67b8b6ec88abe92ef0393883673d8e98b3fc3569fad858606bb930667d69dc99f07ae93fbb005403046f4c825713b9cbf047709d5167b8caa4a2703e22a8057202326c40955a5b9a6afded762814cd7a541a6dd7ac46a95ea0d2bc04d2dd68e3b774579c8d764745512b699b84e747f9c0a283675e38b4f52f100b6872729b2822ed3cca21d807dd8a2d6052a6ac10f3402a04f8014b57caf353214a7ff9c5c9444bb60b59acba61176d167a40e4eed5776b5064b85841bf0a4bec4c40765d457f284a770ba36b55f37e518937f3e23c048b333859000734dba9776698356a7ad5d65aa2747d04fd505ccdf0b26501389aa59b77adfc9b5cf41c22828df7c1dfb61d26f61a7721f973f10a5bfcabaa775f391bf82cfd1c2ec21aaa3f98d0a58e1e887f6f2378697c8387c9393d821e2b2d06cd7987a6830c88a052219f98cc0851cc27a02c827aa777045629d1e19babc7e1c9ec6ecbb401ccf41b7783560e7b5be467160dff8a2a2d2a9e55118a8e5c92d357e896d5df3e1991f6ab01e68292fca17680dd090cd544ac3e1777b56bb7422a534538897441fb5e3e65134e60c19dae28a98a12291a2dd0e91e0336c567f036bd321b677dc35677756c834f68504966646df6349871e4fdadefff4e1f059952cb80abde39c5482950c1652fcf0ad78f306d48740a78108f659c045274877e984375585ce4c95fe1c7a0a4141b9e4348ba88f162045f96ad7c8acf339bca55ad03dd517d569b14cb042d7b8ba7482acf6ac51f478d46b341df9ab0053ab78a74817956ef8a741feec35ae07c470776d57a4ab1e836f399450a53d549d1e65b298560793fea1443183501d75582d45c90a3181569c857194c42b2495a6e1fd611b0feba2e768b6d0ab1495ec8f39c4ca42904209e44a07d40dfee04ad448db4006c5b94faee13553d185df5221bf82e3
fbd89013e9038ba7b2c960390a76d935e0fd46a87530f574e4ed01eb24f6b804adf3d541901ae74af3514a3de6422a0a848b8045655f6d33156a0aa7f0bff95eab9cf17ad56201290a5862fb86c6092a1a3cb4598d934575bcd7a0c11d82f7b9f3c18808ddd07a8bd29aae596cc74eafef37b198d087b6ed00113f50c006094d87661f6b6ff4194f9a365c21460dcfd1955471d0b64486274eee10964d6fadf11d104ff16db068ee5983e7110a042f23b2853814b7ef0b83b52cb6f997c9cd4374cf7474e5441d0c8f750a55b1b21107582a6ad0a2762309bebc882151882d367d85d17265e3038d9eeaba7776c4079028e3367b27ed7ebfd62bc35e62e040259ed89fdc3c087f963d6e29b3c57d80ee2a3e2722ac938fe8c6c77cdcd6bd116e17d728e87083963c5731ae91de7d128de4ec9b751d1466c02a867ac4e9c914ad04d0d01be9a1103ab9c00261c052356e81eb97f42301064502c50b48bf4ac32cc2861d18f13e09b00889a0e44f9258a072e950bd380b88782840a8b5234d5ccd4e74dfce72948ef0501fe347001154880cb7abb9ecffeac24a90c4da9f87eba5ddbbf3e7de77ad1e98f74fc9978fa4da072004a86d5d03440901e392dc235a6208ea12bfc559d1915a9cfa07c1a2e4d4815046a67144b9e6bf20fac496d9a44d6e981546d3614a956d686790d6d1c53e1dd4d12df2ee2638f6dc65b983b39f731136e949d43b0662fc9106e2b9e28e1c92461102aaab8a68c4e8dddfb83599803537636fb88830f8afe7ac070e72f6091e6ceff24958ed68d14baebe77d772b4e783f49212c67f36cfc8a139336b3b4c7476b8e70cfeca9ee8918c639d2052163a9229ce971711bf3356a9a840b689a9cfbeddf399298d16fd286594d0ea38fdfe8623382450aeb05fdc87604be970b9263527cd870e4f949c1be6513162095d4549b561e32b2a023d745f04530a6de46849d3cace3f69b724ca2100e317f8029fcebf088484a2804911317762d52f50365f0d5e61183133081ca0833e5ba8ed4602bb843bd494c30887e7823aa10730a6b877bf4b1b34bb9bf29daff39765befaa78443d45406a44775c87cedbd653c835b55852df95ea33d2f1317b730424680cb5b82bb4856fd92fb04535ef835d81a2237a28b6b1969e00129ae1bf76278ba58dce33e4a06ea7428d422154b09475764716ad5c402bcc75c049a437ad5d907b5c99dc0d74fa428acbadb1125cfa78368f115ca76bd7c278da14dac10c55baa4bd94ea7b610335c17d5cf2ed9559021ea9f13adb900ca43a5b9863fd330a8d2f74b1e0dbcd1c9b1c22ef9cdd585c8e6b5562a95d5927c009c7d25945189284f06ae58ef2ebc1728f5e718833b24621b65a26b9d3e0f415cd78ade3a558c40641539d592f9c7fb81de672fab05ae47311e68c70487ef352f654e43be87ca9a508440b5d29c64b299236b6b874c53b6db50792f5b150a65ec599e4365bed0143ae4def1d7ab94aad2392e4b3197c3b88b714cfc16fef25d356337dbfc68cceaf48e0d09b8c4286c88594aee5e26ed40f33dd9cba7142d60c422538bc279bd6ed595293a339742bc31fc02cf25a3e7cfe24b6d3ff5d8c061a81487508b8f8df7bae153ba355395a3b675d992eedfce65a48de3b931d655f4bb0c2d47d2565be9c83db159fcf9b595f5dc25d32129d35bc11b8e02735e8ee8f83281b9bb2b924d98203cec4ad496904a0e7722b0476b3d2380f99b7e9874934468fd023443842cf053fc1952218a4c7d187a29ff1828c3087cabb4db22ee549cffac7b9e715490e5b3f80a5acb32ee4439a8d4e027294134c1aeed4270f764b4295079e645ec1cca1845230a5091f360dceb35376de3788e2e95401342effbec69eb685cca9bbaf0694fdb13061c007edd14b9c8b9b88704a1b9afa41b7eca47e5ad7535437439e64890f4b32289abba684584800440c420271dd92111f89cb2dad53df522229e18b422455fcb6535f1de17d77b65b1bc5c2faea194595a9a702103cf0c2503dd967a47f20d557e98813fc5dfe8736dccf65bb032408a30cbe42958457b8039541c7f8ac0aa9648620ba29c8214b30552f3f70b695105b4f0031f0d8ace482118813f259213d212010dc99f7bf2cc0fe01cc9a244cdb7eb281032a045b74b1e2624a3396345a9422389a5ad30597e2a3bda8dc92eec34aa43d6b4d5b6482f7c6f0a39199eb5085dbd0878f6cb7048b682a5de6a72477b7d246cb1e65ed04d9c32c1616cf01b5b0b4901c5c896901aa5822f16e0d8e93c9168375e62ca529a1732ef82391a09d75c547180004fa200564b4a9e094dd976eace41e6f574a39c70530c8231c898b41ea8f721655529b162793a4f6e300262bceb9c43324bc80d814c8cad726ee023ebf282517f02f32c8289edc49511ae1858b7ef84abb214ba4c3e3ba79cc482228267324c9a052b2d503406737b58aa8724b4b90761c883b89591b31a83af023b580f75466cfc349e929a820
e24229c599c829ded27302d5040a365e15985fbe35eca6a22204843b7ae9971eb3b431a715f4b8f13cf610dc5443258692277c082d5528917cd7c2225edc2958a442eb0ac94f803e53ecdec10595411ffd0939e066b66d34b2893efd03924eca4de8e1df2b586f7488588e7cde4f748033a2694fea20ed04d292a8b4e6b0edd3aabe03951a22104a0b0c2566b6b895ce710895983dd98949f890881f96dba48f1ed934e63ac7ea2296183913c9d911499df76cf218d731bdb530e16ed5fedc8fc7f3a1a75c47a39aac3a3bdcc5d14a2e2679fa63142f3da2aa2338817870ce50adfea935b9a455a7b5d94ca34165e2a72f58a3d42c62e3b46483ea64e2b9071aafb555f8800bc24cfc5e4b986a3b787e57287e4e59959c2fdd1811a8eb0413b60fe1d912d557125ce67a978eaa1ae2434923947f61310989f1f8bc94aae4448bbbf7272e3f256e7fd8fc65431bd70731c5a8a0368aa57332b2f879c7f227b2919f97492f999d2710464a2db5244cce58fb6ac499410178073052b7ba596a71afaaf0d1909a2c21150f7c274be256bab3ade5c3cb05eda2cd9f5072d0c0ee7948c9d17dfe8ad36b34d42435e081e03981fdc1fc4ce29d705e09725212cf2ad30b764dd982fd34d433cb622076bcbe52a5c133ff443867a3a2fe82e5bbb6b3a68777091b3376e8749228602abb599a230df5e0989c377981dbb1614292354322b0fe185178add5a62b84134ad0f009fab6d435b868128c5d0402e5e8e27dddf6e720e9b422005d962dd36951f29eb987b1314e0244cf6ecb48abfd6e690378768215adfa5414e10c99787c04daf0fb683f823009594148391b63f4c67f38d269f14dabd278bec22a024ca3964b6ef5d81b51eb029d8ffcf5f954f47e25eab680aac43e2d10f5db67706bcb6b056a95cf629d17306c4a866beae02d90d01214308ca8fd28e4d44f6e379893846a214f0fde2822896550da03fa1269073a0cfbba498c3f2983cebaab9e20d4b23d505078d306fc458eed3bacea0cf7024e49f0e892a37526076c0b3a1e348d8e05a3eb047c4b572a7d2e423382f09d7331f60ddfaf9292c564b9aaccc469ce3afa3d392382437e40f143a23803607498fcf4ee0d2ce630b19d4ebf15a7a594f45048f5274816cbeba0aba043429c155a9bc254f08bac7547a8c4cfa013b369301df73972649c68c2328bc4e72e77a326aef8e6a69ce74f8855a49f47571309220e73b81d5c44987671851745987ff08fc0ea4bbdb81047aa6a2da8cd07315b49f3699d35669546495f3b2cbdf79a7b28003cbc206121ffe29e92a8e6bb992602b586aaaafdae64fdd8c183b4759a58d4a4672c6268e2d72e4964c2ddc1432fe12f4895bba04e36806487e61edfb8a97d4a10edb253622fbf973ffe86917a181dac9d73a863e711cb98482b50f5996f758e4e308e9abc990d31bbe47eeef37814b2f22a062b0f0c1c05aeae7b93f271db21af9dae3877a1a7e1d45711d825fb8b913e7ecb3177b816ba701fdc8235f978fadd1424269ecb45b0f8bcb7eddcf6cc42ce9f61c687f86986ddc343271968cd317a39f4e6361cc10afbec1ae21cd50fd750133c28a9830389c5da090ce8dc9a00bc85c80080914ee0a1713186aeb3e2280a56c2d32d198919a546d972c2ff1d74fe60e04f852d10066bb63c1381f71784907de66fe37c40411c350e24fb2551c5a2e1dafc0fd81e360b50fcc74b1134e1f5f2a938daa8edb0a0bbd142bbdf9a1f56bba8162385333754d645c0df20fde083ac91ea5583126b8bb39811863b34a32ef2a8f14298a91beecba28d28c377f35b1164544685deac936f413ea2ff9af459102b6fe4dc82c4c39d78a9f45782e8983cf6ac7d0ba7590ad84bc744b05d60132a25a4908a1097d992505f0740a4ba25ee446759e59bbb5dc5aa4bd4d962dc39c3d6070969ea98954eade9ce6347babfdb2cc4ef5d6b865a21571bc95ec7a124ff6637adf5d972b2c387434cc8a30ed1d06266a63b00adb108c49e4f15769057246978bbec8ba3b0ffedb31ec4b6b10eea7997ead82044869e9daf4a49ee622c69e727d42b4f50a8d45d7585145a6555aef25f041b5ba902009495d5f1fd9ab73aace59cecb0f8aff17cec0f0ea0a661718db36a6ca9318dc6a36ea364d7c7694a211c2517fb77a56662c6d3509d12685892be99dbf4213d89051b16800ba2d77d9ca35ef2fd69083ec50fd67509e495b36c47fc290acb486b402ffe38ccbee1304bf41c4651aceff14ebd27945d217b08d122843221370aad45e813a11284c80c8103839d066a2502e7ddcfba03661df67fedb6ec95052bd6110658d838d00d1feef8c5e35fe90b29f64b095e921f90b735ca773009802b09a98385cf1f36d7a982f278ef2e2ac5632f66f8724425e35da70c8eaea5702ceaf8c5bea8f2d6a387d54b41e951bd3c195ab704ce8b32061dc74fe038f16f7b52c7e77534a50aa6e96b2a18a0f3bc7e5f58c9c1453633923b30949d9
fb76426464399c16339431a34f6ba1a2e051be4b8a9c9a510a4500eb780a0fd53b832b852f212c5a4202ed285d58793c140854a701826a0946d360b8c4027ad1ea5f098d18a8158cf3238c95eb3fdd9fdaf268a833c99e4aa874df4637b71930715a5d907fc60550b2500f9d9130e4573ad0d74c03cb728c1127652ccc04f620388e5b4faef09620a8191264ae54ce118d7b55d8411f512952215c1834cc85be92078aa6af80ddeca1365cc4935ea8169707b0923c2fc3d7994b374059724336b68fceae3077d31177461b80f03f5bc859dc016033b8b8acd8fc9c8366b9d809cf5c027fcf55df4d55c480e0b0f4708c56ff81d61665c419c67e6f68e6cc009298a7c01163a75e2651f1bfc738f49a38d9d0cd2143166116234e6b95fc64d2763ec27dd9dcff4a7cf6ca7c69444b5eacaa8184d5b831cb51d66ceb4eb5487ff48c63b214773bc73910dc21778a655423b902a4e6a06a3cbbaebbf9cd5c64c56498bf0e7e4bb6d037c2a23d3aac0158d1e6b935e5e6d093964d2326229cbed1423d0c3895aa397185c0af1084ef20aafcb8a14156456873e95e9cd473bea75d7ead48bdc22737206de55c21cb810d56867a3c5613e6665997565b895b2b145011569e3bf54e83e467badd9495e8c76e2684a9a3550b80215ea17cd84af86bd9606c865aafc7ea78444c4fca11edb9317eebed2775f320d4425d494e59775c517e262563f53d7da8b4c4e72d79da700b763d64ccfd2c4e6596950465b0de69c794b2fbe49ec06cbfa96cbec4bb18c4485f0adf6e1a54262dac6e87591ebca7c53e058a16251fc2fdc20b3213783893599b4be1a8951a622f1555e6c9cfaeddcd5fddcf41ddedbdddb5a25768386a5e6117d93620a91285e8e98d4584d9e44e1feaaee09f022fa67b7063a717fd840d6047d71babbbf99317776825a63a562ebc0e0a222d2e19aa06b1ffc8f49541f872b5a42eaa3acbac9d7232e55aab8864d4bdacc0624b80b668a14d2a3035846ee2a8d4879b3c15a21351e935cfd739db6d9a43b5fc49be476c54b0833fba957e0c56ed93032edba7f91494989f6bc1da10889e8099268c7e9288ff2f03b336635a2f6bb6519d887d14f4530975fda8f5f5960008bcd9179115cfb5750ebf9e95ac76f786496178c87f01367e836950c9f2a53df2984c13d0cff610faf6345cebf43da94547984ef988cc0a7ccbbf1bcce22969525218a53cd08bf5cec0b74effa801752e303f5ee68cbff6a2bb1c4b98592fdf0af5e9d2012f84a96dde7505fe3c55dfe655959c6f5b0a050bba29e43e3442a76dd22640b2a0d88aa4b3873039b029fc1e1148411d2137878bf8604", + "0x3a65787472696e7369635f696e646578": "0x00000000", + "0x3c311d57d4daf52904616cf69648081e4e7b9012096b41c4eb3aaf947f6ea429": "0x0000", + "0x3c311d57d4daf52904616cf69648081e5e0621c4869aa60c02be9adcc98a0d1d": "0x1002d526f43cf27e94f478f9db785dc86052a77c695e7c855211839d3fde3ce534caa1f623ca183296c4521b56cc29c484ca017830f8cb538f30f2d4664d6318141ac112d635db2bd34e79ae2b99486cf7c0b71a928668e4feb3dc4633d368f9657ac9d11be07334cd27e9eb849f5fc7677a10ad36b6ab38b377d3c8b2c0b08b66", + "0x3f1467a096bcd71a5b6a0c8155e208104e7b9012096b41c4eb3aaf947f6ea429": "0x0000", + "0x45323df7cc47150b3930e2666b0aa3134e7b9012096b41c4eb3aaf947f6ea429": "0x0200", + "0x57f8dc2f5ab09467896f47300f0424384e7b9012096b41c4eb3aaf947f6ea429": "0x0000", + "0x57f8dc2f5ab09467896f47300f0424385e0621c4869aa60c02be9adcc98a0d1d": "0x1002d526f43cf27e94f478f9db785dc86052a77c695e7c855211839d3fde3ce534caa1f623ca183296c4521b56cc29c484ca017830f8cb538f30f2d4664d6318141ac112d635db2bd34e79ae2b99486cf7c0b71a928668e4feb3dc4633d368f9657ac9d11be07334cd27e9eb849f5fc7677a10ad36b6ab38b377d3c8b2c0b08b66", + "0x5e8a19e3cd1b7c148b33880c479c02814e7b9012096b41c4eb3aaf947f6ea429": "0x0000", + "0x682a59d51ab9e48a8c8cc418ff9708d24e7b9012096b41c4eb3aaf947f6ea429": "0x0100", + "0x7474449cca95dc5d0c00e71735a6d17d4e7b9012096b41c4eb3aaf947f6ea429": "0x0100", + "0x79e2fe5d327165001f8232643023ed8b4e7b9012096b41c4eb3aaf947f6ea429": "0x0000", + "0x7b3237373ffdfeb1cab4222e3b520d6b4e7b9012096b41c4eb3aaf947f6ea429": "0x0200", + "0x7c215a8bceb5e4dcd92f78e36a5fd0ff4e7b9012096b41c4eb3aaf947f6ea429": "0x0000", + "0xc2261276cc9d1f8598ea4b6a74b15c2f4e7b9012096b41c4eb3aaf947f6ea429": "0x0100", + 
"0xc2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80": "0x000064a7b3b6e00d0000000000000000", + "0xcd5c1f6df63bc97f4a8ce37f14a50ca74e7b9012096b41c4eb3aaf947f6ea429": "0x0100", + "0xcec5070d609dd3497f72bde07fc96ba04c014e6bf8b8c2c011e7290b85696bb363fc5191ef4dec4f7ac9d11be07334cd27e9eb849f5fc7677a10ad36b6ab38b377d3c8b2c0b08b66": "0x7ac9d11be07334cd27e9eb849f5fc7677a10ad36b6ab38b377d3c8b2c0b08b66", + "0xcec5070d609dd3497f72bde07fc96ba04c014e6bf8b8c2c011e7290b85696bb3c21badc8f9053b1f1ac112d635db2bd34e79ae2b99486cf7c0b71a928668e4feb3dc4633d368f965": "0x1ac112d635db2bd34e79ae2b99486cf7c0b71a928668e4feb3dc4633d368f965", + "0xcec5070d609dd3497f72bde07fc96ba04c014e6bf8b8c2c011e7290b85696bb3ca2c608870c60a0ccaa1f623ca183296c4521b56cc29c484ca017830f8cb538f30f2d4664d631814": "0xcaa1f623ca183296c4521b56cc29c484ca017830f8cb538f30f2d4664d631814", + "0xcec5070d609dd3497f72bde07fc96ba04c014e6bf8b8c2c011e7290b85696bb3d823137badc90c9202d526f43cf27e94f478f9db785dc86052a77c695e7c855211839d3fde3ce534": "0x02d526f43cf27e94f478f9db785dc86052a77c695e7c855211839d3fde3ce534", + "0xcec5070d609dd3497f72bde07fc96ba04e7b9012096b41c4eb3aaf947f6ea429": "0x0000", + "0xcec5070d609dd3497f72bde07fc96ba0726380404683fc89e8233450c8aa1950039f7057bdcc4bc16175726180caa1f623ca183296c4521b56cc29c484ca017830f8cb538f30f2d4664d631814": "0xcaa1f623ca183296c4521b56cc29c484ca017830f8cb538f30f2d4664d631814", + "0xcec5070d609dd3497f72bde07fc96ba0726380404683fc89e8233450c8aa19503b5fa0e23c4a4cae617572618002d526f43cf27e94f478f9db785dc86052a77c695e7c855211839d3fde3ce534": "0x02d526f43cf27e94f478f9db785dc86052a77c695e7c855211839d3fde3ce534", + "0xcec5070d609dd3497f72bde07fc96ba0726380404683fc89e8233450c8aa19508d1b0ce8b7d45af961757261807ac9d11be07334cd27e9eb849f5fc7677a10ad36b6ab38b377d3c8b2c0b08b66": "0x7ac9d11be07334cd27e9eb849f5fc7677a10ad36b6ab38b377d3c8b2c0b08b66", + "0xcec5070d609dd3497f72bde07fc96ba0726380404683fc89e8233450c8aa1950e6240f6cb493659d61757261801ac112d635db2bd34e79ae2b99486cf7c0b71a928668e4feb3dc4633d368f965": "0x1ac112d635db2bd34e79ae2b99486cf7c0b71a928668e4feb3dc4633d368f965", + "0xcec5070d609dd3497f72bde07fc96ba088dcde934c658227ee1dfafcd6e16903": "0x1002d526f43cf27e94f478f9db785dc86052a77c695e7c855211839d3fde3ce534caa1f623ca183296c4521b56cc29c484ca017830f8cb538f30f2d4664d6318141ac112d635db2bd34e79ae2b99486cf7c0b71a928668e4feb3dc4633d368f9657ac9d11be07334cd27e9eb849f5fc7677a10ad36b6ab38b377d3c8b2c0b08b66", + "0xcec5070d609dd3497f72bde07fc96ba0e0cdd062e6eaf24295ad4ccfc41d4609": "0x1002d526f43cf27e94f478f9db785dc86052a77c695e7c855211839d3fde3ce53402d526f43cf27e94f478f9db785dc86052a77c695e7c855211839d3fde3ce534caa1f623ca183296c4521b56cc29c484ca017830f8cb538f30f2d4664d631814caa1f623ca183296c4521b56cc29c484ca017830f8cb538f30f2d4664d6318141ac112d635db2bd34e79ae2b99486cf7c0b71a928668e4feb3dc4633d368f9651ac112d635db2bd34e79ae2b99486cf7c0b71a928668e4feb3dc4633d368f9657ac9d11be07334cd27e9eb849f5fc7677a10ad36b6ab38b377d3c8b2c0b08b667ac9d11be07334cd27e9eb849f5fc7677a10ad36b6ab38b377d3c8b2c0b08b66", + "0xd57bce545fb382c34570e5dfbf338f5e4e7b9012096b41c4eb3aaf947f6ea429": "0x0000", + "0xd5e1a2fa16732ce6906189438c0a82c64e7b9012096b41c4eb3aaf947f6ea429": "0x0000", + "0xe38f185207498abb5c213d0fb059b3d84e7b9012096b41c4eb3aaf947f6ea429": "0x0100", + "0xe38f185207498abb5c213d0fb059b3d86323ae84c43568be0d1394d5d0d522c4": "0x03000000", + "0xe8d49389c2e23e152fdd6364daadd2cc4e7b9012096b41c4eb3aaf947f6ea429": "0x0100", + "0xf0c365c3cf59d671eb72da0e7a4113c44e7b9012096b41c4eb3aaf947f6ea429": "0x0000" + }, + "childrenDefault": {} + } + } +} \ 
No newline at end of file diff --git a/cumulus/parachains/chain-specs/bridge-hub-kusama.json b/cumulus/parachains/chain-specs/bridge-hub-kusama.json index 5e45e1528a105412ca3862e13a883b30661a62d4..9daa60fa2635950c5e0dafd40e398c3bf4edffcd 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-kusama.json +++ b/cumulus/parachains/chain-specs/bridge-hub-kusama.json @@ -5,12 +5,8 @@ "bootNodes": [ "/dns/kusama-bridge-hub-connect-ew1-0.polkadot.io/tcp/30334/p2p/12D3KooWPQQPivrqQ51kRTDc2R1mtqwKT4GGtk2rapkY4FrwHrEp", "/dns/kusama-bridge-hub-connect-ew1-1.polkadot.io/tcp/30334/p2p/12D3KooWPcF9Yk4gYrMju9CyWCV69hAFXbYsnxCLogwLGu9QFTRn", - "/dns/kusama-bridge-hub-connect-ue4-0.polkadot.io/tcp/30334/p2p/12D3KooWMf1sVnJDTkKWtaThqvrgcSPLbfGXttSqbwhM2DJp9BUG", - "/dns/kusama-bridge-hub-connect-ue4-1.polkadot.io/tcp/30334/p2p/12D3KooWQaV7wMfNVKy2aMz4Lds3TTxgSDyZAUEnbAZMfD8rW3ow", "/dns/kusama-bridge-hub-connect-ew1-0.polkadot.io/tcp/443/wss/p2p/12D3KooWPQQPivrqQ51kRTDc2R1mtqwKT4GGtk2rapkY4FrwHrEp", "/dns/kusama-bridge-hub-connect-ew1-1.polkadot.io/tcp/443/wss/p2p/12D3KooWPcF9Yk4gYrMju9CyWCV69hAFXbYsnxCLogwLGu9QFTRn", - "/dns/kusama-bridge-hub-connect-ue4-0.polkadot.io/tcp/443/wss/p2p/12D3KooWMf1sVnJDTkKWtaThqvrgcSPLbfGXttSqbwhM2DJp9BUG", - "/dns/kusama-bridge-hub-connect-ue4-1.polkadot.io/tcp/443/wss/p2p/12D3KooWQaV7wMfNVKy2aMz4Lds3TTxgSDyZAUEnbAZMfD8rW3ow", "/dns/boot.stake.plus/tcp/41333/p2p/12D3KooWBzbs2jsXjG5dipktGPKaUm9XWvkmeJFsEAGkVt946Aa7", "/dns/boot.stake.plus/tcp/41334/wss/p2p/12D3KooWBzbs2jsXjG5dipktGPKaUm9XWvkmeJFsEAGkVt946Aa7", "/dns/boot.metaspan.io/tcp/26032/p2p/12D3KooWKfuSaZrLNz43PDgM4inMALXRHTSh2WBuqQtZRq8zmT1Z", @@ -24,7 +20,9 @@ "/dns/bridge-hub-kusama.bootnode.amforc.com/tcp/30337/p2p/12D3KooWGNeQJ5rXnEJkVUuQqwHd8aV5GkTAheaRoCaK8ZwW94id", "/dns/bridge-hub-kusama.bootnode.amforc.com/tcp/30333/wss/p2p/12D3KooWGNeQJ5rXnEJkVUuQqwHd8aV5GkTAheaRoCaK8ZwW94id", "/dns/kusama-bridge-hub-boot-ng.dwellir.com/tcp/30337/p2p/12D3KooWBFskNCQDVjuUeBh6vrszWrUvYMBBhtZRLnoTZDdLYbW5", - "/dns/kusama-bridge-hub-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWBFskNCQDVjuUeBh6vrszWrUvYMBBhtZRLnoTZDdLYbW5" + "/dns/kusama-bridge-hub-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWBFskNCQDVjuUeBh6vrszWrUvYMBBhtZRLnoTZDdLYbW5", + "/dns/bridgehub-kusama-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWQMWofXj8v3RroDNnrhv1iURqm8vnaG98AdGnCn2YoDcW", + "/dns/bridgehub-kusama-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWQMWofXj8v3RroDNnrhv1iURqm8vnaG98AdGnCn2YoDcW" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/bridge-hub-polkadot.json b/cumulus/parachains/chain-specs/bridge-hub-polkadot.json index fd56b61115d5f16a0c2798f73181a42f07774b9c..d3e884284b4a9351f5ce7df5ba8988935cc885ce 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-polkadot.json +++ b/cumulus/parachains/chain-specs/bridge-hub-polkadot.json @@ -5,12 +5,8 @@ "bootNodes": [ "/dns/polkadot-bridge-hub-connect-a-0.polkadot.io/tcp/30334/p2p/12D3KooWAVQMhkXmc5ueSYasdsRWQbKus2YGZ6HDZUB4ViJMCxXy", "/dns/polkadot-bridge-hub-connect-a-1.polkadot.io/tcp/30334/p2p/12D3KooWG4ypDHLKGCv4BZ6PuaGUwQHKAH6p2D6arR2uQ1eiR1T3", - "/dns/polkadot-bridge-hub-connect-b-0.polkadot.io/tcp/30334/p2p/12D3KooWCwGKxjpJXnx1mwXKvaxGQm769EM3b6Pg5vbU33wbhsNw", - "/dns/polkadot-bridge-hub-connect-b-1.polkadot.io/tcp/30334/p2p/12D3KooWLiSEdhriJUPdZKFtAjZrQncxN2ssEoDKVrt5mGM4Qu4J", "/dns/polkadot-bridge-hub-connect-a-0.polkadot.io/tcp/443/wss/p2p/12D3KooWAVQMhkXmc5ueSYasdsRWQbKus2YGZ6HDZUB4ViJMCxXy", 
"/dns/polkadot-bridge-hub-connect-a-1.polkadot.io/tcp/443/wss/p2p/12D3KooWG4ypDHLKGCv4BZ6PuaGUwQHKAH6p2D6arR2uQ1eiR1T3", - "/dns/polkadot-bridge-hub-connect-b-0.polkadot.io/tcp/443/wss/p2p/12D3KooWCwGKxjpJXnx1mwXKvaxGQm769EM3b6Pg5vbU33wbhsNw", - "/dns/polkadot-bridge-hub-connect-b-1.polkadot.io/tcp/443/wss/p2p/12D3KooWLiSEdhriJUPdZKFtAjZrQncxN2ssEoDKVrt5mGM4Qu4J", "/dns/polkadot-bridge-hub-boot-ng.dwellir.com/tcp/30339/p2p/12D3KooWPZ38PL3PhRVcUVYDNn7nRcZF8MykmWWLBKeDV2yna1vV", "/dns/polkadot-bridge-hub-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWPZ38PL3PhRVcUVYDNn7nRcZF8MykmWWLBKeDV2yna1vV", "/dns/boot-cr.gatotech.network/tcp/33130/p2p/12D3KooWCnFzfEdd7MwUNrrDv66FuS2DM5MGuiaB4y48XS7qNjF6", @@ -20,7 +16,9 @@ "/dns/boot.metaspan.io/tcp/16032/p2p/12D3KooWQTfRnrK3FfbrotpSP5RVJbjBHVBSu8VSzhj9qcvjaqnZ", "/dns/boot.metaspan.io/tcp/16036/wss/p2p/12D3KooWQTfRnrK3FfbrotpSP5RVJbjBHVBSu8VSzhj9qcvjaqnZ", "/dns/boot-node.helikon.io/tcp/8220/p2p/12D3KooWC38TZJA8ZBXZgAYVrceoJ56jNNLJPdpk3ojeFkTAwZVp", - "/dns/boot-node.helikon.io/tcp/8222/wss/p2p/12D3KooWC38TZJA8ZBXZgAYVrceoJ56jNNLJPdpk3ojeFkTAwZVp" + "/dns/boot-node.helikon.io/tcp/8222/wss/p2p/12D3KooWC38TZJA8ZBXZgAYVrceoJ56jNNLJPdpk3ojeFkTAwZVp", + "/dns/bridgehub-polkadot-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWPNZm78tWUmKbta3SXdkqTPsquRc8ekEbJjZsGGi7YiRi", + "/dns/bridgehub-polkadot-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWPNZm78tWUmKbta3SXdkqTPsquRc8ekEbJjZsGGi7YiRi" ], "telemetryEndpoints": null, "protocolId": null, @@ -87,4 +85,4 @@ "childrenDefault": {} } } -} \ No newline at end of file +} diff --git a/cumulus/parachains/chain-specs/bridge-hub-rococo.json b/cumulus/parachains/chain-specs/bridge-hub-rococo.json index ff20d8fb4825ef964ff36439100d56715ca8c07b..6b430678a86c88d35894819d6433fcf605270d79 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-rococo.json +++ b/cumulus/parachains/chain-specs/bridge-hub-rococo.json @@ -4,9 +4,7 @@ "chainType": "Live", "bootNodes": [ "/dns/rococo-bridge-hub-collator-node-0.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWJCFBJmFF65xz5xHeZQRSCf35BxfSEB3RHQFoLza28LWU", - "/dns/rococo-bridge-hub-collator-node-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWJzLd8skcAgA24EcJey7aJAhYctfUxWGjSP5Usk9wbpPZ", - "/dns/rococo-bridge-hub-collator-node-2.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWPZLWbbDJzEXAHPuAcVssPrjQLyZK4nvvmV2ez6gy2FQ3", - "/dns/rococo-bridge-hub-collator-node-3.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWKWMENpCNH7wBVQoHLwQoWUs6acAEmfdV694v9jCuJwYc" + "/dns/rococo-bridge-hub-collator-node-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWJzLd8skcAgA24EcJey7aJAhYctfUxWGjSP5Usk9wbpPZ" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/bridge-hub-westend.json b/cumulus/parachains/chain-specs/bridge-hub-westend.json index fbed89729c79b70e5631753feec73f71cebe401d..dde3d437f416fadac1b2769bddb8385e1759443e 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-westend.json +++ b/cumulus/parachains/chain-specs/bridge-hub-westend.json @@ -5,8 +5,6 @@ "bootNodes": [ "/dns/westend-bridge-hub-collator-node-0.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWKyEuqkkWvFSrwZWKWBAsHgLV3HGfHj7yH3LNJLAVhmxY", "/dns/westend-bridge-hub-collator-node-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWBpvudthz61XC4oP2YYFFJdhWohBeQ1ffn1BMSGWhapjd", - "/dns/westend-bridge-hub-collator-node-2.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWPXqdRRthjKAMPFtaXUK7yBxsvh83QsmzXzALA3inoJfo", - 
"/dns/westend-bridge-hub-collator-node-3.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWAp2YpVaiNBy7rozEHJGocDpaLFt3VFZsGMBEYh4BoEz7", "/dns/westend-bridge-hub-boot-ng.dwellir.com/tcp/30338/p2p/12D3KooWJWWRYTAwBLqYkh7iMBGDr5ouJ3MHj7M3fZ7zWS4zEk6F", "/dns/westend-bridge-hub-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWJWWRYTAwBLqYkh7iMBGDr5ouJ3MHj7M3fZ7zWS4zEk6F", "/dns/boot-cr.gatotech.network/tcp/33330/p2p/12D3KooWJHG6qznPzTSEbuujHNcvyzBZcR9zNRPFcXWUaoVWZBEw", @@ -16,7 +14,9 @@ "/dns/boot.metaspan.io/tcp/36032/p2p/12D3KooWPaLsu3buByBnGFQnp5UP4q1S652dGVft92TFeChizFir", "/dns/boot.metaspan.io/tcp/36036/wss/p2p/12D3KooWPaLsu3buByBnGFQnp5UP4q1S652dGVft92TFeChizFir", "/dns/boot-node.helikon.io/tcp/9220/p2p/12D3KooWK3K1Mu5Jjg96Lt9DUzg84KsWnZo44V4KB7mvhGqi6xnp", - "/dns/boot-node.helikon.io/tcp/9222/wss/p2p/12D3KooWK3K1Mu5Jjg96Lt9DUzg84KsWnZo44V4KB7mvhGqi6xnp" + "/dns/boot-node.helikon.io/tcp/9222/wss/p2p/12D3KooWK3K1Mu5Jjg96Lt9DUzg84KsWnZo44V4KB7mvhGqi6xnp", + "/dns/bridgehub-westend-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWBsBArCMxmQyo3feCEqMWuwyhb2LTRK8hmCCJxgrNeMke", + "/dns/bridgehub-westend-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWBsBArCMxmQyo3feCEqMWuwyhb2LTRK8hmCCJxgrNeMke" ], "telemetryEndpoints": null, "protocolId": null, @@ -82,4 +82,4 @@ "childrenDefault": {} } } -} \ No newline at end of file +} diff --git a/cumulus/parachains/chain-specs/collectives-polkadot.json b/cumulus/parachains/chain-specs/collectives-polkadot.json index e17958f1f68324ce33b16491e037e7035baf787a..003c6373429036eac3642d7eaeacce40d1af0f84 100644 --- a/cumulus/parachains/chain-specs/collectives-polkadot.json +++ b/cumulus/parachains/chain-specs/collectives-polkadot.json @@ -5,12 +5,8 @@ "bootNodes": [ "/dns/polkadot-collectives-connect-ew6-0.polkadot.io/tcp/30334/p2p/12D3KooWLDZT5gAjMtC8fojiCwiz17SC61oeX2C7GWBCqqf9TwVD", "/dns/polkadot-collectives-connect-ew6-1.polkadot.io/tcp/30334/p2p/12D3KooWC9BwKMDyRUTXsE7teSmoKMgbyxqAp3zi2MTGRJR5nhCL", - "/dns/polkadot-collectives-connect-uw1-0.polkadot.io/tcp/30334/p2p/12D3KooWPrJ9VTn3GEs2e7GQs4zoEFiTFcjXFNbQ2iDxFDQAbstQ", - "/dns/polkadot-collectives-connect-uw1-1.polkadot.io/tcp/30334/p2p/12D3KooWGFYW6hJYn3pkpJawyMk6souXh7sznK8yvPmVQ7ktfHbV", "/dns/polkadot-collectives-connect-ew6-0.polkadot.io/tcp/443/wss/p2p/12D3KooWLDZT5gAjMtC8fojiCwiz17SC61oeX2C7GWBCqqf9TwVD", "/dns/polkadot-collectives-connect-ew6-1.polkadot.io/tcp/443/wss/p2p/12D3KooWC9BwKMDyRUTXsE7teSmoKMgbyxqAp3zi2MTGRJR5nhCL", - "/dns/polkadot-collectives-connect-uw1-0.polkadot.io/tcp/443/wss/p2p/12D3KooWPrJ9VTn3GEs2e7GQs4zoEFiTFcjXFNbQ2iDxFDQAbstQ", - "/dns/polkadot-collectives-connect-uw1-1.polkadot.io/tcp/443/wss/p2p/12D3KooWGFYW6hJYn3pkpJawyMk6souXh7sznK8yvPmVQ7ktfHbV", "/dns/boot.stake.plus/tcp/37333/p2p/12D3KooWRgFfEtwPo3xorKGYALRHRteKNgF37iN9q8xTLPYc34LA", "/dns/boot.stake.plus/tcp/37334/wss/p2p/12D3KooWRgFfEtwPo3xorKGYALRHRteKNgF37iN9q8xTLPYc34LA", "/dns/boot.metaspan.io/tcp/16072/p2p/12D3KooWJWTTu2t2yg5bFRH6tjEpfzKwZir5R9JRRjQpgFPXdDfp", @@ -24,7 +20,9 @@ "/dns/collectives-polkadot.bootnode.amforc.com/tcp/30335/p2p/12D3KooWQeAjDnGkrPe5vtpfnB6ydZfWyMxyrXLkBFmA6o4k9aiU", "/dns/collectives-polkadot.bootnode.amforc.com/tcp/30333/wss/p2p/12D3KooWQeAjDnGkrPe5vtpfnB6ydZfWyMxyrXLkBFmA6o4k9aiU", "/dns/polkadot-collectives-boot-ng.dwellir.com/tcp/30341/p2p/12D3KooWDMFYCNRAQcSRNV7xu2xv8319goSEbSHW4TnXRz6EpPKc", - "/dns/polkadot-collectives-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWDMFYCNRAQcSRNV7xu2xv8319goSEbSHW4TnXRz6EpPKc" + 
"/dns/polkadot-collectives-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWDMFYCNRAQcSRNV7xu2xv8319goSEbSHW4TnXRz6EpPKc", + "/dns/collectives-polkadot-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWDumvnNwPbBg5inBEapgjKU7ECdMHHgwfYeGWUkzYUE1c", + "/dns/collectives-polkadot-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWDumvnNwPbBg5inBEapgjKU7ECdMHHgwfYeGWUkzYUE1c" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/collectives-westend.json b/cumulus/parachains/chain-specs/collectives-westend.json index a40512997daa546f7abc76e45cf8d6a3f09e735e..e06671faa04e2b80a9c8e49badffc7d38c670a6d 100644 --- a/cumulus/parachains/chain-specs/collectives-westend.json +++ b/cumulus/parachains/chain-specs/collectives-westend.json @@ -5,12 +5,8 @@ "bootNodes": [ "/dns/westend-collectives-collator-node-0.parity-testnet.parity.io/tcp/30334/p2p/12D3KooWBMAuyzQu3yAf8YXyoyxsSzSsgoaqAepgnNyQcPaPjPXe", "/dns/westend-collectives-collator-node-1.parity-testnet.parity.io/tcp/30334/p2p/12D3KooWAujYtHbCs4MiDD57JNTntTJnYnikfnaPa7JdnMyAUrHB", - "/dns/westend-collectives-collator-node-2.parity-testnet.parity.io/tcp/30334/p2p/12D3KooWP4pJg6pZUpxETd8Rs6GmS9FeRCeNtrBerqZhUyEPCiPp", - "/dns/westend-collectives-collator-node-3.parity-testnet.parity.io/tcp/30334/p2p/12D3KooWBbrBYhXxFXhdio3AiuaqMG26pn91SUnd12gJiVn2Wh8Q", "/dns/westend-collectives-collator-0.polkadot.io/tcp/443/wss/p2p/12D3KooWBMAuyzQu3yAf8YXyoyxsSzSsgoaqAepgnNyQcPaPjPXe", "/dns/westend-collectives-collator-1.polkadot.io/tcp/443/wss/p2p/12D3KooWAujYtHbCs4MiDD57JNTntTJnYnikfnaPa7JdnMyAUrHB", - "/dns/westend-collectives-collator-2.polkadot.io/tcp/443/wss/p2p/12D3KooWP4pJg6pZUpxETd8Rs6GmS9FeRCeNtrBerqZhUyEPCiPp", - "/dns/westend-collectives-collator-3.polkadot.io/tcp/443/wss/p2p/12D3KooWBbrBYhXxFXhdio3AiuaqMG26pn91SUnd12gJiVn2Wh8Q", "/dns/boot.stake.plus/tcp/38333/p2p/12D3KooWQoVsFCfgu21iu6kdtQsU9T6dPn1wsyLn1U34yPerR6zQ", "/dns/boot.stake.plus/tcp/38334/wss/p2p/12D3KooWQoVsFCfgu21iu6kdtQsU9T6dPn1wsyLn1U34yPerR6zQ", "/dns/boot.metaspan.io/tcp/36072/p2p/12D3KooWEf2QXWq5pAbFJLfbnexA7KYtRRDSPkqTP64n1KtdsdV2", @@ -107,4 +103,4 @@ "childrenDefault": {} } } -} \ No newline at end of file +} diff --git a/cumulus/parachains/chain-specs/contracts-rococo.json b/cumulus/parachains/chain-specs/contracts-rococo.json index 09108e9c0995aa2abd52277eca6572c62fb2ec79..422268a5efdb2f8efc96da27dde92a92b11ac9ab 100644 --- a/cumulus/parachains/chain-specs/contracts-rococo.json +++ b/cumulus/parachains/chain-specs/contracts-rococo.json @@ -5,12 +5,8 @@ "bootNodes": [ "/dns/rococo-contracts-collator-node-0.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWKg3Rpxcr9oJ8n6khoxpGKWztCZydtUZk2cojHqnfLrpj", "/dns/rococo-contracts-collator-node-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWPEXYrz8tHU3nDtPoPw4V7ou5dzMEWSTuUj7vaWiYVAVh", - "/dns/rococo-contracts-collator-node-2.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWEVU8AFNary4nP4qEnEcwJaRuy59Wefekzdu9pKbnVEhk", - "/dns/rococo-contracts-collator-node-3.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWP6pV3ZmcXzGDjv8ZMgA6nZxfAKDxSz4VNiLx6vVCQgJX", "/dns/rococo-contracts-collator-node-0.polkadot.io/tcp/443/wss/p2p/12D3KooWKg3Rpxcr9oJ8n6khoxpGKWztCZydtUZk2cojHqnfLrpj", - "/dns/rococo-contracts-collator-node-1.polkadot.io/tcp/443/wss/p2p/12D3KooWPEXYrz8tHU3nDtPoPw4V7ou5dzMEWSTuUj7vaWiYVAVh", - "/dns/rococo-contracts-collator-node-2.polkadot.io/tcp/443/wss/p2p/12D3KooWEVU8AFNary4nP4qEnEcwJaRuy59Wefekzdu9pKbnVEhk", - 
"/dns/rococo-contracts-collator-node-3.polkadot.io/tcp/443/wss/p2p/12D3KooWP6pV3ZmcXzGDjv8ZMgA6nZxfAKDxSz4VNiLx6vVCQgJX" + "/dns/rococo-contracts-collator-node-1.polkadot.io/tcp/443/wss/p2p/12D3KooWPEXYrz8tHU3nDtPoPw4V7ou5dzMEWSTuUj7vaWiYVAVh" ], "telemetryEndpoints": null, diff --git a/cumulus/parachains/common/Cargo.toml b/cumulus/parachains/common/Cargo.toml index cb389b4b607b807a24c4f80bc2f6a37e0ed19a7b..4aad4dec236270cad841a54e45f2265c67a2d24f 100644 --- a/cumulus/parachains/common/Cargo.toml +++ b/cumulus/parachains/common/Cargo.toml @@ -4,6 +4,7 @@ version = "1.0.0" authors.workspace = true edition.workspace = true description = "Logic which is common to all parachain runtimes" +license = "Apache-2.0" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -11,7 +12,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"], default-features = false } log = { version = "0.4.19", default-features = false } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } num-traits = { version = "0.2", default-features = false} smallvec = "1.11.0" @@ -22,6 +23,7 @@ pallet-asset-tx-payment = { path = "../../../substrate/frame/transaction-payment pallet-assets = { path = "../../../substrate/frame/assets", default-features = false } pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false } pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } +pallet-message-queue = { path = "../../../substrate/frame/message-queue", default-features = false } sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura", default-features = false } sp-core = { path = "../../../substrate/primitives/core", default-features = false } sp-io = { path = "../../../substrate/primitives/io", default-features = false } @@ -35,12 +37,12 @@ polkadot-core-primitives = { path = "../../../polkadot/core-primitives", default polkadot-primitives = { path = "../../../polkadot/primitives", default-features = false} xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false} xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../polkadot/xcm/xcm-executor", default-features = false} # Cumulus pallet-collator-selection = { path = "../../pallets/collator-selection", default-features = false } cumulus-primitives-core = { path = "../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../primitives/utility", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../pallets/parachain-info", default-features = false } [dev-dependencies] pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false} @@ -52,19 +54,24 @@ substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder" } [features] default = [ "std" ] std = [ + "codec/std", "cumulus-primitives-core/std", "cumulus-primitives-utility/std", "frame-support/std", "frame-system/std", "log/std", + "num-traits/std", "pallet-asset-tx-payment/std", "pallet-assets/std", "pallet-authorship/std", "pallet-balances/std", "pallet-collator-selection/std", + "pallet-message-queue/std", + "parachain-info/std", 
"polkadot-core-primitives/std", "polkadot-primitives/std", "rococo-runtime-constants/std", + "scale-info/std", "sp-consensus-aura/std", "sp-core/std", "sp-io/std", @@ -72,11 +79,11 @@ std = [ "sp-std/std", "westend-runtime-constants/std", "xcm-builder/std", - "xcm-executor/std", "xcm/std", ] runtime-benchmarks = [ + "cumulus-primitives-core/runtime-benchmarks", "cumulus-primitives-utility/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", @@ -84,8 +91,8 @@ runtime-benchmarks = [ "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-collator-selection/runtime-benchmarks", + "pallet-message-queue/runtime-benchmarks", "polkadot-primitives/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", - "xcm-executor/runtime-benchmarks", ] diff --git a/cumulus/parachains/common/src/impls.rs b/cumulus/parachains/common/src/impls.rs index 107cd5c687328bce1369c59cfa7d4fcbcdd377f3..81d78baba5486933ad4667648cfdae95275a3f73 100644 --- a/cumulus/parachains/common/src/impls.rs +++ b/cumulus/parachains/common/src/impls.rs @@ -192,6 +192,7 @@ mod tests { type MaxReserves = MaxReserves; type ReserveIdentifier = [u8; 8]; type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); type MaxHolds = ConstU32<1>; type MaxFreezes = ConstU32<1>; diff --git a/cumulus/parachains/common/src/lib.rs b/cumulus/parachains/common/src/lib.rs index cb2ac1a1e3e45d7af1cba07c8dddbba1837cf199..68425a00b35890aaa0ca4fb5457fbf356cf81b23 100644 --- a/cumulus/parachains/common/src/lib.rs +++ b/cumulus/parachains/common/src/lib.rs @@ -17,9 +17,11 @@ pub mod impls; pub mod kusama; +pub mod message_queue; pub mod polkadot; pub mod rococo; pub mod westend; +pub mod wococo; pub mod xcm_config; pub use constants::*; pub use opaque::*; @@ -72,7 +74,10 @@ mod types { /// Common constants of parachains. mod constants { use super::types::BlockNumber; - use frame_support::weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight}; + use frame_support::{ + weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight}, + PalletId, + }; use sp_runtime::Perbill; /// This determines the average expected block time that we are targeting. Blocks will be /// produced at a minimum duration defined by `SLOT_DURATION`. `SLOT_DURATION` is picked up by @@ -100,6 +105,9 @@ mod constants { WEIGHT_REF_TIME_PER_SECOND.saturating_div(2), polkadot_primitives::MAX_POV_SIZE as u64, ); + + /// Treasury pallet id of the local chain, used to convert into AccountId + pub const TREASURY_PALLET_ID: PalletId = PalletId(*b"py/trsry"); } /// Opaque types. These are used by the CLI to instantiate machinery that don't need to know diff --git a/cumulus/parachains/common/src/message_queue.rs b/cumulus/parachains/common/src/message_queue.rs new file mode 100644 index 0000000000000000000000000000000000000000..0c9f4b840c9166ba9380f06970ad15bb1cac14c6 --- /dev/null +++ b/cumulus/parachains/common/src/message_queue.rs @@ -0,0 +1,55 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Helpers to deal with configuring the message queue in the runtime. + +use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; +use frame_support::traits::{QueueFootprint, QueuePausedQuery}; +use pallet_message_queue::OnQueueChanged; +use sp_std::marker::PhantomData; + +/// Narrow the scope of the `Inner` query from `AggregateMessageOrigin` to `ParaId`. +/// +/// All non-`Sibling` variants will be ignored. +pub struct NarrowOriginToSibling(PhantomData); +impl> QueuePausedQuery + for NarrowOriginToSibling +{ + fn is_paused(origin: &AggregateMessageOrigin) -> bool { + match origin { + AggregateMessageOrigin::Sibling(id) => Inner::is_paused(id), + _ => false, + } + } +} + +impl> OnQueueChanged + for NarrowOriginToSibling +{ + fn on_queue_changed(origin: AggregateMessageOrigin, fp: QueueFootprint) { + if let AggregateMessageOrigin::Sibling(id) = origin { + Inner::on_queue_changed(id, fp) + } + } +} + +/// Convert a sibling `ParaId` to an `AggregateMessageOrigin`. +pub struct ParaIdToSibling; +impl sp_runtime::traits::Convert for ParaIdToSibling { + fn convert(para_id: ParaId) -> AggregateMessageOrigin { + AggregateMessageOrigin::Sibling(para_id) + } +} diff --git a/cumulus/parachains/common/src/wococo.rs b/cumulus/parachains/common/src/wococo.rs new file mode 100644 index 0000000000000000000000000000000000000000..5cd6121135a3321369fee2132b696aef8a2bae81 --- /dev/null +++ b/cumulus/parachains/common/src/wococo.rs @@ -0,0 +1,17 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// re-export rococo +pub use crate::rococo::{consensus, currency, fee}; diff --git a/cumulus/parachains/common/src/xcm_config.rs b/cumulus/parachains/common/src/xcm_config.rs index 146671441453df76c48776e0b202166aca37361b..4b0215d672b2e3f8b5602976524edfd01940725b 100644 --- a/cumulus/parachains/common/src/xcm_config.rs +++ b/cumulus/parachains/common/src/xcm_config.rs @@ -14,13 +14,13 @@ // limitations under the License. 
use crate::impls::AccountIdOf; -use core::marker::PhantomData; +use cumulus_primitives_core::{IsSystem, ParaId}; use frame_support::{ - traits::{fungibles::Inspect, tokens::ConversionToAssetBalance, ContainsPair}, + traits::{fungibles::Inspect, tokens::ConversionToAssetBalance, Contains, ContainsPair}, weights::Weight, }; -use log; use sp_runtime::traits::Get; +use sp_std::marker::PhantomData; use xcm::latest::prelude::*; /// A `ChargeFeeInFungibles` implementation that converts the output of @@ -78,3 +78,106 @@ impl> ContainsPair matches!(asset.id, Concrete(ref id) if id == origin && origin == &Location::get()) } } + +pub struct RelayOrOtherSystemParachains< + SystemParachainMatcher: Contains, + Runtime: parachain_info::Config, +> { + _runtime: PhantomData<(SystemParachainMatcher, Runtime)>, +} +impl, Runtime: parachain_info::Config> + Contains for RelayOrOtherSystemParachains +{ + fn contains(l: &MultiLocation) -> bool { + let self_para_id: u32 = parachain_info::Pallet::::get().into(); + if let MultiLocation { parents: 0, interior: X1(Parachain(para_id)) } = l { + if *para_id == self_para_id { + return false + } + } + matches!(l, MultiLocation { parents: 1, interior: Here }) || + SystemParachainMatcher::contains(l) + } +} + +/// Accepts an asset if it is a concrete asset from the system (Relay Chain or system parachain). +pub struct ConcreteAssetFromSystem(PhantomData); +impl> ContainsPair + for ConcreteAssetFromSystem +{ + fn contains(asset: &MultiAsset, origin: &MultiLocation) -> bool { + log::trace!(target: "xcm::contains", "ConcreteAssetFromSystem asset: {:?}, origin: {:?}", asset, origin); + let is_system = match origin { + // The Relay Chain + MultiLocation { parents: 1, interior: Here } => true, + // System parachain + MultiLocation { parents: 1, interior: X1(Parachain(id)) } => + ParaId::from(*id).is_system(), + // Others + _ => false, + }; + matches!(asset.id, Concrete(id) if id == AssetLocation::get()) && is_system + } +} + +#[cfg(test)] +mod tests { + use frame_support::parameter_types; + + use super::{ + ConcreteAssetFromSystem, ContainsPair, GeneralIndex, Here, MultiAsset, MultiLocation, + PalletInstance, Parachain, Parent, + }; + + parameter_types! 
{ + pub const RelayLocation: MultiLocation = MultiLocation::parent(); + } + + #[test] + fn concrete_asset_from_relay_works() { + let expected_asset: MultiAsset = (Parent, 1000000).into(); + let expected_origin: MultiLocation = (Parent, Here).into(); + + assert!(>::contains( + &expected_asset, + &expected_origin + )); + } + + #[test] + fn concrete_asset_from_sibling_system_para_fails_for_wrong_asset() { + let unexpected_assets: Vec = vec![ + (Here, 1000000).into(), + ((PalletInstance(50), GeneralIndex(1)), 1000000).into(), + ((Parent, Parachain(1000), PalletInstance(50), GeneralIndex(1)), 1000000).into(), + ]; + let expected_origin: MultiLocation = (Parent, Parachain(1000)).into(); + + unexpected_assets.iter().for_each(|asset| { + assert!(!>::contains(asset, &expected_origin)); + }); + } + + #[test] + fn concrete_asset_from_sibling_system_para_works_for_correct_asset() { + // (para_id, expected_result) + let test_data = vec![ + (0, true), + (1, true), + (1000, true), + (1999, true), + (2000, false), // Not a System Parachain + (2001, false), // Not a System Parachain + ]; + + let expected_asset: MultiAsset = (Parent, 1000000).into(); + + for (para_id, expected_result) in test_data { + let origin: MultiLocation = (Parent, Parachain(para_id)).into(); + assert_eq!( + expected_result, + >::contains(&expected_asset, &origin) + ); + } + } +} diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/Cargo.toml deleted file mode 100644 index bf141dafebf2cde02f3eeda6b71841b02ae62282..0000000000000000000000000000000000000000 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/Cargo.toml +++ /dev/null @@ -1,64 +0,0 @@ -[package] -name = "asset-hub-westend-integration-tests" -version = "1.0.0" -authors.workspace = true -edition.workspace = true -license = "Apache-2.0" -description = "Asset Hub Westend runtime integration tests with xcm-emulator" -publish = false - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false } -assert_matches = "1.5.0" - -# Substrate -sp-runtime = { path = "../../../../../../substrate/primitives/runtime", default-features = false} -frame-support = { path = "../../../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../../../substrate/frame/system", default-features = false} -pallet-balances = { path = "../../../../../../substrate/frame/balances", default-features = false} -pallet-assets = { path = "../../../../../../substrate/frame/assets", default-features = false} -pallet-asset-conversion = { path = "../../../../../../substrate/frame/asset-conversion", default-features = false} -pallet-treasury = { path = "../../../../../../substrate/frame/treasury", default-features = false} -pallet-asset-rate = { path = "../../../../../../substrate/frame/asset-rate", default-features = false} - -# Polkadot -polkadot-core-primitives = { path = "../../../../../../polkadot/core-primitives", default-features = false} -polkadot-parachain-primitives = { path = "../../../../../../polkadot/parachain", default-features = false} -polkadot-runtime-common = { path = "../../../../../../polkadot/runtime/common" } -polkadot-runtime-parachains = { path = "../../../../../../polkadot/runtime/parachains" } -xcm = { package = "staging-xcm", path = "../../../../../../polkadot/xcm", default-features = false} -xcm-builder = { package = "staging-xcm-builder", path = 
"../../../../../../polkadot/xcm/xcm-builder", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../polkadot/xcm/xcm-executor", default-features = false} -pallet-xcm = { path = "../../../../../../polkadot/xcm/pallet-xcm", default-features = false} - -# Cumulus -parachains-common = { path = "../../../../common" } -asset-hub-westend-runtime = { path = "../../../../runtimes/assets/asset-hub-westend" } -cumulus-pallet-dmp-queue = { default-features = false, path = "../../../../../pallets/dmp-queue" } -cumulus-pallet-parachain-system = { default-features = false, path = "../../../../../pallets/parachain-system" } - -# Local -xcm-emulator = { path = "../../../../../xcm/xcm-emulator", default-features = false} -integration-tests-common = { path = "../../common", default-features = false} - -[features] -runtime-benchmarks = [ - "asset-hub-westend-runtime/runtime-benchmarks", - "cumulus-pallet-parachain-system/runtime-benchmarks", - "frame-support/runtime-benchmarks", - "frame-system/runtime-benchmarks", - "integration-tests-common/runtime-benchmarks", - "pallet-asset-conversion/runtime-benchmarks", - "pallet-asset-rate/runtime-benchmarks", - "pallet-assets/runtime-benchmarks", - "pallet-balances/runtime-benchmarks", - "pallet-treasury/runtime-benchmarks", - "pallet-xcm/runtime-benchmarks", - "parachains-common/runtime-benchmarks", - "polkadot-parachain-primitives/runtime-benchmarks", - "polkadot-runtime-common/runtime-benchmarks", - "polkadot-runtime-parachains/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", - "xcm-builder/runtime-benchmarks", - "xcm-executor/runtime-benchmarks", -] diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/reserve_transfer.rs deleted file mode 100644 index 51fac43be1255948a9ae72044bececa3d907f2c5..0000000000000000000000000000000000000000 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/reserve_transfer.rs +++ /dev/null @@ -1,414 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use crate::*; - -fn relay_origin_assertions(t: RelayToSystemParaTest) { - type RuntimeEvent = ::RuntimeEvent; - - Westend::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts(629_384_000, 6_196))); - - assert_expected_events!( - Westend, - vec![ - // Amount to reserve transfer is transferred to System Parachain's Sovereign account - RuntimeEvent::Balances(pallet_balances::Event::Transfer { from, to, amount }) => { - from: *from == t.sender.account_id, - to: *to == Westend::sovereign_account_id_of( - t.args.dest - ), - amount: *amount == t.args.amount, - }, - ] - ); -} - -fn system_para_dest_assertions_incomplete(_t: RelayToSystemParaTest) { - AssetHubWestend::assert_dmp_queue_incomplete( - Some(Weight::from_parts(1_000_000_000, 0)), - Some(Error::UntrustedReserveLocation), - ); -} - -fn system_para_to_relay_assertions(_t: SystemParaToRelayTest) { - AssetHubWestend::assert_xcm_pallet_attempted_error(Some(XcmError::Barrier)) -} - -fn system_para_to_para_assertions(t: SystemParaToParaTest) { - type RuntimeEvent = ::RuntimeEvent; - - AssetHubWestend::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts( - 676_119_000, - 6196, - ))); - - assert_expected_events!( - AssetHubWestend, - vec![ - // Amount to reserve transfer is transferred to Parachain's Sovereing account - RuntimeEvent::Balances( - pallet_balances::Event::Transfer { from, to, amount } - ) => { - from: *from == t.sender.account_id, - to: *to == AssetHubWestend::sovereign_account_id_of( - t.args.dest - ), - amount: *amount == t.args.amount, - }, - ] - ); -} - -fn system_para_to_para_assets_assertions(t: SystemParaToParaTest) { - type RuntimeEvent = ::RuntimeEvent; - - AssetHubWestend::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts( - 676_119_000, - 6196, - ))); - - assert_expected_events!( - AssetHubWestend, - vec![ - // Amount to reserve transfer is transferred to Parachain's Sovereing account - RuntimeEvent::Assets( - pallet_assets::Event::Transferred { asset_id, from, to, amount } - ) => { - asset_id: *asset_id == ASSET_ID, - from: *from == t.sender.account_id, - to: *to == AssetHubWestend::sovereign_account_id_of( - t.args.dest - ), - amount: *amount == t.args.amount, - }, - ] - ); -} - -fn relay_limited_reserve_transfer_assets(t: RelayToSystemParaTest) -> DispatchResult { - ::XcmPallet::limited_reserve_transfer_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - t.args.weight_limit, - ) -} - -fn relay_reserve_transfer_assets(t: RelayToSystemParaTest) -> DispatchResult { - ::XcmPallet::reserve_transfer_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - ) -} - -fn system_para_limited_reserve_transfer_assets(t: SystemParaToRelayTest) -> DispatchResult { - ::PolkadotXcm::limited_reserve_transfer_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - t.args.weight_limit, - ) -} - -fn system_para_reserve_transfer_assets(t: SystemParaToRelayTest) -> DispatchResult { - ::PolkadotXcm::reserve_transfer_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - ) -} - -fn system_para_to_para_limited_reserve_transfer_assets(t: SystemParaToParaTest) -> DispatchResult { - ::PolkadotXcm::limited_reserve_transfer_assets( - t.signed_origin, - 
bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - t.args.weight_limit, - ) -} - -fn system_para_to_para_reserve_transfer_assets(t: SystemParaToParaTest) -> DispatchResult { - ::PolkadotXcm::reserve_transfer_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - ) -} - -/// Limited Reserve Transfers of native asset from Relay Chain to the System Parachain shouldn't -/// work -#[test] -fn limited_reserve_transfer_native_asset_from_relay_to_system_para_fails() { - // Init values for Relay Chain - let amount_to_send: Balance = WESTEND_ED * 1000; - let test_args = TestContext { - sender: WestendSender::get(), - receiver: AssetHubWestendReceiver::get(), - args: relay_test_args(amount_to_send), - }; - - let mut test = RelayToSystemParaTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(relay_origin_assertions); - test.set_assertion::(system_para_dest_assertions_incomplete); - test.set_dispatchable::(relay_limited_reserve_transfer_assets); - test.assert(); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - assert_eq!(sender_balance_before - amount_to_send, sender_balance_after); - assert_eq!(receiver_balance_before, receiver_balance_after); -} - -/// Limited Reserve Transfers of native asset from System Parachain to Relay Chain shoudln't work -#[test] -fn limited_reserve_transfer_native_asset_from_system_para_to_relay_fails() { - // Init values for System Parachain - let destination = AssetHubWestend::parent_location(); - let beneficiary_id = WestendReceiver::get(); - let amount_to_send: Balance = ASSET_HUB_WESTEND_ED * 1000; - let assets = (Parent, amount_to_send).into(); - - let test_args = TestContext { - sender: AssetHubWestendSender::get(), - receiver: WestendReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), - }; - - let mut test = SystemParaToRelayTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(system_para_to_relay_assertions); - test.set_dispatchable::(system_para_limited_reserve_transfer_assets); - test.assert(); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - assert_eq!(sender_balance_before, sender_balance_after); - assert_eq!(receiver_balance_before, receiver_balance_after); -} - -/// Reserve Transfers of native asset from Relay Chain to the System Parachain shouldn't work -#[test] -fn reserve_transfer_native_asset_from_relay_to_system_para_fails() { - // Init values for Relay Chain - let amount_to_send: Balance = WESTEND_ED * 1000; - let test_args = TestContext { - sender: WestendSender::get(), - receiver: AssetHubWestendReceiver::get(), - args: relay_test_args(amount_to_send), - }; - - let mut test = RelayToSystemParaTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(relay_origin_assertions); - test.set_assertion::(system_para_dest_assertions_incomplete); - test.set_dispatchable::(relay_reserve_transfer_assets); - test.assert(); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - 
assert_eq!(sender_balance_before - amount_to_send, sender_balance_after); - assert_eq!(receiver_balance_before, receiver_balance_after); -} - -/// Reserve Transfers of native asset from System Parachain to Relay Chain shouldn't work -#[test] -fn reserve_transfer_native_asset_from_system_para_to_relay_fails() { - // Init values for System Parachain - let destination = AssetHubWestend::parent_location(); - let beneficiary_id = WestendReceiver::get(); - let amount_to_send: Balance = ASSET_HUB_WESTEND_ED * 1000; - let assets = (Parent, amount_to_send).into(); - - let test_args = TestContext { - sender: AssetHubWestendSender::get(), - receiver: WestendReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), - }; - - let mut test = SystemParaToRelayTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(system_para_to_relay_assertions); - test.set_dispatchable::(system_para_reserve_transfer_assets); - test.assert(); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - assert_eq!(sender_balance_before, sender_balance_after); - assert_eq!(receiver_balance_before, receiver_balance_after); -} - -/// Limited Reserve Transfers of native asset from System Parachain to Parachain should work -#[test] -fn limited_reserve_transfer_native_asset_from_system_para_to_para() { - // Init values for System Parachain - let destination = AssetHubWestend::sibling_location_of(PenpalWestendA::para_id()); - let beneficiary_id = PenpalWestendAReceiver::get(); - let amount_to_send: Balance = ASSET_HUB_WESTEND_ED * 1000; - let assets = (Parent, amount_to_send).into(); - - let test_args = TestContext { - sender: AssetHubWestendSender::get(), - receiver: PenpalWestendAReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), - }; - - let mut test = SystemParaToParaTest::new(test_args); - - let sender_balance_before = test.sender.balance; - - test.set_assertion::(system_para_to_para_assertions); - // TODO: Add assertion for Penpal runtime. Right now message is failing with - // `UntrustedReserveLocation` - test.set_dispatchable::(system_para_to_para_limited_reserve_transfer_assets); - test.assert(); - - let sender_balance_after = test.sender.balance; - - assert_eq!(sender_balance_before - amount_to_send, sender_balance_after); - // TODO: Check receiver balance when Penpal runtime is improved to propery handle reserve - // transfers -} - -/// Reserve Transfers of native asset from System Parachain to Parachain should work -#[test] -fn reserve_transfer_native_asset_from_system_para_to_para() { - // Init values for System Parachain - let destination = AssetHubWestend::sibling_location_of(PenpalWestendA::para_id()); - let beneficiary_id = PenpalWestendAReceiver::get(); - let amount_to_send: Balance = ASSET_HUB_WESTEND_ED * 1000; - let assets = (Parent, amount_to_send).into(); - - let test_args = TestContext { - sender: AssetHubWestendSender::get(), - receiver: PenpalWestendAReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), - }; - - let mut test = SystemParaToParaTest::new(test_args); - - let sender_balance_before = test.sender.balance; - - test.set_assertion::(system_para_to_para_assertions); - // TODO: Add assertion for Penpal runtime. 
Right now message is failing with - // `UntrustedReserveLocation` - test.set_dispatchable::(system_para_to_para_reserve_transfer_assets); - test.assert(); - - let sender_balance_after = test.sender.balance; - - assert_eq!(sender_balance_before - amount_to_send, sender_balance_after); - // TODO: Check receiver balance when Penpal runtime is improved to propery handle reserve - // transfers -} - -/// Limited Reserve Transfers of a local asset from System Parachain to Parachain should work -#[test] -fn limited_reserve_transfer_asset_from_system_para_to_para() { - // Force create asset from Relay Chain and mint assets for System Parachain's sender account - AssetHubWestend::force_create_and_mint_asset( - ASSET_ID, - ASSET_MIN_BALANCE, - true, - AssetHubWestendSender::get(), - ASSET_MIN_BALANCE * 1000000, - ); - - // Init values for System Parachain - let destination = AssetHubWestend::sibling_location_of(PenpalWestendA::para_id()); - let beneficiary_id = PenpalWestendAReceiver::get(); - let amount_to_send = ASSET_MIN_BALANCE * 1000; - let assets = - (X2(PalletInstance(ASSETS_PALLET_ID), GeneralIndex(ASSET_ID.into())), amount_to_send) - .into(); - - let system_para_test_args = TestContext { - sender: AssetHubWestendSender::get(), - receiver: PenpalWestendAReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), - }; - - let mut system_para_test = SystemParaToParaTest::new(system_para_test_args); - - system_para_test.set_assertion::(system_para_to_para_assets_assertions); - // TODO: Add assertions when Penpal is able to manage assets - system_para_test - .set_dispatchable::(system_para_to_para_limited_reserve_transfer_assets); - system_para_test.assert(); -} - -/// Reserve Transfers of a local asset from System Parachain to Parachain should work -#[test] -fn reserve_transfer_asset_from_system_para_to_para() { - // Force create asset from Relay Chain and mint assets for System Parachain's sender account - AssetHubWestend::force_create_and_mint_asset( - ASSET_ID, - ASSET_MIN_BALANCE, - true, - AssetHubWestendSender::get(), - ASSET_MIN_BALANCE * 1000000, - ); - - // Init values for System Parachain - let destination = AssetHubWestend::sibling_location_of(PenpalWestendA::para_id()); - let beneficiary_id = PenpalWestendAReceiver::get(); - let amount_to_send = ASSET_MIN_BALANCE * 1000; - let assets = - (X2(PalletInstance(ASSETS_PALLET_ID), GeneralIndex(ASSET_ID.into())), amount_to_send) - .into(); - - let system_para_test_args = TestContext { - sender: AssetHubWestendSender::get(), - receiver: PenpalWestendAReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), - }; - - let mut system_para_test = SystemParaToParaTest::new(system_para_test_args); - - system_para_test.set_assertion::(system_para_to_para_assets_assertions); - // TODO: Add assertions when Penpal is able to manage assets - system_para_test - .set_dispatchable::(system_para_to_para_reserve_transfer_assets); - system_para_test.assert(); -} diff --git a/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/Cargo.toml deleted file mode 100644 index 7ecf87158241d443c9592e9da85a82dd6eb26729..0000000000000000000000000000000000000000 --- a/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/Cargo.toml +++ /dev/null @@ -1,32 +0,0 @@ -[package] -name = "bridge-hub-rococo-integration-tests" -version = "1.0.0" -authors.workspace = 
true -edition.workspace = true -license = "Apache-2.0" -description = "Bridge Hub Rococo runtime integration tests with xcm-emulator" -publish = false - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false } - -# Substrate -frame-support = { path = "../../../../../../substrate/frame/support", default-features = false} - -# Polkadot -polkadot-core-primitives = { path = "../../../../../../polkadot/core-primitives", default-features = false} -polkadot-parachain-primitives = { path = "../../../../../../polkadot/parachain", default-features = false} -polkadot-runtime-parachains = { path = "../../../../../../polkadot/runtime/parachains" } -xcm = { package = "staging-xcm", path = "../../../../../../polkadot/xcm", default-features = false} -pallet-xcm = { path = "../../../../../../polkadot/xcm/pallet-xcm", default-features = false} - -# Cumulus -parachains-common = { path = "../../../../common" } -cumulus-pallet-xcmp-queue = { path = "../../../../../pallets/xcmp-queue", default-features = false} -cumulus-pallet-dmp-queue = { path = "../../../../../pallets/dmp-queue", default-features = false} -pallet-bridge-messages = { path = "../../../../../../bridges/modules/messages", default-features = false} -bp-messages = { path = "../../../../../../bridges/primitives/messages", default-features = false} - -# Local -xcm-emulator = { path = "../../../../../xcm/xcm-emulator", default-features = false} -integration-tests-common = { path = "../../common", default-features = false} diff --git a/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/src/lib.rs deleted file mode 100644 index 1c73124c512566702c62cadff5fe2c9e7bb75592..0000000000000000000000000000000000000000 --- a/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/src/lib.rs +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -pub use bp_messages::LaneId; -pub use frame_support::assert_ok; -pub use integration_tests_common::{ - constants::{ - asset_hub_rococo::ED as ASSET_HUB_ROCOCO_ED, rococo::ED as ROCOCO_ED, PROOF_SIZE_THRESHOLD, - REF_TIME_THRESHOLD, XCM_V3, - }, - xcm_helpers::{xcm_transact_paid_execution, xcm_transact_unpaid_execution}, - AssetHubRococo, AssetHubRococoReceiver, AssetHubWococo, BridgeHubRococo, BridgeHubWococo, - PenpalRococoA, Rococo, RococoPallet, -}; -pub use parachains_common::{AccountId, Balance}; -pub use xcm::{ - prelude::{AccountId32 as AccountId32Junction, *}, - v3::{ - Error, - NetworkId::{Rococo as RococoId, Wococo as WococoId}, - }, -}; -pub use xcm_emulator::{ - assert_expected_events, bx, helpers::weight_within_threshold, Chain, Parachain as Para, - RelayChain as Relay, Test, TestArgs, TestContext, TestExt, -}; - -pub const ASSET_ID: u32 = 1; -pub const ASSET_MIN_BALANCE: u128 = 1000; -pub const ASSETS_PALLET_ID: u8 = 50; - -pub type RelayToSystemParaTest = Test; -pub type SystemParaToRelayTest = Test; -pub type SystemParaToParaTest = Test; - -/// Returns a `TestArgs` instance to de used for the Relay Chain accross integraton tests -pub fn relay_test_args(amount: Balance) -> TestArgs { - TestArgs { - dest: Rococo::child_location_of(AssetHubRococo::para_id()), - beneficiary: AccountId32Junction { - network: None, - id: AssetHubRococoReceiver::get().into(), - } - .into(), - amount, - assets: (Here, amount).into(), - asset_id: None, - fee_asset_item: 0, - weight_limit: WeightLimit::Unlimited, - } -} - -#[cfg(test)] -#[cfg(not(feature = "runtime-benchmarks"))] -mod tests; diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..dbf7e9c9a700556f4e5b1da69c7373a0b84f9e9e --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "asset-hub-rococo-emulated-chain" +version = "0.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +description = "Asset Hub Rococo emulated chain" +publish = false + +[dependencies] +serde_json = "1.0.104" + +# Substrate +sp-core = { path = "../../../../../../../../substrate/primitives/core", default-features = false } +sp-runtime = { path = "../../../../../../../../substrate/primitives/runtime", default-features = false } +frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } + +# Polakadot +parachains-common = { path = "../../../../../../../parachains/common" } + +# Cumulus +cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } +emulated-integration-tests-common = { path = "../../../../common", default-features = false } +asset-hub-rococo-runtime = { path = "../../../../../../runtimes/assets/asset-hub-rococo" } +rococo-emulated-chain = { path = "../../../relays/rococo" } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/genesis.rs new file mode 100644 index 0000000000000000000000000000000000000000..c19d05ec73075b1a2fb8e483bd7ec783c93f0b9d --- /dev/null +++ 
b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/genesis.rs @@ -0,0 +1,70 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Substrate +use sp_core::storage::Storage; + +// Cumulus +use emulated_integration_tests_common::{ + accounts, build_genesis_storage, collators, SAFE_XCM_VERSION, +}; +use parachains_common::Balance; + +pub const PARA_ID: u32 = 1000; +pub const ED: Balance = parachains_common::rococo::currency::EXISTENTIAL_DEPOSIT; + +pub fn genesis() -> Storage { + let genesis_config = asset_hub_rococo_runtime::RuntimeGenesisConfig { + system: asset_hub_rococo_runtime::SystemConfig::default(), + balances: asset_hub_rococo_runtime::BalancesConfig { + balances: accounts::init_balances() + .iter() + .cloned() + .map(|k| (k, ED * 4096 * 4096)) + .collect(), + }, + parachain_info: asset_hub_rococo_runtime::ParachainInfoConfig { + parachain_id: PARA_ID.into(), + ..Default::default() + }, + collator_selection: asset_hub_rococo_runtime::CollatorSelectionConfig { + invulnerables: collators::invulnerables().iter().cloned().map(|(acc, _)| acc).collect(), + candidacy_bond: ED * 16, + ..Default::default() + }, + session: asset_hub_rococo_runtime::SessionConfig { + keys: collators::invulnerables() + .into_iter() + .map(|(acc, aura)| { + ( + acc.clone(), // account id + acc, // validator id + asset_hub_rococo_runtime::SessionKeys { aura }, // session keys + ) + }) + .collect(), + }, + polkadot_xcm: asset_hub_rococo_runtime::PolkadotXcmConfig { + safe_xcm_version: Some(SAFE_XCM_VERSION), + ..Default::default() + }, + ..Default::default() + }; + + build_genesis_storage( + &genesis_config, + asset_hub_rococo_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), + ) +} diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..1ed22b3cc4f5c543bf9e0d989e3048f206403daa --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs @@ -0,0 +1,57 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +pub mod genesis; + +// Substrate +use frame_support::traits::OnInitialize; + +// Cumulus +use emulated_integration_tests_common::{ + impl_accounts_helpers_for_parachain, impl_assert_events_helpers_for_parachain, + impl_assets_helpers_for_parachain, impl_foreign_assets_helpers_for_parachain, impls::Parachain, + xcm_emulator::decl_test_parachains, +}; +use rococo_emulated_chain::Rococo; + +// AssetHubRococo Parachain declaration +decl_test_parachains! { + pub struct AssetHubRococo { + genesis = genesis::genesis(), + on_init = { + asset_hub_rococo_runtime::AuraExt::on_initialize(1); + }, + runtime = asset_hub_rococo_runtime, + core = { + XcmpMessageHandler: asset_hub_rococo_runtime::XcmpQueue, + LocationToAccountId: asset_hub_rococo_runtime::xcm_config::LocationToAccountId, + ParachainInfo: asset_hub_rococo_runtime::ParachainInfo, + }, + pallets = { + PolkadotXcm: asset_hub_rococo_runtime::PolkadotXcm, + Assets: asset_hub_rococo_runtime::Assets, + ForeignAssets: asset_hub_rococo_runtime::ForeignAssets, + PoolAssets: asset_hub_rococo_runtime::PoolAssets, + AssetConversion: asset_hub_rococo_runtime::AssetConversion, + Balances: asset_hub_rococo_runtime::Balances, + } + }, +} + +// AssetHubRococo implementation +impl_accounts_helpers_for_parachain!(AssetHubRococo); +impl_assert_events_helpers_for_parachain!(AssetHubRococo, false); +impl_assets_helpers_for_parachain!(AssetHubRococo, Rococo); +impl_foreign_assets_helpers_for_parachain!(AssetHubRococo, Rococo); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..0ff817b6b96109e7f57508bbac16d672c76886e0 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "asset-hub-westend-emulated-chain" +version = "0.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +description = "Asset Hub Westend emulated chain" +publish = false + +[dependencies] +serde_json = "1.0.104" + +# Substrate +sp-core = { path = "../../../../../../../../substrate/primitives/core", default-features = false } +sp-runtime = { path = "../../../../../../../../substrate/primitives/runtime", default-features = false } +frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } + +# Polakadot +parachains-common = { path = "../../../../../../../parachains/common" } + +# Cumulus +cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } +emulated-integration-tests-common = { path = "../../../../common", default-features = false } +asset-hub-westend-runtime = { path = "../../../../../../runtimes/assets/asset-hub-westend" } +westend-emulated-chain = { path = "../../../relays/westend" } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/genesis.rs new file mode 100644 index 0000000000000000000000000000000000000000..78d6478fdf1ea0890abe2f081726a130762e257c --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/genesis.rs @@ -0,0 +1,67 @@ +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Substrate +use sp_core::storage::Storage; + +// Cumulus +use emulated_integration_tests_common::{ + accounts, build_genesis_storage, collators, SAFE_XCM_VERSION, +}; +use parachains_common::Balance; + +pub const PARA_ID: u32 = 1000; +pub const ED: Balance = parachains_common::westend::currency::EXISTENTIAL_DEPOSIT; + +pub fn genesis() -> Storage { + let genesis_config = asset_hub_westend_runtime::RuntimeGenesisConfig { + system: asset_hub_westend_runtime::SystemConfig::default(), + balances: asset_hub_westend_runtime::BalancesConfig { + balances: accounts::init_balances().iter().cloned().map(|k| (k, ED * 4096)).collect(), + }, + parachain_info: asset_hub_westend_runtime::ParachainInfoConfig { + parachain_id: PARA_ID.into(), + ..Default::default() + }, + collator_selection: asset_hub_westend_runtime::CollatorSelectionConfig { + invulnerables: collators::invulnerables().iter().cloned().map(|(acc, _)| acc).collect(), + candidacy_bond: ED * 16, + ..Default::default() + }, + session: asset_hub_westend_runtime::SessionConfig { + keys: collators::invulnerables() + .into_iter() + .map(|(acc, aura)| { + ( + acc.clone(), // account id + acc, // validator id + asset_hub_westend_runtime::SessionKeys { aura }, // session keys + ) + }) + .collect(), + }, + polkadot_xcm: asset_hub_westend_runtime::PolkadotXcmConfig { + safe_xcm_version: Some(SAFE_XCM_VERSION), + ..Default::default() + }, + ..Default::default() + }; + + build_genesis_storage( + &genesis_config, + asset_hub_westend_runtime::WASM_BINARY + .expect("WASM binary was not built, please build it!"), + ) +} diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..4dcdb613ac2059aa2a97a44a96e68342f96b9948 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs @@ -0,0 +1,57 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +pub mod genesis; + +// Substrate +use frame_support::traits::OnInitialize; + +// Cumulus +use emulated_integration_tests_common::{ + impl_accounts_helpers_for_parachain, impl_assert_events_helpers_for_parachain, + impl_assets_helpers_for_parachain, impl_foreign_assets_helpers_for_parachain, impls::Parachain, + xcm_emulator::decl_test_parachains, +}; +use westend_emulated_chain::Westend; + +// AssetHubWestend Parachain declaration +decl_test_parachains! { + pub struct AssetHubWestend { + genesis = genesis::genesis(), + on_init = { + asset_hub_westend_runtime::AuraExt::on_initialize(1); + }, + runtime = asset_hub_westend_runtime, + core = { + XcmpMessageHandler: asset_hub_westend_runtime::XcmpQueue, + LocationToAccountId: asset_hub_westend_runtime::xcm_config::LocationToAccountId, + ParachainInfo: asset_hub_westend_runtime::ParachainInfo, + }, + pallets = { + PolkadotXcm: asset_hub_westend_runtime::PolkadotXcm, + Balances: asset_hub_westend_runtime::Balances, + Assets: asset_hub_westend_runtime::Assets, + ForeignAssets: asset_hub_westend_runtime::ForeignAssets, + PoolAssets: asset_hub_westend_runtime::PoolAssets, + AssetConversion: asset_hub_westend_runtime::AssetConversion, + } + }, +} + +// AssetHubWestend implementation +impl_accounts_helpers_for_parachain!(AssetHubWestend); +impl_assert_events_helpers_for_parachain!(AssetHubWestend, false); +impl_assets_helpers_for_parachain!(AssetHubWestend, Westend); +impl_foreign_assets_helpers_for_parachain!(AssetHubWestend, Westend); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..43c0f5fd14c9b2ba8d0b493e0e84ce93cb6113ca --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "bridge-hub-rococo-emulated-chain" +version = "0.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +description = "Bridge Hub Rococo emulated chain" +publish = false + +[dependencies] +serde_json = "1.0.104" + +# Substrate +sp-core = { path = "../../../../../../../../substrate/primitives/core", default-features = false } +sp-runtime = { path = "../../../../../../../../substrate/primitives/runtime", default-features = false } +frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } + +# Polakadot +parachains-common = { path = "../../../../../../../parachains/common" } + +# Cumulus +cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } +emulated-integration-tests-common = { path = "../../../../common", default-features = false } +bridge-hub-rococo-runtime = { path = "../../../../../../runtimes/bridge-hubs/bridge-hub-rococo" } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/genesis.rs new file mode 100644 index 0000000000000000000000000000000000000000..fa9a287adf88b56eff1746a06471343504418024 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/genesis.rs @@ -0,0 +1,75 @@ +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Substrate +use sp_core::{sr25519, storage::Storage}; + +// Cumulus +use emulated_integration_tests_common::{ + accounts, build_genesis_storage, collators, get_account_id_from_seed, SAFE_XCM_VERSION, +}; +use parachains_common::Balance; + +pub const PARA_ID: u32 = 1013; +pub const ED: Balance = parachains_common::rococo::currency::EXISTENTIAL_DEPOSIT; + +pub fn genesis() -> Storage { + let genesis_config = bridge_hub_rococo_runtime::RuntimeGenesisConfig { + system: bridge_hub_rococo_runtime::SystemConfig::default(), + balances: bridge_hub_rococo_runtime::BalancesConfig { + balances: accounts::init_balances().iter().cloned().map(|k| (k, ED * 4096)).collect(), + }, + parachain_info: bridge_hub_rococo_runtime::ParachainInfoConfig { + parachain_id: PARA_ID.into(), + ..Default::default() + }, + collator_selection: bridge_hub_rococo_runtime::CollatorSelectionConfig { + invulnerables: collators::invulnerables().iter().cloned().map(|(acc, _)| acc).collect(), + candidacy_bond: ED * 16, + ..Default::default() + }, + session: bridge_hub_rococo_runtime::SessionConfig { + keys: collators::invulnerables() + .into_iter() + .map(|(acc, aura)| { + ( + acc.clone(), // account id + acc, // validator id + bridge_hub_rococo_runtime::SessionKeys { aura }, // session keys + ) + }) + .collect(), + }, + polkadot_xcm: bridge_hub_rococo_runtime::PolkadotXcmConfig { + safe_xcm_version: Some(SAFE_XCM_VERSION), + ..Default::default() + }, + bridge_westend_grandpa: bridge_hub_rococo_runtime::BridgeWestendGrandpaConfig { + owner: Some(get_account_id_from_seed::(accounts::BOB)), + ..Default::default() + }, + bridge_westend_messages: bridge_hub_rococo_runtime::BridgeWestendMessagesConfig { + owner: Some(get_account_id_from_seed::(accounts::BOB)), + ..Default::default() + }, + ..Default::default() + }; + + build_genesis_storage( + &genesis_config, + bridge_hub_rococo_runtime::WASM_BINARY + .expect("WASM binary was not built, please build it!"), + ) +} diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..ea0c9513abc392bc0558b0b8d04e28f8291f742e --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs @@ -0,0 +1,49 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod genesis; + +// Substrate +use frame_support::traits::OnInitialize; + +// Cumulus +use emulated_integration_tests_common::{ + impl_accounts_helpers_for_parachain, impl_assert_events_helpers_for_parachain, + impls::Parachain, xcm_emulator::decl_test_parachains, +}; + +// BridgeHubRococo Parachain declaration +decl_test_parachains! { + pub struct BridgeHubRococo { + genesis = genesis::genesis(), + on_init = { + bridge_hub_rococo_runtime::AuraExt::on_initialize(1); + }, + runtime = bridge_hub_rococo_runtime, + core = { + XcmpMessageHandler: bridge_hub_rococo_runtime::XcmpQueue, + LocationToAccountId: bridge_hub_rococo_runtime::xcm_config::LocationToAccountId, + ParachainInfo: bridge_hub_rococo_runtime::ParachainInfo, + }, + pallets = { + PolkadotXcm: bridge_hub_rococo_runtime::PolkadotXcm, + Balances: bridge_hub_rococo_runtime::Balances, + } + }, +} + +// BridgeHubRococo implementation +impl_accounts_helpers_for_parachain!(BridgeHubRococo); +impl_assert_events_helpers_for_parachain!(BridgeHubRococo, false); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..e5e6fd7073933bca21683faef3459afdf1ac8e8b --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "bridge-hub-westend-emulated-chain" +version = "0.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +description = "Bridge Hub Westend emulated chain" +publish = false + +[dependencies] +serde_json = "1.0.104" + +# Substrate +sp-core = { path = "../../../../../../../../substrate/primitives/core", default-features = false } +sp-runtime = { path = "../../../../../../../../substrate/primitives/runtime", default-features = false } +frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } + +# Polakadot +parachains-common = { path = "../../../../../../../parachains/common" } + +# Cumulus +cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } +emulated-integration-tests-common = { path = "../../../../common", default-features = false } +bridge-hub-westend-runtime = { path = "../../../../../../runtimes/bridge-hubs/bridge-hub-westend" } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/genesis.rs new file mode 100644 index 0000000000000000000000000000000000000000..2eb7e0ddbd29a92f21169cb1b70fb1c803ea2623 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/genesis.rs @@ -0,0 +1,75 @@ +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Substrate +use sp_core::{sr25519, storage::Storage}; + +// Cumulus +use emulated_integration_tests_common::{ + accounts, build_genesis_storage, collators, get_account_id_from_seed, SAFE_XCM_VERSION, +}; +use parachains_common::Balance; + +pub const PARA_ID: u32 = 1002; +pub const ED: Balance = parachains_common::westend::currency::EXISTENTIAL_DEPOSIT; + +pub fn genesis() -> Storage { + let genesis_config = bridge_hub_westend_runtime::RuntimeGenesisConfig { + system: bridge_hub_westend_runtime::SystemConfig::default(), + balances: bridge_hub_westend_runtime::BalancesConfig { + balances: accounts::init_balances().iter().cloned().map(|k| (k, ED * 4096)).collect(), + }, + parachain_info: bridge_hub_westend_runtime::ParachainInfoConfig { + parachain_id: PARA_ID.into(), + ..Default::default() + }, + collator_selection: bridge_hub_westend_runtime::CollatorSelectionConfig { + invulnerables: collators::invulnerables().iter().cloned().map(|(acc, _)| acc).collect(), + candidacy_bond: ED * 16, + ..Default::default() + }, + session: bridge_hub_westend_runtime::SessionConfig { + keys: collators::invulnerables() + .into_iter() + .map(|(acc, aura)| { + ( + acc.clone(), // account id + acc, // validator id + bridge_hub_westend_runtime::SessionKeys { aura }, // session keys + ) + }) + .collect(), + }, + polkadot_xcm: bridge_hub_westend_runtime::PolkadotXcmConfig { + safe_xcm_version: Some(SAFE_XCM_VERSION), + ..Default::default() + }, + bridge_rococo_grandpa: bridge_hub_westend_runtime::BridgeRococoGrandpaConfig { + owner: Some(get_account_id_from_seed::(accounts::BOB)), + ..Default::default() + }, + bridge_rococo_messages: bridge_hub_westend_runtime::BridgeRococoMessagesConfig { + owner: Some(get_account_id_from_seed::(accounts::BOB)), + ..Default::default() + }, + ..Default::default() + }; + + build_genesis_storage( + &genesis_config, + bridge_hub_westend_runtime::WASM_BINARY + .expect("WASM binary was not built, please build it!"), + ) +} diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..4a130ac1f27d92ade29d109c7ce6f784b8be32ed --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs @@ -0,0 +1,49 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod genesis; + +// Substrate +use frame_support::traits::OnInitialize; + +// Cumulus +use emulated_integration_tests_common::{ + impl_accounts_helpers_for_parachain, impl_assert_events_helpers_for_parachain, + impls::Parachain, xcm_emulator::decl_test_parachains, +}; + +// BridgeHubWestend Parachain declaration +decl_test_parachains! { + pub struct BridgeHubWestend { + genesis = genesis::genesis(), + on_init = { + bridge_hub_westend_runtime::AuraExt::on_initialize(1); + }, + runtime = bridge_hub_westend_runtime, + core = { + XcmpMessageHandler: bridge_hub_westend_runtime::XcmpQueue, + LocationToAccountId: bridge_hub_westend_runtime::xcm_config::LocationToAccountId, + ParachainInfo: bridge_hub_westend_runtime::ParachainInfo, + }, + pallets = { + PolkadotXcm: bridge_hub_westend_runtime::PolkadotXcm, + Balances: bridge_hub_westend_runtime::Balances, + } + }, +} + +// BridgeHubWestend implementation +impl_accounts_helpers_for_parachain!(BridgeHubWestend); +impl_assert_events_helpers_for_parachain!(BridgeHubWestend, false); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..c55b10d7180c83f55f1f0f9cdf004fab91abedfd --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "penpal-emulated-chain" +version = "0.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +description = "Penpal emulated chain" +publish = false + +[dependencies] +serde_json = "1.0.104" + +# Substrate +sp-core = { path = "../../../../../../../../substrate/primitives/core", default-features = false } +sp-runtime = { path = "../../../../../../../../substrate/primitives/runtime", default-features = false } +frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } + +# Polakadot +parachains-common = { path = "../../../../../../../parachains/common" } + +# Cumulus +cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } +emulated-integration-tests-common = { path = "../../../../common", default-features = false } +penpal-runtime = { path = "../../../../../../runtimes/testing/penpal" } +rococo-emulated-chain = { path = "../../../relays/rococo" } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/genesis.rs new file mode 100644 index 0000000000000000000000000000000000000000..9ab32a977d7113a1f01fc287d591f2cc2c550266 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/genesis.rs @@ -0,0 +1,71 @@ +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Substrate
+use sp_core::{sr25519, storage::Storage};
+
+// Cumulus
+use emulated_integration_tests_common::{
+	accounts, build_genesis_storage, collators, get_account_id_from_seed, SAFE_XCM_VERSION,
+};
+use parachains_common::Balance;
+
+// Penpal
+pub const PARA_ID_A: u32 = 2000;
+pub const PARA_ID_B: u32 = 2001;
+pub const ED: Balance = penpal_runtime::EXISTENTIAL_DEPOSIT;
+
+pub fn genesis(para_id: u32) -> Storage {
+	let genesis_config = penpal_runtime::RuntimeGenesisConfig {
+		system: penpal_runtime::SystemConfig::default(),
+		balances: penpal_runtime::BalancesConfig {
+			balances: accounts::init_balances().iter().cloned().map(|k| (k, ED * 4096)).collect(),
+		},
+		parachain_info: penpal_runtime::ParachainInfoConfig {
+			parachain_id: para_id.into(),
+			..Default::default()
+		},
+		collator_selection: penpal_runtime::CollatorSelectionConfig {
+			invulnerables: collators::invulnerables().iter().cloned().map(|(acc, _)| acc).collect(),
+			candidacy_bond: ED * 16,
+			..Default::default()
+		},
+		session: penpal_runtime::SessionConfig {
+			keys: collators::invulnerables()
+				.into_iter()
+				.map(|(acc, aura)| {
+					(
+						acc.clone(),                          // account id
+						acc,                                  // validator id
+						penpal_runtime::SessionKeys { aura }, // session keys
+					)
+				})
+				.collect(),
+		},
+		polkadot_xcm: penpal_runtime::PolkadotXcmConfig {
+			safe_xcm_version: Some(SAFE_XCM_VERSION),
+			..Default::default()
+		},
+		sudo: penpal_runtime::SudoConfig {
+			key: Some(get_account_id_from_seed::<sr25519::Public>("Alice")),
+		},
+		..Default::default()
+	};
+
+	build_genesis_storage(
+		&genesis_config,
+		penpal_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"),
+	)
+}
diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..f9a422bfcba7fbbcd3211f370e13565c06cfd4f7
--- /dev/null
+++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs
@@ -0,0 +1,71 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +mod genesis; +pub use genesis::{genesis, ED, PARA_ID_A, PARA_ID_B}; + +// Substrate +use frame_support::traits::OnInitialize; + +// Cumulus +use emulated_integration_tests_common::{ + impl_accounts_helpers_for_parachain, impl_assert_events_helpers_for_parachain, + impl_assets_helpers_for_parachain, impls::Parachain, xcm_emulator::decl_test_parachains, +}; +use rococo_emulated_chain::Rococo; + +// Penpal Parachain declaration +decl_test_parachains! { + pub struct PenpalA { + genesis = genesis(PARA_ID_A), + on_init = { + penpal_runtime::AuraExt::on_initialize(1); + }, + runtime = penpal_runtime, + core = { + XcmpMessageHandler: penpal_runtime::XcmpQueue, + LocationToAccountId: penpal_runtime::xcm_config::LocationToAccountId, + ParachainInfo: penpal_runtime::ParachainInfo, + }, + pallets = { + PolkadotXcm: penpal_runtime::PolkadotXcm, + Assets: penpal_runtime::Assets, + Balances: penpal_runtime::Balances, + } + }, + pub struct PenpalB { + genesis = genesis(PARA_ID_B), + on_init = { + penpal_runtime::AuraExt::on_initialize(1); + }, + runtime = penpal_runtime, + core = { + XcmpMessageHandler: penpal_runtime::XcmpQueue, + LocationToAccountId: penpal_runtime::xcm_config::LocationToAccountId, + ParachainInfo: penpal_runtime::ParachainInfo, + }, + pallets = { + PolkadotXcm: penpal_runtime::PolkadotXcm, + Assets: penpal_runtime::Assets, + Balances: penpal_runtime::Balances, + } + }, +} + +// Penpal implementation +impl_accounts_helpers_for_parachain!(PenpalA); +impl_assets_helpers_for_parachain!(PenpalA, Rococo); +impl_assert_events_helpers_for_parachain!(PenpalA, true); +impl_assert_events_helpers_for_parachain!(PenpalB, true); diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..325c722951739a917815835fabdcdb3f2df6c4b5 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "rococo-emulated-chain" +version = "0.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +description = "Rococo emulated chain" +publish = false + +[dependencies] +serde_json = "1.0.104" + +# Substrate +sp-core = { path = "../../../../../../../substrate/primitives/core", default-features = false } +sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false } +sp-authority-discovery = { path = "../../../../../../../substrate/primitives/authority-discovery", default-features = false } +sp-consensus-babe = { path = "../../../../../../../substrate/primitives/consensus/babe", default-features = false } +beefy-primitives = { package = "sp-consensus-beefy", path = "../../../../../../../substrate/primitives/consensus/beefy" } +grandpa = { package = "sc-consensus-grandpa", path = "../../../../../../../substrate/client/consensus/grandpa", default-features = false } +pallet-im-online = { path = "../../../../../../../substrate/frame/im-online", default-features = false } + +# Polkadot +polkadot-primitives = { path = "../../../../../../../polkadot/primitives", default-features = false } +rococo-runtime-constants = { path = "../../../../../../../polkadot/runtime/rococo/constants", default-features = false } +rococo-runtime = { path = "../../../../../../../polkadot/runtime/rococo" } + +# Cumulus +parachains-common = { path = "../../../../../../parachains/common" } +emulated-integration-tests-common = { path = 
"../../../common", default-features = false } diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/genesis.rs new file mode 100644 index 0000000000000000000000000000000000000000..6f5f3923ead9d251fd3f6d0c9903fe29cbc2d7f6 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/genesis.rs @@ -0,0 +1,101 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Substrate +use beefy_primitives::ecdsa_crypto::AuthorityId as BeefyId; +use grandpa::AuthorityId as GrandpaId; +use pallet_im_online::sr25519::AuthorityId as ImOnlineId; +use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; +use sp_consensus_babe::AuthorityId as BabeId; +use sp_core::{sr25519, storage::Storage}; + +// Polkadot +use polkadot_primitives::{AssignmentId, ValidatorId}; + +// Cumulus +use emulated_integration_tests_common::{ + accounts, build_genesis_storage, get_account_id_from_seed, get_from_seed, get_host_config, + validators, +}; +use parachains_common::Balance; +use rococo_runtime_constants::currency::UNITS as ROC; + +pub const ED: Balance = rococo_runtime_constants::currency::EXISTENTIAL_DEPOSIT; +const ENDOWMENT: u128 = 1_000_000 * ROC; + +fn session_keys( + babe: BabeId, + grandpa: GrandpaId, + im_online: ImOnlineId, + para_validator: ValidatorId, + para_assignment: AssignmentId, + authority_discovery: AuthorityDiscoveryId, + beefy: BeefyId, +) -> rococo_runtime::SessionKeys { + rococo_runtime::SessionKeys { + babe, + grandpa, + im_online, + para_validator, + para_assignment, + authority_discovery, + beefy, + } +} + +pub fn genesis() -> Storage { + let genesis_config = rococo_runtime::RuntimeGenesisConfig { + system: rococo_runtime::SystemConfig::default(), + balances: rococo_runtime::BalancesConfig { + balances: accounts::init_balances().iter().map(|k| (k.clone(), ENDOWMENT)).collect(), + }, + session: rococo_runtime::SessionConfig { + keys: validators::initial_authorities() + .iter() + .map(|x| { + ( + x.0.clone(), + x.0.clone(), + session_keys( + x.2.clone(), + x.3.clone(), + x.4.clone(), + x.5.clone(), + x.6.clone(), + x.7.clone(), + get_from_seed::("Alice"), + ), + ) + }) + .collect::>(), + }, + babe: rococo_runtime::BabeConfig { + authorities: Default::default(), + epoch_config: Some(rococo_runtime::BABE_GENESIS_EPOCH_CONFIG), + ..Default::default() + }, + sudo: rococo_runtime::SudoConfig { + key: Some(get_account_id_from_seed::("Alice")), + }, + configuration: rococo_runtime::ConfigurationConfig { config: get_host_config() }, + registrar: rococo_runtime::RegistrarConfig { + next_free_para_id: polkadot_primitives::LOWEST_PUBLIC_ID, + ..Default::default() + }, + ..Default::default() + }; + + build_genesis_storage(&genesis_config, rococo_runtime::WASM_BINARY.unwrap()) +} diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/lib.rs 
b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..7ace96147106c34af4e8a9ebfec23f59b8949928 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/lib.rs @@ -0,0 +1,48 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod genesis; + +// Cumulus +use emulated_integration_tests_common::{ + impl_accounts_helpers_for_relay_chain, impl_assert_events_helpers_for_relay_chain, + impl_hrmp_channels_helpers_for_relay_chain, impl_send_transact_helpers_for_relay_chain, + xcm_emulator::decl_test_relay_chains, +}; + +// Rococo declaration +decl_test_relay_chains! { + #[api_version(9)] + pub struct Rococo { + genesis = genesis::genesis(), + on_init = (), + runtime = rococo_runtime, + core = { + SovereignAccountOf: rococo_runtime::xcm_config::LocationConverter, + }, + pallets = { + XcmPallet: rococo_runtime::XcmPallet, + Sudo: rococo_runtime::Sudo, + Balances: rococo_runtime::Balances, + Hrmp: rococo_runtime::Hrmp, + } + }, +} + +// Rococo implementation +impl_accounts_helpers_for_relay_chain!(Rococo); +impl_assert_events_helpers_for_relay_chain!(Rococo); +impl_hrmp_channels_helpers_for_relay_chain!(Rococo); +impl_send_transact_helpers_for_relay_chain!(Rococo); diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..20b9737735fd4713196f930e2f44a8a2526e32f9 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "westend-emulated-chain" +version = "0.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +description = "Westend emulated chain" +publish = false + +[dependencies] +serde_json = "1.0.104" + +# Substrate +sp-core = { path = "../../../../../../../substrate/primitives/core", default-features = false } +sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false } +sp-authority-discovery = { path = "../../../../../../../substrate/primitives/authority-discovery", default-features = false } +sp-consensus-babe = { path = "../../../../../../../substrate/primitives/consensus/babe", default-features = false } +beefy-primitives = { package = "sp-consensus-beefy", path = "../../../../../../../substrate/primitives/consensus/beefy" } +grandpa = { package = "sc-consensus-grandpa", path = "../../../../../../../substrate/client/consensus/grandpa", default-features = false } +pallet-im-online = { path = "../../../../../../../substrate/frame/im-online", default-features = false } +pallet-staking = { path = "../../../../../../../substrate/frame/staking", default-features = false } + +# Polkadot +polkadot-primitives = { path = 
"../../../../../../../polkadot/primitives", default-features = false } +westend-runtime-constants = { path = "../../../../../../../polkadot/runtime/westend/constants", default-features = false } +westend-runtime = { path = "../../../../../../../polkadot/runtime/westend" } + +# Cumulus +parachains-common = { path = "../../../../../../parachains/common" } +emulated-integration-tests-common = { path = "../../../common", default-features = false } diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs new file mode 100644 index 0000000000000000000000000000000000000000..e87b85881d3cbfcbe61e01c9529c1fb1d48e559f --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs @@ -0,0 +1,109 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Substrate +use beefy_primitives::ecdsa_crypto::AuthorityId as BeefyId; +use grandpa::AuthorityId as GrandpaId; +use pallet_im_online::sr25519::AuthorityId as ImOnlineId; +use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; +use sp_consensus_babe::AuthorityId as BabeId; +use sp_core::storage::Storage; +use sp_runtime::Perbill; + +// Polkadot +use polkadot_primitives::{AssignmentId, ValidatorId}; + +// Cumulus +use emulated_integration_tests_common::{ + accounts, build_genesis_storage, get_from_seed, get_host_config, validators, +}; +use parachains_common::Balance; +use westend_runtime_constants::currency::UNITS as WND; + +pub const ED: Balance = westend_runtime_constants::currency::EXISTENTIAL_DEPOSIT; +const ENDOWMENT: u128 = 1_000_000 * WND; +const STASH: u128 = 100 * WND; + +fn session_keys( + babe: BabeId, + grandpa: GrandpaId, + im_online: ImOnlineId, + para_validator: ValidatorId, + para_assignment: AssignmentId, + authority_discovery: AuthorityDiscoveryId, + beefy: BeefyId, +) -> westend_runtime::SessionKeys { + westend_runtime::SessionKeys { + babe, + grandpa, + im_online, + para_validator, + para_assignment, + authority_discovery, + beefy, + } +} + +pub fn genesis() -> Storage { + let genesis_config = westend_runtime::RuntimeGenesisConfig { + system: westend_runtime::SystemConfig::default(), + balances: westend_runtime::BalancesConfig { + balances: accounts::init_balances().iter().cloned().map(|k| (k, ENDOWMENT)).collect(), + }, + session: westend_runtime::SessionConfig { + keys: validators::initial_authorities() + .iter() + .map(|x| { + ( + x.0.clone(), + x.0.clone(), + session_keys( + x.2.clone(), + x.3.clone(), + x.4.clone(), + x.5.clone(), + x.6.clone(), + x.7.clone(), + get_from_seed::("Alice"), + ), + ) + }) + .collect::>(), + }, + staking: westend_runtime::StakingConfig { + validator_count: validators::initial_authorities().len() as u32, + minimum_validator_count: 1, + stakers: validators::initial_authorities() + .iter() + .map(|x| { + (x.0.clone(), x.1.clone(), STASH, 
westend_runtime::StakerStatus::Validator) + }) + .collect(), + invulnerables: validators::initial_authorities().iter().map(|x| x.0.clone()).collect(), + force_era: pallet_staking::Forcing::ForceNone, + slash_reward_fraction: Perbill::from_percent(10), + ..Default::default() + }, + babe: westend_runtime::BabeConfig { + authorities: Default::default(), + epoch_config: Some(westend_runtime::BABE_GENESIS_EPOCH_CONFIG), + ..Default::default() + }, + configuration: westend_runtime::ConfigurationConfig { config: get_host_config() }, + ..Default::default() + }; + + build_genesis_storage(&genesis_config, westend_runtime::WASM_BINARY.unwrap()) +} diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..2ba47250d564e1c487829d4afa034b1bf0770f67 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/lib.rs @@ -0,0 +1,50 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod genesis; + +// Cumulus +use emulated_integration_tests_common::{ + impl_accounts_helpers_for_relay_chain, impl_assert_events_helpers_for_relay_chain, + impl_hrmp_channels_helpers_for_relay_chain, impl_send_transact_helpers_for_relay_chain, + xcm_emulator::decl_test_relay_chains, +}; + +// Westend declaration +decl_test_relay_chains! 
{ + #[api_version(9)] + pub struct Westend { + genesis = genesis::genesis(), + on_init = (), + runtime = westend_runtime, + core = { + SovereignAccountOf: westend_runtime::xcm_config::LocationConverter, + }, + pallets = { + XcmPallet: westend_runtime::XcmPallet, + Sudo: westend_runtime::Sudo, + Balances: westend_runtime::Balances, + Treasury: westend_runtime::Treasury, + AssetRate: westend_runtime::AssetRate, + Hrmp: westend_runtime::Hrmp, + } + }, +} + +// Westend implementation +impl_accounts_helpers_for_relay_chain!(Westend); +impl_assert_events_helpers_for_relay_chain!(Westend); +impl_hrmp_channels_helpers_for_relay_chain!(Westend); +impl_send_transact_helpers_for_relay_chain!(Westend); diff --git a/cumulus/parachains/integration-tests/emulated/common/Cargo.toml b/cumulus/parachains/integration-tests/emulated/common/Cargo.toml index e47fdfcdfd6c678629f47f52c652190cc04a83ac..08bb284cded2c7114b4348cb996d621c1c18c1d6 100644 --- a/cumulus/parachains/integration-tests/emulated/common/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/common/Cargo.toml @@ -1,15 +1,15 @@ [package] -name = "integration-tests-common" +name = "emulated-integration-tests-common" version = "1.0.0" authors.workspace = true edition.workspace = true license = "Apache-2.0" description = "Common resources for integration testing with xcm-emulator" -publish = false [dependencies] codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false } paste = "1.0.14" +serde_json = "1.0.108" # Substrate grandpa = { package = "sc-consensus-grandpa", path = "../../../../../substrate/client/consensus/grandpa" } @@ -19,69 +19,27 @@ frame-support = { path = "../../../../../substrate/frame/support", default-featu sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} sp-consensus-babe = { path = "../../../../../substrate/primitives/consensus/babe", default-features = false} pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false} -pallet-staking = { path = "../../../../../substrate/frame/staking", default-features = false} +pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false} pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false} pallet-im-online = { path = "../../../../../substrate/frame/im-online", default-features = false} beefy-primitives = { package = "sp-consensus-beefy", path = "../../../../../substrate/primitives/consensus/beefy" } # Polkadot -polkadot-core-primitives = { path = "../../../../../polkadot/core-primitives", default-features = false} -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false} polkadot-service = { path = "../../../../../polkadot/node/service", default-features = false, features = ["full-node"] } polkadot-primitives = { path = "../../../../../polkadot/primitives", default-features = false} polkadot-runtime-parachains = { path = "../../../../../polkadot/runtime/parachains" } -rococo-runtime = { path = "../../../../../polkadot/runtime/rococo" } -rococo-runtime-constants = { path = "../../../../../polkadot/runtime/rococo/constants" } -westend-runtime = { path = "../../../../../polkadot/runtime/westend" } -westend-runtime-constants = { path = "../../../../../polkadot/runtime/westend/constants" } xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", 
default-features = false} # Cumulus parachains-common = { path = "../../../common" } cumulus-primitives-core = { path = "../../../../primitives/core" } -penpal-runtime = { path = "../../../runtimes/testing/penpal" } -asset-hub-polkadot-runtime = { path = "../../../runtimes/assets/asset-hub-polkadot" } -asset-hub-kusama-runtime = { path = "../../../runtimes/assets/asset-hub-kusama" } -asset-hub-westend-runtime = { path = "../../../runtimes/assets/asset-hub-westend" } -collectives-polkadot-runtime = { path = "../../../runtimes/collectives/collectives-polkadot" } -bridge-hub-kusama-runtime = { path = "../../../runtimes/bridge-hubs/bridge-hub-kusama" } -bridge-hub-polkadot-runtime = { path = "../../../runtimes/bridge-hubs/bridge-hub-polkadot" } -bridge-hub-rococo-runtime = { path = "../../../runtimes/bridge-hubs/bridge-hub-rococo" } xcm-emulator = { path = "../../../../xcm/xcm-emulator", default-features = false} -cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue" } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false} cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system" } +asset-test-utils = { path = "../../../runtimes/assets/test-utils" } + +# Bridges bp-messages = { path = "../../../../../bridges/primitives/messages" } pallet-bridge-messages = { path = "../../../../../bridges/modules/messages" } bridge-runtime-common = { path = "../../../../../bridges/bin/runtime-common" } - -[features] -runtime-benchmarks = [ - "asset-hub-kusama-runtime/runtime-benchmarks", - "asset-hub-polkadot-runtime/runtime-benchmarks", - "asset-hub-westend-runtime/runtime-benchmarks", - "bridge-hub-kusama-runtime/runtime-benchmarks", - "bridge-hub-polkadot-runtime/runtime-benchmarks", - "bridge-hub-rococo-runtime/runtime-benchmarks", - "bridge-runtime-common/runtime-benchmarks", - "collectives-polkadot-runtime/runtime-benchmarks", - "cumulus-pallet-parachain-system/runtime-benchmarks", - "cumulus-pallet-xcmp-queue/runtime-benchmarks", - "frame-support/runtime-benchmarks", - "pallet-assets/runtime-benchmarks", - "pallet-bridge-messages/runtime-benchmarks", - "pallet-im-online/runtime-benchmarks", - "pallet-message-queue/runtime-benchmarks", - "pallet-staking/runtime-benchmarks", - "pallet-xcm/runtime-benchmarks", - "parachains-common/runtime-benchmarks", - "penpal-runtime/runtime-benchmarks", - "polkadot-parachain-primitives/runtime-benchmarks", - "polkadot-primitives/runtime-benchmarks", - "polkadot-runtime-parachains/runtime-benchmarks", - "polkadot-service/runtime-benchmarks", - "rococo-runtime/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", - "westend-runtime/runtime-benchmarks", -] diff --git a/cumulus/parachains/integration-tests/emulated/common/src/constants.rs b/cumulus/parachains/integration-tests/emulated/common/src/constants.rs deleted file mode 100644 index 33a9ac38c8c6f70dc99b722b17ceb876914ecc78..0000000000000000000000000000000000000000 --- a/cumulus/parachains/integration-tests/emulated/common/src/constants.rs +++ /dev/null @@ -1,591 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Substrate -use beefy_primitives::ecdsa_crypto::AuthorityId as BeefyId; -use grandpa::AuthorityId as GrandpaId; -use pallet_im_online::sr25519::AuthorityId as ImOnlineId; -use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; -use sp_consensus_babe::AuthorityId as BabeId; -use sp_core::{sr25519, storage::Storage, Pair, Public}; -use sp_runtime::{ - traits::{IdentifyAccount, Verify}, - BuildStorage, MultiSignature, Perbill, -}; - -// Cumulus -use parachains_common::{AccountId, AssetHubPolkadotAuraId, AuraId, Balance, BlockNumber}; -use polkadot_primitives::{AssignmentId, ValidatorId}; -use polkadot_runtime_parachains::configuration::HostConfiguration; -use polkadot_service::chain_spec::get_authority_keys_from_seed_no_beefy; -use xcm; - -pub const XCM_V2: u32 = 3; -pub const XCM_V3: u32 = 2; -pub const REF_TIME_THRESHOLD: u64 = 33; -pub const PROOF_SIZE_THRESHOLD: u64 = 33; - -type AccountPublic = ::Signer; - -/// Helper function to generate a crypto pair from seed -fn get_from_seed(seed: &str) -> ::Public { - TPublic::Pair::from_string(&format!("//{}", seed), None) - .expect("static values are valid; qed") - .public() -} - -/// Helper function to generate an account ID from seed. -fn get_account_id_from_seed(seed: &str) -> AccountId -where - AccountPublic: From<::Public>, -{ - AccountPublic::from(get_from_seed::(seed)).into_account() -} - -pub mod accounts { - use super::*; - pub const ALICE: &str = "Alice"; - pub const BOB: &str = "Bob"; - pub const CHARLIE: &str = "Charlie"; - pub const DAVE: &str = "Dave"; - pub const EVE: &str = "Eve"; - pub const FERDIE: &str = "Ferdei"; - pub const ALICE_STASH: &str = "Alice//stash"; - pub const BOB_STASH: &str = "Bob//stash"; - pub const CHARLIE_STASH: &str = "Charlie//stash"; - pub const DAVE_STASH: &str = "Dave//stash"; - pub const EVE_STASH: &str = "Eve//stash"; - pub const FERDIE_STASH: &str = "Ferdie//stash"; - pub const FERDIE_BEEFY: &str = "Ferdie//stash"; - - pub fn init_balances() -> Vec { - vec![ - get_account_id_from_seed::(ALICE), - get_account_id_from_seed::(BOB), - get_account_id_from_seed::(CHARLIE), - get_account_id_from_seed::(DAVE), - get_account_id_from_seed::(EVE), - get_account_id_from_seed::(FERDIE), - get_account_id_from_seed::(ALICE_STASH), - get_account_id_from_seed::(BOB_STASH), - get_account_id_from_seed::(CHARLIE_STASH), - get_account_id_from_seed::(DAVE_STASH), - get_account_id_from_seed::(EVE_STASH), - get_account_id_from_seed::(FERDIE_STASH), - ] - } -} - -pub mod collators { - use super::*; - - pub fn invulnerables_asset_hub_polkadot() -> Vec<(AccountId, AssetHubPolkadotAuraId)> { - vec![ - ( - get_account_id_from_seed::("Alice"), - get_from_seed::("Alice"), - ), - ( - get_account_id_from_seed::("Bob"), - get_from_seed::("Bob"), - ), - ] - } - - pub fn invulnerables() -> Vec<(AccountId, AuraId)> { - vec![ - ( - get_account_id_from_seed::("Alice"), - get_from_seed::("Alice"), - ), - (get_account_id_from_seed::("Bob"), get_from_seed::("Bob")), - ] - } -} - -pub mod validators { - use super::*; - - pub fn initial_authorities() -> Vec<( - AccountId, - AccountId, - BabeId, - GrandpaId, - 
ImOnlineId, - ValidatorId, - AssignmentId, - AuthorityDiscoveryId, - )> { - vec![get_authority_keys_from_seed_no_beefy("Alice")] - } -} - -/// The default XCM version to set in genesis config. -const SAFE_XCM_VERSION: u32 = xcm::prelude::XCM_VERSION; - -// Westend -pub mod westend { - use super::*; - use westend_runtime_constants::currency::UNITS as WND; - pub const ED: Balance = westend_runtime_constants::currency::EXISTENTIAL_DEPOSIT; - const ENDOWMENT: u128 = 1_000_000 * WND; - const STASH: u128 = 100 * WND; - - pub fn get_host_config() -> HostConfiguration { - HostConfiguration { - max_upward_queue_count: 10, - max_upward_queue_size: 51200, - max_upward_message_size: 51200, - max_upward_message_num_per_candidate: 10, - max_downward_message_size: 51200, - hrmp_sender_deposit: 100_000_000_000, - hrmp_recipient_deposit: 100_000_000_000, - hrmp_channel_max_capacity: 1000, - hrmp_channel_max_message_size: 102400, - hrmp_channel_max_total_size: 102400, - hrmp_max_parachain_outbound_channels: 30, - hrmp_max_parachain_inbound_channels: 30, - ..Default::default() - } - } - - fn session_keys( - babe: BabeId, - grandpa: GrandpaId, - im_online: ImOnlineId, - para_validator: ValidatorId, - para_assignment: AssignmentId, - authority_discovery: AuthorityDiscoveryId, - beefy: BeefyId, - ) -> westend_runtime::SessionKeys { - westend_runtime::SessionKeys { - babe, - grandpa, - im_online, - para_validator, - para_assignment, - authority_discovery, - beefy, - } - } - - pub fn genesis() -> Storage { - let genesis_config = westend_runtime::RuntimeGenesisConfig { - system: westend_runtime::SystemConfig { - code: westend_runtime::WASM_BINARY.unwrap().to_vec(), - ..Default::default() - }, - balances: westend_runtime::BalancesConfig { - balances: accounts::init_balances() - .iter() - .cloned() - .map(|k| (k, ENDOWMENT)) - .collect(), - }, - session: westend_runtime::SessionConfig { - keys: validators::initial_authorities() - .iter() - .map(|x| { - ( - x.0.clone(), - x.0.clone(), - westend::session_keys( - x.2.clone(), - x.3.clone(), - x.4.clone(), - x.5.clone(), - x.6.clone(), - x.7.clone(), - get_from_seed::("Alice"), - ), - ) - }) - .collect::>(), - }, - staking: westend_runtime::StakingConfig { - validator_count: validators::initial_authorities().len() as u32, - minimum_validator_count: 1, - stakers: validators::initial_authorities() - .iter() - .map(|x| { - (x.0.clone(), x.1.clone(), STASH, westend_runtime::StakerStatus::Validator) - }) - .collect(), - invulnerables: validators::initial_authorities() - .iter() - .map(|x| x.0.clone()) - .collect(), - force_era: pallet_staking::Forcing::ForceNone, - slash_reward_fraction: Perbill::from_percent(10), - ..Default::default() - }, - babe: westend_runtime::BabeConfig { - authorities: Default::default(), - epoch_config: Some(westend_runtime::BABE_GENESIS_EPOCH_CONFIG), - ..Default::default() - }, - configuration: westend_runtime::ConfigurationConfig { config: get_host_config() }, - ..Default::default() - }; - - genesis_config.build_storage().unwrap() - } -} - -// Rococo -pub mod rococo { - use super::*; - pub const ED: Balance = rococo_runtime_constants::currency::EXISTENTIAL_DEPOSIT; - use rococo_runtime_constants::currency::UNITS as ROC; - const ENDOWMENT: u128 = 1_000_000 * ROC; - - pub fn get_host_config() -> HostConfiguration { - HostConfiguration { - max_upward_queue_count: 10, - max_upward_queue_size: 51200, - max_upward_message_size: 51200, - max_upward_message_num_per_candidate: 10, - max_downward_message_size: 51200, - hrmp_sender_deposit: 0, - 
hrmp_recipient_deposit: 0, - hrmp_channel_max_capacity: 1000, - hrmp_channel_max_message_size: 102400, - hrmp_channel_max_total_size: 102400, - hrmp_max_parachain_outbound_channels: 30, - hrmp_max_parachain_inbound_channels: 30, - ..Default::default() - } - } - - fn session_keys( - babe: BabeId, - grandpa: GrandpaId, - im_online: ImOnlineId, - para_validator: ValidatorId, - para_assignment: AssignmentId, - authority_discovery: AuthorityDiscoveryId, - beefy: BeefyId, - ) -> rococo_runtime::SessionKeys { - rococo_runtime::SessionKeys { - babe, - grandpa, - im_online, - para_validator, - para_assignment, - authority_discovery, - beefy, - } - } - - pub fn genesis() -> Storage { - let genesis_config = rococo_runtime::RuntimeGenesisConfig { - system: rococo_runtime::SystemConfig { - code: rococo_runtime::WASM_BINARY.unwrap().to_vec(), - ..Default::default() - }, - balances: rococo_runtime::BalancesConfig { - balances: accounts::init_balances() - .iter() - .map(|k| (k.clone(), ENDOWMENT)) - .collect(), - }, - // indices: rococo_runtime::IndicesConfig { indices: vec![] }, - session: rococo_runtime::SessionConfig { - keys: validators::initial_authorities() - .iter() - .map(|x| { - ( - x.0.clone(), - x.0.clone(), - session_keys( - x.2.clone(), - x.3.clone(), - x.4.clone(), - x.5.clone(), - x.6.clone(), - x.7.clone(), - get_from_seed::("Alice"), - ), - ) - }) - .collect::>(), - }, - babe: rococo_runtime::BabeConfig { - authorities: Default::default(), - epoch_config: Some(rococo_runtime::BABE_GENESIS_EPOCH_CONFIG), - ..Default::default() - }, - sudo: rococo_runtime::SudoConfig { - key: Some(get_account_id_from_seed::("Alice")), - }, - configuration: rococo_runtime::ConfigurationConfig { config: get_host_config() }, - registrar: rococo_runtime::RegistrarConfig { - next_free_para_id: polkadot_primitives::LOWEST_PUBLIC_ID, - ..Default::default() - }, - ..Default::default() - }; - - genesis_config.build_storage().unwrap() - } -} - -// Asset Hub Westend -pub mod asset_hub_westend { - use super::*; - pub const PARA_ID: u32 = 1000; - pub const ED: Balance = parachains_common::westend::currency::EXISTENTIAL_DEPOSIT; - - pub fn genesis() -> Storage { - let genesis_config = asset_hub_westend_runtime::RuntimeGenesisConfig { - system: asset_hub_westend_runtime::SystemConfig { - code: asset_hub_westend_runtime::WASM_BINARY - .expect("WASM binary was not build, please build it!") - .to_vec(), - ..Default::default() - }, - balances: asset_hub_westend_runtime::BalancesConfig { - balances: accounts::init_balances() - .iter() - .cloned() - .map(|k| (k, ED * 4096)) - .collect(), - }, - parachain_info: asset_hub_westend_runtime::ParachainInfoConfig { - parachain_id: PARA_ID.into(), - ..Default::default() - }, - collator_selection: asset_hub_westend_runtime::CollatorSelectionConfig { - invulnerables: collators::invulnerables() - .iter() - .cloned() - .map(|(acc, _)| acc) - .collect(), - candidacy_bond: ED * 16, - ..Default::default() - }, - session: asset_hub_westend_runtime::SessionConfig { - keys: collators::invulnerables() - .into_iter() - .map(|(acc, aura)| { - ( - acc.clone(), // account id - acc, // validator id - asset_hub_westend_runtime::SessionKeys { aura }, // session keys - ) - }) - .collect(), - }, - polkadot_xcm: asset_hub_westend_runtime::PolkadotXcmConfig { - safe_xcm_version: Some(SAFE_XCM_VERSION), - ..Default::default() - }, - ..Default::default() - }; - - genesis_config.build_storage().unwrap() - } -} - -// Asset Hub Kusama -pub mod asset_hub_rococo { - use super::*; - pub const PARA_ID: u32 = 1000; - 
pub const ED: Balance = parachains_common::rococo::currency::EXISTENTIAL_DEPOSIT; - - pub fn genesis() -> Storage { - let genesis_config = asset_hub_westend_runtime::RuntimeGenesisConfig { - system: asset_hub_westend_runtime::SystemConfig { - code: asset_hub_westend_runtime::WASM_BINARY - .expect("WASM binary was not build, please build it!") - .to_vec(), - ..Default::default() - }, - balances: asset_hub_westend_runtime::BalancesConfig { - balances: accounts::init_balances() - .iter() - .cloned() - .map(|k| (k, ED * 4096)) - .collect(), - }, - parachain_info: asset_hub_westend_runtime::ParachainInfoConfig { - parachain_id: PARA_ID.into(), - ..Default::default() - }, - collator_selection: asset_hub_westend_runtime::CollatorSelectionConfig { - invulnerables: collators::invulnerables() - .iter() - .cloned() - .map(|(acc, _)| acc) - .collect(), - candidacy_bond: ED * 16, - ..Default::default() - }, - session: asset_hub_westend_runtime::SessionConfig { - keys: collators::invulnerables() - .into_iter() - .map(|(acc, aura)| { - ( - acc.clone(), // account id - acc, // validator id - asset_hub_westend_runtime::SessionKeys { aura }, // session keys - ) - }) - .collect(), - }, - polkadot_xcm: asset_hub_westend_runtime::PolkadotXcmConfig { - safe_xcm_version: Some(SAFE_XCM_VERSION), - ..Default::default() - }, - ..Default::default() - }; - - genesis_config.build_storage().unwrap() - } -} - -// Penpal -pub mod penpal { - use super::*; - pub const PARA_ID_A: u32 = 2000; - pub const PARA_ID_B: u32 = 2001; - pub const ED: Balance = penpal_runtime::EXISTENTIAL_DEPOSIT; - - pub fn genesis(para_id: u32) -> Storage { - let genesis_config = penpal_runtime::RuntimeGenesisConfig { - system: penpal_runtime::SystemConfig { - code: penpal_runtime::WASM_BINARY - .expect("WASM binary was not build, please build it!") - .to_vec(), - ..Default::default() - }, - balances: penpal_runtime::BalancesConfig { - balances: accounts::init_balances() - .iter() - .cloned() - .map(|k| (k, ED * 4096)) - .collect(), - }, - parachain_info: penpal_runtime::ParachainInfoConfig { - parachain_id: para_id.into(), - ..Default::default() - }, - collator_selection: penpal_runtime::CollatorSelectionConfig { - invulnerables: collators::invulnerables() - .iter() - .cloned() - .map(|(acc, _)| acc) - .collect(), - candidacy_bond: ED * 16, - ..Default::default() - }, - session: penpal_runtime::SessionConfig { - keys: collators::invulnerables() - .into_iter() - .map(|(acc, aura)| { - ( - acc.clone(), // account id - acc, // validator id - penpal_runtime::SessionKeys { aura }, // session keys - ) - }) - .collect(), - }, - polkadot_xcm: penpal_runtime::PolkadotXcmConfig { - safe_xcm_version: Some(SAFE_XCM_VERSION), - ..Default::default() - }, - sudo: penpal_runtime::SudoConfig { - key: Some(get_account_id_from_seed::("Alice")), - }, - ..Default::default() - }; - - genesis_config.build_storage().unwrap() - } -} - -// Bridge Hub Rococo & Bridge Hub Wococo -pub mod bridge_hub_rococo { - use super::*; - pub const PARA_ID: u32 = 1013; - pub const ED: Balance = parachains_common::rococo::currency::EXISTENTIAL_DEPOSIT; - - pub fn genesis() -> Storage { - let genesis_config = bridge_hub_rococo_runtime::RuntimeGenesisConfig { - system: bridge_hub_rococo_runtime::SystemConfig { - code: bridge_hub_rococo_runtime::WASM_BINARY - .expect("WASM binary was not build, please build it!") - .to_vec(), - ..Default::default() - }, - balances: bridge_hub_rococo_runtime::BalancesConfig { - balances: accounts::init_balances() - .iter() - .cloned() - .map(|k| (k, ED * 
4096)) - .collect(), - }, - parachain_info: bridge_hub_rococo_runtime::ParachainInfoConfig { - parachain_id: PARA_ID.into(), - ..Default::default() - }, - collator_selection: bridge_hub_rococo_runtime::CollatorSelectionConfig { - invulnerables: collators::invulnerables() - .iter() - .cloned() - .map(|(acc, _)| acc) - .collect(), - candidacy_bond: ED * 16, - ..Default::default() - }, - session: bridge_hub_rococo_runtime::SessionConfig { - keys: collators::invulnerables() - .into_iter() - .map(|(acc, aura)| { - ( - acc.clone(), // account id - acc, // validator id - bridge_hub_rococo_runtime::SessionKeys { aura }, // session keys - ) - }) - .collect(), - }, - polkadot_xcm: bridge_hub_rococo_runtime::PolkadotXcmConfig { - safe_xcm_version: Some(SAFE_XCM_VERSION), - ..Default::default() - }, - bridge_wococo_grandpa: bridge_hub_rococo_runtime::BridgeWococoGrandpaConfig { - owner: Some(get_account_id_from_seed::(accounts::BOB)), - ..Default::default() - }, - bridge_rococo_grandpa: bridge_hub_rococo_runtime::BridgeRococoGrandpaConfig { - owner: Some(get_account_id_from_seed::(accounts::BOB)), - ..Default::default() - }, - bridge_rococo_messages: bridge_hub_rococo_runtime::BridgeRococoMessagesConfig { - owner: Some(get_account_id_from_seed::(accounts::BOB)), - ..Default::default() - }, - bridge_wococo_messages: bridge_hub_rococo_runtime::BridgeWococoMessagesConfig { - owner: Some(get_account_id_from_seed::(accounts::BOB)), - ..Default::default() - }, - ..Default::default() - }; - - genesis_config.build_storage().unwrap() - } -} diff --git a/cumulus/parachains/integration-tests/emulated/common/src/impls.rs b/cumulus/parachains/integration-tests/emulated/common/src/impls.rs index 024ae65c51ee40b34160a71892e30dfcd395a397..8c94df6d888ab587a208729c5f0c83d8ef12f29f 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/impls.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/impls.rs @@ -17,50 +17,60 @@ pub use codec::{Decode, Encode}; pub use paste; pub use crate::{ - constants::{PROOF_SIZE_THRESHOLD, REF_TIME_THRESHOLD}, - xcm_helpers::xcm_transact_unpaid_execution, - BridgeHubRococo, BridgeHubWococo, + xcm_helpers::xcm_transact_unpaid_execution, PROOF_SIZE_THRESHOLD, REF_TIME_THRESHOLD, }; // Substrate -pub use frame_support::{assert_ok, traits::fungibles::Inspect}; +pub use frame_support::{ + assert_ok, + sp_runtime::AccountId32, + traits::fungibles::Inspect, + weights::{Weight, WeightMeter}, +}; pub use pallet_assets; pub use pallet_message_queue; +pub use pallet_xcm; use sp_core::Get; -// Cumulus -use bp_messages::{ - target_chain::{DispatchMessage, DispatchMessageData, MessageDispatch}, - LaneId, MessageKey, OutboundLaneData, +// Polkadot +pub use polkadot_runtime_parachains::{ + dmp, hrmp, + inclusion::{AggregateMessageOrigin, UmpQueueId}, }; -use bridge_runtime_common::messages_xcm_extension::XcmBlobMessageDispatchResult; -pub use cumulus_pallet_dmp_queue; +pub use xcm::{ + prelude::{MultiLocation, OriginKind, Outcome, VersionedXcm}, + v3::Error, + DoubleEncoded, +}; + +// Cumulus pub use cumulus_pallet_parachain_system; pub use cumulus_pallet_xcmp_queue; pub use cumulus_primitives_core::{ - relay_chain::HrmpChannelId, DmpMessageHandler, ParaId, XcmpMessageHandler, + relay_chain::HrmpChannelId, DmpMessageHandler, Junction, Junctions, NetworkId, ParaId, + XcmpMessageHandler, }; -use pallet_bridge_messages::{Config, Instance1, Instance2, OutboundLanes, Pallet}; pub use parachains_common::{AccountId, Balance}; pub use xcm_emulator::{ assert_expected_events, bx, 
helpers::weight_within_threshold, BridgeMessage, - BridgeMessageDispatchError, BridgeMessageHandler, Chain, Parachain, RelayChain, TestExt, + BridgeMessageDispatchError, BridgeMessageHandler, Chain, Network, Parachain, RelayChain, + TestExt, }; -// Polkadot -pub use pallet_xcm; -pub use polkadot_runtime_parachains::{ - dmp, hrmp, - inclusion::{AggregateMessageOrigin, UmpQueueId}, +// Bridges +use bp_messages::{ + target_chain::{DispatchMessage, DispatchMessageData, MessageDispatch}, + LaneId, MessageKey, OutboundLaneData, }; -pub use xcm::{ - prelude::{OriginKind, Outcome, VersionedXcm, Weight}, - v3::Error, - DoubleEncoded, +use bridge_runtime_common::messages_xcm_extension::XcmBlobMessageDispatchResult; +use pallet_bridge_messages::{Config, OutboundLanes, Pallet}; +pub use pallet_bridge_messages::{ + Instance1 as BridgeMessagesInstance1, Instance2 as BridgeMessagesInstance2, + Instance3 as BridgeMessagesInstance3, }; -pub struct BridgeHubMessageHandler { - _marker: std::marker::PhantomData<(S, T, I)>, +pub struct BridgeHubMessageHandler { + _marker: std::marker::PhantomData<(S, SI, T, TI)>, } struct LaneIdWrapper(LaneId); @@ -77,31 +87,14 @@ impl From for LaneIdWrapper { } } -type BridgeHubRococoRuntime = ::Runtime; -type BridgeHubWococoRuntime = ::Runtime; - -// TODO: uncomment when https://github.com/paritytech/polkadot-sdk/pull/1352 is merged -// type BridgeHubPolkadotRuntime = ::Runtime; -// type BridgeHubKusamaRuntime = ::Runtime; - -pub type RococoWococoMessageHandler = - BridgeHubMessageHandler; -pub type WococoRococoMessageHandler = - BridgeHubMessageHandler; - -// TODO: uncomment when https://github.com/paritytech/polkadot-sdk/pull/1352 is merged -// pub type PolkadotKusamaMessageHandler -// = BridgeHubMessageHandler; -// pub type KusamaPolkadotMessageHandler -// = BridgeHubMessageHandler; - -impl BridgeMessageHandler for BridgeHubMessageHandler +impl BridgeMessageHandler for BridgeHubMessageHandler where - S: Config, - T: Config, - I: 'static, - >::InboundPayload: From>, - >::MessageDispatch: + S: Config, + SI: 'static, + T: Config, + TI: 'static, + >::InboundPayload: From>, + >::MessageDispatch: MessageDispatch, { fn get_source_outbound_messages() -> Vec { @@ -112,16 +105,13 @@ where // collect messages from `OutboundMessages` for each active outbound lane in the source for lane in active_lanes { - let latest_generated_nonce = - OutboundLanes::::get(lane).latest_generated_nonce; - let latest_received_nonce = - OutboundLanes::::get(lane).latest_received_nonce; + let latest_generated_nonce = OutboundLanes::::get(lane).latest_generated_nonce; + let latest_received_nonce = OutboundLanes::::get(lane).latest_received_nonce; (latest_received_nonce + 1..=latest_generated_nonce).for_each(|nonce| { - let encoded_payload: Vec = - Pallet::::outbound_message_data(*lane, nonce) - .expect("Bridge message does not exist") - .into(); + let encoded_payload: Vec = Pallet::::outbound_message_data(*lane, nonce) + .expect("Bridge message does not exist") + .into(); let payload = Vec::::decode(&mut &encoded_payload[..]) .expect("Decodign XCM message failed"); let id: u32 = LaneIdWrapper(*lane).into(); @@ -145,9 +135,9 @@ where // Directly dispatch outbound messages assuming everything is correct // and bypassing the `Relayers` and `InboundLane` logic - let dispatch_result = TargetMessageDispatch::::dispatch(DispatchMessage { + let dispatch_result = TargetMessageDispatch::::dispatch(DispatchMessage { key: MessageKey { lane_id, nonce }, - data: DispatchMessageData::> { payload }, + data: 
DispatchMessageData::> { payload }, }); let result = match dispatch_result.dispatch_level_result { @@ -163,14 +153,14 @@ where } fn notify_source_message_delivery(lane_id: u32) { - let data = OutboundLanes::::get(LaneIdWrapper::from(lane_id).0); + let data = OutboundLanes::::get(LaneIdWrapper::from(lane_id).0); let new_data = OutboundLaneData { oldest_unpruned_nonce: data.oldest_unpruned_nonce + 1, latest_received_nonce: data.latest_received_nonce + 1, ..data }; - OutboundLanes::::insert(LaneIdWrapper::from(lane_id).0, new_data); + OutboundLanes::::insert(LaneIdWrapper::from(lane_id).0, new_data); } } @@ -178,12 +168,12 @@ where macro_rules! impl_accounts_helpers_for_relay_chain { ( $chain:ident ) => { $crate::impls::paste::paste! { - impl $chain { + impl $chain { /// Fund a set of accounts with a balance pub fn fund_accounts(accounts: Vec<($crate::impls::AccountId, $crate::impls::Balance)>) { ::execute_with(|| { for account in accounts { - $crate::impls::assert_ok!(]>::Balances::force_set_balance( + $crate::impls::assert_ok!(]>::Balances::force_set_balance( ::RuntimeOrigin::root(), account.0.into(), account.1, @@ -192,7 +182,7 @@ macro_rules! impl_accounts_helpers_for_relay_chain { }); } /// Fund a sovereign account based on its Parachain Id - pub fn fund_para_sovereign(amount: $crate::impls::Balance, para_id: $crate::impls::ParaId) -> sp_runtime::AccountId32 { + pub fn fund_para_sovereign(amount: $crate::impls::Balance, para_id: $crate::impls::ParaId) -> $crate::impls::AccountId32 { let sovereign_account = ::sovereign_account_id_of_child_para(para_id); Self::fund_accounts(vec![(sovereign_account.clone(), amount)]); sovereign_account @@ -206,15 +196,15 @@ macro_rules! impl_accounts_helpers_for_relay_chain { macro_rules! impl_assert_events_helpers_for_relay_chain { ( $chain:ident ) => { $crate::impls::paste::paste! { - type [<$chain RuntimeEvent>] = <$chain as $crate::impls::Chain>::RuntimeEvent; + type [<$chain RuntimeEvent>] = <$chain as $crate::impls::Chain>::RuntimeEvent; - impl $chain { + impl $chain { /// Asserts a dispatchable is completely executed and XCM sent pub fn assert_xcm_pallet_attempted_complete(expected_weight: Option<$crate::impls::Weight>) { $crate::impls::assert_expected_events!( Self, vec![ - [<$chain RuntimeEvent>]::XcmPallet( + [<$chain RuntimeEvent>]::::XcmPallet( $crate::impls::pallet_xcm::Event::Attempted { outcome: $crate::impls::Outcome::Complete(weight) } ) => { weight: $crate::impls::weight_within_threshold( @@ -236,7 +226,7 @@ macro_rules! impl_assert_events_helpers_for_relay_chain { Self, vec![ // Dispatchable is properly executed and XCM message sent - [<$chain RuntimeEvent>]::XcmPallet( + [<$chain RuntimeEvent>]::::XcmPallet( $crate::impls::pallet_xcm::Event::Attempted { outcome: $crate::impls::Outcome::Incomplete(weight, error) } ) => { weight: $crate::impls::weight_within_threshold( @@ -255,7 +245,7 @@ macro_rules! impl_assert_events_helpers_for_relay_chain { $crate::impls::assert_expected_events!( Self, vec![ - [<$chain RuntimeEvent>]::XcmPallet($crate::impls::pallet_xcm::Event::Sent { .. }) => {}, + [<$chain RuntimeEvent>]::::XcmPallet($crate::impls::pallet_xcm::Event::Sent { .. }) => {}, ] ); } @@ -270,7 +260,7 @@ macro_rules! 
impl_assert_events_helpers_for_relay_chain { Self, vec![ // XCM is succesfully received and proccessed - [<$chain RuntimeEvent>]::MessageQueue($crate::impls::pallet_message_queue::Event::Processed { + [<$chain RuntimeEvent>]::::MessageQueue($crate::impls::pallet_message_queue::Event::Processed { origin: $crate::impls::AggregateMessageOrigin::Ump($crate::impls::UmpQueueId::Para(id)), weight_used, success, @@ -296,7 +286,7 @@ macro_rules! impl_assert_events_helpers_for_relay_chain { macro_rules! impl_hrmp_channels_helpers_for_relay_chain { ( $chain:ident ) => { $crate::impls::paste::paste! { - impl $chain { + impl $chain { /// Init open channel request with another Parachain pub fn init_open_channel_call( recipient_para_id: $crate::impls::ParaId, @@ -336,7 +326,7 @@ macro_rules! impl_hrmp_channels_helpers_for_relay_chain { let relay_root_origin = ::RuntimeOrigin::root(); // Force process HRMP open channel requests without waiting for the next session - $crate::impls::assert_ok!(]>::Hrmp::force_process_hrmp_open( + $crate::impls::assert_ok!(]>::Hrmp::force_process_hrmp_open( relay_root_origin, 0 )); @@ -356,16 +346,47 @@ macro_rules! impl_hrmp_channels_helpers_for_relay_chain { }; } +#[macro_export] +macro_rules! impl_send_transact_helpers_for_relay_chain { + ( $chain:ident ) => { + $crate::impls::paste::paste! { + impl $chain { + /// A root origin (as governance) sends `xcm::Transact` with `UnpaidExecution` and encoded `call` to child parachain. + pub fn send_unpaid_transact_to_parachain_as_root( + recipient: $crate::impls::ParaId, + call: $crate::impls::DoubleEncoded<()> + ) { + use $crate::impls::{bx, Chain, RelayChain}; + + ::execute_with(|| { + let root_origin = ::RuntimeOrigin::root(); + let destination: $crate::impls::MultiLocation = ::child_location_of(recipient); + let xcm = $crate::impls::xcm_transact_unpaid_execution(call, $crate::impls::OriginKind::Superuser); + + // Send XCM `Transact` + $crate::impls::assert_ok!(]>::XcmPallet::send( + root_origin, + bx!(destination.into()), + bx!(xcm), + )); + Self::assert_xcm_pallet_sent(); + }); + } + } + } + }; +} + #[macro_export] macro_rules! impl_accounts_helpers_for_parachain { ( $chain:ident ) => { $crate::impls::paste::paste! { - impl $chain { + impl $chain { /// Fund a set of accounts with a balance pub fn fund_accounts(accounts: Vec<($crate::impls::AccountId, $crate::impls::Balance)>) { ::execute_with(|| { for account in accounts { - $crate::impls::assert_ok!(]>::Balances::force_set_balance( + $crate::impls::assert_ok!(]>::Balances::force_set_balance( ::RuntimeOrigin::root(), account.0.into(), account.1, @@ -373,6 +394,23 @@ macro_rules! impl_accounts_helpers_for_parachain { } }); } + + /// Return local sovereign account of `para_id` on other `network_id` + pub fn sovereign_account_of_parachain_on_other_global_consensus( + network_id: $crate::impls::NetworkId, + para_id: $crate::impls::ParaId, + ) -> $crate::impls::AccountId { + let remote_location = $crate::impls::MultiLocation { + parents: 2, + interior: $crate::impls::Junctions::X2( + $crate::impls::Junction::GlobalConsensus(network_id), + $crate::impls::Junction::Parachain(para_id.into()), + ), + }; + ::execute_with(|| { + Self::sovereign_account_id_of(remote_location) + }) + } } } }; @@ -380,20 +418,20 @@ macro_rules! impl_accounts_helpers_for_parachain { #[macro_export] macro_rules! impl_assert_events_helpers_for_parachain { - ( $chain:ident ) => { + ( $chain:ident, $ignore_weight:expr ) => { $crate::impls::paste::paste! 
{ - type [<$chain RuntimeEvent>] = <$chain as $crate::impls::Chain>::RuntimeEvent; + type [<$chain RuntimeEvent>] = <$chain as $crate::impls::Chain>::RuntimeEvent; - impl $chain { + impl $chain { /// Asserts a dispatchable is completely executed and XCM sent pub fn assert_xcm_pallet_attempted_complete(expected_weight: Option<$crate::impls::Weight>) { $crate::impls::assert_expected_events!( Self, vec![ - [<$chain RuntimeEvent>]::PolkadotXcm( + [<$chain RuntimeEvent>]::::PolkadotXcm( $crate::impls::pallet_xcm::Event::Attempted { outcome: $crate::impls::Outcome::Complete(weight) } ) => { - weight: $crate::impls::weight_within_threshold( + weight: $ignore_weight || $crate::impls::weight_within_threshold( ($crate::impls::REF_TIME_THRESHOLD, $crate::impls::PROOF_SIZE_THRESHOLD), expected_weight.unwrap_or(*weight), *weight @@ -412,10 +450,10 @@ macro_rules! impl_assert_events_helpers_for_parachain { Self, vec![ // Dispatchable is properly executed and XCM message sent - [<$chain RuntimeEvent>]::PolkadotXcm( + [<$chain RuntimeEvent>]::::PolkadotXcm( $crate::impls::pallet_xcm::Event::Attempted { outcome: $crate::impls::Outcome::Incomplete(weight, error) } ) => { - weight: $crate::impls::weight_within_threshold( + weight: $ignore_weight || $crate::impls::weight_within_threshold( ($crate::impls::REF_TIME_THRESHOLD, $crate::impls::PROOF_SIZE_THRESHOLD), expected_weight.unwrap_or(*weight), *weight @@ -432,7 +470,7 @@ macro_rules! impl_assert_events_helpers_for_parachain { Self, vec![ // Execution fails in the origin with `Barrier` - [<$chain RuntimeEvent>]::PolkadotXcm( + [<$chain RuntimeEvent>]::::PolkadotXcm( $crate::impls::pallet_xcm::Event::Attempted { outcome: $crate::impls::Outcome::Error(error) } ) => { error: *error == expected_error.unwrap_or(*error), @@ -446,7 +484,7 @@ macro_rules! impl_assert_events_helpers_for_parachain { $crate::impls::assert_expected_events!( Self, vec![ - [<$chain RuntimeEvent>]::PolkadotXcm($crate::impls::pallet_xcm::Event::Sent { .. }) => {}, + [<$chain RuntimeEvent>]::::PolkadotXcm($crate::impls::pallet_xcm::Event::Sent { .. }) => {}, ] ); } @@ -456,7 +494,7 @@ macro_rules! impl_assert_events_helpers_for_parachain { $crate::impls::assert_expected_events!( Self, vec![ - [<$chain RuntimeEvent>]::ParachainSystem( + [<$chain RuntimeEvent>]::::ParachainSystem( $crate::impls::cumulus_pallet_parachain_system::Event::UpwardMessageSent { .. } ) => {}, ] @@ -468,10 +506,10 @@ macro_rules! impl_assert_events_helpers_for_parachain { $crate::impls::assert_expected_events!( Self, vec![ - [<$chain RuntimeEvent>]::DmpQueue($crate::impls::cumulus_pallet_dmp_queue::Event::ExecutedDownward { - outcome: $crate::impls::Outcome::Complete(weight), .. + [<$chain RuntimeEvent>]::::MessageQueue($crate::impls::pallet_message_queue::Event::Processed { + success: true, weight_used: weight, .. }) => { - weight: $crate::impls::weight_within_threshold( + weight: $ignore_weight || $crate::impls::weight_within_threshold( ($crate::impls::REF_TIME_THRESHOLD, $crate::impls::PROOF_SIZE_THRESHOLD), expected_weight.unwrap_or(*weight), *weight @@ -484,20 +522,32 @@ macro_rules! 
impl_assert_events_helpers_for_parachain { /// Asserts a XCM from Relay Chain is incompletely executed pub fn assert_dmp_queue_incomplete( expected_weight: Option<$crate::impls::Weight>, - expected_error: Option<$crate::impls::Error>, ) { $crate::impls::assert_expected_events!( Self, vec![ - [<$chain RuntimeEvent>]::DmpQueue($crate::impls::cumulus_pallet_dmp_queue::Event::ExecutedDownward { - outcome: $crate::impls::Outcome::Incomplete(weight, error), .. + [<$chain RuntimeEvent>]::::MessageQueue($crate::impls::pallet_message_queue::Event::Processed { + success: false, weight_used: weight, .. }) => { - weight: $crate::impls::weight_within_threshold( + weight: $ignore_weight || $crate::impls::weight_within_threshold( ($crate::impls::REF_TIME_THRESHOLD, $crate::impls::PROOF_SIZE_THRESHOLD), expected_weight.unwrap_or(*weight), *weight ), - error: *error == expected_error.unwrap_or(*error), + }, + ] + ); + } + + /// Asserts a XCM from Relay Chain is executed with error + pub fn assert_dmp_queue_error() { + $crate::impls::assert_expected_events!( + Self, + vec![ + [<$chain RuntimeEvent>]::::MessageQueue($crate::impls::pallet_message_queue::Event::ProcessingFailed { + .. + }) => { + }, ] ); @@ -508,10 +558,9 @@ macro_rules! impl_assert_events_helpers_for_parachain { $crate::impls::assert_expected_events!( Self, vec![ - [<$chain RuntimeEvent>]::XcmpQueue( - $crate::impls::cumulus_pallet_xcmp_queue::Event::Success { weight, .. } + [<$chain RuntimeEvent>]::::MessageQueue($crate::impls::pallet_message_queue::Event::Processed { success: true, weight_used: weight, .. } ) => { - weight: $crate::impls::weight_within_threshold( + weight: $ignore_weight || $crate::impls::weight_within_threshold( ($crate::impls::REF_TIME_THRESHOLD, $crate::impls::PROOF_SIZE_THRESHOLD), expected_weight.unwrap_or(*weight), *weight @@ -529,7 +578,7 @@ macro_rules! impl_assert_events_helpers_for_parachain { macro_rules! impl_assets_helpers_for_parachain { ( $chain:ident, $relay_chain:ident ) => { $crate::impls::paste::paste! { - impl $chain { + impl $chain { /// Returns the encoded call for `force_create` from the assets pallet pub fn force_create_asset_call( asset_id: u32, @@ -572,19 +621,21 @@ macro_rules! impl_assets_helpers_for_parachain { amount_to_mint: u128, ) { ::execute_with(|| { - $crate::impls::assert_ok!(]>::Assets::mint( + $crate::impls::assert_ok!(]>::Assets::mint( signed_origin, id.into(), beneficiary.clone().into(), amount_to_mint )); - type RuntimeEvent = <$chain as $crate::impls::Chain>::RuntimeEvent; + type RuntimeEvent = <$chain as $crate::impls::Chain>::RuntimeEvent; $crate::impls::assert_expected_events!( Self, vec![ - RuntimeEvent::Assets($crate::impls::pallet_assets::Event::Issued { asset_id, owner, amount }) => { + RuntimeEvent::::Assets( + $crate::impls::pallet_assets::Event::Issued { asset_id, owner, amount } + ) => { asset_id: *asset_id == id, owner: *owner == beneficiary.clone().into(), amount: *amount == amount_to_mint, @@ -600,53 +651,140 @@ macro_rules! 
impl_assets_helpers_for_parachain { min_balance: u128, is_sufficient: bool, asset_owner: $crate::impls::AccountId, + dmp_weight_threshold: Option<$crate::impls::Weight>, amount_to_mint: u128, ) { - use $crate::impls::{bx, Chain, RelayChain, Parachain, Inspect, TestExt}; - // Init values for Relay Chain - let root_origin = <$relay_chain as Chain>::RuntimeOrigin::root(); - let destination = <$relay_chain>::child_location_of(<$chain>::para_id()); - let xcm = Self::force_create_asset_xcm( - $crate::impls::OriginKind::Superuser, + use $crate::impls::Chain; + + // Force create asset + Self::force_create_asset_from_relay_as_root( id, - asset_owner.clone(), - is_sufficient, min_balance, + is_sufficient, + asset_owner.clone(), + dmp_weight_threshold ); - <$relay_chain>::execute_with(|| { - $crate::impls::assert_ok!(<$relay_chain as [<$relay_chain Pallet>]>::XcmPallet::send( - root_origin, - bx!(destination.into()), - bx!(xcm), - )); + // Mint asset for System Parachain's sender + let signed_origin = ::RuntimeOrigin::signed(asset_owner.clone()); + Self::mint_asset(signed_origin, id, asset_owner, amount_to_mint); + } - <$relay_chain>::assert_xcm_pallet_sent(); - }); + /// Relay Chain sends `Transact` instruction with `force_create_asset` to Parachain with `Assets` instance of `pallet_assets` . + pub fn force_create_asset_from_relay_as_root( + id: u32, + min_balance: u128, + is_sufficient: bool, + asset_owner: $crate::impls::AccountId, + dmp_weight_threshold: Option<$crate::impls::Weight>, + ) { + use $crate::impls::{Parachain, Inspect, TestExt}; + <$relay_chain>::send_unpaid_transact_to_parachain_as_root( + Self::para_id(), + Self::force_create_asset_call(id, asset_owner.clone(), is_sufficient, min_balance), + ); + + // Receive XCM message in Assets Parachain Self::execute_with(|| { - Self::assert_dmp_queue_complete(Some($crate::impls::Weight::from_parts(1_019_445_000, 200_000))); + type RuntimeEvent = <$chain as $crate::impls::Chain>::RuntimeEvent; - type RuntimeEvent = <$chain as $crate::impls::Chain>::RuntimeEvent; + Self::assert_dmp_queue_complete(dmp_weight_threshold); $crate::impls::assert_expected_events!( Self, vec![ - // Asset has been created - RuntimeEvent::Assets($crate::impls::pallet_assets::Event::ForceCreated { asset_id, owner }) => { + RuntimeEvent::::Assets($crate::impls::pallet_assets::Event::ForceCreated { asset_id, owner }) => { asset_id: *asset_id == id, - owner: *owner == asset_owner.clone(), + owner: *owner == asset_owner, }, ] ); - assert!(]>::Assets::asset_exists(id.into())); + assert!(]>::Assets::asset_exists(id.into())); }); + } + } + } + }; +} - let signed_origin = ::RuntimeOrigin::signed(asset_owner.clone()); +#[macro_export] +macro_rules! impl_foreign_assets_helpers_for_parachain { + ( $chain:ident, $relay_chain:ident ) => { + $crate::impls::paste::paste! 
{ + impl $chain { + /// Create foreign assets using sudo `ForeignAssets::force_create()` + pub fn force_create_foreign_asset( + id: $crate::impls::MultiLocation, + owner: $crate::impls::AccountId, + is_sufficient: bool, + min_balance: u128, + prefund_accounts: Vec<($crate::impls::AccountId, u128)>, + ) { + use $crate::impls::Inspect; + let sudo_origin = <$chain as $crate::impls::Chain>::RuntimeOrigin::root(); + ::execute_with(|| { + $crate::impls::assert_ok!( + ]>::ForeignAssets::force_create( + sudo_origin, + id, + owner.clone().into(), + is_sufficient, + min_balance, + ) + ); + assert!(]>::ForeignAssets::asset_exists(id)); + type RuntimeEvent = <$chain as $crate::impls::Chain>::RuntimeEvent; + $crate::impls::assert_expected_events!( + Self, + vec![ + RuntimeEvent::::ForeignAssets( + $crate::impls::pallet_assets::Event::ForceCreated { + asset_id, + .. + } + ) => { asset_id: *asset_id == id, }, + ] + ); + }); + for (beneficiary, amount) in prefund_accounts.into_iter() { + let signed_origin = + <$chain as $crate::impls::Chain>::RuntimeOrigin::signed(owner.clone()); + Self::mint_foreign_asset(signed_origin, id, beneficiary, amount); + } + } - // Mint asset for System Parachain's sender - Self::mint_asset(signed_origin, id, asset_owner, amount_to_mint); + /// Mint assets making use of the ForeignAssets pallet-assets instance + pub fn mint_foreign_asset( + signed_origin: ::RuntimeOrigin, + id: $crate::impls::MultiLocation, + beneficiary: $crate::impls::AccountId, + amount_to_mint: u128, + ) { + ::execute_with(|| { + $crate::impls::assert_ok!(]>::ForeignAssets::mint( + signed_origin, + id.into(), + beneficiary.clone().into(), + amount_to_mint + )); + + type RuntimeEvent = <$chain as $crate::impls::Chain>::RuntimeEvent; + + $crate::impls::assert_expected_events!( + Self, + vec![ + RuntimeEvent::::ForeignAssets( + $crate::impls::pallet_assets::Event::Issued { asset_id, owner, amount } + ) => { + asset_id: *asset_id == id, + owner: *owner == beneficiary.clone().into(), + amount: *amount == amount_to_mint, + }, + ] + ); + }); } } } diff --git a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs index 068f0238f49783d97c90332355b62908f758388e..952b053f2aa241bcf4a92812a51afa8e65a6855a 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs @@ -13,293 +13,161 @@ // See the License for the specific language governing permissions and // limitations under the License. 
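Taken together, the helpers generated above give a test crate a short path for asset setup: `force_create_and_mint_asset` drives `send_unpaid_transact_to_parachain_as_root` on the relay side and then asserts the resulting `pallet_message_queue::Event::Processed` on the parachain (the same event now backs both `assert_dmp_queue_complete` and `assert_xcmp_queue_success`). A minimal sketch of how a test might call them, assuming the `AssetHubRococo`/`PenpalA` chains, the `AssetHubRococoSender`/`AssetHubRococoReceiver` aliases and the prelude exposed by the test crates further down, and assuming the chain declares a `ForeignAssets` pallet in its `decl_test_parachains!` block; ids, balances and the weight threshold are illustrative:

#[test]
fn asset_setup_helpers_sketch() {
	// Create and mint a trust-backed asset on Asset Hub via a relay-chain `Transact`.
	AssetHubRococo::force_create_and_mint_asset(
		1,                           // asset id
		1_000,                       // min_balance
		false,                       // is_sufficient
		AssetHubRococoSender::get(), // asset owner (also receives the mint)
		Some(Weight::from_parts(1_019_445_000, 200_000)), // dmp_weight_threshold
		1_000_000_000,               // amount_to_mint
	);

	// Register a sibling parachain's token as a `ForeignAssets` asset (keyed by
	// `MultiLocation`) and pre-fund one account with it.
	let foreign_asset_id =
		MultiLocation { parents: 1, interior: X1(Parachain(PenpalA::para_id().into())) };
	AssetHubRococo::force_create_foreign_asset(
		foreign_asset_id,
		AssetHubRococoSender::get(),                            // owner
		false,                                                  // is_sufficient
		1_000_000_000,                                          // min_balance
		vec![(AssetHubRococoReceiver::get(), 10_000_000_000)],  // prefund_accounts
	);
}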
-pub mod constants; pub mod impls; +pub mod macros; pub mod xcm_helpers; -use constants::{ - accounts::{ALICE, BOB}, - asset_hub_rococo, asset_hub_westend, bridge_hub_rococo, penpal, rococo, westend, -}; -use impls::{RococoWococoMessageHandler, WococoRococoMessageHandler}; +pub use xcm_emulator; // Substrate -use frame_support::traits::OnInitialize; +use grandpa::AuthorityId as GrandpaId; +use pallet_im_online::sr25519::AuthorityId as ImOnlineId; +use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; +use sp_consensus_babe::AuthorityId as BabeId; +use sp_core::{sr25519, storage::Storage, Pair, Public}; +use sp_runtime::{ + traits::{IdentifyAccount, Verify}, + BuildStorage, MultiSignature, +}; + +// Polakdot +use parachains_common::BlockNumber; +use polkadot_runtime_parachains::configuration::HostConfiguration; +use xcm; // Cumulus -use xcm_emulator::{ - decl_test_bridges, decl_test_networks, decl_test_parachains, decl_test_relay_chains, - decl_test_sender_receiver_accounts_parameter_types, DefaultMessageProcessor, -}; +use parachains_common::{AccountId, AssetHubPolkadotAuraId, AuraId}; +use polkadot_primitives::{AssignmentId, ValidatorId}; +use polkadot_service::chain_spec::get_authority_keys_from_seed_no_beefy; -decl_test_relay_chains! { - #[api_version(7)] - pub struct Westend { - genesis = westend::genesis(), - on_init = (), - runtime = westend_runtime, - core = { - MessageProcessor: DefaultMessageProcessor, - SovereignAccountOf: westend_runtime::xcm_config::LocationConverter, //TODO: rename to SovereignAccountOf, - }, - pallets = { - XcmPallet: westend_runtime::XcmPallet, - Sudo: westend_runtime::Sudo, - Balances: westend_runtime::Balances, - Treasury: westend_runtime::Treasury, - AssetRate: westend_runtime::AssetRate, - } - }, - #[api_version(7)] - pub struct Rococo { - genesis = rococo::genesis(), - on_init = (), - runtime = rococo_runtime, - core = { - MessageProcessor: DefaultMessageProcessor, - SovereignAccountOf: rococo_runtime::xcm_config::LocationConverter, //TODO: rename to SovereignAccountOf, - }, - pallets = { - XcmPallet: rococo_runtime::XcmPallet, - Sudo: rococo_runtime::Sudo, - Balances: rococo_runtime::Balances, - } - }, - #[api_version(7)] - pub struct Wococo { - genesis = rococo::genesis(), - on_init = (), - runtime = rococo_runtime, - core = { - MessageProcessor: DefaultMessageProcessor, - SovereignAccountOf: rococo_runtime::xcm_config::LocationConverter, //TODO: rename to SovereignAccountOf, - }, - pallets = { - XcmPallet: rococo_runtime::XcmPallet, - Sudo: rococo_runtime::Sudo, - Balances: rococo_runtime::Balances, - } - } -} +pub const XCM_V2: u32 = 2; +pub const XCM_V3: u32 = 3; +pub const REF_TIME_THRESHOLD: u64 = 33; +pub const PROOF_SIZE_THRESHOLD: u64 = 33; -decl_test_parachains! 
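The `REF_TIME_THRESHOLD` and `PROOF_SIZE_THRESHOLD` constants above are fed into `xcm_emulator`'s `weight_within_threshold` helper, which the event-assertion macros use so that benchmarked weights can drift without breaking every test. A hedged, self-contained sketch of the check in isolation (the helper itself lives in `xcm-emulator`; the weight values are made up):

use emulated_integration_tests_common::{PROOF_SIZE_THRESHOLD, REF_TIME_THRESHOLD};
use frame_support::weights::Weight;
use xcm_emulator::helpers::weight_within_threshold;

#[test]
fn weights_within_tolerance() {
	let expected = Weight::from_parts(1_019_445_000, 200_000);
	let observed = Weight::from_parts(1_019_445_000, 200_000);

	// Holds whenever `observed` stays within the configured tolerance of `expected`
	// on both the `ref_time` and `proof_size` components (trivially true here,
	// since the two weights are equal).
	assert!(weight_within_threshold(
		(REF_TIME_THRESHOLD, PROOF_SIZE_THRESHOLD),
		expected,
		observed,
	));
}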
{ - // Westend Parachains - pub struct AssetHubWestend { - genesis = asset_hub_westend::genesis(), - on_init = { - asset_hub_westend_runtime::AuraExt::on_initialize(1); - }, - runtime = asset_hub_westend_runtime, - core = { - XcmpMessageHandler: asset_hub_westend_runtime::XcmpQueue, - DmpMessageHandler: asset_hub_westend_runtime::DmpQueue, - LocationToAccountId: asset_hub_westend_runtime::xcm_config::LocationToAccountId, - ParachainInfo: asset_hub_westend_runtime::ParachainInfo, - }, - pallets = { - PolkadotXcm: asset_hub_westend_runtime::PolkadotXcm, - Balances: asset_hub_westend_runtime::Balances, - Assets: asset_hub_westend_runtime::Assets, - ForeignAssets: asset_hub_westend_runtime::ForeignAssets, - PoolAssets: asset_hub_westend_runtime::PoolAssets, - AssetConversion: asset_hub_westend_runtime::AssetConversion, - } - }, - pub struct PenpalWestendA { - genesis = penpal::genesis(penpal::PARA_ID_A), - on_init = { - penpal_runtime::AuraExt::on_initialize(1); - }, - runtime = penpal_runtime, - core = { - XcmpMessageHandler: penpal_runtime::XcmpQueue, - DmpMessageHandler: penpal_runtime::DmpQueue, - LocationToAccountId: penpal_runtime::xcm_config::LocationToAccountId, - ParachainInfo: penpal_runtime::ParachainInfo, - }, - pallets = { - PolkadotXcm: penpal_runtime::PolkadotXcm, - Assets: penpal_runtime::Assets, - } - }, - // Rococo Parachains - pub struct BridgeHubRococo { - genesis = bridge_hub_rococo::genesis(), - on_init = { - bridge_hub_rococo_runtime::AuraExt::on_initialize(1); - }, - runtime = bridge_hub_rococo_runtime, - core = { - XcmpMessageHandler: bridge_hub_rococo_runtime::XcmpQueue, - DmpMessageHandler: bridge_hub_rococo_runtime::DmpQueue, - LocationToAccountId: bridge_hub_rococo_runtime::xcm_config::LocationToAccountId, - ParachainInfo: bridge_hub_rococo_runtime::ParachainInfo, - }, - pallets = { - PolkadotXcm: bridge_hub_rococo_runtime::PolkadotXcm, - Balances: bridge_hub_rococo_runtime::Balances, - } - }, - // AssetHubRococo (aka Rockmine/Rockmine2) mirrors AssetHubKusama - pub struct AssetHubRococo { - genesis = asset_hub_rococo::genesis(), - on_init = { - asset_hub_polkadot_runtime::AuraExt::on_initialize(1); - }, - runtime = asset_hub_kusama_runtime, - core = { - XcmpMessageHandler: asset_hub_kusama_runtime::XcmpQueue, - DmpMessageHandler: asset_hub_kusama_runtime::DmpQueue, - LocationToAccountId: asset_hub_kusama_runtime::xcm_config::LocationToAccountId, - ParachainInfo: asset_hub_kusama_runtime::ParachainInfo, - }, - pallets = { - PolkadotXcm: asset_hub_kusama_runtime::PolkadotXcm, - Assets: asset_hub_kusama_runtime::Assets, - } - }, - // Wococo Parachains - pub struct BridgeHubWococo { - genesis = bridge_hub_rococo::genesis(), - on_init = { - bridge_hub_rococo_runtime::AuraExt::on_initialize(1); - }, - runtime = bridge_hub_rococo_runtime, - core = { - XcmpMessageHandler: bridge_hub_rococo_runtime::XcmpQueue, - DmpMessageHandler: bridge_hub_rococo_runtime::DmpQueue, - LocationToAccountId: bridge_hub_rococo_runtime::xcm_config::LocationToAccountId, - ParachainInfo: bridge_hub_rococo_runtime::ParachainInfo, - }, - pallets = { - PolkadotXcm: bridge_hub_rococo_runtime::PolkadotXcm, - } - }, - pub struct AssetHubWococo { - genesis = asset_hub_westend::genesis(), - on_init = { - asset_hub_polkadot_runtime::AuraExt::on_initialize(1); - }, - runtime = asset_hub_polkadot_runtime, - core = { - XcmpMessageHandler: asset_hub_polkadot_runtime::XcmpQueue, - DmpMessageHandler: asset_hub_polkadot_runtime::DmpQueue, - LocationToAccountId: 
asset_hub_polkadot_runtime::xcm_config::LocationToAccountId, - ParachainInfo: asset_hub_polkadot_runtime::ParachainInfo, - }, - pallets = { - PolkadotXcm: asset_hub_polkadot_runtime::PolkadotXcm, - Assets: asset_hub_polkadot_runtime::Assets, - } - }, - pub struct PenpalRococoA { - genesis = penpal::genesis(penpal::PARA_ID_A), - on_init = { - penpal_runtime::AuraExt::on_initialize(1); - }, - runtime = penpal_runtime, - core = { - XcmpMessageHandler: penpal_runtime::XcmpQueue, - DmpMessageHandler: penpal_runtime::DmpQueue, - LocationToAccountId: penpal_runtime::xcm_config::LocationToAccountId, - ParachainInfo: penpal_runtime::ParachainInfo, - }, - pallets = { - PolkadotXcm: penpal_runtime::PolkadotXcm, - Assets: penpal_runtime::Assets, - } - } +/// The default XCM version to set in genesis config. +pub const SAFE_XCM_VERSION: u32 = xcm::prelude::XCM_VERSION; + +type AccountPublic = ::Signer; + +/// Helper function to generate a crypto pair from seed +pub fn get_from_seed(seed: &str) -> ::Public { + TPublic::Pair::from_string(&format!("//{}", seed), None) + .expect("static values are valid; qed") + .public() } -decl_test_networks! { - pub struct WestendMockNet { - relay_chain = Westend, - parachains = vec![ - AssetHubWestend, - PenpalWestendA, - ], - bridge = () - }, - pub struct RococoMockNet { - relay_chain = Rococo, - parachains = vec![ - AssetHubRococo, - BridgeHubRococo, - PenpalRococoA, - ], - bridge = RococoWococoMockBridge - }, - pub struct WococoMockNet { - relay_chain = Wococo, - parachains = vec![ - AssetHubWococo, - BridgeHubWococo, - ], - bridge = WococoRococoMockBridge - } +/// Helper function to generate an account ID from seed. +pub fn get_account_id_from_seed(seed: &str) -> AccountId +where + AccountPublic: From<::Public>, +{ + AccountPublic::from(get_from_seed::(seed)).into_account() } -decl_test_bridges! 
{ - pub struct RococoWococoMockBridge { - source = BridgeHubRococo, - target = BridgeHubWococo, - handler = RococoWococoMessageHandler - }, - pub struct WococoRococoMockBridge { - source = BridgeHubWococo, - target = BridgeHubRococo, - handler = WococoRococoMessageHandler +pub fn get_host_config() -> HostConfiguration { + HostConfiguration { + max_upward_queue_count: 10, + max_upward_queue_size: 51200, + max_upward_message_size: 51200, + max_upward_message_num_per_candidate: 10, + max_downward_message_size: 51200, + hrmp_sender_deposit: 0, + hrmp_recipient_deposit: 0, + hrmp_channel_max_capacity: 1000, + hrmp_channel_max_message_size: 102400, + hrmp_channel_max_total_size: 102400, + hrmp_max_parachain_outbound_channels: 30, + hrmp_max_parachain_inbound_channels: 30, + ..Default::default() } - // TODO: uncomment when https://github.com/paritytech/polkadot-sdk/pull/1352 is merged - // pub struct PolkadotKusamaMockBridge { - // source = BridgeHubPolkadot, - // target = BridgeHubKusama, - // handler = PolkadotKusamaMessageHandler - // }, - // pub struct KusamaPolkadotMockBridge { - // source = BridgeHubKusama, - // target = BridgeHubPolkadot, - // handler = KusamaPolkadotMessageHandler - // } } -// Westend implementation -impl_accounts_helpers_for_relay_chain!(Westend); -impl_assert_events_helpers_for_relay_chain!(Westend); - -// Rococo implementation -impl_accounts_helpers_for_relay_chain!(Rococo); -impl_assert_events_helpers_for_relay_chain!(Rococo); - -// Wococo implementation -impl_accounts_helpers_for_relay_chain!(Wococo); -impl_assert_events_helpers_for_relay_chain!(Wococo); +/// Helper function used in tests to build the genesis storage using given RuntimeGenesisConfig and +/// code Used in `legacy_vs_json_check` submods to verify storage building with JSON patch against +/// building with RuntimeGenesisConfig struct. 
+pub fn build_genesis_storage(builder: &dyn BuildStorage, code: &[u8]) -> Storage {
+	let mut storage = builder.build_storage().unwrap();
+	storage
+		.top
+		.insert(sp_core::storage::well_known_keys::CODE.to_vec(), code.into());
+	storage
+}

-// AssetHubWestend implementation
-impl_accounts_helpers_for_parachain!(AssetHubWestend);
-impl_assets_helpers_for_parachain!(AssetHubWestend, Westend);
-impl_assert_events_helpers_for_parachain!(AssetHubWestend);
+pub mod accounts {
+	use super::*;
+	pub const ALICE: &str = "Alice";
+	pub const BOB: &str = "Bob";
+	pub const CHARLIE: &str = "Charlie";
+	pub const DAVE: &str = "Dave";
+	pub const EVE: &str = "Eve";
+	pub const FERDIE: &str = "Ferdie";
+	pub const ALICE_STASH: &str = "Alice//stash";
+	pub const BOB_STASH: &str = "Bob//stash";
+	pub const CHARLIE_STASH: &str = "Charlie//stash";
+	pub const DAVE_STASH: &str = "Dave//stash";
+	pub const EVE_STASH: &str = "Eve//stash";
+	pub const FERDIE_STASH: &str = "Ferdie//stash";
+	pub const FERDIE_BEEFY: &str = "Ferdie//stash";
+
+	pub fn init_balances() -> Vec<AccountId> {
+		vec![
+			get_account_id_from_seed::<sr25519::Public>(ALICE),
+			get_account_id_from_seed::<sr25519::Public>(BOB),
+			get_account_id_from_seed::<sr25519::Public>(CHARLIE),
+			get_account_id_from_seed::<sr25519::Public>(DAVE),
+			get_account_id_from_seed::<sr25519::Public>(EVE),
+			get_account_id_from_seed::<sr25519::Public>(FERDIE),
+			get_account_id_from_seed::<sr25519::Public>(ALICE_STASH),
+			get_account_id_from_seed::<sr25519::Public>(BOB_STASH),
+			get_account_id_from_seed::<sr25519::Public>(CHARLIE_STASH),
+			get_account_id_from_seed::<sr25519::Public>(DAVE_STASH),
+			get_account_id_from_seed::<sr25519::Public>(EVE_STASH),
+			get_account_id_from_seed::<sr25519::Public>(FERDIE_STASH),
+		]
+	}
+}

-// PenpalWestendA implementation
-impl_assert_events_helpers_for_parachain!(PenpalWestendA);
+pub mod collators {
+	use super::*;
+
+	pub fn invulnerables_asset_hub_polkadot() -> Vec<(AccountId, AssetHubPolkadotAuraId)> {
+		vec![
+			(
+				get_account_id_from_seed::<sr25519::Public>("Alice"),
+				get_from_seed::<AssetHubPolkadotAuraId>("Alice"),
+			),
+			(
+				get_account_id_from_seed::<sr25519::Public>("Bob"),
+				get_from_seed::<AssetHubPolkadotAuraId>("Bob"),
+			),
+		]
+	}

-// BridgeHubRococo implementation
-impl_accounts_helpers_for_parachain!(BridgeHubRococo);
-impl_assert_events_helpers_for_parachain!(BridgeHubRococo);
+	pub fn invulnerables() -> Vec<(AccountId, AuraId)> {
+		vec![
+			(
+				get_account_id_from_seed::<sr25519::Public>("Alice"),
+				get_from_seed::<AuraId>("Alice"),
+			),
+			(get_account_id_from_seed::<sr25519::Public>("Bob"), get_from_seed::<AuraId>("Bob")),
+		]
+	}
+}

-decl_test_sender_receiver_accounts_parameter_types!
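A sketch of how a chain crate is expected to combine the keyring and genesis helpers above: derive the well-known dev accounts, endow them, and let `build_genesis_storage` splice the runtime code into the built storage. `example_runtime` and its config types are placeholders rather than a real crate, and whether a given runtime has a `Sudo` pallet is an assumption:

// Hypothetical chain crate; `example_runtime` stands in for whatever runtime the
// emulated chain wraps.
use emulated_integration_tests_common::{accounts, build_genesis_storage, get_account_id_from_seed};
use sp_core::{sr25519, storage::Storage};

pub fn genesis() -> Storage {
	let genesis_config = example_runtime::RuntimeGenesisConfig {
		balances: example_runtime::BalancesConfig {
			// Alice, Bob, ... plus their stash accounts, each endowed with 2^60 units.
			balances: accounts::init_balances().iter().cloned().map(|k| (k, 1u128 << 60)).collect(),
		},
		sudo: example_runtime::SudoConfig {
			key: Some(get_account_id_from_seed::<sr25519::Public>(accounts::ALICE)),
		},
		..Default::default()
	};

	build_genesis_storage(&genesis_config, example_runtime::WASM_BINARY.expect("runtime wasm blob"))
}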
{ - // Relays - Westend { sender: ALICE, receiver: BOB }, - Rococo { sender: ALICE, receiver: BOB }, - Wococo { sender: ALICE, receiver: BOB }, - // Asset Hubs - AssetHubWestend { sender: ALICE, receiver: BOB }, - AssetHubRococo { sender: ALICE, receiver: BOB }, - AssetHubWococo { sender: ALICE, receiver: BOB }, - // Bridged Hubs - BridgeHubRococo { sender: ALICE, receiver: BOB }, - BridgeHubWococo { sender: ALICE, receiver: BOB }, - // Penpals - PenpalWestendA { sender: ALICE, receiver: BOB }, - PenpalRococoA { sender: ALICE, receiver: BOB } +pub mod validators { + use super::*; + + pub fn initial_authorities() -> Vec<( + AccountId, + AccountId, + BabeId, + GrandpaId, + ImOnlineId, + ValidatorId, + AssignmentId, + AuthorityDiscoveryId, + )> { + vec![get_authority_keys_from_seed_no_beefy("Alice")] + } } diff --git a/cumulus/parachains/integration-tests/emulated/common/src/macros.rs b/cumulus/parachains/integration-tests/emulated/common/src/macros.rs new file mode 100644 index 0000000000000000000000000000000000000000..6ea3524ed4a39d5da293c3f69f9c8a35917aab39 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/common/src/macros.rs @@ -0,0 +1,122 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub use paste; + +// Substrate +pub use pallet_balances; +pub use pallet_message_queue; +pub use pallet_xcm; + +// Polkadot +pub use xcm::prelude::{AccountId32, WeightLimit}; + +// Cumulus +pub use asset_test_utils; +pub use cumulus_pallet_xcmp_queue; +pub use xcm_emulator::Chain; + +#[macro_export] +macro_rules! test_parachain_is_trusted_teleporter { + ( $sender_para:ty, $sender_xcm_config:ty, vec![$( $receiver_para:ty ),+], ($assets:expr, $amount:expr) ) => { + $crate::macros::paste::paste! { + // init Origin variables + let sender = [<$sender_para Sender>]::get(); + let mut para_sender_balance_before = + <$sender_para as $crate::macros::Chain>::account_data_of(sender.clone()).free; + let origin = <$sender_para as $crate::macros::Chain>::RuntimeOrigin::signed(sender.clone()); + let fee_asset_item = 0; + let weight_limit = $crate::macros::WeightLimit::Unlimited; + + $( + { + // init Destination variables + let receiver = [<$receiver_para Receiver>]::get(); + let para_receiver_balance_before = + <$receiver_para as $crate::macros::Chain>::account_data_of(receiver.clone()).free; + let para_destination = + <$sender_para>::sibling_location_of(<$receiver_para>::para_id()); + let beneficiary: MultiLocation = + $crate::macros::AccountId32 { network: None, id: receiver.clone().into() }.into(); + + // Send XCM message from Origin Parachain + // We are only testing the limited teleport version, which should be ok since success will + // depend only on a proper `XcmConfig` at destination. 
+ <$sender_para>::execute_with(|| { + assert_ok!(<$sender_para as [<$sender_para Pallet>]>::PolkadotXcm::limited_teleport_assets( + origin.clone(), + bx!(para_destination.into()), + bx!(beneficiary.into()), + bx!($assets.clone().into()), + fee_asset_item, + weight_limit.clone(), + )); + + type RuntimeEvent = <$sender_para as $crate::macros::Chain>::RuntimeEvent; + + assert_expected_events!( + $sender_para, + vec![ + RuntimeEvent::PolkadotXcm( + $crate::macros::pallet_xcm::Event::Attempted { outcome: Outcome::Complete { .. } } + ) => {}, + RuntimeEvent::XcmpQueue( + $crate::macros::cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. } + ) => {}, + RuntimeEvent::Balances( + $crate::macros::pallet_balances::Event::Withdraw { who: sender, amount } + ) => {}, + ] + ); + }); + + // Receive XCM message in Destination Parachain + <$receiver_para>::execute_with(|| { + type RuntimeEvent = <$receiver_para as $crate::macros::Chain>::RuntimeEvent; + + assert_expected_events!( + $receiver_para, + vec![ + RuntimeEvent::Balances( + $crate::macros::pallet_balances::Event::Deposit { who: receiver, .. } + ) => {}, + RuntimeEvent::MessageQueue( + $crate::macros::pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + ] + ); + }); + + // Check if balances are updated accordingly in Origin and Destination Parachains + let para_sender_balance_after = + <$sender_para as $crate::macros::Chain>::account_data_of(sender.clone()).free; + let para_receiver_balance_after = + <$receiver_para as $crate::macros::Chain>::account_data_of(receiver.clone()).free; + let delivery_fees = <$sender_para>::execute_with(|| { + $crate::macros::asset_test_utils::xcm_helpers::transfer_assets_delivery_fees::< + <$sender_xcm_config as xcm_executor::Config>::XcmSender, + >($assets.clone(), fee_asset_item, weight_limit.clone(), beneficiary, para_destination) + }); + + assert_eq!(para_sender_balance_before - $amount - delivery_fees, para_sender_balance_after); + assert!(para_receiver_balance_after > para_receiver_balance_before); + + // Update sender balance + para_sender_balance_before = <$sender_para as $crate::macros::Chain>::account_data_of(sender.clone()).free; + } + )+ + } + }; +} diff --git a/cumulus/parachains/integration-tests/emulated/common/src/xcm_helpers.rs b/cumulus/parachains/integration-tests/emulated/common/src/xcm_helpers.rs index 4096cdbba0b9154beb9f90272a459087b2443498..47e92ed075fa0ce00cdf7a012ae07e13bc9bed6f 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/xcm_helpers.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/xcm_helpers.rs @@ -1,28 +1,23 @@ -// Copyright Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
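The `test_parachain_is_trusted_teleporter!` macro above is meant to be invoked from a chain's integration-test crate: the sender type first, then its `XcmConfig` (used to recompute delivery fees), then the list of receivers, then the `(assets, amount)` pair. An illustrative invocation, assuming the Rococo system chains, the `AssetHubRococoXcmConfig` alias and the `ASSET_HUB_ROCOCO_ED` constant exposed by the asset-hub-rococo test crate further down:

#[test]
fn teleport_to_other_system_parachains_works() {
	let amount = ASSET_HUB_ROCOCO_ED * 100;
	let native_asset: MultiAssets = (Parent, amount).into();

	test_parachain_is_trusted_teleporter!(
		AssetHubRococo,          // sender parachain
		AssetHubRococoXcmConfig, // sender's XcmConfig, for delivery-fee calculation
		vec![BridgeHubRococo],   // receiver parachain(s)
		(native_asset, amount)
	);
}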
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Cumulus use parachains_common::AccountId; -use xcm::{ - prelude::{ - AccountId32, All, BuyExecution, DepositAsset, MultiAsset, MultiAssets, MultiLocation, - OriginKind, RefundSurplus, Transact, UnpaidExecution, VersionedXcm, Weight, WeightLimit, - WithdrawAsset, Xcm, X1, - }, - DoubleEncoded, -}; + +// Polkadot +use xcm::{prelude::*, DoubleEncoded}; /// Helper method to build a XCM with a `Transact` instruction and paying for its execution pub fn xcm_transact_paid_execution( diff --git a/cumulus/parachains/integration-tests/emulated/networks/rococo-system/Cargo.toml b/cumulus/parachains/integration-tests/emulated/networks/rococo-system/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..713cc2ecdbb253044cbf4063317a2cd8fae2e823 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/networks/rococo-system/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "rococo-system-emulated-network" +version = "0.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +description = "Rococo System emulated network" +publish = false + +[dependencies] +# Cumulus +emulated-integration-tests-common = { path = "../../common", default-features = false } +rococo-emulated-chain = { path = "../../chains/relays/rococo" } +asset-hub-rococo-emulated-chain = { path = "../../chains/parachains/assets/asset-hub-rococo" } +bridge-hub-rococo-emulated-chain = { path = "../../chains/parachains/bridges/bridge-hub-rococo" } +penpal-emulated-chain = { path = "../../chains/parachains/testing/penpal" } diff --git a/cumulus/parachains/integration-tests/emulated/networks/rococo-system/src/lib.rs b/cumulus/parachains/integration-tests/emulated/networks/rococo-system/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..ad22185fa703ba49d9eadc54d4fd1a56b7e81337 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/networks/rococo-system/src/lib.rs @@ -0,0 +1,51 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +pub use asset_hub_rococo_emulated_chain; +pub use bridge_hub_rococo_emulated_chain; +pub use penpal_emulated_chain; +pub use rococo_emulated_chain; + +use asset_hub_rococo_emulated_chain::AssetHubRococo; +use bridge_hub_rococo_emulated_chain::BridgeHubRococo; +use penpal_emulated_chain::{PenpalA, PenpalB}; +use rococo_emulated_chain::Rococo; + +// Cumulus +use emulated_integration_tests_common::{ + accounts::{ALICE, BOB}, + xcm_emulator::{decl_test_networks, decl_test_sender_receiver_accounts_parameter_types}, +}; + +decl_test_networks! { + pub struct RococoMockNet { + relay_chain = Rococo, + parachains = vec![ + AssetHubRococo, + BridgeHubRococo, + PenpalA, + PenpalB, + ], + bridge = () + }, +} + +decl_test_sender_receiver_accounts_parameter_types! { + RococoRelay { sender: ALICE, receiver: BOB }, + AssetHubRococoPara { sender: ALICE, receiver: BOB }, + BridgeHubRococoPara { sender: ALICE, receiver: BOB }, + PenpalAPara { sender: ALICE, receiver: BOB }, + PenpalBPara { sender: ALICE, receiver: BOB } +} diff --git a/cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system/Cargo.toml b/cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..34713f5b48e995351d53c93fd607df236bb00e32 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "rococo-westend-system-emulated-network" +version = "0.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +description = "Rococo<>Westend emulated bridged network" +publish = false + +[dependencies] +# Cumulus +emulated-integration-tests-common = { path = "../../common", default-features = false } +rococo-emulated-chain = { path = "../../chains/relays/rococo" } +westend-emulated-chain = { path = "../../chains/relays/westend" } +asset-hub-rococo-emulated-chain = { path = "../../chains/parachains/assets/asset-hub-rococo" } +asset-hub-westend-emulated-chain = { path = "../../chains/parachains/assets/asset-hub-westend" } +bridge-hub-rococo-emulated-chain = { path = "../../chains/parachains/bridges/bridge-hub-rococo" } +bridge-hub-westend-emulated-chain = { path = "../../chains/parachains/bridges/bridge-hub-westend" } diff --git a/cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system/src/lib.rs b/cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..b03ff692b952ab4aae6de04016e83ca0dfe09eef --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system/src/lib.rs @@ -0,0 +1,96 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +pub use asset_hub_rococo_emulated_chain; +pub use asset_hub_westend_emulated_chain; +pub use bridge_hub_rococo_emulated_chain; +pub use bridge_hub_westend_emulated_chain; +pub use rococo_emulated_chain; +pub use westend_emulated_chain; + +use asset_hub_rococo_emulated_chain::AssetHubRococo; +use asset_hub_westend_emulated_chain::AssetHubWestend; +use bridge_hub_rococo_emulated_chain::BridgeHubRococo; +use bridge_hub_westend_emulated_chain::BridgeHubWestend; +use rococo_emulated_chain::Rococo; +use westend_emulated_chain::Westend; + +// Cumulus +use emulated_integration_tests_common::{ + accounts::{ALICE, BOB}, + impls::{BridgeHubMessageHandler, BridgeMessagesInstance1, BridgeMessagesInstance3}, + xcm_emulator::{ + decl_test_bridges, decl_test_networks, decl_test_sender_receiver_accounts_parameter_types, + Chain, + }, +}; + +decl_test_networks! { + pub struct RococoMockNet { + relay_chain = Rococo, + parachains = vec![ + AssetHubRococo, + BridgeHubRococo, + ], + bridge = RococoWestendMockBridge + + }, + pub struct WestendMockNet { + relay_chain = Westend, + parachains = vec![ + AssetHubWestend, + BridgeHubWestend, + ], + bridge = WestendRococoMockBridge + }, +} + +decl_test_bridges! { + pub struct RococoWestendMockBridge { + source = BridgeHubRococoPara, + target = BridgeHubWestendPara, + handler = RococoWestendMessageHandler + }, + pub struct WestendRococoMockBridge { + source = BridgeHubWestendPara, + target = BridgeHubRococoPara, + handler = WestendRococoMessageHandler + } +} + +type BridgeHubRococoRuntime = ::Runtime; +type BridgeHubWestendRuntime = ::Runtime; + +pub type RococoWestendMessageHandler = BridgeHubMessageHandler< + BridgeHubRococoRuntime, + BridgeMessagesInstance3, + BridgeHubWestendRuntime, + BridgeMessagesInstance1, +>; +pub type WestendRococoMessageHandler = BridgeHubMessageHandler< + BridgeHubWestendRuntime, + BridgeMessagesInstance1, + BridgeHubRococoRuntime, + BridgeMessagesInstance3, +>; + +decl_test_sender_receiver_accounts_parameter_types! 
{ + RococoRelay { sender: ALICE, receiver: BOB }, + AssetHubRococoPara { sender: ALICE, receiver: BOB }, + BridgeHubRococoPara { sender: ALICE, receiver: BOB }, + WestendRelay { sender: ALICE, receiver: BOB }, + AssetHubWestendPara { sender: ALICE, receiver: BOB }, + BridgeHubWestendPara { sender: ALICE, receiver: BOB } +} diff --git a/cumulus/parachains/integration-tests/emulated/networks/westend-system/Cargo.toml b/cumulus/parachains/integration-tests/emulated/networks/westend-system/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..a4360076d6bd4e2a4a78008708e0ae0891b7fe70 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/networks/westend-system/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "westend-system-emulated-network" +version = "0.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +description = "Westend System emulated network" +publish = false + +[dependencies] +# Cumulus +emulated-integration-tests-common = { path = "../../common", default-features = false } +westend-emulated-chain = { path = "../../chains/relays/westend", default-features = false } +asset-hub-westend-emulated-chain = { path = "../../chains/parachains/assets/asset-hub-westend" } +bridge-hub-westend-emulated-chain = { path = "../../chains/parachains/bridges/bridge-hub-westend" } +penpal-emulated-chain = { path = "../../chains/parachains/testing/penpal" } diff --git a/cumulus/parachains/integration-tests/emulated/networks/westend-system/src/lib.rs b/cumulus/parachains/integration-tests/emulated/networks/westend-system/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..667b44a69869a1a9e991d13053cccc5f206324c7 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/networks/westend-system/src/lib.rs @@ -0,0 +1,51 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub use asset_hub_westend_emulated_chain; +pub use bridge_hub_westend_emulated_chain; +pub use penpal_emulated_chain; +pub use westend_emulated_chain; + +use asset_hub_westend_emulated_chain::AssetHubWestend; +use bridge_hub_westend_emulated_chain::BridgeHubWestend; +use penpal_emulated_chain::{PenpalA, PenpalB}; +use westend_emulated_chain::Westend; + +// Cumulus +use emulated_integration_tests_common::{ + accounts::{ALICE, BOB}, + xcm_emulator::{decl_test_networks, decl_test_sender_receiver_accounts_parameter_types}, +}; + +decl_test_networks! { + pub struct WestendMockNet { + relay_chain = Westend, + parachains = vec![ + AssetHubWestend, + BridgeHubWestend, + PenpalA, + PenpalB, + ], + bridge = () + }, +} + +decl_test_sender_receiver_accounts_parameter_types! 
{ + WestendRelay { sender: ALICE, receiver: BOB }, + AssetHubWestendPara { sender: ALICE, receiver: BOB }, + BridgeHubWestendPara { sender: ALICE, receiver: BOB }, + PenpalAPara { sender: ALICE, receiver: BOB }, + PenpalBPara { sender: ALICE, receiver: BOB } +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..23f80f33f78e8f5d2cc65e343dfefce2c2523e40 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml @@ -0,0 +1,34 @@ +[package] +name = "asset-hub-rococo-integration-tests" +version = "1.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +description = "Asset Hub Rococo runtime integration tests with xcm-emulator" +publish = false + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false } +assert_matches = "1.5.0" + +# Substrate +sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false} +frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false} +pallet-balances = { path = "../../../../../../../substrate/frame/balances", default-features = false} +pallet-assets = { path = "../../../../../../../substrate/frame/assets", default-features = false} +pallet-asset-conversion = { path = "../../../../../../../substrate/frame/asset-conversion", default-features = false} +pallet-message-queue = { path = "../../../../../../../substrate/frame/message-queue", default-features = false } + +# Polkadot +xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false} +pallet-xcm = { path = "../../../../../../../polkadot/xcm/pallet-xcm", default-features = false} +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../../polkadot/xcm/xcm-executor", default-features = false} +rococo-runtime = { path = "../../../../../../../polkadot/runtime/rococo" } + +# Cumulus +asset-test-utils = { path = "../../../../../runtimes/assets/test-utils" } +parachains-common = { path = "../../../../../../parachains/common" } +asset-hub-rococo-runtime = { path = "../../../../../runtimes/assets/asset-hub-rococo" } +emulated-integration-tests-common = { path = "../../../common", default-features = false } +penpal-runtime = { path = "../../../../../runtimes/testing/penpal" } +rococo-system-emulated-network ={ path = "../../../networks/rococo-system" } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..3ff8c37c64651c79d76a33bf10aa83db5ba20754 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs @@ -0,0 +1,107 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
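Each `*-emulated-network` crate above pairs `decl_test_networks!` with `decl_test_sender_receiver_accounts_parameter_types!`, so a downstream test crate gets both the chain types (suffixed `Relay`/`Para`) and pre-funded `Sender`/`Receiver` accounts for each of them. A short sketch of what that buys a test, assuming the `rococo-system` network and the renames used in the `lib.rs` below:

use parachains_common::AccountId;
use rococo_system_emulated_network::{
	AssetHubRococoPara as AssetHubRococo, AssetHubRococoParaSender as AssetHubRococoSender,
	RococoRelay as Rococo, RococoRelaySender as RococoSender,
};
use xcm_emulator::TestExt;

#[test]
fn emulated_network_wiring() {
	// `*Sender` / `*Receiver` are getters for accounts the emulator funds at genesis
	// (ALICE / BOB by default).
	let _relay_sender: AccountId = RococoSender::get();
	let _para_sender: AccountId = AssetHubRococoSender::get();

	// The chain aliases drive execution on their respective emulated runtimes.
	Rococo::execute_with(|| { /* relay-side setup / assertions */ });
	AssetHubRococo::execute_with(|| { /* parachain-side assertions */ });
}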
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub use codec::Encode; + +// Substrate +pub use frame_support::{ + assert_err, assert_ok, + pallet_prelude::Weight, + sp_runtime::{AccountId32, DispatchError, DispatchResult}, + traits::fungibles::Inspect, +}; + +// Polkadot +pub use xcm::{ + prelude::{AccountId32 as AccountId32Junction, *}, + v3::{Error, NetworkId::Rococo as RococoId}, +}; + +// Cumulus +pub use asset_test_utils::xcm_helpers; +pub use emulated_integration_tests_common::{ + test_parachain_is_trusted_teleporter, + xcm_emulator::{ + assert_expected_events, bx, helpers::weight_within_threshold, Chain, Parachain as Para, + RelayChain as Relay, Test, TestArgs, TestContext, TestExt, + }, + xcm_helpers::{xcm_transact_paid_execution, xcm_transact_unpaid_execution}, + PROOF_SIZE_THRESHOLD, REF_TIME_THRESHOLD, XCM_V3, +}; +pub use parachains_common::{AccountId, Balance}; +pub use rococo_system_emulated_network::{ + asset_hub_rococo_emulated_chain::{ + genesis::ED as ASSET_HUB_ROCOCO_ED, AssetHubRococoParaPallet as AssetHubRococoPallet, + }, + penpal_emulated_chain::PenpalAParaPallet as PenpalAPallet, + rococo_emulated_chain::{genesis::ED as ROCOCO_ED, RococoRelayPallet as RococoPallet}, + AssetHubRococoPara as AssetHubRococo, AssetHubRococoParaReceiver as AssetHubRococoReceiver, + AssetHubRococoParaSender as AssetHubRococoSender, BridgeHubRococoPara as BridgeHubRococo, + BridgeHubRococoParaReceiver as BridgeHubRococoReceiver, PenpalAPara as PenpalA, + PenpalAParaReceiver as PenpalAReceiver, PenpalAParaSender as PenpalASender, + RococoRelay as Rococo, RococoRelayReceiver as RococoReceiver, + RococoRelaySender as RococoSender, +}; + +pub const ASSET_ID: u32 = 1; +pub const ASSET_MIN_BALANCE: u128 = 1000; +// `Assets` pallet index +pub const ASSETS_PALLET_ID: u8 = 50; + +pub type RelayToSystemParaTest = Test; +pub type RelayToParaTest = Test; +pub type SystemParaToRelayTest = Test; +pub type SystemParaToParaTest = Test; +pub type ParaToSystemParaTest = Test; + +/// Returns a `TestArgs` instance to be used for the Relay Chain across integration tests +pub fn relay_test_args( + dest: MultiLocation, + beneficiary_id: AccountId32, + amount: Balance, +) -> TestArgs { + TestArgs { + dest, + beneficiary: AccountId32Junction { network: None, id: beneficiary_id.into() }.into(), + amount, + assets: (Here, amount).into(), + asset_id: None, + fee_asset_item: 0, + weight_limit: WeightLimit::Unlimited, + } +} + +/// Returns a `TestArgs` instance to be used by parachains across integration tests +pub fn para_test_args( + dest: MultiLocation, + beneficiary_id: AccountId32, + amount: Balance, + assets: MultiAssets, + asset_id: Option, + fee_asset_item: u32, +) -> TestArgs { + TestArgs { + dest, + beneficiary: AccountId32Junction { network: None, id: beneficiary_id.into() }.into(), + amount, + assets, + asset_id, + fee_asset_item, + weight_limit: WeightLimit::Unlimited, + } +} + +#[cfg(test)] +mod tests; diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/mod.rs new file mode 100644 index 
0000000000000000000000000000000000000000..b3841af0e6c38372b8fb621fac468b25bdec63a1 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/mod.rs @@ -0,0 +1,20 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +mod reserve_transfer; +mod send; +mod set_xcm_versions; +mod swap; +mod teleport; diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs new file mode 100644 index 0000000000000000000000000000000000000000..d0e9b72176bce1a21ede4e3491ec5aefa23abe43 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs @@ -0,0 +1,502 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::*; +use asset_hub_rococo_runtime::xcm_config::XcmConfig as AssetHubRococoXcmConfig; +use penpal_runtime::xcm_config::XcmConfig as PenpalRococoXcmConfig; +use rococo_runtime::xcm_config::XcmConfig as RococoXcmConfig; + +fn relay_to_para_sender_assertions(t: RelayToParaTest) { + type RuntimeEvent = ::RuntimeEvent; + + Rococo::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts(864_610_000, 8_799))); + + assert_expected_events!( + Rococo, + vec![ + // Amount to reserve transfer is transferred to Parachain's Sovereign account + RuntimeEvent::Balances( + pallet_balances::Event::Transfer { from, to, amount } + ) => { + from: *from == t.sender.account_id, + to: *to == Rococo::sovereign_account_id_of( + t.args.dest + ), + amount: *amount == t.args.amount, + }, + ] + ); +} + +fn relay_to_para_receiver_assertions(_: Test) { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + PenpalA, + vec![ + RuntimeEvent::Balances(pallet_balances::Event::Deposit { .. }) => {}, + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. 
} + ) => {}, + ] + ); +} + +fn system_para_to_para_sender_assertions(t: SystemParaToParaTest) { + type RuntimeEvent = ::RuntimeEvent; + + AssetHubRococo::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts( + 864_610_000, + 8_799, + ))); + + assert_expected_events!( + AssetHubRococo, + vec![ + // Amount to reserve transfer is transferred to Parachain's Sovereign account + RuntimeEvent::Balances( + pallet_balances::Event::Transfer { from, to, amount } + ) => { + from: *from == t.sender.account_id, + to: *to == AssetHubRococo::sovereign_account_id_of( + t.args.dest + ), + amount: *amount == t.args.amount, + }, + ] + ); +} + +fn system_para_to_para_receiver_assertions(_: Test) { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + PenpalA, + vec![ + RuntimeEvent::Balances(pallet_balances::Event::Deposit { .. }) => {}, + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + ] + ); +} + +fn para_to_system_para_sender_assertions(t: ParaToSystemParaTest) { + type RuntimeEvent = ::RuntimeEvent; + + PenpalA::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts(864_610_000, 8_799))); + + assert_expected_events!( + PenpalA, + vec![ + // Amount to reserve transfer is transferred to Parachain's Sovereign account + RuntimeEvent::Balances( + pallet_balances::Event::Withdraw { who, amount } + ) => { + who: *who == t.sender.account_id, + amount: *amount == t.args.amount, + }, + ] + ); +} + +fn para_to_system_para_receiver_assertions(t: ParaToSystemParaTest) { + type RuntimeEvent = ::RuntimeEvent; + + let sov_penpal_on_ahr = AssetHubRococo::sovereign_account_id_of( + AssetHubRococo::sibling_location_of(PenpalA::para_id()), + ); + + assert_expected_events!( + AssetHubRococo, + vec![ + // Amount to reserve transfer is transferred to Parachain's Sovereign account + RuntimeEvent::Balances( + pallet_balances::Event::Withdraw { who, amount } + ) => { + who: *who == sov_penpal_on_ahr.clone().into(), + amount: *amount == t.args.amount, + }, + RuntimeEvent::Balances(pallet_balances::Event::Deposit { .. }) => {}, + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + ] + ); +} + +fn system_para_to_para_assets_sender_assertions(t: SystemParaToParaTest) { + type RuntimeEvent = ::RuntimeEvent; + + AssetHubRococo::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts( + 864_610_000, + 8799, + ))); + + assert_expected_events!( + AssetHubRococo, + vec![ + // Amount to reserve transfer is transferred to Parachain's Sovereign account + RuntimeEvent::Assets( + pallet_assets::Event::Transferred { asset_id, from, to, amount } + ) => { + asset_id: *asset_id == ASSET_ID, + from: *from == t.sender.account_id, + to: *to == AssetHubRococo::sovereign_account_id_of( + t.args.dest + ), + amount: *amount == t.args.amount, + }, + ] + ); +} + +fn system_para_to_para_assets_receiver_assertions(_: Test) { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + PenpalA, + vec![ + RuntimeEvent::Balances(pallet_balances::Event::Deposit { .. }) => {}, + RuntimeEvent::Assets(pallet_assets::Event::Issued { .. }) => {}, + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. 
} + ) => {}, + ] + ); +} + +fn relay_to_para_limited_reserve_transfer_assets(t: RelayToParaTest) -> DispatchResult { + ::XcmPallet::limited_reserve_transfer_assets( + t.signed_origin, + bx!(t.args.dest.into()), + bx!(t.args.beneficiary.into()), + bx!(t.args.assets.into()), + t.args.fee_asset_item, + t.args.weight_limit, + ) +} + +fn system_para_to_para_limited_reserve_transfer_assets(t: SystemParaToParaTest) -> DispatchResult { + ::PolkadotXcm::limited_reserve_transfer_assets( + t.signed_origin, + bx!(t.args.dest.into()), + bx!(t.args.beneficiary.into()), + bx!(t.args.assets.into()), + t.args.fee_asset_item, + t.args.weight_limit, + ) +} + +fn para_to_system_para_limited_reserve_transfer_assets(t: ParaToSystemParaTest) -> DispatchResult { + ::PolkadotXcm::limited_reserve_transfer_assets( + t.signed_origin, + bx!(t.args.dest.into()), + bx!(t.args.beneficiary.into()), + bx!(t.args.assets.into()), + t.args.fee_asset_item, + t.args.weight_limit, + ) +} + +/// Reserve Transfers of native asset from Relay Chain to the System Parachain shouldn't work +#[test] +fn reserve_transfer_native_asset_from_relay_to_system_para_fails() { + let signed_origin = ::RuntimeOrigin::signed(RococoSender::get().into()); + let destination = Rococo::child_location_of(AssetHubRococo::para_id()); + let beneficiary: MultiLocation = + AccountId32Junction { network: None, id: AssetHubRococoReceiver::get().into() }.into(); + let amount_to_send: Balance = ROCOCO_ED * 1000; + let assets: MultiAssets = (Here, amount_to_send).into(); + let fee_asset_item = 0; + + // this should fail + Rococo::execute_with(|| { + let result = ::XcmPallet::limited_reserve_transfer_assets( + signed_origin, + bx!(destination.into()), + bx!(beneficiary.into()), + bx!(assets.into()), + fee_asset_item, + WeightLimit::Unlimited, + ); + assert_err!( + result, + DispatchError::Module(sp_runtime::ModuleError { + index: 99, + error: [2, 0, 0, 0], + message: Some("Filtered") + }) + ); + }); +} + +/// Reserve Transfers of native asset from System Parachain to Relay Chain shouldn't work +#[test] +fn reserve_transfer_native_asset_from_system_para_to_relay_fails() { + // Init values for System Parachain + let signed_origin = + ::RuntimeOrigin::signed(AssetHubRococoSender::get().into()); + let destination = AssetHubRococo::parent_location(); + let beneficiary_id = RococoReceiver::get(); + let beneficiary: MultiLocation = + AccountId32Junction { network: None, id: beneficiary_id.into() }.into(); + let amount_to_send: Balance = ASSET_HUB_ROCOCO_ED * 1000; + + let assets: MultiAssets = (Parent, amount_to_send).into(); + let fee_asset_item = 0; + + // this should fail + AssetHubRococo::execute_with(|| { + let result = + ::PolkadotXcm::limited_reserve_transfer_assets( + signed_origin, + bx!(destination.into()), + bx!(beneficiary.into()), + bx!(assets.into()), + fee_asset_item, + WeightLimit::Unlimited, + ); + assert_err!( + result, + DispatchError::Module(sp_runtime::ModuleError { + index: 31, + error: [2, 0, 0, 0], + message: Some("Filtered") + }) + ); + }); +} + +/// Reserve Transfers of native asset from Relay to Parachain should work +#[test] +fn reserve_transfer_native_asset_from_relay_to_para() { + // Init values for Relay + let destination = Rococo::child_location_of(PenpalA::para_id()); + let beneficiary_id = PenpalAReceiver::get(); + let amount_to_send: Balance = ROCOCO_ED * 1000; + + let test_args = TestContext { + sender: RococoSender::get(), + receiver: PenpalAReceiver::get(), + args: relay_test_args(destination, beneficiary_id, amount_to_send), + }; + + 
let mut test = RelayToParaTest::new(test_args); + + let sender_balance_before = test.sender.balance; + let receiver_balance_before = test.receiver.balance; + + test.set_assertion::(relay_to_para_sender_assertions); + test.set_assertion::(relay_to_para_receiver_assertions); + test.set_dispatchable::(relay_to_para_limited_reserve_transfer_assets); + test.assert(); + + let delivery_fees = Rococo::execute_with(|| { + xcm_helpers::transfer_assets_delivery_fees::< + ::XcmSender, + >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) + }); + + let sender_balance_after = test.sender.balance; + let receiver_balance_after = test.receiver.balance; + + // Sender's balance is reduced + assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); + // Receiver's balance is increased + assert!(receiver_balance_after > receiver_balance_before); +} + +/// Reserve Transfers of native asset from System Parachain to Parachain should work +#[test] +fn reserve_transfer_native_asset_from_system_para_to_para() { + // Init values for System Parachain + let destination = AssetHubRococo::sibling_location_of(PenpalA::para_id()); + let beneficiary_id = PenpalAReceiver::get(); + let amount_to_send: Balance = ASSET_HUB_ROCOCO_ED * 1000; + let assets = (Parent, amount_to_send).into(); + + let test_args = TestContext { + sender: AssetHubRococoSender::get(), + receiver: PenpalAReceiver::get(), + args: para_test_args(destination, beneficiary_id, amount_to_send, assets, None, 0), + }; + + let mut test = SystemParaToParaTest::new(test_args); + + let sender_balance_before = test.sender.balance; + let receiver_balance_before = test.receiver.balance; + + test.set_assertion::(system_para_to_para_sender_assertions); + test.set_assertion::(system_para_to_para_receiver_assertions); + test.set_dispatchable::(system_para_to_para_limited_reserve_transfer_assets); + test.assert(); + + let sender_balance_after = test.sender.balance; + let receiver_balance_after = test.receiver.balance; + + let delivery_fees = AssetHubRococo::execute_with(|| { + xcm_helpers::transfer_assets_delivery_fees::< + ::XcmSender, + >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) + }); + + // Sender's balance is reduced + assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); + // Receiver's balance is increased + assert!(receiver_balance_after > receiver_balance_before); +} + +/// Reserve Transfers of native asset from Parachain to System Parachain should work +#[test] +fn reserve_transfer_native_asset_from_para_to_system_para() { + // Init values for Penpal Parachain + let destination = PenpalA::sibling_location_of(AssetHubRococo::para_id()); + let beneficiary_id = AssetHubRococoReceiver::get(); + let amount_to_send: Balance = ASSET_HUB_ROCOCO_ED * 1000; + let assets = (Parent, amount_to_send).into(); + + let test_args = TestContext { + sender: PenpalASender::get(), + receiver: AssetHubRococoReceiver::get(), + args: para_test_args(destination, beneficiary_id, amount_to_send, assets, None, 0), + }; + + let mut test = ParaToSystemParaTest::new(test_args); + + let sender_balance_before = test.sender.balance; + let receiver_balance_before = test.receiver.balance; + + let penpal_location_as_seen_by_ahr = AssetHubRococo::sibling_location_of(PenpalA::para_id()); + let sov_penpal_on_ahr = AssetHubRococo::sovereign_account_id_of(penpal_location_as_seen_by_ahr); + + // fund the Penpal's SA on AHR with the native tokens held 
in reserve + AssetHubRococo::fund_accounts(vec![(sov_penpal_on_ahr.into(), amount_to_send * 2)]); + + test.set_assertion::(para_to_system_para_sender_assertions); + test.set_assertion::(para_to_system_para_receiver_assertions); + test.set_dispatchable::(para_to_system_para_limited_reserve_transfer_assets); + test.assert(); + + let sender_balance_after = test.sender.balance; + let receiver_balance_after = test.receiver.balance; + + let delivery_fees = PenpalA::execute_with(|| { + xcm_helpers::transfer_assets_delivery_fees::< + ::XcmSender, + >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) + }); + + // Sender's balance is reduced + assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); + // Receiver's balance is increased + assert!(receiver_balance_after > receiver_balance_before); +} + +/// Reserve Transfers of a local asset and native asset from System Parachain to Parachain should +/// work +#[test] +fn reserve_transfer_assets_from_system_para_to_para() { + // Force create asset on AssetHubRococo and PenpalA from Relay Chain + AssetHubRococo::force_create_and_mint_asset( + ASSET_ID, + ASSET_MIN_BALANCE, + false, + AssetHubRococoSender::get(), + Some(Weight::from_parts(1_019_445_000, 200_000)), + ASSET_MIN_BALANCE * 1_000_000, + ); + PenpalA::force_create_and_mint_asset( + ASSET_ID, + ASSET_MIN_BALANCE, + false, + PenpalASender::get(), + Some(Weight::from_parts(1_019_445_000, 200_000)), + 0, + ); + + // Init values for System Parachain + let destination = AssetHubRococo::sibling_location_of(PenpalA::para_id()); + let beneficiary_id = PenpalAReceiver::get(); + let fee_amount_to_send = ASSET_HUB_ROCOCO_ED * 1000; + let asset_amount_to_send = ASSET_MIN_BALANCE * 1000; + let assets: MultiAssets = vec![ + (Parent, fee_amount_to_send).into(), + (X2(PalletInstance(ASSETS_PALLET_ID), GeneralIndex(ASSET_ID.into())), asset_amount_to_send) + .into(), + ] + .into(); + let fee_asset_index = assets + .inner() + .iter() + .position(|r| r == &(Parent, fee_amount_to_send).into()) + .unwrap() as u32; + + let para_test_args = TestContext { + sender: AssetHubRococoSender::get(), + receiver: PenpalAReceiver::get(), + args: para_test_args( + destination, + beneficiary_id, + asset_amount_to_send, + assets, + None, + fee_asset_index, + ), + }; + + let mut test = SystemParaToParaTest::new(para_test_args); + + // Create SA-of-Penpal-on-AHR with ED. 
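+	// In a reserve transfer the funds sent to Penpal stay parked in Penpal's sovereign
+	// account on Asset Hub Rococo, so that account is given the existential deposit up
+	// front to make sure it exists before the transfer below executes.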
+ let penpal_location = AssetHubRococo::sibling_location_of(PenpalA::para_id()); + let sov_penpal_on_ahr = AssetHubRococo::sovereign_account_id_of(penpal_location); + AssetHubRococo::fund_accounts(vec![(sov_penpal_on_ahr.into(), ROCOCO_ED)]); + + let sender_balance_before = test.sender.balance; + let receiver_balance_before = test.receiver.balance; + + let sender_assets_before = AssetHubRococo::execute_with(|| { + type Assets = ::Assets; + >::balance(ASSET_ID, &AssetHubRococoSender::get()) + }); + let receiver_assets_before = PenpalA::execute_with(|| { + type Assets = ::Assets; + >::balance(ASSET_ID, &PenpalAReceiver::get()) + }); + + test.set_assertion::(system_para_to_para_assets_sender_assertions); + test.set_assertion::(system_para_to_para_assets_receiver_assertions); + test.set_dispatchable::(system_para_to_para_limited_reserve_transfer_assets); + test.assert(); + + let sender_balance_after = test.sender.balance; + let receiver_balance_after = test.receiver.balance; + + // Sender's balance is reduced + assert!(sender_balance_after < sender_balance_before); + // Receiver's balance is increased + assert!(receiver_balance_after > receiver_balance_before); + + let sender_assets_after = AssetHubRococo::execute_with(|| { + type Assets = ::Assets; + >::balance(ASSET_ID, &AssetHubRococoSender::get()) + }); + let receiver_assets_after = PenpalA::execute_with(|| { + type Assets = ::Assets; + >::balance(ASSET_ID, &PenpalAReceiver::get()) + }); + + // Sender's balance is reduced + assert_eq!(sender_assets_before - asset_amount_to_send, sender_assets_after); + // Receiver's balance is increased + assert!(receiver_assets_after > receiver_assets_before); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/send.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/send.rs new file mode 100644 index 0000000000000000000000000000000000000000..7be0463d2ec46fb3d6f9ea1eeaff30336f0fddcf --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/send.rs @@ -0,0 +1,104 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::*; + +/// Relay Chain should be able to execute `Transact` instructions in System Parachain +/// when `OriginKind::Superuser`. 
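+/// The asset is force-created on the System Parachain by dispatching the call from the
+/// Relay Chain root origin (see `force_create_asset_from_relay_as_root` below).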
+#[test] +fn send_transact_as_superuser_from_relay_to_system_para_works() { + AssetHubRococo::force_create_asset_from_relay_as_root( + ASSET_ID, + ASSET_MIN_BALANCE, + true, + AssetHubRococoSender::get().into(), + Some(Weight::from_parts(1_019_445_000, 200_000)), + ) +} + +/// Parachain should be able to send XCM paying its fee with sufficient asset +/// in the System Parachain +#[test] +fn send_xcm_from_para_to_system_para_paying_fee_with_assets_works() { + let para_sovereign_account = AssetHubRococo::sovereign_account_id_of( + AssetHubRococo::sibling_location_of(PenpalA::para_id()), + ); + + // Force create and mint assets for Parachain's sovereign account + AssetHubRococo::force_create_and_mint_asset( + ASSET_ID, + ASSET_MIN_BALANCE, + true, + para_sovereign_account.clone(), + Some(Weight::from_parts(1_019_445_000, 200_000)), + ASSET_MIN_BALANCE * 1000000000, + ); + + // We just need a call that can pass the `SafeCallFilter` + // Call values are not relevant + let call = AssetHubRococo::force_create_asset_call( + ASSET_ID, + para_sovereign_account.clone(), + true, + ASSET_MIN_BALANCE, + ); + + let origin_kind = OriginKind::SovereignAccount; + let fee_amount = ASSET_MIN_BALANCE * 1000000; + let native_asset = + (X2(PalletInstance(ASSETS_PALLET_ID), GeneralIndex(ASSET_ID.into())), fee_amount).into(); + + let root_origin = ::RuntimeOrigin::root(); + let system_para_destination = PenpalA::sibling_location_of(AssetHubRococo::para_id()).into(); + let xcm = xcm_transact_paid_execution( + call, + origin_kind, + native_asset, + para_sovereign_account.clone(), + ); + + PenpalA::execute_with(|| { + assert_ok!(::PolkadotXcm::send( + root_origin, + bx!(system_para_destination), + bx!(xcm), + )); + + PenpalA::assert_xcm_pallet_sent(); + }); + + AssetHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + AssetHubRococo::assert_xcmp_queue_success(Some(Weight::from_parts( + 15_594_564_000, + 562_893, + ))); + + assert_expected_events!( + AssetHubRococo, + vec![ + RuntimeEvent::Assets(pallet_assets::Event::Burned { asset_id, owner, balance }) => { + asset_id: *asset_id == ASSET_ID, + owner: *owner == para_sovereign_account, + balance: *balance == fee_amount, + }, + RuntimeEvent::Assets(pallet_assets::Event::Issued { asset_id, .. }) => { + asset_id: *asset_id == ASSET_ID, + }, + ] + ); + }); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/set_xcm_versions.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/set_xcm_versions.rs new file mode 100644 index 0000000000000000000000000000000000000000..8faba50fc888b2155836d54c8e111146bc85c218 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/set_xcm_versions.rs @@ -0,0 +1,83 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
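+
+// These tests force-set the supported XCM version in both directions: the Relay Chain
+// sets it for the System Parachain via `force_xcm_version`, and the System Parachain
+// sets it for the Relay Chain by executing a `force_xcm_version` call delivered from
+// the relay through `send_unpaid_transact_to_parachain_as_root`.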
+ +use crate::*; + +#[test] +fn relay_sets_system_para_xcm_supported_version() { + // Init tests variables + let sudo_origin = ::RuntimeOrigin::root(); + let system_para_destination: MultiLocation = + Rococo::child_location_of(AssetHubRococo::para_id()); + + // Relay Chain sets supported version for Asset Parachain + Rococo::execute_with(|| { + assert_ok!(::XcmPallet::force_xcm_version( + sudo_origin, + bx!(system_para_destination), + XCM_V3 + )); + + type RuntimeEvent = ::RuntimeEvent; + + assert_expected_events!( + Rococo, + vec![ + RuntimeEvent::XcmPallet(pallet_xcm::Event::SupportedVersionChanged { + location, + version: XCM_V3 + }) => { location: *location == system_para_destination, }, + ] + ); + }); +} + +#[test] +fn system_para_sets_relay_xcm_supported_version() { + // Init test variables + let parent_location = AssetHubRococo::parent_location(); + let force_xcm_version_call = + ::RuntimeCall::PolkadotXcm(pallet_xcm::Call::< + ::Runtime, + >::force_xcm_version { + location: bx!(parent_location), + version: XCM_V3, + }) + .encode() + .into(); + + // System Parachain sets supported version for Relay Chain through it + Rococo::send_unpaid_transact_to_parachain_as_root( + AssetHubRococo::para_id(), + force_xcm_version_call, + ); + + // System Parachain receive the XCM message + AssetHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + AssetHubRococo::assert_dmp_queue_complete(Some(Weight::from_parts(1_019_210_000, 200_000))); + + assert_expected_events!( + AssetHubRococo, + vec![ + RuntimeEvent::PolkadotXcm(pallet_xcm::Event::SupportedVersionChanged { + location, + version: XCM_V3 + }) => { location: *location == parent_location, }, + ] + ); + }); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs new file mode 100644 index 0000000000000000000000000000000000000000..e08af50c14eeda732c4fad1b1c2f296ce6fb590d --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs @@ -0,0 +1,364 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
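+
+// Local swap tests exercising `pallet-asset-conversion`. The expected LP-token figures
+// follow the constant-product rule: for initial deposits of 1_000_000_000_000 and
+// 2_000_000_000_000, sqrt(x * y) ≈ 1_414_213_562_373; the difference from the asserted
+// `lp_token_minted == 1414213562273` suggests the pool withholds ~100 units as minimum
+// liquidity.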
+ +use crate::*; +use frame_support::{instances::Instance2, BoundedVec}; +use parachains_common::rococo::currency::EXISTENTIAL_DEPOSIT; +use sp_runtime::{DispatchError, ModuleError}; + +#[test] +fn swap_locally_on_chain_using_local_assets() { + let asset_native = Box::new(asset_hub_rococo_runtime::xcm_config::TokenLocation::get()); + let asset_one = Box::new(MultiLocation { + parents: 0, + interior: X2(PalletInstance(ASSETS_PALLET_ID), GeneralIndex(ASSET_ID.into())), + }); + + AssetHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + assert_ok!(::Assets::create( + ::RuntimeOrigin::signed(AssetHubRococoSender::get()), + ASSET_ID.into(), + AssetHubRococoSender::get().into(), + 1000, + )); + assert!(::Assets::asset_exists(ASSET_ID)); + + assert_ok!(::Assets::mint( + ::RuntimeOrigin::signed(AssetHubRococoSender::get()), + ASSET_ID.into(), + AssetHubRococoSender::get().into(), + 100_000_000_000_000, + )); + + assert_ok!(::Balances::force_set_balance( + ::RuntimeOrigin::root(), + AssetHubRococoSender::get().into(), + 100_000_000_000_000, + )); + + assert_ok!(::AssetConversion::create_pool( + ::RuntimeOrigin::signed(AssetHubRococoSender::get()), + asset_native.clone(), + asset_one.clone(), + )); + + assert_expected_events!( + AssetHubRococo, + vec![ + RuntimeEvent::AssetConversion(pallet_asset_conversion::Event::PoolCreated { .. }) => {}, + ] + ); + + assert_ok!(::AssetConversion::add_liquidity( + ::RuntimeOrigin::signed(AssetHubRococoSender::get()), + asset_native.clone(), + asset_one.clone(), + 1_000_000_000_000, + 2_000_000_000_000, + 0, + 0, + AssetHubRococoSender::get().into() + )); + + assert_expected_events!( + AssetHubRococo, + vec![ + RuntimeEvent::AssetConversion(pallet_asset_conversion::Event::LiquidityAdded {lp_token_minted, .. }) => { lp_token_minted: *lp_token_minted == 1414213562273, }, + ] + ); + + let path = BoundedVec::<_, _>::truncate_from(vec![asset_native.clone(), asset_one.clone()]); + + assert_ok!( + ::AssetConversion::swap_exact_tokens_for_tokens( + ::RuntimeOrigin::signed(AssetHubRococoSender::get()), + path, + 100, + 1, + AssetHubRococoSender::get().into(), + true + ) + ); + + assert_expected_events!( + AssetHubRococo, + vec![ + RuntimeEvent::AssetConversion(pallet_asset_conversion::Event::SwapExecuted { amount_in, amount_out, .. }) => { + amount_in: *amount_in == 100, + amount_out: *amount_out == 199, + }, + ] + ); + + assert_ok!(::AssetConversion::remove_liquidity( + ::RuntimeOrigin::signed(AssetHubRococoSender::get()), + asset_native, + asset_one, + 1414213562273 - EXISTENTIAL_DEPOSIT * 2, // all but the 2 EDs can't be retrieved. + 0, + 0, + AssetHubRococoSender::get().into(), + )); + }); +} + +#[test] +fn swap_locally_on_chain_using_foreign_assets() { + use frame_support::weights::WeightToFee; + + let asset_native = Box::new(asset_hub_rococo_runtime::xcm_config::TokenLocation::get()); + + let foreign_asset1_at_asset_hub_rococo = Box::new(MultiLocation { + parents: 1, + interior: X3( + Parachain(PenpalA::para_id().into()), + PalletInstance(ASSETS_PALLET_ID), + GeneralIndex(ASSET_ID.into()), + ), + }); + + let assets_para_destination: VersionedMultiLocation = + MultiLocation { parents: 1, interior: X1(Parachain(AssetHubRococo::para_id().into())) } + .into(); + + let penpal_location = + MultiLocation { parents: 1, interior: X1(Parachain(PenpalA::para_id().into())) }; + + // 1. 
Create asset on penpal: + PenpalA::execute_with(|| { + assert_ok!(::Assets::create( + ::RuntimeOrigin::signed(PenpalASender::get()), + ASSET_ID.into(), + PenpalASender::get().into(), + 1000, + )); + + assert!(::Assets::asset_exists(ASSET_ID)); + }); + + // 2. Create foreign asset on asset_hub_rococo: + + let require_weight_at_most = Weight::from_parts(1_100_000_000_000, 30_000); + let origin_kind = OriginKind::Xcm; + let sov_penpal_on_asset_hub_rococo = AssetHubRococo::sovereign_account_id_of(penpal_location); + + AssetHubRococo::fund_accounts(vec![ + (AssetHubRococoSender::get().into(), 5_000_000 * ROCOCO_ED), /* An account to swap dot + * for something else. */ + (sov_penpal_on_asset_hub_rococo.clone().into(), 1000_000_000_000_000_000 * ROCOCO_ED), + ]); + + let sov_penpal_on_asset_hub_rococo_as_location: MultiLocation = MultiLocation { + parents: 0, + interior: X1(AccountId32Junction { + network: None, + id: sov_penpal_on_asset_hub_rococo.clone().into(), + }), + }; + + let call_foreign_assets_create = + ::RuntimeCall::ForeignAssets(pallet_assets::Call::< + ::Runtime, + Instance2, + >::create { + id: *foreign_asset1_at_asset_hub_rococo, + min_balance: 1000, + admin: sov_penpal_on_asset_hub_rococo.clone().into(), + }) + .encode() + .into(); + + let buy_execution_fee_amount = parachains_common::rococo::fee::WeightToFee::weight_to_fee( + &Weight::from_parts(10_100_000_000_000, 300_000), + ); + let buy_execution_fee = MultiAsset { + id: Concrete(MultiLocation { parents: 1, interior: Here }), + fun: Fungible(buy_execution_fee_amount), + }; + + let xcm = VersionedXcm::from(Xcm(vec![ + WithdrawAsset { 0: vec![buy_execution_fee.clone()].into() }, + BuyExecution { fees: buy_execution_fee.clone(), weight_limit: Unlimited }, + Transact { require_weight_at_most, origin_kind, call: call_foreign_assets_create }, + RefundSurplus, + DepositAsset { + assets: All.into(), + beneficiary: sov_penpal_on_asset_hub_rococo_as_location, + }, + ])); + + // Send XCM message from penpal => asset_hub_rococo + let sudo_penpal_origin = ::RuntimeOrigin::root(); + PenpalA::execute_with(|| { + assert_ok!(::PolkadotXcm::send( + sudo_penpal_origin.clone(), + bx!(assets_para_destination.clone()), + bx!(xcm), + )); + + type RuntimeEvent = ::RuntimeEvent; + + assert_expected_events!( + PenpalA, + vec![ + RuntimeEvent::PolkadotXcm(pallet_xcm::Event::Sent { .. }) => {}, + ] + ); + }); + + // Receive XCM message in Assets Parachain + AssetHubRococo::execute_with(|| { + assert!(::ForeignAssets::asset_exists( + *foreign_asset1_at_asset_hub_rococo + )); + + // 3: Mint foreign asset on asset_hub_rococo: + // + // (While it might be nice to use batch, + // currently that's disabled due to safe call filters.) + + type RuntimeEvent = ::RuntimeEvent; + // 3. Mint foreign asset (in reality this should be a teleport or some such) + assert_ok!(::ForeignAssets::mint( + ::RuntimeOrigin::signed( + sov_penpal_on_asset_hub_rococo.clone().into() + ), + *foreign_asset1_at_asset_hub_rococo, + sov_penpal_on_asset_hub_rococo.clone().into(), + 3_000_000_000_000, + )); + + assert_expected_events!( + AssetHubRococo, + vec![ + RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { .. }) => {}, + ] + ); + + // 4. Create pool: + assert_ok!(::AssetConversion::create_pool( + ::RuntimeOrigin::signed(AssetHubRococoSender::get()), + asset_native.clone(), + foreign_asset1_at_asset_hub_rococo.clone(), + )); + + assert_expected_events!( + AssetHubRococo, + vec![ + RuntimeEvent::AssetConversion(pallet_asset_conversion::Event::PoolCreated { .. 
}) => {}, + ] + ); + + // 5. Add liquidity: + assert_ok!(::AssetConversion::add_liquidity( + ::RuntimeOrigin::signed( + sov_penpal_on_asset_hub_rococo.clone() + ), + asset_native.clone(), + foreign_asset1_at_asset_hub_rococo.clone(), + 1_000_000_000_000, + 2_000_000_000_000, + 0, + 0, + sov_penpal_on_asset_hub_rococo.clone().into() + )); + + assert_expected_events!( + AssetHubRococo, + vec![ + RuntimeEvent::AssetConversion(pallet_asset_conversion::Event::LiquidityAdded {lp_token_minted, .. }) => { + lp_token_minted: *lp_token_minted == 1414213562273, + }, + ] + ); + + // 6. Swap! + let path = BoundedVec::<_, _>::truncate_from(vec![ + asset_native.clone(), + foreign_asset1_at_asset_hub_rococo.clone(), + ]); + + assert_ok!( + ::AssetConversion::swap_exact_tokens_for_tokens( + ::RuntimeOrigin::signed(AssetHubRococoSender::get()), + path, + 100000, + 1000, + AssetHubRococoSender::get().into(), + true + ) + ); + + assert_expected_events!( + AssetHubRococo, + vec![ + RuntimeEvent::AssetConversion(pallet_asset_conversion::Event::SwapExecuted { amount_in, amount_out, .. },) => { + amount_in: *amount_in == 100000, + amount_out: *amount_out == 199399, + }, + ] + ); + + // 7. Remove liquidity + assert_ok!(::AssetConversion::remove_liquidity( + ::RuntimeOrigin::signed( + sov_penpal_on_asset_hub_rococo.clone() + ), + asset_native, + foreign_asset1_at_asset_hub_rococo, + 1414213562273 - 2_000_000_000, // all but the 2 EDs can't be retrieved. + 0, + 0, + sov_penpal_on_asset_hub_rococo.clone().into(), + )); + }); +} + +#[test] +fn cannot_create_pool_from_pool_assets() { + let asset_native = Box::new(asset_hub_rococo_runtime::xcm_config::TokenLocation::get()); + let mut asset_one = asset_hub_rococo_runtime::xcm_config::PoolAssetsPalletLocation::get(); + asset_one.append_with(GeneralIndex(ASSET_ID.into())).expect("pool assets"); + + AssetHubRococo::execute_with(|| { + let pool_owner_account_id = asset_hub_rococo_runtime::AssetConversionOrigin::get(); + + assert_ok!(::PoolAssets::create( + ::RuntimeOrigin::signed(pool_owner_account_id.clone()), + ASSET_ID.into(), + pool_owner_account_id.clone().into(), + 1000, + )); + assert!(::PoolAssets::asset_exists(ASSET_ID)); + + assert_ok!(::PoolAssets::mint( + ::RuntimeOrigin::signed(pool_owner_account_id), + ASSET_ID.into(), + AssetHubRococoSender::get().into(), + 3_000_000_000_000, + )); + + assert_matches::assert_matches!( + ::AssetConversion::create_pool( + ::RuntimeOrigin::signed(AssetHubRococoSender::get()), + asset_native.clone(), + Box::new(asset_one), + ), + Err(DispatchError::Module(ModuleError{index: _, error: _, message})) => assert_eq!(message, Some("UnsupportedAsset")) + ); + }); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs new file mode 100644 index 0000000000000000000000000000000000000000..f8017f7a1c54fa816006a29f52065048c779b680 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs @@ -0,0 +1,412 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::*; +use asset_hub_rococo_runtime::xcm_config::XcmConfig as AssetHubRococoXcmConfig; +use rococo_runtime::xcm_config::XcmConfig as RococoXcmConfig; + +fn relay_origin_assertions(t: RelayToSystemParaTest) { + type RuntimeEvent = ::RuntimeEvent; + + Rococo::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts(631_531_000, 7_186))); + + assert_expected_events!( + Rococo, + vec![ + // Amount to teleport is withdrawn from Sender + RuntimeEvent::Balances(pallet_balances::Event::Withdraw { who, amount }) => { + who: *who == t.sender.account_id, + amount: *amount == t.args.amount, + }, + // Amount to teleport is deposited in Relay's `CheckAccount` + RuntimeEvent::Balances(pallet_balances::Event::Deposit { who, amount }) => { + who: *who == ::XcmPallet::check_account(), + amount: *amount == t.args.amount, + }, + ] + ); +} + +fn relay_dest_assertions(t: SystemParaToRelayTest) { + type RuntimeEvent = ::RuntimeEvent; + + Rococo::assert_ump_queue_processed( + true, + Some(AssetHubRococo::para_id()), + Some(Weight::from_parts(307_225_000, 7_186)), + ); + + assert_expected_events!( + Rococo, + vec![ + // Amount is withdrawn from Relay Chain's `CheckAccount` + RuntimeEvent::Balances(pallet_balances::Event::Withdraw { who, amount }) => { + who: *who == ::XcmPallet::check_account(), + amount: *amount == t.args.amount, + }, + // Amount minus fees are deposited in Receiver's account + RuntimeEvent::Balances(pallet_balances::Event::Deposit { who, .. }) => { + who: *who == t.receiver.account_id, + }, + ] + ); +} + +fn relay_dest_assertions_fail(_t: SystemParaToRelayTest) { + Rococo::assert_ump_queue_processed( + false, + Some(AssetHubRococo::para_id()), + Some(Weight::from_parts(157_718_000, 3_593)), + ); +} + +fn para_origin_assertions(t: SystemParaToRelayTest) { + type RuntimeEvent = ::RuntimeEvent; + + AssetHubRococo::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts( + 720_053_000, + 7_203, + ))); + + AssetHubRococo::assert_parachain_system_ump_sent(); + + assert_expected_events!( + AssetHubRococo, + vec![ + // Amount is withdrawn from Sender's account + RuntimeEvent::Balances(pallet_balances::Event::Withdraw { who, amount }) => { + who: *who == t.sender.account_id, + amount: *amount == t.args.amount, + }, + ] + ); +} + +fn para_dest_assertions(t: RelayToSystemParaTest) { + type RuntimeEvent = ::RuntimeEvent; + + AssetHubRococo::assert_dmp_queue_complete(Some(Weight::from_parts(157_718_000, 3593))); + + assert_expected_events!( + AssetHubRococo, + vec![ + // Amount minus fees are deposited in Receiver's account + RuntimeEvent::Balances(pallet_balances::Event::Deposit { who, .. 
}) => { + who: *who == t.receiver.account_id, + }, + ] + ); +} + +fn relay_limited_teleport_assets(t: RelayToSystemParaTest) -> DispatchResult { + ::XcmPallet::limited_teleport_assets( + t.signed_origin, + bx!(t.args.dest.into()), + bx!(t.args.beneficiary.into()), + bx!(t.args.assets.into()), + t.args.fee_asset_item, + t.args.weight_limit, + ) +} + +fn relay_teleport_assets(t: RelayToSystemParaTest) -> DispatchResult { + ::XcmPallet::teleport_assets( + t.signed_origin, + bx!(t.args.dest.into()), + bx!(t.args.beneficiary.into()), + bx!(t.args.assets.into()), + t.args.fee_asset_item, + ) +} + +fn system_para_limited_teleport_assets(t: SystemParaToRelayTest) -> DispatchResult { + ::PolkadotXcm::limited_teleport_assets( + t.signed_origin, + bx!(t.args.dest.into()), + bx!(t.args.beneficiary.into()), + bx!(t.args.assets.into()), + t.args.fee_asset_item, + t.args.weight_limit, + ) +} + +fn system_para_teleport_assets(t: SystemParaToRelayTest) -> DispatchResult { + ::PolkadotXcm::teleport_assets( + t.signed_origin, + bx!(t.args.dest.into()), + bx!(t.args.beneficiary.into()), + bx!(t.args.assets.into()), + t.args.fee_asset_item, + ) +} + +/// Limited Teleport of native asset from Relay Chain to the System Parachain should work +#[test] +fn limited_teleport_native_assets_from_relay_to_system_para_works() { + // Init values for Relay Chain + let amount_to_send: Balance = ROCOCO_ED * 1000; + let dest = Rococo::child_location_of(AssetHubRococo::para_id()); + let beneficiary_id = AssetHubRococoReceiver::get(); + let test_args = TestContext { + sender: RococoSender::get(), + receiver: AssetHubRococoReceiver::get(), + args: relay_test_args(dest, beneficiary_id, amount_to_send), + }; + + let mut test = RelayToSystemParaTest::new(test_args); + + let sender_balance_before = test.sender.balance; + let receiver_balance_before = test.receiver.balance; + + test.set_assertion::(relay_origin_assertions); + test.set_assertion::(para_dest_assertions); + test.set_dispatchable::(relay_limited_teleport_assets); + test.assert(); + + let delivery_fees = Rococo::execute_with(|| { + xcm_helpers::transfer_assets_delivery_fees::< + ::XcmSender, + >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) + }); + + let sender_balance_after = test.sender.balance; + let receiver_balance_after = test.receiver.balance; + + // Sender's balance is reduced + assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); + // Receiver's balance is increased + assert!(receiver_balance_after > receiver_balance_before); +} + +/// Limited Teleport of native asset from System Parachain to Relay Chain +/// should work when there is enough balance in Relay Chain's `CheckAccount` +#[test] +fn limited_teleport_native_assets_back_from_system_para_to_relay_works() { + // Dependency - Relay Chain's `CheckAccount` should have enough balance + limited_teleport_native_assets_from_relay_to_system_para_works(); + + // Init values for Relay Chain + let amount_to_send: Balance = ASSET_HUB_ROCOCO_ED * 1000; + let destination = AssetHubRococo::parent_location(); + let beneficiary_id = RococoReceiver::get(); + let assets = (Parent, amount_to_send).into(); + + let test_args = TestContext { + sender: AssetHubRococoSender::get(), + receiver: RococoReceiver::get(), + args: para_test_args(destination, beneficiary_id, amount_to_send, assets, None, 0), + }; + + let mut test = SystemParaToRelayTest::new(test_args); + + let sender_balance_before = test.sender.balance; + let receiver_balance_before = 
test.receiver.balance; + + test.set_assertion::(para_origin_assertions); + test.set_assertion::(relay_dest_assertions); + test.set_dispatchable::(system_para_limited_teleport_assets); + test.assert(); + + let sender_balance_after = test.sender.balance; + let receiver_balance_after = test.receiver.balance; + + let delivery_fees = AssetHubRococo::execute_with(|| { + xcm_helpers::transfer_assets_delivery_fees::< + ::XcmSender, + >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) + }); + + // Sender's balance is reduced + assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); + // Receiver's balance is increased + assert!(receiver_balance_after > receiver_balance_before); +} + +/// Limited Teleport of native asset from System Parachain to Relay Chain +/// should't work when there is not enough balance in Relay Chain's `CheckAccount` +#[test] +fn limited_teleport_native_assets_from_system_para_to_relay_fails() { + // Init values for Relay Chain + let amount_to_send: Balance = ASSET_HUB_ROCOCO_ED * 1000; + let destination = AssetHubRococo::parent_location().into(); + let beneficiary_id = RococoReceiver::get().into(); + let assets = (Parent, amount_to_send).into(); + + let test_args = TestContext { + sender: AssetHubRococoSender::get(), + receiver: RococoReceiver::get(), + args: para_test_args(destination, beneficiary_id, amount_to_send, assets, None, 0), + }; + + let mut test = SystemParaToRelayTest::new(test_args); + + let sender_balance_before = test.sender.balance; + let receiver_balance_before = test.receiver.balance; + + test.set_assertion::(para_origin_assertions); + test.set_assertion::(relay_dest_assertions_fail); + test.set_dispatchable::(system_para_limited_teleport_assets); + test.assert(); + + let sender_balance_after = test.sender.balance; + let receiver_balance_after = test.receiver.balance; + + let delivery_fees = AssetHubRococo::execute_with(|| { + xcm_helpers::transfer_assets_delivery_fees::< + ::XcmSender, + >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) + }); + + // Sender's balance is reduced + assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); + // Receiver's balance does not change + assert_eq!(receiver_balance_after, receiver_balance_before); +} + +/// Teleport of native asset from Relay Chain to the System Parachain should work +#[test] +fn teleport_native_assets_from_relay_to_system_para_works() { + // Init values for Relay Chain + let amount_to_send: Balance = ROCOCO_ED * 1000; + let dest = Rococo::child_location_of(AssetHubRococo::para_id()); + let beneficiary_id = AssetHubRococoReceiver::get(); + let test_args = TestContext { + sender: RococoSender::get(), + receiver: AssetHubRococoReceiver::get(), + args: relay_test_args(dest, beneficiary_id, amount_to_send), + }; + + let mut test = RelayToSystemParaTest::new(test_args); + + let sender_balance_before = test.sender.balance; + let receiver_balance_before = test.receiver.balance; + + test.set_assertion::(relay_origin_assertions); + test.set_assertion::(para_dest_assertions); + test.set_dispatchable::(relay_teleport_assets); + test.assert(); + + let delivery_fees = Rococo::execute_with(|| { + xcm_helpers::transfer_assets_delivery_fees::< + ::XcmSender, + >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) + }); + + let sender_balance_after = test.sender.balance; + let receiver_balance_after = test.receiver.balance; + + 
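+	// Expected accounting (a sketch; the exact delivery fee depends on the runtime's
+	// XCM fee configuration):
+	//   sender_after   == sender_before - amount_to_send - delivery_fees
+	//   receiver_after  > receiver_before (the teleported amount minus execution fees)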
// Sender's balance is reduced + assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); + // Receiver's balance is increased + assert!(receiver_balance_after > receiver_balance_before); +} + +/// Teleport of native asset from System Parachains to the Relay Chain +/// should work when there is enough balance in Relay Chain's `CheckAccount` +#[test] +fn teleport_native_assets_back_from_system_para_to_relay_works() { + // Dependency - Relay Chain's `CheckAccount` should have enough balance + teleport_native_assets_from_relay_to_system_para_works(); + + // Init values for Relay Chain + let amount_to_send: Balance = ASSET_HUB_ROCOCO_ED * 1000; + let destination = AssetHubRococo::parent_location(); + let beneficiary_id = RococoReceiver::get(); + let assets = (Parent, amount_to_send).into(); + + let test_args = TestContext { + sender: AssetHubRococoSender::get(), + receiver: RococoReceiver::get(), + args: para_test_args(destination, beneficiary_id, amount_to_send, assets, None, 0), + }; + + let mut test = SystemParaToRelayTest::new(test_args); + + let sender_balance_before = test.sender.balance; + let receiver_balance_before = test.receiver.balance; + + test.set_assertion::(para_origin_assertions); + test.set_assertion::(relay_dest_assertions); + test.set_dispatchable::(system_para_teleport_assets); + test.assert(); + + let sender_balance_after = test.sender.balance; + let receiver_balance_after = test.receiver.balance; + + let delivery_fees = AssetHubRococo::execute_with(|| { + xcm_helpers::transfer_assets_delivery_fees::< + ::XcmSender, + >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) + }); + + // Sender's balance is reduced + assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); + // Receiver's balance is increased + assert!(receiver_balance_after > receiver_balance_before); +} + +/// Teleport of native asset from System Parachain to Relay Chain +/// shouldn't work when there is not enough balance in Relay Chain's `CheckAccount` +#[test] +fn teleport_native_assets_from_system_para_to_relay_fails() { + // Init values for Relay Chain + let amount_to_send: Balance = ASSET_HUB_ROCOCO_ED * 1000; + let destination = AssetHubRococo::parent_location(); + let beneficiary_id = RococoReceiver::get(); + let assets = (Parent, amount_to_send).into(); + + let test_args = TestContext { + sender: AssetHubRococoSender::get(), + receiver: RococoReceiver::get(), + args: para_test_args(destination, beneficiary_id, amount_to_send, assets, None, 0), + }; + + let mut test = SystemParaToRelayTest::new(test_args); + + let sender_balance_before = test.sender.balance; + let receiver_balance_before = test.receiver.balance; + + test.set_assertion::(para_origin_assertions); + test.set_assertion::(relay_dest_assertions_fail); + test.set_dispatchable::(system_para_teleport_assets); + test.assert(); + + let delivery_fees = AssetHubRococo::execute_with(|| { + xcm_helpers::transfer_assets_delivery_fees::< + ::XcmSender, + >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) + }); + + let sender_balance_after = test.sender.balance; + let receiver_balance_after = test.receiver.balance; + + // Sender's balance is reduced + assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); + // Receiver's balance does not change + assert_eq!(receiver_balance_after, receiver_balance_before); +} + +#[test] +fn teleport_to_other_system_parachains_works() { 
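+	// `test_parachain_is_trusted_teleporter!` (from `emulated-integration-tests-common`)
+	// is assumed here to teleport `amount` of the native asset from the origin chain to
+	// each listed destination and to check that the teleport succeeds on both sides.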
+ let amount = ASSET_HUB_ROCOCO_ED * 100; + let native_asset: MultiAssets = (Parent, amount).into(); + + test_parachain_is_trusted_teleporter!( + AssetHubRococo, // Origin + AssetHubRococoXcmConfig, // XCM Configuration + vec![BridgeHubRococo], // Destinations + (native_asset, amount) + ); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..7080abc0a4403681e099cd61fddcc6b0b72dbed0 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml @@ -0,0 +1,41 @@ +[package] +name = "asset-hub-westend-integration-tests" +version = "1.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +description = "Asset Hub Westend runtime integration tests with xcm-emulator" +publish = false + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false } +assert_matches = "1.5.0" + +# Substrate +sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false} +frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false} +frame-system = { path = "../../../../../../../substrate/frame/system", default-features = false} +pallet-balances = { path = "../../../../../../../substrate/frame/balances", default-features = false} +pallet-assets = { path = "../../../../../../../substrate/frame/assets", default-features = false} +pallet-asset-conversion = { path = "../../../../../../../substrate/frame/asset-conversion", default-features = false} +pallet-treasury = { path = "../../../../../../../substrate/frame/treasury", default-features = false} +pallet-asset-rate = { path = "../../../../../../../substrate/frame/asset-rate", default-features = false} +pallet-message-queue = { path = "../../../../../../../substrate/frame/message-queue", default-features = false } + +# Polkadot +polkadot-runtime-common = { path = "../../../../../../../polkadot/runtime/common" } +xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false} +xcm-builder = { package = "staging-xcm-builder", path = "../../../../../../../polkadot/xcm/xcm-builder", default-features = false} +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../../polkadot/xcm/xcm-executor", default-features = false} +pallet-xcm = { path = "../../../../../../../polkadot/xcm/pallet-xcm", default-features = false} +westend-runtime = { path = "../../../../../../../polkadot/runtime/westend" } +westend-runtime-constants = { path = "../../../../../../../polkadot/runtime/westend/constants", default-features = false } + +# Cumulus +parachains-common = { path = "../../../../../../parachains/common" } +asset-hub-westend-runtime = { path = "../../../../../runtimes/assets/asset-hub-westend" } +asset-test-utils = { path = "../../../../../runtimes/assets/test-utils" } +cumulus-pallet-dmp-queue = { default-features = false, path = "../../../../../../pallets/dmp-queue" } +cumulus-pallet-parachain-system = { default-features = false, path = "../../../../../../pallets/parachain-system" } +emulated-integration-tests-common = { path = "../../../common", default-features = false} +westend-system-emulated-network ={ path = "../../../networks/westend-system" } diff --git 
a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs similarity index 62% rename from cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/lib.rs rename to cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs index 6e0f3434aedf3dd7a5f630ad40729e6a61e9ba47..83a867e6ae31e2367b471ec7f520055827a2752f 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs @@ -14,6 +14,8 @@ // limitations under the License. pub use codec::Encode; + +// Substrate pub use frame_support::{ assert_err, assert_ok, instances::Instance2, @@ -22,24 +24,36 @@ pub use frame_support::{ traits::fungibles::Inspect, BoundedVec, }; -pub use integration_tests_common::{ - constants::{ - asset_hub_westend::ED as ASSET_HUB_WESTEND_ED, westend::ED as WESTEND_ED, - PROOF_SIZE_THRESHOLD, REF_TIME_THRESHOLD, XCM_V3, - }, - xcm_helpers::{xcm_transact_paid_execution, xcm_transact_unpaid_execution}, - AssetHubWestend, AssetHubWestendPallet, AssetHubWestendReceiver, AssetHubWestendSender, - PenpalWestendA, PenpalWestendAPallet, PenpalWestendAReceiver, PenpalWestendASender, Westend, - WestendPallet, WestendReceiver, WestendSender, -}; -pub use parachains_common::{AccountId, Balance}; + +// Polkadot pub use xcm::{ prelude::{AccountId32 as AccountId32Junction, *}, v3::{Error, NetworkId::Westend as WestendId}, }; -pub use xcm_emulator::{ - assert_expected_events, bx, helpers::weight_within_threshold, Chain, Parachain as Para, - RelayChain as Relay, Test, TestArgs, TestContext, TestExt, + +// Cumulus +pub use asset_test_utils::xcm_helpers; +pub use emulated_integration_tests_common::{ + test_parachain_is_trusted_teleporter, + xcm_emulator::{ + assert_expected_events, bx, helpers::weight_within_threshold, Chain, Parachain as Para, + RelayChain as Relay, Test, TestArgs, TestContext, TestExt, + }, + xcm_helpers::{xcm_transact_paid_execution, xcm_transact_unpaid_execution}, + PROOF_SIZE_THRESHOLD, REF_TIME_THRESHOLD, XCM_V3, +}; +pub use parachains_common::{AccountId, Balance}; +pub use westend_system_emulated_network::{ + asset_hub_westend_emulated_chain::{ + genesis::ED as ASSET_HUB_WESTEND_ED, AssetHubWestendParaPallet as AssetHubWestendPallet, + }, + penpal_emulated_chain::PenpalAParaPallet as PenpalAPallet, + westend_emulated_chain::{genesis::ED as WESTEND_ED, WestendRelayPallet as WestendPallet}, + AssetHubWestendPara as AssetHubWestend, AssetHubWestendParaReceiver as AssetHubWestendReceiver, + AssetHubWestendParaSender as AssetHubWestendSender, PenpalAPara as PenpalA, + PenpalAParaReceiver as PenpalAReceiver, PenpalAParaSender as PenpalASender, + WestendRelay as Westend, WestendRelayReceiver as WestendReceiver, + WestendRelaySender as WestendSender, }; pub const ASSET_ID: u32 = 1; @@ -49,9 +63,9 @@ pub const ASSETS_PALLET_ID: u8 = 50; pub type RelayToSystemParaTest = Test; pub type SystemParaToRelayTest = Test; -pub type SystemParaToParaTest = Test; +pub type SystemParaToParaTest = Test; -/// Returns a `TestArgs` instance to de used for the Relay Chain accross integraton tests +/// Returns a `TestArgs` instance to be used for the Relay Chain across integration tests pub fn relay_test_args(amount: Balance) -> TestArgs { TestArgs { dest: Westend::child_location_of(AssetHubWestend::para_id()), @@ -68,7 +82,7 @@ pub fn 
relay_test_args(amount: Balance) -> TestArgs { } } -/// Returns a `TestArgs` instance to de used for the System Parachain accross integraton tests +/// Returns a `TestArgs` instance to be used for the System Parachain across integration tests pub fn system_para_test_args( dest: MultiLocation, beneficiary_id: AccountId32, @@ -88,5 +102,4 @@ pub fn system_para_test_args( } #[cfg(test)] -#[cfg(not(feature = "runtime-benchmarks"))] mod tests; diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/mod.rs similarity index 100% rename from cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/mod.rs rename to cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/mod.rs diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs new file mode 100644 index 0000000000000000000000000000000000000000..5b2c648b7b085c1bd42df6537b2d38e3e12fbbe4 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs @@ -0,0 +1,240 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::*; +use asset_hub_westend_runtime::xcm_config::XcmConfig; + +fn system_para_to_para_sender_assertions(t: SystemParaToParaTest) { + type RuntimeEvent = ::RuntimeEvent; + + AssetHubWestend::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts( + 676_119_000, + 6196, + ))); + + assert_expected_events!( + AssetHubWestend, + vec![ + // Amount to reserve transfer is transferred to Parachain's Sovereign account + RuntimeEvent::Balances( + pallet_balances::Event::Transfer { from, to, amount } + ) => { + from: *from == t.sender.account_id, + to: *to == AssetHubWestend::sovereign_account_id_of( + t.args.dest + ), + amount: *amount == t.args.amount, + }, + ] + ); +} + +fn para_receiver_assertions(_: Test) { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + PenpalA, + vec![ + RuntimeEvent::Balances(pallet_balances::Event::Deposit { .. }) => {}, + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. 
} + ) => {}, + ] + ); +} + +fn system_para_to_para_assets_assertions(t: SystemParaToParaTest) { + type RuntimeEvent = ::RuntimeEvent; + + AssetHubWestend::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts( + 676_119_000, + 6196, + ))); + + assert_expected_events!( + AssetHubWestend, + vec![ + // Amount to reserve transfer is transferred to Parachain's Sovereign account + RuntimeEvent::Assets( + pallet_assets::Event::Transferred { asset_id, from, to, amount } + ) => { + asset_id: *asset_id == ASSET_ID, + from: *from == t.sender.account_id, + to: *to == AssetHubWestend::sovereign_account_id_of( + t.args.dest + ), + amount: *amount == t.args.amount, + }, + ] + ); +} + +fn system_para_to_para_limited_reserve_transfer_assets(t: SystemParaToParaTest) -> DispatchResult { + ::PolkadotXcm::limited_reserve_transfer_assets( + t.signed_origin, + bx!(t.args.dest.into()), + bx!(t.args.beneficiary.into()), + bx!(t.args.assets.into()), + t.args.fee_asset_item, + t.args.weight_limit, + ) +} + +/// Reserve Transfers of native asset from Relay Chain to the System Parachain shouldn't work +#[test] +fn reserve_transfer_native_asset_from_relay_to_system_para_fails() { + let signed_origin = ::RuntimeOrigin::signed(WestendSender::get().into()); + let destination = Westend::child_location_of(AssetHubWestend::para_id()); + let beneficiary: MultiLocation = + AccountId32Junction { network: None, id: AssetHubWestendReceiver::get().into() }.into(); + let amount_to_send: Balance = WESTEND_ED * 1000; + let assets: MultiAssets = (Here, amount_to_send).into(); + let fee_asset_item = 0; + + // this should fail + Westend::execute_with(|| { + let result = ::XcmPallet::limited_reserve_transfer_assets( + signed_origin, + bx!(destination.into()), + bx!(beneficiary.into()), + bx!(assets.into()), + fee_asset_item, + WeightLimit::Unlimited, + ); + assert_err!( + result, + DispatchError::Module(sp_runtime::ModuleError { + index: 99, + error: [2, 0, 0, 0], + message: Some("Filtered") + }) + ); + }); +} + +/// Reserve Transfers of native asset from System Parachain to Relay Chain shouldn't work +#[test] +fn reserve_transfer_native_asset_from_system_para_to_relay_fails() { + // Init values for System Parachain + let signed_origin = + ::RuntimeOrigin::signed(AssetHubWestendSender::get().into()); + let destination = AssetHubWestend::parent_location(); + let beneficiary_id = WestendReceiver::get(); + let beneficiary: MultiLocation = + AccountId32Junction { network: None, id: beneficiary_id.into() }.into(); + let amount_to_send: Balance = ASSET_HUB_WESTEND_ED * 1000; + let assets: MultiAssets = (Parent, amount_to_send).into(); + let fee_asset_item = 0; + + // this should fail + AssetHubWestend::execute_with(|| { + let result = + ::PolkadotXcm::limited_reserve_transfer_assets( + signed_origin, + bx!(destination.into()), + bx!(beneficiary.into()), + bx!(assets.into()), + fee_asset_item, + WeightLimit::Unlimited, + ); + assert_err!( + result, + DispatchError::Module(sp_runtime::ModuleError { + index: 31, + error: [2, 0, 0, 0], + message: Some("Filtered") + }) + ); + }); +} + +/// Reserve Transfers of native asset from System Parachain to Parachain should work +#[test] +fn reserve_transfer_native_asset_from_system_para_to_para() { + // Init values for System Parachain + let destination = AssetHubWestend::sibling_location_of(PenpalA::para_id()); + let beneficiary_id = PenpalAReceiver::get(); + let amount_to_send: Balance = ASSET_HUB_WESTEND_ED * 1000; + let assets = (Parent, amount_to_send).into(); + + let test_args = TestContext { + 
sender: AssetHubWestendSender::get(), + receiver: PenpalAReceiver::get(), + args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), + }; + + let mut test = SystemParaToParaTest::new(test_args); + + let sender_balance_before = test.sender.balance; + let receiver_balance_before = test.receiver.balance; + + test.set_assertion::(system_para_to_para_sender_assertions); + test.set_assertion::(para_receiver_assertions); + test.set_dispatchable::(system_para_to_para_limited_reserve_transfer_assets); + test.assert(); + + let sender_balance_after = test.sender.balance; + let receiver_balance_after = test.receiver.balance; + + let delivery_fees = AssetHubWestend::execute_with(|| { + xcm_helpers::transfer_assets_delivery_fees::<::XcmSender>( + test.args.assets.clone(), + 0, + test.args.weight_limit, + test.args.beneficiary, + test.args.dest, + ) + }); + + // Sender's balance is reduced + assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); + // Receiver's balance is increased + assert!(receiver_balance_after > receiver_balance_before); +} + +/// Reserve Transfers of a local asset from System Parachain to Parachain should work +#[test] +fn reserve_transfer_asset_from_system_para_to_para() { + // Force create asset from Relay Chain and mint assets for System Parachain's sender account + AssetHubWestend::force_create_and_mint_asset( + ASSET_ID, + ASSET_MIN_BALANCE, + true, + AssetHubWestendSender::get(), + Some(Weight::from_parts(1_019_445_000, 200_000)), + ASSET_MIN_BALANCE * 1000000, + ); + + // Init values for System Parachain + let destination = AssetHubWestend::sibling_location_of(PenpalA::para_id()); + let beneficiary_id = PenpalAReceiver::get(); + let amount_to_send = ASSET_MIN_BALANCE * 1000; + let assets = + (X2(PalletInstance(ASSETS_PALLET_ID), GeneralIndex(ASSET_ID.into())), amount_to_send) + .into(); + + let system_para_test_args = TestContext { + sender: AssetHubWestendSender::get(), + receiver: PenpalAReceiver::get(), + args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), + }; + + let mut system_para_test = SystemParaToParaTest::new(system_para_test_args); + + system_para_test.set_assertion::(system_para_to_para_assets_assertions); + // TODO: Add assertions when Penpal is able to manage assets + system_para_test + .set_dispatchable::(system_para_to_para_limited_reserve_transfer_assets); + system_para_test.assert(); +} diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/send.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs similarity index 59% rename from cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/send.rs rename to cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs index 424d222bef3811966f55d8fbac954f33f065ffc7..bda9a3e69c4fbbc2fef03f5fe040526dc3a35f1d 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/send.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs @@ -16,52 +16,16 @@ use crate::*; /// Relay Chain should be able to execute `Transact` instructions in System Parachain -/// when `OriginKind::Superuser` and signer is `sudo` +/// when `OriginKind::Superuser`. 
#[test] -fn send_transact_sudo_from_relay_to_system_para_works() { - // Init tests variables - let root_origin = ::RuntimeOrigin::root(); - let system_para_destination = Westend::child_location_of(AssetHubWestend::para_id()).into(); - let asset_owner: AccountId = AssetHubWestendSender::get().into(); - let xcm = AssetHubWestend::force_create_asset_xcm( - OriginKind::Superuser, +fn send_transact_as_superuser_from_relay_to_system_para_works() { + AssetHubWestend::force_create_asset_from_relay_as_root( ASSET_ID, - asset_owner.clone(), + ASSET_MIN_BALANCE, true, - 1000, - ); - // Send XCM message from Relay Chain - Westend::execute_with(|| { - assert_ok!(::XcmPallet::send( - root_origin, - bx!(system_para_destination), - bx!(xcm), - )); - - Westend::assert_xcm_pallet_sent(); - }); - - // Receive XCM message in Assets Parachain - AssetHubWestend::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; - - AssetHubWestend::assert_dmp_queue_complete(Some(Weight::from_parts( - 1_019_445_000, - 200_000, - ))); - - assert_expected_events!( - AssetHubWestend, - vec![ - RuntimeEvent::Assets(pallet_assets::Event::ForceCreated { asset_id, owner }) => { - asset_id: *asset_id == ASSET_ID, - owner: *owner == asset_owner, - }, - ] - ); - - assert!(::Assets::asset_exists(ASSET_ID)); - }); + AssetHubWestendSender::get().into(), + Some(Weight::from_parts(1_019_445_000, 200_000)), + ) } /// Parachain should be able to send XCM paying its fee with sufficient asset @@ -69,7 +33,7 @@ fn send_transact_sudo_from_relay_to_system_para_works() { #[test] fn send_xcm_from_para_to_system_para_paying_fee_with_assets_works() { let para_sovereign_account = AssetHubWestend::sovereign_account_id_of( - AssetHubWestend::sibling_location_of(PenpalWestendA::para_id()), + AssetHubWestend::sibling_location_of(PenpalA::para_id()), ); // Force create and mint assets for Parachain's sovereign account @@ -78,6 +42,7 @@ fn send_xcm_from_para_to_system_para_paying_fee_with_assets_works() { ASSET_MIN_BALANCE, true, para_sovereign_account.clone(), + Some(Weight::from_parts(1_019_445_000, 200_000)), ASSET_MIN_BALANCE * 1000000000, ); @@ -95,9 +60,8 @@ fn send_xcm_from_para_to_system_para_paying_fee_with_assets_works() { let native_asset = (X2(PalletInstance(ASSETS_PALLET_ID), GeneralIndex(ASSET_ID.into())), fee_amount).into(); - let root_origin = ::RuntimeOrigin::root(); - let system_para_destination = - PenpalWestendA::sibling_location_of(AssetHubWestend::para_id()).into(); + let root_origin = ::RuntimeOrigin::root(); + let system_para_destination = PenpalA::sibling_location_of(AssetHubWestend::para_id()).into(); let xcm = xcm_transact_paid_execution( call, origin_kind, @@ -105,22 +69,22 @@ fn send_xcm_from_para_to_system_para_paying_fee_with_assets_works() { para_sovereign_account.clone(), ); - PenpalWestendA::execute_with(|| { - assert_ok!(::PolkadotXcm::send( + PenpalA::execute_with(|| { + assert_ok!(::PolkadotXcm::send( root_origin, bx!(system_para_destination), bx!(xcm), )); - PenpalWestendA::assert_xcm_pallet_sent(); + PenpalA::assert_xcm_pallet_sent(); }); AssetHubWestend::execute_with(|| { type RuntimeEvent = ::RuntimeEvent; AssetHubWestend::assert_xcmp_queue_success(Some(Weight::from_parts( - 2_176_414_000, - 203_593, + 16_290_336_000, + 562_893, ))); assert_expected_events!( diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/set_xcm_versions.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/set_xcm_versions.rs similarity index 74% rename from 
cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/set_xcm_versions.rs rename to cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/set_xcm_versions.rs index 2720095aac00d5d15c43e77224b30b7387079897..2133d5e5fb7c7537ae757c30a8c6ba3b96cc9c00 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/set_xcm_versions.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/set_xcm_versions.rs @@ -47,32 +47,22 @@ fn relay_sets_system_para_xcm_supported_version() { #[test] fn system_para_sets_relay_xcm_supported_version() { // Init test variables - let sudo_origin = ::RuntimeOrigin::root(); let parent_location = AssetHubWestend::parent_location(); - let system_para_destination: VersionedMultiLocation = - Westend::child_location_of(AssetHubWestend::para_id()).into(); - let call = ::RuntimeCall::PolkadotXcm(pallet_xcm::Call::< - ::Runtime, - >::force_xcm_version { - location: bx!(parent_location), - version: XCM_V3, - }) - .encode() - .into(); - let origin_kind = OriginKind::Superuser; - - let xcm = xcm_transact_unpaid_execution(call, origin_kind); - - // System Parachain sets supported version for Relay Chain throught it - Westend::execute_with(|| { - assert_ok!(::XcmPallet::send( - sudo_origin, - bx!(system_para_destination), - bx!(xcm), - )); + let force_xcm_version_call = + ::RuntimeCall::PolkadotXcm(pallet_xcm::Call::< + ::Runtime, + >::force_xcm_version { + location: bx!(parent_location), + version: XCM_V3, + }) + .encode() + .into(); - Westend::assert_xcm_pallet_sent(); - }); + // System Parachain sets supported version for Relay Chain through it + Westend::send_unpaid_transact_to_parachain_as_root( + AssetHubWestend::para_id(), + force_xcm_version_call, + ); // System Parachain receive the XCM message AssetHubWestend::execute_with(|| { diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/swap.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs similarity index 93% rename from cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/swap.rs rename to cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs index 7d1615c9e29182c97146ca46c966f0ae85991b11..a8e19f9ef4b1c66bdd3f918ce9d5f53e76b8b2d2 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/swap.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs @@ -114,7 +114,7 @@ fn swap_locally_on_chain_using_foreign_assets() { let foreign_asset1_at_asset_hub_westend = Box::new(MultiLocation { parents: 1, interior: X3( - Parachain(PenpalWestendA::para_id().into()), + Parachain(PenpalA::para_id().into()), PalletInstance(ASSETS_PALLET_ID), GeneralIndex(ASSET_ID.into()), ), @@ -125,18 +125,18 @@ fn swap_locally_on_chain_using_foreign_assets() { .into(); let penpal_location = - MultiLocation { parents: 1, interior: X1(Parachain(PenpalWestendA::para_id().into())) }; + MultiLocation { parents: 1, interior: X1(Parachain(PenpalA::para_id().into())) }; // 1. 
Create asset on penpal: - PenpalWestendA::execute_with(|| { - assert_ok!(::Assets::create( - ::RuntimeOrigin::signed(PenpalWestendASender::get()), + PenpalA::execute_with(|| { + assert_ok!(::Assets::create( + ::RuntimeOrigin::signed(PenpalASender::get()), ASSET_ID.into(), - PenpalWestendASender::get().into(), + PenpalASender::get().into(), 1000, )); - assert!(::Assets::asset_exists(ASSET_ID)); + assert!(::Assets::asset_exists(ASSET_ID)); }); // 2. Create foreign asset on asset_hub_westend: @@ -190,25 +190,25 @@ fn swap_locally_on_chain_using_foreign_assets() { ])); // Send XCM message from penpal => asset_hub_westend - let sudo_penpal_origin = ::RuntimeOrigin::root(); - PenpalWestendA::execute_with(|| { - assert_ok!(::PolkadotXcm::send( + let sudo_penpal_origin = ::RuntimeOrigin::root(); + PenpalA::execute_with(|| { + assert_ok!(::PolkadotXcm::send( sudo_penpal_origin.clone(), bx!(assets_para_destination.clone()), bx!(xcm), )); - type RuntimeEvent = ::RuntimeEvent; + type RuntimeEvent = ::RuntimeEvent; assert_expected_events!( - PenpalWestendA, + PenpalA, vec![ RuntimeEvent::PolkadotXcm(pallet_xcm::Event::Sent { .. }) => {}, ] ); }); - // Receive XCM message in Assets Parachain + // Receive XCM message in Assets Parachain in the next block. AssetHubWestend::execute_with(|| { assert!(::ForeignAssets::asset_exists( *foreign_asset1_at_asset_hub_westend diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs similarity index 60% rename from cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/teleport.rs rename to cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs index 8de73a7420c6b69954b2fe295a3b5f6bf4e7219c..d618cd2fe04d70679f5c07689ed423c0c48e8f6b 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs @@ -13,9 +13,9 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#![allow(dead_code)] // - use crate::*; +use asset_hub_westend_runtime::xcm_config::XcmConfig as AssetHubWestendXcmConfig; +use westend_runtime::xcm_config::XcmConfig as WestendXcmConfig; fn relay_origin_assertions(t: RelayToSystemParaTest) { type RuntimeEvent = ::RuntimeEvent; @@ -51,7 +51,7 @@ fn relay_dest_assertions(t: SystemParaToRelayTest) { assert_expected_events!( Westend, vec![ - // Amount is witdrawn from Relay Chain's `CheckAccount` + // Amount is withdrawn from Relay Chain's `CheckAccount` RuntimeEvent::Balances(pallet_balances::Event::Withdraw { who, amount }) => { who: *who == ::XcmPallet::check_account(), amount: *amount == t.args.amount, @@ -97,7 +97,7 @@ fn para_origin_assertions(t: SystemParaToRelayTest) { fn para_dest_assertions(t: RelayToSystemParaTest) { type RuntimeEvent = ::RuntimeEvent; - AssetHubWestend::assert_dmp_queue_complete(Some(Weight::from_parts(164_733_000, 0))); + AssetHubWestend::assert_dmp_queue_complete(Some(Weight::from_parts(164_793_000, 3593))); assert_expected_events!( AssetHubWestend, @@ -142,16 +142,15 @@ fn system_para_limited_teleport_assets(t: SystemParaToRelayTest) -> DispatchResu ) } -// TODO: Uncomment when https://github.com/paritytech/polkadot/pull/7424 is merged -// fn system_para_teleport_assets(t: SystemParaToRelayTest) -> DispatchResult { -// ::PolkadotXcm::teleport_assets( -// t.signed_origin, -// bx!(t.args.dest), -// bx!(t.args.beneficiary), -// bx!(t.args.assets), -// t.args.fee_asset_item, -// ) -// } +fn system_para_teleport_assets(t: SystemParaToRelayTest) -> DispatchResult { + ::PolkadotXcm::teleport_assets( + t.signed_origin, + bx!(t.args.dest.into()), + bx!(t.args.beneficiary.into()), + bx!(t.args.assets.into()), + t.args.fee_asset_item, + ) +} /// Limited Teleport of native asset from Relay Chain to the System Parachain should work #[test] @@ -174,11 +173,17 @@ fn limited_teleport_native_assets_from_relay_to_system_para_works() { test.set_dispatchable::(relay_limited_teleport_assets); test.assert(); + let delivery_fees = Westend::execute_with(|| { + xcm_helpers::transfer_assets_delivery_fees::< + ::XcmSender, + >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) + }); + let sender_balance_after = test.sender.balance; let receiver_balance_after = test.receiver.balance; // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send, sender_balance_after); + assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); // Receiver's balance is increased assert!(receiver_balance_after > receiver_balance_before); } @@ -215,8 +220,14 @@ fn limited_teleport_native_assets_back_from_system_para_to_relay_works() { let sender_balance_after = test.sender.balance; let receiver_balance_after = test.receiver.balance; + let delivery_fees = AssetHubWestend::execute_with(|| { + xcm_helpers::transfer_assets_delivery_fees::< + ::XcmSender, + >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) + }); + // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send, sender_balance_after); + assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); // Receiver's balance is increased assert!(receiver_balance_after > receiver_balance_before); } @@ -250,8 +261,14 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() { let sender_balance_after = test.sender.balance; let receiver_balance_after = test.receiver.balance; + let delivery_fees = 
AssetHubWestend::execute_with(|| { + xcm_helpers::transfer_assets_delivery_fees::< + ::XcmSender, + >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) + }); + // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send, sender_balance_after); + assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); // Receiver's balance does not change assert_eq!(receiver_balance_after, receiver_balance_before); } @@ -277,87 +294,116 @@ fn teleport_native_assets_from_relay_to_system_para_works() { test.set_dispatchable::(relay_teleport_assets); test.assert(); + let delivery_fees = Westend::execute_with(|| { + xcm_helpers::transfer_assets_delivery_fees::< + ::XcmSender, + >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) + }); + let sender_balance_after = test.sender.balance; let receiver_balance_after = test.receiver.balance; // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send, sender_balance_after); + assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); // Receiver's balance is increased assert!(receiver_balance_after > receiver_balance_before); } -// TODO: Uncomment when https://github.com/paritytech/polkadot/pull/7424 is merged +/// Teleport of native asset from System Parachains to the Relay Chain +/// should work when there is enough balance in Relay Chain's `CheckAccount` +#[test] +fn teleport_native_assets_back_from_system_para_to_relay_works() { + // Dependency - Relay Chain's `CheckAccount` should have enough balance + teleport_native_assets_from_relay_to_system_para_works(); -// Right now it is failing in the Relay Chain with a -// `messageQueue.ProcessingFailed` event `error: Unsupported`. 
-// The reason is the `Weigher` in `pallet_xcm` is not properly calculating the `remote_weight` -// and it cause an `Overweight` error in `AllowTopLevelPaidExecutionFrom` barrier + // Init values for Relay Chain + let amount_to_send: Balance = ASSET_HUB_WESTEND_ED * 1000; + let destination = AssetHubWestend::parent_location(); + let beneficiary_id = WestendReceiver::get(); + let assets = (Parent, amount_to_send).into(); -// /// Teleport of native asset from System Parachains to the Relay Chain -// /// should work when there is enough balance in Relay Chain's `CheckAccount` -// #[test] -// fn teleport_native_assets_back_from_system_para_to_relay_works() { -// // Dependency - Relay Chain's `CheckAccount` should have enough balance -// teleport_native_assets_from_relay_to_system_para_works(); - -// // Init values for Relay Chain -// let amount_to_send: Balance = ASSET_HUB_WESTEND_ED * 1000; -// let test_args = TestContext { -// sender: AssetHubWestendSender::get(), -// receiver: WestendReceiver::get(), -// args: get_para_dispatch_args(amount_to_send), -// }; - -// let mut test = SystemParaToRelayTest::new(test_args); - -// let sender_balance_before = test.sender.balance; -// let receiver_balance_before = test.receiver.balance; - -// test.set_assertion::(para_origin_assertions); -// test.set_assertion::(relay_dest_assertions); -// test.set_dispatchable::(system_para_teleport_assets); -// test.assert(); - -// let sender_balance_after = test.sender.balance; -// let receiver_balance_after = test.receiver.balance; - -// // Sender's balance is reduced -// assert_eq!(sender_balance_before - amount_to_send, sender_balance_after); -// // Receiver's balance is increased -// assert!(receiver_balance_after > receiver_balance_before); -// } + let test_args = TestContext { + sender: AssetHubWestendSender::get(), + receiver: WestendReceiver::get(), + args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), + }; + + let mut test = SystemParaToRelayTest::new(test_args); + + let sender_balance_before = test.sender.balance; + let receiver_balance_before = test.receiver.balance; + + test.set_assertion::(para_origin_assertions); + test.set_assertion::(relay_dest_assertions); + test.set_dispatchable::(system_para_teleport_assets); + test.assert(); + + let delivery_fees = AssetHubWestend::execute_with(|| { + xcm_helpers::transfer_assets_delivery_fees::< + ::XcmSender, + >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) + }); + + let sender_balance_after = test.sender.balance; + let receiver_balance_after = test.receiver.balance; + + // Sender's balance is reduced + assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); + // Receiver's balance is increased + assert!(receiver_balance_after > receiver_balance_before); +} + +/// Teleport of native asset from System Parachain to Relay Chain +/// shouldn't work when there is not enough balance in Relay Chain's `CheckAccount` +#[test] +fn teleport_native_assets_from_system_para_to_relay_fails() { + // Init values for Relay Chain + let amount_to_send: Balance = ASSET_HUB_WESTEND_ED * 1000; + let destination = AssetHubWestend::parent_location(); + let beneficiary_id = WestendReceiver::get(); + let assets = (Parent, amount_to_send).into(); + + let test_args = TestContext { + sender: AssetHubWestendSender::get(), + receiver: WestendReceiver::get(), + args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), + }; -// /// Teleport of native 
asset from System Parachain to Relay Chain -// /// shouldn't work when there is not enough balance in Relay Chain's `CheckAccount` + let mut test = SystemParaToRelayTest::new(test_args); + + let sender_balance_before = test.sender.balance; + let receiver_balance_before = test.receiver.balance; + + test.set_assertion::(para_origin_assertions); + test.set_assertion::(relay_dest_assertions_fail); + test.set_dispatchable::(system_para_teleport_assets); + test.assert(); + + let delivery_fees = AssetHubWestend::execute_with(|| { + xcm_helpers::transfer_assets_delivery_fees::< + ::XcmSender, + >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) + }); + + let sender_balance_after = test.sender.balance; + let receiver_balance_after = test.receiver.balance; + + // Sender's balance is reduced + assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); + // Receiver's balance does not change + assert_eq!(receiver_balance_after, receiver_balance_before); +} + +// TODO: uncomment when CollectivesWestend and BridgeHubWestend are implemented +// https://github.com/paritytech/polkadot-sdk/pull/1737 (CollectivesWestend) // #[test] -// fn teleport_native_assets_from_system_para_to_relay_fails() { -// // Init values for Relay Chain -// let amount_to_send: Balance = ASSET_HUB_WESTEND_ED * 1000; -// let assets = (Parent, amount_to_send).into(); -// -// let test_args = TestContext { -// sender: AssetHubWestendSender::get(), -// receiver: WestendReceiver::get(), -// args: system_para_test_args(amount_to_send), -// assets, -// None -// }; - -// let mut test = SystemParaToRelayTest::new(test_args); - -// let sender_balance_before = test.sender.balance; -// let receiver_balance_before = test.receiver.balance; - -// test.set_assertion::(para_origin_assertions); -// test.set_assertion::(relay_dest_assertions); -// test.set_dispatchable::(system_para_teleport_assets); -// test.assert(); - -// let sender_balance_after = test.sender.balance; -// let receiver_balance_after = test.receiver.balance; - -// // Sender's balance is reduced -// assert_eq!(sender_balance_before - amount_to_send, sender_balance_after); -// // Receiver's balance does not change -// assert_eq!(receiver_balance_after, receiver_balance_before); +// fn teleport_to_other_system_parachains_works() { +// let amount = ASSET_HUB_WESTEND_ED * 100; +// let native_asset: VersionedMultiAssets = (Parent, amount).into(); + +// test_parachain_is_trusted_teleporter!( +// AssetHubWestend, // Origin +// vec![CollectivesWestend, BridgeHubWestend], // Destinations +// (native_asset, amount) +// ); // } diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/treasury.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/treasury.rs similarity index 95% rename from cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/treasury.rs rename to cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/treasury.rs index cf06f58682da2d8dfb626305a9e8f0aff7b48804..32089f7ecec04fc6973b65f160eb054fc4eee84f 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/treasury.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/treasury.rs @@ -14,8 +14,8 @@ // limitations under the License. 
use crate::*; +use emulated_integration_tests_common::accounts::{ALICE, BOB}; use frame_support::traits::fungibles::{Create, Inspect, Mutate}; -use integration_tests_common::constants::accounts::{ALICE, BOB}; use polkadot_runtime_common::impls::VersionedLocatableAsset; use xcm_executor::traits::ConvertLocation; @@ -103,7 +103,7 @@ fn create_and_claim_treasury_spend() { amount: amount == &SPEND_AMOUNT, }, RuntimeEvent::ParachainSystem(cumulus_pallet_parachain_system::Event::UpwardMessageSent { .. }) => {}, - RuntimeEvent::DmpQueue(cumulus_pallet_dmp_queue::Event::ExecutedDownward { outcome: Outcome::Complete(..) ,.. }) => {}, + RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true ,.. }) => {}, ] ); // beneficiary received the assets from the treasury. diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..00e3af2e4fff82c7e85c94a173c08039a9cb53e9 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml @@ -0,0 +1,35 @@ +[package] +name = "bridge-hub-rococo-integration-tests" +version = "1.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +description = "Bridge Hub Rococo runtime integration tests with xcm-emulator" +publish = false + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false } + +# Substrate +frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false } +pallet-assets = { path = "../../../../../../../substrate/frame/assets", default-features = false } +pallet-balances = { path = "../../../../../../../substrate/frame/balances", default-features = false } +pallet-message-queue = { path = "../../../../../../../substrate/frame/message-queue" } + +# Polkadot +xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false} +pallet-xcm = { path = "../../../../../../../polkadot/xcm/pallet-xcm", default-features = false} +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../../polkadot/xcm/xcm-executor", default-features = false} + +# Bridges +pallet-bridge-messages = { path = "../../../../../../../bridges/modules/messages", default-features = false} +bp-messages = { path = "../../../../../../../bridges/primitives/messages", default-features = false} + +# Cumulus +asset-test-utils = { path = "../../../../../../parachains/runtimes/assets/test-utils" } +parachains-common = { path = "../../../../../../parachains/common" } +cumulus-pallet-xcmp-queue = { path = "../../../../../../pallets/xcmp-queue", default-features = false} +cumulus-pallet-dmp-queue = { path = "../../../../../../pallets/dmp-queue", default-features = false} +bridge-hub-rococo-runtime = { path = "../../../../../../parachains/runtimes/bridge-hubs/bridge-hub-rococo", default-features = false } +emulated-integration-tests-common = { path = "../../../common", default-features = false} +rococo-westend-system-emulated-network = { path = "../../../networks/rococo-westend-system" } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs new file mode 100644 index 
0000000000000000000000000000000000000000..53665437887c95940dba08485dc19364a027a29e --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs @@ -0,0 +1,67 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Substrate +pub use frame_support::assert_ok; + +// Polkadot +pub use xcm::{ + prelude::{AccountId32 as AccountId32Junction, *}, + v3::{ + Error, + NetworkId::{Rococo as RococoId, Westend as WestendId}, + }, +}; + +// Bridges +pub use bp_messages::LaneId; + +// Cumulus +pub use emulated_integration_tests_common::{ + accounts::ALICE, + impls::Inspect, + test_parachain_is_trusted_teleporter, + xcm_emulator::{ + assert_expected_events, bx, helpers::weight_within_threshold, Chain, Parachain as Para, + RelayChain as Relay, Test, TestArgs, TestContext, TestExt, + }, + xcm_helpers::{xcm_transact_paid_execution, xcm_transact_unpaid_execution}, + PROOF_SIZE_THRESHOLD, REF_TIME_THRESHOLD, XCM_V3, +}; +pub use parachains_common::{AccountId, Balance}; +pub use rococo_westend_system_emulated_network::{ + asset_hub_rococo_emulated_chain::{ + genesis::ED as ASSET_HUB_ROCOCO_ED, AssetHubRococoParaPallet as AssetHubRococoPallet, + }, + asset_hub_westend_emulated_chain::{ + genesis::ED as ASSET_HUB_WESTEND_ED, AssetHubWestendParaPallet as AssetHubWestendPallet, + }, + bridge_hub_rococo_emulated_chain::{ + genesis::ED as BRIDGE_HUB_ROCOCO_ED, BridgeHubRococoParaPallet as BridgeHubRococoPallet, + }, + rococo_emulated_chain::RococoRelayPallet as RococoPallet, + AssetHubRococoPara as AssetHubRococo, AssetHubRococoParaReceiver as AssetHubRococoReceiver, + AssetHubRococoParaSender as AssetHubRococoSender, AssetHubWestendPara as AssetHubWestend, + AssetHubWestendParaReceiver as AssetHubWestendReceiver, BridgeHubRococoPara as BridgeHubRococo, + BridgeHubRococoParaSender as BridgeHubRococoSender, BridgeHubWestendPara as BridgeHubWestend, + RococoRelay as Rococo, +}; + +pub const ASSET_ID: u32 = 1; +pub const ASSET_MIN_BALANCE: u128 = 1000; +pub const ASSETS_PALLET_ID: u8 = 50; + +#[cfg(test)] +mod tests; diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs new file mode 100644 index 0000000000000000000000000000000000000000..c55613f2826ffb0696cbe8046ea4f4bc4f425310 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs @@ -0,0 +1,219 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::*; + +fn send_asset_from_asset_hub_rococo_to_asset_hub_westend(id: MultiLocation, amount: u128) { + let signed_origin = + ::RuntimeOrigin::signed(AssetHubRococoSender::get().into()); + let asset_hub_westend_para_id = AssetHubWestend::para_id().into(); + let destination = MultiLocation { + parents: 2, + interior: X2(GlobalConsensus(NetworkId::Westend), Parachain(asset_hub_westend_para_id)), + }; + let beneficiary_id = AssetHubWestendReceiver::get(); + let beneficiary: MultiLocation = + AccountId32Junction { network: None, id: beneficiary_id.into() }.into(); + let assets: MultiAssets = (id, amount).into(); + let fee_asset_item = 0; + + // fund the AHR's SA on BHR for paying bridge transport fees + let ahr_as_seen_by_bhr = BridgeHubRococo::sibling_location_of(AssetHubRococo::para_id()); + let sov_ahr_on_bhr = BridgeHubRococo::sovereign_account_id_of(ahr_as_seen_by_bhr); + BridgeHubRococo::fund_accounts(vec![(sov_ahr_on_bhr.into(), 10_000_000_000_000u128)]); + + AssetHubRococo::execute_with(|| { + assert_ok!( + ::PolkadotXcm::limited_reserve_transfer_assets( + signed_origin, + bx!(destination.into()), + bx!(beneficiary.into()), + bx!(assets.into()), + fee_asset_item, + WeightLimit::Unlimited, + ) + ); + }); + + BridgeHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + BridgeHubRococo, + vec![ + // pay for bridge fees + RuntimeEvent::Balances(pallet_balances::Event::Withdraw { .. }) => {}, + // message exported + RuntimeEvent::BridgeWestendMessages( + pallet_bridge_messages::Event::MessageAccepted { .. } + ) => {}, + // message processed successfully + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + ] + ); + }); + BridgeHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + BridgeHubWestend, + vec![ + // message dispatched successfully + RuntimeEvent::XcmpQueue( + cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. 
} + ) => {}, + ] + ); + }); +} + +#[test] +fn send_rocs_from_asset_hub_rococo_to_asset_hub_westend() { + let roc_at_asset_hub_rococo: MultiLocation = Parent.into(); + let roc_at_asset_hub_westend = + MultiLocation { parents: 2, interior: X1(GlobalConsensus(NetworkId::Rococo)) }; + let owner: AccountId = AssetHubWestend::account_id_of(ALICE); + AssetHubWestend::force_create_foreign_asset( + roc_at_asset_hub_westend, + owner, + true, + ASSET_MIN_BALANCE, + vec![], + ); + let sov_ahw_on_ahr = AssetHubRococo::sovereign_account_of_parachain_on_other_global_consensus( + NetworkId::Westend, + AssetHubWestend::para_id(), + ); + + let rocs_in_reserve_on_ahr_before = + ::account_data_of(sov_ahw_on_ahr.clone()).free; + let sender_rocs_before = + ::account_data_of(AssetHubRococoSender::get()).free; + let receiver_rocs_before = AssetHubWestend::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance(roc_at_asset_hub_westend, &AssetHubWestendReceiver::get()) + }); + + let amount = ASSET_HUB_ROCOCO_ED * 1_000; + send_asset_from_asset_hub_rococo_to_asset_hub_westend(roc_at_asset_hub_rococo, amount); + AssetHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + AssetHubWestend, + vec![ + // issue ROCs on AHW + RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { asset_id, owner, .. }) => { + asset_id: *asset_id == roc_at_asset_hub_rococo, + owner: *owner == AssetHubWestendReceiver::get(), + }, + // message processed successfully + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + ] + ); + }); + + let sender_rocs_after = + ::account_data_of(AssetHubRococoSender::get()).free; + let receiver_rocs_after = AssetHubWestend::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance(roc_at_asset_hub_westend, &AssetHubWestendReceiver::get()) + }); + let rocs_in_reserve_on_ahr_after = + ::account_data_of(sov_ahw_on_ahr.clone()).free; + + // Sender's balance is reduced + assert!(sender_rocs_before > sender_rocs_after); + // Receiver's balance is increased + assert!(receiver_rocs_after > receiver_rocs_before); + // Reserve balance is increased by sent amount + assert_eq!(rocs_in_reserve_on_ahr_after, rocs_in_reserve_on_ahr_before + amount); +} + +#[test] +fn send_wnds_from_asset_hub_rococo_to_asset_hub_westend() { + let prefund_amount = 10_000_000_000_000u128; + let wnd_at_asset_hub_rococo = + MultiLocation { parents: 2, interior: X1(GlobalConsensus(NetworkId::Westend)) }; + let owner: AccountId = AssetHubWestend::account_id_of(ALICE); + AssetHubRococo::force_create_foreign_asset( + wnd_at_asset_hub_rococo, + owner, + true, + ASSET_MIN_BALANCE, + vec![(AssetHubRococoSender::get(), prefund_amount)], + ); + + // fund the AHR's SA on AHW with the WND tokens held in reserve + let sov_ahr_on_ahw = AssetHubWestend::sovereign_account_of_parachain_on_other_global_consensus( + NetworkId::Rococo, + AssetHubRococo::para_id(), + ); + AssetHubWestend::fund_accounts(vec![(sov_ahr_on_ahw.clone(), prefund_amount)]); + + let wnds_in_reserve_on_ahw_before = + ::account_data_of(sov_ahr_on_ahw.clone()).free; + assert_eq!(wnds_in_reserve_on_ahw_before, prefund_amount); + let sender_wnds_before = AssetHubRococo::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance(wnd_at_asset_hub_rococo, &AssetHubRococoSender::get()) + }); + assert_eq!(sender_wnds_before, prefund_amount); + let receiver_wnds_before = + ::account_data_of(AssetHubWestendReceiver::get()).free; + + let amount_to_send = ASSET_HUB_WESTEND_ED * 1_000; + 
send_asset_from_asset_hub_rococo_to_asset_hub_westend(wnd_at_asset_hub_rococo, amount_to_send); + AssetHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + AssetHubWestend, + vec![ + // WND is withdrawn from AHR's SA on AHW + RuntimeEvent::Balances( + pallet_balances::Event::Withdraw { who, amount } + ) => { + who: *who == sov_ahr_on_ahw, + amount: *amount == amount_to_send, + }, + // WNDs deposited to beneficiary + RuntimeEvent::Balances(pallet_balances::Event::Deposit { who, .. }) => { + who: *who == AssetHubWestendReceiver::get(), + }, + // message processed successfully + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + ] + ); + }); + + let sender_wnds_after = AssetHubRococo::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance(wnd_at_asset_hub_rococo, &AssetHubRococoSender::get()) + }); + let receiver_wnds_after = + ::account_data_of(AssetHubWestendReceiver::get()).free; + let wnds_in_reserve_on_ahw_after = + ::account_data_of(sov_ahr_on_ahw).free; + + // Sender's balance is reduced + assert!(sender_wnds_before > sender_wnds_after); + // Receiver's balance is increased + assert!(receiver_wnds_after > receiver_wnds_before); + // Reserve balance is reduced by sent amount + assert_eq!(wnds_in_reserve_on_ahw_after, wnds_in_reserve_on_ahw_before - amount_to_send); +} diff --git a/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/mod.rs similarity index 92% rename from cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/src/tests/mod.rs rename to cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/mod.rs index 48347557ae77df661d8c41726cb3fe7f24738678..4e2ef1434fdfd7a098d6dd63c51bd2a47514be22 100644 --- a/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/src/tests/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/mod.rs @@ -13,4 +13,6 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-mod example; +mod asset_transfers; +mod send_xcm; +mod teleport; diff --git a/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/src/tests/example.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs similarity index 70% rename from cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/src/tests/example.rs rename to cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs index f24e13bb71b875d8d68be161e5dc833198f93c39..4e61f7ce0ddb771ce88810bf98935f2546842da0 100644 --- a/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/src/tests/example.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs @@ -16,7 +16,7 @@ use crate::*; #[test] -fn example() { +fn send_xcm_from_rococo_relay_to_westend_asset_hub() { // Init tests variables // XcmPallet send arguments let sudo_origin = ::RuntimeOrigin::root(); @@ -29,13 +29,13 @@ fn example() { let xcm = VersionedXcm::from(Xcm(vec![ UnpaidExecution { weight_limit, check_origin }, ExportMessage { - network: WococoId, - destination: X1(Parachain(AssetHubWococo::para_id().into())), + network: WestendId, + destination: X1(Parachain(AssetHubWestend::para_id().into())), xcm: remote_xcm, }, ])); - //Rococo Global Consensus + // Rococo Global Consensus // Send XCM message from Relay Chain to Bridge Hub source Parachain Rococo::execute_with(|| { assert_ok!(::XcmPallet::send( @@ -60,38 +60,40 @@ fn example() { assert_expected_events!( BridgeHubRococo, vec![ - RuntimeEvent::DmpQueue(cumulus_pallet_dmp_queue::Event::ExecutedDownward { - outcome: Outcome::Complete(_), + RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { + success: true, .. }) => {}, - RuntimeEvent::BridgeWococoMessages(pallet_bridge_messages::Event::MessageAccepted { - lane_id: LaneId([0, 0, 0, 1]), + RuntimeEvent::BridgeWestendMessages(pallet_bridge_messages::Event::MessageAccepted { + lane_id: LaneId([0, 0, 0, 2]), nonce: 1, }) => {}, ] ); }); - // Wococo GLobal Consensus + // Westend Global Consensus // Receive XCM message in Bridge Hub target Parachain - BridgeHubWococo::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; + BridgeHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; assert_expected_events!( - BridgeHubWococo, + BridgeHubWestend, vec![ RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. }) => {}, ] ); }); - // Receive embeded XCM message within `ExportMessage` in Parachain destination - AssetHubWococo::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; + // Receive embedded XCM message within `ExportMessage` in Parachain destination + AssetHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; assert_expected_events!( - AssetHubWococo, + AssetHubWestend, vec![ - RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::Fail { .. }) => {}, + RuntimeEvent::MessageQueue(pallet_message_queue::Event::ProcessingFailed { + .. 
+ }) => {}, ] ); }); diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/teleport.rs new file mode 100644 index 0000000000000000000000000000000000000000..f00288a4d8c76ccffdf3a8ef00638b73618476ee --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/teleport.rs @@ -0,0 +1,30 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::*; +use bridge_hub_rococo_runtime::xcm_config::XcmConfig; + +#[test] +fn teleport_to_other_system_parachains_works() { + let amount = BRIDGE_HUB_ROCOCO_ED * 100; + let native_asset: MultiAssets = (Parent, amount).into(); + + test_parachain_is_trusted_teleporter!( + BridgeHubRococo, // Origin + XcmConfig, // XCM configuration + vec![AssetHubRococo], // Destinations + (native_asset, amount) + ); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..e5b1fce5f2b146f7ac41d9177e26521072a23538 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml @@ -0,0 +1,35 @@ +[package] +name = "bridge-hub-westend-integration-tests" +version = "1.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +description = "Bridge Hub Westend runtime integration tests with xcm-emulator" +publish = false + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false } + +# Substrate +frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false } +pallet-assets = { path = "../../../../../../../substrate/frame/assets", default-features = false } +pallet-balances = { path = "../../../../../../../substrate/frame/balances", default-features = false } +pallet-message-queue = { path = "../../../../../../../substrate/frame/message-queue" } + +# Polkadot +xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false} +pallet-xcm = { path = "../../../../../../../polkadot/xcm/pallet-xcm", default-features = false} +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../../polkadot/xcm/xcm-executor", default-features = false} + +# Bridges +pallet-bridge-messages = { path = "../../../../../../../bridges/modules/messages", default-features = false} +bp-messages = { path = "../../../../../../../bridges/primitives/messages", default-features = false} + +# Cumulus +asset-test-utils = { path = "../../../../../../parachains/runtimes/assets/test-utils" } +parachains-common = { path = "../../../../../../parachains/common" } +cumulus-pallet-xcmp-queue = { path = 
"../../../../../../pallets/xcmp-queue", default-features = false} +cumulus-pallet-dmp-queue = { path = "../../../../../../pallets/dmp-queue", default-features = false} +bridge-hub-westend-runtime = { path = "../../../../../../parachains/runtimes/bridge-hubs/bridge-hub-westend", default-features = false } +emulated-integration-tests-common = { path = "../../../common", default-features = false} +rococo-westend-system-emulated-network = { path = "../../../networks/rococo-westend-system" } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..04746aa86705290184e20e3a3f65cff25a60b375 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs @@ -0,0 +1,67 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Substrate +pub use frame_support::assert_ok; + +// Polkadot +pub use xcm::{ + prelude::{AccountId32 as AccountId32Junction, *}, + v3::{ + Error, + NetworkId::{Rococo as RococoId, Westend as WestendId}, + }, +}; + +// Bridges +pub use bp_messages::LaneId; + +// Cumulus +pub use emulated_integration_tests_common::{ + accounts::ALICE, + impls::Inspect, + test_parachain_is_trusted_teleporter, + xcm_emulator::{ + assert_expected_events, bx, helpers::weight_within_threshold, Chain, Parachain as Para, + RelayChain as Relay, Test, TestArgs, TestContext, TestExt, + }, + xcm_helpers::{xcm_transact_paid_execution, xcm_transact_unpaid_execution}, + PROOF_SIZE_THRESHOLD, REF_TIME_THRESHOLD, XCM_V3, +}; +pub use parachains_common::{AccountId, Balance}; +pub use rococo_westend_system_emulated_network::{ + asset_hub_rococo_emulated_chain::{ + genesis::ED as ASSET_HUB_ROCOCO_ED, AssetHubRococoParaPallet as AssetHubRococoPallet, + }, + asset_hub_westend_emulated_chain::{ + genesis::ED as ASSET_HUB_WESTEND_ED, AssetHubWestendParaPallet as AssetHubWestendPallet, + }, + bridge_hub_westend_emulated_chain::{ + genesis::ED as BRIDGE_HUB_WESTEND_ED, BridgeHubWestendParaPallet as BridgeHubWestendPallet, + }, + westend_emulated_chain::WestendRelayPallet as WestendPallet, + AssetHubRococoPara as AssetHubRococo, AssetHubRococoParaReceiver as AssetHubRococoReceiver, + AssetHubWestendPara as AssetHubWestend, AssetHubWestendParaReceiver as AssetHubWestendReceiver, + AssetHubWestendParaSender as AssetHubWestendSender, BridgeHubRococoPara as BridgeHubRococo, + BridgeHubWestendPara as BridgeHubWestend, BridgeHubWestendParaSender as BridgeHubWestendSender, + WestendRelay as Westend, +}; + +pub const ASSET_ID: u32 = 1; +pub const ASSET_MIN_BALANCE: u128 = 1000; +pub const ASSETS_PALLET_ID: u8 = 50; + +#[cfg(test)] +mod tests; diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs 
b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs new file mode 100644 index 0000000000000000000000000000000000000000..f90514f80c3e1b4514dcd3cf179a34d10b256909 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs @@ -0,0 +1,218 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +use crate::*; + +fn send_asset_from_asset_hub_westend_to_asset_hub_rococo(id: MultiLocation, amount: u128) { + let signed_origin = + ::RuntimeOrigin::signed(AssetHubWestendSender::get().into()); + let asset_hub_rococo_para_id = AssetHubRococo::para_id().into(); + let destination = MultiLocation { + parents: 2, + interior: X2(GlobalConsensus(NetworkId::Rococo), Parachain(asset_hub_rococo_para_id)), + }; + let beneficiary_id = AssetHubRococoReceiver::get(); + let beneficiary: MultiLocation = + AccountId32Junction { network: None, id: beneficiary_id.into() }.into(); + let assets: MultiAssets = (id, amount).into(); + let fee_asset_item = 0; + + // fund the AHW's SA on BHW for paying bridge transport fees + let ahw_as_seen_by_bhw = BridgeHubWestend::sibling_location_of(AssetHubWestend::para_id()); + let sov_ahw_on_bhw = BridgeHubWestend::sovereign_account_id_of(ahw_as_seen_by_bhw); + BridgeHubWestend::fund_accounts(vec![(sov_ahw_on_bhw.into(), 10_000_000_000_000u128)]); + + AssetHubWestend::execute_with(|| { + assert_ok!( + ::PolkadotXcm::limited_reserve_transfer_assets( + signed_origin, + bx!(destination.into()), + bx!(beneficiary.into()), + bx!(assets.into()), + fee_asset_item, + WeightLimit::Unlimited, + ) + ); + }); + + BridgeHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + BridgeHubWestend, + vec![ + // pay for bridge fees + RuntimeEvent::Balances(pallet_balances::Event::Withdraw { .. }) => {}, + // message exported + RuntimeEvent::BridgeRococoMessages( + pallet_bridge_messages::Event::MessageAccepted { .. } + ) => {}, + // message processed successfully + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + ] + ); + }); + BridgeHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + BridgeHubRococo, + vec![ + // message dispatched successfully + RuntimeEvent::XcmpQueue( + cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. 
} + ) => {}, + ] + ); + }); +} + +#[test] +fn send_wnds_from_asset_hub_westend_to_asset_hub_rococo() { + let wnd_at_asset_hub_westend: MultiLocation = Parent.into(); + let wnd_at_asset_hub_rococo = + MultiLocation { parents: 2, interior: X1(GlobalConsensus(NetworkId::Westend)) }; + let owner: AccountId = AssetHubRococo::account_id_of(ALICE); + AssetHubRococo::force_create_foreign_asset( + wnd_at_asset_hub_rococo, + owner, + true, + ASSET_MIN_BALANCE, + vec![], + ); + let sov_ahr_on_ahw = AssetHubWestend::sovereign_account_of_parachain_on_other_global_consensus( + NetworkId::Rococo, + AssetHubRococo::para_id(), + ); + + let wnds_in_reserve_on_ahw_before = + ::account_data_of(sov_ahr_on_ahw.clone()).free; + let sender_wnds_before = + ::account_data_of(AssetHubWestendSender::get()).free; + let receiver_wnds_before = AssetHubRococo::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance(wnd_at_asset_hub_rococo, &AssetHubRococoReceiver::get()) + }); + + let amount = ASSET_HUB_WESTEND_ED * 1_000; + send_asset_from_asset_hub_westend_to_asset_hub_rococo(wnd_at_asset_hub_westend, amount); + AssetHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + AssetHubRococo, + vec![ + // issue WNDs on AHR + RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { asset_id, owner, .. }) => { + asset_id: *asset_id == wnd_at_asset_hub_rococo, + owner: *owner == AssetHubRococoReceiver::get(), + }, + // message processed successfully + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + ] + ); + }); + + let sender_wnds_after = + ::account_data_of(AssetHubWestendSender::get()).free; + let receiver_wnds_after = AssetHubRococo::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance(wnd_at_asset_hub_rococo, &AssetHubRococoReceiver::get()) + }); + let wnds_in_reserve_on_ahw_after = + ::account_data_of(sov_ahr_on_ahw).free; + + // Sender's balance is reduced + assert!(sender_wnds_before > sender_wnds_after); + // Receiver's balance is increased + assert!(receiver_wnds_after > receiver_wnds_before); + // Reserve balance is increased by sent amount + assert_eq!(wnds_in_reserve_on_ahw_after, wnds_in_reserve_on_ahw_before + amount); +} + +#[test] +fn send_rocs_from_asset_hub_westend_to_asset_hub_rococo() { + let prefund_amount = 10_000_000_000_000u128; + let roc_at_asset_hub_westend = + MultiLocation { parents: 2, interior: X1(GlobalConsensus(NetworkId::Rococo)) }; + let owner: AccountId = AssetHubWestend::account_id_of(ALICE); + AssetHubWestend::force_create_foreign_asset( + roc_at_asset_hub_westend, + owner, + true, + ASSET_MIN_BALANCE, + vec![(AssetHubWestendSender::get(), prefund_amount)], + ); + + // fund the AHW's SA on AHR with the ROC tokens held in reserve + let sov_ahw_on_ahr = AssetHubRococo::sovereign_account_of_parachain_on_other_global_consensus( + NetworkId::Westend, + AssetHubWestend::para_id(), + ); + AssetHubRococo::fund_accounts(vec![(sov_ahw_on_ahr.clone(), prefund_amount)]); + + let rocs_in_reserve_on_ahr_before = + ::account_data_of(sov_ahw_on_ahr.clone()).free; + assert_eq!(rocs_in_reserve_on_ahr_before, prefund_amount); + let sender_rocs_before = AssetHubWestend::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance(roc_at_asset_hub_westend, &AssetHubWestendSender::get()) + }); + assert_eq!(sender_rocs_before, prefund_amount); + let receiver_rocs_before = + ::account_data_of(AssetHubRococoReceiver::get()).free; + + let amount_to_send = ASSET_HUB_ROCOCO_ED * 1_000; + 
send_asset_from_asset_hub_westend_to_asset_hub_rococo(roc_at_asset_hub_westend, amount_to_send); + AssetHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + AssetHubRococo, + vec![ + // ROC is withdrawn from AHW's SA on AHR + RuntimeEvent::Balances( + pallet_balances::Event::Withdraw { who, amount } + ) => { + who: *who == sov_ahw_on_ahr, + amount: *amount == amount_to_send, + }, + // ROCs deposited to beneficiary + RuntimeEvent::Balances(pallet_balances::Event::Deposit { who, .. }) => { + who: *who == AssetHubRococoReceiver::get(), + }, + // message processed successfully + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + ] + ); + }); + + let sender_rocs_after = AssetHubWestend::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance(roc_at_asset_hub_westend, &AssetHubWestendSender::get()) + }); + let receiver_rocs_after = + ::account_data_of(AssetHubRococoReceiver::get()).free; + let rocs_in_reserve_on_ahr_after = + ::account_data_of(sov_ahw_on_ahr.clone()).free; + + // Sender's balance is reduced + assert!(sender_rocs_before > sender_rocs_after); + // Receiver's balance is increased + assert!(receiver_rocs_after > receiver_rocs_before); + // Reserve balance is reduced by sent amount + assert_eq!(rocs_in_reserve_on_ahr_after, rocs_in_reserve_on_ahr_before - amount_to_send); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..4e2ef1434fdfd7a098d6dd63c51bd2a47514be22 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs @@ -0,0 +1,18 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +mod asset_transfers; +mod send_xcm; +mod teleport; diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs new file mode 100644 index 0000000000000000000000000000000000000000..4b21d758cd98384f11a1336f3373bf0127d5f7b9 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs @@ -0,0 +1,100 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::*; + +#[test] +fn send_xcm_from_westend_relay_to_rococo_asset_hub() { + // Init tests variables + // XcmPallet send arguments + let sudo_origin = ::RuntimeOrigin::root(); + let destination = Westend::child_location_of(BridgeHubWestend::para_id()).into(); + let weight_limit = WeightLimit::Unlimited; + let check_origin = None; + + let remote_xcm = Xcm(vec![ClearOrigin]); + + let xcm = VersionedXcm::from(Xcm(vec![ + UnpaidExecution { weight_limit, check_origin }, + ExportMessage { + network: RococoId, + destination: X1(Parachain(AssetHubRococo::para_id().into())), + xcm: remote_xcm, + }, + ])); + + // Westend Global Consensus + // Send XCM message from Relay Chain to Bridge Hub source Parachain + Westend::execute_with(|| { + assert_ok!(::XcmPallet::send( + sudo_origin, + bx!(destination), + bx!(xcm), + )); + + type RuntimeEvent = ::RuntimeEvent; + + assert_expected_events!( + Westend, + vec![ + RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {}, + ] + ); + }); + // Receive XCM message in Bridge Hub source Parachain + BridgeHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + assert_expected_events!( + BridgeHubWestend, + vec![ + RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { + success: true, + .. + }) => {}, + RuntimeEvent::BridgeRococoMessages(pallet_bridge_messages::Event::MessageAccepted { + lane_id: LaneId([0, 0, 0, 2]), + nonce: 1, + }) => {}, + ] + ); + }); + + // Rococo Global Consensus + // Receive XCM message in Bridge Hub target Parachain + BridgeHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + assert_expected_events!( + BridgeHubRococo, + vec![ + RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. }) => {}, + ] + ); + }); + // Receive embedded XCM message within `ExportMessage` in Parachain destination + AssetHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + assert_expected_events!( + AssetHubRococo, + vec![ + RuntimeEvent::MessageQueue(pallet_message_queue::Event::ProcessingFailed { + .. + }) => {}, + ] + ); + }); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/teleport.rs new file mode 100644 index 0000000000000000000000000000000000000000..8dff6c292955f96d3e5bd83c424fcf9cdb85e8a2 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/teleport.rs @@ -0,0 +1,30 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::*; +use bridge_hub_westend_runtime::xcm_config::XcmConfig; + +#[test] +fn teleport_to_other_system_parachains_works() { + let amount = BRIDGE_HUB_WESTEND_ED * 100; + let native_asset: MultiAssets = (Parent, amount).into(); + + test_parachain_is_trusted_teleporter!( + BridgeHubWestend, // Origin + XcmConfig, // XCM configuration + vec![AssetHubWestend], // Destinations + (native_asset, amount) + ); +} diff --git a/cumulus/parachains/pallets/collective-content/Cargo.toml b/cumulus/parachains/pallets/collective-content/Cargo.toml index 1c831ac7268a54533acf38188b3dcf73edcec6c1..e3f8023f4199fed318e2e5efcd7cd33687e90958 100644 --- a/cumulus/parachains/pallets/collective-content/Cargo.toml +++ b/cumulus/parachains/pallets/collective-content/Cargo.toml @@ -4,10 +4,11 @@ version = "0.1.0" authors = ["Parity Technologies "] edition = "2021" description = "Managed content" +license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } -scale-info = { version = "2.3.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../../../../substrate/frame/benchmarking", optional = true, default-features = false } frame-support = { path = "../../../../substrate/frame/support", default-features = false } diff --git a/cumulus/parachains/pallets/parachain-info/Cargo.toml b/cumulus/parachains/pallets/parachain-info/Cargo.toml index 931df9d9273d8cf0d511ff321973593ecf563657..727182dfb8e8639b6fb889a1f6a898b14495697e 100644 --- a/cumulus/parachains/pallets/parachain-info/Cargo.toml +++ b/cumulus/parachains/pallets/parachain-info/Cargo.toml @@ -1,12 +1,14 @@ [package] authors.workspace = true edition.workspace = true -name = "parachain-info" +name = "staging-parachain-info" version = "0.1.0" +license = "Apache-2.0" +description = "Pallet to store the parachain ID" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-support = { path = "../../../../substrate/frame/support", default-features = false} frame-system = { path = "../../../../substrate/frame/system", default-features = false} diff --git a/cumulus/parachains/pallets/ping/Cargo.toml b/cumulus/parachains/pallets/ping/Cargo.toml index f0afa63d692d458c4f5ea008bf0dee223c7e2c6c..0133befa855b030e622b44a7c54e9b49d5eedde7 100644 --- a/cumulus/parachains/pallets/ping/Cargo.toml +++ b/cumulus/parachains/pallets/ping/Cargo.toml @@ -3,10 +3,12 @@ authors.workspace = true edition.workspace = true name = "cumulus-ping" version = "0.1.0" +license = "Apache-2.0" +description = "Ping Pallet for Cumulus XCM/UMP testing." 
[dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } sp-std = { path = "../../../../substrate/primitives/std", default-features = false} sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false} diff --git a/cumulus/parachains/runtimes/assets/README.md b/cumulus/parachains/runtimes/assets/README.md index 78145395cbf905f57f42b32c09fdd52a8b73b09d..05466e537f426f6edcb8f4bc569debb8b5bc0f96 100644 --- a/cumulus/parachains/runtimes/assets/README.md +++ b/cumulus/parachains/runtimes/assets/README.md @@ -8,7 +8,7 @@ Asset Hub allows users to: - Deploy promise-backed assets, both fungible and non-fungible, with a DOT/KSM deposit. - Set admin roles to manage assets and asset classes. - Register assets as "self-sufficient" if the Relay Chain agrees, i.e. gain the ability for an - asset to justify the existance of accounts sans DOT/KSM. + asset to justify the existence of accounts sans DOT/KSM. - Pay transaction fees using sufficient assets. - Transfer (and approve transfer) assets. - Interact with the chain via its transactional API or XCM. diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-kusama/Cargo.toml index eb0f249aaae79307629cb0abc73826a14dcbda56..f71499e0c29184b64a7284e411f4ac8876842d58 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/Cargo.toml @@ -4,12 +4,13 @@ version = "0.9.420" authors.workspace = true edition.workspace = true description = "Kusama variant of Asset Hub parachain runtime" +license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } hex-literal = { version = "0.4.1" } log = { version = "0.4.20", default-features = false } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } smallvec = "1.11.0" # Substrate @@ -67,6 +68,8 @@ xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkad # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } +pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } + cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue", default-features = false } cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false} @@ -75,7 +78,7 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachain-info = { path = "../../../pallets/parachain-info", default-features = false } +parachain-info = { package = "staging-parachain-info", path = 
"../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } assets-common = { path = "../common", default-features = false } @@ -97,9 +100,11 @@ default = [ "std" ] state-trie-version-1 = [ "pallet-state-trie-migration" ] runtime-benchmarks = [ "assets-common/runtime-benchmarks", + "cumulus-pallet-dmp-queue/runtime-benchmarks", "cumulus-pallet-parachain-system/runtime-benchmarks", "cumulus-pallet-session-benchmarking/runtime-benchmarks", "cumulus-pallet-xcmp-queue/runtime-benchmarks", + "cumulus-primitives-core/runtime-benchmarks", "cumulus-primitives-utility/runtime-benchmarks", "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", @@ -109,6 +114,7 @@ runtime-benchmarks = [ "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-collator-selection/runtime-benchmarks", + "pallet-message-queue/runtime-benchmarks", "pallet-multisig/runtime-benchmarks", "pallet-nft-fractionalization/runtime-benchmarks", "pallet-nfts/runtime-benchmarks", @@ -143,6 +149,7 @@ try-runtime = [ "pallet-authorship/try-runtime", "pallet-balances/try-runtime", "pallet-collator-selection/try-runtime", + "pallet-message-queue/try-runtime", "pallet-multisig/try-runtime", "pallet-nft-fractionalization/try-runtime", "pallet-nfts/try-runtime", @@ -184,6 +191,7 @@ std = [ "pallet-authorship/std", "pallet-balances/std", "pallet-collator-selection/std", + "pallet-message-queue/std", "pallet-multisig/std", "pallet-nft-fractionalization/std", "pallet-nfts-runtime-api/std", @@ -203,6 +211,7 @@ std = [ "polkadot-core-primitives/std", "polkadot-parachain-primitives/std", "polkadot-runtime-common/std", + "primitive-types/std", "scale-info/std", "sp-api/std", "sp-block-builder/std", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs index 40ce122112d296d538496cb216ad33602d58e9b9..af0116d7014a584e359a506f7ad7304344606be5 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs @@ -34,13 +34,14 @@ use assets_common::{ AssetIdForTrustBackedAssetsConvert, MultiLocationForAssetId, }; use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; +use polkadot_runtime_common::xcm_sender::NoPriceForMessageDelivery; use sp_api::impl_runtime_apis; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, traits::{AccountIdConversion, AccountIdLookup, BlakeTwo256, Block as BlockT, Verify}, transaction_validity::{TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, Permill, + ApplyExtrinsicResult, Perbill, Permill, }; use sp_std::prelude::*; @@ -49,6 +50,7 @@ use sp_version::NativeVersion; use sp_version::RuntimeVersion; use codec::{Decode, Encode, MaxEncodedLen}; +use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; use frame_support::{ construct_runtime, dispatch::DispatchClass, @@ -56,7 +58,7 @@ use frame_support::{ ord_parameter_types, parameter_types, traits::{ AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU32, ConstU64, ConstU8, EitherOfDiverse, - InstanceFilter, + InstanceFilter, TransformOrigin, }, weights::{ConstantMultiplier, Weight}, BoundedVec, PalletId, @@ -71,6 +73,7 @@ pub use parachains_common as common; use parachains_common::{ impls::DealWithFees, kusama::{consensus::*, currency::*, fee::WeightToFee}, + message_queue::{NarrowOriginToSibling, 
ParaIdToSibling}, AccountId, AssetIdForTrustBackedAssets, AuraId, Balance, BlockNumber, Hash, Header, Nonce, Signature, AVERAGE_ON_INITIALIZE_RATIO, DAYS, HOURS, MAXIMUM_BLOCK_WEIGHT, NORMAL_DISPATCH_RATIO, SLOT_DURATION, @@ -79,7 +82,7 @@ use sp_runtime::RuntimeDebug; use xcm::opaque::v3::MultiLocation; use xcm_config::{ FellowshipLocation, ForeignAssetsConvertedConcreteId, GovernanceLocation, KsmLocation, - PoolAssetsConvertedConcreteId, TrustBackedAssetsConvertedConcreteId, XcmConfig, + PoolAssetsConvertedConcreteId, TrustBackedAssetsConvertedConcreteId, }; #[cfg(any(feature = "std", test))] @@ -89,7 +92,6 @@ pub use sp_runtime::BuildStorage; use pallet_xcm::{EnsureXcm, IsVoiceOfBody}; use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; use xcm::latest::BodyId; -use xcm_executor::XcmExecutor; use crate::xcm_config::{ ForeignCreatorsSovereignAccountOf, LocalAndForeignAssetsMultiLocationMatcher, @@ -223,6 +225,7 @@ impl pallet_balances::Config for Runtime { type MaxReserves = ConstU32<50>; type ReserveIdentifier = [u8; 8]; type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); // We allow each account to have holds on it from: // - `NftFractionalization`: 1 @@ -607,10 +610,11 @@ parameter_types! { } impl cumulus_pallet_parachain_system::Config for Runtime { + type WeightInfo = weights::cumulus_pallet_parachain_system::WeightInfo; type RuntimeEvent = RuntimeEvent; type OnSystemEvent = (); type SelfParaId = parachain_info::Pallet; - type DmpMessageHandler = DmpQueue; + type DmpQueue = frame_support::traits::EnqueueWithOrigin; type ReservedDmpWeight = ReservedDmpWeight; type OutboundXcmpMessageSource = XcmpQueue; type XcmpMessageHandler = XcmpQueue; @@ -626,6 +630,32 @@ impl cumulus_pallet_parachain_system::Config for Runtime { impl parachain_info::Config for Runtime {} +parameter_types! { + pub MessageQueueServiceWeight: Weight = Perbill::from_percent(35) * RuntimeBlockWeights::get().max_block; +} + +impl pallet_message_queue::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = weights::pallet_message_queue::WeightInfo; + #[cfg(feature = "runtime-benchmarks")] + type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor< + cumulus_primitives_core::AggregateMessageOrigin, + >; + #[cfg(not(feature = "runtime-benchmarks"))] + type MessageProcessor = xcm_builder::ProcessXcmMessage< + AggregateMessageOrigin, + xcm_executor::XcmExecutor, + RuntimeCall, + >; + type Size = u32; + // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: + type QueueChangeHandler = NarrowOriginToSibling; + type QueuePausedQuery = NarrowOriginToSibling; + type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type MaxStale = sp_core::ConstU32<8>; + type ServiceWeight = MessageQueueServiceWeight; +} + impl cumulus_pallet_aura_ext::Config for Runtime {} parameter_types! { @@ -635,23 +665,28 @@ parameter_types! { impl cumulus_pallet_xcmp_queue::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type XcmExecutor = XcmExecutor; type ChannelInfo = ParachainSystem; type VersionWrapper = PolkadotXcm; - type ExecuteOverweightOrigin = EnsureRoot; + // Enqueue XCMP messages from siblings for later processing. 
+ type XcmpQueue = TransformOrigin; + type MaxInboundSuspended = sp_core::ConstU32<1_000>; type ControllerOrigin = EitherOfDiverse< EnsureRoot, EnsureXcm>, >; type ControllerOriginConverter = xcm_config::XcmOriginToTransactDispatchOrigin; type WeightInfo = weights::cumulus_pallet_xcmp_queue::WeightInfo; - type PriceForSiblingDelivery = (); + type PriceForSiblingDelivery = NoPriceForMessageDelivery; +} + +parameter_types! { + pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; } impl cumulus_pallet_dmp_queue::Config for Runtime { + type WeightInfo = weights::cumulus_pallet_dmp_queue::WeightInfo; type RuntimeEvent = RuntimeEvent; - type XcmExecutor = XcmExecutor; - type ExecuteOverweightOrigin = EnsureRoot; + type DmpSink = frame_support::traits::EnqueueWithOrigin; } parameter_types! { @@ -845,7 +880,9 @@ construct_runtime!( XcmpQueue: cumulus_pallet_xcmp_queue::{Pallet, Call, Storage, Event} = 30, PolkadotXcm: pallet_xcm::{Pallet, Call, Storage, Event, Origin, Config} = 31, CumulusXcm: cumulus_pallet_xcm::{Pallet, Event, Origin} = 32, + // Temporary to migrate the remaining DMP messages: DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event} = 33, + MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 34, // Handy utilities. Utility: pallet_utility::{Pallet, Call, Event} = 40, @@ -902,19 +939,16 @@ pub type Executive = frame_executive::Executive< Migrations, >; -#[cfg(feature = "runtime-benchmarks")] -#[macro_use] -extern crate frame_benchmarking; - #[cfg(feature = "runtime-benchmarks")] mod benches { - define_benchmarks!( + frame_benchmarking::define_benchmarks!( [frame_system, SystemBench::] [pallet_assets, Local] [pallet_assets, Foreign] [pallet_assets, Pool] [pallet_asset_conversion, AssetConversion] [pallet_balances, Balances] + [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] [pallet_nft_fractionalization, NftFractionalization] [pallet_nfts, Nfts] @@ -924,9 +958,11 @@ mod benches { [pallet_utility, Utility] [pallet_timestamp, Timestamp] [pallet_collator_selection, CollatorSelection] + [cumulus_pallet_parachain_system, ParachainSystem] [cumulus_pallet_xcmp_queue, XcmpQueue] + [cumulus_pallet_dmp_queue, DmpQueue] // XCM - [pallet_xcm, PolkadotXcm] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -1164,6 +1200,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -1207,13 +1244,58 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported between AH and Relay. 
+ Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + Parent.into(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // AH can reserve transfer native token to some random parachain. + let random_para_id = 43211234; + ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests( + random_para_id.into() + ); + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + ParentThen(Parachain(random_para_id).into()).into(), + )) + } + } + use xcm::latest::prelude::*; use xcm_config::{KsmLocation, MaxAssetsIntoHolding}; use pallet_xcm_benchmarks::asset_instance_from; + parameter_types! { + pub ExistentialDepositMultiAsset: Option = Some(( + KsmLocation::get(), + ExistentialDeposit::get() + ).into()); + } + impl pallet_xcm_benchmarks::Config for Runtime { type XcmConfig = xcm_config::XcmConfig; type AccountIdConverter = xcm_config::LocationToAccountId; + type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositMultiAsset, + xcm_config::PriceForParentDelivery, + >; fn valid_destination() -> Result { Ok(KsmLocation::get()) } @@ -1269,6 +1351,7 @@ impl_runtime_apis! { } impl pallet_xcm_benchmarks::generic::Config for Runtime { + type TransactAsset = Balances; type RuntimeCall = RuntimeCall; fn worst_case_response() -> (u64, Response) { diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/cumulus_pallet_dmp_queue.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/cumulus_pallet_dmp_queue.rs new file mode 100644 index 0000000000000000000000000000000000000000..cc41dcd6cbbb62c1392ae2e7517b5dfe920a5b85 --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/cumulus_pallet_dmp_queue.rs @@ -0,0 +1,131 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `cumulus_pallet_dmp_queue` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-10-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=cumulus_pallet_dmp_queue +// --chain=asset-hub-kusama-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `cumulus_pallet_dmp_queue`. +pub struct WeightInfo(PhantomData); +impl cumulus_pallet_dmp_queue::WeightInfo for WeightInfo { + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn on_idle_good_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65696` + // Estimated: `69161` + // Minimum execution time: 124_651_000 picoseconds. + Weight::from_parts(127_857_000, 0) + .saturating_add(Weight::from_parts(0, 69161)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + fn on_idle_large_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65659` + // Estimated: `69124` + // Minimum execution time: 65_684_000 picoseconds. 
+ Weight::from_parts(68_039_000, 0) + .saturating_add(Weight::from_parts(0, 69124)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn on_idle_overweight_good_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65726` + // Estimated: `69191` + // Minimum execution time: 117_657_000 picoseconds. + Weight::from_parts(122_035_000, 0) + .saturating_add(Weight::from_parts(0, 69191)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(6)) + } + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + fn on_idle_overweight_large_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65689` + // Estimated: `69154` + // Minimum execution time: 59_799_000 picoseconds. + Weight::from_parts(61_354_000, 0) + .saturating_add(Weight::from_parts(0, 69154)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/cumulus_pallet_parachain_system.rs new file mode 100644 index 0000000000000000000000000000000000000000..f787aa3270118b87202bc78b58dcb8084d5f5a5b --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/cumulus_pallet_parachain_system.rs @@ -0,0 +1,80 @@ +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `cumulus_pallet_parachain_system` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-03-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("statemint-dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --chain +// statemint-dev +// --pallet +// cumulus_pallet_parachain_system +// --extrinsic +// * +// --execution +// wasm +// --wasm-execution +// compiled +// --output +// parachains/runtimes/assets/statemint/src/weights +// --steps +// 50 +// --repeat +// 20 + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::Weight}; +use sp_std::marker::PhantomData; + +/// Weight functions for `cumulus_pallet_parachain_system`. +pub struct WeightInfo(PhantomData); +impl cumulus_pallet_parachain_system::WeightInfo for WeightInfo { + /// Storage: ParachainSystem LastDmqMqcHead (r:1 w:1) + /// Proof Skipped: ParachainSystem LastDmqMqcHead (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: ParachainSystem ReservedDmpWeightOverride (r:1 w:0) + /// Proof Skipped: ParachainSystem ReservedDmpWeightOverride (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: ParachainSystem ProcessedDownwardMessages (r:0 w:1) + /// Proof Skipped: ParachainSystem ProcessedDownwardMessages (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: MessageQueue Pages (r:0 w:16) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// The range of component `n` is `[0, 1000]`. + fn enqueue_inbound_downward_messages(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `12` + // Estimated: `8013` + // Minimum execution time: 1_660_000 picoseconds. 
+ Weight::from_parts(1_720_000, 0) + .saturating_add(Weight::from_parts(0, 8013)) + // Standard Error: 28_418 + .saturating_add(Weight::from_parts(24_636_963, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/cumulus_pallet_xcmp_queue.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/cumulus_pallet_xcmp_queue.rs index 9c7a56687b314e51ff3d77134b5d389e9568b293..e394e8b837a10323bddab2f2fdfff6d6208b290c 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/cumulus_pallet_xcmp_queue.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/cumulus_pallet_xcmp_queue.rs @@ -1,43 +1,38 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `cumulus_pallet_xcmp_queue` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 +//! HOSTNAME: `Olivers-MacBook-Pro.local`, CPU: `` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// ./target/release/polkadot-parachain // benchmark // pallet -// --chain=asset-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=cumulus_pallet_xcmp_queue -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-kusama/src/weights/ +// --pallet +// cumulus-pallet-xcmp-queue +// --chain +// asset-hub-kusama-dev +// --output +// cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/cumulus_pallet_xcmp_queue.rs +// --extrinsic +// #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -56,22 +51,98 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `76` // Estimated: `1561` - // Minimum execution time: 5_467_000 picoseconds. - Weight::from_parts(5_634_000, 0) + // Minimum execution time: 6_000_000 picoseconds. + Weight::from_parts(6_000_000, 0) .saturating_add(Weight::from_parts(0, 1561)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `XcmpQueue::QueueConfig` (r:1 w:1) + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn set_config_with_weight() -> Weight { + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn enqueue_xcmp_message() -> Weight { + // Proof Size summary in bytes: + // Measured: `118` + // Estimated: `3517` + // Minimum execution time: 15_000_000 picoseconds. + Weight::from_parts(16_000_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn suspend_channel() -> Weight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `1561` - // Minimum execution time: 5_409_000 picoseconds. - Weight::from_parts(5_570_000, 0) + // Minimum execution time: 3_000_000 picoseconds. + Weight::from_parts(3_000_000, 0) .saturating_add(Weight::from_parts(0, 1561)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn resume_channel() -> Weight { + // Proof Size summary in bytes: + // Measured: `111` + // Estimated: `1596` + // Minimum execution time: 4_000_000 picoseconds. 
+ Weight::from_parts(5_000_000, 0) + .saturating_add(Weight::from_parts(0, 1596)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn take_first_concatenated_xcm() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 44_000_000 picoseconds. + Weight::from_parts(45_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Storage: `XcmpQueue::InboundXcmpMessages` (r:1 w:1) + /// Proof: `XcmpQueue::InboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) + /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn on_idle_good_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65747` + // Estimated: `69212` + // Minimum execution time: 62_000_000 picoseconds. + Weight::from_parts(66_000_000, 0) + .saturating_add(Weight::from_parts(0, 69212)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + fn on_idle_large_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65710` + // Estimated: `69175` + // Minimum execution time: 42_000_000 picoseconds. + Weight::from_parts(43_000_000, 0) + .saturating_add(Weight::from_parts(0, 69175)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/frame_system.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/frame_system.rs index 96477ddf4bd3fab597a6d2a89ae30e30f33840cd..6304051e6cb3b18d04cf266293e97df2ba891a0f 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/frame_system.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/frame_system.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `frame_system` //! diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/mod.rs index 281c013b3372f57a24b57812de91e684ffabd0c9..f04081a84fb3e191f1a6e1e26cfaac3dee90cc76 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/mod.rs @@ -1,20 +1,21 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
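// The weight modules declared just below are consumed by the runtime's pallet
// configs as `type WeightInfo = weights::<module>::WeightInfo<Runtime>;`, which
// is how the runtime's DB weights get plugged into the benchmarked figures.
// A minimal, illustrative accessor, assuming the surrounding asset-hub-kusama
// crate layout (`crate::Runtime`, `crate::weights`):
fn example_message_queue_weight() -> frame_support::weights::Weight {
    // Bring the pallet's `WeightInfo` trait into scope so the generated impl's
    // method can be called directly on the concrete `WeightInfo<Runtime>` type.
    use ::pallet_message_queue::WeightInfo as _;
    crate::weights::pallet_message_queue::WeightInfo::<crate::Runtime>::service_queue_base()
}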
pub mod block_weights; +pub mod cumulus_pallet_dmp_queue; +pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; @@ -24,6 +25,7 @@ pub mod pallet_assets_local; pub mod pallet_assets_pool; pub mod pallet_balances; pub mod pallet_collator_selection; +pub mod pallet_message_queue; pub mod pallet_multisig; pub mod pallet_nft_fractionalization; pub mod pallet_nfts; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_asset_conversion.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_asset_conversion.rs index 702f3743a7203f7103474dea554cb7aec2448ce6..3fcf2f8f4ec57260b2aaa7e422baa8c8e9be4772 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_asset_conversion.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_asset_conversion.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_asset_conversion` //! diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_assets_foreign.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_assets_foreign.rs index 7c237b20389db7af3bab0e5e231dac0097210328..c2688d97905491adbd91cd8bbc588adc64819c96 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_assets_foreign.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_assets_foreign.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . 
+// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_assets` //! diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_assets_local.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_assets_local.rs index 10bd4b1f8b097d40bb23e9a24a1f81ee0c6fea09..957e33fcd9ea20ff201ae3635466afcf154be011 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_assets_local.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_assets_local.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_assets` //! diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_assets_pool.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_assets_pool.rs index 444699e33ef7d714aac0f0cb11a4ead143c76c87..e0b4ff3655217bb2b91c27ea0290b9aeee389ba2 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_assets_pool.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_assets_pool.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_assets` //! diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_balances.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_balances.rs index be1ac3011f79e72f852010f8fbdbaf19c4776b8b..79c88f305806495de0d55c51986cc4ae27731750 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_balances.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_balances.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_balances` //! diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_collator_selection.rs index 7fe56ac31f7aa11c8eb497dbacdabef522ccb294..c686bd6134a7a6a489045416309f7f4c08dcbb43 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_collator_selection.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_collator_selection.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_collator_selection` //! @@ -124,7 +123,7 @@ impl pallet_collator_selection::WeightInfo for WeightIn } /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - fn set_candidacy_bond() -> Weight { + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` @@ -178,6 +177,30 @@ impl pallet_collator_selection::WeightInfo for WeightIn .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } + fn update_bond(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn take_candidate_slot(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `System::BlockWeight` (r:1 w:1) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_message_queue.rs new file mode 100644 index 0000000000000000000000000000000000000000..792b7d18b672dfc1adad680b7c7a10598598645c --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_message_queue.rs @@ -0,0 +1,179 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_message_queue` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-03-24, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("statemint-dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --chain +// statemint-dev +// --pallet +// pallet_message_queue +// --extrinsic +// * +// --execution +// wasm +// --wasm-execution +// compiled +// --output +// parachains/runtimes/assets/statemint/src/weights + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::Weight}; +use sp_std::marker::PhantomData; + +/// Weight functions for `pallet_message_queue`. +pub struct WeightInfo(PhantomData); +impl pallet_message_queue::WeightInfo for WeightInfo { + /// Storage: MessageQueue ServiceHead (r:1 w:0) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: MessageQueue BookStateFor (r:2 w:2) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn ready_ring_knit() -> Weight { + // Proof Size summary in bytes: + // Measured: `189` + // Estimated: `7534` + // Minimum execution time: 18_976_000 picoseconds. + Weight::from_parts(18_976_000, 0) + .saturating_add(Weight::from_parts(0, 7534)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: MessageQueue BookStateFor (r:2 w:2) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + fn ready_ring_unknit() -> Weight { + // Proof Size summary in bytes: + // Measured: `184` + // Estimated: `7534` + // Minimum execution time: 12_686_000 picoseconds. + Weight::from_parts(12_686_000, 0) + .saturating_add(Weight::from_parts(0, 7534)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn service_queue_base() -> Weight { + // Proof Size summary in bytes: + // Measured: `6` + // Estimated: `3517` + // Minimum execution time: 4_951_000 picoseconds. 
+ Weight::from_parts(4_951_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn service_page_base_completion() -> Weight { + // Proof Size summary in bytes: + // Measured: `72` + // Estimated: `69050` + // Minimum execution time: 6_023_000 picoseconds. + Weight::from_parts(6_023_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn service_page_base_no_completion() -> Weight { + // Proof Size summary in bytes: + // Measured: `72` + // Estimated: `69050` + // Minimum execution time: 6_901_000 picoseconds. + Weight::from_parts(6_901_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn service_page_item() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 58_503_000 picoseconds. + Weight::from_parts(58_503_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: MessageQueue BookStateFor (r:1 w:0) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn bump_service_head() -> Weight { + // Proof Size summary in bytes: + // Measured: `99` + // Estimated: `5007` + // Minimum execution time: 9_318_000 picoseconds. + Weight::from_parts(9_318_000, 0) + .saturating_add(Weight::from_parts(0, 5007)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn reap_page() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `72567` + // Minimum execution time: 52_228_000 picoseconds. + Weight::from_parts(52_228_000, 0) + .saturating_add(Weight::from_parts(0, 72567)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn execute_overweight_page_removed() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `72567` + // Minimum execution time: 59_617_000 picoseconds. 
+ Weight::from_parts(59_617_000, 0) + .saturating_add(Weight::from_parts(0, 72567)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn execute_overweight_page_updated() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `72567` + // Minimum execution time: 69_681_000 picoseconds. + Weight::from_parts(69_681_000, 0) + .saturating_add(Weight::from_parts(0, 72567)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_multisig.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_multisig.rs index ee7b70736414d86fc270d08777fe606ba76c0b34..d2e0f0ec7f0b96aaffa66c0eb3c04f6b907431d8 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_multisig.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_multisig.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_multisig` //! diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_nft_fractionalization.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_nft_fractionalization.rs index c55a18adc5233d75dd05f8b35678023a548051ed..4becc569514c1b571048033264ed3b6673269944 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_nft_fractionalization.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_nft_fractionalization.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. 
+// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_nft_fractionalization` //! diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_nfts.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_nfts.rs index 2de706bbdc70087cb8fe8128a30644e6e1f2ce74..7a51830799ad60424283ef2f2e2021b857795a08 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_nfts.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_nfts.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_nfts` //! 
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_proxy.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_proxy.rs index 9bc4ba448e5af7d50988a319f62718ac486b689c..0cdffc653bcd6c1d04c38cf214ab1aaa39cd6787 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_proxy.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_proxy.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_proxy` //! diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_session.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_session.rs index 56982f565ac478cc59ae9db6864e78e27a795191..73c3c06945d38a6d9b4ae9409002cc70c8e5735e 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_session.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_session.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_session` //! diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_timestamp.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_timestamp.rs index 94914eefba04db2a068767adeb7657e328613460..e27289a49e992246c20c9b169acb8cd92c61b5e0 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_timestamp.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_timestamp.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_timestamp` //! diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_uniques.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_uniques.rs index 43bc74931cb0e7c7ae8c90e8dafbac12df83669c..69d3e773afb347e22e93d1537532fda65f606462 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_uniques.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_uniques.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_uniques` //! diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_utility.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_utility.rs index 680e65a2dcf90271faf4f25e1f02656ded9d6881..e6c3e1295ef2a5ab8a0770028d99647a994665b7 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_utility.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_utility.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_utility` //! diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_xcm.rs index c2ed67d2f5d45196894955e3e2fbe41e175454c0..1e4a723e10f0bdceb6b16573981bd31bc8554054 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_xcm.rs @@ -17,27 +17,25 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=asset-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-kusama/src/weights/ +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=asset-hub-kusama-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,6 +48,8 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -62,12 +62,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn send() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3574` - // Minimum execution time: 30_015_000 picoseconds. - Weight::from_parts(30_576_000, 0) - .saturating_add(Weight::from_parts(0, 3574)) - .saturating_add(T::DbWeight::get().reads(5)) + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 25_043_000 picoseconds. + Weight::from_parts(25_670_000, 0) + .saturating_add(Weight::from_parts(0, 3610)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) @@ -76,8 +76,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1489` - // Minimum execution time: 24_785_000 picoseconds. - Weight::from_parts(25_097_000, 0) + // Minimum execution time: 18_893_000 picoseconds. + Weight::from_parts(19_261_000, 0) .saturating_add(Weight::from_parts(0, 1489)) .saturating_add(T::DbWeight::get().reads(1)) } @@ -87,8 +87,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1489` - // Minimum execution time: 18_561_000 picoseconds. - Weight::from_parts(19_121_000, 0) + // Minimum execution time: 14_107_000 picoseconds. + Weight::from_parts(14_500_000, 0) .saturating_add(Weight::from_parts(0, 1489)) .saturating_add(T::DbWeight::get().reads(1)) } @@ -108,8 +108,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_298_000 picoseconds. - Weight::from_parts(9_721_000, 0) + // Minimum execution time: 7_175_000 picoseconds. 
+ Weight::from_parts(7_493_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -119,8 +119,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_912_000 picoseconds. - Weight::from_parts(3_262_000, 0) + // Minimum execution time: 2_162_000 picoseconds. + Weight::from_parts(2_278_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -128,6 +128,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -142,16 +144,18 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_subscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3574` - // Minimum execution time: 35_127_000 picoseconds. - Weight::from_parts(36_317_000, 0) - .saturating_add(Weight::from_parts(0, 3574)) - .saturating_add(T::DbWeight::get().reads(7)) + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 29_144_000 picoseconds. + Weight::from_parts(30_134_000, 0) + .saturating_add(Weight::from_parts(0, 3610)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -166,12 +170,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_unsubscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `326` - // Estimated: `3791` - // Minimum execution time: 36_634_000 picoseconds. - Weight::from_parts(37_983_000, 0) - .saturating_add(Weight::from_parts(0, 3791)) - .saturating_add(T::DbWeight::get().reads(6)) + // Measured: `363` + // Estimated: `3828` + // Minimum execution time: 31_522_000 picoseconds. + Weight::from_parts(32_679_000, 0) + .saturating_add(Weight::from_parts(0, 3828)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) @@ -180,8 +184,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_940_000 picoseconds. 
- Weight::from_parts(3_085_000, 0) + // Minimum execution time: 2_338_000 picoseconds. + Weight::from_parts(2_494_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -191,8 +195,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `162` // Estimated: `11052` - // Minimum execution time: 17_400_000 picoseconds. - Weight::from_parts(17_759_000, 0) + // Minimum execution time: 17_315_000 picoseconds. + Weight::from_parts(17_787_000, 0) .saturating_add(Weight::from_parts(0, 11052)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -203,8 +207,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `166` // Estimated: `11056` - // Minimum execution time: 17_287_000 picoseconds. - Weight::from_parts(17_678_000, 0) + // Minimum execution time: 17_273_000 picoseconds. + Weight::from_parts(17_712_000, 0) .saturating_add(Weight::from_parts(0, 11056)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -215,13 +219,15 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `173` // Estimated: `13538` - // Minimum execution time: 18_941_000 picoseconds. - Weight::from_parts(19_285_000, 0) + // Minimum execution time: 18_395_000 picoseconds. + Weight::from_parts(19_095_000, 0) .saturating_add(Weight::from_parts(0, 13538)) .saturating_add(T::DbWeight::get().reads(5)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -234,12 +240,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn notify_current_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `176` - // Estimated: `6116` - // Minimum execution time: 32_668_000 picoseconds. - Weight::from_parts(33_533_000, 0) - .saturating_add(Weight::from_parts(0, 6116)) - .saturating_add(T::DbWeight::get().reads(7)) + // Measured: `212` + // Estimated: `6152` + // Minimum execution time: 27_343_000 picoseconds. + Weight::from_parts(28_068_000, 0) + .saturating_add(Weight::from_parts(0, 6152)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) @@ -248,8 +254,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `206` // Estimated: `8621` - // Minimum execution time: 9_182_000 picoseconds. - Weight::from_parts(9_498_000, 0) + // Minimum execution time: 9_156_000 picoseconds. + Weight::from_parts(9_552_000, 0) .saturating_add(Weight::from_parts(0, 8621)) .saturating_add(T::DbWeight::get().reads(3)) } @@ -259,14 +265,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `173` // Estimated: `11063` - // Minimum execution time: 17_519_000 picoseconds. 
- Weight::from_parts(17_943_000, 0) + // Minimum execution time: 17_454_000 picoseconds. + Weight::from_parts(17_831_000, 0) .saturating_add(Weight::from_parts(0, 11063)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -279,12 +287,38 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `179` - // Estimated: `11069` - // Minimum execution time: 38_680_000 picoseconds. - Weight::from_parts(39_984_000, 0) - .saturating_add(Weight::from_parts(0, 11069)) - .saturating_add(T::DbWeight::get().reads(9)) + // Measured: `215` + // Estimated: `11105` + // Minimum execution time: 34_299_000 picoseconds. + Weight::from_parts(35_156_000, 0) + .saturating_add(Weight::from_parts(0, 11105)) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn new_query() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `1588` + // Minimum execution time: 4_508_000 picoseconds. + Weight::from_parts(4_702_000, 0) + .saturating_add(Weight::from_parts(0, 1588)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::Queries` (r:1 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn take_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `7740` + // Estimated: `11205` + // Minimum execution time: 26_557_000 picoseconds. + Weight::from_parts(26_980_000, 0) + .saturating_add(Weight::from_parts(0, 11205)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/mod.rs index 9aff4902d15baa7c8040c8d37ef747fc0c08b754..405d7c72e55792fd49a39c57baf031046977761d 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/mod.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
-
-// Cumulus is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Cumulus. If not, see .
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
mod pallet_xcm_benchmarks_fungible;
mod pallet_xcm_benchmarks_generic;
@@ -61,16 +60,8 @@ impl<Call> XcmWeightInfo<Call> for AssetHubKusamaXcmWeight<Call> {
fn withdraw_asset(assets: &MultiAssets) -> Weight {
assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::withdraw_asset())
}
- // Currently there is no trusted reserve (`IsReserve = ()`),
- // but we need this hack for `pallet_xcm::reserve_transfer_assets`
- // (TODO) fix https://github.com/paritytech/polkadot/pull/7424
- // (TODO) fix https://github.com/paritytech/polkadot/pull/7546
- fn reserve_asset_deposited(_assets: &MultiAssets) -> Weight {
- // TODO: if we change `IsReserve = ...` then use this line...
- // TODO: or if remote weight estimation is fixed, then remove
- // TODO: hardcoded - fix https://github.com/paritytech/cumulus/issues/1974
- let hardcoded_weight = Weight::from_parts(1_000_000_000_u64, 0);
- hardcoded_weight.min(XcmFungibleWeight::<Runtime>::reserve_asset_deposited())
+ fn reserve_asset_deposited(assets: &MultiAssets) -> Weight {
+ assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::reserve_asset_deposited())
}
fn receive_teleported_asset(assets: &MultiAssets) -> Weight {
assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::receive_teleported_asset())
@@ -127,10 +118,7 @@ impl<Call> XcmWeightInfo<Call> for AssetHubKusamaXcmWeight<Call> {
}
fn deposit_asset(assets: &MultiAssetFilter, _dest: &MultiLocation) -> Weight {
- // Hardcoded till the XCM pallet is fixed
- let hardcoded_weight = Weight::from_parts(1_000_000_000_u64, 0);
- let weight = assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::deposit_asset());
- hardcoded_weight.min(weight)
+ assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::deposit_asset())
}
fn deposit_reserve_asset(
assets: &MultiAssetFilter,
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
index 6e663039b0c2bcd2767dbd46b6e53c113b50be3d..e680c2d5c1193b25f8408d959ae50d5c8ab0ae00 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
@@ -1,44 +1,41 @@
// Copyright (C) Parity Technologies (UK) Ltd.
-// This file is part of Cumulus.
+// SPDX-License-Identifier: Apache-2.0
-// Cumulus is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
- -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_xcm_benchmarks::fungible` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("asset-hub-kusama-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-nbnwcyh-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: Compiled, CHAIN: Some("asset-hub-kusama-dev"), DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --template=./templates/xcm-bench-template.hbs -// --chain=asset-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm_benchmarks::fungible -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm_benchmarks::fungible +// --chain=asset-hub-kusama-dev +// --header=./cumulus/file_header.txt +// --template=./cumulus/templates/xcm-bench-template.hbs +// --output=./cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -56,8 +53,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `3593` - // Minimum execution time: 26_104_000 picoseconds. - Weight::from_parts(26_722_000, 3593) + // Minimum execution time: 25_602_000 picoseconds. + Weight::from_parts(26_312_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -67,8 +64,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `6196` - // Minimum execution time: 52_259_000 picoseconds. - Weight::from_parts(53_854_000, 6196) + // Minimum execution time: 51_173_000 picoseconds. 
+ Weight::from_parts(52_221_000, 6196) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -88,10 +85,10 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn transfer_reserve_asset() -> Weight { // Proof Size summary in bytes: - // Measured: `210` + // Measured: `246` // Estimated: `6196` - // Minimum execution time: 77_248_000 picoseconds. - Weight::from_parts(80_354_000, 6196) + // Minimum execution time: 74_651_000 picoseconds. + Weight::from_parts(76_500_000, 6196) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -101,8 +98,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 500_000_000_000 picoseconds. - Weight::from_parts(500_000_000_000, 0) + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -118,10 +115,10 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn initiate_reserve_withdraw() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3574` - // Minimum execution time: 482_070_000 picoseconds. - Weight::from_parts(490_269_000, 3574) + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 458_666_000 picoseconds. + Weight::from_parts(470_470_000, 3610) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -129,8 +126,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_970_000 picoseconds. - Weight::from_parts(4_056_000, 0) + // Minimum execution time: 3_701_000 picoseconds. + Weight::from_parts(3_887_000, 0) } // Storage: `System::Account` (r:1 w:1) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) @@ -138,8 +135,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 26_324_000 picoseconds. - Weight::from_parts(26_985_000, 3593) + // Minimum execution time: 25_709_000 picoseconds. + Weight::from_parts(26_320_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -159,10 +156,10 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn deposit_reserve_asset() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3593` - // Minimum execution time: 52_814_000 picoseconds. - Weight::from_parts(54_666_000, 3593) + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 51_663_000 picoseconds. + Weight::from_parts(52_538_000, 3610) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -180,10 +177,10 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn initiate_teleport() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3574` - // Minimum execution time: 33_044_000 picoseconds. 
- Weight::from_parts(33_849_000, 3574) + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 31_972_000 picoseconds. + Weight::from_parts(32_834_000, 3610) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index 625549ecfccfa348e11debd99325f7ac41646445..9e8f3bfe75c1a1bdf94e701d5bbbdd0bb623dd90 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_xcm_benchmarks::generic` //! 
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/xcm_config.rs index 0c197598f889535ce94fe08398ded797a324ef8a..05262e074103132548785e929addea7afa7bc452 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/xcm_config.rs @@ -16,14 +16,12 @@ use super::{ AccountId, AllPalletsWithSystem, Assets, Authorship, Balance, Balances, ParachainInfo, ParachainSystem, PolkadotXcm, PoolAssets, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, - TrustBackedAssetsInstance, WeightToFee, XcmpQueue, + TransactionByteFee, TrustBackedAssetsInstance, WeightToFee, XcmpQueue, }; -use crate::ForeignAssets; +use crate::{ForeignAssets, CENTS}; use assets_common::{ local_and_foreign_assets::MatchesLocalAndForeignAssetsMultiLocation, - matching::{ - FromSiblingParachain, IsForeignConcreteAsset, StartsWith, StartsWithExplicitGlobalConsensus, - }, + matching::{FromSiblingParachain, IsForeignConcreteAsset}, }; use frame_support::{ match_types, parameter_types, @@ -31,19 +29,23 @@ use frame_support::{ }; use frame_system::EnsureRoot; use pallet_xcm::XcmPassthrough; -use parachains_common::{impls::ToStakingPot, xcm_config::AssetFeeAsExistentialDepositMultiplier}; +use parachains_common::{ + impls::ToStakingPot, + xcm_config::{AssetFeeAsExistentialDepositMultiplier, ConcreteAssetFromSystem}, +}; use polkadot_parachain_primitives::primitives::Sibling; +use polkadot_runtime_common::xcm_sender::ExponentialPrice; use sp_runtime::traits::ConvertInto; use xcm::latest::prelude::*; use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, CurrencyAdapter, DenyReserveTransferToRelayChain, DenyThenTry, DescribeAllTerminal, DescribeFamily, - EnsureXcmOrigin, FungiblesAdapter, HashedDescription, IsConcrete, LocalMint, NativeAsset, - NoChecking, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, + EnsureXcmOrigin, FungiblesAdapter, HashedDescription, IsConcrete, LocalMint, NoChecking, + ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, - SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, - WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, + SovereignSignedViaLocation, StartsWith, StartsWithExplicitGlobalConsensus, TakeWeightCredit, + TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, }; use xcm_executor::{traits::WithOriginFilter, XcmExecutor}; @@ -270,7 +272,7 @@ impl Contains for SafeCallFilter { pallet_collator_selection::Call::remove_invulnerable { .. }, ) | RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | RuntimeCall::XcmpQueue(..) | - RuntimeCall::DmpQueue(..) | + RuntimeCall::MessageQueue(..) | RuntimeCall::Assets( pallet_assets::Call::create { .. } | pallet_assets::Call::force_create { .. } | @@ -448,7 +450,7 @@ pub type Barrier = TrailingSetTopicAsId< // Allow XCMs with some computed origins to pass through. WithComputedOrigin< ( - // If the message is one that immediately attemps to pay for execution, then + // If the message is one that immediately attempts to pay for execution, then // allow it. AllowTopLevelPaidExecutionFrom, // Parent and its pluralities (i.e. governance bodies) get free execution. 
@@ -470,6 +472,15 @@ pub type AssetFeeAsExistentialDepositMultiplierFeeCharger = AssetFeeAsExistentia
TrustBackedAssetsInstance,
>;
+/// Cases where a remote origin is accepted as trusted Teleporter for a given asset:
+///
+/// - KSM with the parent Relay Chain and sibling system parachains; and
+/// - Sibling parachains' assets from where they originate (as `ForeignCreators`).
+pub type TrustedTeleporters = (
+ ConcreteAssetFromSystem<KsmLocation>,
+ IsForeignConcreteAsset<FromSiblingParachain<parachain_info::Pallet<Runtime>>>,
+);
+
pub struct XcmConfig;
impl xcm_executor::Config for XcmConfig {
type RuntimeCall = RuntimeCall;
@@ -480,13 +491,7 @@ impl xcm_executor::Config for XcmConfig {
// Asset Hub acting _as_ a reserve location for KSM and assets created under `pallet-assets`.
// For KSM, users must use teleport where allowed (e.g. with the Relay Chain).
type IsReserve = ();
- // We allow:
- // - teleportation of KSM
- // - teleportation of sibling parachain's assets (as ForeignCreators)
- type IsTeleporter = (
- NativeAsset,
- IsForeignConcreteAsset<FromSiblingParachain<parachain_info::Pallet<Runtime>>>,
- );
+ type IsTeleporter = TrustedTeleporters;
type UniversalLocation = UniversalLocation;
type Barrier = Barrier;
type Weigher = WeightInfoBounds<
@@ -528,20 +533,25 @@ impl xcm_executor::Config for XcmConfig {
/// Forms the basis for local origins sending/executing XCMs.
pub type LocalOriginToLocation = SignedToAccountId32<RuntimeOrigin, AccountId, RelayNetwork>;
+parameter_types! {
+ /// The asset ID for the asset that we use to pay for message delivery fees.
+ pub FeeAssetId: AssetId = Concrete(KsmLocation::get());
+ /// The base fee for the message delivery fees.
+ pub const BaseDeliveryFee: u128 = CENTS.saturating_mul(3);
+}
+
+pub type PriceForParentDelivery =
+ ExponentialPrice<FeeAssetId, BaseDeliveryFee, TransactionByteFee, ParachainSystem>;
+
/// The means for routing XCM messages which are not for local execution into the right message
/// queues.
pub type XcmRouter = WithUniqueTopic<(
// Two routers - use UMP to communicate with the relay chain:
- cumulus_primitives_utility::ParentAsUmp<ParachainSystem, PolkadotXcm, ()>,
+ cumulus_primitives_utility::ParentAsUmp<ParachainSystem, PolkadotXcm, PriceForParentDelivery>,
// ..and XCMP to communicate with the sibling chains.
XcmpQueue,
)>;
-#[cfg(feature = "runtime-benchmarks")]
-parameter_types! {
- pub ReachableDest: Option<MultiLocation> = Some(Parent.into());
-}
-
impl pallet_xcm::Config for Runtime {
type RuntimeEvent = RuntimeEvent;
// We want to disallow users sending (arbitrary) XCMs from this chain.
@@ -571,8 +581,6 @@ impl pallet_xcm::Config for Runtime {
type SovereignAccountOf = LocationToAccountId;
type MaxLockers = ConstU32<8>;
type WeightInfo = crate::weights::pallet_xcm::WeightInfo<Runtime>;
- #[cfg(feature = "runtime-benchmarks")]
- type ReachableDest = ReachableDest;
type AdminOrigin = EnsureRoot<AccountId>;
type MaxRemoteLockConsumers = ConstU32<0>;
type RemoteLockConsumerIdentifier = ();
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/tests/tests.rs
index 7d49b56e461a0a3919e2130dbb6d84f6888349ad..cdd4290770fd3971ddfb0e46dae6bc8e6d21cefa 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/tests/tests.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/tests/tests.rs
@@ -18,13 +18,14 @@
//! Tests for the Statemine (Kusama Assets Hub) chain.
use asset_hub_kusama_runtime::xcm_config::{ - AssetFeeAsExistentialDepositMultiplierFeeCharger, KsmLocation, TrustBackedAssetsPalletLocation, + AssetFeeAsExistentialDepositMultiplierFeeCharger, KsmLocation, LocationToAccountId, + TrustBackedAssetsPalletLocation, }; pub use asset_hub_kusama_runtime::{ xcm_config::{CheckingAccount, ForeignCreatorsSovereignAccountOf, XcmConfig}, AllPalletsWithoutSystem, AssetDeposit, Assets, Balances, ExistentialDeposit, ForeignAssets, ForeignAssetsInstance, MetadataDepositBase, MetadataDepositPerByte, ParachainSystem, Runtime, - RuntimeCall, RuntimeEvent, SessionKeys, System, TrustBackedAssetsInstance, + RuntimeCall, RuntimeEvent, SessionKeys, System, TrustBackedAssetsInstance, XcmpQueue, }; use asset_test_utils::{CollatorSessionKeys, ExtBuilder}; use codec::{Decode, Encode}; @@ -518,12 +519,6 @@ asset_test_utils::include_teleports_for_native_asset_works!( _ => None, } }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }), 1000 ); @@ -632,3 +627,32 @@ asset_test_utils::include_create_and_manage_foreign_assets_for_local_consensus_p assert_eq!(ForeignAssets::asset_ids().collect::>().len(), 1); }) ); + +#[test] +fn reserve_transfer_native_asset_to_non_teleport_para_works() { + asset_test_utils::test_cases::reserve_transfer_native_asset_to_non_teleport_para_works::< + Runtime, + AllPalletsWithoutSystem, + XcmConfig, + ParachainSystem, + XcmpQueue, + LocationToAccountId, + >( + collator_session_keys(), + ExistentialDeposit::get(), + AccountId::from(ALICE), + Box::new(|runtime_event_encoded: Vec| { + match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { + Ok(RuntimeEvent::PolkadotXcm(event)) => Some(event), + _ => None, + } + }), + Box::new(|runtime_event_encoded: Vec| { + match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { + Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), + _ => None, + } + }), + WeightLimit::Unlimited, + ); +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/Cargo.toml index df38e4d9d64a9b70bfb3d09042e404221e1b430e..b5eff6b63afbae705927855c867f9cf325e488db 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/Cargo.toml @@ -4,12 +4,13 @@ version = "0.9.420" authors.workspace = true edition.workspace = true description = "Asset Hub Polkadot parachain runtime" +license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } hex-literal = { version = "0.4.1", optional = true } log = { version = "0.4.20", default-features = false } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } smallvec = "1.11.0" # Substrate @@ -62,6 +63,7 @@ xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkad # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } +pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue", default-features = false } cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = 
false, features = ["parameterized-consensus-hook",] } cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false} @@ -70,7 +72,7 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachain-info = { path = "../../../pallets/parachain-info", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } assets-common = { path = "../common", default-features = false } @@ -85,9 +87,11 @@ substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", default = [ "std" ] runtime-benchmarks = [ "assets-common/runtime-benchmarks", + "cumulus-pallet-dmp-queue/runtime-benchmarks", "cumulus-pallet-parachain-system/runtime-benchmarks", "cumulus-pallet-session-benchmarking/runtime-benchmarks", "cumulus-pallet-xcmp-queue/runtime-benchmarks", + "cumulus-primitives-core/runtime-benchmarks", "cumulus-primitives-utility/runtime-benchmarks", "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", @@ -98,6 +102,7 @@ runtime-benchmarks = [ "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-collator-selection/runtime-benchmarks", + "pallet-message-queue/runtime-benchmarks", "pallet-multisig/runtime-benchmarks", "pallet-nfts/runtime-benchmarks", "pallet-proxy/runtime-benchmarks", @@ -129,6 +134,7 @@ try-runtime = [ "pallet-authorship/try-runtime", "pallet-balances/try-runtime", "pallet-collator-selection/try-runtime", + "pallet-message-queue/try-runtime", "pallet-multisig/try-runtime", "pallet-nfts/try-runtime", "pallet-proxy/try-runtime", @@ -167,6 +173,7 @@ std = [ "pallet-authorship/std", "pallet-balances/std", "pallet-collator-selection/std", + "pallet-message-queue/std", "pallet-multisig/std", "pallet-nfts-runtime-api/std", "pallet-nfts/std", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs index d4f7d6ef361674e5fa7bbd407e2eb830529a2963..1b7ef10f4857320bd55a192c8a2274b355a07448 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs @@ -66,13 +66,15 @@ use assets_common::{ foreign_creators::ForeignCreators, matching::FromSiblingParachain, MultiLocationForAssetId, }; use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; +use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; +use polkadot_runtime_common::xcm_sender::NoPriceForMessageDelivery; use sp_api::impl_runtime_apis; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, traits::{AccountIdLookup, BlakeTwo256, Block as BlockT, ConvertInto, Verify}, transaction_validity::{TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, + ApplyExtrinsicResult, Perbill, }; use sp_std::prelude::*; @@ -88,7 +90,7 @@ use frame_support::{ parameter_types, traits::{ AsEnsureOriginWithArg, ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, - InstanceFilter, + InstanceFilter, 
TransformOrigin,
},
weights::{ConstantMultiplier, Weight},
PalletId,
@@ -101,6 +103,7 @@ use pallet_nfts::PalletFeatures;
pub use parachains_common as common;
use parachains_common::{
impls::{AssetsToBlockAuthor, DealWithFees},
+ message_queue::*,
polkadot::{consensus::*, currency::*, fee::WeightToFee},
AccountId, AssetHubPolkadotAuraId as AuraId, AssetIdForTrustBackedAssets, Balance, BlockNumber,
Hash, Header, Nonce, Signature, AVERAGE_ON_INITIALIZE_RATIO, DAYS, HOURS, MAXIMUM_BLOCK_WEIGHT,
@@ -109,7 +112,7 @@ use parachains_common::{
use sp_runtime::RuntimeDebug;
use xcm_config::{
DotLocation, FellowshipLocation, ForeignAssetsConvertedConcreteId, GovernanceLocation,
- TrustBackedAssetsConvertedConcreteId, XcmConfig, XcmOriginToTransactDispatchOrigin,
+ TrustBackedAssetsConvertedConcreteId, XcmOriginToTransactDispatchOrigin,
};
#[cfg(any(feature = "std", test))]
@@ -119,7 +122,6 @@ pub use sp_runtime::BuildStorage;
use pallet_xcm::{EnsureXcm, IsVoiceOfBody};
use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate};
use xcm::latest::BodyId;
-use xcm_executor::XcmExecutor;
use crate::xcm_config::ForeignCreatorsSovereignAccountOf;
use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight};
@@ -233,6 +235,7 @@ impl pallet_balances::Config for Runtime {
type MaxReserves = ConstU32<50>;
type ReserveIdentifier = [u8; 8];
type RuntimeHoldReason = RuntimeHoldReason;
+ type RuntimeFreezeReason = RuntimeFreezeReason;
type FreezeIdentifier = ();
type MaxHolds = ConstU32<0>;
type MaxFreezes = ConstU32<0>;
@@ -543,10 +546,11 @@ parameter_types! {
}
impl cumulus_pallet_parachain_system::Config for Runtime {
+ type WeightInfo = weights::cumulus_pallet_parachain_system::WeightInfo<Runtime>;
type RuntimeEvent = RuntimeEvent;
type OnSystemEvent = ();
type SelfParaId = parachain_info::Pallet<Runtime>;
- type DmpMessageHandler = DmpQueue;
+ type DmpQueue = frame_support::traits::EnqueueWithOrigin<MessageQueue, RelayOrigin>;
type ReservedDmpWeight = ReservedDmpWeight;
type OutboundXcmpMessageSource = XcmpQueue;
type XcmpMessageHandler = XcmpQueue;
@@ -560,6 +564,32 @@ impl cumulus_pallet_parachain_system::Config for Runtime {
>;
}
+parameter_types! {
+ pub MessageQueueServiceWeight: Weight = Perbill::from_percent(35) * RuntimeBlockWeights::get().max_block;
+}
+
+impl pallet_message_queue::Config for Runtime {
+ type RuntimeEvent = RuntimeEvent;
+ type WeightInfo = weights::pallet_message_queue::WeightInfo<Runtime>;
+ #[cfg(feature = "runtime-benchmarks")]
+ type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor<
+ cumulus_primitives_core::AggregateMessageOrigin,
+ >;
+ #[cfg(not(feature = "runtime-benchmarks"))]
+ type MessageProcessor = xcm_builder::ProcessXcmMessage<
+ AggregateMessageOrigin,
+ xcm_executor::XcmExecutor<xcm_config::XcmConfig>,
+ RuntimeCall,
+ >;
+ type Size = u32;
+ // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin:
+ type QueueChangeHandler = NarrowOriginToSibling<XcmpQueue>;
+ type QueuePausedQuery = NarrowOriginToSibling<XcmpQueue>;
+ type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>;
+ type MaxStale = sp_core::ConstU32<8>;
+ type ServiceWeight = MessageQueueServiceWeight;
+}
+
impl parachain_info::Config for Runtime {}
impl cumulus_pallet_aura_ext::Config for Runtime {}
@@ -572,22 +602,27 @@ parameter_types!
{
impl cumulus_pallet_xcmp_queue::Config for Runtime {
type WeightInfo = weights::cumulus_pallet_xcmp_queue::WeightInfo<Runtime>;
type RuntimeEvent = RuntimeEvent;
- type XcmExecutor = XcmExecutor<XcmConfig>;
type ChannelInfo = ParachainSystem;
type VersionWrapper = PolkadotXcm;
- type ExecuteOverweightOrigin = EnsureRoot<AccountId>;
+ // Enqueue XCMP messages from siblings for later processing.
+ type XcmpQueue = TransformOrigin<MessageQueue, AggregateMessageOrigin, ParaId, ParaIdToSibling>;
+ type MaxInboundSuspended = sp_core::ConstU32<1_000>;
type ControllerOrigin = EitherOfDiverse<
EnsureRoot<AccountId>,
EnsureXcm<IsVoiceOfBody<GovernanceLocation, FellowsBodyId>>,
>;
type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin;
- type PriceForSiblingDelivery = ();
+ type PriceForSiblingDelivery = NoPriceForMessageDelivery<ParaId>;
+}
+
+parameter_types! {
+ pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent;
}
impl cumulus_pallet_dmp_queue::Config for Runtime {
+ type WeightInfo = weights::cumulus_pallet_dmp_queue::WeightInfo<Runtime>;
type RuntimeEvent = RuntimeEvent;
- type XcmExecutor = XcmExecutor<XcmConfig>;
- type ExecuteOverweightOrigin = EnsureRoot<AccountId>;
+ type DmpSink = frame_support::traits::EnqueueWithOrigin<MessageQueue, RelayOrigin>;
}
parameter_types! {
@@ -760,6 +795,7 @@ construct_runtime!(
PolkadotXcm: pallet_xcm::{Pallet, Call, Storage, Event<T>, Origin, Config<T>} = 31,
CumulusXcm: cumulus_pallet_xcm::{Pallet, Event<T>, Origin} = 32,
DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event<T>} = 33,
+ MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event<T>} = 34,
// Handy utilities.
Utility: pallet_utility::{Pallet, Call, Event} = 40,
@@ -797,7 +833,10 @@ pub type SignedExtra = (
pub type UncheckedExtrinsic = generic::UncheckedExtrinsic<Address, RuntimeCall, Signature, SignedExtra>;
/// Migrations to apply on runtime upgrade.
-pub type Migrations = (pallet_collator_selection::migration::v1::MigrateToV1<Runtime>,);
+pub type Migrations = (
+ // unreleased
+ pallet_collator_selection::migration::v1::MigrateToV1<Runtime>,
+);
/// Executive: handles dispatch to the various modules.
pub type Executive = frame_executive::Executive<
@@ -809,17 +848,14 @@ pub type Executive = frame_executive::Executive<
Migrations,
>;
-#[cfg(feature = "runtime-benchmarks")]
-#[macro_use]
-extern crate frame_benchmarking;
-
#[cfg(feature = "runtime-benchmarks")]
mod benches {
- define_benchmarks!(
+ frame_benchmarking::define_benchmarks!(
[frame_system, SystemBench::<Runtime>]
[pallet_assets, Local]
[pallet_assets, Foreign]
[pallet_balances, Balances]
+ [pallet_message_queue, MessageQueue]
[pallet_multisig, Multisig]
[pallet_nfts, Nfts]
[pallet_proxy, Proxy]
@@ -828,9 +864,11 @@ mod benches {
[pallet_utility, Utility]
[pallet_timestamp, Timestamp]
[pallet_collator_selection, CollatorSelection]
+ [cumulus_pallet_parachain_system, ParachainSystem]
[cumulus_pallet_xcmp_queue, XcmpQueue]
+ [cumulus_pallet_dmp_queue, DmpQueue]
// XCM
- [pallet_xcm, PolkadotXcm]
+ [pallet_xcm, PalletXcmExtrinsicsBenchmark::<Runtime>]
// NOTE: Make sure you point to the individual modules below.
[pallet_xcm_benchmarks::fungible, XcmBalances]
[pallet_xcm_benchmarks::generic, XcmGeneric]
@@ -1044,6 +1082,7 @@ impl_runtime_apis! {
use frame_support::traits::StorageInfoTrait;
use frame_system_benchmarking::Pallet as SystemBench;
use cumulus_pallet_session_benchmarking::Pallet as SessionBench;
+ use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark;
// This is defined once again in dispatch_benchmark, because list_benchmarks!
// and add_benchmarks! are macros exported by define_benchmarks! macros and those types
@@ -1086,13 +1125,58 @@ impl_runtime_apis!
{ use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported between AH and Relay. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + Parent.into(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // AH can reserve transfer native token to some random parachain. + let random_para_id = 43211234; + ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests( + random_para_id.into() + ); + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + ParentThen(Parachain(random_para_id).into()).into(), + )) + } + } + use xcm::latest::prelude::*; use xcm_config::{DotLocation, MaxAssetsIntoHolding}; use pallet_xcm_benchmarks::asset_instance_from; + parameter_types! { + pub ExistentialDepositMultiAsset: Option = Some(( + xcm_config::DotLocation::get(), + ExistentialDeposit::get() + ).into()); + } + impl pallet_xcm_benchmarks::Config for Runtime { type XcmConfig = xcm_config::XcmConfig; type AccountIdConverter = xcm_config::LocationToAccountId; + type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositMultiAsset, + xcm_config::PriceForParentDelivery, + >; fn valid_destination() -> Result { Ok(DotLocation::get()) } @@ -1148,6 +1232,7 @@ impl_runtime_apis! { } impl pallet_xcm_benchmarks::generic::Config for Runtime { + type TransactAsset = Balances; type RuntimeCall = RuntimeCall; fn worst_case_response() -> (u64, Response) { diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/cumulus_pallet_dmp_queue.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/cumulus_pallet_dmp_queue.rs new file mode 100644 index 0000000000000000000000000000000000000000..cc41dcd6cbbb62c1392ae2e7517b5dfe920a5b85 --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/cumulus_pallet_dmp_queue.rs @@ -0,0 +1,131 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `cumulus_pallet_dmp_queue` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-10-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
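// A small, self-contained sketch (assuming the `xcm` crate, XCM v3 types) of the
// asset/location pairs built by the benchmark helpers above: the relay-chain native
// token is the `Concrete(Parent)` asset, and a sibling parachain destination is
// reached via `../Parachain(id)`. Amounts and para IDs below are placeholders.
use xcm::latest::prelude::*;

fn relay_native_asset(amount: u128) -> MultiAsset {
    MultiAsset { id: Concrete(Parent.into()), fun: Fungible(amount) }
}

fn sibling_dest(para_id: u32) -> MultiLocation {
    // `../Parachain(para_id)` as seen from this parachain.
    ParentThen(Parachain(para_id).into()).into()
}

fn main() {
    println!("{:?} -> {:?}", relay_native_asset(1_000_000_000), sibling_dest(1000));
}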
WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=cumulus_pallet_dmp_queue +// --chain=asset-hub-kusama-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `cumulus_pallet_dmp_queue`. +pub struct WeightInfo(PhantomData); +impl cumulus_pallet_dmp_queue::WeightInfo for WeightInfo { + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn on_idle_good_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65696` + // Estimated: `69161` + // Minimum execution time: 124_651_000 picoseconds. + Weight::from_parts(127_857_000, 0) + .saturating_add(Weight::from_parts(0, 69161)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + fn on_idle_large_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65659` + // Estimated: `69124` + // Minimum execution time: 65_684_000 picoseconds. 
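// A minimal sketch of how the generated functions in this file assemble a weight: a
// measured constant (ref-time plus worst-case proof size) and the configured per-read /
// per-write database costs. `RocksDbWeight` stands in for `T::DbWeight` here and the
// picosecond figures are illustrative.
use frame_support::weights::{constants::RocksDbWeight, Weight};

fn on_idle_good_msg_like() -> Weight {
    Weight::from_parts(127_857_000, 69_161)
        .saturating_add(RocksDbWeight::get().reads(5))
        .saturating_add(RocksDbWeight::get().writes(5))
}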
+ Weight::from_parts(68_039_000, 0) + .saturating_add(Weight::from_parts(0, 69124)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn on_idle_overweight_good_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65726` + // Estimated: `69191` + // Minimum execution time: 117_657_000 picoseconds. + Weight::from_parts(122_035_000, 0) + .saturating_add(Weight::from_parts(0, 69191)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(6)) + } + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + fn on_idle_overweight_large_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65689` + // Estimated: `69154` + // Minimum execution time: 59_799_000 picoseconds. 
+ Weight::from_parts(61_354_000, 0) + .saturating_add(Weight::from_parts(0, 69154)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/cumulus_pallet_parachain_system.rs new file mode 100644 index 0000000000000000000000000000000000000000..970534560c68d619908dc241658493717c480c1c --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/cumulus_pallet_parachain_system.rs @@ -0,0 +1,80 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `cumulus_pallet_parachain_system` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-03-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westmint-dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --chain +// westmint-dev +// --pallet +// cumulus_pallet_parachain_system +// --extrinsic +// * +// --execution +// wasm +// --wasm-execution +// compiled +// --output +// parachains/runtimes/assets/westmint/src/weights +// --steps +// 50 +// --repeat +// 20 + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::Weight}; +use sp_std::marker::PhantomData; + +/// Weight functions for `cumulus_pallet_parachain_system`. +pub struct WeightInfo(PhantomData); +impl cumulus_pallet_parachain_system::WeightInfo for WeightInfo { + /// Storage: ParachainSystem LastDmqMqcHead (r:1 w:1) + /// Proof Skipped: ParachainSystem LastDmqMqcHead (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: ParachainSystem ReservedDmpWeightOverride (r:1 w:0) + /// Proof Skipped: ParachainSystem ReservedDmpWeightOverride (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: ParachainSystem ProcessedDownwardMessages (r:0 w:1) + /// Proof Skipped: ParachainSystem ProcessedDownwardMessages (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: MessageQueue Pages (r:0 w:16) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// The range of component `n` is `[0, 1000]`. 
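// A sketch of evaluating a benchmarked weight that has a linear component, as in the
// `enqueue_inbound_downward_messages(n)` function that follows: total = base + n * slope.
// The constants mirror the generated figures and are for illustration only.
use frame_support::weights::Weight;

fn enqueue_n_downward_messages(n: u64) -> Weight {
    Weight::from_parts(1_690_000, 8_013)
        // ~24.2 µs of ref-time added per enqueued downward message.
        .saturating_add(Weight::from_parts(24_208_496, 0).saturating_mul(n))
}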
+ fn enqueue_inbound_downward_messages(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `12` + // Estimated: `8013` + // Minimum execution time: 1_638_000 picoseconds. + Weight::from_parts(1_690_000, 0) + .saturating_add(Weight::from_parts(0, 8013)) + // Standard Error: 22_873 + .saturating_add(Weight::from_parts(24_208_496, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/cumulus_pallet_xcmp_queue.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/cumulus_pallet_xcmp_queue.rs index 65844ce194a0e66415179919a444eacbf6cccbe0..89c80d0be62862b9b4c0488d12c9e7ed7e122a81 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/cumulus_pallet_xcmp_queue.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/cumulus_pallet_xcmp_queue.rs @@ -1,43 +1,38 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `cumulus_pallet_xcmp_queue` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-polkadot-dev")`, DB CACHE: 1024 +//! HOSTNAME: `Olivers-MacBook-Pro.local`, CPU: `` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-polkadot-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// ./target/release/polkadot-parachain // benchmark // pallet -// --chain=asset-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=cumulus_pallet_xcmp_queue -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-polkadot/src/weights/ +// --pallet +// cumulus-pallet-xcmp-queue +// --chain +// asset-hub-polkadot-dev +// --output +// cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/cumulus_pallet_xcmp_queue.rs +// --extrinsic +// #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -56,22 +51,98 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `76` // Estimated: `1561` - // Minimum execution time: 5_240_000 picoseconds. - Weight::from_parts(5_487_000, 0) + // Minimum execution time: 6_000_000 picoseconds. + Weight::from_parts(6_000_000, 0) .saturating_add(Weight::from_parts(0, 1561)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `XcmpQueue::QueueConfig` (r:1 w:1) + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn set_config_with_weight() -> Weight { + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn enqueue_xcmp_message() -> Weight { + // Proof Size summary in bytes: + // Measured: `82` + // Estimated: `3517` + // Minimum execution time: 14_000_000 picoseconds. + Weight::from_parts(15_000_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn suspend_channel() -> Weight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `1561` - // Minimum execution time: 5_243_000 picoseconds. - Weight::from_parts(5_549_000, 0) + // Minimum execution time: 3_000_000 picoseconds. + Weight::from_parts(4_000_000, 0) .saturating_add(Weight::from_parts(0, 1561)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn resume_channel() -> Weight { + // Proof Size summary in bytes: + // Measured: `111` + // Estimated: `1596` + // Minimum execution time: 4_000_000 picoseconds. 
+ Weight::from_parts(5_000_000, 0) + .saturating_add(Weight::from_parts(0, 1596)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn take_first_concatenated_xcm() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 44_000_000 picoseconds. + Weight::from_parts(46_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Storage: `XcmpQueue::InboundXcmpMessages` (r:1 w:1) + /// Proof: `XcmpQueue::InboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) + /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn on_idle_good_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65711` + // Estimated: `69176` + // Minimum execution time: 62_000_000 picoseconds. + Weight::from_parts(68_000_000, 0) + .saturating_add(Weight::from_parts(0, 69176)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + fn on_idle_large_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65710` + // Estimated: `69175` + // Minimum execution time: 42_000_000 picoseconds. + Weight::from_parts(45_000_000, 0) + .saturating_add(Weight::from_parts(0, 69175)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/frame_system.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/frame_system.rs index 713c33d34f77f5cc3a5e659a2b31dbbfd5721931..0b988fd0f6fd3d5fb6b2a3703b23dcd99efcaa65 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/frame_system.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/frame_system.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `frame_system` //! diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/mod.rs index 3eb3b925e651836446a7ea771cb20f9b9444594d..0823dcad88e97f6cc3a4eb2b60da82de2ed816d6 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/mod.rs @@ -1,20 +1,21 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
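// A hedged sketch (names assumed, not read from this diff) of how the weight modules
// registered below are consumed: each pallet's `Config` in the runtime points its
// `WeightInfo` at the generated struct, parameterised by the runtime type.
//
//   impl pallet_message_queue::Config for Runtime {
//       type WeightInfo = weights::pallet_message_queue::WeightInfo<Runtime>;
//       // ...
//   }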
pub mod block_weights; +pub mod cumulus_pallet_dmp_queue; +pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; @@ -22,6 +23,7 @@ pub mod pallet_assets_foreign; pub mod pallet_assets_local; pub mod pallet_balances; pub mod pallet_collator_selection; +pub mod pallet_message_queue; pub mod pallet_multisig; pub mod pallet_nfts; pub mod pallet_proxy; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_assets_foreign.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_assets_foreign.rs index 51413bb471b953b6724def2d8d009532425c02da..adb686c0afc3d77c16c565fbf2234e17f447b5fd 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_assets_foreign.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_assets_foreign.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_assets` //! diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_assets_local.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_assets_local.rs index c8420e72ba213fda9f11754552cf05fb84a530d4..810f5b57c45dcc5f33fc76f3417682a0d517d6ed 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_assets_local.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_assets_local.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . 
+// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_assets` //! diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_balances.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_balances.rs index a7f02ba24fd13ff409e599b5344d8c6a512aa9c9..7c4501e6d882ef61bd3e608b76750287245eb83a 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_balances.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_balances.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_balances` //! diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_collator_selection.rs index 53efb218440a87dbd5557708a42c92ef39add826..b3062984baf023c083f54bb3f7519c4fbb850b40 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_collator_selection.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_collator_selection.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_collator_selection` //! @@ -122,7 +121,7 @@ impl pallet_collator_selection::WeightInfo for WeightIn } /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - fn set_candidacy_bond() -> Weight { + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` @@ -176,6 +175,30 @@ impl pallet_collator_selection::WeightInfo for WeightIn .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } + fn update_bond(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn take_candidate_slot(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `System::BlockWeight` (r:1 w:1) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_message_queue.rs new file mode 100644 index 0000000000000000000000000000000000000000..a9f0cb07cfe1385d62df0f1e81cd1b03cc963f7e --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_message_queue.rs @@ -0,0 +1,179 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_message_queue` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-03-24, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westmint-dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --chain +// westmint-dev +// --pallet +// pallet_message_queue +// --extrinsic +// * +// --execution +// wasm +// --wasm-execution +// compiled +// --output +// parachains/runtimes/assets/westmint/src/weights + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::Weight}; +use sp_std::marker::PhantomData; + +/// Weight functions for `pallet_message_queue`. +pub struct WeightInfo(PhantomData); +impl pallet_message_queue::WeightInfo for WeightInfo { + /// Storage: MessageQueue ServiceHead (r:1 w:0) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: MessageQueue BookStateFor (r:2 w:2) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn ready_ring_knit() -> Weight { + // Proof Size summary in bytes: + // Measured: `189` + // Estimated: `7534` + // Minimum execution time: 12_192_000 picoseconds. + Weight::from_parts(12_192_000, 0) + .saturating_add(Weight::from_parts(0, 7534)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: MessageQueue BookStateFor (r:2 w:2) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + fn ready_ring_unknit() -> Weight { + // Proof Size summary in bytes: + // Measured: `184` + // Estimated: `7534` + // Minimum execution time: 10_447_000 picoseconds. + Weight::from_parts(10_447_000, 0) + .saturating_add(Weight::from_parts(0, 7534)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn service_queue_base() -> Weight { + // Proof Size summary in bytes: + // Measured: `6` + // Estimated: `3517` + // Minimum execution time: 4_851_000 picoseconds. + Weight::from_parts(4_851_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn service_page_base_completion() -> Weight { + // Proof Size summary in bytes: + // Measured: `72` + // Estimated: `69050` + // Minimum execution time: 6_342_000 picoseconds. 
+ Weight::from_parts(6_342_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn service_page_base_no_completion() -> Weight { + // Proof Size summary in bytes: + // Measured: `72` + // Estimated: `69050` + // Minimum execution time: 6_199_000 picoseconds. + Weight::from_parts(6_199_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn service_page_item() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 58_612_000 picoseconds. + Weight::from_parts(58_612_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: MessageQueue BookStateFor (r:1 w:0) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn bump_service_head() -> Weight { + // Proof Size summary in bytes: + // Measured: `99` + // Estimated: `5007` + // Minimum execution time: 7_296_000 picoseconds. + Weight::from_parts(7_296_000, 0) + .saturating_add(Weight::from_parts(0, 5007)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn reap_page() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `72567` + // Minimum execution time: 48_345_000 picoseconds. + Weight::from_parts(48_345_000, 0) + .saturating_add(Weight::from_parts(0, 72567)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn execute_overweight_page_removed() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `72567` + // Minimum execution time: 56_441_000 picoseconds. + Weight::from_parts(56_441_000, 0) + .saturating_add(Weight::from_parts(0, 72567)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn execute_overweight_page_updated() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `72567` + // Minimum execution time: 70_858_000 picoseconds. 
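// Illustrative back-of-envelope arithmetic (assumptions, not figures from this runtime):
// with a service budget like the 35% cap configured earlier and a per-item cost in the
// tens of microseconds (cf. `service_page_item` above), ref-time alone admits a few
// thousand items per block; real servicing is further bounded by proof size and pages.
use frame_support::weights::Weight;

fn items_within_budget(budget: Weight, per_item: Weight) -> u64 {
    let by_time = budget.ref_time() / per_item.ref_time().max(1);
    let by_proof = budget.proof_size() / per_item.proof_size().max(1);
    by_time.min(by_proof)
}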
+ Weight::from_parts(70_858_000, 0) + .saturating_add(Weight::from_parts(0, 72567)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_multisig.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_multisig.rs index 705aca9e1a4e78222f231a11ca44dd27ac20c70e..0bb05511d7a863dd0ba641e8e0e6e625c9498313 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_multisig.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_multisig.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_multisig` //! diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_nfts.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_nfts.rs index 6d6f7cbbafb44c31b55def7c36a731aee984bf40..842daf49f599c0f7c349a2b6c25f797db9a79d19 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_nfts.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_nfts.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_nfts` //! diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_proxy.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_proxy.rs index 99db2865692fc6e0316285f4985a974d85dba766..b6121f2fca2ecdee22f31a3ac1f67f4dc72ae813 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_proxy.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_proxy.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_proxy` //! diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_session.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_session.rs index 8a6943d53048047641132a70d6cade4983e503b6..560322abeb3f6baf573fa6dfd9caba41b1e8cbd6 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_session.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_session.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . 
+// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_session` //! diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_timestamp.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_timestamp.rs index 8c6a2b5505e56afe4b3f32c06d2a3ccb59bc36f0..17b050c3e90be8bc506d2f505cebefc227137c48 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_timestamp.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_timestamp.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_timestamp` //! diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_uniques.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_uniques.rs index a88928be65348beb3fc0fe29c893e1de0bacacfe..5b13d56f5bb2edb491d2e4d76ef64a10d45d4e0c 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_uniques.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_uniques.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_uniques` //! diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_utility.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_utility.rs index c6fc093cc4b44e1396d49b4a74e34eb99f07bd3c..d028fb898a4a7cd6147302cbf549ed62095bf43e 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_utility.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_utility.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_utility` //! diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_xcm.rs index bd7615895e2e96fa95d45389e3296d988fc7057e..27867e278ed06c21bb488a1841b3d80e491c50d5 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_xcm.rs @@ -17,27 +17,25 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-polkadot-dev")`, DB CACHE: 1024 +//! 
HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-polkadot-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=asset-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-polkadot/src/weights/ +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=asset-hub-polkadot-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,6 +48,8 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -64,10 +64,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `75` // Estimated: `3540` - // Minimum execution time: 28_284_000 picoseconds. - Weight::from_parts(29_186_000, 0) + // Minimum execution time: 25_203_000 picoseconds. + Weight::from_parts(25_927_000, 0) .saturating_add(Weight::from_parts(0, 3540)) - .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) @@ -76,8 +76,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1489` - // Minimum execution time: 24_830_000 picoseconds. - Weight::from_parts(26_312_000, 0) + // Minimum execution time: 20_113_000 picoseconds. + Weight::from_parts(20_439_000, 0) .saturating_add(Weight::from_parts(0, 1489)) .saturating_add(T::DbWeight::get().reads(1)) } @@ -87,8 +87,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1489` - // Minimum execution time: 18_584_000 picoseconds. - Weight::from_parts(19_083_000, 0) + // Minimum execution time: 14_959_000 picoseconds. + Weight::from_parts(15_264_000, 0) .saturating_add(Weight::from_parts(0, 1489)) .saturating_add(T::DbWeight::get().reads(1)) } @@ -108,8 +108,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_415_000 picoseconds. - Weight::from_parts(9_821_000, 0) + // Minimum execution time: 7_399_000 picoseconds. + Weight::from_parts(7_674_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -119,8 +119,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_902_000 picoseconds. 
- Weight::from_parts(3_377_000, 0) + // Minimum execution time: 2_388_000 picoseconds. + Weight::from_parts(2_522_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -128,6 +128,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -144,14 +146,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `75` // Estimated: `3540` - // Minimum execution time: 32_730_000 picoseconds. - Weight::from_parts(33_879_000, 0) + // Minimum execution time: 28_791_000 picoseconds. + Weight::from_parts(29_443_000, 0) .saturating_add(Weight::from_parts(0, 3540)) - .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -166,12 +170,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_unsubscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `257` - // Estimated: `3722` - // Minimum execution time: 34_053_000 picoseconds. - Weight::from_parts(34_506_000, 0) - .saturating_add(Weight::from_parts(0, 3722)) - .saturating_add(T::DbWeight::get().reads(6)) + // Measured: `292` + // Estimated: `3757` + // Minimum execution time: 30_880_000 picoseconds. + Weight::from_parts(31_675_000, 0) + .saturating_add(Weight::from_parts(0, 3757)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) @@ -180,8 +184,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_824_000 picoseconds. - Weight::from_parts(2_986_000, 0) + // Minimum execution time: 2_365_000 picoseconds. + Weight::from_parts(2_550_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -191,8 +195,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `129` // Estimated: `11019` - // Minimum execution time: 17_011_000 picoseconds. - Weight::from_parts(17_488_000, 0) + // Minimum execution time: 17_185_000 picoseconds. 
+ Weight::from_parts(17_680_000, 0) .saturating_add(Weight::from_parts(0, 11019)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -203,8 +207,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `133` // Estimated: `11023` - // Minimum execution time: 17_191_000 picoseconds. - Weight::from_parts(17_784_000, 0) + // Minimum execution time: 16_974_000 picoseconds. + Weight::from_parts(17_660_000, 0) .saturating_add(Weight::from_parts(0, 11023)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -215,13 +219,15 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `140` // Estimated: `13505` - // Minimum execution time: 18_625_000 picoseconds. - Weight::from_parts(19_177_000, 0) + // Minimum execution time: 18_536_000 picoseconds. + Weight::from_parts(19_292_000, 0) .saturating_add(Weight::from_parts(0, 13505)) .saturating_add(T::DbWeight::get().reads(5)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -236,10 +242,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `142` // Estimated: `6082` - // Minimum execution time: 30_762_000 picoseconds. - Weight::from_parts(31_481_000, 0) + // Minimum execution time: 27_368_000 picoseconds. + Weight::from_parts(28_161_000, 0) .saturating_add(Weight::from_parts(0, 6082)) - .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) @@ -248,8 +254,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `172` // Estimated: `8587` - // Minimum execution time: 9_025_000 picoseconds. - Weight::from_parts(9_423_000, 0) + // Minimum execution time: 9_553_000 picoseconds. + Weight::from_parts(9_899_000, 0) .saturating_add(Weight::from_parts(0, 8587)) .saturating_add(T::DbWeight::get().reads(3)) } @@ -259,14 +265,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `140` // Estimated: `11030` - // Minimum execution time: 17_550_000 picoseconds. - Weight::from_parts(17_939_000, 0) + // Minimum execution time: 17_445_000 picoseconds. 
+ Weight::from_parts(18_206_000, 0) .saturating_add(Weight::from_parts(0, 11030)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -281,10 +289,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `146` // Estimated: `11036` - // Minimum execution time: 36_922_000 picoseconds. - Weight::from_parts(37_709_000, 0) + // Minimum execution time: 34_200_000 picoseconds. + Weight::from_parts(35_198_000, 0) .saturating_add(Weight::from_parts(0, 11036)) - .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn new_query() -> Weight { + // Proof Size summary in bytes: + // Measured: `69` + // Estimated: `1554` + // Minimum execution time: 4_679_000 picoseconds. + Weight::from_parts(4_841_000, 0) + .saturating_add(Weight::from_parts(0, 1554)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::Queries` (r:1 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn take_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `7706` + // Estimated: `11171` + // Minimum execution time: 27_281_000 picoseconds. + Weight::from_parts(27_694_000, 0) + .saturating_add(Weight::from_parts(0, 11171)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/mod.rs index 55fed809e2b750d96d279c0558264918a8b60eb9..44fcb91d6880cdee33fa9142e7eff5ca01427db4 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/mod.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . 
+// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. mod pallet_xcm_benchmarks_fungible; mod pallet_xcm_benchmarks_generic; @@ -61,16 +60,8 @@ impl XcmWeightInfo for AssetHubPolkadotXcmWeight { fn withdraw_asset(assets: &MultiAssets) -> Weight { assets.weigh_multi_assets(XcmFungibleWeight::::withdraw_asset()) } - // Currently there is no trusted reserve (`IsReserve = ()`), - // but we need this hack for `pallet_xcm::reserve_transfer_assets` - // (TODO) fix https://github.com/paritytech/polkadot/pull/7424 - // (TODO) fix https://github.com/paritytech/polkadot/pull/7546 - fn reserve_asset_deposited(_assets: &MultiAssets) -> Weight { - // TODO: if we change `IsReserve = ...` then use this line... - // TODO: or if remote weight estimation is fixed, then remove - // TODO: hardcoded - fix https://github.com/paritytech/cumulus/issues/1974 - let hardcoded_weight = Weight::from_parts(1_000_000_000_u64, 0); - hardcoded_weight.min(XcmFungibleWeight::::reserve_asset_deposited()) + fn reserve_asset_deposited(assets: &MultiAssets) -> Weight { + assets.weigh_multi_assets(XcmFungibleWeight::::reserve_asset_deposited()) } fn receive_teleported_asset(assets: &MultiAssets) -> Weight { assets.weigh_multi_assets(XcmFungibleWeight::::receive_teleported_asset()) @@ -127,10 +118,7 @@ impl XcmWeightInfo for AssetHubPolkadotXcmWeight { } fn deposit_asset(assets: &MultiAssetFilter, _dest: &MultiLocation) -> Weight { - // Hardcoded till the XCM pallet is fixed - let hardcoded_weight = Weight::from_parts(1_000_000_000_u64, 0); - let weight = assets.weigh_multi_assets(XcmFungibleWeight::::deposit_asset()); - hardcoded_weight.min(weight) + assets.weigh_multi_assets(XcmFungibleWeight::::deposit_asset()) } fn deposit_reserve_asset( assets: &MultiAssetFilter, diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs index 4f64ea3fa1bb37a4a91274de003dbf49349d6e4f..1036d4cbf00fb2261ef2a12a7b264c92dee8e103 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs @@ -1,44 +1,41 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . 
+// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_xcm_benchmarks::fungible` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("asset-hub-polkadot-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-nbnwcyh-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: Compiled, CHAIN: Some("asset-hub-polkadot-dev"), DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --template=./templates/xcm-bench-template.hbs -// --chain=asset-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm_benchmarks::fungible -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm_benchmarks::fungible +// --chain=asset-hub-polkadot-dev +// --header=./cumulus/file_header.txt +// --template=./cumulus/templates/xcm-bench-template.hbs +// --output=./cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -56,8 +53,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `3593` - // Minimum execution time: 26_090_000 picoseconds. - Weight::from_parts(27_006_000, 3593) + // Minimum execution time: 25_903_000 picoseconds. + Weight::from_parts(26_768_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -67,8 +64,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `6196` - // Minimum execution time: 50_699_000 picoseconds. - Weight::from_parts(51_888_000, 6196) + // Minimum execution time: 51_042_000 picoseconds. + Weight::from_parts(51_939_000, 6196) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -90,8 +87,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `176` // Estimated: `6196` - // Minimum execution time: 72_130_000 picoseconds. - Weight::from_parts(73_994_000, 6196) + // Minimum execution time: 74_626_000 picoseconds. 
+ Weight::from_parts(75_963_000, 6196) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -101,8 +98,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 500_000_000_000 picoseconds. - Weight::from_parts(500_000_000_000, 0) + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -120,8 +117,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `75` // Estimated: `3540` - // Minimum execution time: 477_183_000 picoseconds. - Weight::from_parts(488_156_000, 3540) + // Minimum execution time: 480_030_000 picoseconds. + Weight::from_parts(486_039_000, 3540) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -129,8 +126,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_966_000 picoseconds. - Weight::from_parts(4_129_000, 0) + // Minimum execution time: 3_936_000 picoseconds. + Weight::from_parts(4_033_000, 0) } // Storage: `System::Account` (r:1 w:1) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) @@ -138,8 +135,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 26_047_000 picoseconds. - Weight::from_parts(26_982_000, 3593) + // Minimum execution time: 26_274_000 picoseconds. + Weight::from_parts(26_609_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -161,8 +158,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `75` // Estimated: `3593` - // Minimum execution time: 51_076_000 picoseconds. - Weight::from_parts(51_826_000, 3593) + // Minimum execution time: 52_888_000 picoseconds. + Weight::from_parts(53_835_000, 3593) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -182,8 +179,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `75` // Estimated: `3540` - // Minimum execution time: 30_606_000 picoseconds. - Weight::from_parts(31_168_000, 3540) + // Minimum execution time: 33_395_000 picoseconds. + Weight::from_parts(33_827_000, 3540) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index 061992691a605b42702a292eeb11e0520160272c..12ef504727effc5008a70b0ee00506297b3ce77a 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_xcm_benchmarks::generic` //! diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/xcm_config.rs index 65cf62a610f472105450db14429c5ae29b9b6843..b3c2ce4da76f3ea3720c1555a16397adcf8c6497 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/xcm_config.rs @@ -16,30 +16,32 @@ use super::{ AccountId, AllPalletsWithSystem, Assets, Authorship, Balance, Balances, ForeignAssets, ParachainInfo, ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, - TrustBackedAssetsInstance, WeightToFee, XcmpQueue, -}; -use assets_common::matching::{ - FromSiblingParachain, IsForeignConcreteAsset, StartsWith, StartsWithExplicitGlobalConsensus, + TransactionByteFee, TrustBackedAssetsInstance, WeightToFee, XcmpQueue, CENTS, }; +use assets_common::matching::{FromSiblingParachain, IsForeignConcreteAsset}; use frame_support::{ match_types, parameter_types, traits::{ConstU32, Contains, Everything, Nothing, PalletInfoAccess}, }; use frame_system::EnsureRoot; use pallet_xcm::XcmPassthrough; -use parachains_common::{impls::ToStakingPot, xcm_config::AssetFeeAsExistentialDepositMultiplier}; +use parachains_common::{ + impls::ToStakingPot, + xcm_config::{AssetFeeAsExistentialDepositMultiplier, ConcreteAssetFromSystem}, +}; use polkadot_parachain_primitives::primitives::Sibling; +use polkadot_runtime_common::xcm_sender::ExponentialPrice; use sp_runtime::traits::ConvertInto; use xcm::latest::prelude::*; use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, CurrencyAdapter, DenyReserveTransferToRelayChain, DenyThenTry, DescribeFamily, DescribePalletTerminal, - EnsureXcmOrigin, FungiblesAdapter, HashedDescription, IsConcrete, LocalMint, NativeAsset, - NoChecking, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, + EnsureXcmOrigin, FungiblesAdapter, HashedDescription, IsConcrete, LocalMint, NoChecking, + ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, - SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, - WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, + SovereignSignedViaLocation, StartsWith, StartsWithExplicitGlobalConsensus, TakeWeightCredit, + TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, 
WithComputedOrigin, WithUniqueTopic, }; use xcm_executor::{traits::WithOriginFilter, XcmExecutor}; @@ -229,7 +231,7 @@ impl Contains for SafeCallFilter { pallet_collator_selection::Call::remove_invulnerable { .. }, ) | RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | RuntimeCall::XcmpQueue(..) | - RuntimeCall::DmpQueue(..) | + RuntimeCall::MessageQueue(..) | RuntimeCall::Assets( pallet_assets::Call::create { .. } | pallet_assets::Call::force_create { .. } | @@ -366,7 +368,7 @@ pub type Barrier = TrailingSetTopicAsId< // Allow XCMs with some computed origins to pass through. WithComputedOrigin< ( - // If the message is one that immediately attemps to pay for execution, then + // If the message is one that immediately attempts to pay for execution, then // allow it. AllowTopLevelPaidExecutionFrom, // Parent, its pluralities (i.e. governance bodies), and the Fellows plurality @@ -394,6 +396,15 @@ pub type AssetFeeAsExistentialDepositMultiplierFeeCharger = AssetFeeAsExistentia TrustBackedAssetsInstance, >; +/// Cases where a remote origin is accepted as trusted Teleporter for a given asset: +/// +/// - DOT with the parent Relay Chain and sibling system parachains; and +/// - Sibling parachains' assets from where they originate (as `ForeignCreators`). +pub type TrustedTeleporters = ( + ConcreteAssetFromSystem, + IsForeignConcreteAsset>>, +); + pub struct XcmConfig; impl xcm_executor::Config for XcmConfig { type RuntimeCall = RuntimeCall; @@ -404,13 +415,7 @@ impl xcm_executor::Config for XcmConfig { // Asset Hub acting _as_ a reserve location for DOT and assets created under `pallet-assets`. // For DOT, users must use teleport where allowed (e.g. with the Relay Chain). type IsReserve = (); - // We allow: - // - teleportation of DOT - // - teleportation of sibling parachain's assets (as ForeignCreators) - type IsTeleporter = ( - NativeAsset, - IsForeignConcreteAsset>>, - ); + type IsTeleporter = TrustedTeleporters; type UniversalLocation = UniversalLocation; type Barrier = Barrier; type Weigher = WeightInfoBounds< @@ -452,20 +457,25 @@ impl xcm_executor::Config for XcmConfig { /// Forms the basis for local origins sending/executing XCMs. pub type LocalOriginToLocation = SignedToAccountId32; +parameter_types! { + /// The asset ID for the asset that we use to pay for message delivery fees. + pub FeeAssetId: AssetId = Concrete(DotLocation::get()); + /// The base fee for the message delivery fees. + pub const BaseDeliveryFee: u128 = CENTS.saturating_mul(3); +} + +pub type PriceForParentDelivery = + ExponentialPrice; + /// The means for routing XCM messages which are not for local execution into the right message /// queues. pub type XcmRouter = WithUniqueTopic<( // Two routers - use UMP to communicate with the relay chain: - cumulus_primitives_utility::ParentAsUmp, + cumulus_primitives_utility::ParentAsUmp, // ..and XCMP to communicate with the sibling chains. XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; // We want to disallow users sending (arbitrary) XCMs from this chain. 
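
Editorial note on the hunk above: the new `PriceForParentDelivery = ExponentialPrice<FeeAssetId, BaseDeliveryFee, TransactionByteFee, ParachainSystem>` router prices relay-chain message delivery in DOT as a flat base fee plus a per-byte fee, both scaled by the `UpwardDeliveryFeeFactor` that `ParachainSystem` raises while the upward channel is congested (which is why the regenerated `pallet_xcm` weights above gain an extra `ParachainSystem::UpwardDeliveryFeeFactor` read). The following is a minimal, standalone sketch of that pricing idea only; the constants `BASE_FEE`/`BYTE_FEE` and the caller-supplied `congestion_factor` are illustrative assumptions, not the actual `polkadot_runtime_common::xcm_sender::ExponentialPrice` implementation or its real values.

use sp_runtime::{FixedPointNumber, FixedU128};

// Hypothetical stand-ins for `BaseDeliveryFee` and `TransactionByteFee` (magnitudes are
// illustrative only).
const BASE_FEE: u128 = 30_000_000;
const BYTE_FEE: u128 = 1_000_000;

/// Price a single upward message of `msg_len` bytes given the current congestion factor.
fn delivery_fee(msg_len: u32, congestion_factor: FixedU128) -> u128 {
    // Flat part: a base fee plus a per-byte component.
    let flat = BASE_FEE.saturating_add(BYTE_FEE.saturating_mul(msg_len as u128));
    // The factor starts at 1 and is ratcheted upwards while the upward queue stays
    // congested, so sustained congestion makes delivery progressively more expensive.
    congestion_factor.saturating_mul_int(flat)
}

fn main() {
    let calm = delivery_fee(100, FixedU128::saturating_from_integer(1));
    let busy = delivery_fee(100, FixedU128::saturating_from_rational(3, 2)); // factor 1.5
    assert!(busy > calm);
}

With a factor of 1 only the flat part is charged; as the factor grows the same message costs proportionally more, which is the back-pressure the runtime relies on instead of the old hardcoded weights removed elsewhere in this patch.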
@@ -495,8 +505,6 @@ impl pallet_xcm::Config for Runtime { type SovereignAccountOf = LocationToAccountId; type MaxLockers = ConstU32<8>; type WeightInfo = crate::weights::pallet_xcm::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/tests/tests.rs index 7200ebc16a2875336fc50e711386cab41df6c9b4..b7e44646ece7accc57238e995d59ef2cc02abff4 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/tests/tests.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/tests/tests.rs @@ -19,12 +19,13 @@ use asset_hub_polkadot_runtime::xcm_config::{ AssetFeeAsExistentialDepositMultiplierFeeCharger, CheckingAccount, DotLocation, - ForeignCreatorsSovereignAccountOf, TrustBackedAssetsPalletLocation, XcmConfig, + ForeignCreatorsSovereignAccountOf, LocationToAccountId, TrustBackedAssetsPalletLocation, + XcmConfig, }; pub use asset_hub_polkadot_runtime::{ AllPalletsWithoutSystem, AssetDeposit, Assets, Balances, ExistentialDeposit, ForeignAssets, ForeignAssetsInstance, MetadataDepositBase, MetadataDepositPerByte, ParachainSystem, Runtime, - RuntimeCall, RuntimeEvent, SessionKeys, System, TrustBackedAssetsInstance, + RuntimeCall, RuntimeEvent, SessionKeys, System, TrustBackedAssetsInstance, XcmpQueue, }; use asset_test_utils::{CollatorSessionKeys, ExtBuilder}; use codec::{Decode, Encode}; @@ -531,12 +532,6 @@ asset_test_utils::include_teleports_for_native_asset_works!( _ => None, } }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }), 1000 ); @@ -657,3 +652,32 @@ asset_test_utils::include_create_and_manage_foreign_assets_for_local_consensus_p assert_eq!(ForeignAssets::asset_ids().collect::>().len(), 1); }) ); + +#[test] +fn reserve_transfer_native_asset_to_non_teleport_para_works() { + asset_test_utils::test_cases::reserve_transfer_native_asset_to_non_teleport_para_works::< + Runtime, + AllPalletsWithoutSystem, + XcmConfig, + ParachainSystem, + XcmpQueue, + LocationToAccountId, + >( + collator_session_keys(), + ExistentialDeposit::get(), + AccountId::from(ALICE), + Box::new(|runtime_event_encoded: Vec| { + match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { + Ok(RuntimeEvent::PolkadotXcm(event)) => Some(event), + _ => None, + } + }), + Box::new(|runtime_event_encoded: Vec| { + match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { + Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), + _ => None, + } + }), + WeightLimit::Unlimited, + ); +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..0773eb0c8eb5af40f20e0904450ca53ba1dded13 --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml @@ -0,0 +1,256 @@ +[package] +name = "asset-hub-rococo-runtime" +version = "0.9.420" +authors.workspace = true +edition.workspace = true +description = "Rococo variant of Asset Hub parachain runtime" +license = "Apache-2.0" + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } +hex-literal = { version = 
"0.4.1" } +log = { version = "0.4.20", default-features = false } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +smallvec = "1.11.0" + +# Substrate +frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true} +frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} +frame-support = { path = "../../../../../substrate/frame/support", default-features = false} +frame-system = { path = "../../../../../substrate/frame/system", default-features = false} +frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true} +frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false} +frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true} +pallet-asset-conversion-tx-payment = { path = "../../../../../substrate/frame/transaction-payment/asset-conversion-tx-payment", default-features = false} +pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false} +pallet-asset-conversion = { path = "../../../../../substrate/frame/asset-conversion", default-features = false} +pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} +pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false} +pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false} +pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false} +pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false} +pallet-nft-fractionalization = { path = "../../../../../substrate/frame/nft-fractionalization", default-features = false} +pallet-nfts = { path = "../../../../../substrate/frame/nfts", default-features = false} +pallet-nfts-runtime-api = { path = "../../../../../substrate/frame/nfts/runtime-api", default-features = false} +pallet-proxy = { path = "../../../../../substrate/frame/proxy", default-features = false} +pallet-session = { path = "../../../../../substrate/frame/session", default-features = false} +pallet-state-trie-migration = { path = "../../../../../substrate/frame/state-trie-migration", default-features = false, optional = true } +pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false} +pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false} +pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false} +pallet-uniques = { path = "../../../../../substrate/frame/uniques", default-features = false} +pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false} +sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} +sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} +sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} +sp-genesis-builder = { 
path = "../../../../../substrate/primitives/genesis-builder", default-features = false } +sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} +sp-session = { path = "../../../../../substrate/primitives/session", default-features = false} +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false} +sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false} +sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false} +sp-version = { path = "../../../../../substrate/primitives/version", default-features = false} +sp-weights = { path = "../../../../../substrate/primitives/weights", default-features = false} +# num-traits feature needed for dex integer sq root: +primitive-types = { version = "0.12.1", default-features = false, features = ["codec", "scale-info", "num-traits"] } + +# Polkadot +rococo-runtime-constants = { path = "../../../../../polkadot/runtime/rococo/constants", default-features = false} +pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false} +pallet-xcm-benchmarks = { path = "../../../../../polkadot/xcm/pallet-xcm-benchmarks", default-features = false, optional = true } +polkadot-core-primitives = { path = "../../../../../polkadot/core-primitives", default-features = false} +polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false} +polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false} +xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} +xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false} +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} + +# Cumulus +cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } +cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue", default-features = false } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } +cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false} +cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } +cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false, features = ["bridging"] } +cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } +cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } +pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } +parachains-common = { path = "../../../common", default-features = false } +assets-common = { path = "../common", default-features = false } + +# Bridges +pallet-xcm-bridge-hub-router = { path = "../../../../../bridges/modules/xcm-bridge-hub-router", default-features = false } +bp-asset-hub-rococo = { path = "../../../../../bridges/primitives/chain-asset-hub-rococo", 
default-features = false } +bp-asset-hub-westend = { path = "../../../../../bridges/primitives/chain-asset-hub-westend", default-features = false } +bp-bridge-hub-rococo = { path = "../../../../../bridges/primitives/chain-bridge-hub-rococo", default-features = false } +bp-bridge-hub-westend = { path = "../../../../../bridges/primitives/chain-bridge-hub-westend", default-features = false } + +[dev-dependencies] +asset-test-utils = { path = "../test-utils" } + +[build-dependencies] +substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } + +[features] +default = [ "std" ] +# When enabled the `state_version` is set to `1`. +# This means that the chain will start using the new state format. The migration is lazy, so +# it requires to write a storage value to use the new state format. To migrate all the other +# storage values that aren't touched the state migration pallet is added as well. +# This pallet will migrate the entire state, controlled through some account. +# +# This feature should be removed when the main-net will be migrated. +state-trie-version-1 = [ "pallet-state-trie-migration" ] +runtime-benchmarks = [ + "assets-common/runtime-benchmarks", + "cumulus-pallet-dmp-queue/runtime-benchmarks", + "cumulus-pallet-parachain-system/runtime-benchmarks", + "cumulus-pallet-session-benchmarking/runtime-benchmarks", + "cumulus-pallet-xcmp-queue/runtime-benchmarks", + "cumulus-primitives-core/runtime-benchmarks", + "cumulus-primitives-utility/runtime-benchmarks", + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system-benchmarking/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-asset-conversion/runtime-benchmarks", + "pallet-assets/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-collator-selection/runtime-benchmarks", + "pallet-message-queue/runtime-benchmarks", + "pallet-multisig/runtime-benchmarks", + "pallet-nft-fractionalization/runtime-benchmarks", + "pallet-nfts/runtime-benchmarks", + "pallet-proxy/runtime-benchmarks", + "pallet-state-trie-migration/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks", + "pallet-uniques/runtime-benchmarks", + "pallet-utility/runtime-benchmarks", + "pallet-xcm-benchmarks/runtime-benchmarks", + "pallet-xcm-bridge-hub-router/runtime-benchmarks", + "pallet-xcm/runtime-benchmarks", + "parachains-common/runtime-benchmarks", + "polkadot-parachain-primitives/runtime-benchmarks", + "polkadot-runtime-common/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", + "xcm-builder/runtime-benchmarks", + "xcm-executor/runtime-benchmarks", +] +try-runtime = [ + "cumulus-pallet-aura-ext/try-runtime", + "cumulus-pallet-dmp-queue/try-runtime", + "cumulus-pallet-parachain-system/try-runtime", + "cumulus-pallet-xcm/try-runtime", + "cumulus-pallet-xcmp-queue/try-runtime", + "frame-executive/try-runtime", + "frame-support/try-runtime", + "frame-system/try-runtime", + "frame-try-runtime/try-runtime", + "pallet-asset-conversion-tx-payment/try-runtime", + "pallet-asset-conversion/try-runtime", + "pallet-assets/try-runtime", + "pallet-aura/try-runtime", + "pallet-authorship/try-runtime", + "pallet-balances/try-runtime", + "pallet-collator-selection/try-runtime", + "pallet-message-queue/try-runtime", + "pallet-multisig/try-runtime", + "pallet-nft-fractionalization/try-runtime", + "pallet-nfts/try-runtime", + "pallet-proxy/try-runtime", + "pallet-session/try-runtime", + "pallet-state-trie-migration/try-runtime", + "pallet-timestamp/try-runtime", + 
"pallet-transaction-payment/try-runtime", + "pallet-uniques/try-runtime", + "pallet-utility/try-runtime", + "pallet-xcm-bridge-hub-router/try-runtime", + "pallet-xcm/try-runtime", + "parachain-info/try-runtime", + "polkadot-runtime-common/try-runtime", + "sp-runtime/try-runtime", +] +std = [ + "assets-common/std", + "bp-asset-hub-rococo/std", + "bp-asset-hub-westend/std", + "bp-bridge-hub-rococo/std", + "bp-bridge-hub-westend/std", + "codec/std", + "cumulus-pallet-aura-ext/std", + "cumulus-pallet-dmp-queue/std", + "cumulus-pallet-parachain-system/std", + "cumulus-pallet-session-benchmarking/std", + "cumulus-pallet-xcm/std", + "cumulus-pallet-xcmp-queue/std", + "cumulus-primitives-core/std", + "cumulus-primitives-utility/std", + "frame-benchmarking?/std", + "frame-executive/std", + "frame-support/std", + "frame-system-benchmarking?/std", + "frame-system-rpc-runtime-api/std", + "frame-system/std", + "frame-try-runtime?/std", + "log/std", + "pallet-asset-conversion-tx-payment/std", + "pallet-asset-conversion/std", + "pallet-assets/std", + "pallet-aura/std", + "pallet-authorship/std", + "pallet-balances/std", + "pallet-collator-selection/std", + "pallet-message-queue/std", + "pallet-multisig/std", + "pallet-nft-fractionalization/std", + "pallet-nfts-runtime-api/std", + "pallet-nfts/std", + "pallet-proxy/std", + "pallet-session/std", + "pallet-state-trie-migration/std", + "pallet-timestamp/std", + "pallet-transaction-payment-rpc-runtime-api/std", + "pallet-transaction-payment/std", + "pallet-uniques/std", + "pallet-utility/std", + "pallet-xcm-benchmarks?/std", + "pallet-xcm-bridge-hub-router/std", + "pallet-xcm/std", + "parachain-info/std", + "parachains-common/std", + "polkadot-core-primitives/std", + "polkadot-parachain-primitives/std", + "polkadot-runtime-common/std", + "primitive-types/std", + "rococo-runtime-constants/std", + "scale-info/std", + "sp-api/std", + "sp-block-builder/std", + "sp-consensus-aura/std", + "sp-core/std", + "sp-genesis-builder/std", + "sp-inherents/std", + "sp-offchain/std", + "sp-runtime/std", + "sp-session/std", + "sp-std/std", + "sp-storage/std", + "sp-transaction-pool/std", + "sp-version/std", + "sp-weights/std", + "substrate-wasm-builder", + "xcm-builder/std", + "xcm-executor/std", + "xcm/std", +] + +experimental = [ "pallet-aura/experimental" ] + +# A feature that should be enabled when the runtime should be built for on-chain +# deployment. This will disable stuff that shouldn't be part of the on-chain wasm +# to make it smaller like logging for example. +on-chain-release-build = [ "sp-api/disable-logging" ] diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/build.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/build.rs new file mode 100644 index 0000000000000000000000000000000000000000..60f8a125129ff1344a1799246e931acdb1d139d5 --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/build.rs @@ -0,0 +1,26 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#[cfg(feature = "std")] +fn main() { + substrate_wasm_builder::WasmBuilder::new() + .with_current_project() + .export_heap_base() + .import_memory() + .build() +} + +#[cfg(not(feature = "std"))] +fn main() {} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..b274f45877b3822c79586b3209cac1b5227a4a08 --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -0,0 +1,1671 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Asset Hub Rococo Runtime +//! +//! Asset Hub Rococo, formerly known as "Rockmine", is the test network for its Kusama cousin. + +#![cfg_attr(not(feature = "std"), no_std)] +#![recursion_limit = "256"] + +// Make the WASM binary available. +#[cfg(feature = "std")] +include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); + +mod weights; +pub mod xcm_config; + +use assets_common::{ + foreign_creators::ForeignCreators, + local_and_foreign_assets::{LocalAndForeignAssets, MultiLocationConverter}, + matching::FromSiblingParachain, + AssetIdForTrustBackedAssetsConvert, MultiLocationForAssetId, +}; +use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; +use cumulus_primitives_core::AggregateMessageOrigin; +use sp_api::impl_runtime_apis; +use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; +use sp_runtime::{ + create_runtime_str, generic, impl_opaque_keys, + traits::{ + AccountIdConversion, AccountIdLookup, BlakeTwo256, Block as BlockT, Saturating, Verify, + }, + transaction_validity::{TransactionSource, TransactionValidity}, + ApplyExtrinsicResult, Permill, +}; + +use sp_std::prelude::*; +#[cfg(feature = "std")] +use sp_version::NativeVersion; +use sp_version::RuntimeVersion; + +use codec::{Decode, Encode, MaxEncodedLen}; +use cumulus_primitives_core::ParaId; +use frame_support::{ + construct_runtime, + dispatch::DispatchClass, + genesis_builder_helper::{build_config, create_default_config}, + ord_parameter_types, parameter_types, + traits::{ + AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU32, ConstU64, ConstU8, EitherOfDiverse, + Equals, InstanceFilter, TransformOrigin, + }, + weights::{ConstantMultiplier, Weight}, + BoundedVec, PalletId, +}; +use frame_system::{ + limits::{BlockLength, BlockWeights}, + EnsureRoot, EnsureSigned, EnsureSignedBy, +}; +use pallet_asset_conversion_tx_payment::AssetConversionAdapter; +use pallet_nfts::PalletFeatures; +pub use parachains_common as common; +use parachains_common::{ + impls::DealWithFees, + message_queue::{NarrowOriginToSibling, ParaIdToSibling}, + rococo::{consensus::*, currency::*, fee::WeightToFee}, + AccountId, AssetIdForTrustBackedAssets, AuraId, Balance, BlockNumber, Hash, Header, Nonce, + Signature, 
AVERAGE_ON_INITIALIZE_RATIO, DAYS, HOURS, MAXIMUM_BLOCK_WEIGHT, + NORMAL_DISPATCH_RATIO, SLOT_DURATION, +}; + +use sp_runtime::{Perbill, RuntimeDebug}; +use xcm::opaque::v3::MultiLocation; +use xcm_config::{ + ForeignAssetsConvertedConcreteId, GovernanceLocation, PoolAssetsConvertedConcreteId, + TokenLocation, TrustBackedAssetsConvertedConcreteId, +}; + +#[cfg(any(feature = "std", test))] +pub use sp_runtime::BuildStorage; + +// Polkadot imports +use pallet_xcm::{EnsureXcm, IsVoiceOfBody}; +use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; +use xcm::latest::prelude::*; + +use crate::xcm_config::{ + ForeignCreatorsSovereignAccountOf, LocalAndForeignAssetsMultiLocationMatcher, + TrustBackedAssetsPalletLocation, +}; +use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; + +impl_opaque_keys! { + pub struct SessionKeys { + pub aura: Aura, + } +} + +#[cfg(feature = "state-trie-version-1")] +#[sp_version::runtime_version] +pub const VERSION: RuntimeVersion = RuntimeVersion { + spec_name: create_runtime_str!("statemine"), + impl_name: create_runtime_str!("statemine"), + authoring_version: 1, + spec_version: 1_003_000, + impl_version: 0, + apis: RUNTIME_API_VERSIONS, + transaction_version: 13, + state_version: 1, +}; + +#[cfg(not(feature = "state-trie-version-1"))] +#[sp_version::runtime_version] +pub const VERSION: RuntimeVersion = RuntimeVersion { + spec_name: create_runtime_str!("statemine"), + impl_name: create_runtime_str!("statemine"), + authoring_version: 1, + spec_version: 1_003_000, + impl_version: 0, + apis: RUNTIME_API_VERSIONS, + transaction_version: 13, + state_version: 0, +}; + +/// The version information used to identify this runtime when compiled natively. +#[cfg(feature = "std")] +pub fn native_version() -> NativeVersion { + NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } +} + +parameter_types! { + pub const Version: RuntimeVersion = VERSION; + pub RuntimeBlockLength: BlockLength = + BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); + pub RuntimeBlockWeights: BlockWeights = BlockWeights::builder() + .base_block(BlockExecutionWeight::get()) + .for_class(DispatchClass::all(), |weights| { + weights.base_extrinsic = ExtrinsicBaseWeight::get(); + }) + .for_class(DispatchClass::Normal, |weights| { + weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); + }) + .for_class(DispatchClass::Operational, |weights| { + weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); + // Operational transactions have some extra reserved space, so that they + // are included even if block reached `MAXIMUM_BLOCK_WEIGHT`. + weights.reserved = Some( + MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT + ); + }) + .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) + .build_or_panic(); + pub const SS58Prefix: u8 = 42; +} + +// Configure FRAME pallets to include in runtime. 
+impl frame_system::Config for Runtime { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = RuntimeBlockWeights; + type BlockLength = RuntimeBlockLength; + type AccountId = AccountId; + type RuntimeCall = RuntimeCall; + type Lookup = AccountIdLookup; + type Nonce = Nonce; + type Hash = Hash; + type Hashing = BlakeTwo256; + type Block = Block; + type RuntimeEvent = RuntimeEvent; + type RuntimeOrigin = RuntimeOrigin; + type BlockHashCount = BlockHashCount; + type DbWeight = RocksDbWeight; + type Version = Version; + type PalletInfo = PalletInfo; + type OnNewAccount = (); + type OnKilledAccount = (); + type AccountData = pallet_balances::AccountData; + type SystemWeightInfo = weights::frame_system::WeightInfo; + type SS58Prefix = SS58Prefix; + type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +impl pallet_timestamp::Config for Runtime { + /// A timestamp: milliseconds since the unix epoch. + type Moment = u64; + type OnTimestampSet = Aura; + type MinimumPeriod = ConstU64<{ SLOT_DURATION / 2 }>; + type WeightInfo = weights::pallet_timestamp::WeightInfo; +} + +impl pallet_authorship::Config for Runtime { + type FindAuthor = pallet_session::FindAccountFromAuthorIndex; + type EventHandler = (CollatorSelection,); +} + +parameter_types! { + pub const ExistentialDeposit: Balance = EXISTENTIAL_DEPOSIT; +} + +impl pallet_balances::Config for Runtime { + type MaxLocks = ConstU32<50>; + /// The type for recording an account's balance. + type Balance = Balance; + /// The ubiquitous event type. + type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = weights::pallet_balances::WeightInfo; + type MaxReserves = ConstU32<50>; + type ReserveIdentifier = [u8; 8]; + type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; + type FreezeIdentifier = (); + // We allow each account to have holds on it from: + // - `NftFractionalization`: 1 + type MaxHolds = ConstU32<1>; + type MaxFreezes = ConstU32<0>; +} + +parameter_types! { + /// Relay Chain `TransactionByteFee` / 10 + pub const TransactionByteFee: Balance = MILLICENTS; +} + +impl pallet_transaction_payment::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type OnChargeTransaction = + pallet_transaction_payment::CurrencyAdapter>; + type WeightToFee = WeightToFee; + type LengthToFee = ConstantMultiplier; + type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type OperationalFeeMultiplier = ConstU8<5>; +} + +parameter_types! { + pub const AssetDeposit: Balance = UNITS / 10; // 1 / 10 UNITS deposit to create asset + pub const AssetAccountDeposit: Balance = deposit(1, 16); + pub const ApprovalDeposit: Balance = EXISTENTIAL_DEPOSIT; + pub const AssetsStringLimit: u32 = 50; + /// Key = 32 bytes, Value = 36 bytes (32+1+1+1+1) + // https://github.com/paritytech/substrate/blob/069917b/frame/assets/src/lib.rs#L257L271 + pub const MetadataDepositBase: Balance = deposit(1, 68); + pub const MetadataDepositPerByte: Balance = deposit(0, 1); +} + +/// We allow root to execute privileged asset operations. +pub type AssetsForceOrigin = EnsureRoot; + +// Called "Trust Backed" assets because these are generally registered by some account, and users of +// the asset assume it has some claimed backing. The pallet is called `Assets` in +// `construct_runtime` to avoid breaking changes on storage reads. 
+pub type TrustBackedAssetsInstance = pallet_assets::Instance1; +type TrustBackedAssetsCall = pallet_assets::Call; +impl pallet_assets::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Balance = Balance; + type AssetId = AssetIdForTrustBackedAssets; + type AssetIdParameter = codec::Compact; + type Currency = Balances; + type CreateOrigin = AsEnsureOriginWithArg>; + type ForceOrigin = AssetsForceOrigin; + type AssetDeposit = AssetDeposit; + type MetadataDepositBase = MetadataDepositBase; + type MetadataDepositPerByte = MetadataDepositPerByte; + type ApprovalDeposit = ApprovalDeposit; + type StringLimit = AssetsStringLimit; + type Freezer = (); + type Extra = (); + type WeightInfo = weights::pallet_assets_local::WeightInfo; + type CallbackHandle = (); + type AssetAccountDeposit = AssetAccountDeposit; + type RemoveItemsLimit = frame_support::traits::ConstU32<1000>; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = (); +} + +parameter_types! { + pub const AssetConversionPalletId: PalletId = PalletId(*b"py/ascon"); + pub const AllowMultiAssetPools: bool = false; + // should be non-zero if AllowMultiAssetPools is true, otherwise can be zero + pub const LiquidityWithdrawalFee: Permill = Permill::from_percent(0); +} + +ord_parameter_types! { + pub const AssetConversionOrigin: sp_runtime::AccountId32 = + AccountIdConversion::::into_account_truncating(&AssetConversionPalletId::get()); +} + +pub type PoolAssetsInstance = pallet_assets::Instance3; +impl pallet_assets::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Balance = Balance; + type RemoveItemsLimit = ConstU32<1000>; + type AssetId = u32; + type AssetIdParameter = u32; + type Currency = Balances; + type CreateOrigin = + AsEnsureOriginWithArg>; + type ForceOrigin = AssetsForceOrigin; + // Deposits are zero because creation/admin is limited to Asset Conversion pallet. + type AssetDeposit = ConstU128<0>; + type AssetAccountDeposit = ConstU128<0>; + type MetadataDepositBase = ConstU128<0>; + type MetadataDepositPerByte = ConstU128<0>; + type ApprovalDeposit = ApprovalDeposit; + type StringLimit = ConstU32<50>; + type Freezer = (); + type Extra = (); + type WeightInfo = weights::pallet_assets_pool::WeightInfo; + type CallbackHandle = (); + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = (); +} + +impl pallet_asset_conversion::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Balance = Balance; + type HigherPrecisionBalance = sp_core::U256; + type Currency = Balances; + type AssetBalance = Balance; + type AssetId = MultiLocation; + type Assets = LocalAndForeignAssets< + Assets, + AssetIdForTrustBackedAssetsConvert, + ForeignAssets, + >; + type PoolAssets = PoolAssets; + type PoolAssetId = u32; + type PoolSetupFee = ConstU128<0>; // Asset class deposit fees are sufficient to prevent spam + type PoolSetupFeeReceiver = AssetConversionOrigin; + // should be non-zero if `AllowMultiAssetPools` is true, otherwise can be zero. + type LiquidityWithdrawalFee = LiquidityWithdrawalFee; + type LPFee = ConstU32<3>; + type PalletId = AssetConversionPalletId; + type AllowMultiAssetPools = AllowMultiAssetPools; + type MaxSwapPathLength = ConstU32<4>; + type MultiAssetId = Box; + type MultiAssetIdConverter = + MultiLocationConverter; + type MintMinLiquidity = ConstU128<100>; + type WeightInfo = weights::pallet_asset_conversion::WeightInfo; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = + crate::xcm_config::BenchmarkMultiLocationConverter>; +} + +parameter_types! 
{ + // we just reuse the same deposits + pub const ForeignAssetsAssetDeposit: Balance = AssetDeposit::get(); + pub const ForeignAssetsAssetAccountDeposit: Balance = AssetAccountDeposit::get(); + pub const ForeignAssetsApprovalDeposit: Balance = ApprovalDeposit::get(); + pub const ForeignAssetsAssetsStringLimit: u32 = AssetsStringLimit::get(); + pub const ForeignAssetsMetadataDepositBase: Balance = MetadataDepositBase::get(); + pub const ForeignAssetsMetadataDepositPerByte: Balance = MetadataDepositPerByte::get(); +} + +/// Assets managed by some foreign location. Note: we do not declare a `ForeignAssetsCall` type, as +/// this type is used in proxy definitions. We assume that a foreign location would not want to set +/// an individual, local account as a proxy for the issuance of their assets. This issuance should +/// be managed by the foreign location's governance. +pub type ForeignAssetsInstance = pallet_assets::Instance2; +impl pallet_assets::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Balance = Balance; + type AssetId = MultiLocationForAssetId; + type AssetIdParameter = MultiLocationForAssetId; + type Currency = Balances; + type CreateOrigin = ForeignCreators< + (FromSiblingParachain>,), + ForeignCreatorsSovereignAccountOf, + AccountId, + >; + type ForceOrigin = AssetsForceOrigin; + type AssetDeposit = ForeignAssetsAssetDeposit; + type MetadataDepositBase = ForeignAssetsMetadataDepositBase; + type MetadataDepositPerByte = ForeignAssetsMetadataDepositPerByte; + type ApprovalDeposit = ForeignAssetsApprovalDeposit; + type StringLimit = ForeignAssetsAssetsStringLimit; + type Freezer = (); + type Extra = (); + type WeightInfo = weights::pallet_assets_foreign::WeightInfo; + type CallbackHandle = (); + type AssetAccountDeposit = ForeignAssetsAssetAccountDeposit; + type RemoveItemsLimit = frame_support::traits::ConstU32<1000>; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = xcm_config::XcmBenchmarkHelper; +} + +parameter_types! { + // One storage item; key size is 32; value is size 4+4+16+32 bytes = 56 bytes. + pub const DepositBase: Balance = deposit(1, 88); + // Additional storage item size of 32 bytes. + pub const DepositFactor: Balance = deposit(0, 32); + pub const MaxSignatories: u32 = 100; +} + +impl pallet_multisig::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; + type Currency = Balances; + type DepositBase = DepositBase; + type DepositFactor = DepositFactor; + type MaxSignatories = MaxSignatories; + type WeightInfo = weights::pallet_multisig::WeightInfo; +} + +impl pallet_utility::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; + type PalletsOrigin = OriginCaller; + type WeightInfo = weights::pallet_utility::WeightInfo; +} + +parameter_types! { + // One storage item; key size 32, value size 8; . + pub const ProxyDepositBase: Balance = deposit(1, 40); + // Additional storage item size of 33 bytes. + pub const ProxyDepositFactor: Balance = deposit(0, 33); + pub const MaxProxies: u16 = 32; + // One storage item; key size 32, value size 16 + pub const AnnouncementDepositBase: Balance = deposit(1, 48); + pub const AnnouncementDepositFactor: Balance = deposit(0, 66); + pub const MaxPending: u16 = 32; +} + +/// The type used to represent the kinds of proxying allowed. +#[derive( + Copy, + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + Encode, + Decode, + RuntimeDebug, + MaxEncodedLen, + scale_info::TypeInfo, +)] +pub enum ProxyType { + /// Fully permissioned proxy. 
Can execute any call on behalf of _proxied_. + Any, + /// Can execute any call that does not transfer funds or assets. + NonTransfer, + /// Proxy with the ability to reject time-delay proxy announcements. + CancelProxy, + /// Assets proxy. Can execute any call from `assets`, **including asset transfers**. + Assets, + /// Owner proxy. Can execute calls related to asset ownership. + AssetOwner, + /// Asset manager. Can execute calls related to asset management. + AssetManager, + /// Collator selection proxy. Can execute calls related to collator selection mechanism. + Collator, +} +impl Default for ProxyType { + fn default() -> Self { + Self::Any + } +} + +impl InstanceFilter for ProxyType { + fn filter(&self, c: &RuntimeCall) -> bool { + match self { + ProxyType::Any => true, + ProxyType::NonTransfer => !matches!( + c, + RuntimeCall::Balances { .. } | + RuntimeCall::Assets { .. } | + RuntimeCall::NftFractionalization { .. } | + RuntimeCall::Nfts { .. } | + RuntimeCall::Uniques { .. } + ), + ProxyType::CancelProxy => matches!( + c, + RuntimeCall::Proxy(pallet_proxy::Call::reject_announcement { .. }) | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. } + ), + ProxyType::Assets => { + matches!( + c, + RuntimeCall::Assets { .. } | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. } | + RuntimeCall::NftFractionalization { .. } | + RuntimeCall::Nfts { .. } | RuntimeCall::Uniques { .. } + ) + }, + ProxyType::AssetOwner => matches!( + c, + RuntimeCall::Assets(TrustBackedAssetsCall::create { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::start_destroy { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::destroy_accounts { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::destroy_approvals { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::finish_destroy { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::transfer_ownership { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::set_team { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::set_metadata { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::clear_metadata { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::set_min_balance { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::create { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::destroy { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::redeposit { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::transfer_ownership { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::set_team { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::set_collection_max_supply { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::lock_collection { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::create { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::destroy { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::transfer_ownership { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::set_team { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::set_metadata { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::set_attribute { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::set_collection_metadata { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::clear_metadata { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::clear_attribute { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::clear_collection_metadata { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::set_collection_max_supply { .. }) | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. 
} + ), + ProxyType::AssetManager => matches!( + c, + RuntimeCall::Assets(TrustBackedAssetsCall::mint { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::burn { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::freeze { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::block { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::thaw { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::freeze_asset { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::thaw_asset { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::touch_other { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::refund_other { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::force_mint { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::update_mint_settings { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::mint_pre_signed { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::set_attributes_pre_signed { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::lock_item_transfer { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::unlock_item_transfer { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::lock_item_properties { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::set_metadata { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::clear_metadata { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::set_collection_metadata { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::clear_collection_metadata { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::mint { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::burn { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::freeze { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::thaw { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::freeze_collection { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::thaw_collection { .. }) | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. } + ), + ProxyType::Collator => matches!( + c, + RuntimeCall::CollatorSelection { .. } | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. } + ), + } + } + + fn is_superset(&self, o: &Self) -> bool { + match (self, o) { + (x, y) if x == y => true, + (ProxyType::Any, _) => true, + (_, ProxyType::Any) => false, + (ProxyType::Assets, ProxyType::AssetOwner) => true, + (ProxyType::Assets, ProxyType::AssetManager) => true, + (ProxyType::NonTransfer, ProxyType::Collator) => true, + _ => false, + } + } +} + +impl pallet_proxy::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; + type Currency = Balances; + type ProxyType = ProxyType; + type ProxyDepositBase = ProxyDepositBase; + type ProxyDepositFactor = ProxyDepositFactor; + type MaxProxies = MaxProxies; + type WeightInfo = weights::pallet_proxy::WeightInfo; + type MaxPending = MaxPending; + type CallHasher = BlakeTwo256; + type AnnouncementDepositBase = AnnouncementDepositBase; + type AnnouncementDepositFactor = AnnouncementDepositFactor; +} + +parameter_types! 
{ + pub const ReservedXcmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); + pub const ReservedDmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); +} + +impl cumulus_pallet_parachain_system::Config for Runtime { + type WeightInfo = weights::cumulus_pallet_parachain_system::WeightInfo; + type RuntimeEvent = RuntimeEvent; + type OnSystemEvent = (); + type SelfParaId = parachain_info::Pallet; + type DmpQueue = frame_support::traits::EnqueueWithOrigin; + type ReservedDmpWeight = ReservedDmpWeight; + type OutboundXcmpMessageSource = XcmpQueue; + type XcmpMessageHandler = XcmpQueue; + type ReservedXcmpWeight = ReservedXcmpWeight; + type CheckAssociatedRelayNumber = RelayNumberStrictlyIncreases; + type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< + Runtime, + RELAY_CHAIN_SLOT_DURATION_MILLIS, + BLOCK_PROCESSING_VELOCITY, + UNINCLUDED_SEGMENT_CAPACITY, + >; +} + +parameter_types! { + pub MessageQueueServiceWeight: Weight = Perbill::from_percent(35) * RuntimeBlockWeights::get().max_block; +} + +impl pallet_message_queue::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = weights::pallet_message_queue::WeightInfo; + #[cfg(feature = "runtime-benchmarks")] + type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor< + cumulus_primitives_core::AggregateMessageOrigin, + >; + #[cfg(not(feature = "runtime-benchmarks"))] + type MessageProcessor = xcm_builder::ProcessXcmMessage< + AggregateMessageOrigin, + xcm_executor::XcmExecutor, + RuntimeCall, + >; + type Size = u32; + // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: + type QueueChangeHandler = NarrowOriginToSibling; + type QueuePausedQuery = NarrowOriginToSibling; + type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type MaxStale = sp_core::ConstU32<8>; + type ServiceWeight = MessageQueueServiceWeight; +} + +impl parachain_info::Config for Runtime {} + +impl cumulus_pallet_aura_ext::Config for Runtime {} + +parameter_types! { + /// The asset ID for the asset that we use to pay for message delivery fees. + pub FeeAssetId: AssetId = Concrete(xcm_config::TokenLocation::get()); + /// The base fee for the message delivery fees. + pub const BaseDeliveryFee: u128 = CENTS.saturating_mul(3); +} + +pub type PriceForSiblingParachainDelivery = polkadot_runtime_common::xcm_sender::ExponentialPrice< + FeeAssetId, + BaseDeliveryFee, + TransactionByteFee, + XcmpQueue, +>; + +impl cumulus_pallet_xcmp_queue::Config for Runtime { + type WeightInfo = weights::cumulus_pallet_xcmp_queue::WeightInfo; + type RuntimeEvent = RuntimeEvent; + type ChannelInfo = ParachainSystem; + type VersionWrapper = PolkadotXcm; + type XcmpQueue = TransformOrigin; + type MaxInboundSuspended = sp_core::ConstU32<1_000>; + type ControllerOrigin = EnsureRoot; + type ControllerOriginConverter = xcm_config::XcmOriginToTransactDispatchOrigin; + type PriceForSiblingDelivery = PriceForSiblingParachainDelivery; +} + +parameter_types! { + pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; +} + +impl cumulus_pallet_dmp_queue::Config for Runtime { + type WeightInfo = weights::cumulus_pallet_dmp_queue::WeightInfo; + type RuntimeEvent = RuntimeEvent; + type DmpSink = frame_support::traits::EnqueueWithOrigin; +} + +parameter_types! 
{ + pub const Period: u32 = 6 * HOURS; + pub const Offset: u32 = 0; +} + +impl pallet_session::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type ValidatorId = ::AccountId; + // we don't have stash and controller, thus we don't need the convert as well. + type ValidatorIdOf = pallet_collator_selection::IdentityCollator; + type ShouldEndSession = pallet_session::PeriodicSessions; + type NextSessionRotation = pallet_session::PeriodicSessions; + type SessionManager = CollatorSelection; + // Essentially just Aura, but let's be pedantic. + type SessionHandler = ::KeyTypeIdProviders; + type Keys = SessionKeys; + type WeightInfo = weights::pallet_session::WeightInfo; +} + +impl pallet_aura::Config for Runtime { + type AuthorityId = AuraId; + type DisabledValidators = (); + type MaxAuthorities = ConstU32<100_000>; + type AllowMultipleBlocksPerSlot = ConstBool; + #[cfg(feature = "experimental")] + type SlotDuration = pallet_aura::MinimumPeriodTimesTwo; +} + +parameter_types! { + pub const PotId: PalletId = PalletId(*b"PotStake"); + pub const SessionLength: BlockNumber = 6 * HOURS; + // StakingAdmin pluralistic body. + pub const StakingAdminBodyId: BodyId = BodyId::Defense; +} + +/// We allow root and the `StakingAdmin` to execute privileged collator selection operations. +pub type CollatorSelectionUpdateOrigin = EitherOfDiverse< + EnsureRoot, + EnsureXcm>, +>; + +impl pallet_collator_selection::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type UpdateOrigin = CollatorSelectionUpdateOrigin; + type PotId = PotId; + type MaxCandidates = ConstU32<100>; + type MinEligibleCollators = ConstU32<4>; + type MaxInvulnerables = ConstU32<20>; + // should be a multiple of session or things will get inconsistent + type KickThreshold = Period; + type ValidatorId = ::AccountId; + type ValidatorIdOf = pallet_collator_selection::IdentityCollator; + type ValidatorRegistration = Session; + type WeightInfo = weights::pallet_collator_selection::WeightInfo; +} + +impl pallet_asset_conversion_tx_payment::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Fungibles = LocalAndForeignAssets< + Assets, + AssetIdForTrustBackedAssetsConvert, + ForeignAssets, + >; + type OnChargeAssetTransaction = AssetConversionAdapter; +} + +parameter_types! { + pub const UniquesCollectionDeposit: Balance = UNITS / 10; // 1 / 10 UNIT deposit to create a collection + pub const UniquesItemDeposit: Balance = UNITS / 1_000; // 1 / 1000 UNIT deposit to mint an item + pub const UniquesMetadataDepositBase: Balance = deposit(1, 129); + pub const UniquesAttributeDepositBase: Balance = deposit(1, 0); + pub const UniquesDepositPerByte: Balance = deposit(0, 1); +} + +impl pallet_uniques::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type CollectionId = u32; + type ItemId = u32; + type Currency = Balances; + type ForceOrigin = AssetsForceOrigin; + type CollectionDeposit = UniquesCollectionDeposit; + type ItemDeposit = UniquesItemDeposit; + type MetadataDepositBase = UniquesMetadataDepositBase; + type AttributeDepositBase = UniquesAttributeDepositBase; + type DepositPerByte = UniquesDepositPerByte; + type StringLimit = ConstU32<128>; + type KeyLimit = ConstU32<32>; + type ValueLimit = ConstU32<64>; + type WeightInfo = weights::pallet_uniques::WeightInfo; + #[cfg(feature = "runtime-benchmarks")] + type Helper = (); + type CreateOrigin = AsEnsureOriginWithArg>; + type Locker = (); +} + +parameter_types! 
{ + pub const NftFractionalizationPalletId: PalletId = PalletId(*b"fraction"); + pub NewAssetSymbol: BoundedVec = (*b"FRAC").to_vec().try_into().unwrap(); + pub NewAssetName: BoundedVec = (*b"Frac").to_vec().try_into().unwrap(); +} + +impl pallet_nft_fractionalization::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Deposit = AssetDeposit; + type Currency = Balances; + type NewAssetSymbol = NewAssetSymbol; + type NewAssetName = NewAssetName; + type StringLimit = AssetsStringLimit; + type NftCollectionId = ::CollectionId; + type NftId = ::ItemId; + type AssetBalance = ::Balance; + type AssetId = >::AssetId; + type Assets = Assets; + type Nfts = Nfts; + type PalletId = NftFractionalizationPalletId; + type WeightInfo = pallet_nft_fractionalization::weights::SubstrateWeight; + type RuntimeHoldReason = RuntimeHoldReason; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = (); +} + +parameter_types! { + pub NftsPalletFeatures: PalletFeatures = PalletFeatures::all_enabled(); + pub const NftsMaxDeadlineDuration: BlockNumber = 12 * 30 * DAYS; + // re-use the Uniques deposits + pub const NftsCollectionDeposit: Balance = UniquesCollectionDeposit::get(); + pub const NftsItemDeposit: Balance = UniquesItemDeposit::get(); + pub const NftsMetadataDepositBase: Balance = UniquesMetadataDepositBase::get(); + pub const NftsAttributeDepositBase: Balance = UniquesAttributeDepositBase::get(); + pub const NftsDepositPerByte: Balance = UniquesDepositPerByte::get(); +} + +impl pallet_nfts::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type CollectionId = u32; + type ItemId = u32; + type Currency = Balances; + type CreateOrigin = AsEnsureOriginWithArg>; + type ForceOrigin = AssetsForceOrigin; + type Locker = (); + type CollectionDeposit = NftsCollectionDeposit; + type ItemDeposit = NftsItemDeposit; + type MetadataDepositBase = NftsMetadataDepositBase; + type AttributeDepositBase = NftsAttributeDepositBase; + type DepositPerByte = NftsDepositPerByte; + type StringLimit = ConstU32<256>; + type KeyLimit = ConstU32<64>; + type ValueLimit = ConstU32<256>; + type ApprovalsLimit = ConstU32<20>; + type ItemAttributesApprovalsLimit = ConstU32<30>; + type MaxTips = ConstU32<10>; + type MaxDeadlineDuration = NftsMaxDeadlineDuration; + type MaxAttributesPerCall = ConstU32<10>; + type Features = NftsPalletFeatures; + type OffchainSignature = Signature; + type OffchainPublic = ::Signer; + type WeightInfo = weights::pallet_nfts::WeightInfo; + #[cfg(feature = "runtime-benchmarks")] + type Helper = (); +} + +/// XCM router instance to BridgeHub with bridging capabilities for `Westend` global +/// consensus with dynamic fees and back-pressure. 
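// Illustrative sketch, not part of the runtime code: a rough model of the "dynamic fees and
// back-pressure" mentioned in the comment above. The real logic lives in
// `pallet_xcm_bridge_hub_router`; the 5% growth step and the exact fee formula here are
// assumptions made purely for illustration.
const FEE_FACTOR_ONE: u128 = 1_000_000_000_000_000_000; // fixed-point 1.0 with 18 decimals

/// Grow the fee factor by roughly 5% whenever the outbound bridge queue is congested, so
/// senders pay progressively more until the queue drains again.
fn on_congested(fee_factor: u128) -> u128 {
	fee_factor.saturating_mul(105) / 100
}

/// Message delivery fee: (base fee + per-byte fee * message size), scaled by the fee factor.
fn delivery_fee(base: u128, byte_fee: u128, msg_len: u128, fee_factor: u128) -> u128 {
	base.saturating_add(byte_fee.saturating_mul(msg_len))
		.saturating_mul(fee_factor) /
		FEE_FACTOR_ONE
}

fn main() {
	let mut factor = FEE_FACTOR_ONE;
	for _ in 0..10 {
		// Ten congested rounds compound to roughly 1.63x the base fee in this model.
		factor = on_congested(factor);
	}
	println!("fee after back-pressure: {}", delivery_fee(1_000_000, 1_000, 1_024, factor));
}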
+pub type ToWestendXcmRouterInstance = pallet_xcm_bridge_hub_router::Instance3; +impl pallet_xcm_bridge_hub_router::Config for Runtime { + type WeightInfo = weights::pallet_xcm_bridge_hub_router::WeightInfo; + + type UniversalLocation = xcm_config::UniversalLocation; + type BridgedNetworkId = xcm_config::bridging::to_westend::WestendNetwork; + type Bridges = xcm_config::bridging::NetworkExportTable; + + #[cfg(not(feature = "runtime-benchmarks"))] + type BridgeHubOrigin = EnsureXcm>; + #[cfg(feature = "runtime-benchmarks")] + type BridgeHubOrigin = EitherOfDiverse< + // for running benchmarks + EnsureRoot, + // for running tests with `--feature runtime-benchmarks` + EnsureXcm>, + >; + + type ToBridgeHubSender = XcmpQueue; + type WithBridgeHubChannel = + cumulus_pallet_xcmp_queue::bridging::InAndOutXcmpChannelStatusProvider< + xcm_config::bridging::SiblingBridgeHubParaId, + Runtime, + >; + + type ByteFee = xcm_config::bridging::XcmBridgeHubRouterByteFee; + type FeeAsset = xcm_config::bridging::XcmBridgeHubRouterFeeAssetId; +} + +// Create the runtime by composing the FRAME pallets that were previously configured. +construct_runtime!( + pub enum Runtime + { + // System support stuff. + System: frame_system::{Pallet, Call, Config, Storage, Event} = 0, + ParachainSystem: cumulus_pallet_parachain_system::{ + Pallet, Call, Config, Storage, Inherent, Event, ValidateUnsigned, + } = 1, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent} = 3, + ParachainInfo: parachain_info::{Pallet, Storage, Config} = 4, + + // Monetary stuff. + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event} = 10, + TransactionPayment: pallet_transaction_payment::{Pallet, Storage, Event} = 11, + AssetTxPayment: pallet_asset_conversion_tx_payment::{Pallet, Event} = 13, + + // Collator support. the order of these 5 are important and shall not change. + Authorship: pallet_authorship::{Pallet, Storage} = 20, + CollatorSelection: pallet_collator_selection::{Pallet, Call, Storage, Event, Config} = 21, + Session: pallet_session::{Pallet, Call, Storage, Event, Config} = 22, + Aura: pallet_aura::{Pallet, Storage, Config} = 23, + AuraExt: cumulus_pallet_aura_ext::{Pallet, Storage, Config} = 24, + + // XCM helpers. + XcmpQueue: cumulus_pallet_xcmp_queue::{Pallet, Call, Storage, Event} = 30, + PolkadotXcm: pallet_xcm::{Pallet, Call, Storage, Event, Origin, Config} = 31, + CumulusXcm: cumulus_pallet_xcm::{Pallet, Event, Origin} = 32, + DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event} = 33, + MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 34, + + // Handy utilities. + Utility: pallet_utility::{Pallet, Call, Event} = 40, + Multisig: pallet_multisig::{Pallet, Call, Storage, Event} = 41, + Proxy: pallet_proxy::{Pallet, Call, Storage, Event} = 42, + + // Bridge utilities. + ToWestendXcmRouter: pallet_xcm_bridge_hub_router::::{Pallet, Storage, Call} = 45, + + // The main stage. 
+		Assets: pallet_assets::<Instance1>::{Pallet, Call, Storage, Event<T>} = 50,
+		Uniques: pallet_uniques::{Pallet, Call, Storage, Event<T>} = 51,
+		Nfts: pallet_nfts::{Pallet, Call, Storage, Event<T>} = 52,
+		ForeignAssets: pallet_assets::<Instance2>::{Pallet, Call, Storage, Event<T>} = 53,
+		NftFractionalization: pallet_nft_fractionalization::{Pallet, Call, Storage, Event<T>, HoldReason} = 54,
+
+		PoolAssets: pallet_assets::<Instance3>::{Pallet, Call, Storage, Event<T>} = 55,
+		AssetConversion: pallet_asset_conversion::{Pallet, Call, Storage, Event<T>} = 56,
+
+		#[cfg(feature = "state-trie-version-1")]
+		StateTrieMigration: pallet_state_trie_migration = 70,
+	}
+);
+
+/// The address format for describing accounts.
+pub type Address = sp_runtime::MultiAddress<AccountId, ()>;
+/// Block type as expected by this runtime.
+pub type Block = generic::Block<Header, UncheckedExtrinsic>;
+/// A Block signed with a Justification
+pub type SignedBlock = generic::SignedBlock<Block>;
+/// BlockId type as expected by this runtime.
+pub type BlockId = generic::BlockId<Block>;
+/// The SignedExtension to the basic transaction logic.
+pub type SignedExtra = (
+	frame_system::CheckNonZeroSender<Runtime>,
+	frame_system::CheckSpecVersion<Runtime>,
+	frame_system::CheckTxVersion<Runtime>,
+	frame_system::CheckGenesis<Runtime>,
+	frame_system::CheckEra<Runtime>,
+	frame_system::CheckNonce<Runtime>,
+	frame_system::CheckWeight<Runtime>,
+	pallet_asset_conversion_tx_payment::ChargeAssetTxPayment<Runtime>,
+);
+/// Unchecked extrinsic type as expected by this runtime.
+pub type UncheckedExtrinsic =
+	generic::UncheckedExtrinsic<Address, RuntimeCall, Signature, SignedExtra>;
+/// Migrations to apply on runtime upgrade.
+pub type Migrations =
+	(pallet_collator_selection::migration::v1::MigrateToV1<Runtime>, InitStorageVersions);
+
+/// Migration to initialize storage versions for pallets added after genesis.
+///
+/// This is now done automatically (see ),
+/// but some pallets had made it in and had storage set in them for this parachain before it was
+/// merged.
+pub struct InitStorageVersions;
+
+impl frame_support::traits::OnRuntimeUpgrade for InitStorageVersions {
+	fn on_runtime_upgrade() -> Weight {
+		use frame_support::traits::{GetStorageVersion, StorageVersion};
+
+		let mut writes = 0;
+
+		if PolkadotXcm::on_chain_storage_version() == StorageVersion::new(0) {
+			PolkadotXcm::current_storage_version().put::<PolkadotXcm>();
+			writes.saturating_inc();
+		}
+
+		if Multisig::on_chain_storage_version() == StorageVersion::new(0) {
+			Multisig::current_storage_version().put::<Multisig>();
+			writes.saturating_inc();
+		}
+
+		if Assets::on_chain_storage_version() == StorageVersion::new(0) {
+			Assets::current_storage_version().put::<Assets>();
+			writes.saturating_inc();
+		}
+
+		if Uniques::on_chain_storage_version() == StorageVersion::new(0) {
+			Uniques::current_storage_version().put::<Uniques>();
+			writes.saturating_inc();
+		}
+
+		if Nfts::on_chain_storage_version() == StorageVersion::new(0) {
+			Nfts::current_storage_version().put::<Nfts>();
+			writes.saturating_inc();
+		}
+
+		if ForeignAssets::on_chain_storage_version() == StorageVersion::new(0) {
+			ForeignAssets::current_storage_version().put::<ForeignAssets>();
+			writes.saturating_inc();
+		}
+
+		if PoolAssets::on_chain_storage_version() == StorageVersion::new(0) {
+			PoolAssets::current_storage_version().put::<PoolAssets>();
+			writes.saturating_inc();
+		}
+
+		<Runtime as frame_system::Config>::DbWeight::get().reads_writes(7, writes)
+	}
+}
+
+/// Executive: handles dispatch to the various modules.
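// Illustrative sketch, not part of the runtime code above: the `InitStorageVersions` migration
// only writes when the on-chain storage version is still 0, which makes it idempotent; running
// it again on a later upgrade is a no-op that only costs the reads. The mock below imitates that
// pattern with plain structs instead of FRAME storage, so the check-then-set shape is easy to see.
#[derive(Clone, Copy, PartialEq, Debug)]
struct StorageVersion(u16);

struct MockPallet {
	on_chain: StorageVersion,
	current: StorageVersion,
}

/// Bump the recorded version only if it was never initialized; return whether a write happened.
fn init_storage_version(pallet: &mut MockPallet) -> bool {
	if pallet.on_chain == StorageVersion(0) {
		pallet.on_chain = pallet.current;
		true
	} else {
		false
	}
}

fn main() {
	let mut nfts = MockPallet { on_chain: StorageVersion(0), current: StorageVersion(1) };
	assert!(init_storage_version(&mut nfts)); // first run writes
	assert!(!init_storage_version(&mut nfts)); // second run is a no-op
	println!("Nfts on-chain version: {:?}", nfts.on_chain);
}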
+pub type Executive = frame_executive::Executive< + Runtime, + Block, + frame_system::ChainContext, + Runtime, + AllPalletsWithSystem, + Migrations, +>; + +#[cfg(feature = "runtime-benchmarks")] +#[macro_use] +extern crate frame_benchmarking; + +#[cfg(feature = "runtime-benchmarks")] +mod benches { + define_benchmarks!( + [frame_system, SystemBench::] + [pallet_assets, Local] + [pallet_assets, Foreign] + [pallet_assets, Pool] + [pallet_asset_conversion, AssetConversion] + [pallet_balances, Balances] + [pallet_multisig, Multisig] + [pallet_nft_fractionalization, NftFractionalization] + [pallet_nfts, Nfts] + [pallet_proxy, Proxy] + [pallet_session, SessionBench::] + [pallet_uniques, Uniques] + [pallet_utility, Utility] + [pallet_timestamp, Timestamp] + [pallet_collator_selection, CollatorSelection] + [cumulus_pallet_xcmp_queue, XcmpQueue] + [pallet_xcm_bridge_hub_router, ToWestend] + // XCM + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] + // NOTE: Make sure you point to the individual modules below. + [pallet_xcm_benchmarks::fungible, XcmBalances] + [pallet_xcm_benchmarks::generic, XcmGeneric] + ); +} + +impl_runtime_apis! { + impl sp_consensus_aura::AuraApi for Runtime { + fn slot_duration() -> sp_consensus_aura::SlotDuration { + sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) + } + + fn authorities() -> Vec { + Aura::authorities().into_inner() + } + } + + impl sp_api::Core for Runtime { + fn version() -> RuntimeVersion { + VERSION + } + + fn execute_block(block: Block) { + Executive::execute_block(block) + } + + fn initialize_block(header: &::Header) { + Executive::initialize_block(header) + } + } + + impl sp_api::Metadata for Runtime { + fn metadata() -> OpaqueMetadata { + OpaqueMetadata::new(Runtime::metadata().into()) + } + + fn metadata_at_version(version: u32) -> Option { + Runtime::metadata_at_version(version) + } + + fn metadata_versions() -> sp_std::vec::Vec { + Runtime::metadata_versions() + } + } + + impl sp_block_builder::BlockBuilder for Runtime { + fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { + Executive::apply_extrinsic(extrinsic) + } + + fn finalize_block() -> ::Header { + Executive::finalize_block() + } + + fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { + data.create_extrinsics() + } + + fn check_inherents( + block: Block, + data: sp_inherents::InherentData, + ) -> sp_inherents::CheckInherentsResult { + data.check_extrinsics(&block) + } + } + + impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { + fn validate_transaction( + source: TransactionSource, + tx: ::Extrinsic, + block_hash: ::Hash, + ) -> TransactionValidity { + Executive::validate_transaction(source, tx, block_hash) + } + } + + impl sp_offchain::OffchainWorkerApi for Runtime { + fn offchain_worker(header: &::Header) { + Executive::offchain_worker(header) + } + } + + impl sp_session::SessionKeys for Runtime { + fn generate_session_keys(seed: Option>) -> Vec { + SessionKeys::generate(seed) + } + + fn decode_session_keys( + encoded: Vec, + ) -> Option, KeyTypeId)>> { + SessionKeys::decode_into_raw_public_keys(&encoded) + } + } + + impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { + fn account_nonce(account: AccountId) -> Nonce { + System::account_nonce(account) + } + } + + impl pallet_asset_conversion::AssetConversionApi< + Block, + Balance, + u128, + Box, + > for Runtime + { + fn quote_price_exact_tokens_for_tokens(asset1: Box, asset2: Box, amount: u128, include_fee: bool) -> Option { + 
AssetConversion::quote_price_exact_tokens_for_tokens(asset1, asset2, amount, include_fee) + } + fn quote_price_tokens_for_exact_tokens(asset1: Box, asset2: Box, amount: u128, include_fee: bool) -> Option { + AssetConversion::quote_price_tokens_for_exact_tokens(asset1, asset2, amount, include_fee) + } + fn get_reserves(asset1: Box, asset2: Box) -> Option<(Balance, Balance)> { + AssetConversion::get_reserves(&asset1, &asset2).ok() + } + } + + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi for Runtime { + fn query_info( + uxt: ::Extrinsic, + len: u32, + ) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo { + TransactionPayment::query_info(uxt, len) + } + fn query_fee_details( + uxt: ::Extrinsic, + len: u32, + ) -> pallet_transaction_payment::FeeDetails { + TransactionPayment::query_fee_details(uxt, len) + } + fn query_weight_to_fee(weight: Weight) -> Balance { + TransactionPayment::weight_to_fee(weight) + } + fn query_length_to_fee(length: u32) -> Balance { + TransactionPayment::length_to_fee(length) + } + } + + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentCallApi + for Runtime + { + fn query_call_info( + call: RuntimeCall, + len: u32, + ) -> pallet_transaction_payment::RuntimeDispatchInfo { + TransactionPayment::query_call_info(call, len) + } + fn query_call_fee_details( + call: RuntimeCall, + len: u32, + ) -> pallet_transaction_payment::FeeDetails { + TransactionPayment::query_call_fee_details(call, len) + } + fn query_weight_to_fee(weight: Weight) -> Balance { + TransactionPayment::weight_to_fee(weight) + } + fn query_length_to_fee(length: u32) -> Balance { + TransactionPayment::length_to_fee(length) + } + } + + impl assets_common::runtime_api::FungiblesApi< + Block, + AccountId, + > for Runtime + { + fn query_account_balances(account: AccountId) -> Result { + use assets_common::fungible_conversion::{convert, convert_balance}; + Ok([ + // collect pallet_balance + { + let balance = Balances::free_balance(account.clone()); + if balance > 0 { + vec![convert_balance::(balance)?] + } else { + vec![] + } + }, + // collect pallet_assets (TrustBackedAssets) + convert::<_, _, _, _, TrustBackedAssetsConvertedConcreteId>( + Assets::account_balances(account.clone()) + .iter() + .filter(|(_, balance)| balance > &0) + )?, + // collect pallet_assets (ForeignAssets) + convert::<_, _, _, _, ForeignAssetsConvertedConcreteId>( + ForeignAssets::account_balances(account.clone()) + .iter() + .filter(|(_, balance)| balance > &0) + )?, + // collect pallet_assets (PoolAssets) + convert::<_, _, _, _, PoolAssetsConvertedConcreteId>( + PoolAssets::account_balances(account) + .iter() + .filter(|(_, balance)| balance > &0) + )?, + // collect ... e.g. 
other tokens + ].concat().into()) + } + } + + impl cumulus_primitives_core::CollectCollationInfo for Runtime { + fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { + ParachainSystem::collect_collation_info(header) + } + } + + #[cfg(feature = "try-runtime")] + impl frame_try_runtime::TryRuntime for Runtime { + fn on_runtime_upgrade(checks: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) { + let weight = Executive::try_runtime_upgrade(checks).unwrap(); + (weight, RuntimeBlockWeights::get().max_block) + } + + fn execute_block( + block: Block, + state_root_check: bool, + signature_check: bool, + select: frame_try_runtime::TryStateSelect, + ) -> Weight { + // NOTE: intentional unwrap: we don't want to propagate the error backwards, and want to + // have a backtrace here. + Executive::try_execute_block(block, state_root_check, signature_check, select).unwrap() + } + } + + #[cfg(feature = "runtime-benchmarks")] + impl frame_benchmarking::Benchmark for Runtime { + fn benchmark_metadata(extra: bool) -> ( + Vec, + Vec, + ) { + use frame_benchmarking::{Benchmarking, BenchmarkList}; + use frame_support::traits::StorageInfoTrait; + use frame_system_benchmarking::Pallet as SystemBench; + use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; + use pallet_xcm_bridge_hub_router::benchmarking::Pallet as XcmBridgeHubRouterBench; + + // This is defined once again in dispatch_benchmark, because list_benchmarks! + // and add_benchmarks! are macros exported by define_benchmarks! macros and those types + // are referenced in that call. + type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::; + type XcmGeneric = pallet_xcm_benchmarks::generic::Pallet::; + + // Benchmark files generated for `Assets/ForeignAssets` instances are by default + // `pallet_assets_assets.rs / pallet_assets_foreign_assets`, which is not really nice, + // so with this redefinition we can change names to nicer: + // `pallet_assets_local.rs / pallet_assets_foreign.rs`. 
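// Illustrative sketch, not part of the runtime code: `query_account_balances` above concatenates
// the native balance with every non-zero pallet-assets balance into one response. The plain-Rust
// mock below shows the same "drop zero balances, then concat" shape without the XCM conversion
// machinery (`convert` / `convert_balance` are assumed to map each entry to a `MultiAsset` in the
// real runtime; the asset ids and amounts here are made up).
fn non_zero(balances: Vec<(u32, u128)>) -> Vec<(u32, u128)> {
	balances.into_iter().filter(|(_, balance)| *balance > 0).collect()
}

fn main() {
	let native = vec![(0u32, 5_000u128)]; // stands in for the Balances entry
	let trust_backed = non_zero(vec![(1, 0), (2, 42)]); // zero balances are dropped
	let foreign = non_zero(vec![(7, 7)]);
	let all: Vec<(u32, u128)> = [native, trust_backed, foreign].concat();
	assert_eq!(all.len(), 3);
	println!("{all:?}");
}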
+ type Local = pallet_assets::Pallet::; + type Foreign = pallet_assets::Pallet::; + type Pool = pallet_assets::Pallet::; + + type ToWestend = XcmBridgeHubRouterBench; + + let mut list = Vec::::new(); + list_benchmarks!(list, extra); + + let storage_info = AllPalletsWithSystem::storage_info(); + (list, storage_info) + } + + fn dispatch_benchmark( + config: frame_benchmarking::BenchmarkConfig + ) -> Result, sp_runtime::RuntimeString> { + use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; + use sp_storage::TrackedStorageKey; + + use frame_system_benchmarking::Pallet as SystemBench; + impl frame_system_benchmarking::Config for Runtime { + fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { + ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); + Ok(()) + } + + fn verify_set_code() { + System::assert_last_event(cumulus_pallet_parachain_system::Event::::ValidationFunctionStored.into()); + } + } + + use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + impl cumulus_pallet_session_benchmarking::Config for Runtime {} + + use pallet_xcm_bridge_hub_router::benchmarking::{ + Pallet as XcmBridgeHubRouterBench, + Config as XcmBridgeHubRouterConfig, + }; + + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported between AH and Relay. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + Parent.into(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // AH can reserve transfer native token to some random parachain. + let random_para_id = 43211234; + ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests( + random_para_id.into() + ); + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + ParentThen(Parachain(random_para_id).into()).into(), + )) + } + } + + impl XcmBridgeHubRouterConfig for Runtime { + fn make_congested() { + cumulus_pallet_xcmp_queue::bridging::suspend_channel_for_benchmarks::( + xcm_config::bridging::SiblingBridgeHubParaId::get().into() + ); + } + fn ensure_bridged_target_destination() -> MultiLocation { + ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests( + xcm_config::bridging::SiblingBridgeHubParaId::get().into() + ); + xcm_config::bridging::to_westend::AssetHubWestend::get() + } + } + + use xcm::latest::prelude::*; + use xcm_config::{TokenLocation, MaxAssetsIntoHolding}; + use pallet_xcm_benchmarks::asset_instance_from; + + parameter_types! { + pub ExistentialDepositMultiAsset: Option = Some(( + TokenLocation::get(), + ExistentialDeposit::get() + ).into()); + } + + impl pallet_xcm_benchmarks::Config for Runtime { + type XcmConfig = xcm_config::XcmConfig; + type AccountIdConverter = xcm_config::LocationToAccountId; + type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositMultiAsset, + xcm_config::PriceForParentDelivery, + >; + fn valid_destination() -> Result { + Ok(TokenLocation::get()) + } + fn worst_case_holding(depositable_count: u32) -> MultiAssets { + // A mix of fungible, non-fungible, and concrete assets. 
+ let holding_non_fungibles = MaxAssetsIntoHolding::get() / 2 - depositable_count; + let holding_fungibles = holding_non_fungibles.saturating_sub(1); + let fungibles_amount: u128 = 100; + let mut assets = (0..holding_fungibles) + .map(|i| { + MultiAsset { + id: Concrete(GeneralIndex(i as u128).into()), + fun: Fungible(fungibles_amount * i as u128), + } + }) + .chain(core::iter::once(MultiAsset { id: Concrete(Here.into()), fun: Fungible(u128::MAX) })) + .chain((0..holding_non_fungibles).map(|i| MultiAsset { + id: Concrete(GeneralIndex(i as u128).into()), + fun: NonFungible(asset_instance_from(i)), + })) + .collect::>(); + + assets.push(MultiAsset { + id: Concrete(TokenLocation::get()), + fun: Fungible(1_000_000 * UNITS), + }); + assets.into() + } + } + + parameter_types! { + pub const TrustedTeleporter: Option<(MultiLocation, MultiAsset)> = Some(( + TokenLocation::get(), + MultiAsset { fun: Fungible(UNITS), id: Concrete(TokenLocation::get()) }, + )); + pub const CheckedAccount: Option<(AccountId, xcm_builder::MintLocation)> = None; + // AssetHubRococo trusts AssetHubWestend as reserve for WNDs + pub TrustedReserve: Option<(MultiLocation, MultiAsset)> = Some( + ( + xcm_config::bridging::to_westend::AssetHubWestend::get(), + MultiAsset::from((xcm_config::bridging::to_westend::WndLocation::get(), 1000000000000 as u128)) + ) + ); + } + + impl pallet_xcm_benchmarks::fungible::Config for Runtime { + type TransactAsset = Balances; + + type CheckedAccount = CheckedAccount; + type TrustedTeleporter = TrustedTeleporter; + type TrustedReserve = TrustedReserve; + + fn get_multi_asset() -> MultiAsset { + MultiAsset { + id: Concrete(TokenLocation::get()), + fun: Fungible(UNITS), + } + } + } + + impl pallet_xcm_benchmarks::generic::Config for Runtime { + type TransactAsset = Balances; + type RuntimeCall = RuntimeCall; + + fn worst_case_response() -> (u64, Response) { + (0u64, Response::Version(Default::default())) + } + + fn worst_case_asset_exchange() -> Result<(MultiAssets, MultiAssets), BenchmarkError> { + Err(BenchmarkError::Skip) + } + + fn universal_alias() -> Result<(MultiLocation, Junction), BenchmarkError> { + match xcm_config::bridging::BridgingBenchmarksHelper::prepare_universal_alias() { + Some(alias) => Ok(alias), + None => Err(BenchmarkError::Skip) + } + } + + fn transact_origin_and_runtime_call() -> Result<(MultiLocation, RuntimeCall), BenchmarkError> { + Ok((TokenLocation::get(), frame_system::Call::remark_with_event { remark: vec![] }.into())) + } + + fn subscribe_origin() -> Result { + Ok(TokenLocation::get()) + } + + fn claimable_asset() -> Result<(MultiLocation, MultiLocation, MultiAssets), BenchmarkError> { + let origin = TokenLocation::get(); + let assets: MultiAssets = (Concrete(TokenLocation::get()), 1_000 * UNITS).into(); + let ticket = MultiLocation { parents: 0, interior: Here }; + Ok((origin, ticket, assets)) + } + + fn unlockable_asset() -> Result<(MultiLocation, MultiLocation, MultiAsset), BenchmarkError> { + Err(BenchmarkError::Skip) + } + + fn export_message_origin_and_destination( + ) -> Result<(MultiLocation, NetworkId, InteriorMultiLocation), BenchmarkError> { + Err(BenchmarkError::Skip) + } + + fn alias_origin() -> Result<(MultiLocation, MultiLocation), BenchmarkError> { + Err(BenchmarkError::Skip) + } + } + + type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::; + type XcmGeneric = pallet_xcm_benchmarks::generic::Pallet::; + + type Local = pallet_assets::Pallet::; + type Foreign = pallet_assets::Pallet::; + type Pool = pallet_assets::Pallet::; + + type 
ToWestend = XcmBridgeHubRouterBench; + + let whitelist: Vec = vec![ + // Block Number + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), + // Total Issuance + hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), + // Execution Phase + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), + // Event Count + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), + // System Events + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), + //TODO: use from relay_well_known_keys::ACTIVE_CONFIG + hex_literal::hex!("06de3d8a54d27e44a9d5ce189618f22db4b49d95320d9021994c850f25b8e385").to_vec().into(), + ]; + + let mut batches = Vec::::new(); + let params = (&config, &whitelist); + add_benchmarks!(params, batches); + + Ok(batches) + } + } + + impl sp_genesis_builder::GenesisBuilder for Runtime { + fn create_default_config() -> Vec { + create_default_config::() + } + + fn build_config(config: Vec) -> sp_genesis_builder::Result { + build_config::(config) + } + } +} + +cumulus_pallet_parachain_system::register_validate_block! { + Runtime = Runtime, + BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::, +} + +#[cfg(feature = "state-trie-version-1")] +parameter_types! { + // The deposit configuration for the singed migration. Specially if you want to allow any signed account to do the migration (see `SignedFilter`, these deposits should be high) + pub const MigrationSignedDepositPerItem: Balance = CENTS; + pub const MigrationSignedDepositBase: Balance = 2_000 * CENTS; + pub const MigrationMaxKeyLen: u32 = 512; +} + +#[cfg(feature = "state-trie-version-1")] +impl pallet_state_trie_migration::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type SignedDepositPerItem = MigrationSignedDepositPerItem; + type SignedDepositBase = MigrationSignedDepositBase; + // An origin that can control the whole pallet: should be Root, or a part of your council. + type ControlOrigin = frame_system::EnsureSignedBy; + // specific account for the migration, can trigger the signed migrations. + type SignedFilter = frame_system::EnsureSignedBy; + + // Replace this with weight based on your runtime. + type WeightInfo = pallet_state_trie_migration::weights::SubstrateWeight; + + type MaxKeyLen = MigrationMaxKeyLen; +} + +#[cfg(feature = "state-trie-version-1")] +frame_support::ord_parameter_types! 
{ + pub const MigController: AccountId = AccountId::from(hex_literal::hex!("8458ed39dc4b6f6c7255f7bc42be50c2967db126357c999d44e12ca7ac80dc52")); + pub const RootMigController: AccountId = AccountId::from(hex_literal::hex!("8458ed39dc4b6f6c7255f7bc42be50c2967db126357c999d44e12ca7ac80dc52")); +} + +#[cfg(feature = "state-trie-version-1")] +#[test] +fn ensure_key_ss58() { + use frame_support::traits::SortedMembers; + use sp_core::crypto::Ss58Codec; + let acc = + AccountId::from_ss58check("5F4EbSkZz18X36xhbsjvDNs6NuZ82HyYtq5UiJ1h9SBHJXZD").unwrap(); + //panic!("{:x?}", acc); + assert_eq!(acc, MigController::sorted_members()[0]); + let acc = + AccountId::from_ss58check("5F4EbSkZz18X36xhbsjvDNs6NuZ82HyYtq5UiJ1h9SBHJXZD").unwrap(); + assert_eq!(acc, RootMigController::sorted_members()[0]); + //panic!("{:x?}", acc); +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{CENTS, MILLICENTS}; + use parachains_common::rococo::fee; + use sp_runtime::traits::Zero; + use sp_weights::WeightToFee; + + /// We can fit at least 1000 transfers in a block. + #[test] + fn sane_block_weight() { + use pallet_balances::WeightInfo; + let block = RuntimeBlockWeights::get().max_block; + let base = RuntimeBlockWeights::get().get(DispatchClass::Normal).base_extrinsic; + let transfer = + base + weights::pallet_balances::WeightInfo::::transfer_allow_death(); + + let fit = block.checked_div_per_component(&transfer).unwrap_or_default(); + assert!(fit >= 1000, "{} should be at least 1000", fit); + } + + /// The fee for one transfer is at most 1 CENT. + #[test] + fn sane_transfer_fee() { + use pallet_balances::WeightInfo; + let base = RuntimeBlockWeights::get().get(DispatchClass::Normal).base_extrinsic; + let transfer = + base + weights::pallet_balances::WeightInfo::::transfer_allow_death(); + + let fee: Balance = fee::WeightToFee::weight_to_fee(&transfer); + assert!(fee <= CENTS, "{} MILLICENTS should be at most 1000", fee / MILLICENTS); + } + + /// Weight is being charged for both dimensions. + #[test] + fn weight_charged_for_both_components() { + let fee: Balance = fee::WeightToFee::weight_to_fee(&Weight::from_parts(10_000, 0)); + assert!(!fee.is_zero(), "Charges for ref time"); + + let fee: Balance = fee::WeightToFee::weight_to_fee(&Weight::from_parts(0, 10_000)); + assert_eq!(fee, CENTS, "10kb maps to CENT"); + } + + /// Filling up a block by proof size is at most 30 times more expensive than ref time. + /// + /// This is just a sanity check. + #[test] + fn full_block_fee_ratio() { + let block = RuntimeBlockWeights::get().max_block; + let time_fee: Balance = + fee::WeightToFee::weight_to_fee(&Weight::from_parts(block.ref_time(), 0)); + let proof_fee: Balance = + fee::WeightToFee::weight_to_fee(&Weight::from_parts(0, block.proof_size())); + + let proof_o_time = proof_fee.checked_div(time_fee).unwrap_or_default(); + assert!(proof_o_time <= 30, "{} should be at most 30", proof_o_time); + let time_o_proof = time_fee.checked_div(proof_fee).unwrap_or_default(); + assert!(time_o_proof <= 30, "{} should be at most 30", time_o_proof); + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/block_weights.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/block_weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..e7fdb2aae2a01ec06076de83d94817e540e205dd --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/block_weights.rs @@ -0,0 +1,53 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod constants { + use frame_support::{ + parameter_types, + weights::{constants, Weight}, + }; + + parameter_types! { + /// Importing a block with 0 Extrinsics. + pub const BlockExecutionWeight: Weight = + Weight::from_parts(constants::WEIGHT_REF_TIME_PER_NANOS.saturating_mul(5_000_000), 0); + } + + #[cfg(test)] + mod test_weights { + use frame_support::weights::constants; + + /// Checks that the weight exists and is sane. + // NOTE: If this test fails but you are sure that the generated values are fine, + // you can delete it. + #[test] + fn sane() { + let w = super::constants::BlockExecutionWeight::get(); + + // At least 100 µs. + assert!( + w.ref_time() >= 100u64 * constants::WEIGHT_REF_TIME_PER_MICROS, + "Weight should be at least 100 µs." + ); + // At most 50 ms. + assert!( + w.ref_time() <= 50u64 * constants::WEIGHT_REF_TIME_PER_MILLIS, + "Weight should be at most 50 ms." + ); + } + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/cumulus_pallet_dmp_queue.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/cumulus_pallet_dmp_queue.rs new file mode 100644 index 0000000000000000000000000000000000000000..cc41dcd6cbbb62c1392ae2e7517b5dfe920a5b85 --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/cumulus_pallet_dmp_queue.rs @@ -0,0 +1,131 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `cumulus_pallet_dmp_queue` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-10-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=cumulus_pallet_dmp_queue +// --chain=asset-hub-kusama-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `cumulus_pallet_dmp_queue`. +pub struct WeightInfo(PhantomData); +impl cumulus_pallet_dmp_queue::WeightInfo for WeightInfo { + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn on_idle_good_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65696` + // Estimated: `69161` + // Minimum execution time: 124_651_000 picoseconds. + Weight::from_parts(127_857_000, 0) + .saturating_add(Weight::from_parts(0, 69161)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + fn on_idle_large_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65659` + // Estimated: `69124` + // Minimum execution time: 65_684_000 picoseconds. 
+ Weight::from_parts(68_039_000, 0) + .saturating_add(Weight::from_parts(0, 69124)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn on_idle_overweight_good_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65726` + // Estimated: `69191` + // Minimum execution time: 117_657_000 picoseconds. + Weight::from_parts(122_035_000, 0) + .saturating_add(Weight::from_parts(0, 69191)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(6)) + } + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + fn on_idle_overweight_large_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65689` + // Estimated: `69154` + // Minimum execution time: 59_799_000 picoseconds. + Weight::from_parts(61_354_000, 0) + .saturating_add(Weight::from_parts(0, 69154)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/cumulus_pallet_parachain_system.rs new file mode 100644 index 0000000000000000000000000000000000000000..c1e5c6a74293995b6e3702f19b8b3750c34b192f --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/cumulus_pallet_parachain_system.rs @@ -0,0 +1,80 @@ +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `cumulus_pallet_parachain_system` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-03-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("statemine-dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --chain +// statemine-dev +// --pallet +// cumulus_pallet_parachain_system +// --extrinsic +// * +// --execution +// wasm +// --wasm-execution +// compiled +// --output +// parachains/runtimes/assets/statemine/src/weights +// --steps +// 50 +// --repeat +// 20 + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::Weight}; +use sp_std::marker::PhantomData; + +/// Weight functions for `cumulus_pallet_parachain_system`. +pub struct WeightInfo(PhantomData); +impl cumulus_pallet_parachain_system::WeightInfo for WeightInfo { + /// Storage: ParachainSystem LastDmqMqcHead (r:1 w:1) + /// Proof Skipped: ParachainSystem LastDmqMqcHead (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: ParachainSystem ReservedDmpWeightOverride (r:1 w:0) + /// Proof Skipped: ParachainSystem ReservedDmpWeightOverride (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: ParachainSystem ProcessedDownwardMessages (r:0 w:1) + /// Proof Skipped: ParachainSystem ProcessedDownwardMessages (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: MessageQueue Pages (r:0 w:16) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// The range of component `n` is `[0, 1000]`. + fn enqueue_inbound_downward_messages(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `12` + // Estimated: `8013` + // Minimum execution time: 1_622_000 picoseconds. 
+		Weight::from_parts(1_709_000, 0)
+			.saturating_add(Weight::from_parts(0, 8013))
+			// Standard Error: 22_138
+			.saturating_add(Weight::from_parts(23_923_169, 0).saturating_mul(n.into()))
+			.saturating_add(T::DbWeight::get().reads(4))
+			.saturating_add(T::DbWeight::get().writes(4))
+	}
+}
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/cumulus_pallet_xcmp_queue.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/cumulus_pallet_xcmp_queue.rs
new file mode 100644
index 0000000000000000000000000000000000000000..70fc3617bce93e2fa9246a649cd3adf81c1cda23
--- /dev/null
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/cumulus_pallet_xcmp_queue.rs
@@ -0,0 +1,148 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Autogenerated weights for `cumulus_pallet_xcmp_queue`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2023-09-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `Olivers-MacBook-Pro.local`, CPU: ``
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// ./target/release/polkadot-parachain
+// benchmark
+// pallet
+// --pallet
+// cumulus-pallet-xcmp-queue
+// --chain
+// bridge-hub-rococo-dev
+// --output
+// cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/cumulus_pallet_xcmp_queue.rs
+// --extrinsic
+//
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `cumulus_pallet_xcmp_queue`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> cumulus_pallet_xcmp_queue::WeightInfo for WeightInfo<T> {
+	/// Storage: `XcmpQueue::QueueConfig` (r:1 w:1)
+	/// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	fn set_config_with_u32() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `76`
+		// Estimated: `1561`
+		// Minimum execution time: 5_000_000 picoseconds.
+ Weight::from_parts(6_000_000, 0) + .saturating_add(Weight::from_parts(0, 1561)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) + /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn enqueue_xcmp_message() -> Weight { + // Proof Size summary in bytes: + // Measured: `82` + // Estimated: `3517` + // Minimum execution time: 14_000_000 picoseconds. + Weight::from_parts(15_000_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn suspend_channel() -> Weight { + // Proof Size summary in bytes: + // Measured: `76` + // Estimated: `1561` + // Minimum execution time: 3_000_000 picoseconds. + Weight::from_parts(3_000_000, 0) + .saturating_add(Weight::from_parts(0, 1561)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn resume_channel() -> Weight { + // Proof Size summary in bytes: + // Measured: `111` + // Estimated: `1596` + // Minimum execution time: 4_000_000 picoseconds. + Weight::from_parts(4_000_000, 0) + .saturating_add(Weight::from_parts(0, 1596)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn take_first_concatenated_xcm() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 44_000_000 picoseconds. 
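+ // Note that this benchmark touches no storage, so the weight below is pure
+ // ref-time (~45 µs) with no `DbWeight` component and a zero proof size.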
+ Weight::from_parts(45_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Storage: `XcmpQueue::InboundXcmpMessages` (r:1 w:1) + /// Proof: `XcmpQueue::InboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) + /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn on_idle_good_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65711` + // Estimated: `69176` + // Minimum execution time: 67_000_000 picoseconds. + Weight::from_parts(73_000_000, 0) + .saturating_add(Weight::from_parts(0, 69176)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + fn on_idle_large_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65710` + // Estimated: `69175` + // Minimum execution time: 49_000_000 picoseconds. + Weight::from_parts(55_000_000, 0) + .saturating_add(Weight::from_parts(0, 69175)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/extrinsic_weights.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/extrinsic_weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..1a4adb968bb7195428ea00d59cd92dcd3b6eea5f --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/extrinsic_weights.rs @@ -0,0 +1,53 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod constants { + use frame_support::{ + parameter_types, + weights::{constants, Weight}, + }; + + parameter_types! { + /// Executing a NO-OP `System::remarks` Extrinsic. 
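+		/// This works out to `WEIGHT_REF_TIME_PER_NANOS * 125_000` = 125_000 ns = 125 µs of
+		/// ref-time per extrinsic, which the `sane` test below checks stays between 10 µs and 1 ms.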
+		pub const ExtrinsicBaseWeight: Weight =
+			Weight::from_parts(constants::WEIGHT_REF_TIME_PER_NANOS.saturating_mul(125_000), 0);
+	}
+
+	#[cfg(test)]
+	mod test_weights {
+		use frame_support::weights::constants;
+
+		/// Checks that the weight exists and is sane.
+		// NOTE: If this test fails but you are sure that the generated values are fine,
+		// you can delete it.
+		#[test]
+		fn sane() {
+			let w = super::constants::ExtrinsicBaseWeight::get();
+
+			// At least 10 µs.
+			assert!(
+				w.ref_time() >= 10u64 * constants::WEIGHT_REF_TIME_PER_MICROS,
+				"Weight should be at least 10 µs."
+			);
+			// At most 1 ms.
+			assert!(
+				w.ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS,
+				"Weight should be at most 1 ms."
+			);
+		}
+	}
+}
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/frame_system.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/frame_system.rs
new file mode 100644
index 0000000000000000000000000000000000000000..4f993155c19c7fc521f00f00b4841a15064d15c8
--- /dev/null
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/frame_system.rs
@@ -0,0 +1,155 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus. If not, see <https://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `frame_system`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// ./target/production/polkadot-parachain
+// benchmark
+// pallet
+// --chain=asset-hub-rococo-dev
+// --wasm-execution=compiled
+// --pallet=frame_system
+// --no-storage-info
+// --no-median-slopes
+// --no-min-squares
+// --extrinsic=*
+// --steps=50
+// --repeat=20
+// --json
+// --header=./file_header.txt
+// --output=./parachains/runtimes/assets/asset-hub-rococo/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `frame_system`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> frame_system::WeightInfo for WeightInfo<T> {
+	/// The range of component `b` is `[0, 3932160]`.
+	fn remark(b: u32, ) -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `0`
+		// Estimated: `0`
+		// Minimum execution time: 2_106_000 picoseconds.
+		Weight::from_parts(1_884_213, 0)
+			.saturating_add(Weight::from_parts(0, 0))
+			// Standard Error: 0
+			.saturating_add(Weight::from_parts(388, 0).saturating_mul(b.into()))
+	}
+	/// The range of component `b` is `[0, 3932160]`.
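+	/// Each additional byte of remark data adds ~1_730 ps here, versus ~388 ps for the plain
+	/// `remark` above, presumably because `remark_with_event` also hashes the data and
+	/// deposits an event containing the hash.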
+ fn remark_with_event(b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_528_000 picoseconds. + Weight::from_parts(27_081_927, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 7 + .saturating_add(Weight::from_parts(1_730, 0).saturating_mul(b.into())) + } + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) + fn set_heap_pages() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `1485` + // Minimum execution time: 3_882_000 picoseconds. + Weight::from_parts(4_149_000, 0) + .saturating_add(Weight::from_parts(0, 1485)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) + /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpgradeRestrictionSignal` (r:1 w:0) + /// Proof: `ParachainSystem::UpgradeRestrictionSignal` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingValidationCode` (r:1 w:1) + /// Proof: `ParachainSystem::PendingValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::NewValidationCode` (r:0 w:1) + /// Proof: `ParachainSystem::NewValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::DidSetValidationCode` (r:0 w:1) + /// Proof: `ParachainSystem::DidSetValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn set_code() -> Weight { + // Proof Size summary in bytes: + // Measured: `119` + // Estimated: `1604` + // Minimum execution time: 103_389_161_000 picoseconds. + Weight::from_parts(106_870_091_000, 0) + .saturating_add(Weight::from_parts(0, 1604)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `i` is `[0, 1000]`. + fn set_storage(i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_236_000 picoseconds. + Weight::from_parts(2_302_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 2_045 + .saturating_add(Weight::from_parts(763_456, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) + } + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `i` is `[0, 1000]`. + fn kill_storage(i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_175_000 picoseconds. 
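+ // As with `set_storage` above, cost scales with the number of items: roughly one DB
+ // write plus ~571_397 ps of ref-time per key removed.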
+ Weight::from_parts(2_238_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 1_040 + .saturating_add(Weight::from_parts(571_397, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) + } + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `p` is `[0, 1000]`. + fn kill_prefix(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `84 + p * (69 ±0)` + // Estimated: `80 + p * (70 ±0)` + // Minimum execution time: 3_843_000 picoseconds. + Weight::from_parts(3_947_000, 0) + .saturating_add(Weight::from_parts(0, 80)) + // Standard Error: 2_188 + .saturating_add(Weight::from_parts(1_212_360, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) + .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..252cf2630f41c5ca7bbefe34a345742f8cff170e --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs @@ -0,0 +1,47 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . 
+ +pub mod block_weights; +pub mod cumulus_pallet_dmp_queue; +pub mod cumulus_pallet_parachain_system; +pub mod cumulus_pallet_xcmp_queue; +pub mod extrinsic_weights; +pub mod frame_system; +pub mod pallet_asset_conversion; +pub mod pallet_assets_foreign; +pub mod pallet_assets_local; +pub mod pallet_assets_pool; +pub mod pallet_balances; +pub mod pallet_collator_selection; +pub mod pallet_message_queue; +pub mod pallet_multisig; +pub mod pallet_nft_fractionalization; +pub mod pallet_nfts; +pub mod pallet_proxy; +pub mod pallet_session; +pub mod pallet_timestamp; +pub mod pallet_uniques; +pub mod pallet_utility; +pub mod pallet_xcm; +pub mod pallet_xcm_bridge_hub_router; +pub mod paritydb_weights; +pub mod rocksdb_weights; +pub mod xcm; + +pub use block_weights::constants::BlockExecutionWeight; +pub use extrinsic_weights::constants::ExtrinsicBaseWeight; +pub use paritydb_weights::constants::ParityDbWeight; +pub use rocksdb_weights::constants::RocksDbWeight; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_asset_conversion.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_asset_conversion.rs new file mode 100644 index 0000000000000000000000000000000000000000..4514fbfa8763ad40a65ad604eadde797c6f6bf9b --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_asset_conversion.rs @@ -0,0 +1,155 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_asset_conversion` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/cumulus/.git/.artifacts/bench.json +// --pallet=pallet_asset_conversion +// --chain=asset-hub-rococo-dev +// --header=./file_header.txt +// --output=./parachains/runtimes/assets/asset-hub-rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_asset_conversion`. 
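+/// A runtime would typically wire these weights in via the pallet's `Config`, along the
+/// lines of `type WeightInfo = weights::pallet_asset_conversion::WeightInfo<Runtime>;`
+/// (illustrative).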
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_asset_conversion::WeightInfo for WeightInfo<T> {
+	/// Storage: `AssetConversion::Pools` (r:1 w:1)
+	/// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(1224), added: 3699, mode: `MaxEncodedLen`)
+	/// Storage: UNKNOWN KEY `0x76a2c49709deec21d9c05f96c1f47351` (r:1 w:0)
+	/// Proof: UNKNOWN KEY `0x76a2c49709deec21d9c05f96c1f47351` (r:1 w:0)
+	/// Storage: `System::Account` (r:2 w:1)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	/// Storage: `ForeignAssets::Account` (r:1 w:1)
+	/// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`)
+	/// Storage: `ForeignAssets::Asset` (r:1 w:1)
+	/// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`)
+	/// Storage: `AssetConversion::NextPoolAssetId` (r:1 w:1)
+	/// Proof: `AssetConversion::NextPoolAssetId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `PoolAssets::Asset` (r:1 w:1)
+	/// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)
+	/// Storage: `PoolAssets::Account` (r:1 w:1)
+	/// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)
+	fn create_pool() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `480`
+		// Estimated: `6196`
+		// Minimum execution time: 88_484_000 picoseconds.
+		Weight::from_parts(92_964_000, 0)
+			.saturating_add(Weight::from_parts(0, 6196))
+			.saturating_add(T::DbWeight::get().reads(9))
+			.saturating_add(T::DbWeight::get().writes(7))
+	}
+	/// Storage: `AssetConversion::Pools` (r:1 w:0)
+	/// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(1224), added: 3699, mode: `MaxEncodedLen`)
+	/// Storage: `System::Account` (r:1 w:1)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	/// Storage: `ForeignAssets::Asset` (r:1 w:1)
+	/// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`)
+	/// Storage: `ForeignAssets::Account` (r:2 w:2)
+	/// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`)
+	/// Storage: `PoolAssets::Asset` (r:1 w:1)
+	/// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)
+	/// Storage: `PoolAssets::Account` (r:2 w:2)
+	/// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)
+	fn add_liquidity() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `1117`
+		// Estimated: `7404`
+		// Minimum execution time: 153_015_000 picoseconds.
+ Weight::from_parts(157_018_000, 0) + .saturating_add(Weight::from_parts(0, 7404)) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(7)) + } + /// Storage: `AssetConversion::Pools` (r:1 w:0) + /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(1224), added: 3699, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Account` (r:2 w:2) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x2433d831722b1f4aeb1666953f1c0e77` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x2433d831722b1f4aeb1666953f1c0e77` (r:1 w:0) + /// Storage: `PoolAssets::Account` (r:1 w:1) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + fn remove_liquidity() -> Weight { + // Proof Size summary in bytes: + // Measured: `1106` + // Estimated: `7404` + // Minimum execution time: 141_726_000 picoseconds. + Weight::from_parts(147_865_000, 0) + .saturating_add(Weight::from_parts(0, 7404)) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(6)) + } + /// Storage: `ForeignAssets::Asset` (r:2 w:2) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Account` (r:4 w:4) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn swap_exact_tokens_for_tokens() -> Weight { + // Proof Size summary in bytes: + // Measured: `1148` + // Estimated: `13818` + // Minimum execution time: 168_619_000 picoseconds. + Weight::from_parts(174_283_000, 0) + .saturating_add(Weight::from_parts(0, 13818)) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(8)) + } + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Asset` (r:2 w:2) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Account` (r:4 w:4) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + fn swap_tokens_for_exact_tokens() -> Weight { + // Proof Size summary in bytes: + // Measured: `1148` + // Estimated: `13818` + // Minimum execution time: 171_565_000 picoseconds. 
+		Weight::from_parts(173_702_000, 0)
+			.saturating_add(Weight::from_parts(0, 13818))
+			.saturating_add(T::DbWeight::get().reads(8))
+			.saturating_add(T::DbWeight::get().writes(8))
+	}
+}
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_assets_foreign.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_assets_foreign.rs
new file mode 100644
index 0000000000000000000000000000000000000000..5148edb0ee9e2c5f97c9c0afe830e0f87f2ac92f
--- /dev/null
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_assets_foreign.rs
@@ -0,0 +1,534 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus. If not, see <https://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `pallet_assets`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2023-06-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// ./target/production/polkadot-parachain
+// benchmark
+// pallet
+// --steps=50
+// --repeat=20
+// --extrinsic=*
+// --execution=wasm
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/var/lib/gitlab-runner/builds/zyw4fam_/0/parity/mirrors/cumulus/.git/.artifacts/bench.json
+// --pallet=pallet_assets
+// --no-storage-info
+// --no-median-slopes
+// --no-min-squares
+// --extrinsic=*
+// --steps=50
+// --repeat=20
+// --json
+// --header=./file_header.txt
+// --output=./parachains/runtimes/assets/asset-hub-rococo/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `pallet_assets`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_assets::WeightInfo for WeightInfo<T> {
+	/// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
+	/// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `ForeignAssets::Asset` (r:1 w:1)
+	/// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`)
+	/// Storage: `System::Account` (r:1 w:1)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	fn create() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `107`
+		// Estimated: `4273`
+		// Minimum execution time: 30_485_000 picoseconds.
+ Weight::from_parts(31_007_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + fn force_create() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `4273` + // Minimum execution time: 12_991_000 picoseconds. + Weight::from_parts(13_304_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + fn start_destroy() -> Weight { + // Proof Size summary in bytes: + // Measured: `276` + // Estimated: `4273` + // Minimum execution time: 15_689_000 picoseconds. + Weight::from_parts(16_063_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: ForeignAssets Asset (r:1 w:1) + /// Proof: ForeignAssets Asset (max_values: None, max_size: Some(808), added: 3283, mode: MaxEncodedLen) + /// Storage: ForeignAssets Account (r:1001 w:1000) + /// Proof: ForeignAssets Account (max_values: None, max_size: Some(732), added: 3207, mode: MaxEncodedLen) + /// Storage: System Account (r:1000 w:1000) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// The range of component `c` is `[0, 1000]`. + /// The range of component `c` is `[0, 1000]`. + fn destroy_accounts(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0 + c * (208 ±0)` + // Estimated: `4273 + c * (3207 ±0)` + // Minimum execution time: 18_533_000 picoseconds. + Weight::from_parts(18_791_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + // Standard Error: 5_059 + .saturating_add(Weight::from_parts(12_049_659, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(c.into()))) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(c.into()))) + .saturating_add(Weight::from_parts(0, 3207).saturating_mul(c.into())) + } + /// Storage: ForeignAssets Asset (r:1 w:1) + /// Proof: ForeignAssets Asset (max_values: None, max_size: Some(808), added: 3283, mode: MaxEncodedLen) + /// Storage: ForeignAssets Approvals (r:1001 w:1000) + /// Proof: ForeignAssets Approvals (max_values: None, max_size: Some(746), added: 3221, mode: MaxEncodedLen) + /// The range of component `a` is `[0, 1000]`. + /// The range of component `a` is `[0, 1000]`. + fn destroy_approvals(a: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `413 + a * (86 ±0)` + // Estimated: `4273 + a * (3221 ±0)` + // Minimum execution time: 20_028_000 picoseconds. 
+ Weight::from_parts(20_148_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + // Standard Error: 3_401 + .saturating_add(Weight::from_parts(13_897_319, 0).saturating_mul(a.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(a.into()))) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(a.into()))) + .saturating_add(Weight::from_parts(0, 3221).saturating_mul(a.into())) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Metadata` (r:1 w:0) + /// Proof: `ForeignAssets::Metadata` (`max_values`: None, `max_size`: Some(738), added: 3213, mode: `MaxEncodedLen`) + fn finish_destroy() -> Weight { + // Proof Size summary in bytes: + // Measured: `242` + // Estimated: `4273` + // Minimum execution time: 15_949_000 picoseconds. + Weight::from_parts(16_241_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Account` (r:1 w:1) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + fn mint() -> Weight { + // Proof Size summary in bytes: + // Measured: `242` + // Estimated: `4273` + // Minimum execution time: 27_156_000 picoseconds. + Weight::from_parts(28_182_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Account` (r:1 w:1) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + fn burn() -> Weight { + // Proof Size summary in bytes: + // Measured: `350` + // Estimated: `4273` + // Minimum execution time: 33_503_000 picoseconds. + Weight::from_parts(33_860_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Account` (r:2 w:2) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `350` + // Estimated: `7404` + // Minimum execution time: 45_065_000 picoseconds. 
+ Weight::from_parts(45_856_000, 0) + .saturating_add(Weight::from_parts(0, 7404)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Account` (r:2 w:2) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn transfer_keep_alive() -> Weight { + // Proof Size summary in bytes: + // Measured: `350` + // Estimated: `7404` + // Minimum execution time: 39_913_000 picoseconds. + Weight::from_parts(40_791_000, 0) + .saturating_add(Weight::from_parts(0, 7404)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Account` (r:2 w:2) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn force_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `350` + // Estimated: `7404` + // Minimum execution time: 45_337_000 picoseconds. + Weight::from_parts(45_980_000, 0) + .saturating_add(Weight::from_parts(0, 7404)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:0) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Account` (r:1 w:1) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + fn freeze() -> Weight { + // Proof Size summary in bytes: + // Measured: `350` + // Estimated: `4273` + // Minimum execution time: 19_012_000 picoseconds. + Weight::from_parts(19_326_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:0) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Account` (r:1 w:1) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + fn thaw() -> Weight { + // Proof Size summary in bytes: + // Measured: `350` + // Estimated: `4273` + // Minimum execution time: 18_656_000 picoseconds. + Weight::from_parts(19_205_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + fn freeze_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `276` + // Estimated: `4273` + // Minimum execution time: 15_440_000 picoseconds. 
+ Weight::from_parts(15_825_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + fn thaw_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `276` + // Estimated: `4273` + // Minimum execution time: 15_465_000 picoseconds. + Weight::from_parts(15_769_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Metadata` (r:1 w:0) + /// Proof: `ForeignAssets::Metadata` (`max_values`: None, `max_size`: Some(738), added: 3213, mode: `MaxEncodedLen`) + fn transfer_ownership() -> Weight { + // Proof Size summary in bytes: + // Measured: `242` + // Estimated: `4273` + // Minimum execution time: 16_579_000 picoseconds. + Weight::from_parts(16_931_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + fn set_team() -> Weight { + // Proof Size summary in bytes: + // Measured: `242` + // Estimated: `4273` + // Minimum execution time: 15_138_000 picoseconds. + Weight::from_parts(15_435_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: ForeignAssets Asset (r:1 w:0) + /// Proof: ForeignAssets Asset (max_values: None, max_size: Some(808), added: 3283, mode: MaxEncodedLen) + /// Storage: ForeignAssets Metadata (r:1 w:1) + /// Proof: ForeignAssets Metadata (max_values: None, max_size: Some(738), added: 3213, mode: MaxEncodedLen) + /// The range of component `n` is `[0, 50]`. + /// The range of component `s` is `[0, 50]`. + /// The range of component `n` is `[0, 50]`. + /// The range of component `s` is `[0, 50]`. + fn set_metadata(_n: u32, _s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `242` + // Estimated: `4273` + // Minimum execution time: 29_846_000 picoseconds. + Weight::from_parts(31_607_649, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:0) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Metadata` (r:1 w:1) + /// Proof: `ForeignAssets::Metadata` (`max_values`: None, `max_size`: Some(738), added: 3213, mode: `MaxEncodedLen`) + fn clear_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `406` + // Estimated: `4273` + // Minimum execution time: 30_582_000 picoseconds. 
+ Weight::from_parts(31_008_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: ForeignAssets Asset (r:1 w:0) + /// Proof: ForeignAssets Asset (max_values: None, max_size: Some(808), added: 3283, mode: MaxEncodedLen) + /// Storage: ForeignAssets Metadata (r:1 w:1) + /// Proof: ForeignAssets Metadata (max_values: None, max_size: Some(738), added: 3213, mode: MaxEncodedLen) + /// The range of component `n` is `[0, 50]`. + /// The range of component `s` is `[0, 50]`. + /// The range of component `n` is `[0, 50]`. + /// The range of component `s` is `[0, 50]`. + fn force_set_metadata(_n: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `81` + // Estimated: `4273` + // Minimum execution time: 14_186_000 picoseconds. + Weight::from_parts(14_717_332, 0) + .saturating_add(Weight::from_parts(0, 4273)) + // Standard Error: 517 + .saturating_add(Weight::from_parts(2_595, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:0) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Metadata` (r:1 w:1) + /// Proof: `ForeignAssets::Metadata` (`max_values`: None, `max_size`: Some(738), added: 3213, mode: `MaxEncodedLen`) + fn force_clear_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `406` + // Estimated: `4273` + // Minimum execution time: 29_499_000 picoseconds. + Weight::from_parts(29_918_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + fn force_asset_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `242` + // Estimated: `4273` + // Minimum execution time: 13_815_000 picoseconds. + Weight::from_parts(14_138_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Approvals` (r:1 w:1) + /// Proof: `ForeignAssets::Approvals` (`max_values`: None, `max_size`: Some(746), added: 3221, mode: `MaxEncodedLen`) + fn approve_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `276` + // Estimated: `4273` + // Minimum execution time: 33_029_000 picoseconds. 
+ Weight::from_parts(33_524_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Approvals` (r:1 w:1) + /// Proof: `ForeignAssets::Approvals` (`max_values`: None, `max_size`: Some(746), added: 3221, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Account` (r:2 w:2) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn transfer_approved() -> Weight { + // Proof Size summary in bytes: + // Measured: `520` + // Estimated: `7404` + // Minimum execution time: 63_205_000 picoseconds. + Weight::from_parts(64_078_000, 0) + .saturating_add(Weight::from_parts(0, 7404)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Approvals` (r:1 w:1) + /// Proof: `ForeignAssets::Approvals` (`max_values`: None, `max_size`: Some(746), added: 3221, mode: `MaxEncodedLen`) + fn cancel_approval() -> Weight { + // Proof Size summary in bytes: + // Measured: `446` + // Estimated: `4273` + // Minimum execution time: 34_948_000 picoseconds. + Weight::from_parts(35_484_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Approvals` (r:1 w:1) + /// Proof: `ForeignAssets::Approvals` (`max_values`: None, `max_size`: Some(746), added: 3221, mode: `MaxEncodedLen`) + fn force_cancel_approval() -> Weight { + // Proof Size summary in bytes: + // Measured: `446` + // Estimated: `4273` + // Minimum execution time: 35_722_000 picoseconds. + Weight::from_parts(36_266_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + fn set_min_balance() -> Weight { + // Proof Size summary in bytes: + // Measured: `242` + // Estimated: `4273` + // Minimum execution time: 15_855_000 picoseconds. 
+ Weight::from_parts(16_182_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `ForeignAssets::Account` (r:1 w:1) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn touch() -> Weight { + // Proof Size summary in bytes: + // Measured: `345` + // Estimated: `4273` + // Minimum execution time: 34_984_000 picoseconds. + Weight::from_parts(35_512_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `ForeignAssets::Account` (r:1 w:1) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + fn touch_other() -> Weight { + // Proof Size summary in bytes: + // Measured: `242` + // Estimated: `4273` + // Minimum execution time: 33_041_000 picoseconds. + Weight::from_parts(34_124_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `ForeignAssets::Account` (r:1 w:1) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn refund() -> Weight { + // Proof Size summary in bytes: + // Measured: `471` + // Estimated: `4273` + // Minimum execution time: 31_728_000 picoseconds. + Weight::from_parts(32_012_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `ForeignAssets::Account` (r:1 w:1) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + fn refund_other() -> Weight { + // Proof Size summary in bytes: + // Measured: `401` + // Estimated: `4273` + // Minimum execution time: 29_432_000 picoseconds. 
+ Weight::from_parts(29_968_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:0) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Account` (r:1 w:1) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + fn block() -> Weight { + // Proof Size summary in bytes: + // Measured: `350` + // Estimated: `4273` + // Minimum execution time: 18_827_000 picoseconds. + Weight::from_parts(19_172_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_assets_local.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_assets_local.rs new file mode 100644 index 0000000000000000000000000000000000000000..4ee235830aed23f9b6c6743a90aa5095bf6f9ed9 --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_assets_local.rs @@ -0,0 +1,531 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_assets` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=asset-hub-rococo-dev +// --wasm-execution=compiled +// --pallet=pallet_assets +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/assets/asset-hub-rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_assets`. 
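+/// This is the `pallet_assets` instance behind the local `Assets` storage prefix; the
+/// `pallet_assets_foreign.rs` file above covers the `ForeignAssets` instance. Both files
+/// implement the same `pallet_assets::WeightInfo` trait.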
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_assets::WeightInfo for WeightInfo<T> {
+	/// Storage: `Assets::Asset` (r:1 w:1)
+	/// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)
+	/// Storage: `System::Account` (r:1 w:1)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	fn create() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `109`
+		// Estimated: `3675`
+		// Minimum execution time: 26_510_000 picoseconds.
+		Weight::from_parts(27_332_000, 0)
+			.saturating_add(Weight::from_parts(0, 3675))
+			.saturating_add(T::DbWeight::get().reads(2))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
+	/// Storage: `Assets::Asset` (r:1 w:1)
+	/// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)
+	fn force_create() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `6`
+		// Estimated: `3675`
+		// Minimum execution time: 10_899_000 picoseconds.
+		Weight::from_parts(11_395_000, 0)
+			.saturating_add(Weight::from_parts(0, 3675))
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
+	/// Storage: `Assets::Asset` (r:1 w:1)
+	/// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)
+	fn start_destroy() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `277`
+		// Estimated: `3675`
+		// Minimum execution time: 13_593_000 picoseconds.
+		Weight::from_parts(14_108_000, 0)
+			.saturating_add(Weight::from_parts(0, 3675))
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
+	/// Storage: `Assets::Asset` (r:1 w:1)
+	/// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)
+	/// Storage: `Assets::Account` (r:1001 w:1000)
+	/// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)
+	/// Storage: `System::Account` (r:1000 w:1000)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	/// The range of component `c` is `[0, 1000]`.
+	/// The range of component `c` is `[0, 1000]`.
+	fn destroy_accounts(c: u32, ) -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `0 + c * (208 ±0)`
+		// Estimated: `3675 + c * (2609 ±0)`
+		// Minimum execution time: 16_216_000 picoseconds.
+		Weight::from_parts(16_636_000, 0)
+			.saturating_add(Weight::from_parts(0, 3675))
+			// Standard Error: 9_346
+			.saturating_add(Weight::from_parts(15_306_152, 0).saturating_mul(c.into()))
+			.saturating_add(T::DbWeight::get().reads(2))
+			.saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(c.into())))
+			.saturating_add(T::DbWeight::get().writes(1))
+			.saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(c.into())))
+			.saturating_add(Weight::from_parts(0, 2609).saturating_mul(c.into()))
+	}
+	/// Storage: `Assets::Asset` (r:1 w:1)
+	/// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)
+	/// Storage: `Assets::Approvals` (r:1001 w:1000)
+	/// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`)
+	/// The range of component `a` is `[0, 1000]`.
+	/// The range of component `a` is `[0, 1000]`.
+ fn destroy_approvals(a: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `414 + a * (86 ±0)` + // Estimated: `3675 + a * (2623 ±0)` + // Minimum execution time: 16_745_000 picoseconds. + Weight::from_parts(17_247_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(Weight::from_parts(15_634_963, 0).saturating_mul(a.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(a.into()))) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(Weight::from_parts(0, 2623).saturating_mul(a.into())) + } + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Metadata` (r:1 w:0) + /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) + fn finish_destroy() -> Weight { + // Proof Size summary in bytes: + // Measured: `243` + // Estimated: `3675` + // Minimum execution time: 13_650_000 picoseconds. + Weight::from_parts(14_721_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + fn mint() -> Weight { + // Proof Size summary in bytes: + // Measured: `243` + // Estimated: `3675` + // Minimum execution time: 24_121_000 picoseconds. + Weight::from_parts(25_023_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + fn burn() -> Weight { + // Proof Size summary in bytes: + // Measured: `351` + // Estimated: `3675` + // Minimum execution time: 31_414_000 picoseconds. + Weight::from_parts(32_235_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `351` + // Estimated: `6208` + // Minimum execution time: 43_114_000 picoseconds. 
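+ // Explanatory note (not emitted by the benchmark CLI): in the expression below,
+ // `Weight::from_parts(ref_time, proof_size)` starts from the benchmarked execution
+ // time in picoseconds, the `from_parts(0, 6208)` term adds the proof-size bound
+ // from the "Estimated" line above, and each `T::DbWeight::get().reads(n)` /
+ // `.writes(n)` term charges the runtime's configured per-read/per-write database
+ // weight for the storage accesses listed in the doc comments.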
+ Weight::from_parts(44_106_000, 0) + .saturating_add(Weight::from_parts(0, 6208)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn transfer_keep_alive() -> Weight { + // Proof Size summary in bytes: + // Measured: `351` + // Estimated: `6208` + // Minimum execution time: 37_954_000 picoseconds. + Weight::from_parts(38_772_000, 0) + .saturating_add(Weight::from_parts(0, 6208)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn force_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `351` + // Estimated: `6208` + // Minimum execution time: 43_051_000 picoseconds. + Weight::from_parts(44_003_000, 0) + .saturating_add(Weight::from_parts(0, 6208)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Assets::Asset` (r:1 w:0) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + fn freeze() -> Weight { + // Proof Size summary in bytes: + // Measured: `351` + // Estimated: `3675` + // Minimum execution time: 17_048_000 picoseconds. + Weight::from_parts(17_614_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Assets::Asset` (r:1 w:0) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + fn thaw() -> Weight { + // Proof Size summary in bytes: + // Measured: `351` + // Estimated: `3675` + // Minimum execution time: 16_705_000 picoseconds. + Weight::from_parts(17_581_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + fn freeze_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `277` + // Estimated: `3675` + // Minimum execution time: 13_284_000 picoseconds. 
+ Weight::from_parts(13_735_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + fn thaw_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `277` + // Estimated: `3675` + // Minimum execution time: 13_030_000 picoseconds. + Weight::from_parts(13_417_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Metadata` (r:1 w:0) + /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) + fn transfer_ownership() -> Weight { + // Proof Size summary in bytes: + // Measured: `243` + // Estimated: `3675` + // Minimum execution time: 14_174_000 picoseconds. + Weight::from_parts(14_660_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + fn set_team() -> Weight { + // Proof Size summary in bytes: + // Measured: `243` + // Estimated: `3675` + // Minimum execution time: 12_737_000 picoseconds. + Weight::from_parts(13_172_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Assets::Asset` (r:1 w:0) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Metadata` (r:1 w:1) + /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 50]`. + /// The range of component `s` is `[0, 50]`. + /// The range of component `n` is `[0, 50]`. + /// The range of component `s` is `[0, 50]`. + fn set_metadata(n: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `243` + // Estimated: `3675` + // Minimum execution time: 27_707_000 picoseconds. + Weight::from_parts(29_036_880, 0) + .saturating_add(Weight::from_parts(0, 3675)) + // Standard Error: 688 + .saturating_add(Weight::from_parts(2_426, 0).saturating_mul(n.into())) + // Standard Error: 688 + .saturating_add(Weight::from_parts(776, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Assets::Asset` (r:1 w:0) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Metadata` (r:1 w:1) + /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) + fn clear_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `407` + // Estimated: `3675` + // Minimum execution time: 28_514_000 picoseconds. 
+ Weight::from_parts(29_216_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Assets::Asset` (r:1 w:0) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Metadata` (r:1 w:1) + /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 50]`. + /// The range of component `s` is `[0, 50]`. + /// The range of component `n` is `[0, 50]`. + /// The range of component `s` is `[0, 50]`. + fn force_set_metadata(n: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `82` + // Estimated: `3675` + // Minimum execution time: 12_452_000 picoseconds. + Weight::from_parts(13_095_356, 0) + .saturating_add(Weight::from_parts(0, 3675)) + // Standard Error: 275 + .saturating_add(Weight::from_parts(826, 0).saturating_mul(n.into())) + // Standard Error: 275 + .saturating_add(Weight::from_parts(808, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Assets::Asset` (r:1 w:0) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Metadata` (r:1 w:1) + /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) + fn force_clear_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `407` + // Estimated: `3675` + // Minimum execution time: 28_181_000 picoseconds. + Weight::from_parts(29_050_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + fn force_asset_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `243` + // Estimated: `3675` + // Minimum execution time: 12_253_000 picoseconds. + Weight::from_parts(12_545_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Approvals` (r:1 w:1) + /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) + fn approve_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `277` + // Estimated: `3675` + // Minimum execution time: 31_084_000 picoseconds. 
+ Weight::from_parts(32_052_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Approvals` (r:1 w:1) + /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn transfer_approved() -> Weight { + // Proof Size summary in bytes: + // Measured: `521` + // Estimated: `6208` + // Minimum execution time: 61_756_000 picoseconds. + Weight::from_parts(62_740_000, 0) + .saturating_add(Weight::from_parts(0, 6208)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Approvals` (r:1 w:1) + /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) + fn cancel_approval() -> Weight { + // Proof Size summary in bytes: + // Measured: `447` + // Estimated: `3675` + // Minimum execution time: 33_370_000 picoseconds. + Weight::from_parts(34_127_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Approvals` (r:1 w:1) + /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) + fn force_cancel_approval() -> Weight { + // Proof Size summary in bytes: + // Measured: `447` + // Estimated: `3675` + // Minimum execution time: 33_753_000 picoseconds. + Weight::from_parts(34_613_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + fn set_min_balance() -> Weight { + // Proof Size summary in bytes: + // Measured: `243` + // Estimated: `3675` + // Minimum execution time: 13_508_000 picoseconds. 
+ Weight::from_parts(13_997_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn touch() -> Weight { + // Proof Size summary in bytes: + // Measured: `346` + // Estimated: `3675` + // Minimum execution time: 32_578_000 picoseconds. + Weight::from_parts(33_675_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + fn touch_other() -> Weight { + // Proof Size summary in bytes: + // Measured: `243` + // Estimated: `3675` + // Minimum execution time: 30_768_000 picoseconds. + Weight::from_parts(31_710_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn refund() -> Weight { + // Proof Size summary in bytes: + // Measured: `472` + // Estimated: `3675` + // Minimum execution time: 30_028_000 picoseconds. + Weight::from_parts(30_793_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + fn refund_other() -> Weight { + // Proof Size summary in bytes: + // Measured: `402` + // Estimated: `3675` + // Minimum execution time: 28_354_000 picoseconds. + Weight::from_parts(29_097_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Assets::Asset` (r:1 w:0) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + fn block() -> Weight { + // Proof Size summary in bytes: + // Measured: `351` + // Estimated: `3675` + // Minimum execution time: 16_607_000 picoseconds. 
+ Weight::from_parts(17_433_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_assets_pool.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_assets_pool.rs new file mode 100644 index 0000000000000000000000000000000000000000..df7ad2c633867aaf5fea7c737ed926cccb5e2c80 --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_assets_pool.rs @@ -0,0 +1,531 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_assets` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/cumulus/.git/.artifacts/bench.json +// --pallet=pallet_assets +// --chain=asset-hub-rococo-dev +// --header=./file_header.txt +// --output=./parachains/runtimes/assets/asset-hub-rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_assets`. +pub struct WeightInfo(PhantomData); +impl pallet_assets::WeightInfo for WeightInfo { + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + fn create() -> Weight { + // Proof Size summary in bytes: + // Measured: `42` + // Estimated: `3675` + // Minimum execution time: 11_591_000 picoseconds. + Weight::from_parts(11_901_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + fn force_create() -> Weight { + // Proof Size summary in bytes: + // Measured: `42` + // Estimated: `3675` + // Minimum execution time: 11_184_000 picoseconds. 
+ Weight::from_parts(11_640_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + fn start_destroy() -> Weight { + // Proof Size summary in bytes: + // Measured: `314` + // Estimated: `3675` + // Minimum execution time: 13_809_000 picoseconds. + Weight::from_parts(14_226_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:1001 w:1000) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1000 w:1000) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `c` is `[0, 1000]`. + /// The range of component `c` is `[0, 1000]`. + /// The range of component `c` is `[0, 1000]`. + fn destroy_accounts(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0 + c * (208 ±0)` + // Estimated: `3675 + c * (2609 ±0)` + // Minimum execution time: 16_439_000 picoseconds. + Weight::from_parts(16_743_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + // Standard Error: 4_792 + .saturating_add(Weight::from_parts(14_463_991, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(c.into()))) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(c.into()))) + .saturating_add(Weight::from_parts(0, 2609).saturating_mul(c.into())) + } + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Approvals` (r:1001 w:1000) + /// Proof: `PoolAssets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 1000]`. + /// The range of component `a` is `[0, 1000]`. + /// The range of component `a` is `[0, 1000]`. + fn destroy_approvals(a: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `451 + a * (86 ±0)` + // Estimated: `3675 + a * (2623 ±0)` + // Minimum execution time: 17_218_000 picoseconds. 
+ Weight::from_parts(17_585_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + // Standard Error: 2_056 + .saturating_add(Weight::from_parts(5_323_866, 0).saturating_mul(a.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(a.into()))) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(a.into()))) + .saturating_add(Weight::from_parts(0, 2623).saturating_mul(a.into())) + } + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Metadata` (r:1 w:0) + /// Proof: `PoolAssets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) + fn finish_destroy() -> Weight { + // Proof Size summary in bytes: + // Measured: `280` + // Estimated: `3675` + // Minimum execution time: 13_848_000 picoseconds. + Weight::from_parts(14_325_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:1 w:1) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + fn mint() -> Weight { + // Proof Size summary in bytes: + // Measured: `280` + // Estimated: `3675` + // Minimum execution time: 24_904_000 picoseconds. + Weight::from_parts(25_607_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:1 w:1) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + fn burn() -> Weight { + // Proof Size summary in bytes: + // Measured: `388` + // Estimated: `3675` + // Minimum execution time: 31_477_000 picoseconds. + Weight::from_parts(32_338_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:2 w:2) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `388` + // Estimated: `6208` + // Minimum execution time: 42_994_000 picoseconds. 
+ Weight::from_parts(44_041_000, 0) + .saturating_add(Weight::from_parts(0, 6208)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:2 w:2) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn transfer_keep_alive() -> Weight { + // Proof Size summary in bytes: + // Measured: `388` + // Estimated: `6208` + // Minimum execution time: 37_551_000 picoseconds. + Weight::from_parts(38_648_000, 0) + .saturating_add(Weight::from_parts(0, 6208)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:2 w:2) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn force_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `388` + // Estimated: `6208` + // Minimum execution time: 42_829_000 picoseconds. + Weight::from_parts(44_029_000, 0) + .saturating_add(Weight::from_parts(0, 6208)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:0) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:1 w:1) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + fn freeze() -> Weight { + // Proof Size summary in bytes: + // Measured: `388` + // Estimated: `3675` + // Minimum execution time: 17_304_000 picoseconds. + Weight::from_parts(17_782_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:0) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:1 w:1) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + fn thaw() -> Weight { + // Proof Size summary in bytes: + // Measured: `388` + // Estimated: `3675` + // Minimum execution time: 17_040_000 picoseconds. + Weight::from_parts(17_698_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + fn freeze_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `314` + // Estimated: `3675` + // Minimum execution time: 13_238_000 picoseconds. 
+ Weight::from_parts(13_810_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + fn thaw_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `314` + // Estimated: `3675` + // Minimum execution time: 13_034_000 picoseconds. + Weight::from_parts(13_603_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Metadata` (r:1 w:0) + /// Proof: `PoolAssets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) + fn transfer_ownership() -> Weight { + // Proof Size summary in bytes: + // Measured: `280` + // Estimated: `3675` + // Minimum execution time: 14_357_000 picoseconds. + Weight::from_parts(14_774_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + fn set_team() -> Weight { + // Proof Size summary in bytes: + // Measured: `280` + // Estimated: `3675` + // Minimum execution time: 13_040_000 picoseconds. + Weight::from_parts(13_616_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:0) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Metadata` (r:1 w:1) + /// Proof: `PoolAssets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 50]`. + /// The range of component `s` is `[0, 50]`. + /// The range of component `n` is `[0, 50]`. + /// The range of component `s` is `[0, 50]`. + /// The range of component `n` is `[0, 50]`. + /// The range of component `s` is `[0, 50]`. + fn set_metadata(n: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `280` + // Estimated: `3675` + // Minimum execution time: 15_274_000 picoseconds. + Weight::from_parts(16_096_881, 0) + .saturating_add(Weight::from_parts(0, 3675)) + // Standard Error: 239 + .saturating_add(Weight::from_parts(1_631, 0).saturating_mul(n.into())) + // Standard Error: 239 + .saturating_add(Weight::from_parts(2_334, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:0) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Metadata` (r:1 w:1) + /// Proof: `PoolAssets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) + fn clear_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `444` + // Estimated: `3675` + // Minimum execution time: 15_900_000 picoseconds. 
+ Weight::from_parts(16_526_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:0) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Metadata` (r:1 w:1) + /// Proof: `PoolAssets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 50]`. + /// The range of component `s` is `[0, 50]`. + /// The range of component `n` is `[0, 50]`. + /// The range of component `s` is `[0, 50]`. + /// The range of component `n` is `[0, 50]`. + /// The range of component `s` is `[0, 50]`. + fn force_set_metadata(n: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `119` + // Estimated: `3675` + // Minimum execution time: 13_391_000 picoseconds. + Weight::from_parts(14_047_176, 0) + .saturating_add(Weight::from_parts(0, 3675)) + // Standard Error: 172 + .saturating_add(Weight::from_parts(2_617, 0).saturating_mul(n.into())) + // Standard Error: 172 + .saturating_add(Weight::from_parts(2_081, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:0) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Metadata` (r:1 w:1) + /// Proof: `PoolAssets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) + fn force_clear_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `444` + // Estimated: `3675` + // Minimum execution time: 15_794_000 picoseconds. + Weight::from_parts(16_279_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + fn force_asset_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `280` + // Estimated: `3675` + // Minimum execution time: 12_538_000 picoseconds. + Weight::from_parts(13_080_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Approvals` (r:1 w:1) + /// Proof: `PoolAssets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) + fn approve_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `314` + // Estimated: `3675` + // Minimum execution time: 18_991_000 picoseconds. 
+ Weight::from_parts(19_812_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Approvals` (r:1 w:1) + /// Proof: `PoolAssets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:2 w:2) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn transfer_approved() -> Weight { + // Proof Size summary in bytes: + // Measured: `558` + // Estimated: `6208` + // Minimum execution time: 50_336_000 picoseconds. + Weight::from_parts(51_441_000, 0) + .saturating_add(Weight::from_parts(0, 6208)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Approvals` (r:1 w:1) + /// Proof: `PoolAssets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) + fn cancel_approval() -> Weight { + // Proof Size summary in bytes: + // Measured: `484` + // Estimated: `3675` + // Minimum execution time: 21_195_000 picoseconds. + Weight::from_parts(21_946_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Approvals` (r:1 w:1) + /// Proof: `PoolAssets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) + fn force_cancel_approval() -> Weight { + // Proof Size summary in bytes: + // Measured: `484` + // Estimated: `3675` + // Minimum execution time: 21_568_000 picoseconds. + Weight::from_parts(22_366_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + fn set_min_balance() -> Weight { + // Proof Size summary in bytes: + // Measured: `280` + // Estimated: `3675` + // Minimum execution time: 13_690_000 picoseconds. + Weight::from_parts(14_086_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PoolAssets::Account` (r:1 w:1) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + fn touch() -> Weight { + // Proof Size summary in bytes: + // Measured: `280` + // Estimated: `3675` + // Minimum execution time: 18_240_000 picoseconds. 
+ Weight::from_parts(19_000_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PoolAssets::Account` (r:1 w:1) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + fn touch_other() -> Weight { + // Proof Size summary in bytes: + // Measured: `280` + // Estimated: `3675` + // Minimum execution time: 18_469_000 picoseconds. + Weight::from_parts(19_040_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PoolAssets::Account` (r:1 w:1) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + fn refund() -> Weight { + // Proof Size summary in bytes: + // Measured: `406` + // Estimated: `3675` + // Minimum execution time: 14_633_000 picoseconds. + Weight::from_parts(15_296_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PoolAssets::Account` (r:1 w:1) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + fn refund_other() -> Weight { + // Proof Size summary in bytes: + // Measured: `439` + // Estimated: `3675` + // Minimum execution time: 14_751_000 picoseconds. + Weight::from_parts(15_312_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:0) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:1 w:1) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + fn block() -> Weight { + // Proof Size summary in bytes: + // Measured: `388` + // Estimated: `3675` + // Minimum execution time: 16_930_000 picoseconds. + Weight::from_parts(17_653_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_balances.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_balances.rs new file mode 100644 index 0000000000000000000000000000000000000000..d373d0f8e651137360382a995db368db8eac1fed --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_balances.rs @@ -0,0 +1,153 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. 
+ +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_balances` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=asset-hub-rococo-dev +// --wasm-execution=compiled +// --pallet=pallet_balances +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/assets/asset-hub-rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_balances`. +pub struct WeightInfo(PhantomData); +impl pallet_balances::WeightInfo for WeightInfo { + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn transfer_allow_death() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `3593` + // Minimum execution time: 55_040_000 picoseconds. + Weight::from_parts(56_106_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn transfer_keep_alive() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `3593` + // Minimum execution time: 41_342_000 picoseconds. + Weight::from_parts(41_890_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn force_set_balance_creating() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `3593` + // Minimum execution time: 14_723_000 picoseconds. 
+ Weight::from_parts(15_182_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn force_set_balance_killing() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `3593` + // Minimum execution time: 22_073_000 picoseconds. + Weight::from_parts(22_638_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn force_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `6196` + // Minimum execution time: 57_265_000 picoseconds. + Weight::from_parts(58_222_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn transfer_all() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `3593` + // Minimum execution time: 51_485_000 picoseconds. + Weight::from_parts(52_003_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn force_unreserve() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `3593` + // Minimum execution time: 17_460_000 picoseconds. + Weight::from_parts(17_849_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:999 w:999) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `u` is `[1, 1000]`. + fn upgrade_accounts(u: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0 + u * (136 ±0)` + // Estimated: `990 + u * (2603 ±0)` + // Minimum execution time: 17_259_000 picoseconds. + Weight::from_parts(17_478_000, 0) + .saturating_add(Weight::from_parts(0, 990)) + // Standard Error: 16_756 + .saturating_add(Weight::from_parts(15_291_954, 0).saturating_mul(u.into())) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(u.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(u.into()))) + .saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into())) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_collator_selection.rs new file mode 100644 index 0000000000000000000000000000000000000000..aeda7bbbb6a7e47d8d1e1f403c7419439f7f5953 --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_collator_selection.rs @@ -0,0 +1,249 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. 
+ +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_collator_selection` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=asset-hub-rococo-dev +// --wasm-execution=compiled +// --pallet=pallet_collator_selection +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/assets/asset-hub-rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_collator_selection`. +pub struct WeightInfo(PhantomData); +impl pallet_collator_selection::WeightInfo for WeightInfo { + /// Storage: `Session::NextKeys` (r:20 w:0) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `CollatorSelection::Invulnerables` (r:0 w:1) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// The range of component `b` is `[1, 20]`. + fn set_invulnerables(b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `163 + b * (79 ±0)` + // Estimated: `1154 + b * (2555 ±0)` + // Minimum execution time: 15_408_000 picoseconds. + Weight::from_parts(13_068_592, 0) + .saturating_add(Weight::from_parts(0, 1154)) + // Standard Error: 7_395 + .saturating_add(Weight::from_parts(3_219_916, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(Weight::from_parts(0, 2555).saturating_mul(b.into())) + } + /// Storage: `Session::NextKeys` (r:1 w:0) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `CollatorSelection::Invulnerables` (r:1 w:1) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::Candidates` (r:1 w:1) + /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `b` is `[1, 19]`. 
+ /// The range of component `c` is `[1, 99]`. + fn add_invulnerable(b: u32, c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `756 + b * (32 ±0) + c * (53 ±0)` + // Estimated: `6287 + b * (37 ±0) + c * (53 ±0)` + // Minimum execution time: 49_692_000 picoseconds. + Weight::from_parts(51_768_986, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 18_404 + .saturating_add(Weight::from_parts(55_676, 0).saturating_mul(b.into())) + // Standard Error: 3_488 + .saturating_add(Weight::from_parts(184_343, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(Weight::from_parts(0, 37).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(0, 53).saturating_mul(c.into())) + } + /// Storage: `CollatorSelection::Candidates` (r:1 w:0) + /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::Invulnerables` (r:1 w:1) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// The range of component `b` is `[5, 20]`. + fn remove_invulnerable(b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `119 + b * (32 ±0)` + // Estimated: `6287` + // Minimum execution time: 16_486_000 picoseconds. + Weight::from_parts(16_646_017, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 3_230 + .saturating_add(Weight::from_parts(148_941, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `CollatorSelection::DesiredCandidates` (r:0 w:1) + /// Proof: `CollatorSelection::DesiredCandidates` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn set_desired_candidates() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_806_000 picoseconds. + Weight::from_parts(8_002_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) + /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_937_000 picoseconds. 
+ Weight::from_parts(8_161_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `CollatorSelection::Candidates` (r:1 w:1) + /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::DesiredCandidates` (r:1 w:0) + /// Proof: `CollatorSelection::DesiredCandidates` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// Storage: `Session::NextKeys` (r:1 w:0) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `CollatorSelection::CandidacyBond` (r:1 w:0) + /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) + /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + /// The range of component `c` is `[1, 99]`. + fn register_as_candidate(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `736 + c * (52 ±0)` + // Estimated: `6287 + c * (54 ±0)` + // Minimum execution time: 42_805_000 picoseconds. + Weight::from_parts(45_979_502, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_336 + .saturating_add(Weight::from_parts(221_049, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(Weight::from_parts(0, 54).saturating_mul(c.into())) + } + /// Storage: `CollatorSelection::Candidates` (r:1 w:1) + /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) + /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + /// The range of component `c` is `[4, 100]`. + fn leave_intent(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn update_bond(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. 
+ Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn take_candidate_slot(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) + /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn note_author() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `6196` + // Minimum execution time: 46_989_000 picoseconds. + Weight::from_parts(48_151_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `CollatorSelection::Candidates` (r:1 w:0) + /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LastAuthoredBlock` (r:100 w:0) + /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:97 w:97) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `r` is `[1, 100]`. + /// The range of component `c` is `[1, 100]`. + fn new_session(r: u32, c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `2243 + c * (97 ±0) + r * (112 ±0)` + // Estimated: `6287 + c * (2519 ±0) + r * (2603 ±0)` + // Minimum execution time: 17_547_000 picoseconds. 
+ Weight::from_parts(17_854_000, 0)
+ .saturating_add(Weight::from_parts(0, 6287))
+ // Standard Error: 370_637
+ .saturating_add(Weight::from_parts(15_798_857, 0).saturating_mul(c.into()))
+ .saturating_add(T::DbWeight::get().reads(4))
+ .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(c.into())))
+ .saturating_add(T::DbWeight::get().writes(1))
+ .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(c.into())))
+ .saturating_add(Weight::from_parts(0, 2519).saturating_mul(c.into()))
+ .saturating_add(Weight::from_parts(0, 2603).saturating_mul(r.into()))
+ }
+}
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_message_queue.rs
new file mode 100644
index 0000000000000000000000000000000000000000..45531ccfa797c52ead00833f765347f1282816c7
--- /dev/null
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_message_queue.rs
@@ -0,0 +1,179 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0

+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Autogenerated weights for `pallet_message_queue`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2023-03-24, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K`
+//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("statemine-dev"), DB CACHE: 1024
+
+// Executed Command:
+// ./target/release/polkadot-parachain
+// benchmark
+// pallet
+// --chain
+// statemine-dev
+// --pallet
+// pallet_message_queue
+// --extrinsic
+// *
+// --execution
+// wasm
+// --wasm-execution
+// compiled
+// --output
+// parachains/runtimes/assets/statemine/src/weights
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+
+use frame_support::{traits::Get, weights::Weight};
+use sp_std::marker::PhantomData;
+
+/// Weight functions for `pallet_message_queue`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_message_queue::WeightInfo for WeightInfo<T> {
+ /// Storage: MessageQueue ServiceHead (r:1 w:0)
+ /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen)
+ /// Storage: MessageQueue BookStateFor (r:2 w:2)
+ /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen)
+ fn ready_ring_knit() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `189`
+ // Estimated: `7534`
+ // Minimum execution time: 13_668_000 picoseconds.
+ Weight::from_parts(13_668_000, 0) + .saturating_add(Weight::from_parts(0, 7534)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: MessageQueue BookStateFor (r:2 w:2) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + fn ready_ring_unknit() -> Weight { + // Proof Size summary in bytes: + // Measured: `184` + // Estimated: `7534` + // Minimum execution time: 11_106_000 picoseconds. + Weight::from_parts(11_106_000, 0) + .saturating_add(Weight::from_parts(0, 7534)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn service_queue_base() -> Weight { + // Proof Size summary in bytes: + // Measured: `6` + // Estimated: `3517` + // Minimum execution time: 4_921_000 picoseconds. + Weight::from_parts(4_921_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn service_page_base_completion() -> Weight { + // Proof Size summary in bytes: + // Measured: `72` + // Estimated: `69050` + // Minimum execution time: 6_879_000 picoseconds. + Weight::from_parts(6_879_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn service_page_base_no_completion() -> Weight { + // Proof Size summary in bytes: + // Measured: `72` + // Estimated: `69050` + // Minimum execution time: 7_564_000 picoseconds. + Weight::from_parts(7_564_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn service_page_item() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 59_963_000 picoseconds. + Weight::from_parts(59_963_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: MessageQueue BookStateFor (r:1 w:0) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn bump_service_head() -> Weight { + // Proof Size summary in bytes: + // Measured: `99` + // Estimated: `5007` + // Minimum execution time: 7_200_000 picoseconds. 
+ Weight::from_parts(7_200_000, 0)
+ .saturating_add(Weight::from_parts(0, 5007))
+ .saturating_add(T::DbWeight::get().reads(2))
+ .saturating_add(T::DbWeight::get().writes(1))
+ }
+ /// Storage: MessageQueue BookStateFor (r:1 w:1)
+ /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen)
+ /// Storage: MessageQueue Pages (r:1 w:1)
+ /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen)
+ fn reap_page() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `65667`
+ // Estimated: `72567`
+ // Minimum execution time: 41_366_000 picoseconds.
+ Weight::from_parts(41_366_000, 0)
+ .saturating_add(Weight::from_parts(0, 72567))
+ .saturating_add(T::DbWeight::get().reads(2))
+ .saturating_add(T::DbWeight::get().writes(2))
+ }
+ /// Storage: MessageQueue BookStateFor (r:1 w:1)
+ /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen)
+ /// Storage: MessageQueue Pages (r:1 w:1)
+ /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen)
+ fn execute_overweight_page_removed() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `65667`
+ // Estimated: `72567`
+ // Minimum execution time: 60_538_000 picoseconds.
+ Weight::from_parts(60_538_000, 0)
+ .saturating_add(Weight::from_parts(0, 72567))
+ .saturating_add(T::DbWeight::get().reads(2))
+ .saturating_add(T::DbWeight::get().writes(2))
+ }
+ /// Storage: MessageQueue BookStateFor (r:1 w:1)
+ /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen)
+ /// Storage: MessageQueue Pages (r:1 w:1)
+ /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen)
+ fn execute_overweight_page_updated() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `65667`
+ // Estimated: `72567`
+ // Minimum execution time: 73_665_000 picoseconds.
+ Weight::from_parts(73_665_000, 0)
+ .saturating_add(Weight::from_parts(0, 72567))
+ .saturating_add(T::DbWeight::get().reads(2))
+ .saturating_add(T::DbWeight::get().writes(2))
+ }
+}
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_multisig.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_multisig.rs
new file mode 100644
index 0000000000000000000000000000000000000000..cf9c523f6571f48b37a300295d162960d82528bd
--- /dev/null
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_multisig.rs
@@ -0,0 +1,165 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus. If not, see <https://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `pallet_multisig`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// ./target/production/polkadot-parachain
+// benchmark
+// pallet
+// --chain=asset-hub-rococo-dev
+// --wasm-execution=compiled
+// --pallet=pallet_multisig
+// --no-storage-info
+// --no-median-slopes
+// --no-min-squares
+// --extrinsic=*
+// --steps=50
+// --repeat=20
+// --json
+// --header=./file_header.txt
+// --output=./parachains/runtimes/assets/asset-hub-rococo/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `pallet_multisig`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
+ /// The range of component `z` is `[0, 10000]`.
+ fn as_multi_threshold_1(z: u32, ) -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `0`
+ // Estimated: `0`
+ // Minimum execution time: 13_714_000 picoseconds.
+ Weight::from_parts(14_440_231, 0)
+ .saturating_add(Weight::from_parts(0, 0))
+ // Standard Error: 5
+ .saturating_add(Weight::from_parts(598, 0).saturating_mul(z.into()))
+ }
+ /// Storage: `Multisig::Multisigs` (r:1 w:1)
+ /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
+ /// The range of component `s` is `[2, 100]`.
+ /// The range of component `z` is `[0, 10000]`.
+ fn as_multi_create(s: u32, z: u32, ) -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `262 + s * (2 ±0)`
+ // Estimated: `6811`
+ // Minimum execution time: 44_768_000 picoseconds.
+ Weight::from_parts(33_662_218, 0)
+ .saturating_add(Weight::from_parts(0, 6811))
+ // Standard Error: 1_633
+ .saturating_add(Weight::from_parts(128_927, 0).saturating_mul(s.into()))
+ // Standard Error: 16
+ .saturating_add(Weight::from_parts(1_543, 0).saturating_mul(z.into()))
+ .saturating_add(T::DbWeight::get().reads(1))
+ .saturating_add(T::DbWeight::get().writes(1))
+ }
+ /// Storage: `Multisig::Multisigs` (r:1 w:1)
+ /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
+ /// The range of component `s` is `[3, 100]`.
+ /// The range of component `z` is `[0, 10000]`.
+ fn as_multi_approve(s: u32, z: u32, ) -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `282`
+ // Estimated: `6811`
+ // Minimum execution time: 29_745_000 picoseconds.
+ Weight::from_parts(20_559_891, 0)
+ .saturating_add(Weight::from_parts(0, 6811))
+ // Standard Error: 914
+ .saturating_add(Weight::from_parts(103_601, 0).saturating_mul(s.into()))
+ // Standard Error: 8
+ .saturating_add(Weight::from_parts(1_504, 0).saturating_mul(z.into()))
+ .saturating_add(T::DbWeight::get().reads(1))
+ .saturating_add(T::DbWeight::get().writes(1))
+ }
+ /// Storage: `Multisig::Multisigs` (r:1 w:1)
+ /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
+ /// Storage: `System::Account` (r:1 w:1)
+ /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+ /// The range of component `s` is `[2, 100]`.
+ /// The range of component `z` is `[0, 10000]`.
+ fn as_multi_complete(s: u32, z: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `385 + s * (33 ±0)` + // Estimated: `6811` + // Minimum execution time: 51_506_000 picoseconds. + Weight::from_parts(36_510_777, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 2_183 + .saturating_add(Weight::from_parts(183_764, 0).saturating_mul(s.into())) + // Standard Error: 21 + .saturating_add(Weight::from_parts(1_653, 0).saturating_mul(z.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// The range of component `s` is `[2, 100]`. + fn approve_as_multi_create(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `263 + s * (2 ±0)` + // Estimated: `6811` + // Minimum execution time: 31_072_000 picoseconds. + Weight::from_parts(32_408_621, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 913 + .saturating_add(Weight::from_parts(121_410, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// The range of component `s` is `[2, 100]`. + fn approve_as_multi_approve(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `282` + // Estimated: `6811` + // Minimum execution time: 18_301_000 picoseconds. + Weight::from_parts(18_223_547, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 747 + .saturating_add(Weight::from_parts(114_584, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// The range of component `s` is `[2, 100]`. + fn cancel_as_multi(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `454 + s * (1 ±0)` + // Estimated: `6811` + // Minimum execution time: 32_107_000 picoseconds. + Weight::from_parts(33_674_827, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 1_220 + .saturating_add(Weight::from_parts(122_011, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_nft_fractionalization.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_nft_fractionalization.rs new file mode 100644 index 0000000000000000000000000000000000000000..97cec5d82ecd0635e5032f50a887c6766c80d2a2 --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_nft_fractionalization.rs @@ -0,0 +1,115 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus. If not, see <https://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `pallet_nft_fractionalization`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// ./target/production/polkadot-parachain
+// benchmark
+// pallet
+// --chain=asset-hub-rococo-dev
+// --wasm-execution=compiled
+// --pallet=pallet_nft_fractionalization
+// --no-storage-info
+// --no-median-slopes
+// --no-min-squares
+// --extrinsic=*
+// --steps=50
+// --repeat=20
+// --json
+// --header=./file_header.txt
+// --output=./parachains/runtimes/assets/asset-hub-rococo/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `pallet_nft_fractionalization`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_nft_fractionalization::WeightInfo for WeightInfo<T> {
+ /// Storage: `Nfts::Item` (r:1 w:0)
+ /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`)
+ /// Storage: `Balances::Holds` (r:1 w:1)
+ /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
+ /// Storage: `Nfts::Attribute` (r:1 w:1)
+ /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`)
+ /// Storage: `Nfts::Collection` (r:1 w:1)
+ /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`)
+ /// Storage: `Assets::Asset` (r:1 w:1)
+ /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)
+ /// Storage: `Assets::Account` (r:1 w:1)
+ /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)
+ /// Storage: `System::Account` (r:1 w:1)
+ /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+ /// Storage: `Assets::Metadata` (r:1 w:1)
+ /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`)
+ /// Storage: `NftFractionalization::NftToAsset` (r:0 w:1)
+ /// Proof: `NftFractionalization::NftToAsset` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`)
+ fn fractionalize() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `462`
+ // Estimated: `4326`
+ // Minimum execution time: 178_501_000 picoseconds.
+ Weight::from_parts(180_912_000, 0)
+ .saturating_add(Weight::from_parts(0, 4326))
+ .saturating_add(T::DbWeight::get().reads(8))
+ .saturating_add(T::DbWeight::get().writes(8))
+ }
+ /// Storage: `NftFractionalization::NftToAsset` (r:1 w:1)
+ /// Proof: `NftFractionalization::NftToAsset` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`)
+ /// Storage: `Assets::Asset` (r:1 w:1)
+ /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)
+ /// Storage: `Assets::Account` (r:1 w:1)
+ /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)
+ /// Storage: `Nfts::Attribute` (r:1 w:1)
+ /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`)
+ /// Storage: `Nfts::Collection` (r:1 w:1)
+ /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`)
+ /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0)
+ /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`)
+ /// Storage: `Nfts::ItemConfigOf` (r:1 w:0)
+ /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`)
+ /// Storage: `Nfts::Item` (r:1 w:1)
+ /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`)
+ /// Storage: `Balances::Holds` (r:1 w:1)
+ /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
+ /// Storage: `Nfts::Account` (r:0 w:1)
+ /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`)
+ /// Storage: `Nfts::ItemPriceOf` (r:0 w:1)
+ /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`)
+ /// Storage: `Nfts::PendingSwapOf` (r:0 w:1)
+ /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`)
+ fn unify() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `1275`
+ // Estimated: `4326`
+ // Minimum execution time: 125_253_000 picoseconds.
+ Weight::from_parts(128_238_000, 0)
+ .saturating_add(Weight::from_parts(0, 4326))
+ .saturating_add(T::DbWeight::get().reads(9))
+ .saturating_add(T::DbWeight::get().writes(10))
+ }
+}
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_nfts.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_nfts.rs
new file mode 100644
index 0000000000000000000000000000000000000000..277cfd1747bf68a745ee2cd021be05381554ac9a
--- /dev/null
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_nfts.rs
@@ -0,0 +1,773 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus. If not, see <https://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `pallet_nfts`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// ./target/production/polkadot-parachain
+// benchmark
+// pallet
+// --chain=asset-hub-rococo-dev
+// --wasm-execution=compiled
+// --pallet=pallet_nfts
+// --no-storage-info
+// --no-median-slopes
+// --no-min-squares
+// --extrinsic=*
+// --steps=50
+// --repeat=20
+// --json
+// --header=./file_header.txt
+// --output=./parachains/runtimes/assets/asset-hub-rococo/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `pallet_nfts`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_nfts::WeightInfo for WeightInfo<T> {
+ /// Storage: `Nfts::NextCollectionId` (r:1 w:1)
+ /// Proof: `Nfts::NextCollectionId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+ /// Storage: `Nfts::Collection` (r:1 w:1)
+ /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`)
+ /// Storage: `Nfts::CollectionRoleOf` (r:0 w:1)
+ /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`)
+ /// Storage: `Nfts::CollectionConfigOf` (r:0 w:1)
+ /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`)
+ /// Storage: `Nfts::CollectionAccount` (r:0 w:1)
+ /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`)
+ fn create() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `179`
+ // Estimated: `3549`
+ // Minimum execution time: 39_124_000 picoseconds.
+ Weight::from_parts(39_975_000, 0)
+ .saturating_add(Weight::from_parts(0, 3549))
+ .saturating_add(T::DbWeight::get().reads(2))
+ .saturating_add(T::DbWeight::get().writes(5))
+ }
+ /// Storage: `Nfts::NextCollectionId` (r:1 w:1)
+ /// Proof: `Nfts::NextCollectionId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+ /// Storage: `Nfts::Collection` (r:1 w:1)
+ /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`)
+ /// Storage: `Nfts::CollectionRoleOf` (r:0 w:1)
+ /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`)
+ /// Storage: `Nfts::CollectionConfigOf` (r:0 w:1)
+ /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`)
+ /// Storage: `Nfts::CollectionAccount` (r:0 w:1)
+ /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`)
+ fn force_create() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `76`
+ // Estimated: `3549`
+ // Minimum execution time: 23_444_000 picoseconds.
+ Weight::from_parts(23_857_000, 0) + .saturating_add(Weight::from_parts(0, 3549)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemMetadataOf` (r:1 w:0) + /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:1) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1001 w:1000) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1000 w:1000) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionMetadataOf` (r:0 w:1) + /// Proof: `Nfts::CollectionMetadataOf` (`max_values`: None, `max_size`: Some(294), added: 2769, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:0 w:1) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionAccount` (r:0 w:1) + /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) + /// The range of component `m` is `[0, 1000]`. + /// The range of component `c` is `[0, 1000]`. + /// The range of component `a` is `[0, 1000]`. + fn destroy(_m: u32, _c: u32, a: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `32204 + a * (366 ±0)` + // Estimated: `2523990 + a * (2954 ±0)` + // Minimum execution time: 1_224_365_000 picoseconds. + Weight::from_parts(1_281_136_346, 0) + .saturating_add(Weight::from_parts(0, 2523990)) + // Standard Error: 10_484 + .saturating_add(Weight::from_parts(6_910_740, 0).saturating_mul(a.into())) + .saturating_add(T::DbWeight::get().reads(1004)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(a.into()))) + .saturating_add(T::DbWeight::get().writes(1005)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(a.into()))) + .saturating_add(Weight::from_parts(0, 2954).saturating_mul(a.into())) + } + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:1) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + fn mint() -> Weight { + // Proof Size summary in bytes: + // Measured: `455` + // Estimated: `4326` + // Minimum execution time: 50_489_000 picoseconds. 
+ Weight::from_parts(51_045_000, 0) + .saturating_add(Weight::from_parts(0, 4326)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:1) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + fn force_mint() -> Weight { + // Proof Size summary in bytes: + // Measured: `455` + // Estimated: `4326` + // Minimum execution time: 49_146_000 picoseconds. + Weight::from_parts(49_756_000, 0) + .saturating_add(Weight::from_parts(0, 4326)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Nfts::Attribute` (r:1 w:0) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemMetadataOf` (r:1 w:0) + /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:1) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemPriceOf` (r:0 w:1) + /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemAttributesApprovalsOf` (r:0 w:1) + /// Proof: `Nfts::ItemAttributesApprovalsOf` (`max_values`: None, `max_size`: Some(1001), added: 3476, mode: `MaxEncodedLen`) + /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) + /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) + fn burn() -> Weight { + // Proof Size summary in bytes: + // Measured: `564` + // Estimated: `4326` + // Minimum execution time: 56_059_000 picoseconds. 
+ Weight::from_parts(57_162_000, 0) + .saturating_add(Weight::from_parts(0, 4326)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(7)) + } + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1 w:0) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:2) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemPriceOf` (r:0 w:1) + /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) + /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) + fn transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `593` + // Estimated: `4326` + // Minimum execution time: 42_406_000 picoseconds. + Weight::from_parts(43_187_000, 0) + .saturating_add(Weight::from_parts(0, 4326)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:5000 w:5000) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// The range of component `i` is `[0, 5000]`. + fn redeposit(i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `763 + i * (108 ±0)` + // Estimated: `3549 + i * (3336 ±0)` + // Minimum execution time: 16_960_000 picoseconds. + Weight::from_parts(17_167_000, 0) + .saturating_add(Weight::from_parts(0, 3549)) + // Standard Error: 24_110 + .saturating_add(Weight::from_parts(18_046_970, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(i.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) + .saturating_add(Weight::from_parts(0, 3336).saturating_mul(i.into())) + } + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + fn lock_item_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `435` + // Estimated: `3534` + // Minimum execution time: 21_023_000 picoseconds. 
+ Weight::from_parts(21_409_000, 0) + .saturating_add(Weight::from_parts(0, 3534)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + fn unlock_item_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `435` + // Estimated: `3534` + // Minimum execution time: 20_706_000 picoseconds. + Weight::from_parts(21_030_000, 0) + .saturating_add(Weight::from_parts(0, 3534)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:1) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + fn lock_collection() -> Weight { + // Proof Size summary in bytes: + // Measured: `340` + // Estimated: `3549` + // Minimum execution time: 17_449_000 picoseconds. + Weight::from_parts(17_804_000, 0) + .saturating_add(Weight::from_parts(0, 3549)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Nfts::OwnershipAcceptance` (r:1 w:1) + /// Proof: `Nfts::OwnershipAcceptance` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionAccount` (r:0 w:2) + /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) + fn transfer_ownership() -> Weight { + // Proof Size summary in bytes: + // Measured: `388` + // Estimated: `3549` + // Minimum execution time: 22_958_000 picoseconds. + Weight::from_parts(23_499_000, 0) + .saturating_add(Weight::from_parts(0, 3549)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionRoleOf` (r:2 w:4) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + fn set_team() -> Weight { + // Proof Size summary in bytes: + // Measured: `369` + // Estimated: `6078` + // Minimum execution time: 40_105_000 picoseconds. + Weight::from_parts(40_800_000, 0) + .saturating_add(Weight::from_parts(0, 6078)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionAccount` (r:0 w:2) + /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) + fn force_collection_owner() -> Weight { + // Proof Size summary in bytes: + // Measured: `311` + // Estimated: `3549` + // Minimum execution time: 17_832_000 picoseconds. 
+ Weight::from_parts(18_297_000, 0) + .saturating_add(Weight::from_parts(0, 3549)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:0 w:1) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + fn force_collection_config() -> Weight { + // Proof Size summary in bytes: + // Measured: `276` + // Estimated: `3549` + // Minimum execution time: 15_027_000 picoseconds. + Weight::from_parts(15_370_000, 0) + .saturating_add(Weight::from_parts(0, 3549)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + fn lock_item_properties() -> Weight { + // Proof Size summary in bytes: + // Measured: `435` + // Estimated: `3534` + // Minimum execution time: 19_912_000 picoseconds. + Weight::from_parts(20_258_000, 0) + .saturating_add(Weight::from_parts(0, 3534)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1 w:1) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + fn set_attribute() -> Weight { + // Proof Size summary in bytes: + // Measured: `539` + // Estimated: `3944` + // Minimum execution time: 50_138_000 picoseconds. + Weight::from_parts(50_971_000, 0) + .saturating_add(Weight::from_parts(0, 3944)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1 w:1) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + fn force_set_attribute() -> Weight { + // Proof Size summary in bytes: + // Measured: `344` + // Estimated: `3944` + // Minimum execution time: 26_385_000 picoseconds. 
+ Weight::from_parts(27_086_000, 0) + .saturating_add(Weight::from_parts(0, 3944)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Nfts::Attribute` (r:1 w:1) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + fn clear_attribute() -> Weight { + // Proof Size summary in bytes: + // Measured: `983` + // Estimated: `3944` + // Minimum execution time: 45_687_000 picoseconds. + Weight::from_parts(47_107_000, 0) + .saturating_add(Weight::from_parts(0, 3944)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Nfts::Item` (r:1 w:0) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemAttributesApprovalsOf` (r:1 w:1) + /// Proof: `Nfts::ItemAttributesApprovalsOf` (`max_values`: None, `max_size`: Some(1001), added: 3476, mode: `MaxEncodedLen`) + fn approve_item_attributes() -> Weight { + // Proof Size summary in bytes: + // Measured: `381` + // Estimated: `4466` + // Minimum execution time: 18_065_000 picoseconds. + Weight::from_parts(18_371_000, 0) + .saturating_add(Weight::from_parts(0, 4466)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Nfts::Item` (r:1 w:0) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemAttributesApprovalsOf` (r:1 w:1) + /// Proof: `Nfts::ItemAttributesApprovalsOf` (`max_values`: None, `max_size`: Some(1001), added: 3476, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1001 w:1000) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 1000]`. + fn cancel_item_attributes_approval(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `760 + n * (398 ±0)` + // Estimated: `4466 + n * (2954 ±0)` + // Minimum execution time: 26_680_000 picoseconds. 
+ Weight::from_parts(27_010_000, 0) + .saturating_add(Weight::from_parts(0, 4466)) + // Standard Error: 6_351 + .saturating_add(Weight::from_parts(6_584_290, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2954).saturating_mul(n.into())) + } + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemMetadataOf` (r:1 w:1) + /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) + fn set_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `539` + // Estimated: `3812` + // Minimum execution time: 42_038_000 picoseconds. + Weight::from_parts(42_758_000, 0) + .saturating_add(Weight::from_parts(0, 3812)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemMetadataOf` (r:1 w:1) + /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + fn clear_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `849` + // Estimated: `3812` + // Minimum execution time: 40_220_000 picoseconds. + Weight::from_parts(41_026_000, 0) + .saturating_add(Weight::from_parts(0, 3812)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionMetadataOf` (r:1 w:1) + /// Proof: `Nfts::CollectionMetadataOf` (`max_values`: None, `max_size`: Some(294), added: 2769, mode: `MaxEncodedLen`) + fn set_collection_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `398` + // Estimated: `3759` + // Minimum execution time: 38_135_000 picoseconds. 
+ Weight::from_parts(38_561_000, 0) + .saturating_add(Weight::from_parts(0, 3759)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionMetadataOf` (r:1 w:1) + /// Proof: `Nfts::CollectionMetadataOf` (`max_values`: None, `max_size`: Some(294), added: 2769, mode: `MaxEncodedLen`) + fn clear_collection_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `716` + // Estimated: `3759` + // Minimum execution time: 37_583_000 picoseconds. + Weight::from_parts(38_215_000, 0) + .saturating_add(Weight::from_parts(0, 3759)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + fn approve_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `410` + // Estimated: `4326` + // Minimum execution time: 21_405_000 picoseconds. + Weight::from_parts(21_803_000, 0) + .saturating_add(Weight::from_parts(0, 4326)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + fn cancel_approval() -> Weight { + // Proof Size summary in bytes: + // Measured: `418` + // Estimated: `4326` + // Minimum execution time: 18_713_000 picoseconds. + Weight::from_parts(19_185_000, 0) + .saturating_add(Weight::from_parts(0, 4326)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + fn clear_all_transfer_approvals() -> Weight { + // Proof Size summary in bytes: + // Measured: `418` + // Estimated: `4326` + // Minimum execution time: 17_803_000 picoseconds. + Weight::from_parts(18_270_000, 0) + .saturating_add(Weight::from_parts(0, 4326)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Nfts::OwnershipAcceptance` (r:1 w:1) + /// Proof: `Nfts::OwnershipAcceptance` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn set_accept_ownership() -> Weight { + // Proof Size summary in bytes: + // Measured: `76` + // Estimated: `3517` + // Minimum execution time: 15_982_000 picoseconds. 
+ Weight::from_parts(16_700_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:1) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + fn set_collection_max_supply() -> Weight { + // Proof Size summary in bytes: + // Measured: `340` + // Estimated: `3549` + // Minimum execution time: 19_501_000 picoseconds. + Weight::from_parts(19_785_000, 0) + .saturating_add(Weight::from_parts(0, 3549)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:1) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + fn update_mint_settings() -> Weight { + // Proof Size summary in bytes: + // Measured: `323` + // Estimated: `3538` + // Minimum execution time: 18_914_000 picoseconds. + Weight::from_parts(19_292_000, 0) + .saturating_add(Weight::from_parts(0, 3538)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Nfts::Item` (r:1 w:0) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemPriceOf` (r:0 w:1) + /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + fn set_price() -> Weight { + // Proof Size summary in bytes: + // Measured: `518` + // Estimated: `4326` + // Minimum execution time: 24_625_000 picoseconds. 
+ Weight::from_parts(25_257_000, 0) + .saturating_add(Weight::from_parts(0, 4326)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemPriceOf` (r:1 w:1) + /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1 w:0) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:2) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) + /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) + fn buy_item() -> Weight { + // Proof Size summary in bytes: + // Measured: `705` + // Estimated: `4326` + // Minimum execution time: 50_833_000 picoseconds. + Weight::from_parts(52_161_000, 0) + .saturating_add(Weight::from_parts(0, 4326)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// The range of component `n` is `[0, 10]`. + fn pay_tips(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_220_000 picoseconds. + Weight::from_parts(3_476_001, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 7_084 + .saturating_add(Weight::from_parts(3_844_820, 0).saturating_mul(n.into())) + } + /// Storage: `Nfts::Item` (r:2 w:0) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) + /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) + fn create_swap() -> Weight { + // Proof Size summary in bytes: + // Measured: `494` + // Estimated: `7662` + // Minimum execution time: 21_983_000 picoseconds. + Weight::from_parts(22_746_000, 0) + .saturating_add(Weight::from_parts(0, 7662)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Nfts::PendingSwapOf` (r:1 w:1) + /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:1 w:0) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + fn cancel_swap() -> Weight { + // Proof Size summary in bytes: + // Measured: `513` + // Estimated: `4326` + // Minimum execution time: 20_875_000 picoseconds. 
+ Weight::from_parts(21_465_000, 0) + .saturating_add(Weight::from_parts(0, 4326)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Nfts::Item` (r:2 w:2) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::PendingSwapOf` (r:1 w:2) + /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:2 w:0) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:2 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:4) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemPriceOf` (r:0 w:2) + /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + fn claim_swap() -> Weight { + // Proof Size summary in bytes: + // Measured: `834` + // Estimated: `7662` + // Minimum execution time: 84_771_000 picoseconds. + Weight::from_parts(86_078_000, 0) + .saturating_add(Weight::from_parts(0, 7662)) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(10)) + } + /// Storage: `Nfts::CollectionRoleOf` (r:2 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:10 w:10) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemMetadataOf` (r:1 w:1) + /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:1) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 10]`. + fn mint_pre_signed(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `558` + // Estimated: `6078 + n * (2954 ±0)` + // Minimum execution time: 143_265_000 picoseconds. 
+ Weight::from_parts(150_978_773, 0) + .saturating_add(Weight::from_parts(0, 6078)) + // Standard Error: 49_443 + .saturating_add(Weight::from_parts(31_888_255, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes(6)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2954).saturating_mul(n.into())) + } + /// Storage: `Nfts::Item` (r:1 w:0) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemAttributesApprovalsOf` (r:1 w:1) + /// Proof: `Nfts::ItemAttributesApprovalsOf` (`max_values`: None, `max_size`: Some(1001), added: 3476, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:10 w:10) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 10]`. + fn set_attributes_pre_signed(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `588` + // Estimated: `4466 + n * (2954 ±0)` + // Minimum execution time: 83_754_000 picoseconds. + Weight::from_parts(96_685_026, 0) + .saturating_add(Weight::from_parts(0, 4466)) + // Standard Error: 72_592 + .saturating_add(Weight::from_parts(30_914_858, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2954).saturating_mul(n.into())) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_proxy.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_proxy.rs new file mode 100644 index 0000000000000000000000000000000000000000..0e680011e79b512e123c19345eec8041c87763aa --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_proxy.rs @@ -0,0 +1,226 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_proxy` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! 
DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=asset-hub-rococo-dev +// --wasm-execution=compiled +// --pallet=pallet_proxy +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/assets/asset-hub-rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_proxy`. +pub struct WeightInfo(PhantomData); +impl pallet_proxy::WeightInfo for WeightInfo { + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn proxy(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `127 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 16_417_000 picoseconds. + Weight::from_parts(17_283_443, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 2_409 + .saturating_add(Weight::from_parts(32_123, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn proxy_announced(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `454 + a * (68 ±0) + p * (37 ±0)` + // Estimated: `5698` + // Minimum execution time: 37_572_000 picoseconds. + Weight::from_parts(37_045_756, 0) + .saturating_add(Weight::from_parts(0, 5698)) + // Standard Error: 2_896 + .saturating_add(Weight::from_parts(139_561, 0).saturating_mul(a.into())) + // Standard Error: 2_993 + .saturating_add(Weight::from_parts(73_270, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn remove_announcement(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `369 + a * (68 ±0)` + // Estimated: `5698` + // Minimum execution time: 24_066_000 picoseconds. 
+ Weight::from_parts(24_711_403, 0) + .saturating_add(Weight::from_parts(0, 5698)) + // Standard Error: 1_626 + .saturating_add(Weight::from_parts(128_391, 0).saturating_mul(a.into())) + // Standard Error: 1_680 + .saturating_add(Weight::from_parts(23_124, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn reject_announcement(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `369 + a * (68 ±0)` + // Estimated: `5698` + // Minimum execution time: 24_162_000 picoseconds. + Weight::from_parts(23_928_058, 0) + .saturating_add(Weight::from_parts(0, 5698)) + // Standard Error: 2_072 + .saturating_add(Weight::from_parts(152_299, 0).saturating_mul(a.into())) + // Standard Error: 2_141 + .saturating_add(Weight::from_parts(39_775, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn announce(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `386 + a * (68 ±0) + p * (37 ±0)` + // Estimated: `5698` + // Minimum execution time: 33_858_000 picoseconds. + Weight::from_parts(33_568_059, 0) + .saturating_add(Weight::from_parts(0, 5698)) + // Standard Error: 1_816 + .saturating_add(Weight::from_parts(134_400, 0).saturating_mul(a.into())) + // Standard Error: 1_876 + .saturating_add(Weight::from_parts(57_028, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn add_proxy(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `127 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 24_947_000 picoseconds. + Weight::from_parts(26_235_199, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 1_363 + .saturating_add(Weight::from_parts(41_435, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. 
+ fn remove_proxy(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `127 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 25_186_000 picoseconds. + Weight::from_parts(26_823_133, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 2_259 + .saturating_add(Weight::from_parts(34_224, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn remove_proxies(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `127 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 22_156_000 picoseconds. + Weight::from_parts(23_304_060, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 1_738 + .saturating_add(Weight::from_parts(39_612, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn create_pure(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `139` + // Estimated: `4706` + // Minimum execution time: 26_914_000 picoseconds. + Weight::from_parts(28_009_062, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 1_978 + .saturating_add(Weight::from_parts(12_255, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[0, 30]`. + fn kill_pure(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `164 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 23_281_000 picoseconds. + Weight::from_parts(24_392_989, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 2_943 + .saturating_add(Weight::from_parts(30_287, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_session.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_session.rs new file mode 100644 index 0000000000000000000000000000000000000000..6cfa66a4bea61e13fb4f176327aacfff800697bb --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_session.rs @@ -0,0 +1,81 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. 
If not, see . + +//! Autogenerated weights for `pallet_session` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=asset-hub-rococo-dev +// --wasm-execution=compiled +// --pallet=pallet_session +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/assets/asset-hub-rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_session`. +pub struct WeightInfo(PhantomData); +impl pallet_session::WeightInfo for WeightInfo { + /// Storage: `Session::NextKeys` (r:1 w:1) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Session::KeyOwner` (r:1 w:1) + /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn set_keys() -> Weight { + // Proof Size summary in bytes: + // Measured: `270` + // Estimated: `3735` + // Minimum execution time: 16_932_000 picoseconds. + Weight::from_parts(17_357_000, 0) + .saturating_add(Weight::from_parts(0, 3735)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Session::NextKeys` (r:1 w:1) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Session::KeyOwner` (r:0 w:1) + /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn purge_keys() -> Weight { + // Proof Size summary in bytes: + // Measured: `242` + // Estimated: `3707` + // Minimum execution time: 12_157_000 picoseconds. + Weight::from_parts(12_770_000, 0) + .saturating_add(Weight::from_parts(0, 3707)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_timestamp.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_timestamp.rs new file mode 100644 index 0000000000000000000000000000000000000000..38e1fbd822b2309ee92e57b5599adef0e79d414c --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_timestamp.rs @@ -0,0 +1,75 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . 
+ +//! Autogenerated weights for `pallet_timestamp` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=asset-hub-rococo-dev +// --wasm-execution=compiled +// --pallet=pallet_timestamp +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/assets/asset-hub-rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_timestamp`. +pub struct WeightInfo(PhantomData); +impl pallet_timestamp::WeightInfo for WeightInfo { + /// Storage: `Timestamp::Now` (r:1 w:1) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Aura::CurrentSlot` (r:1 w:0) + /// Proof: `Aura::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + fn set() -> Weight { + // Proof Size summary in bytes: + // Measured: `86` + // Estimated: `1493` + // Minimum execution time: 9_313_000 picoseconds. + Weight::from_parts(9_775_000, 0) + .saturating_add(Weight::from_parts(0, 1493)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn on_finalize() -> Weight { + // Proof Size summary in bytes: + // Measured: `57` + // Estimated: `0` + // Minimum execution time: 3_322_000 picoseconds. + Weight::from_parts(3_577_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_uniques.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_uniques.rs new file mode 100644 index 0000000000000000000000000000000000000000..c4e220b7facb17d6b8a53a8af195f3d22f14d9ef --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_uniques.rs @@ -0,0 +1,467 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_uniques` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! 
HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=asset-hub-rococo-dev +// --wasm-execution=compiled +// --pallet=pallet_uniques +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/assets/asset-hub-rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_uniques`. +pub struct WeightInfo(PhantomData); +impl pallet_uniques::WeightInfo for WeightInfo { + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ClassAccount` (r:0 w:1) + /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) + fn create() -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `3643` + // Minimum execution time: 28_845_000 picoseconds. + Weight::from_parts(29_675_000, 0) + .saturating_add(Weight::from_parts(0, 3643)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ClassAccount` (r:0 w:1) + /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) + fn force_create() -> Weight { + // Proof Size summary in bytes: + // Measured: `42` + // Estimated: `3643` + // Minimum execution time: 13_492_000 picoseconds. 
+ Weight::from_parts(14_049_000, 0) + .saturating_add(Weight::from_parts(0, 3643)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Asset` (r:1001 w:1000) + /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) + /// Storage: `Uniques::InstanceMetadataOf` (r:1000 w:1000) + /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Attribute` (r:1000 w:1000) + /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(172), added: 2647, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ClassAccount` (r:0 w:1) + /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ClassMetadataOf` (r:0 w:1) + /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(167), added: 2642, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Account` (r:0 w:1000) + /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `Uniques::CollectionMaxSupply` (r:0 w:1) + /// Proof: `Uniques::CollectionMaxSupply` (`max_values`: None, `max_size`: Some(24), added: 2499, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 1000]`. + /// The range of component `m` is `[0, 1000]`. + /// The range of component `a` is `[0, 1000]`. + fn destroy(n: u32, m: u32, a: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `257 + a * (107 ±0) + m * (56 ±0) + n * (76 ±0)` + // Estimated: `3643 + a * (2647 ±0) + m * (2662 ±0) + n * (2597 ±0)` + // Minimum execution time: 2_920_070_000 picoseconds. 
+ Weight::from_parts(2_983_862_000, 0) + .saturating_add(Weight::from_parts(0, 3643)) + // Standard Error: 36_415 + .saturating_add(Weight::from_parts(7_589_778, 0).saturating_mul(n.into())) + // Standard Error: 36_415 + .saturating_add(Weight::from_parts(479_496, 0).saturating_mul(m.into())) + // Standard Error: 36_415 + .saturating_add(Weight::from_parts(562_056, 0).saturating_mul(a.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into()))) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(a.into()))) + .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(m.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(a.into()))) + .saturating_add(Weight::from_parts(0, 2647).saturating_mul(a.into())) + .saturating_add(Weight::from_parts(0, 2662).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 2597).saturating_mul(n.into())) + } + /// Storage: `Uniques::Asset` (r:1 w:1) + /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::CollectionMaxSupply` (r:1 w:0) + /// Proof: `Uniques::CollectionMaxSupply` (`max_values`: None, `max_size`: Some(24), added: 2499, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Account` (r:0 w:1) + /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + fn mint() -> Weight { + // Proof Size summary in bytes: + // Measured: `282` + // Estimated: `3643` + // Minimum execution time: 35_329_000 picoseconds. + Weight::from_parts(36_019_000, 0) + .saturating_add(Weight::from_parts(0, 3643)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Asset` (r:1 w:1) + /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Account` (r:0 w:1) + /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ItemPriceOf` (r:0 w:1) + /// Proof: `Uniques::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + fn burn() -> Weight { + // Proof Size summary in bytes: + // Measured: `428` + // Estimated: `3643` + // Minimum execution time: 36_474_000 picoseconds. 
+ Weight::from_parts(37_190_000, 0) + .saturating_add(Weight::from_parts(0, 3643)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Uniques::Class` (r:1 w:0) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Asset` (r:1 w:1) + /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Account` (r:0 w:2) + /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ItemPriceOf` (r:0 w:1) + /// Proof: `Uniques::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + fn transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `428` + // Estimated: `3643` + // Minimum execution time: 26_786_000 picoseconds. + Weight::from_parts(27_400_000, 0) + .saturating_add(Weight::from_parts(0, 3643)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Asset` (r:5000 w:5000) + /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) + /// The range of component `i` is `[0, 5000]`. + fn redeposit(i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `738 + i * (76 ±0)` + // Estimated: `3643 + i * (2597 ±0)` + // Minimum execution time: 14_546_000 picoseconds. + Weight::from_parts(14_831_000, 0) + .saturating_add(Weight::from_parts(0, 3643)) + // Standard Error: 24_362 + .saturating_add(Weight::from_parts(17_972_938, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(i.into()))) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) + .saturating_add(Weight::from_parts(0, 2597).saturating_mul(i.into())) + } + /// Storage: `Uniques::Asset` (r:1 w:1) + /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Class` (r:1 w:0) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + fn freeze() -> Weight { + // Proof Size summary in bytes: + // Measured: `428` + // Estimated: `3643` + // Minimum execution time: 18_919_000 picoseconds. + Weight::from_parts(19_547_000, 0) + .saturating_add(Weight::from_parts(0, 3643)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Uniques::Asset` (r:1 w:1) + /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Class` (r:1 w:0) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + fn thaw() -> Weight { + // Proof Size summary in bytes: + // Measured: `428` + // Estimated: `3643` + // Minimum execution time: 18_643_000 picoseconds. 
+ Weight::from_parts(19_000_000, 0) + .saturating_add(Weight::from_parts(0, 3643)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + fn freeze_collection() -> Weight { + // Proof Size summary in bytes: + // Measured: `282` + // Estimated: `3643` + // Minimum execution time: 13_530_000 picoseconds. + Weight::from_parts(14_165_000, 0) + .saturating_add(Weight::from_parts(0, 3643)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + fn thaw_collection() -> Weight { + // Proof Size summary in bytes: + // Measured: `282` + // Estimated: `3643` + // Minimum execution time: 13_523_000 picoseconds. + Weight::from_parts(14_055_000, 0) + .saturating_add(Weight::from_parts(0, 3643)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Uniques::OwnershipAcceptance` (r:1 w:1) + /// Proof: `Uniques::OwnershipAcceptance` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ClassAccount` (r:0 w:2) + /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) + fn transfer_ownership() -> Weight { + // Proof Size summary in bytes: + // Measured: `356` + // Estimated: `3643` + // Minimum execution time: 22_131_000 picoseconds. + Weight::from_parts(22_628_000, 0) + .saturating_add(Weight::from_parts(0, 3643)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + fn set_team() -> Weight { + // Proof Size summary in bytes: + // Measured: `282` + // Estimated: `3643` + // Minimum execution time: 13_841_000 picoseconds. + Weight::from_parts(14_408_000, 0) + .saturating_add(Weight::from_parts(0, 3643)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ClassAccount` (r:0 w:1) + /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) + fn force_item_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `282` + // Estimated: `3643` + // Minimum execution time: 16_954_000 picoseconds. 
+ Weight::from_parts(17_482_000, 0) + .saturating_add(Weight::from_parts(0, 3643)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:0) + /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Attribute` (r:1 w:1) + /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(172), added: 2647, mode: `MaxEncodedLen`) + fn set_attribute() -> Weight { + // Proof Size summary in bytes: + // Measured: `559` + // Estimated: `3652` + // Minimum execution time: 38_493_000 picoseconds. + Weight::from_parts(39_513_000, 0) + .saturating_add(Weight::from_parts(0, 3652)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:0) + /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Attribute` (r:1 w:1) + /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(172), added: 2647, mode: `MaxEncodedLen`) + fn clear_attribute() -> Weight { + // Proof Size summary in bytes: + // Measured: `756` + // Estimated: `3652` + // Minimum execution time: 37_918_000 picoseconds. + Weight::from_parts(38_666_000, 0) + .saturating_add(Weight::from_parts(0, 3652)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:1) + /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) + fn set_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `348` + // Estimated: `3652` + // Minimum execution time: 29_810_000 picoseconds. + Weight::from_parts(30_363_000, 0) + .saturating_add(Weight::from_parts(0, 3652)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:1) + /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) + fn clear_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `559` + // Estimated: `3652` + // Minimum execution time: 30_877_000 picoseconds. 
+ Weight::from_parts(31_430_000, 0) + .saturating_add(Weight::from_parts(0, 3652)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ClassMetadataOf` (r:1 w:1) + /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(167), added: 2642, mode: `MaxEncodedLen`) + fn set_collection_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `282` + // Estimated: `3643` + // Minimum execution time: 30_478_000 picoseconds. + Weight::from_parts(31_065_000, 0) + .saturating_add(Weight::from_parts(0, 3643)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Uniques::Class` (r:1 w:0) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ClassMetadataOf` (r:1 w:1) + /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(167), added: 2642, mode: `MaxEncodedLen`) + fn clear_collection_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `473` + // Estimated: `3643` + // Minimum execution time: 29_582_000 picoseconds. + Weight::from_parts(30_160_000, 0) + .saturating_add(Weight::from_parts(0, 3643)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Uniques::Class` (r:1 w:0) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Asset` (r:1 w:1) + /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) + fn approve_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `428` + // Estimated: `3643` + // Minimum execution time: 19_328_000 picoseconds. + Weight::from_parts(19_866_000, 0) + .saturating_add(Weight::from_parts(0, 3643)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Uniques::Class` (r:1 w:0) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Asset` (r:1 w:1) + /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) + fn cancel_approval() -> Weight { + // Proof Size summary in bytes: + // Measured: `461` + // Estimated: `3643` + // Minimum execution time: 19_131_000 picoseconds. + Weight::from_parts(19_569_000, 0) + .saturating_add(Weight::from_parts(0, 3643)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Uniques::OwnershipAcceptance` (r:1 w:1) + /// Proof: `Uniques::OwnershipAcceptance` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn set_accept_ownership() -> Weight { + // Proof Size summary in bytes: + // Measured: `42` + // Estimated: `3517` + // Minimum execution time: 15_212_000 picoseconds. 
+ Weight::from_parts(15_691_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Uniques::CollectionMaxSupply` (r:1 w:1) + /// Proof: `Uniques::CollectionMaxSupply` (`max_values`: None, `max_size`: Some(24), added: 2499, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Class` (r:1 w:0) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + fn set_collection_max_supply() -> Weight { + // Proof Size summary in bytes: + // Measured: `282` + // Estimated: `3643` + // Minimum execution time: 16_290_000 picoseconds. + Weight::from_parts(16_654_000, 0) + .saturating_add(Weight::from_parts(0, 3643)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Uniques::Asset` (r:1 w:0) + /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ItemPriceOf` (r:0 w:1) + /// Proof: `Uniques::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + fn set_price() -> Weight { + // Proof Size summary in bytes: + // Measured: `259` + // Estimated: `3587` + // Minimum execution time: 16_095_000 picoseconds. + Weight::from_parts(16_555_000, 0) + .saturating_add(Weight::from_parts(0, 3587)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Uniques::Asset` (r:1 w:1) + /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ItemPriceOf` (r:1 w:1) + /// Proof: `Uniques::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Class` (r:1 w:0) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Account` (r:0 w:2) + /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + fn buy_item() -> Weight { + // Proof Size summary in bytes: + // Measured: `540` + // Estimated: `3643` + // Minimum execution time: 35_506_000 picoseconds. + Weight::from_parts(36_305_000, 0) + .saturating_add(Weight::from_parts(0, 3643)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(4)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_utility.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_utility.rs new file mode 100644 index 0000000000000000000000000000000000000000..a82115b9d0937f2d224667383569596dbe8a236d --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_utility.rs @@ -0,0 +1,102 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_utility` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=asset-hub-rococo-dev +// --wasm-execution=compiled +// --pallet=pallet_utility +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/assets/asset-hub-rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_utility`. +pub struct WeightInfo(PhantomData); +impl pallet_utility::WeightInfo for WeightInfo { + /// The range of component `c` is `[0, 1000]`. + fn batch(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_103_000 picoseconds. + Weight::from_parts(7_226_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 2_732 + .saturating_add(Weight::from_parts(6_560_347, 0).saturating_mul(c.into())) + } + fn as_derivative() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 5_208_000 picoseconds. + Weight::from_parts(5_480_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// The range of component `c` is `[0, 1000]`. + fn batch_all(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_070_000 picoseconds. + Weight::from_parts(1_321_270, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 3_454 + .saturating_add(Weight::from_parts(6_864_640, 0).saturating_mul(c.into())) + } + fn dispatch_as() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_255_000 picoseconds. + Weight::from_parts(9_683_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// The range of component `c` is `[0, 1000]`. + fn force_batch(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_852_000 picoseconds. + Weight::from_parts(7_007_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 1_745 + .saturating_add(Weight::from_parts(6_562_902, 0).saturating_mul(c.into())) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs new file mode 100644 index 0000000000000000000000000000000000000000..afe85fdaf28820f52c7a134e66c6884d96dd6aef --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs @@ -0,0 +1,328 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. 
+ +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_xcm` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=asset-hub-rococo-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_xcm`. +pub struct WeightInfo(PhantomData); +impl pallet_xcm::WeightInfo for WeightInfo { + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn send() -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 24_498_000 picoseconds. 
+ Weight::from_parts(25_385_000, 0) + .saturating_add(Weight::from_parts(0, 3610)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn teleport_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `39` + // Estimated: `3504` + // Minimum execution time: 19_746_000 picoseconds. + Weight::from_parts(20_535_000, 0) + .saturating_add(Weight::from_parts(0, 3504)) + .saturating_add(T::DbWeight::get().reads(2)) + } + /// Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn reserve_transfer_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `39` + // Estimated: `3504` + // Minimum execution time: 15_059_000 picoseconds. + Weight::from_parts(15_386_000, 0) + .saturating_add(Weight::from_parts(0, 3504)) + .saturating_add(T::DbWeight::get().reads(2)) + } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn execute() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn force_xcm_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_108_000 picoseconds. + Weight::from_parts(7_458_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:0 w:1) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn force_default_xcm_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_205_000 picoseconds. 
+ Weight::from_parts(2_360_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn force_subscribe_version_notify() -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 29_099_000 picoseconds. + Weight::from_parts(29_580_000, 0) + .saturating_add(Weight::from_parts(0, 3610)) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn force_unsubscribe_version_notify() -> Weight { + // Proof Size summary in bytes: + // Measured: `363` + // Estimated: `3828` + // Minimum execution time: 31_161_000 picoseconds. 
+ Weight::from_parts(31_933_000, 0) + .saturating_add(Weight::from_parts(0, 3828)) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) + /// Proof: `PolkadotXcm::XcmExecutionSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn force_suspension() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_158_000 picoseconds. + Weight::from_parts(2_316_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PolkadotXcm::SupportedVersion` (r:4 w:2) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn migrate_supported_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `162` + // Estimated: `11052` + // Minimum execution time: 16_934_000 picoseconds. + Weight::from_parts(17_655_000, 0) + .saturating_add(Weight::from_parts(0, 11052)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::VersionNotifiers` (r:4 w:2) + /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn migrate_version_notifiers() -> Weight { + // Proof Size summary in bytes: + // Measured: `166` + // Estimated: `11056` + // Minimum execution time: 17_658_000 picoseconds. + Weight::from_parts(17_973_000, 0) + .saturating_add(Weight::from_parts(0, 11056)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn already_notified_target() -> Weight { + // Proof Size summary in bytes: + // Measured: `173` + // Estimated: `13538` + // Minimum execution time: 18_673_000 picoseconds. + Weight::from_parts(19_027_000, 0) + .saturating_add(Weight::from_parts(0, 13538)) + .saturating_add(T::DbWeight::get().reads(5)) + } + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn notify_current_targets() -> Weight { + // Proof Size summary in bytes: + // Measured: `212` + // Estimated: `6152` + // Minimum execution time: 27_171_000 picoseconds. 
+ Weight::from_parts(27_802_000, 0) + .saturating_add(Weight::from_parts(0, 6152)) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn notify_target_migration_fail() -> Weight { + // Proof Size summary in bytes: + // Measured: `206` + // Estimated: `8621` + // Minimum execution time: 9_423_000 picoseconds. + Weight::from_parts(9_636_000, 0) + .saturating_add(Weight::from_parts(0, 8621)) + .saturating_add(T::DbWeight::get().reads(3)) + } + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn migrate_version_notify_targets() -> Weight { + // Proof Size summary in bytes: + // Measured: `173` + // Estimated: `11063` + // Minimum execution time: 17_442_000 picoseconds. + Weight::from_parts(17_941_000, 0) + .saturating_add(Weight::from_parts(0, 11063)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn migrate_and_notify_old_targets() -> Weight { + // Proof Size summary in bytes: + // Measured: `215` + // Estimated: `11105` + // Minimum execution time: 34_340_000 picoseconds. + Weight::from_parts(34_934_000, 0) + .saturating_add(Weight::from_parts(0, 11105)) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn new_query() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `1588` + // Minimum execution time: 5_496_000 picoseconds. 
+ Weight::from_parts(5_652_000, 0) + .saturating_add(Weight::from_parts(0, 1588)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::Queries` (r:1 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn take_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `7740` + // Estimated: `11205` + // Minimum execution time: 26_140_000 picoseconds. + Weight::from_parts(26_824_000, 0) + .saturating_add(Weight::from_parts(0, 11205)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router.rs new file mode 100644 index 0000000000000000000000000000000000000000..7e12453583d49faebef8c0a3764d8c91aeae768c --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router.rs @@ -0,0 +1,126 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_xcm_bridge_hub_router` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-11-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm_bridge_hub_router +// --chain=asset-hub-rococo-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_xcm_bridge_hub_router`. 
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_xcm_bridge_hub_router::WeightInfo for WeightInfo<T> {
+	/// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0)
+	/// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0)
+	/// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ToWestendXcmRouter::Bridge` (r:1 w:1)
+	/// Proof: `ToWestendXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`)
+	fn on_initialize_when_non_congested() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `154`
+		// Estimated: `1639`
+		// Minimum execution time: 7_924_000 picoseconds.
+		Weight::from_parts(8_199_000, 0)
+			.saturating_add(Weight::from_parts(0, 1639))
+			.saturating_add(T::DbWeight::get().reads(3))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
+	/// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0)
+	/// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0)
+	/// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	fn on_initialize_when_congested() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `144`
+		// Estimated: `1629`
+		// Minimum execution time: 4_265_000 picoseconds.
+		Weight::from_parts(4_417_000, 0)
+			.saturating_add(Weight::from_parts(0, 1629))
+			.saturating_add(T::DbWeight::get().reads(2))
+	}
+	/// Storage: `ToWestendXcmRouter::Bridge` (r:1 w:1)
+	/// Proof: `ToWestendXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`)
+	fn report_bridge_status() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `150`
+		// Estimated: `1502`
+		// Minimum execution time: 10_292_000 picoseconds.
+ Weight::from_parts(10_797_000, 0) + .saturating_add(Weight::from_parts(0, 1502)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x3302afcb67e838a3f960251b417b9a4f` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x3302afcb67e838a3f960251b417b9a4f` (r:1 w:0) + /// Storage: UNKNOWN KEY `0x0973fe64c85043ba1c965cbc38eb63c7` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x0973fe64c85043ba1c965cbc38eb63c7` (r:1 w:0) + /// Storage: `ToWestendXcmRouter::Bridge` (r:1 w:1) + /// Proof: `ToWestendXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) + /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) + /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn send_message() -> Weight { + // Proof Size summary in bytes: + // Measured: `387` + // Estimated: `3852` + // Minimum execution time: 61_995_000 picoseconds. + Weight::from_parts(65_137_000, 0) + .saturating_add(Weight::from_parts(0, 3852)) + .saturating_add(T::DbWeight::get().reads(11)) + .saturating_add(T::DbWeight::get().writes(4)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/paritydb_weights.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/paritydb_weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..25679703831a13b8d1bb7fb7dd4d92fa84b1f255 --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/paritydb_weights.rs @@ -0,0 +1,63 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod constants { + use frame_support::{ + parameter_types, + weights::{constants, RuntimeDbWeight}, + }; + + parameter_types! { + /// `ParityDB` can be enabled with a feature flag, but is still experimental. These weights + /// are available for brave runtime engineers who may want to try this out as default. + pub const ParityDbWeight: RuntimeDbWeight = RuntimeDbWeight { + read: 8_000 * constants::WEIGHT_REF_TIME_PER_NANOS, + write: 50_000 * constants::WEIGHT_REF_TIME_PER_NANOS, + }; + } + + #[cfg(test)] + mod test_db_weights { + use super::constants::ParityDbWeight as W; + use frame_support::weights::constants; + + /// Checks that all weights exist and have sane values. + // NOTE: If this test fails but you are sure that the generated values are fine, + // you can delete it. + #[test] + fn sane() { + // At least 1 µs. + assert!( + W::get().reads(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, + "Read weight should be at least 1 µs." + ); + assert!( + W::get().writes(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, + "Write weight should be at least 1 µs." + ); + // At most 1 ms. + assert!( + W::get().reads(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, + "Read weight should be at most 1 ms." + ); + assert!( + W::get().writes(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, + "Write weight should be at most 1 ms." + ); + } + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/rocksdb_weights.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/rocksdb_weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..3dd817aa6f137085b0e5fdf2b11b7f50e5c8b002 --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/rocksdb_weights.rs @@ -0,0 +1,63 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod constants { + use frame_support::{ + parameter_types, + weights::{constants, RuntimeDbWeight}, + }; + + parameter_types! { + /// By default, Substrate uses `RocksDB`, so this will be the weight used throughout + /// the runtime. + pub const RocksDbWeight: RuntimeDbWeight = RuntimeDbWeight { + read: 25_000 * constants::WEIGHT_REF_TIME_PER_NANOS, + write: 100_000 * constants::WEIGHT_REF_TIME_PER_NANOS, + }; + } + + #[cfg(test)] + mod test_db_weights { + use super::constants::RocksDbWeight as W; + use frame_support::weights::constants; + + /// Checks that all weights exist and have sane values. + // NOTE: If this test fails but you are sure that the generated values are fine, + // you can delete it. + #[test] + fn sane() { + // At least 1 µs. + assert!( + W::get().reads(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, + "Read weight should be at least 1 µs." 
+ ); + assert!( + W::get().writes(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, + "Write weight should be at least 1 µs." + ); + // At most 1 ms. + assert!( + W::get().reads(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, + "Read weight should be at most 1 ms." + ); + assert!( + W::get().writes(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, + "Write weight should be at most 1 ms." + ); + } + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..2fbbd61654becd057c7a10b42c8fe22f8cbca310 --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/mod.rs @@ -0,0 +1,244 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +mod pallet_xcm_benchmarks_fungible; +mod pallet_xcm_benchmarks_generic; + +use crate::{xcm_config::MaxAssetsIntoHolding, Runtime}; +use frame_support::weights::Weight; +use pallet_xcm_benchmarks_fungible::WeightInfo as XcmFungibleWeight; +use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric; +use sp_std::prelude::*; +use xcm::{latest::prelude::*, DoubleEncoded}; + +trait WeighMultiAssets { + fn weigh_multi_assets(&self, weight: Weight) -> Weight; +} + +const MAX_ASSETS: u64 = 100; + +impl WeighMultiAssets for MultiAssetFilter { + fn weigh_multi_assets(&self, weight: Weight) -> Weight { + match self { + Self::Definite(assets) => weight.saturating_mul(assets.inner().iter().count() as u64), + Self::Wild(asset) => match asset { + All => weight.saturating_mul(MAX_ASSETS), + AllOf { fun, .. } => match fun { + WildFungibility::Fungible => weight, + // Magic number 2 has to do with the fact that we could have up to 2 times + // MaxAssetsIntoHolding in the worst-case scenario. + WildFungibility::NonFungible => + weight.saturating_mul((MaxAssetsIntoHolding::get() * 2) as u64), + }, + AllCounted(count) => weight.saturating_mul(MAX_ASSETS.min(*count as u64)), + AllOfCounted { count, .. 
} => weight.saturating_mul(MAX_ASSETS.min(*count as u64)), + }, + } + } +} + +impl WeighMultiAssets for MultiAssets { + fn weigh_multi_assets(&self, weight: Weight) -> Weight { + weight.saturating_mul(self.inner().iter().count() as u64) + } +} + +pub struct AssetHubRococoXcmWeight(core::marker::PhantomData); +impl XcmWeightInfo for AssetHubRococoXcmWeight { + fn withdraw_asset(assets: &MultiAssets) -> Weight { + assets.weigh_multi_assets(XcmFungibleWeight::::withdraw_asset()) + } + fn reserve_asset_deposited(assets: &MultiAssets) -> Weight { + assets.weigh_multi_assets(XcmFungibleWeight::::reserve_asset_deposited()) + } + fn receive_teleported_asset(assets: &MultiAssets) -> Weight { + assets.weigh_multi_assets(XcmFungibleWeight::::receive_teleported_asset()) + } + fn query_response( + _query_id: &u64, + _response: &Response, + _max_weight: &Weight, + _querier: &Option, + ) -> Weight { + XcmGeneric::::query_response() + } + fn transfer_asset(assets: &MultiAssets, _dest: &MultiLocation) -> Weight { + assets.weigh_multi_assets(XcmFungibleWeight::::transfer_asset()) + } + fn transfer_reserve_asset( + assets: &MultiAssets, + _dest: &MultiLocation, + _xcm: &Xcm<()>, + ) -> Weight { + assets.weigh_multi_assets(XcmFungibleWeight::::transfer_reserve_asset()) + } + fn transact( + _origin_type: &OriginKind, + _require_weight_at_most: &Weight, + _call: &DoubleEncoded, + ) -> Weight { + XcmGeneric::::transact() + } + fn hrmp_new_channel_open_request( + _sender: &u32, + _max_message_size: &u32, + _max_capacity: &u32, + ) -> Weight { + // XCM Executor does not currently support HRMP channel operations + Weight::MAX + } + fn hrmp_channel_accepted(_recipient: &u32) -> Weight { + // XCM Executor does not currently support HRMP channel operations + Weight::MAX + } + fn hrmp_channel_closing(_initiator: &u32, _sender: &u32, _recipient: &u32) -> Weight { + // XCM Executor does not currently support HRMP channel operations + Weight::MAX + } + fn clear_origin() -> Weight { + XcmGeneric::::clear_origin() + } + fn descend_origin(_who: &InteriorMultiLocation) -> Weight { + XcmGeneric::::descend_origin() + } + fn report_error(_query_response_info: &QueryResponseInfo) -> Weight { + XcmGeneric::::report_error() + } + fn deposit_asset(assets: &MultiAssetFilter, _dest: &MultiLocation) -> Weight { + assets.weigh_multi_assets(XcmFungibleWeight::::deposit_asset()) + } + fn deposit_reserve_asset( + assets: &MultiAssetFilter, + _dest: &MultiLocation, + _xcm: &Xcm<()>, + ) -> Weight { + assets.weigh_multi_assets(XcmFungibleWeight::::deposit_reserve_asset()) + } + fn exchange_asset(_give: &MultiAssetFilter, _receive: &MultiAssets, _maximal: &bool) -> Weight { + Weight::MAX + } + fn initiate_reserve_withdraw( + assets: &MultiAssetFilter, + _reserve: &MultiLocation, + _xcm: &Xcm<()>, + ) -> Weight { + assets.weigh_multi_assets(XcmFungibleWeight::::initiate_reserve_withdraw()) + } + fn initiate_teleport( + assets: &MultiAssetFilter, + _dest: &MultiLocation, + _xcm: &Xcm<()>, + ) -> Weight { + assets.weigh_multi_assets(XcmFungibleWeight::::initiate_teleport()) + } + fn report_holding(_response_info: &QueryResponseInfo, _assets: &MultiAssetFilter) -> Weight { + XcmGeneric::::report_holding() + } + fn buy_execution(_fees: &MultiAsset, _weight_limit: &WeightLimit) -> Weight { + XcmGeneric::::buy_execution() + } + fn refund_surplus() -> Weight { + XcmGeneric::::refund_surplus() + } + fn set_error_handler(_xcm: &Xcm) -> Weight { + XcmGeneric::::set_error_handler() + } + fn set_appendix(_xcm: &Xcm) -> Weight { + 
XcmGeneric::::set_appendix() + } + fn clear_error() -> Weight { + XcmGeneric::::clear_error() + } + fn claim_asset(_assets: &MultiAssets, _ticket: &MultiLocation) -> Weight { + XcmGeneric::::claim_asset() + } + fn trap(_code: &u64) -> Weight { + XcmGeneric::::trap() + } + fn subscribe_version(_query_id: &QueryId, _max_response_weight: &Weight) -> Weight { + XcmGeneric::::subscribe_version() + } + fn unsubscribe_version() -> Weight { + XcmGeneric::::unsubscribe_version() + } + fn burn_asset(assets: &MultiAssets) -> Weight { + assets.weigh_multi_assets(XcmGeneric::::burn_asset()) + } + fn expect_asset(assets: &MultiAssets) -> Weight { + assets.weigh_multi_assets(XcmGeneric::::expect_asset()) + } + fn expect_origin(_origin: &Option) -> Weight { + XcmGeneric::::expect_origin() + } + fn expect_error(_error: &Option<(u32, XcmError)>) -> Weight { + XcmGeneric::::expect_error() + } + fn expect_transact_status(_transact_status: &MaybeErrorCode) -> Weight { + XcmGeneric::::expect_transact_status() + } + fn query_pallet(_module_name: &Vec, _response_info: &QueryResponseInfo) -> Weight { + XcmGeneric::::query_pallet() + } + fn expect_pallet( + _index: &u32, + _name: &Vec, + _module_name: &Vec, + _crate_major: &u32, + _min_crate_minor: &u32, + ) -> Weight { + XcmGeneric::::expect_pallet() + } + fn report_transact_status(_response_info: &QueryResponseInfo) -> Weight { + XcmGeneric::::report_transact_status() + } + fn clear_transact_status() -> Weight { + XcmGeneric::::clear_transact_status() + } + fn universal_origin(_: &Junction) -> Weight { + XcmGeneric::::universal_origin() + } + fn export_message(_: &NetworkId, _: &Junctions, _: &Xcm<()>) -> Weight { + Weight::MAX + } + fn lock_asset(_: &MultiAsset, _: &MultiLocation) -> Weight { + Weight::MAX + } + fn unlock_asset(_: &MultiAsset, _: &MultiLocation) -> Weight { + Weight::MAX + } + fn note_unlockable(_: &MultiAsset, _: &MultiLocation) -> Weight { + Weight::MAX + } + fn request_unlock(_: &MultiAsset, _: &MultiLocation) -> Weight { + Weight::MAX + } + fn set_fees_mode(_: &bool) -> Weight { + XcmGeneric::::set_fees_mode() + } + fn set_topic(_topic: &[u8; 32]) -> Weight { + XcmGeneric::::set_topic() + } + fn clear_topic() -> Weight { + XcmGeneric::::clear_topic() + } + fn alias_origin(_: &MultiLocation) -> Weight { + // XCM Executor does not currently support alias origin operations + Weight::MAX + } + fn unpaid_execution(_: &WeightLimit, _: &Option) -> Weight { + XcmGeneric::::unpaid_execution() + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs new file mode 100644 index 0000000000000000000000000000000000000000..7fab35842509deceba14e89e5bbf6bebe2240528 --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs @@ -0,0 +1,201 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus. If not, see .
+
+//! Autogenerated weights for `pallet_xcm_benchmarks::fungible`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2023-11-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: Compiled, CHAIN: Some("asset-hub-rococo-dev"), DB CACHE: 1024
+
+// Executed Command:
+// target/production/polkadot-parachain
+// benchmark
+// pallet
+// --steps=50
+// --repeat=20
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=pallet_xcm_benchmarks::fungible
+// --chain=asset-hub-rococo-dev
+// --header=./cumulus/file_header.txt
+// --template=./cumulus/templates/xcm-bench-template.hbs
+// --output=./cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+
+use frame_support::{traits::Get, weights::Weight};
+use sp_std::marker::PhantomData;
+
+/// Weights for `pallet_xcm_benchmarks::fungible`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> WeightInfo<T> {
+	// Storage: `System::Account` (r:1 w:1)
+	// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	pub fn withdraw_asset() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `101`
+		// Estimated: `3593`
+		// Minimum execution time: 21_643_000 picoseconds.
+		Weight::from_parts(22_410_000, 3593)
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
+	// Storage: `System::Account` (r:2 w:2)
+	// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	pub fn transfer_asset() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `101`
+		// Estimated: `6196`
+		// Minimum execution time: 43_758_000 picoseconds.
+ Weight::from_parts(44_654_000, 6196) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + // Storage: `System::Account` (r:3 w:3) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn transfer_reserve_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `246` + // Estimated: `8799` + // Minimum execution time: 87_978_000 picoseconds. + Weight::from_parts(88_517_000, 8799) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().writes(5)) + } + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + pub fn reserve_asset_deposited() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `1489` + // Minimum execution time: 6_883_000 picoseconds. 
+ Weight::from_parts(6_979_000, 1489) + .saturating_add(T::DbWeight::get().reads(1)) + } + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn initiate_reserve_withdraw() -> Weight { + // Proof Size summary in bytes: + // Measured: `246` + // Estimated: `6196` + // Minimum execution time: 198_882_000 picoseconds. + Weight::from_parts(199_930_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) + } + pub fn receive_teleported_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_343_000 picoseconds. + Weight::from_parts(3_487_000, 0) + } + // Storage: `System::Account` (r:1 w:1) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + pub fn deposit_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `3593` + // Minimum execution time: 19_399_000 picoseconds. 
+ Weight::from_parts(19_659_000, 3593) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn deposit_reserve_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `6196` + // Minimum execution time: 59_017_000 picoseconds. + Weight::from_parts(60_543_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) + } + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:1 w:1) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn initiate_teleport() -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 45_409_000 picoseconds. 
+ Weight::from_parts(47_041_000, 3610) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(3)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs new file mode 100644 index 0000000000000000000000000000000000000000..4454494badcbfe9b4f429312e24b63786b83ef75 --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -0,0 +1,355 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_xcm_benchmarks::generic` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-11-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: Compiled, CHAIN: Some("asset-hub-rococo-dev"), DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm_benchmarks::generic +// --chain=asset-hub-rococo-dev +// --header=./cumulus/file_header.txt +// --template=./cumulus/templates/xcm-bench-template.hbs +// --output=./cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::Weight}; +use sp_std::marker::PhantomData; + +/// Weights for `pallet_xcm_benchmarks::generic`. 
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> WeightInfo<T> {
+	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
+	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0)
+	// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
+	// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `System::Account` (r:2 w:2)
+	// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
+	// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
+	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	pub fn report_holding() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `246`
+		// Estimated: `6196`
+		// Minimum execution time: 440_298_000 picoseconds.
+		Weight::from_parts(446_508_000, 6196)
+			.saturating_add(T::DbWeight::get().reads(9))
+			.saturating_add(T::DbWeight::get().writes(4))
+	}
+	pub fn buy_execution() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `0`
+		// Estimated: `0`
+		// Minimum execution time: 3_313_000 picoseconds.
+		Weight::from_parts(3_422_000, 0)
+	}
+	// Storage: `PolkadotXcm::Queries` (r:1 w:0)
+	// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	pub fn query_response() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `103`
+		// Estimated: `3568`
+		// Minimum execution time: 9_691_000 picoseconds.
+		Weight::from_parts(9_948_000, 3568)
+			.saturating_add(T::DbWeight::get().reads(1))
+	}
+	pub fn transact() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `0`
+		// Estimated: `0`
+		// Minimum execution time: 10_384_000 picoseconds.
+		Weight::from_parts(11_085_000, 0)
+	}
+	pub fn refund_surplus() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `0`
+		// Estimated: `0`
+		// Minimum execution time: 3_438_000 picoseconds.
+		Weight::from_parts(3_577_000, 0)
+	}
+	pub fn set_error_handler() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `0`
+		// Estimated: `0`
+		// Minimum execution time: 2_126_000 picoseconds.
+		Weight::from_parts(2_243_000, 0)
+	}
+	pub fn set_appendix() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `0`
+		// Estimated: `0`
+		// Minimum execution time: 2_126_000 picoseconds.
+		Weight::from_parts(2_207_000, 0)
+	}
+	pub fn clear_error() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `0`
+		// Estimated: `0`
+		// Minimum execution time: 2_105_000 picoseconds.
+		Weight::from_parts(2_193_000, 0)
+	}
+	pub fn descend_origin() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `0`
+		// Estimated: `0`
+		// Minimum execution time: 2_999_000 picoseconds.
+ Weight::from_parts(3_056_000, 0) + } + pub fn clear_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_091_000 picoseconds. + Weight::from_parts(2_176_000, 0) + } + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn report_error() -> Weight { + // Proof Size summary in bytes: + // Measured: `246` + // Estimated: `6196` + // Minimum execution time: 55_728_000 picoseconds. + Weight::from_parts(56_704_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) + } + // Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) + // Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) + pub fn claim_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `160` + // Estimated: `3625` + // Minimum execution time: 12_839_000 picoseconds. + Weight::from_parts(13_457_000, 3625) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + pub fn trap() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_116_000 picoseconds. 
+ Weight::from_parts(2_219_000, 0) + } + // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) + // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn subscribe_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 24_891_000 picoseconds. + Weight::from_parts(25_583_000, 3610) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(3)) + } + // Storage: `PolkadotXcm::VersionNotifyTargets` (r:0 w:1) + // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + pub fn unsubscribe_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_968_000 picoseconds. + Weight::from_parts(4_122_000, 0) + .saturating_add(T::DbWeight::get().writes(1)) + } + pub fn burn_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 136_220_000 picoseconds. + Weight::from_parts(137_194_000, 0) + } + pub fn expect_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 12_343_000 picoseconds. + Weight::from_parts(12_635_000, 0) + } + pub fn expect_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_237_000 picoseconds. + Weight::from_parts(2_315_000, 0) + } + pub fn expect_error() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_094_000 picoseconds. + Weight::from_parts(2_231_000, 0) + } + pub fn expect_transact_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_379_000 picoseconds. 
+ Weight::from_parts(2_455_000, 0) + } + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn query_pallet() -> Weight { + // Proof Size summary in bytes: + // Measured: `246` + // Estimated: `6196` + // Minimum execution time: 60_734_000 picoseconds. + Weight::from_parts(61_964_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) + } + pub fn expect_pallet() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 5_500_000 picoseconds. + Weight::from_parts(5_720_000, 0) + } + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn report_transact_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `246` + // Estimated: `6196` + // Minimum execution time: 55_767_000 picoseconds. 
+ Weight::from_parts(56_790_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) + } + pub fn clear_transact_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_201_000 picoseconds. + Weight::from_parts(2_291_000, 0) + } + pub fn set_topic() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_164_000 picoseconds. + Weight::from_parts(2_241_000, 0) + } + pub fn clear_topic() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_127_000 picoseconds. + Weight::from_parts(2_236_000, 0) + } + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + pub fn universal_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `1489` + // Minimum execution time: 4_275_000 picoseconds. + Weight::from_parts(4_381_000, 1489) + .saturating_add(T::DbWeight::get().reads(1)) + } + pub fn set_fees_mode() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_132_000 picoseconds. + Weight::from_parts(2_216_000, 0) + } + pub fn unpaid_execution() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_265_000 picoseconds. + Weight::from_parts(2_332_000, 0) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs new file mode 100644 index 0000000000000000000000000000000000000000..4da0a2500a541907f1126b776e3f73182062b306 --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs @@ -0,0 +1,856 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
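+
+//! XCM configuration for the Asset Hub Rococo runtime: asset transactors, origin
+//! converters, execution barriers, fee traders and the bridging setup towards Westend.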
+
+use super::{
+	AccountId, AllPalletsWithSystem, Assets, Authorship, Balance, Balances, BaseDeliveryFee,
+	FeeAssetId, ForeignAssets, ForeignAssetsInstance, ParachainInfo, ParachainSystem, PolkadotXcm,
+	PoolAssets, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, ToWestendXcmRouter,
+	TransactionByteFee, TrustBackedAssetsInstance, WeightToFee, XcmpQueue,
+};
+use assets_common::{
+	local_and_foreign_assets::MatchesLocalAndForeignAssetsMultiLocation,
+	matching::{FromSiblingParachain, IsForeignConcreteAsset},
+};
+use frame_support::{
+	match_types, parameter_types,
+	traits::{ConstU32, Contains, Equals, Everything, Nothing, PalletInfoAccess},
+};
+use frame_system::EnsureRoot;
+use pallet_xcm::XcmPassthrough;
+use parachains_common::{
+	impls::ToStakingPot,
+	xcm_config::{
+		AssetFeeAsExistentialDepositMultiplier, ConcreteAssetFromSystem,
+		RelayOrOtherSystemParachains,
+	},
+	TREASURY_PALLET_ID,
+};
+use polkadot_parachain_primitives::primitives::Sibling;
+use polkadot_runtime_common::xcm_sender::ExponentialPrice;
+use rococo_runtime_constants::system_parachain;
+use sp_runtime::traits::{AccountIdConversion, ConvertInto};
+use xcm::latest::prelude::*;
+use xcm_builder::{
+	AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses,
+	AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, CurrencyAdapter,
+	DenyReserveTransferToRelayChain, DenyThenTry, DescribeAllTerminal, DescribeFamily,
+	EnsureXcmOrigin, FungiblesAdapter, GlobalConsensusParachainConvertsFor, HashedDescription,
+	IsConcrete, LocalMint, NetworkExportTableItem, NoChecking, ParentAsSuperuser, ParentIsPreset,
+	RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia,
+	SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, StartsWith,
+	StartsWithExplicitGlobalConsensus, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents,
+	WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents,
+	XcmFeeToAccount,
+};
+use xcm_executor::{traits::WithOriginFilter, XcmExecutor};
+
+#[cfg(feature = "runtime-benchmarks")]
+use cumulus_primitives_core::ParaId;
+
+parameter_types! {
+	pub const TokenLocation: MultiLocation = MultiLocation::parent();
+	pub const RelayNetwork: NetworkId = NetworkId::Rococo;
+	pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into();
+	pub UniversalLocation: InteriorMultiLocation =
+		X2(GlobalConsensus(RelayNetwork::get()), Parachain(ParachainInfo::parachain_id().into()));
+	pub UniversalLocationNetworkId: NetworkId = UniversalLocation::get().global_consensus().unwrap();
+	pub TrustBackedAssetsPalletLocation: MultiLocation =
+		PalletInstance(<Assets as PalletInfoAccess>::index() as u8).into();
+	pub ForeignAssetsPalletLocation: MultiLocation =
+		PalletInstance(<ForeignAssets as PalletInfoAccess>::index() as u8).into();
+	pub PoolAssetsPalletLocation: MultiLocation =
+		PalletInstance(<PoolAssets as PalletInfoAccess>::index() as u8).into();
+	pub CheckingAccount: AccountId = PolkadotXcm::check_account();
+	pub const GovernanceLocation: MultiLocation = MultiLocation::parent();
+	pub TreasuryAccount: AccountId = TREASURY_PALLET_ID.into_account_truncating();
+	pub RelayTreasuryLocation: MultiLocation = (Parent, PalletInstance(rococo_runtime_constants::TREASURY_PALLET_ID)).into();
+}
+
+/// Type for specifying how a `MultiLocation` can be converted into an `AccountId`. This is used
+/// when determining ownership of accounts for asset transacting and when attempting to use XCM
+/// `Transact` in order to determine the dispatch Origin.
+pub type LocationToAccountId = (
+	// The parent (Relay-chain) origin converts to the parent `AccountId`.
+	ParentIsPreset<AccountId>,
+	// Sibling parachain origins convert to AccountId via the `ParaId::into`.
+	SiblingParachainConvertsVia<Sibling, AccountId>,
+	// Straight up local `AccountId32` origins just alias directly to `AccountId`.
+	AccountId32Aliases<RelayNetwork, AccountId>,
+	// Foreign locations alias into accounts according to a hash of their standard description.
+	HashedDescription<AccountId, DescribeFamily<DescribeAllTerminal>>,
+	// Different global consensus parachain sovereign account.
+	// (Used for over-bridge transfers and reserve processing)
+	GlobalConsensusParachainConvertsFor<UniversalLocation, AccountId>,
+);
+
+/// Means for transacting the native currency on this chain.
+pub type CurrencyTransactor = CurrencyAdapter<
+	// Use this currency:
+	Balances,
+	// Use this currency when it is a fungible asset matching the given location or name:
+	IsConcrete<TokenLocation>,
+	// Convert an XCM MultiLocation into a local account id:
+	LocationToAccountId,
+	// Our chain's account ID type (we can't get away without mentioning it explicitly):
+	AccountId,
+	// We don't track any teleports of `Balances`.
+	(),
+>;
+
+/// `AssetId`/`Balance` converter for `TrustBackedAssets`.
+pub type TrustBackedAssetsConvertedConcreteId =
+	assets_common::TrustBackedAssetsConvertedConcreteId<TrustBackedAssetsPalletLocation, Balance>;
+
+/// Means for transacting assets besides the native currency on this chain.
+pub type FungiblesTransactor = FungiblesAdapter<
+	// Use this fungibles implementation:
+	Assets,
+	// Use this currency when it is a fungible asset matching the given location or name:
+	TrustBackedAssetsConvertedConcreteId,
+	// Convert an XCM MultiLocation into a local account id:
+	LocationToAccountId,
+	// Our chain's account ID type (we can't get away without mentioning it explicitly):
+	AccountId,
+	// We only want to allow teleports of known assets. We use non-zero issuance as an indication
+	// that this asset is known.
+	LocalMint<parachains_common::impls::NonZeroIssuance<AccountId, Assets>>,
+	// The account to use for tracking teleports.
+	CheckingAccount,
+>;
+
+/// `AssetId`/`Balance` converter for `ForeignAssets`.
+pub type ForeignAssetsConvertedConcreteId = assets_common::ForeignAssetsConvertedConcreteId<
+	(
+		// Ignore `TrustBackedAssets` explicitly
+		StartsWith<TrustBackedAssetsPalletLocation>,
+		// Ignore assets that start explicitly with our `GlobalConsensus(NetworkId)`, means:
+		// - foreign assets from our consensus should be: `MultiLocation {parents: 1,
+		//   X*(Parachain(xyz), ..)}`
+		// - foreign assets outside our consensus with the same `GlobalConsensus(NetworkId)` won't
+		//   be accepted here
+		StartsWithExplicitGlobalConsensus<UniversalLocationNetworkId>,
+	),
+	Balance,
+>;
+
+/// Means for transacting foreign assets from different global consensus.
+pub type ForeignFungiblesTransactor = FungiblesAdapter<
+	// Use this fungibles implementation:
+	ForeignAssets,
+	// Use this currency when it is a fungible asset matching the given location or name:
+	ForeignAssetsConvertedConcreteId,
+	// Convert an XCM MultiLocation into a local account id:
+	LocationToAccountId,
+	// Our chain's account ID type (we can't get away without mentioning it explicitly):
+	AccountId,
+	// We don't need to check teleports here.
+	NoChecking,
+	// The account to use for tracking teleports.
+	CheckingAccount,
+>;
+
+/// `AssetId`/`Balance` converter for `PoolAssets`.
+pub type PoolAssetsConvertedConcreteId =
+	assets_common::PoolAssetsConvertedConcreteId<PoolAssetsPalletLocation, Balance>;
+
+/// Means for transacting asset conversion pool assets on this chain.
+pub type PoolFungiblesTransactor = FungiblesAdapter<
+	// Use this fungibles implementation:
+	PoolAssets,
+	// Use this currency when it is a fungible asset matching the given location or name:
+	PoolAssetsConvertedConcreteId,
+	// Convert an XCM MultiLocation into a local account id:
+	LocationToAccountId,
+	// Our chain's account ID type (we can't get away without mentioning it explicitly):
+	AccountId,
+	// We only want to allow teleports of known assets. We use non-zero issuance as an indication
+	// that this asset is known.
+	LocalMint<parachains_common::impls::NonZeroIssuance<AccountId, PoolAssets>>,
+	// The account to use for tracking teleports.
+	CheckingAccount,
+>;
+
+/// Means for transacting assets on this chain.
+pub type AssetTransactors =
+	(CurrencyTransactor, FungiblesTransactor, ForeignFungiblesTransactor, PoolFungiblesTransactor);
+
+/// Simple `MultiLocation` matcher for Local and Foreign asset `MultiLocation`.
+pub struct LocalAndForeignAssetsMultiLocationMatcher;
+impl MatchesLocalAndForeignAssetsMultiLocation for LocalAndForeignAssetsMultiLocationMatcher {
+	fn is_local(location: &MultiLocation) -> bool {
+		use assets_common::fungible_conversion::MatchesMultiLocation;
+		TrustBackedAssetsConvertedConcreteId::contains(location)
+	}
+	fn is_foreign(location: &MultiLocation) -> bool {
+		use assets_common::fungible_conversion::MatchesMultiLocation;
+		ForeignAssetsConvertedConcreteId::contains(location)
+	}
+}
+impl Contains<MultiLocation> for LocalAndForeignAssetsMultiLocationMatcher {
+	fn contains(location: &MultiLocation) -> bool {
+		Self::is_local(location) || Self::is_foreign(location)
+	}
+}
+
+/// This is the type we use to convert an (incoming) XCM origin into a local `Origin` instance,
+/// ready for dispatching a transaction with Xcm's `Transact`. There is an `OriginKind` which can
+/// bias the kind of local `Origin` it will become.
+pub type XcmOriginToTransactDispatchOrigin = (
+	// Sovereign account converter; this attempts to derive an `AccountId` from the origin location
+	// using `LocationToAccountId` and then turn that into the usual `Signed` origin. Useful for
+	// foreign chains who want to have a local sovereign account on this chain which they control.
+	SovereignSignedViaLocation<LocationToAccountId, RuntimeOrigin>,
+	// Native converter for Relay-chain (Parent) location; will convert to a `Relay` origin when
+	// recognised.
+	RelayChainAsNative<RelayChainOrigin, RuntimeOrigin>,
+	// Native converter for sibling Parachains; will convert to a `SiblingPara` origin when
+	// recognised.
+	SiblingParachainAsNative<cumulus_pallet_xcm::Origin, RuntimeOrigin>,
+	// Superuser converter for the Relay-chain (Parent) location. This will allow it to issue a
+	// transaction from the Root origin.
+	ParentAsSuperuser<RuntimeOrigin>,
+	// Native signed account converter; this just converts an `AccountId32` origin into a normal
+	// `RuntimeOrigin::Signed` origin of the same 32-byte value.
+	SignedAccountId32AsNative<RelayNetwork, RuntimeOrigin>,
+	// Xcm origins can be represented natively under the Xcm pallet's Xcm origin.
+	XcmPassthrough<RuntimeOrigin>,
+);
+
+parameter_types! {
+	pub const MaxInstructions: u32 = 100;
+	pub const MaxAssetsIntoHolding: u32 = 64;
+	pub XcmAssetFeesReceiver: Option<AccountId> = Authorship::author();
+}
+
+match_types! {
+	pub type ParentOrParentsPlurality: impl Contains<MultiLocation> = {
+		MultiLocation { parents: 1, interior: Here } |
+		MultiLocation { parents: 1, interior: X1(Plurality { .. }) }
+	};
+	pub type ParentOrSiblings: impl Contains<MultiLocation> = {
+		MultiLocation { parents: 1, interior: Here } |
+		MultiLocation { parents: 1, interior: X1(_) }
+	};
+}
+
+/// A call filter for the XCM Transact instruction. This is a temporary measure until we properly
+/// account for proof size weights.
+/// +/// Calls that are allowed through this filter must: +/// 1. Have a fixed weight; +/// 2. Cannot lead to another call being made; +/// 3. Have a defined proof size weight, e.g. no unbounded vecs in call parameters. +pub struct SafeCallFilter; +impl Contains for SafeCallFilter { + fn contains(call: &RuntimeCall) -> bool { + #[cfg(feature = "runtime-benchmarks")] + { + if matches!(call, RuntimeCall::System(frame_system::Call::remark_with_event { .. })) { + return true + } + } + + // Allow to change dedicated storage items (called by governance-like) + match call { + RuntimeCall::System(frame_system::Call::set_storage { items }) + if items.iter().all(|(k, _)| k.eq(&bridging::XcmBridgeHubRouterByteFee::key())) || + items + .iter() + .all(|(k, _)| k.eq(&bridging::XcmBridgeHubRouterBaseFee::key())) => + return true, + _ => (), + }; + + matches!( + call, + RuntimeCall::PolkadotXcm( + pallet_xcm::Call::force_xcm_version { .. } | + pallet_xcm::Call::force_default_xcm_version { .. } + ) | RuntimeCall::System( + frame_system::Call::set_heap_pages { .. } | + frame_system::Call::set_code { .. } | + frame_system::Call::set_code_without_checks { .. } | + frame_system::Call::kill_prefix { .. }, + ) | RuntimeCall::ParachainSystem(..) | + RuntimeCall::Timestamp(..) | + RuntimeCall::Balances(..) | + RuntimeCall::CollatorSelection( + pallet_collator_selection::Call::set_desired_candidates { .. } | + pallet_collator_selection::Call::set_candidacy_bond { .. } | + pallet_collator_selection::Call::register_as_candidate { .. } | + pallet_collator_selection::Call::leave_intent { .. } | + pallet_collator_selection::Call::set_invulnerables { .. } | + pallet_collator_selection::Call::add_invulnerable { .. } | + pallet_collator_selection::Call::remove_invulnerable { .. }, + ) | RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | + RuntimeCall::XcmpQueue(..) | + RuntimeCall::MessageQueue(..) | + RuntimeCall::Assets( + pallet_assets::Call::create { .. } | + pallet_assets::Call::force_create { .. } | + pallet_assets::Call::start_destroy { .. } | + pallet_assets::Call::destroy_accounts { .. } | + pallet_assets::Call::destroy_approvals { .. } | + pallet_assets::Call::finish_destroy { .. } | + pallet_assets::Call::block { .. } | + pallet_assets::Call::mint { .. } | + pallet_assets::Call::burn { .. } | + pallet_assets::Call::transfer { .. } | + pallet_assets::Call::transfer_keep_alive { .. } | + pallet_assets::Call::force_transfer { .. } | + pallet_assets::Call::freeze { .. } | + pallet_assets::Call::thaw { .. } | + pallet_assets::Call::freeze_asset { .. } | + pallet_assets::Call::thaw_asset { .. } | + pallet_assets::Call::transfer_ownership { .. } | + pallet_assets::Call::set_team { .. } | + pallet_assets::Call::set_metadata { .. } | + pallet_assets::Call::clear_metadata { .. } | + pallet_assets::Call::force_set_metadata { .. } | + pallet_assets::Call::force_clear_metadata { .. } | + pallet_assets::Call::force_asset_status { .. } | + pallet_assets::Call::approve_transfer { .. } | + pallet_assets::Call::cancel_approval { .. } | + pallet_assets::Call::force_cancel_approval { .. } | + pallet_assets::Call::transfer_approved { .. } | + pallet_assets::Call::touch { .. } | + pallet_assets::Call::touch_other { .. } | + pallet_assets::Call::refund { .. } | + pallet_assets::Call::refund_other { .. }, + ) | RuntimeCall::ForeignAssets( + pallet_assets::Call::create { .. } | + pallet_assets::Call::force_create { .. } | + pallet_assets::Call::start_destroy { .. } | + pallet_assets::Call::destroy_accounts { .. 
} | + pallet_assets::Call::destroy_approvals { .. } | + pallet_assets::Call::finish_destroy { .. } | + pallet_assets::Call::block { .. } | + pallet_assets::Call::mint { .. } | + pallet_assets::Call::burn { .. } | + pallet_assets::Call::transfer { .. } | + pallet_assets::Call::transfer_keep_alive { .. } | + pallet_assets::Call::force_transfer { .. } | + pallet_assets::Call::freeze { .. } | + pallet_assets::Call::thaw { .. } | + pallet_assets::Call::freeze_asset { .. } | + pallet_assets::Call::thaw_asset { .. } | + pallet_assets::Call::transfer_ownership { .. } | + pallet_assets::Call::set_team { .. } | + pallet_assets::Call::set_metadata { .. } | + pallet_assets::Call::clear_metadata { .. } | + pallet_assets::Call::force_set_metadata { .. } | + pallet_assets::Call::force_clear_metadata { .. } | + pallet_assets::Call::force_asset_status { .. } | + pallet_assets::Call::approve_transfer { .. } | + pallet_assets::Call::cancel_approval { .. } | + pallet_assets::Call::force_cancel_approval { .. } | + pallet_assets::Call::transfer_approved { .. } | + pallet_assets::Call::touch { .. } | + pallet_assets::Call::touch_other { .. } | + pallet_assets::Call::refund { .. } | + pallet_assets::Call::refund_other { .. }, + ) | RuntimeCall::PoolAssets( + pallet_assets::Call::force_create { .. } | + pallet_assets::Call::block { .. } | + pallet_assets::Call::burn { .. } | + pallet_assets::Call::transfer { .. } | + pallet_assets::Call::transfer_keep_alive { .. } | + pallet_assets::Call::force_transfer { .. } | + pallet_assets::Call::freeze { .. } | + pallet_assets::Call::thaw { .. } | + pallet_assets::Call::freeze_asset { .. } | + pallet_assets::Call::thaw_asset { .. } | + pallet_assets::Call::transfer_ownership { .. } | + pallet_assets::Call::set_team { .. } | + pallet_assets::Call::set_metadata { .. } | + pallet_assets::Call::clear_metadata { .. } | + pallet_assets::Call::force_set_metadata { .. } | + pallet_assets::Call::force_clear_metadata { .. } | + pallet_assets::Call::force_asset_status { .. } | + pallet_assets::Call::approve_transfer { .. } | + pallet_assets::Call::cancel_approval { .. } | + pallet_assets::Call::force_cancel_approval { .. } | + pallet_assets::Call::transfer_approved { .. } | + pallet_assets::Call::touch { .. } | + pallet_assets::Call::touch_other { .. } | + pallet_assets::Call::refund { .. } | + pallet_assets::Call::refund_other { .. }, + ) | RuntimeCall::AssetConversion( + pallet_asset_conversion::Call::create_pool { .. } | + pallet_asset_conversion::Call::add_liquidity { .. } | + pallet_asset_conversion::Call::remove_liquidity { .. } | + pallet_asset_conversion::Call::swap_tokens_for_exact_tokens { .. } | + pallet_asset_conversion::Call::swap_exact_tokens_for_tokens { .. }, + ) | RuntimeCall::NftFractionalization( + pallet_nft_fractionalization::Call::fractionalize { .. } | + pallet_nft_fractionalization::Call::unify { .. }, + ) | RuntimeCall::Nfts( + pallet_nfts::Call::create { .. } | + pallet_nfts::Call::force_create { .. } | + pallet_nfts::Call::destroy { .. } | + pallet_nfts::Call::mint { .. } | + pallet_nfts::Call::force_mint { .. } | + pallet_nfts::Call::burn { .. } | + pallet_nfts::Call::transfer { .. } | + pallet_nfts::Call::lock_item_transfer { .. } | + pallet_nfts::Call::unlock_item_transfer { .. } | + pallet_nfts::Call::lock_collection { .. } | + pallet_nfts::Call::transfer_ownership { .. } | + pallet_nfts::Call::set_team { .. } | + pallet_nfts::Call::force_collection_owner { .. } | + pallet_nfts::Call::force_collection_config { .. 
} | + pallet_nfts::Call::approve_transfer { .. } | + pallet_nfts::Call::cancel_approval { .. } | + pallet_nfts::Call::clear_all_transfer_approvals { .. } | + pallet_nfts::Call::lock_item_properties { .. } | + pallet_nfts::Call::set_attribute { .. } | + pallet_nfts::Call::force_set_attribute { .. } | + pallet_nfts::Call::clear_attribute { .. } | + pallet_nfts::Call::approve_item_attributes { .. } | + pallet_nfts::Call::cancel_item_attributes_approval { .. } | + pallet_nfts::Call::set_metadata { .. } | + pallet_nfts::Call::clear_metadata { .. } | + pallet_nfts::Call::set_collection_metadata { .. } | + pallet_nfts::Call::clear_collection_metadata { .. } | + pallet_nfts::Call::set_accept_ownership { .. } | + pallet_nfts::Call::set_collection_max_supply { .. } | + pallet_nfts::Call::update_mint_settings { .. } | + pallet_nfts::Call::set_price { .. } | + pallet_nfts::Call::buy_item { .. } | + pallet_nfts::Call::pay_tips { .. } | + pallet_nfts::Call::create_swap { .. } | + pallet_nfts::Call::cancel_swap { .. } | + pallet_nfts::Call::claim_swap { .. }, + ) | RuntimeCall::Uniques( + pallet_uniques::Call::create { .. } | + pallet_uniques::Call::force_create { .. } | + pallet_uniques::Call::destroy { .. } | + pallet_uniques::Call::mint { .. } | + pallet_uniques::Call::burn { .. } | + pallet_uniques::Call::transfer { .. } | + pallet_uniques::Call::freeze { .. } | + pallet_uniques::Call::thaw { .. } | + pallet_uniques::Call::freeze_collection { .. } | + pallet_uniques::Call::thaw_collection { .. } | + pallet_uniques::Call::transfer_ownership { .. } | + pallet_uniques::Call::set_team { .. } | + pallet_uniques::Call::approve_transfer { .. } | + pallet_uniques::Call::cancel_approval { .. } | + pallet_uniques::Call::force_item_status { .. } | + pallet_uniques::Call::set_attribute { .. } | + pallet_uniques::Call::clear_attribute { .. } | + pallet_uniques::Call::set_metadata { .. } | + pallet_uniques::Call::clear_metadata { .. } | + pallet_uniques::Call::set_collection_metadata { .. } | + pallet_uniques::Call::clear_collection_metadata { .. } | + pallet_uniques::Call::set_accept_ownership { .. } | + pallet_uniques::Call::set_collection_max_supply { .. } | + pallet_uniques::Call::set_price { .. } | + pallet_uniques::Call::buy_item { .. } + ) | RuntimeCall::ToWestendXcmRouter( + pallet_xcm_bridge_hub_router::Call::report_bridge_status { .. } + ) + ) + } +} + +pub type Barrier = TrailingSetTopicAsId< + DenyThenTry< + DenyReserveTransferToRelayChain, + ( + TakeWeightCredit, + // Expected responses are OK. + AllowKnownQueryResponses, + // Allow XCMs with some computed origins to pass through. + WithComputedOrigin< + ( + // If the message is one that immediately attempts to pay for execution, then + // allow it. + AllowTopLevelPaidExecutionFrom, + // Parent, its pluralities (i.e. governance bodies), relay treasury pallet and + // BridgeHub get free execution. + AllowExplicitUnpaidExecutionFrom<( + ParentOrParentsPlurality, + Equals, + Equals, + )>, + // Subscriptions for version tracking are OK. + AllowSubscriptionsFrom, + ), + UniversalLocation, + ConstU32<8>, + >, + ), + >, +>; + +/// Multiplier used for dedicated `TakeFirstAssetTrader` with `Assets` instance. +pub type AssetFeeAsExistentialDepositMultiplierFeeCharger = AssetFeeAsExistentialDepositMultiplier< + Runtime, + WeightToFee, + pallet_assets::BalanceToAssetBalance, + TrustBackedAssetsInstance, +>; + +/// Multiplier used for dedicated `TakeFirstAssetTrader` with `ForeignAssets` instance. 
+pub type ForeignAssetFeeAsExistentialDepositMultiplierFeeCharger = + AssetFeeAsExistentialDepositMultiplier< + Runtime, + WeightToFee, + pallet_assets::BalanceToAssetBalance, + ForeignAssetsInstance, + >; + +match_types! { + pub type SystemParachains: impl Contains = { + MultiLocation { + parents: 1, + interior: X1(Parachain( + system_parachain::ASSET_HUB_ID | + system_parachain::BRIDGE_HUB_ID | + system_parachain::CONTRACTS_ID | + system_parachain::ENCOINTER_ID + )), + } + }; +} + +/// Locations that will not be charged fees in the executor, +/// either execution or delivery. +/// We only waive fees for system functions, which these locations represent. +pub type WaivedLocations = + (RelayOrOtherSystemParachains, Equals); + +/// Cases where a remote origin is accepted as trusted Teleporter for a given asset: +/// +/// - ROC with the parent Relay Chain and sibling system parachains; and +/// - Sibling parachains' assets from where they originate (as `ForeignCreators`). +pub type TrustedTeleporters = ( + ConcreteAssetFromSystem, + IsForeignConcreteAsset>>, +); + +pub struct XcmConfig; +impl xcm_executor::Config for XcmConfig { + type RuntimeCall = RuntimeCall; + type XcmSender = XcmRouter; + type AssetTransactor = AssetTransactors; + type OriginConverter = XcmOriginToTransactDispatchOrigin; + // Asset Hub trusts only particular, pre-configured bridged locations from a different consensus + // as reserve locations (we trust the Bridge Hub to relay the message that a reserve is being + // held). Asset Hub may _act_ as a reserve location for ROC and assets created + // under `pallet-assets`. Users must use teleport where allowed (e.g. ROC with the Relay Chain). + type IsReserve = (bridging::to_westend::IsTrustedBridgedReserveLocationForConcreteAsset,); + type IsTeleporter = TrustedTeleporters; + type UniversalLocation = UniversalLocation; + type Barrier = Barrier; + type Weigher = WeightInfoBounds< + crate::weights::xcm::AssetHubRococoXcmWeight, + RuntimeCall, + MaxInstructions, + >; + type Trader = ( + UsingComponents>, + // This trader allows to pay with `is_sufficient=true` "Trust Backed" assets from dedicated + // `pallet_assets` instance - `Assets`. + cumulus_primitives_utility::TakeFirstAssetTrader< + AccountId, + AssetFeeAsExistentialDepositMultiplierFeeCharger, + TrustBackedAssetsConvertedConcreteId, + Assets, + cumulus_primitives_utility::XcmFeesTo32ByteAccount< + FungiblesTransactor, + AccountId, + XcmAssetFeesReceiver, + >, + >, + // This trader allows to pay with `is_sufficient=true` "Foreign" assets from dedicated + // `pallet_assets` instance - `ForeignAssets`. 
+ cumulus_primitives_utility::TakeFirstAssetTrader< + AccountId, + ForeignAssetFeeAsExistentialDepositMultiplierFeeCharger, + ForeignAssetsConvertedConcreteId, + ForeignAssets, + cumulus_primitives_utility::XcmFeesTo32ByteAccount< + ForeignFungiblesTransactor, + AccountId, + XcmAssetFeesReceiver, + >, + >, + ); + type ResponseHandler = PolkadotXcm; + type AssetTrap = PolkadotXcm; + type AssetClaims = PolkadotXcm; + type SubscriptionService = PolkadotXcm; + type PalletInstancesInfo = AllPalletsWithSystem; + type MaxAssetsIntoHolding = MaxAssetsIntoHolding; + type AssetLocker = (); + type AssetExchanger = (); + type FeeManager = XcmFeeManagerFromComponents< + WaivedLocations, + XcmFeeToAccount, + >; + type MessageExporter = (); + type UniversalAliases = (bridging::to_westend::UniversalAliases,); + type CallDispatcher = WithOriginFilter; + type SafeCallFilter = SafeCallFilter; + type Aliasers = Nothing; +} + +/// Converts a local signed origin into an XCM multilocation. +/// Forms the basis for local origins sending/executing XCMs. +pub type LocalOriginToLocation = SignedToAccountId32; + +pub type PriceForParentDelivery = + ExponentialPrice; + +/// For routing XCM messages which do not cross local consensus boundary. +type LocalXcmRouter = ( + // Two routers - use UMP to communicate with the relay chain: + cumulus_primitives_utility::ParentAsUmp, + // ..and XCMP to communicate with the sibling chains. + XcmpQueue, +); + +/// The means for routing XCM messages which are not for local execution into the right message +/// queues. +pub type XcmRouter = WithUniqueTopic<( + LocalXcmRouter, + // Router which wraps and sends xcm to BridgeHub to be delivered to the Westend + // GlobalConsensus + ToWestendXcmRouter, +)>; + +impl pallet_xcm::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + // We want to disallow users sending (arbitrary) XCMs from this chain. + type SendXcmOrigin = EnsureXcmOrigin; + type XcmRouter = XcmRouter; + // We support local origins dispatching XCM executions in principle... + type ExecuteXcmOrigin = EnsureXcmOrigin; + // ... but disallow generic XCM execution. As a result only teleports and reserve transfers are + // allowed. + type XcmExecuteFilter = Nothing; + type XcmExecutor = XcmExecutor; + type XcmTeleportFilter = Everything; + type XcmReserveTransferFilter = Everything; + type Weigher = WeightInfoBounds< + crate::weights::xcm::AssetHubRococoXcmWeight, + RuntimeCall, + MaxInstructions, + >; + type UniversalLocation = UniversalLocation; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100; + type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion; + type Currency = Balances; + type CurrencyMatcher = (); + type TrustedLockers = (); + type SovereignAccountOf = LocationToAccountId; + type MaxLockers = ConstU32<8>; + type WeightInfo = crate::weights::pallet_xcm::WeightInfo; + type AdminOrigin = EnsureRoot; + type MaxRemoteLockConsumers = ConstU32<0>; + type RemoteLockConsumerIdentifier = (); +} + +impl cumulus_pallet_xcm::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type XcmExecutor = XcmExecutor; +} + +pub type ForeignCreatorsSovereignAccountOf = ( + SiblingParachainConvertsVia, + AccountId32Aliases, + ParentIsPreset, +); + +/// Simple conversion of `u32` into an `AssetId` for use in benchmarking. 
+pub struct XcmBenchmarkHelper; +#[cfg(feature = "runtime-benchmarks")] +impl pallet_assets::BenchmarkHelper for XcmBenchmarkHelper { + fn create_asset_id_parameter(id: u32) -> MultiLocation { + MultiLocation { parents: 1, interior: X1(Parachain(id)) } + } +} + +#[cfg(feature = "runtime-benchmarks")] +pub struct BenchmarkMultiLocationConverter { + _phantom: sp_std::marker::PhantomData, +} + +#[cfg(feature = "runtime-benchmarks")] +impl + pallet_asset_conversion::BenchmarkHelper> + for BenchmarkMultiLocationConverter +where + SelfParaId: frame_support::traits::Get, +{ + fn asset_id(asset_id: u32) -> MultiLocation { + MultiLocation { + parents: 1, + interior: X3( + Parachain(SelfParaId::get().into()), + PalletInstance(::index() as u8), + GeneralIndex(asset_id.into()), + ), + } + } + fn multiasset_id(asset_id: u32) -> sp_std::boxed::Box { + sp_std::boxed::Box::new(Self::asset_id(asset_id)) + } +} + +/// All configuration related to bridging +pub mod bridging { + use super::*; + use assets_common::matching; + use sp_std::collections::btree_set::BTreeSet; + + // common/shared parameters + parameter_types! { + /// Base price of every byte of the Rococo -> Westend message. Can be adjusted via + /// governance `set_storage` call. + /// + /// Default value is our estimation of the: + /// + /// 1) an approximate cost of XCM execution (`ExportMessage` and surroundings) at Rococo bridge hub; + /// + /// 2) the approximate cost of Rococo -> Westend message delivery transaction on Westend Bridge Hub, + /// converted into ROCs using 1:1 conversion rate; + /// + /// 3) the approximate cost of Rococo -> Westend message confirmation transaction on Rococo Bridge Hub. + pub storage XcmBridgeHubRouterBaseFee: Balance = + bp_bridge_hub_rococo::BridgeHubRococoBaseXcmFeeInRocs::get() + .saturating_add(bp_bridge_hub_westend::BridgeHubWestendBaseDeliveryFeeInWnds::get()) + .saturating_add(bp_bridge_hub_rococo::BridgeHubRococoBaseConfirmationFeeInRocs::get()); + /// Price of every byte of the Rococo -> Westend message. Can be adjusted via + /// governance `set_storage` call. + pub storage XcmBridgeHubRouterByteFee: Balance = TransactionByteFee::get(); + + pub SiblingBridgeHubParaId: u32 = bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID; + pub SiblingBridgeHub: MultiLocation = MultiLocation::new(1, X1(Parachain(SiblingBridgeHubParaId::get()))); + /// Router expects payment with this `AssetId`. + /// (`AssetId` has to be aligned with `BridgeTable`) + pub XcmBridgeHubRouterFeeAssetId: AssetId = TokenLocation::get().into(); + + pub BridgeTable: sp_std::vec::Vec = + sp_std::vec::Vec::new().into_iter() + .chain(to_westend::BridgeTable::get()) + .collect(); + } + + pub type NetworkExportTable = xcm_builder::NetworkExportTable; + + pub mod to_westend { + use super::*; + + parameter_types! 
{ + pub SiblingBridgeHubWithBridgeHubWestendInstance: MultiLocation = MultiLocation::new( + 1, + X2( + Parachain(SiblingBridgeHubParaId::get()), + PalletInstance(bp_bridge_hub_rococo::WITH_BRIDGE_ROCOCO_TO_WESTEND_MESSAGES_PALLET_INDEX) + ) + ); + + pub const WestendNetwork: NetworkId = NetworkId::Westend; + pub AssetHubWestend: MultiLocation = MultiLocation::new(2, X2(GlobalConsensus(WestendNetwork::get()), Parachain(bp_asset_hub_westend::ASSET_HUB_WESTEND_PARACHAIN_ID))); + pub WndLocation: MultiLocation = MultiLocation::new(2, X1(GlobalConsensus(WestendNetwork::get()))); + + pub WndFromAssetHubWestend: (MultiAssetFilter, MultiLocation) = ( + Wild(AllOf { fun: WildFungible, id: Concrete(WndLocation::get()) }), + AssetHubWestend::get() + ); + + /// Set up exporters configuration. + /// `Option` represents static "base fee" which is used for total delivery fee calculation. + pub BridgeTable: sp_std::vec::Vec = sp_std::vec![ + NetworkExportTableItem::new( + WestendNetwork::get(), + Some(sp_std::vec![ + AssetHubWestend::get().interior.split_global().expect("invalid configuration for AssetHubWestend").1, + ]), + SiblingBridgeHub::get(), + // base delivery fee to local `BridgeHub` + Some(( + XcmBridgeHubRouterFeeAssetId::get(), + XcmBridgeHubRouterBaseFee::get(), + ).into()) + ) + ]; + + /// Universal aliases + pub UniversalAliases: BTreeSet<(MultiLocation, Junction)> = BTreeSet::from_iter( + sp_std::vec![ + (SiblingBridgeHubWithBridgeHubWestendInstance::get(), GlobalConsensus(WestendNetwork::get())) + ] + ); + } + + impl Contains<(MultiLocation, Junction)> for UniversalAliases { + fn contains(alias: &(MultiLocation, Junction)) -> bool { + UniversalAliases::get().contains(alias) + } + } + + /// Trusted reserve locations filter for `xcm_executor::Config::IsReserve`. + /// Locations from which the runtime accepts reserved assets. + pub type IsTrustedBridgedReserveLocationForConcreteAsset = + matching::IsTrustedBridgedReserveLocationForConcreteAsset< + UniversalLocation, + ( + // allow receive WND from AssetHubWestend + xcm_builder::Case, + // and nothing else + ), + >; + + impl Contains for ToWestendXcmRouter { + fn contains(call: &RuntimeCall) -> bool { + matches!( + call, + RuntimeCall::ToWestendXcmRouter( + pallet_xcm_bridge_hub_router::Call::report_bridge_status { .. } + ) + ) + } + } + } + + /// Benchmarks helper for bridging configuration. + #[cfg(feature = "runtime-benchmarks")] + pub struct BridgingBenchmarksHelper; + + #[cfg(feature = "runtime-benchmarks")] + impl BridgingBenchmarksHelper { + pub fn prepare_universal_alias() -> Option<(MultiLocation, Junction)> { + let alias = + to_westend::UniversalAliases::get() + .into_iter() + .find_map(|(location, junction)| { + match to_westend::SiblingBridgeHubWithBridgeHubWestendInstance::get() + .eq(&location) + { + true => Some((location, junction)), + false => None, + } + }); + assert!(alias.is_some(), "we expect here BridgeHubRococo to Westend mapping at least"); + Some(alias.unwrap()) + } + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..7bb71a77de7dccb6552e70bbec8d0679d29d7bd6 --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs @@ -0,0 +1,860 @@ +// This file is part of Cumulus. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests for the Rococo Assets Hub chain. + +use asset_hub_rococo_runtime::xcm_config::{ + AssetFeeAsExistentialDepositMultiplierFeeCharger, TokenLocation, + TrustBackedAssetsPalletLocation, +}; +pub use asset_hub_rococo_runtime::{ + xcm_config::{ + self, bridging, CheckingAccount, ForeignCreatorsSovereignAccountOf, LocationToAccountId, + XcmConfig, + }, + AllPalletsWithoutSystem, AssetDeposit, Assets, Balances, ExistentialDeposit, ForeignAssets, + ForeignAssetsInstance, MetadataDepositBase, MetadataDepositPerByte, ParachainSystem, Runtime, + RuntimeCall, RuntimeEvent, SessionKeys, System, ToWestendXcmRouterInstance, + TrustBackedAssetsInstance, XcmpQueue, +}; +use asset_test_utils::{ + test_cases_over_bridge::TestBridgingConfig, CollatorSessionKey, CollatorSessionKeys, ExtBuilder, +}; +use codec::{Decode, Encode}; +use cumulus_primitives_utility::ChargeWeightInFungibles; +use frame_support::{ + assert_noop, assert_ok, + traits::fungibles::InspectEnumerable, + weights::{Weight, WeightToFee as WeightToFeeT}, +}; +use parachains_common::{ + rococo::fee::WeightToFee, AccountId, AssetIdForTrustBackedAssets, AuraId, Balance, +}; +use sp_runtime::traits::MaybeEquivalence; +use xcm::latest::prelude::*; +use xcm_executor::traits::{Identity, JustTry, WeightTrader}; + +const ALICE: [u8; 32] = [1u8; 32]; +const SOME_ASSET_ADMIN: [u8; 32] = [5u8; 32]; + +type AssetIdForTrustBackedAssetsConvert = + assets_common::AssetIdForTrustBackedAssetsConvert; + +type RuntimeHelper = asset_test_utils::RuntimeHelper; + +fn collator_session_key(account: [u8; 32]) -> CollatorSessionKey { + CollatorSessionKey::new( + AccountId::from(account), + AccountId::from(account), + SessionKeys { aura: AuraId::from(sp_core::sr25519::Public::from_raw(account)) }, + ) +} + +fn collator_session_keys() -> CollatorSessionKeys { + CollatorSessionKeys::default().add(collator_session_key(ALICE)) +} + +#[test] +fn test_asset_xcm_trader() { + ExtBuilder::::default() + .with_collators(vec![AccountId::from(ALICE)]) + .with_session_keys(vec![( + AccountId::from(ALICE), + AccountId::from(ALICE), + SessionKeys { aura: AuraId::from(sp_core::sr25519::Public::from_raw(ALICE)) }, + )]) + .build() + .execute_with(|| { + // We need root origin to create a sufficient asset + let minimum_asset_balance = 3333333_u128; + let local_asset_id = 1; + assert_ok!(Assets::force_create( + RuntimeHelper::root_origin(), + local_asset_id.into(), + AccountId::from(ALICE).into(), + true, + minimum_asset_balance + )); + + // We first mint enough asset for the account to exist for assets + assert_ok!(Assets::mint( + RuntimeHelper::origin_of(AccountId::from(ALICE)), + local_asset_id.into(), + AccountId::from(ALICE).into(), + minimum_asset_balance + )); + + // get asset id as multilocation + let asset_multilocation = + AssetIdForTrustBackedAssetsConvert::convert_back(&local_asset_id).unwrap(); + + // Set Alice as block author, who will receive fees + 
RuntimeHelper::run_to_block(2, AccountId::from(ALICE));
+
+			// We are going to buy 4e9 weight
+			let bought = Weight::from_parts(4_000_000_000u64, 0);
+
+			// Let's calculate the amount needed
+			let asset_amount_needed =
+				AssetFeeAsExistentialDepositMultiplierFeeCharger::charge_weight_in_fungibles(
+					local_asset_id,
+					bought,
+				)
+				.expect("failed to compute");
+
+			// Let's pay with: asset_amount_needed + asset_amount_extra
+			let asset_amount_extra = 100_u128;
+			let asset: MultiAsset =
+				(asset_multilocation, asset_amount_needed + asset_amount_extra).into();
+
+			let mut trader = <XcmConfig as xcm_executor::Config>::Trader::new();
+			let ctx = XcmContext { origin: None, message_id: XcmHash::default(), topic: None };
+
+			// Let's call buy_weight and make sure it does not return an error
+			let unused_assets = trader.buy_weight(bought, asset.into(), &ctx).expect("Expected Ok");
+			// Check whether a correct amount of unused assets is returned
+			assert_ok!(
+				unused_assets.ensure_contains(&(asset_multilocation, asset_amount_extra).into())
+			);
+
+			// Drop trader
+			drop(trader);
+
+			// Make sure the author (Alice) has received the amount
+			assert_eq!(
+				Assets::balance(local_asset_id, AccountId::from(ALICE)),
+				minimum_asset_balance + asset_amount_needed
+			);
+
+			// We also need to ensure the total supply increased
+			assert_eq!(
+				Assets::total_supply(local_asset_id),
+				minimum_asset_balance + asset_amount_needed
+			);
+		});
+}
+
+#[test]
+fn test_asset_xcm_trader_with_refund() {
+	ExtBuilder::<Runtime>::default()
+		.with_collators(vec![AccountId::from(ALICE)])
+		.with_session_keys(vec![(
+			AccountId::from(ALICE),
+			AccountId::from(ALICE),
+			SessionKeys { aura: AuraId::from(sp_core::sr25519::Public::from_raw(ALICE)) },
+		)])
+		.build()
+		.execute_with(|| {
+			// We need root origin to create a sufficient asset
+			// We set existential deposit to be identical to the one for Balances first
+			assert_ok!(Assets::force_create(
+				RuntimeHelper::root_origin(),
+				1.into(),
+				AccountId::from(ALICE).into(),
+				true,
+				ExistentialDeposit::get()
+			));
+
+			// We first mint enough asset for the account to exist for assets
+			assert_ok!(Assets::mint(
+				RuntimeHelper::origin_of(AccountId::from(ALICE)),
+				1.into(),
+				AccountId::from(ALICE).into(),
+				ExistentialDeposit::get()
+			));
+
+			let mut trader = <XcmConfig as xcm_executor::Config>::Trader::new();
+			let ctx = XcmContext { origin: None, message_id: XcmHash::default(), topic: None };
+
+			// Set Alice as block author, who will receive fees
+			RuntimeHelper::run_to_block(2, AccountId::from(ALICE));
+
+			// We are going to buy 4e9 weight
+			let bought = Weight::from_parts(4_000_000_000u64, 0);
+
+			let asset_multilocation = AssetIdForTrustBackedAssetsConvert::convert_back(&1).unwrap();
+
+			// Let's calculate the amount needed
+			let amount_bought = WeightToFee::weight_to_fee(&bought);
+
+			let asset: MultiAsset = (asset_multilocation, amount_bought).into();
+
+			// Make sure buy_weight does not return an error
+			assert_ok!(trader.buy_weight(bought, asset.clone().into(), &ctx));
+
+			// Make sure a second buy_weight call does return an error
+			// This assert relies on the fact that we use `TakeFirstAssetTrader` in the
+			// `WeightTrader` tuple chain, and it cannot be called twice
+			assert_noop!(trader.buy_weight(bought, asset.into(), &ctx), XcmError::TooExpensive);
+
+			// We actually use half of the weight
+			let weight_used = bought / 2;
+
+			// Make sure refund works.
+ let amount_refunded = WeightToFee::weight_to_fee(&(bought - weight_used)); + + assert_eq!( + trader.refund_weight(bought - weight_used, &ctx), + Some((asset_multilocation, amount_refunded).into()) + ); + + // Drop trader + drop(trader); + + // We only should have paid for half of the bought weight + let fees_paid = WeightToFee::weight_to_fee(&weight_used); + + assert_eq!( + Assets::balance(1, AccountId::from(ALICE)), + ExistentialDeposit::get() + fees_paid + ); + + // We also need to ensure the total supply increased + assert_eq!(Assets::total_supply(1), ExistentialDeposit::get() + fees_paid); + }); +} + +#[test] +fn test_asset_xcm_trader_refund_not_possible_since_amount_less_than_ed() { + ExtBuilder::::default() + .with_collators(vec![AccountId::from(ALICE)]) + .with_session_keys(vec![( + AccountId::from(ALICE), + AccountId::from(ALICE), + SessionKeys { aura: AuraId::from(sp_core::sr25519::Public::from_raw(ALICE)) }, + )]) + .build() + .execute_with(|| { + // We need root origin to create a sufficient asset + // We set existential deposit to be identical to the one for Balances first + assert_ok!(Assets::force_create( + RuntimeHelper::root_origin(), + 1.into(), + AccountId::from(ALICE).into(), + true, + ExistentialDeposit::get() + )); + + let mut trader = ::Trader::new(); + let ctx = XcmContext { origin: None, message_id: XcmHash::default(), topic: None }; + + // Set Alice as block author, who will receive fees + RuntimeHelper::run_to_block(2, AccountId::from(ALICE)); + + // We are going to buy small amount + let bought = Weight::from_parts(500_000_000u64, 0); + + let asset_multilocation = AssetIdForTrustBackedAssetsConvert::convert_back(&1).unwrap(); + + let amount_bought = WeightToFee::weight_to_fee(&bought); + + assert!( + amount_bought < ExistentialDeposit::get(), + "we are testing what happens when the amount does not exceed ED" + ); + + let asset: MultiAsset = (asset_multilocation, amount_bought).into(); + + // Buy weight should return an error + assert_noop!(trader.buy_weight(bought, asset.into(), &ctx), XcmError::TooExpensive); + + // not credited since the ED is higher than this value + assert_eq!(Assets::balance(1, AccountId::from(ALICE)), 0); + + // We also need to ensure the total supply did not increase + assert_eq!(Assets::total_supply(1), 0); + }); +} + +#[test] +fn test_that_buying_ed_refund_does_not_refund() { + ExtBuilder::::default() + .with_collators(vec![AccountId::from(ALICE)]) + .with_session_keys(vec![( + AccountId::from(ALICE), + AccountId::from(ALICE), + SessionKeys { aura: AuraId::from(sp_core::sr25519::Public::from_raw(ALICE)) }, + )]) + .build() + .execute_with(|| { + // We need root origin to create a sufficient asset + // We set existential deposit to be identical to the one for Balances first + assert_ok!(Assets::force_create( + RuntimeHelper::root_origin(), + 1.into(), + AccountId::from(ALICE).into(), + true, + ExistentialDeposit::get() + )); + + let mut trader = ::Trader::new(); + let ctx = XcmContext { origin: None, message_id: XcmHash::default(), topic: None }; + + // Set Alice as block author, who will receive fees + RuntimeHelper::run_to_block(2, AccountId::from(ALICE)); + + // We are gonna buy ED + let bought = Weight::from_parts(ExistentialDeposit::get().try_into().unwrap(), 0); + + let asset_multilocation = AssetIdForTrustBackedAssetsConvert::convert_back(&1).unwrap(); + + let amount_bought = WeightToFee::weight_to_fee(&bought); + + assert!( + amount_bought < ExistentialDeposit::get(), + "we are testing what happens when the amount does not 
exceed ED" + ); + + // We know we will have to buy at least ED, so lets make sure first it will + // fail with a payment of less than ED + let asset: MultiAsset = (asset_multilocation, amount_bought).into(); + assert_noop!(trader.buy_weight(bought, asset.into(), &ctx), XcmError::TooExpensive); + + // Now lets buy ED at least + let asset: MultiAsset = (asset_multilocation, ExistentialDeposit::get()).into(); + + // Buy weight should work + assert_ok!(trader.buy_weight(bought, asset.into(), &ctx)); + + // Should return None. We have a specific check making sure we dont go below ED for + // drop payment + assert_eq!(trader.refund_weight(bought, &ctx), None); + + // Drop trader + drop(trader); + + // Make sure author(Alice) has received the amount + assert_eq!(Assets::balance(1, AccountId::from(ALICE)), ExistentialDeposit::get()); + + // We also need to ensure the total supply increased + assert_eq!(Assets::total_supply(1), ExistentialDeposit::get()); + }); +} + +#[test] +fn test_asset_xcm_trader_not_possible_for_non_sufficient_assets() { + ExtBuilder::::default() + .with_collators(vec![AccountId::from(ALICE)]) + .with_session_keys(vec![( + AccountId::from(ALICE), + AccountId::from(ALICE), + SessionKeys { aura: AuraId::from(sp_core::sr25519::Public::from_raw(ALICE)) }, + )]) + .build() + .execute_with(|| { + // Create a non-sufficient asset with specific existential deposit + let minimum_asset_balance = 1_000_000_u128; + assert_ok!(Assets::force_create( + RuntimeHelper::root_origin(), + 1.into(), + AccountId::from(ALICE).into(), + false, + minimum_asset_balance + )); + + // We first mint enough asset for the account to exist for assets + assert_ok!(Assets::mint( + RuntimeHelper::origin_of(AccountId::from(ALICE)), + 1.into(), + AccountId::from(ALICE).into(), + minimum_asset_balance + )); + + let mut trader = ::Trader::new(); + let ctx = XcmContext { origin: None, message_id: XcmHash::default(), topic: None }; + + // Set Alice as block author, who will receive fees + RuntimeHelper::run_to_block(2, AccountId::from(ALICE)); + + // We are going to buy 4e9 weight + let bought = Weight::from_parts(4_000_000_000u64, 0); + + // lets calculate amount needed + let asset_amount_needed = WeightToFee::weight_to_fee(&bought); + + let asset_multilocation = AssetIdForTrustBackedAssetsConvert::convert_back(&1).unwrap(); + + let asset: MultiAsset = (asset_multilocation, asset_amount_needed).into(); + + // Make sure again buy_weight does return an error + assert_noop!(trader.buy_weight(bought, asset.into(), &ctx), XcmError::TooExpensive); + + // Drop trader + drop(trader); + + // Make sure author(Alice) has NOT received the amount + assert_eq!(Assets::balance(1, AccountId::from(ALICE)), minimum_asset_balance); + + // We also need to ensure the total supply NOT increased + assert_eq!(Assets::total_supply(1), minimum_asset_balance); + }); +} + +#[test] +fn test_assets_balances_api_works() { + use assets_common::runtime_api::runtime_decl_for_fungibles_api::FungiblesApi; + + ExtBuilder::::default() + .with_collators(vec![AccountId::from(ALICE)]) + .with_session_keys(vec![( + AccountId::from(ALICE), + AccountId::from(ALICE), + SessionKeys { aura: AuraId::from(sp_core::sr25519::Public::from_raw(ALICE)) }, + )]) + .build() + .execute_with(|| { + let local_asset_id = 1; + let foreign_asset_id_multilocation = + MultiLocation { parents: 1, interior: X2(Parachain(1234), GeneralIndex(12345)) }; + + // check before + assert_eq!(Assets::balance(local_asset_id, AccountId::from(ALICE)), 0); + assert_eq!( + 
ForeignAssets::balance(foreign_asset_id_multilocation, AccountId::from(ALICE)), + 0 + ); + assert_eq!(Balances::free_balance(AccountId::from(ALICE)), 0); + assert!(Runtime::query_account_balances(AccountId::from(ALICE)) + .unwrap() + .try_as::() + .unwrap() + .is_none()); + + // Drip some balance + use frame_support::traits::fungible::Mutate; + let some_currency = ExistentialDeposit::get(); + Balances::mint_into(&AccountId::from(ALICE), some_currency).unwrap(); + + // We need root origin to create a sufficient asset + let minimum_asset_balance = 3333333_u128; + assert_ok!(Assets::force_create( + RuntimeHelper::root_origin(), + local_asset_id.into(), + AccountId::from(ALICE).into(), + true, + minimum_asset_balance + )); + + // We first mint enough asset for the account to exist for assets + assert_ok!(Assets::mint( + RuntimeHelper::origin_of(AccountId::from(ALICE)), + local_asset_id.into(), + AccountId::from(ALICE).into(), + minimum_asset_balance + )); + + // create foreign asset + let foreign_asset_minimum_asset_balance = 3333333_u128; + assert_ok!(ForeignAssets::force_create( + RuntimeHelper::root_origin(), + foreign_asset_id_multilocation, + AccountId::from(SOME_ASSET_ADMIN).into(), + false, + foreign_asset_minimum_asset_balance + )); + + // We first mint enough asset for the account to exist for assets + assert_ok!(ForeignAssets::mint( + RuntimeHelper::origin_of(AccountId::from(SOME_ASSET_ADMIN)), + foreign_asset_id_multilocation, + AccountId::from(ALICE).into(), + 6 * foreign_asset_minimum_asset_balance + )); + + // check after + assert_eq!( + Assets::balance(local_asset_id, AccountId::from(ALICE)), + minimum_asset_balance + ); + assert_eq!( + ForeignAssets::balance(foreign_asset_id_multilocation, AccountId::from(ALICE)), + 6 * minimum_asset_balance + ); + assert_eq!(Balances::free_balance(AccountId::from(ALICE)), some_currency); + + let result: MultiAssets = Runtime::query_account_balances(AccountId::from(ALICE)) + .unwrap() + .try_into() + .unwrap(); + assert_eq!(result.len(), 3); + + // check currency + assert!(result.inner().iter().any(|asset| asset.eq( + &assets_common::fungible_conversion::convert_balance::( + some_currency + ) + .unwrap() + ))); + // check trusted asset + assert!(result.inner().iter().any(|asset| asset.eq(&( + AssetIdForTrustBackedAssetsConvert::convert_back(&local_asset_id).unwrap(), + minimum_asset_balance + ) + .into()))); + // check foreign asset + assert!(result.inner().iter().any(|asset| asset.eq(&( + Identity::convert_back(&foreign_asset_id_multilocation).unwrap(), + 6 * foreign_asset_minimum_asset_balance + ) + .into()))); + }); +} + +asset_test_utils::include_teleports_for_native_asset_works!( + Runtime, + AllPalletsWithoutSystem, + XcmConfig, + CheckingAccount, + WeightToFee, + ParachainSystem, + collator_session_keys(), + ExistentialDeposit::get(), + Box::new(|runtime_event_encoded: Vec| { + match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { + Ok(RuntimeEvent::PolkadotXcm(event)) => Some(event), + _ => None, + } + }), + 1000 +); + +asset_test_utils::include_teleports_for_foreign_assets_works!( + Runtime, + AllPalletsWithoutSystem, + XcmConfig, + CheckingAccount, + WeightToFee, + ParachainSystem, + ForeignCreatorsSovereignAccountOf, + ForeignAssetsInstance, + collator_session_keys(), + ExistentialDeposit::get(), + Box::new(|runtime_event_encoded: Vec| { + match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { + Ok(RuntimeEvent::PolkadotXcm(event)) => Some(event), + _ => None, + } + }), + Box::new(|runtime_event_encoded: Vec| { + match 
RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { + Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), + _ => None, + } + }) +); + +asset_test_utils::include_asset_transactor_transfer_with_local_consensus_currency_works!( + Runtime, + XcmConfig, + collator_session_keys(), + ExistentialDeposit::get(), + Box::new(|| { + assert!(Assets::asset_ids().collect::>().is_empty()); + assert!(ForeignAssets::asset_ids().collect::>().is_empty()); + }), + Box::new(|| { + assert!(Assets::asset_ids().collect::>().is_empty()); + assert!(ForeignAssets::asset_ids().collect::>().is_empty()); + }) +); + +asset_test_utils::include_asset_transactor_transfer_with_pallet_assets_instance_works!( + asset_transactor_transfer_with_trust_backed_assets_works, + Runtime, + XcmConfig, + TrustBackedAssetsInstance, + AssetIdForTrustBackedAssets, + AssetIdForTrustBackedAssetsConvert, + collator_session_keys(), + ExistentialDeposit::get(), + 12345, + Box::new(|| { + assert!(ForeignAssets::asset_ids().collect::>().is_empty()); + }), + Box::new(|| { + assert!(ForeignAssets::asset_ids().collect::>().is_empty()); + }) +); + +asset_test_utils::include_asset_transactor_transfer_with_pallet_assets_instance_works!( + asset_transactor_transfer_with_foreign_assets_works, + Runtime, + XcmConfig, + ForeignAssetsInstance, + MultiLocation, + JustTry, + collator_session_keys(), + ExistentialDeposit::get(), + MultiLocation { parents: 1, interior: X2(Parachain(1313), GeneralIndex(12345)) }, + Box::new(|| { + assert!(Assets::asset_ids().collect::>().is_empty()); + }), + Box::new(|| { + assert!(Assets::asset_ids().collect::>().is_empty()); + }) +); + +asset_test_utils::include_create_and_manage_foreign_assets_for_local_consensus_parachain_assets_works!( + Runtime, + XcmConfig, + WeightToFee, + ForeignCreatorsSovereignAccountOf, + ForeignAssetsInstance, + MultiLocation, + JustTry, + collator_session_keys(), + ExistentialDeposit::get(), + AssetDeposit::get(), + MetadataDepositBase::get(), + MetadataDepositPerByte::get(), + Box::new(|pallet_asset_call| RuntimeCall::ForeignAssets(pallet_asset_call).encode()), + Box::new(|runtime_event_encoded: Vec| { + match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { + Ok(RuntimeEvent::ForeignAssets(pallet_asset_event)) => Some(pallet_asset_event), + _ => None, + } + }), + Box::new(|| { + assert!(Assets::asset_ids().collect::>().is_empty()); + assert!(ForeignAssets::asset_ids().collect::>().is_empty()); + }), + Box::new(|| { + assert!(Assets::asset_ids().collect::>().is_empty()); + assert_eq!(ForeignAssets::asset_ids().collect::>().len(), 1); + }) +); + +fn limited_reserve_transfer_assets_for_native_asset_over_bridge_works( + bridging_configuration: fn() -> TestBridgingConfig, +) { + asset_test_utils::test_cases_over_bridge::limited_reserve_transfer_assets_for_native_asset_works::< + Runtime, + AllPalletsWithoutSystem, + XcmConfig, + ParachainSystem, + XcmpQueue, + LocationToAccountId, + >( + collator_session_keys(), + ExistentialDeposit::get(), + AccountId::from(ALICE), + Box::new(|runtime_event_encoded: Vec| { + match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { + Ok(RuntimeEvent::PolkadotXcm(event)) => Some(event), + _ => None, + } + }), + Box::new(|runtime_event_encoded: Vec| { + match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { + Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), + _ => None, + } + }), + bridging_configuration, + WeightLimit::Unlimited, + Some(xcm_config::bridging::XcmBridgeHubRouterFeeAssetId::get()), + Some(xcm_config::TreasuryAccount::get()), + ) +} 
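+
+// The module below groups the bridging-related cases: reserve transfers over the
+// Rococo -> Westend bridge, receiving reserve-deposited WNDs from Asset Hub Westend,
+// and `report_bridge_status` calls routed through `ToWestendXcmRouter`.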
+
+mod asset_hub_rococo_tests {
+ use super::*;
+
+ fn bridging_to_asset_hub_westend() -> TestBridgingConfig {
+ asset_test_utils::test_cases_over_bridge::TestBridgingConfig {
+ bridged_network: bridging::to_westend::WestendNetwork::get(),
+ local_bridge_hub_para_id: bridging::SiblingBridgeHubParaId::get(),
+ local_bridge_hub_location: bridging::SiblingBridgeHub::get(),
+ bridged_target_location: bridging::to_westend::AssetHubWestend::get(),
+ }
+ }
+
+ #[test]
+ fn limited_reserve_transfer_assets_for_native_asset_to_asset_hub_westend_works() {
+ limited_reserve_transfer_assets_for_native_asset_over_bridge_works(
+ bridging_to_asset_hub_westend,
+ )
+ }
+
+ #[test]
+ fn receive_reserve_asset_deposited_wnd_from_asset_hub_westend_works() {
+ const BLOCK_AUTHOR_ACCOUNT: [u8; 32] = [13; 32];
+ asset_test_utils::test_cases_over_bridge::receive_reserve_asset_deposited_from_different_consensus_works::<
+ Runtime,
+ AllPalletsWithoutSystem,
+ XcmConfig,
+ LocationToAccountId,
+ ForeignAssetsInstance,
+ >(
+ collator_session_keys().add(collator_session_key(BLOCK_AUTHOR_ACCOUNT)),
+ ExistentialDeposit::get(),
+ AccountId::from([73; 32]),
+ AccountId::from(BLOCK_AUTHOR_ACCOUNT),
+ // receiving WNDs
+ (MultiLocation { parents: 2, interior: X1(GlobalConsensus(Westend)) }, 1000000000000, 1_000_000_000),
+ bridging_to_asset_hub_westend,
+ (
+ X1(PalletInstance(bp_bridge_hub_rococo::WITH_BRIDGE_ROCOCO_TO_WESTEND_MESSAGES_PALLET_INDEX)),
+ GlobalConsensus(Westend),
+ X1(Parachain(1000))
+ )
+ )
+ }
+
+ #[test]
+ fn report_bridge_status_from_xcm_bridge_router_for_westend_works() {
+ asset_test_utils::test_cases_over_bridge::report_bridge_status_from_xcm_bridge_router_works::<
+ Runtime,
+ AllPalletsWithoutSystem,
+ XcmConfig,
+ LocationToAccountId,
+ ToWestendXcmRouterInstance,
+ >(
+ collator_session_keys(),
+ bridging_to_asset_hub_westend,
+ || {
+ sp_std::vec![
+ UnpaidExecution { weight_limit: Unlimited, check_origin: None },
+ Transact {
+ origin_kind: OriginKind::Xcm,
+ require_weight_at_most:
+ bp_asset_hub_rococo::XcmBridgeHubRouterTransactCallMaxWeight::get(),
+ call: bp_asset_hub_rococo::Call::ToWestendXcmRouter(
+ bp_asset_hub_rococo::XcmBridgeHubRouterCall::report_bridge_status {
+ bridge_id: Default::default(),
+ is_congested: true,
+ }
+ )
+ .encode()
+ .into(),
+ }
+ ]
+ .into()
+ },
+ || {
+ sp_std::vec![
+ UnpaidExecution { weight_limit: Unlimited, check_origin: None },
+ Transact {
+ origin_kind: OriginKind::Xcm,
+ require_weight_at_most:
+ bp_asset_hub_rococo::XcmBridgeHubRouterTransactCallMaxWeight::get(),
+ call: bp_asset_hub_rococo::Call::ToWestendXcmRouter(
+ bp_asset_hub_rococo::XcmBridgeHubRouterCall::report_bridge_status {
+ bridge_id: Default::default(),
+ is_congested: false,
+ }
+ )
+ .encode()
+ .into(),
+ }
+ ]
+ .into()
+ },
+ )
+ }
+
+ #[test]
+ fn test_report_bridge_status_call_compatibility() {
+ // if this test fails, make sure `bp_asset_hub_rococo` has valid encoding
+ assert_eq!(
+ RuntimeCall::ToWestendXcmRouter(
+ pallet_xcm_bridge_hub_router::Call::report_bridge_status {
+ bridge_id: Default::default(),
+ is_congested: true,
+ }
+ )
+ .encode(),
+ bp_asset_hub_rococo::Call::ToWestendXcmRouter(
+ bp_asset_hub_rococo::XcmBridgeHubRouterCall::report_bridge_status {
+ bridge_id: Default::default(),
+ is_congested: true,
+ }
+ )
+ .encode()
+ );
+ }
+
+ #[test]
+ fn check_sane_weight_report_bridge_status_for_westend() {
+ use pallet_xcm_bridge_hub_router::WeightInfo;
+ let actual = <Runtime as pallet_xcm_bridge_hub_router::Config<ToWestendXcmRouterInstance>>::WeightInfo::report_bridge_status();
+ let max_weight =
bp_asset_hub_rococo::XcmBridgeHubRouterTransactCallMaxWeight::get();
+ assert!(
+ actual.all_lte(max_weight),
+ "max_weight: {:?} should be adjusted to actual {:?}",
+ max_weight,
+ actual
+ );
+ }
+
+ #[test]
+ fn reserve_transfer_native_asset_to_non_teleport_para_works() {
+ asset_test_utils::test_cases::reserve_transfer_native_asset_to_non_teleport_para_works::<
+ Runtime,
+ AllPalletsWithoutSystem,
+ XcmConfig,
+ ParachainSystem,
+ XcmpQueue,
+ LocationToAccountId,
+ >(
+ collator_session_keys(),
+ ExistentialDeposit::get(),
+ AccountId::from(ALICE),
+ Box::new(|runtime_event_encoded: Vec<u8>| {
+ match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) {
+ Ok(RuntimeEvent::PolkadotXcm(event)) => Some(event),
+ _ => None,
+ }
+ }),
+ Box::new(|runtime_event_encoded: Vec<u8>| {
+ match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) {
+ Ok(RuntimeEvent::XcmpQueue(event)) => Some(event),
+ _ => None,
+ }
+ }),
+ WeightLimit::Unlimited,
+ );
+ }
+}
+
+#[test]
+fn change_xcm_bridge_hub_router_byte_fee_by_governance_works() {
+ asset_test_utils::test_cases::change_storage_constant_by_governance_works::<
+ Runtime,
+ bridging::XcmBridgeHubRouterByteFee,
+ Balance,
+ >(
+ collator_session_keys(),
+ 1000,
+ Box::new(|call| RuntimeCall::System(call).encode()),
+ || {
+ (
+ bridging::XcmBridgeHubRouterByteFee::key().to_vec(),
+ bridging::XcmBridgeHubRouterByteFee::get(),
+ )
+ },
+ |old_value| {
+ if let Some(new_value) = old_value.checked_add(1) {
+ new_value
+ } else {
+ old_value.checked_sub(1).unwrap()
+ }
+ },
+ )
+}
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml
index f7f6fdf68e46692182839bdd06b6ea7d7fde5b19..49b80b067cfbf13c30f827434e63713937ddfe95 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml
@@ -4,12 +4,13 @@ version = "0.9.420"
authors.workspace = true
edition.workspace = true
description = "Westend variant of Asset Hub parachain runtime"
+license = "Apache-2.0"
[dependencies]
codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] }
hex-literal = { version = "0.4.1", optional = true }
log = { version = "0.4.20", default-features = false }
-scale-info = { version = "2.9.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
smallvec = "1.11.0"
# Substrate
@@ -66,22 +67,29 @@ xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkad
# Cumulus
cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false }
+pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false }
cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue", default-features = false }
cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] }
cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false}
cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false }
-cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false }
+cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false, features = ["bridging"] }
cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachain-info = { path = "../../../pallets/parachain-info", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } assets-common = { path = "../common", default-features = false } +# Bridges +pallet-xcm-bridge-hub-router = { path = "../../../../../bridges/modules/xcm-bridge-hub-router", default-features = false } +bp-asset-hub-rococo = { path = "../../../../../bridges/primitives/chain-asset-hub-rococo", default-features = false } +bp-asset-hub-westend = { path = "../../../../../bridges/primitives/chain-asset-hub-westend", default-features = false } +bp-bridge-hub-rococo = { path = "../../../../../bridges/primitives/chain-bridge-hub-rococo", default-features = false } +bp-bridge-hub-westend = { path = "../../../../../bridges/primitives/chain-bridge-hub-westend", default-features = false } + [dev-dependencies] hex-literal = "0.4.1" asset-test-utils = { path = "../test-utils" } -sp-io = { path = "../../../../../substrate/primitives/io" } [build-dependencies] substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } @@ -90,9 +98,11 @@ substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", default = [ "std" ] runtime-benchmarks = [ "assets-common/runtime-benchmarks", + "cumulus-pallet-dmp-queue/runtime-benchmarks", "cumulus-pallet-parachain-system/runtime-benchmarks", "cumulus-pallet-session-benchmarking/runtime-benchmarks", "cumulus-pallet-xcmp-queue/runtime-benchmarks", + "cumulus-primitives-core/runtime-benchmarks", "cumulus-primitives-utility/runtime-benchmarks", "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", @@ -103,6 +113,7 @@ runtime-benchmarks = [ "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-collator-selection/runtime-benchmarks", + "pallet-message-queue/runtime-benchmarks", "pallet-multisig/runtime-benchmarks", "pallet-nft-fractionalization/runtime-benchmarks", "pallet-nfts/runtime-benchmarks", @@ -111,6 +122,7 @@ runtime-benchmarks = [ "pallet-uniques/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", + "pallet-xcm-bridge-hub-router/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", "parachains-common/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", @@ -136,6 +148,7 @@ try-runtime = [ "pallet-authorship/try-runtime", "pallet-balances/try-runtime", "pallet-collator-selection/try-runtime", + "pallet-message-queue/try-runtime", "pallet-multisig/try-runtime", "pallet-nft-fractionalization/try-runtime", "pallet-nfts/try-runtime", @@ -145,6 +158,7 @@ try-runtime = [ "pallet-transaction-payment/try-runtime", "pallet-uniques/try-runtime", "pallet-utility/try-runtime", + "pallet-xcm-bridge-hub-router/try-runtime", "pallet-xcm/try-runtime", "parachain-info/try-runtime", "polkadot-runtime-common/try-runtime", @@ -152,6 +166,10 @@ try-runtime = [ ] std = [ "assets-common/std", + "bp-asset-hub-rococo/std", + "bp-asset-hub-westend/std", + "bp-bridge-hub-rococo/std", + "bp-bridge-hub-westend/std", "codec/std", 
"cumulus-pallet-aura-ext/std", "cumulus-pallet-dmp-queue/std", @@ -176,6 +194,7 @@ std = [ "pallet-authorship/std", "pallet-balances/std", "pallet-collator-selection/std", + "pallet-message-queue/std", "pallet-multisig/std", "pallet-nft-fractionalization/std", "pallet-nfts-runtime-api/std", @@ -188,12 +207,14 @@ std = [ "pallet-uniques/std", "pallet-utility/std", "pallet-xcm-benchmarks?/std", + "pallet-xcm-bridge-hub-router/std", "pallet-xcm/std", "parachain-info/std", "parachains-common/std", "polkadot-core-primitives/std", "polkadot-parachain-primitives/std", "polkadot-runtime-common/std", + "primitive-types/std", "scale-info/std", "sp-api/std", "sp-block-builder/std", @@ -216,3 +237,8 @@ std = [ ] experimental = [ "pallet-aura/experimental" ] + +# A feature that should be enabled when the runtime should be built for on-chain +# deployment. This will disable stuff that shouldn't be part of the on-chain wasm +# to make it smaller like logging for example. +on-chain-release-build = [ "sp-api/disable-logging" ] diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index 943332087627c56765d1ee4199c9a9ffd7d03854..d52edfe479ce4bcb25f6cf216888d510f94d4484 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -36,6 +36,7 @@ use assets_common::{ }; use codec::{Decode, Encode, MaxEncodedLen}; use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; +use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; use frame_support::{ construct_runtime, dispatch::DispatchClass, @@ -43,7 +44,7 @@ use frame_support::{ ord_parameter_types, parameter_types, traits::{ tokens::nonfungibles_v2::Inspect, AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU32, - ConstU64, ConstU8, InstanceFilter, + ConstU64, ConstU8, Equals, InstanceFilter, TransformOrigin, }, weights::{ConstantMultiplier, Weight}, BoundedVec, PalletId, @@ -53,10 +54,12 @@ use frame_system::{ EnsureRoot, EnsureSigned, EnsureSignedBy, }; use pallet_asset_conversion_tx_payment::AssetConversionAdapter; -use pallet_nfts::PalletFeatures; +use pallet_nfts::{DestroyWitness, PalletFeatures}; +use pallet_xcm::EnsureXcm; pub use parachains_common as common; use parachains_common::{ impls::DealWithFees, + message_queue::*, westend::{consensus::*, currency::*, fee::WeightToFee}, AccountId, AssetIdForTrustBackedAssets, AuraId, Balance, BlockNumber, Hash, Header, Nonce, Signature, AVERAGE_ON_INITIALIZE_RATIO, DAYS, HOURS, MAXIMUM_BLOCK_WEIGHT, @@ -66,9 +69,11 @@ use sp_api::impl_runtime_apis; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, - traits::{AccountIdConversion, AccountIdLookup, BlakeTwo256, Block as BlockT, Verify}, + traits::{ + AccountIdConversion, AccountIdLookup, BlakeTwo256, Block as BlockT, Saturating, Verify, + }, transaction_validity::{TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, Permill, RuntimeDebug, + ApplyExtrinsicResult, Perbill, Permill, RuntimeDebug, }; use sp_std::prelude::*; #[cfg(feature = "std")] @@ -77,8 +82,7 @@ use sp_version::RuntimeVersion; use xcm::opaque::v3::MultiLocation; use xcm_config::{ ForeignAssetsConvertedConcreteId, PoolAssetsConvertedConcreteId, - TrustBackedAssetsConvertedConcreteId, WestendLocation, XcmConfig, - XcmOriginToTransactDispatchOrigin, + TrustBackedAssetsConvertedConcreteId, WestendLocation, 
XcmOriginToTransactDispatchOrigin, }; #[cfg(any(feature = "std", test))] @@ -88,7 +92,7 @@ use assets_common::{ foreign_creators::ForeignCreators, matching::FromSiblingParachain, MultiLocationForAssetId, }; use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; -use xcm_executor::XcmExecutor; +use xcm::latest::prelude::*; use crate::xcm_config::ForeignCreatorsSovereignAccountOf; use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; @@ -107,7 +111,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("westmint"), impl_name: create_runtime_str!("westmint"), authoring_version: 1, - spec_version: 10000, + spec_version: 1_003_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 13, @@ -202,6 +206,7 @@ impl pallet_balances::Config for Runtime { type MaxReserves = ConstU32<50>; type ReserveIdentifier = [u8; 8]; type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); // We allow each account to have holds on it from: // - `NftFractionalization`: 1 @@ -583,10 +588,11 @@ parameter_types! { } impl cumulus_pallet_parachain_system::Config for Runtime { + type WeightInfo = weights::cumulus_pallet_parachain_system::WeightInfo; type RuntimeEvent = RuntimeEvent; type OnSystemEvent = (); type SelfParaId = parachain_info::Pallet; - type DmpMessageHandler = DmpQueue; + type DmpQueue = frame_support::traits::EnqueueWithOrigin; type ReservedDmpWeight = ReservedDmpWeight; type OutboundXcmpMessageSource = XcmpQueue; type XcmpMessageHandler = XcmpQueue; @@ -602,24 +608,69 @@ impl cumulus_pallet_parachain_system::Config for Runtime { impl parachain_info::Config for Runtime {} +parameter_types! { + pub MessageQueueServiceWeight: Weight = Perbill::from_percent(35) * RuntimeBlockWeights::get().max_block; +} + +impl pallet_message_queue::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = weights::pallet_message_queue::WeightInfo; + #[cfg(feature = "runtime-benchmarks")] + type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor< + cumulus_primitives_core::AggregateMessageOrigin, + >; + #[cfg(not(feature = "runtime-benchmarks"))] + type MessageProcessor = xcm_builder::ProcessXcmMessage< + AggregateMessageOrigin, + xcm_executor::XcmExecutor, + RuntimeCall, + >; + type Size = u32; + // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: + type QueueChangeHandler = NarrowOriginToSibling; + type QueuePausedQuery = NarrowOriginToSibling; + type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type MaxStale = sp_core::ConstU32<8>; + type ServiceWeight = MessageQueueServiceWeight; +} + impl cumulus_pallet_aura_ext::Config for Runtime {} +parameter_types! { + /// The asset ID for the asset that we use to pay for message delivery fees. + pub FeeAssetId: AssetId = Concrete(xcm_config::WestendLocation::get()); + /// The base fee for the message delivery fees. + pub const BaseDeliveryFee: u128 = CENTS.saturating_mul(3); +} + +pub type PriceForSiblingParachainDelivery = polkadot_runtime_common::xcm_sender::ExponentialPrice< + FeeAssetId, + BaseDeliveryFee, + TransactionByteFee, + XcmpQueue, +>; + impl cumulus_pallet_xcmp_queue::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type XcmExecutor = XcmExecutor; type ChannelInfo = ParachainSystem; type VersionWrapper = PolkadotXcm; - type ExecuteOverweightOrigin = EnsureRoot; + // Enqueue XCMP messages from siblings for later processing. 
+ type XcmpQueue = TransformOrigin; + type MaxInboundSuspended = sp_core::ConstU32<1_000>; type ControllerOrigin = EnsureRoot; type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; type WeightInfo = weights::cumulus_pallet_xcmp_queue::WeightInfo; - type PriceForSiblingDelivery = (); + type PriceForSiblingDelivery = PriceForSiblingParachainDelivery; +} + +parameter_types! { + pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; } impl cumulus_pallet_dmp_queue::Config for Runtime { + type WeightInfo = weights::cumulus_pallet_dmp_queue::WeightInfo; type RuntimeEvent = RuntimeEvent; - type XcmExecutor = XcmExecutor; - type ExecuteOverweightOrigin = EnsureRoot; + type DmpSink = frame_support::traits::EnqueueWithOrigin; } parameter_types! { @@ -778,6 +829,37 @@ impl pallet_nfts::Config for Runtime { type Helper = (); } +/// XCM router instance to BridgeHub with bridging capabilities for `Rococo` global +/// consensus with dynamic fees and back-pressure. +pub type ToRococoXcmRouterInstance = pallet_xcm_bridge_hub_router::Instance1; +impl pallet_xcm_bridge_hub_router::Config for Runtime { + type WeightInfo = weights::pallet_xcm_bridge_hub_router::WeightInfo; + + type UniversalLocation = xcm_config::UniversalLocation; + type BridgedNetworkId = xcm_config::bridging::to_rococo::RococoNetwork; + type Bridges = xcm_config::bridging::NetworkExportTable; + + #[cfg(not(feature = "runtime-benchmarks"))] + type BridgeHubOrigin = EnsureXcm>; + #[cfg(feature = "runtime-benchmarks")] + type BridgeHubOrigin = frame_support::traits::EitherOfDiverse< + // for running benchmarks + EnsureRoot, + // for running tests with `--feature runtime-benchmarks` + EnsureXcm>, + >; + + type ToBridgeHubSender = XcmpQueue; + type WithBridgeHubChannel = + cumulus_pallet_xcmp_queue::bridging::InAndOutXcmpChannelStatusProvider< + xcm_config::bridging::SiblingBridgeHubParaId, + Runtime, + >; + + type ByteFee = xcm_config::bridging::XcmBridgeHubRouterByteFee; + type FeeAsset = xcm_config::bridging::XcmBridgeHubRouterFeeAssetId; +} + // Create the runtime by composing the FRAME pallets that were previously configured. construct_runtime!( pub enum Runtime @@ -809,6 +891,9 @@ construct_runtime!( PolkadotXcm: pallet_xcm::{Pallet, Call, Storage, Event, Origin, Config} = 31, CumulusXcm: cumulus_pallet_xcm::{Pallet, Event, Origin} = 32, DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event} = 33, + // Bridge utilities. + ToRococoXcmRouter: pallet_xcm_bridge_hub_router::::{Pallet, Storage, Call} = 34, + MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 35, // Handy utilities. Utility: pallet_utility::{Pallet, Call, Event} = 40, @@ -861,8 +946,79 @@ pub type Migrations = ( pallet_multisig::migrations::v1::MigrateToV1, // unreleased InitStorageVersions, + // unreleased + DeleteUndecodableStorage, ); +/// Asset Hub Westend has some undecodable storage, delete it. +/// See for more info. +/// +/// First we remove the bad Hold, then the bad NFT collection. +pub struct DeleteUndecodableStorage; + +impl frame_support::traits::OnRuntimeUpgrade for DeleteUndecodableStorage { + fn on_runtime_upgrade() -> Weight { + use sp_core::crypto::Ss58Codec; + + let mut writes = 0; + + // Remove Holds for account with undecodable hold + // Westend doesn't have any HoldReasons implemented yet, so it's safe to just blanket remove + // any for this account. 
+ match AccountId::from_ss58check("5GCCJthVSwNXRpbeg44gysJUx9vzjdGdfWhioeM7gCg6VyXf") { + Ok(a) => { + log::info!("Removing holds for account with bad hold"); + pallet_balances::Holds::::remove(a); + writes.saturating_inc(); + }, + Err(_) => { + log::error!("CleanupUndecodableStorage: Somehow failed to convert valid SS58 address into an AccountId!"); + }, + }; + + // Destroy undecodable NFT item 1 + writes.saturating_inc(); + match pallet_nfts::Pallet::::do_burn(3, 1, |_| Ok(())) { + Ok(_) => { + log::info!("Destroyed undecodable NFT item 1"); + }, + Err(e) => { + log::error!("Failed to destroy undecodable NFT item: {:?}", e); + return ::DbWeight::get().reads_writes(0, writes) + }, + } + + // Destroy undecodable NFT item 2 + writes.saturating_inc(); + match pallet_nfts::Pallet::::do_burn(3, 2, |_| Ok(())) { + Ok(_) => { + log::info!("Destroyed undecodable NFT item 2"); + }, + Err(e) => { + log::error!("Failed to destroy undecodable NFT item: {:?}", e); + return ::DbWeight::get().reads_writes(0, writes) + }, + } + + // Finally, we can destroy the collection + writes.saturating_inc(); + match pallet_nfts::Pallet::::do_destroy_collection( + 3, + DestroyWitness { attributes: 0, item_metadatas: 1, item_configs: 0 }, + None, + ) { + Ok(_) => { + log::info!("Destroyed undecodable NFT collection"); + }, + Err(e) => { + log::error!("Failed to destroy undecodable NFT collection: {:?}", e); + }, + }; + + ::DbWeight::get().reads_writes(0, writes) + } +} + /// Migration to initialize storage versions for pallets added after genesis. /// /// Ideally this would be done automatically (see @@ -874,7 +1030,6 @@ pub struct InitStorageVersions; impl frame_support::traits::OnRuntimeUpgrade for InitStorageVersions { fn on_runtime_upgrade() -> Weight { use frame_support::traits::{GetStorageVersion, StorageVersion}; - use sp_runtime::traits::Saturating; let mut writes = 0; @@ -907,19 +1062,16 @@ pub type Executive = frame_executive::Executive< Migrations, >; -#[cfg(feature = "runtime-benchmarks")] -#[macro_use] -extern crate frame_benchmarking; - #[cfg(feature = "runtime-benchmarks")] mod benches { - define_benchmarks!( + frame_benchmarking::define_benchmarks!( [frame_system, SystemBench::] [pallet_assets, Local] [pallet_assets, Foreign] [pallet_assets, Pool] [pallet_asset_conversion, AssetConversion] [pallet_balances, Balances] + [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] [pallet_nft_fractionalization, NftFractionalization] [pallet_nfts, Nfts] @@ -930,8 +1082,10 @@ mod benches { [pallet_timestamp, Timestamp] [pallet_collator_selection, CollatorSelection] [cumulus_pallet_xcmp_queue, XcmpQueue] + [cumulus_pallet_dmp_queue, DmpQueue] + [pallet_xcm_bridge_hub_router, ToRococo] // XCM - [pallet_xcm, PolkadotXcm] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -1065,10 +1219,10 @@ impl_runtime_apis! { fn system_attribute( collection: u32, - item: u32, + item: Option, key: Vec, ) -> Option> { - >::system_attribute(&collection, &item, &key) + >::system_attribute(&collection, item.as_ref(), &key) } fn collection_attribute(collection: u32, key: Vec) -> Option> { @@ -1215,6 +1369,8 @@ impl_runtime_apis! 
{ use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; + use pallet_xcm_bridge_hub_router::benchmarking::Pallet as XcmBridgeHubRouterBench; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -1230,6 +1386,8 @@ impl_runtime_apis! { type Foreign = pallet_assets::Pallet::; type Pool = pallet_assets::Pallet::; + type ToRococo = XcmBridgeHubRouterBench; + let mut list = Vec::::new(); list_benchmarks!(list, extra); @@ -1258,13 +1416,77 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported between AH and Relay. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + Parent.into(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // AH can reserve transfer native token to some random parachain. + let random_para_id = 43211234; + ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests( + random_para_id.into() + ); + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + ParentThen(Parachain(random_para_id).into()).into(), + )) + } + } + + use pallet_xcm_bridge_hub_router::benchmarking::{ + Pallet as XcmBridgeHubRouterBench, + Config as XcmBridgeHubRouterConfig, + }; + + impl XcmBridgeHubRouterConfig for Runtime { + fn make_congested() { + cumulus_pallet_xcmp_queue::bridging::suspend_channel_for_benchmarks::( + xcm_config::bridging::SiblingBridgeHubParaId::get().into() + ); + } + fn ensure_bridged_target_destination() -> MultiLocation { + ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests( + xcm_config::bridging::SiblingBridgeHubParaId::get().into() + ); + xcm_config::bridging::to_rococo::AssetHubRococo::get() + } + } + use xcm::latest::prelude::*; use xcm_config::{MaxAssetsIntoHolding, WestendLocation}; use pallet_xcm_benchmarks::asset_instance_from; + parameter_types! { + pub ExistentialDepositMultiAsset: Option = Some(( + WestendLocation::get(), + ExistentialDeposit::get() + ).into()); + } + impl pallet_xcm_benchmarks::Config for Runtime { type XcmConfig = xcm_config::XcmConfig; type AccountIdConverter = xcm_config::LocationToAccountId; + type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositMultiAsset, + xcm_config::PriceForParentDelivery, + >; fn valid_destination() -> Result { Ok(WestendLocation::get()) } @@ -1301,7 +1523,13 @@ impl_runtime_apis! 
{ MultiAsset { fun: Fungible(UNITS), id: Concrete(WestendLocation::get()) }, )); pub const CheckedAccount: Option<(AccountId, xcm_builder::MintLocation)> = None; - pub const TrustedReserve: Option<(MultiLocation, MultiAsset)> = None; + // AssetHubWestend trusts AssetHubRococo as reserve for ROCs + pub TrustedReserve: Option<(MultiLocation, MultiAsset)> = Some( + ( + xcm_config::bridging::to_rococo::AssetHubRococo::get(), + MultiAsset::from((xcm_config::bridging::to_rococo::RocLocation::get(), 1000000000000 as u128)) + ) + ); } impl pallet_xcm_benchmarks::fungible::Config for Runtime { @@ -1320,6 +1548,7 @@ impl_runtime_apis! { } impl pallet_xcm_benchmarks::generic::Config for Runtime { + type TransactAsset = Balances; type RuntimeCall = RuntimeCall; fn worst_case_response() -> (u64, Response) { @@ -1331,7 +1560,10 @@ impl_runtime_apis! { } fn universal_alias() -> Result<(MultiLocation, Junction), BenchmarkError> { - Err(BenchmarkError::Skip) + match xcm_config::bridging::BridgingBenchmarksHelper::prepare_universal_alias() { + Some(alias) => Ok(alias), + None => Err(BenchmarkError::Skip) + } } fn transact_origin_and_runtime_call() -> Result<(MultiLocation, RuntimeCall), BenchmarkError> { @@ -1370,6 +1602,8 @@ impl_runtime_apis! { type Foreign = pallet_assets::Pallet::; type Pool = pallet_assets::Pallet::; + type ToRococo = XcmBridgeHubRouterBench; + let whitelist: Vec = vec![ // Block Number hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), @@ -1421,7 +1655,6 @@ pub mod migrations { }; use parachains_common::impls::AccountIdOf; use sp_runtime::{traits::StaticLookup, Saturating}; - use xcm::latest::prelude::*; /// Temporary migration because of bug with native asset, it can be removed once applied on /// `AssetHubWestend`. Migrates pools with `MultiLocation { parents: 0, interior: Here }` to diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_dmp_queue.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_dmp_queue.rs new file mode 100644 index 0000000000000000000000000000000000000000..cc41dcd6cbbb62c1392ae2e7517b5dfe920a5b85 --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_dmp_queue.rs @@ -0,0 +1,131 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `cumulus_pallet_dmp_queue` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-10-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=cumulus_pallet_dmp_queue +// --chain=asset-hub-kusama-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `cumulus_pallet_dmp_queue`. +pub struct WeightInfo(PhantomData); +impl cumulus_pallet_dmp_queue::WeightInfo for WeightInfo { + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn on_idle_good_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65696` + // Estimated: `69161` + // Minimum execution time: 124_651_000 picoseconds. + Weight::from_parts(127_857_000, 0) + .saturating_add(Weight::from_parts(0, 69161)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + fn on_idle_large_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65659` + // Estimated: `69124` + // Minimum execution time: 65_684_000 picoseconds. 
+ Weight::from_parts(68_039_000, 0) + .saturating_add(Weight::from_parts(0, 69124)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn on_idle_overweight_good_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65726` + // Estimated: `69191` + // Minimum execution time: 117_657_000 picoseconds. + Weight::from_parts(122_035_000, 0) + .saturating_add(Weight::from_parts(0, 69191)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(6)) + } + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + fn on_idle_overweight_large_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65689` + // Estimated: `69154` + // Minimum execution time: 59_799_000 picoseconds. 
+ Weight::from_parts(61_354_000, 0) + .saturating_add(Weight::from_parts(0, 69154)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_parachain_system.rs new file mode 100644 index 0000000000000000000000000000000000000000..c1e5c6a74293995b6e3702f19b8b3750c34b192f --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_parachain_system.rs @@ -0,0 +1,80 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `cumulus_pallet_parachain_system` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-03-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("statemine-dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --chain +// statemine-dev +// --pallet +// cumulus_pallet_parachain_system +// --extrinsic +// * +// --execution +// wasm +// --wasm-execution +// compiled +// --output +// parachains/runtimes/assets/statemine/src/weights +// --steps +// 50 +// --repeat +// 20 + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::Weight}; +use sp_std::marker::PhantomData; + +/// Weight functions for `cumulus_pallet_parachain_system`. +pub struct WeightInfo(PhantomData); +impl cumulus_pallet_parachain_system::WeightInfo for WeightInfo { + /// Storage: ParachainSystem LastDmqMqcHead (r:1 w:1) + /// Proof Skipped: ParachainSystem LastDmqMqcHead (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: ParachainSystem ReservedDmpWeightOverride (r:1 w:0) + /// Proof Skipped: ParachainSystem ReservedDmpWeightOverride (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: ParachainSystem ProcessedDownwardMessages (r:0 w:1) + /// Proof Skipped: ParachainSystem ProcessedDownwardMessages (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: MessageQueue Pages (r:0 w:16) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// The range of component `n` is `[0, 1000]`. 
+ fn enqueue_inbound_downward_messages(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `12` + // Estimated: `8013` + // Minimum execution time: 1_622_000 picoseconds. + Weight::from_parts(1_709_000, 0) + .saturating_add(Weight::from_parts(0, 8013)) + // Standard Error: 22_138 + .saturating_add(Weight::from_parts(23_923_169, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_xcmp_queue.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_xcmp_queue.rs index cda66f6ea7eee25ece39733cf266dbf16c5c0338..b34c959a355e84e0c7773ef92d12aa215cb5eeee 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_xcmp_queue.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_xcmp_queue.rs @@ -1,43 +1,38 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `cumulus_pallet_xcmp_queue` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024 +//! HOSTNAME: `Olivers-MacBook-Pro.local`, CPU: `` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// ./target/release/polkadot-parachain // benchmark // pallet -// --chain=asset-hub-westend-dev -// --wasm-execution=compiled -// --pallet=cumulus_pallet_xcmp_queue -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-westend/src/weights/ +// --pallet +// cumulus-pallet-xcmp-queue +// --chain +// asset-hub-westend-dev +// --output +// cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_xcmp_queue.rs +// --extrinsic +// #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -56,22 +51,98 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `76` // Estimated: `1561` - // Minimum execution time: 5_109_000 picoseconds. - Weight::from_parts(5_324_000, 0) + // Minimum execution time: 6_000_000 picoseconds. + Weight::from_parts(6_000_000, 0) .saturating_add(Weight::from_parts(0, 1561)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `XcmpQueue::QueueConfig` (r:1 w:1) + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn set_config_with_weight() -> Weight { + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn enqueue_xcmp_message() -> Weight { + // Proof Size summary in bytes: + // Measured: `118` + // Estimated: `3517` + // Minimum execution time: 15_000_000 picoseconds. + Weight::from_parts(15_000_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn suspend_channel() -> Weight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `1561` - // Minimum execution time: 5_183_000 picoseconds. - Weight::from_parts(5_408_000, 0) + // Minimum execution time: 3_000_000 picoseconds. + Weight::from_parts(4_000_000, 0) .saturating_add(Weight::from_parts(0, 1561)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn resume_channel() -> Weight { + // Proof Size summary in bytes: + // Measured: `111` + // Estimated: `1596` + // Minimum execution time: 4_000_000 picoseconds. 
+ Weight::from_parts(4_000_000, 0) + .saturating_add(Weight::from_parts(0, 1596)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn take_first_concatenated_xcm() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 44_000_000 picoseconds. + Weight::from_parts(45_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Storage: `XcmpQueue::InboundXcmpMessages` (r:1 w:1) + /// Proof: `XcmpQueue::InboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) + /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn on_idle_good_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65747` + // Estimated: `69212` + // Minimum execution time: 66_000_000 picoseconds. + Weight::from_parts(68_000_000, 0) + .saturating_add(Weight::from_parts(0, 69212)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + fn on_idle_large_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65710` + // Estimated: `69175` + // Minimum execution time: 43_000_000 picoseconds. + Weight::from_parts(44_000_000, 0) + .saturating_add(Weight::from_parts(0, 69175)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/frame_system.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/frame_system.rs index c70ea9d58b2649c5f6fbf53cf620757c4aa19f1b..6c741af2a13dcca9cc490c13933397f6ab841c28 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/frame_system.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/frame_system.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `frame_system` //! diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs index 281c013b3372f57a24b57812de91e684ffabd0c9..1646c00989d543bb876bb04ad3c297516e6dd946 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs @@ -1,20 +1,21 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
pub mod block_weights; +pub mod cumulus_pallet_dmp_queue; +pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; @@ -24,6 +25,7 @@ pub mod pallet_assets_local; pub mod pallet_assets_pool; pub mod pallet_balances; pub mod pallet_collator_selection; +pub mod pallet_message_queue; pub mod pallet_multisig; pub mod pallet_nft_fractionalization; pub mod pallet_nfts; @@ -33,6 +35,7 @@ pub mod pallet_timestamp; pub mod pallet_uniques; pub mod pallet_utility; pub mod pallet_xcm; +pub mod pallet_xcm_bridge_hub_router; pub mod paritydb_weights; pub mod rocksdb_weights; pub mod xcm; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_asset_conversion.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_asset_conversion.rs index 2f39df65403043dad536ccb180c396108c73f487..e48f2e2ef7267cf714b5ea385290c4180365ddb5 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_asset_conversion.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_asset_conversion.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_asset_conversion` //! diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_assets_foreign.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_assets_foreign.rs index 5be1319b10dfb221230a66429c578248fc30a9d0..52ba2fd6c40fcfea552b181bcb05cfecab75f6b4 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_assets_foreign.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_assets_foreign.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_assets` //! diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_assets_local.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_assets_local.rs index aa09be829c85c6e19d252ad03dd584e34024abc4..e78366b91cbe8100c2a46c47a8708c3f1aed918d 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_assets_local.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_assets_local.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_assets` //! diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_assets_pool.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_assets_pool.rs index bfe73e1cfafa5dda9690f4b57f07e08c6ea3b740..65cae81069c40f7369d31d8411bf882521b71e11 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_assets_pool.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_assets_pool.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. 
+// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_assets` //! diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_balances.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_balances.rs index f6bf09d63bae20a899301bda372a48ad389c935c..c98ac75ff662c35deeda1e98eed7b99b5fa525ef 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_balances.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_balances.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_balances` //! 
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_collator_selection.rs index 2473c58e45818145674d62c31f6c6a07b6f6794f..1fac2d59ab9609a0220a8d39a3a711dd8bd98437 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_collator_selection.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_collator_selection.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_collator_selection` //! @@ -124,7 +123,7 @@ impl pallet_collator_selection::WeightInfo for WeightIn } /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - fn set_candidacy_bond() -> Weight { + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` @@ -178,6 +177,30 @@ impl pallet_collator_selection::WeightInfo for WeightIn .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } + fn update_bond(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn take_candidate_slot(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. 
+ Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `System::BlockWeight` (r:1 w:1) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_message_queue.rs new file mode 100644 index 0000000000000000000000000000000000000000..45531ccfa797c52ead00833f765347f1282816c7 --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_message_queue.rs @@ -0,0 +1,179 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_message_queue` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-03-24, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("statemine-dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --chain +// statemine-dev +// --pallet +// pallet_message_queue +// --extrinsic +// * +// --execution +// wasm +// --wasm-execution +// compiled +// --output +// parachains/runtimes/assets/statemine/src/weights + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::Weight}; +use sp_std::marker::PhantomData; + +/// Weight functions for `pallet_message_queue`. +pub struct WeightInfo(PhantomData); +impl pallet_message_queue::WeightInfo for WeightInfo { + /// Storage: MessageQueue ServiceHead (r:1 w:0) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: MessageQueue BookStateFor (r:2 w:2) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn ready_ring_knit() -> Weight { + // Proof Size summary in bytes: + // Measured: `189` + // Estimated: `7534` + // Minimum execution time: 13_668_000 picoseconds. 
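The new `update_bond` and `take_candidate_slot` entries above follow the usual generated pattern: a constant ref_time, a proof-size estimate, a linear term in the candidate count `c`, and the runtime's DB weights. A minimal, self-contained sketch of evaluating that formula, with the figures copied from the generated body; `RocksDbWeight` is an assumption here, the runtime may configure a different `DbWeight`:

```rust
use frame_support::{traits::Get, weights::{constants::RocksDbWeight, Weight}};

/// Evaluate the `update_bond` formula above for `c` registered candidates.
fn update_bond_weight(c: u32) -> Weight {
    Weight::from_parts(36_371_520, 0)
        // proof size estimate
        .saturating_add(Weight::from_parts(0, 6287))
        // linear component: ~201_700 ps of ref_time per candidate
        .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into()))
        // two storage reads and two writes
        .saturating_add(RocksDbWeight::get().reads(2))
        .saturating_add(RocksDbWeight::get().writes(2))
}

fn main() {
    let w = update_bond_weight(20);
    println!("ref_time = {} ps, proof_size = {} bytes", w.ref_time(), w.proof_size());
}
```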
+ Weight::from_parts(13_668_000, 0) + .saturating_add(Weight::from_parts(0, 7534)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: MessageQueue BookStateFor (r:2 w:2) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + fn ready_ring_unknit() -> Weight { + // Proof Size summary in bytes: + // Measured: `184` + // Estimated: `7534` + // Minimum execution time: 11_106_000 picoseconds. + Weight::from_parts(11_106_000, 0) + .saturating_add(Weight::from_parts(0, 7534)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn service_queue_base() -> Weight { + // Proof Size summary in bytes: + // Measured: `6` + // Estimated: `3517` + // Minimum execution time: 4_921_000 picoseconds. + Weight::from_parts(4_921_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn service_page_base_completion() -> Weight { + // Proof Size summary in bytes: + // Measured: `72` + // Estimated: `69050` + // Minimum execution time: 6_879_000 picoseconds. + Weight::from_parts(6_879_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn service_page_base_no_completion() -> Weight { + // Proof Size summary in bytes: + // Measured: `72` + // Estimated: `69050` + // Minimum execution time: 7_564_000 picoseconds. + Weight::from_parts(7_564_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn service_page_item() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 59_963_000 picoseconds. + Weight::from_parts(59_963_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: MessageQueue BookStateFor (r:1 w:0) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn bump_service_head() -> Weight { + // Proof Size summary in bytes: + // Measured: `99` + // Estimated: `5007` + // Minimum execution time: 7_200_000 picoseconds. 
+ Weight::from_parts(7_200_000, 0) + .saturating_add(Weight::from_parts(0, 5007)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn reap_page() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `72567` + // Minimum execution time: 41_366_000 picoseconds. + Weight::from_parts(41_366_000, 0) + .saturating_add(Weight::from_parts(0, 72567)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn execute_overweight_page_removed() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `72567` + // Minimum execution time: 60_538_000 picoseconds. + Weight::from_parts(60_538_000, 0) + .saturating_add(Weight::from_parts(0, 72567)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn execute_overweight_page_updated() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `72567` + // Minimum execution time: 73_665_000 picoseconds. + Weight::from_parts(73_665_000, 0) + .saturating_add(Weight::from_parts(0, 72567)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_multisig.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_multisig.rs index 107d78c98f978f6500f721193fc81ec009204b98..27687e10751b3ad052ddd6a9269cdcc415ef563d 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_multisig.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_multisig.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . 
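The `pallet_message_queue::WeightInfo` implementation above is what the runtime's message-queue servicing logic draws on when it budgets work per block. A hedged sketch of how such a budget could be composed from these trait methods; the grouping is illustrative only, not the pallet's actual accounting:

```rust
use frame_support::weights::Weight;
use pallet_message_queue::WeightInfo;

/// Rough cost of servicing one message: the per-item cost plus a share of the
/// queue/page bookkeeping benchmarked above (illustrative grouping only).
fn per_message_budget<W: WeightInfo>() -> Weight {
    W::service_page_item()
        .saturating_add(W::bump_service_head())
        .saturating_add(W::service_page_base_completion())
}
```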
+// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_multisig` //! diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_nft_fractionalization.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_nft_fractionalization.rs index e155e1bada3abc8f959404b112d7d393c40c0a61..601b47227c5840f373414bdad25c5956cbe05569 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_nft_fractionalization.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_nft_fractionalization.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_nft_fractionalization` //! diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_nfts.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_nfts.rs index 687dfa07f75148de35519507e1721f61dcf7cee8..fa7085c7344f91a2e16cf64e4efff73829f92512 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_nfts.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_nfts.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_nfts` //! diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_proxy.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_proxy.rs index 657bd2764cf2d88bdb22d322fc7a67f881c9f3be..d0042419719ae57792fa7218f0fe654dbfb5e846 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_proxy.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_proxy.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_proxy` //! diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_session.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_session.rs index 926021917441904d3553f996e3fe9074ba1fe97a..2f9a39d2f801568835e5f85a8e9b29cbd3565937 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_session.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_session.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_session` //! diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_timestamp.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_timestamp.rs index 13f18861d37e8d715e0fb19d71bd60af8dcb8321..85207bc7c7e580d42d0cb4e7b6cdf13a4088f97d 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_timestamp.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_timestamp.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_timestamp` //! diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_uniques.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_uniques.rs index 53bead08f5d4cd9ed4388555a58eca1ae183a2ab..8c20f1601541e604c941f7cad3c39a3000a9ac95 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_uniques.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_uniques.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. 
+// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_uniques` //! diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_utility.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_utility.rs index 7db443ebbf19967d00676518a53909130ae77a86..a7952d6da00eaade8d02d4f44cf809c48ae4989c 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_utility.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_utility.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_utility` //! diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs index 1cc4c2d0e24cf205e5ad33bf40e93dd22f864d80..340edafb0b0c3ea6319e1c2213bcface8a9399b4 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs @@ -17,27 +17,25 @@ //! 
Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=asset-hub-westend-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-westend/src/weights/ +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=asset-hub-westend-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,6 +48,8 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -64,10 +64,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 29_833_000 picoseconds. - Weight::from_parts(30_472_000, 0) + // Minimum execution time: 25_534_000 picoseconds. + Weight::from_parts(26_413_000, 0) .saturating_add(Weight::from_parts(0, 3610)) - .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) @@ -76,8 +76,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1489` - // Minimum execution time: 22_922_000 picoseconds. - Weight::from_parts(23_650_000, 0) + // Minimum execution time: 20_513_000 picoseconds. + Weight::from_parts(20_837_000, 0) .saturating_add(Weight::from_parts(0, 1489)) .saturating_add(T::DbWeight::get().reads(1)) } @@ -87,8 +87,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1489` - // Minimum execution time: 17_468_000 picoseconds. - Weight::from_parts(18_068_000, 0) + // Minimum execution time: 14_977_000 picoseconds. 
+ Weight::from_parts(15_207_000, 0) .saturating_add(Weight::from_parts(0, 1489)) .saturating_add(T::DbWeight::get().reads(1)) } @@ -96,8 +96,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_780_000 picoseconds. - Weight::from_parts(9_201_000, 0) + // Minimum execution time: 7_440_000 picoseconds. + Weight::from_parts(7_651_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) @@ -106,8 +106,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_886_000 picoseconds. - Weight::from_parts(9_102_000, 0) + // Minimum execution time: 7_253_000 picoseconds. + Weight::from_parts(7_584_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -117,8 +117,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_665_000 picoseconds. - Weight::from_parts(2_884_000, 0) + // Minimum execution time: 2_299_000 picoseconds. + Weight::from_parts(2_435_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -126,6 +126,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -142,14 +144,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 34_513_000 picoseconds. - Weight::from_parts(36_207_000, 0) + // Minimum execution time: 29_440_000 picoseconds. + Weight::from_parts(30_675_000, 0) .saturating_add(Weight::from_parts(0, 3610)) - .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -166,10 +170,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `363` // Estimated: `3828` - // Minimum execution time: 35_770_000 picoseconds. - Weight::from_parts(36_462_000, 0) + // Minimum execution time: 31_876_000 picoseconds. 
+ Weight::from_parts(32_588_000, 0) .saturating_add(Weight::from_parts(0, 3828)) - .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) @@ -178,8 +182,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_763_000 picoseconds. - Weight::from_parts(3_079_000, 0) + // Minimum execution time: 2_385_000 picoseconds. + Weight::from_parts(2_607_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -189,8 +193,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `162` // Estimated: `11052` - // Minimum execution time: 17_170_000 picoseconds. - Weight::from_parts(17_674_000, 0) + // Minimum execution time: 16_927_000 picoseconds. + Weight::from_parts(17_554_000, 0) .saturating_add(Weight::from_parts(0, 11052)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -201,8 +205,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `166` // Estimated: `11056` - // Minimum execution time: 16_857_000 picoseconds. - Weight::from_parts(17_407_000, 0) + // Minimum execution time: 16_965_000 picoseconds. + Weight::from_parts(17_807_000, 0) .saturating_add(Weight::from_parts(0, 11056)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -213,13 +217,15 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `173` // Estimated: `13538` - // Minimum execution time: 19_040_000 picoseconds. - Weight::from_parts(19_550_000, 0) + // Minimum execution time: 18_763_000 picoseconds. + Weight::from_parts(19_359_000, 0) .saturating_add(Weight::from_parts(0, 13538)) .saturating_add(T::DbWeight::get().reads(5)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -234,10 +240,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `212` // Estimated: `6152` - // Minimum execution time: 31_623_000 picoseconds. - Weight::from_parts(32_646_000, 0) + // Minimum execution time: 27_371_000 picoseconds. + Weight::from_parts(28_185_000, 0) .saturating_add(Weight::from_parts(0, 6152)) - .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) @@ -246,8 +252,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `206` // Estimated: `8621` - // Minimum execution time: 9_148_000 picoseconds. - Weight::from_parts(9_402_000, 0) + // Minimum execution time: 9_165_000 picoseconds. 
+ Weight::from_parts(9_539_000, 0) .saturating_add(Weight::from_parts(0, 8621)) .saturating_add(T::DbWeight::get().reads(3)) } @@ -257,14 +263,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `173` // Estimated: `11063` - // Minimum execution time: 17_630_000 picoseconds. - Weight::from_parts(17_941_000, 0) + // Minimum execution time: 17_384_000 picoseconds. + Weight::from_parts(17_777_000, 0) .saturating_add(Weight::from_parts(0, 11063)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -279,10 +287,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `215` // Estimated: `11105` - // Minimum execution time: 38_425_000 picoseconds. - Weight::from_parts(39_219_000, 0) + // Minimum execution time: 34_260_000 picoseconds. + Weight::from_parts(35_428_000, 0) .saturating_add(Weight::from_parts(0, 11105)) - .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn new_query() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `1588` + // Minimum execution time: 4_710_000 picoseconds. + Weight::from_parts(4_900_000, 0) + .saturating_add(Weight::from_parts(0, 1588)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::Queries` (r:1 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn take_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `7740` + // Estimated: `11205` + // Minimum execution time: 26_843_000 picoseconds. + Weight::from_parts(27_404_000, 0) + .saturating_add(Weight::from_parts(0, 11205)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm_bridge_hub_router.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm_bridge_hub_router.rs new file mode 100644 index 0000000000000000000000000000000000000000..9d0d0cbc655586937893e4d017475175b2fca63e --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm_bridge_hub_router.rs @@ -0,0 +1,124 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. 
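The recurring change in the `pallet_xcm` weight file above is the extra `ParachainSystem::UpwardDeliveryFeeFactor` read, which shows up as one additional DB read on top of the re-benchmarked base times (e.g. `reads(6)` instead of `reads(5)` in the first body, which corresponds to the `send` benchmark). A sketch of that resulting weight, with figures copied from the body above and `RocksDbWeight` assumed for illustration:

```rust
use frame_support::{traits::Get, weights::{constants::RocksDbWeight, Weight}};

/// `send` as benchmarked above: base ref_time, proof size 3610,
/// six reads (one of them the new delivery-fee-factor read) and two writes.
fn send_weight() -> Weight {
    Weight::from_parts(26_413_000, 0)
        .saturating_add(Weight::from_parts(0, 3610))
        .saturating_add(RocksDbWeight::get().reads(6))
        .saturating_add(RocksDbWeight::get().writes(2))
}
```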
+ +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_xcm_bridge_hub_router` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm_bridge_hub_router +// --chain=asset-hub-westend-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_xcm_bridge_hub_router`. +pub struct WeightInfo(PhantomData); +impl pallet_xcm_bridge_hub_router::WeightInfo for WeightInfo { + /// Storage: `XcmpQueue::InboundXcmpStatus` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ToRococoXcmRouter::Bridge` (r:1 w:1) + /// Proof: `ToRococoXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) + fn on_initialize_when_non_congested() -> Weight { + // Proof Size summary in bytes: + // Measured: `193` + // Estimated: `1678` + // Minimum execution time: 8_157_000 picoseconds. + Weight::from_parts(8_481_000, 0) + .saturating_add(Weight::from_parts(0, 1678)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `XcmpQueue::InboundXcmpStatus` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn on_initialize_when_congested() -> Weight { + // Proof Size summary in bytes: + // Measured: `111` + // Estimated: `1596` + // Minimum execution time: 3_319_000 picoseconds. 
+ Weight::from_parts(3_445_000, 0) + .saturating_add(Weight::from_parts(0, 1596)) + .saturating_add(T::DbWeight::get().reads(2)) + } + /// Storage: `ToRococoXcmRouter::Bridge` (r:1 w:1) + /// Proof: `ToRococoXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) + fn report_bridge_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `117` + // Estimated: `1502` + // Minimum execution time: 10_396_000 picoseconds. + Weight::from_parts(10_914_000, 0) + .saturating_add(Weight::from_parts(0, 1502)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x0973fe64c85043ba1c965cbc38eb63c7` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x0973fe64c85043ba1c965cbc38eb63c7` (r:1 w:0) + /// Storage: `ToRococoXcmRouter::Bridge` (r:1 w:1) + /// Proof: `ToRococoXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) + /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::InboundXcmpStatus` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) + /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn send_message() -> Weight { + // Proof Size summary in bytes: + // Measured: `426` + // Estimated: `3891` + // Minimum execution time: 45_902_000 picoseconds. + Weight::from_parts(46_887_000, 0) + .saturating_add(Weight::from_parts(0, 3891)) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().writes(4)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/mod.rs index bb850ac72c07d3ebfe576041bc56a6da5051ab9d..bcd51167f97e93432ef7694a16895bcdb50a9667 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/mod.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. 
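The new `pallet_xcm_bridge_hub_router` weight file above distinguishes the congested and non-congested `on_initialize` paths. A hedged sketch of how a caller could pick the right budget from this `WeightInfo`; the pallet's own hook decides this internally and may account for it differently:

```rust
use frame_support::weights::Weight;
use pallet_xcm_bridge_hub_router::WeightInfo;

/// Pick the per-block `on_initialize` budget depending on congestion
/// (illustrative; not the pallet's actual hook logic).
fn on_initialize_budget<W: WeightInfo>(congested: bool) -> Weight {
    if congested {
        W::on_initialize_when_congested()
    } else {
        W::on_initialize_when_non_congested()
    }
}
```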
+// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. mod pallet_xcm_benchmarks_fungible; mod pallet_xcm_benchmarks_generic; @@ -61,16 +60,8 @@ impl XcmWeightInfo for AssetHubWestendXcmWeight { fn withdraw_asset(assets: &MultiAssets) -> Weight { assets.weigh_multi_assets(XcmFungibleWeight::::withdraw_asset()) } - // Currently there is no trusted reserve (`IsReserve = ()`), - // but we need this hack for `pallet_xcm::reserve_transfer_assets` - // (TODO) fix https://github.com/paritytech/polkadot/pull/7424 - // (TODO) fix https://github.com/paritytech/polkadot/pull/7546 - fn reserve_asset_deposited(_assets: &MultiAssets) -> Weight { - // TODO: if we change `IsReserve = ...` then use this line... 
- // TODO: or if remote weight estimation is fixed, then remove - // TODO: hardcoded - fix https://github.com/paritytech/cumulus/issues/1974 - let hardcoded_weight = Weight::from_parts(1_000_000_000_u64, 0); - hardcoded_weight.min(XcmFungibleWeight::::reserve_asset_deposited()) + fn reserve_asset_deposited(assets: &MultiAssets) -> Weight { + assets.weigh_multi_assets(XcmFungibleWeight::::reserve_asset_deposited()) } fn receive_teleported_asset(assets: &MultiAssets) -> Weight { assets.weigh_multi_assets(XcmFungibleWeight::::receive_teleported_asset()) @@ -127,10 +118,7 @@ impl XcmWeightInfo for AssetHubWestendXcmWeight { } fn deposit_asset(assets: &MultiAssetFilter, _dest: &MultiLocation) -> Weight { - // Hardcoded till the XCM pallet is fixed - let hardcoded_weight = Weight::from_parts(1_000_000_000_u64, 0); - let weight = assets.weigh_multi_assets(XcmFungibleWeight::::deposit_asset()); - hardcoded_weight.min(weight) + assets.weigh_multi_assets(XcmFungibleWeight::::deposit_asset()) } fn deposit_reserve_asset( assets: &MultiAssetFilter, @@ -220,7 +208,7 @@ impl XcmWeightInfo for AssetHubWestendXcmWeight { XcmGeneric::::clear_transact_status() } fn universal_origin(_: &Junction) -> Weight { - Weight::MAX + XcmGeneric::::universal_origin() } fn export_message(_: &NetworkId, _: &Junctions, _: &Xcm<()>) -> Weight { Weight::MAX diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs index d6763d2fc66f4fb68b9a0b2ed1ade7a7858d6e3d..eaf07aac52cefa88f524e6f3a2180ab9faf2b088 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs @@ -1,44 +1,41 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_xcm_benchmarks::fungible` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! 
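With `reserve_asset_deposited` and `deposit_asset` now properly benchmarked, the hardcoded 1_000_000_000 ps fallback in `xcm/mod.rs` is dropped and both instructions are weighed per asset via `weigh_multi_assets`. A conceptual sketch of that per-asset scaling; this is an assumption about the helper's behaviour for a concrete asset list, not its actual implementation, which also has to handle wildcard filters:

```rust
use frame_support::weights::Weight;

/// Scale a benchmarked per-asset weight by the number of assets being handled.
fn weigh_assets(per_asset: Weight, n_assets: u64) -> Weight {
    per_asset.saturating_mul(n_assets)
}
```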
HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("asset-hub-westend-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: Compiled, CHAIN: Some("asset-hub-westend-dev"), DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --template=./templates/xcm-bench-template.hbs -// --chain=asset-hub-westend-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm_benchmarks::fungible -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm_benchmarks::fungible +// --chain=asset-hub-westend-dev +// --header=./cumulus/file_header.txt +// --template=./cumulus/templates/xcm-bench-template.hbs +// --output=./cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -56,8 +53,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `3593` - // Minimum execution time: 25_411_000 picoseconds. - Weight::from_parts(25_663_000, 3593) + // Minimum execution time: 20_295_000 picoseconds. + Weight::from_parts(21_142_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -67,15 +64,17 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `6196` - // Minimum execution time: 49_478_000 picoseconds. - Weight::from_parts(50_417_000, 6196) + // Minimum execution time: 42_356_000 picoseconds. + Weight::from_parts(43_552_000, 6196) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - // Storage: `System::Account` (r:2 w:2) + // Storage: `System::Account` (r:3 w:3) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -89,48 +88,53 @@ impl WeightInfo { pub fn transfer_reserve_asset() -> Weight { // Proof Size summary in bytes: // Measured: `246` - // Estimated: `6196` - // Minimum execution time: 72_958_000 picoseconds. - Weight::from_parts(74_503_000, 6196) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(4)) + // Estimated: `8799` + // Minimum execution time: 85_553_000 picoseconds. 
+ Weight::from_parts(87_177_000, 8799) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().writes(5)) } - // Storage: `Benchmark::Override` (r:0 w:0) - // Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) pub fn reserve_asset_deposited() -> Weight { // Proof Size summary in bytes: // Measured: `0` - // Estimated: `0` - // Minimum execution time: 500_000_000_000 picoseconds. - Weight::from_parts(500_000_000_000, 0) + // Estimated: `1489` + // Minimum execution time: 6_166_000 picoseconds. + Weight::from_parts(6_352_000, 1489) + .saturating_add(T::DbWeight::get().reads(1)) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn initiate_reserve_withdraw() -> Weight { // Proof Size summary in bytes: - // Measured: `145` - // Estimated: `3610` - // Minimum execution time: 456_993_000 picoseconds. - Weight::from_parts(469_393_000, 3610) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `246` + // Estimated: `6196` + // Minimum execution time: 184_462_000 picoseconds. + Weight::from_parts(189_593_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) } pub fn receive_teleported_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_580_000 picoseconds. - Weight::from_parts(3_717_000, 0) + // Minimum execution time: 3_018_000 picoseconds. + Weight::from_parts(3_098_000, 0) } // Storage: `System::Account` (r:1 w:1) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) @@ -138,15 +142,17 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 25_087_000 picoseconds. - Weight::from_parts(25_788_000, 3593) + // Minimum execution time: 18_583_000 picoseconds. 
+ Weight::from_parts(19_057_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - // Storage: `System::Account` (r:1 w:1) + // Storage: `System::Account` (r:2 w:2) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -160,20 +166,24 @@ impl WeightInfo { pub fn deposit_reserve_asset() -> Weight { // Proof Size summary in bytes: // Measured: `145` - // Estimated: `3610` - // Minimum execution time: 50_824_000 picoseconds. - Weight::from_parts(52_309_000, 3610) - .saturating_add(T::DbWeight::get().reads(7)) - .saturating_add(T::DbWeight::get().writes(3)) + // Estimated: `6196` + // Minimum execution time: 56_666_000 picoseconds. + Weight::from_parts(58_152_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:1 w:1) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) @@ -182,9 +192,9 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 31_854_000 picoseconds. - Weight::from_parts(32_553_000, 3610) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) + // Minimum execution time: 44_197_000 picoseconds. 
+ Weight::from_parts(45_573_000, 3610) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(3)) } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index e776529eb7f8e0cfdc70dcc76200e1ec21de7aeb..fc196abea0f5e61d746760e2b2bf5a7d8d0a476b 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -1,44 +1,41 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_xcm_benchmarks::generic` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("asset-hub-westend-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: Compiled, CHAIN: Some("asset-hub-westend-dev"), DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --template=./templates/xcm-bench-template.hbs -// --chain=asset-hub-westend-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm_benchmarks::generic -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm_benchmarks::generic +// --chain=asset-hub-westend-dev +// --header=./cumulus/file_header.txt +// --template=./cumulus/templates/xcm-bench-template.hbs +// --output=./cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -52,31 +49,35 @@ pub struct WeightInfo(PhantomData); impl WeightInfo { // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn report_holding() -> Weight { // Proof Size summary in bytes: - // Measured: `145` - // Estimated: `3610` - // Minimum execution time: 405_795_000 picoseconds. - Weight::from_parts(421_225_000, 3610) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `246` + // Estimated: `6196` + // Minimum execution time: 415_033_000 picoseconds. + Weight::from_parts(429_573_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) } pub fn buy_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_021_000 picoseconds. - Weight::from_parts(4_234_000, 0) + // Minimum execution time: 3_193_000 picoseconds. 
+ Weight::from_parts(3_620_000, 0) } // Storage: `PolkadotXcm::Queries` (r:1 w:0) // Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -84,79 +85,83 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3568` - // Minimum execution time: 11_004_000 picoseconds. - Weight::from_parts(11_217_000, 3568) + // Minimum execution time: 8_045_000 picoseconds. + Weight::from_parts(8_402_000, 3568) .saturating_add(T::DbWeight::get().reads(1)) } pub fn transact() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 12_888_000 picoseconds. - Weight::from_parts(13_249_000, 0) + // Minimum execution time: 9_827_000 picoseconds. + Weight::from_parts(10_454_000, 0) } pub fn refund_surplus() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_504_000 picoseconds. - Weight::from_parts(4_984_000, 0) + // Minimum execution time: 3_330_000 picoseconds. + Weight::from_parts(3_677_000, 0) } pub fn set_error_handler() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_733_000 picoseconds. - Weight::from_parts(2_887_000, 0) + // Minimum execution time: 1_947_000 picoseconds. + Weight::from_parts(2_083_000, 0) } pub fn set_appendix() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_753_000 picoseconds. - Weight::from_parts(2_844_000, 0) + // Minimum execution time: 1_915_000 picoseconds. + Weight::from_parts(1_993_000, 0) } pub fn clear_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_741_000 picoseconds. - Weight::from_parts(2_826_000, 0) + // Minimum execution time: 1_918_000 picoseconds. + Weight::from_parts(2_048_000, 0) } pub fn descend_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_417_000 picoseconds. - Weight::from_parts(3_525_000, 0) + // Minimum execution time: 2_683_000 picoseconds. + Weight::from_parts(3_064_000, 0) } pub fn clear_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_775_000 picoseconds. - Weight::from_parts(2_853_000, 0) + // Minimum execution time: 1_893_000 picoseconds. 
+ Weight::from_parts(2_159_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn report_error() -> Weight { // Proof Size summary in bytes: - // Measured: `145` - // Estimated: `3610` - // Minimum execution time: 27_035_000 picoseconds. - Weight::from_parts(27_734_000, 3610) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `246` + // Estimated: `6196` + // Minimum execution time: 53_116_000 picoseconds. + Weight::from_parts(54_154_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) } // Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) // Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -164,8 +169,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `160` // Estimated: `3625` - // Minimum execution time: 15_728_000 picoseconds. - Weight::from_parts(16_145_000, 3625) + // Minimum execution time: 12_381_000 picoseconds. + Weight::from_parts(12_693_000, 3625) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -173,11 +178,13 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_630_000 picoseconds. - Weight::from_parts(2_700_000, 0) + // Minimum execution time: 1_933_000 picoseconds. + Weight::from_parts(1_983_000, 0) } // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -192,9 +199,9 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 29_996_000 picoseconds. - Weight::from_parts(30_620_000, 3610) - .saturating_add(T::DbWeight::get().reads(6)) + // Minimum execution time: 24_251_000 picoseconds. 
+ Weight::from_parts(24_890_000, 3610) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } // Storage: `PolkadotXcm::VersionNotifyTargets` (r:0 w:1) @@ -203,127 +210,145 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_629_000 picoseconds. - Weight::from_parts(4_861_000, 0) + // Minimum execution time: 3_850_000 picoseconds. + Weight::from_parts(4_082_000, 0) .saturating_add(T::DbWeight::get().writes(1)) } pub fn burn_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 135_145_000 picoseconds. - Weight::from_parts(142_115_000, 0) + // Minimum execution time: 112_248_000 picoseconds. + Weight::from_parts(124_454_000, 0) } pub fn expect_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 11_948_000 picoseconds. - Weight::from_parts(12_160_000, 0) + // Minimum execution time: 11_457_000 picoseconds. + Weight::from_parts(12_060_000, 0) } pub fn expect_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_718_000 picoseconds. - Weight::from_parts(2_794_000, 0) + // Minimum execution time: 1_959_000 picoseconds. + Weight::from_parts(2_076_000, 0) } pub fn expect_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_590_000 picoseconds. - Weight::from_parts(2_674_000, 0) + // Minimum execution time: 1_920_000 picoseconds. + Weight::from_parts(1_994_000, 0) } pub fn expect_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_858_000 picoseconds. - Weight::from_parts(2_939_000, 0) + // Minimum execution time: 2_149_000 picoseconds. + Weight::from_parts(2_394_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn query_pallet() -> Weight { // Proof Size summary in bytes: - // Measured: `145` - // Estimated: `3610` - // Minimum execution time: 30_652_000 picoseconds. 
- Weight::from_parts(31_552_000, 3610) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `246` + // Estimated: `6196` + // Minimum execution time: 58_011_000 picoseconds. + Weight::from_parts(59_306_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) } pub fn expect_pallet() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_408_000 picoseconds. - Weight::from_parts(5_597_000, 0) + // Minimum execution time: 5_031_000 picoseconds. + Weight::from_parts(5_243_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn report_transact_status() -> Weight { // Proof Size summary in bytes: - // Measured: `145` - // Estimated: `3610` - // Minimum execution time: 27_144_000 picoseconds. - Weight::from_parts(27_736_000, 3610) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `246` + // Estimated: `6196` + // Minimum execution time: 53_078_000 picoseconds. + Weight::from_parts(54_345_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) } pub fn clear_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_696_000 picoseconds. - Weight::from_parts(2_802_000, 0) + // Minimum execution time: 1_936_000 picoseconds. + Weight::from_parts(2_002_000, 0) } pub fn set_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_655_000 picoseconds. - Weight::from_parts(2_720_000, 0) + // Minimum execution time: 1_855_000 picoseconds. + Weight::from_parts(1_950_000, 0) } pub fn clear_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_599_000 picoseconds. - Weight::from_parts(2_723_000, 0) + // Minimum execution time: 1_882_000 picoseconds. 
+ Weight::from_parts(1_977_000, 0) + } + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + pub fn universal_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `1489` + // Minimum execution time: 3_912_000 picoseconds. + Weight::from_parts(4_167_000, 1489) + .saturating_add(T::DbWeight::get().reads(1)) } pub fn set_fees_mode() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_630_000 picoseconds. - Weight::from_parts(2_728_000, 0) + // Minimum execution time: 1_911_000 picoseconds. + Weight::from_parts(1_971_000, 0) } pub fn unpaid_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_797_000 picoseconds. - Weight::from_parts(2_928_000, 0) + // Minimum execution time: 1_990_000 picoseconds. + Weight::from_parts(2_076_000, 0) } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs index a0921c50dc59bbf495e6983f355f3fa09cd992ee..4760e087e24fc1b17467f4035a2efa808aa0d0d1 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs @@ -14,36 +14,45 @@ // limitations under the License. use super::{ - AccountId, AllPalletsWithSystem, Assets, Authorship, Balance, Balances, ParachainInfo, - ParachainSystem, PolkadotXcm, PoolAssets, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, - TrustBackedAssetsInstance, WeightToFee, XcmpQueue, + AccountId, AllPalletsWithSystem, Assets, Authorship, Balance, Balances, BaseDeliveryFee, + FeeAssetId, ForeignAssets, ForeignAssetsInstance, ParachainInfo, ParachainSystem, PolkadotXcm, + PoolAssets, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, ToRococoXcmRouter, + TransactionByteFee, TrustBackedAssetsInstance, WeightToFee, XcmpQueue, }; -use crate::ForeignAssets; use assets_common::{ local_and_foreign_assets::MatchesLocalAndForeignAssetsMultiLocation, - matching::{ - FromSiblingParachain, IsForeignConcreteAsset, StartsWith, StartsWithExplicitGlobalConsensus, - }, + matching::{FromSiblingParachain, IsForeignConcreteAsset}, }; use frame_support::{ match_types, parameter_types, - traits::{ConstU32, Contains, Everything, Nothing, PalletInfoAccess}, + traits::{ConstU32, Contains, Equals, Everything, Nothing, PalletInfoAccess}, }; use frame_system::EnsureRoot; use pallet_xcm::XcmPassthrough; -use parachains_common::{impls::ToStakingPot, xcm_config::AssetFeeAsExistentialDepositMultiplier}; +use parachains_common::{ + impls::ToStakingPot, + xcm_config::{ + AssetFeeAsExistentialDepositMultiplier, ConcreteAssetFromSystem, + RelayOrOtherSystemParachains, + }, + TREASURY_PALLET_ID, +}; use polkadot_parachain_primitives::primitives::Sibling; -use sp_runtime::traits::ConvertInto; +use polkadot_runtime_common::xcm_sender::ExponentialPrice; +use sp_runtime::traits::{AccountIdConversion, ConvertInto}; +use westend_runtime_constants::system_parachain; use xcm::latest::prelude::*; use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, CurrencyAdapter, DenyReserveTransferToRelayChain, DenyThenTry, DescribeFamily, DescribePalletTerminal, - EnsureXcmOrigin, FungiblesAdapter, HashedDescription, 
IsConcrete, LocalMint, NativeAsset, - NoChecking, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, - SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, - SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, - WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, + EnsureXcmOrigin, FungiblesAdapter, GlobalConsensusParachainConvertsFor, HashedDescription, + IsConcrete, LocalMint, NetworkExportTableItem, NoChecking, ParentAsSuperuser, ParentIsPreset, + RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, + SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, StartsWith, + StartsWithExplicitGlobalConsensus, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, + WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, + XcmFeeToAccount, }; use xcm_executor::{traits::WithOriginFilter, XcmExecutor}; @@ -64,6 +73,8 @@ parameter_types! { pub PoolAssetsPalletLocation: MultiLocation = PalletInstance(::index() as u8).into(); pub CheckingAccount: AccountId = PolkadotXcm::check_account(); + pub TreasuryAccount: AccountId = TREASURY_PALLET_ID.into_account_truncating(); + pub RelayTreasuryLocation: MultiLocation = (Parent, PalletInstance(westend_runtime_constants::TREASURY_PALLET_ID)).into(); } /// Type for specifying how a `MultiLocation` can be converted into an `AccountId`. This is used @@ -79,6 +90,9 @@ pub type LocationToAccountId = ( // Foreign chain account alias into local accounts according to a hash of their standard // description. HashedDescription>, + // Different global consensus parachain sovereign account. + // (Used for over-bridge transfers and reserve processing) + GlobalConsensusParachainConvertsFor, ); /// Means for transacting the native currency on this chain. @@ -95,7 +109,7 @@ pub type CurrencyTransactor = CurrencyAdapter< (), >; -/// `AssetId/Balance` converter for `TrustBackedAssets` +/// `AssetId`/`Balance` converter for `TrustBackedAssets`. pub type TrustBackedAssetsConvertedConcreteId = assets_common::TrustBackedAssetsConvertedConcreteId; @@ -116,7 +130,7 @@ pub type FungiblesTransactor = FungiblesAdapter< CheckingAccount, >; -/// `AssetId/Balance` converter for `TrustBackedAssets` +/// `AssetId`/`Balance` converter for `ForeignAssets`. pub type ForeignAssetsConvertedConcreteId = assets_common::ForeignAssetsConvertedConcreteId< ( // Ignore `TrustBackedAssets` explicitly @@ -147,7 +161,7 @@ pub type ForeignFungiblesTransactor = FungiblesAdapter< CheckingAccount, >; -/// `AssetId/Balance` converter for `PoolAssets` +/// `AssetId`/`Balance` converter for `PoolAssets`. pub type PoolAssetsConvertedConcreteId = assets_common::PoolAssetsConvertedConcreteId; @@ -226,9 +240,6 @@ match_types! { MultiLocation { parents: 1, interior: Here } | MultiLocation { parents: 1, interior: X1(Plurality { .. }) } }; - pub type TreasuryPallet: impl Contains = { - MultiLocation { parents: 1, interior: X1(PalletInstance(37)) } - }; } /// A call filter for the XCM Transact instruction. 
This is a temporary measure until we properly @@ -248,15 +259,28 @@ impl Contains for SafeCallFilter { } } + // Allow to change dedicated storage items (called by governance-like) + match call { + RuntimeCall::System(frame_system::Call::set_storage { items }) + if items.iter().all(|(k, _)| k.eq(&bridging::XcmBridgeHubRouterByteFee::key())) || + items + .iter() + .all(|(k, _)| k.eq(&bridging::XcmBridgeHubRouterBaseFee::key())) => + return true, + _ => (), + }; + matches!( call, - RuntimeCall::PolkadotXcm(pallet_xcm::Call::force_xcm_version { .. }) | - RuntimeCall::System( - frame_system::Call::set_heap_pages { .. } | - frame_system::Call::set_code { .. } | - frame_system::Call::set_code_without_checks { .. } | - frame_system::Call::kill_prefix { .. }, - ) | RuntimeCall::ParachainSystem(..) | + RuntimeCall::PolkadotXcm( + pallet_xcm::Call::force_xcm_version { .. } | + pallet_xcm::Call::force_default_xcm_version { .. } + ) | RuntimeCall::System( + frame_system::Call::set_heap_pages { .. } | + frame_system::Call::set_code { .. } | + frame_system::Call::set_code_without_checks { .. } | + frame_system::Call::kill_prefix { .. }, + ) | RuntimeCall::ParachainSystem(..) | RuntimeCall::Timestamp(..) | RuntimeCall::Balances(..) | RuntimeCall::CollatorSelection( @@ -269,7 +293,7 @@ impl Contains for SafeCallFilter { pallet_collator_selection::Call::remove_invulnerable { .. }, ) | RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | RuntimeCall::XcmpQueue(..) | - RuntimeCall::DmpQueue(..) | + RuntimeCall::MessageQueue(..) | RuntimeCall::Assets( pallet_assets::Call::create { .. } | pallet_assets::Call::force_create { .. } | @@ -437,7 +461,9 @@ impl Contains for SafeCallFilter { pallet_uniques::Call::set_accept_ownership { .. } | pallet_uniques::Call::set_collection_max_supply { .. } | pallet_uniques::Call::set_price { .. } | - pallet_uniques::Call::buy_item { .. }, + pallet_uniques::Call::buy_item { .. } + ) | RuntimeCall::ToRococoXcmRouter( + pallet_xcm_bridge_hub_router::Call::report_bridge_status { .. } ) ) } @@ -453,12 +479,16 @@ pub type Barrier = TrailingSetTopicAsId< // Allow XCMs with some computed origins to pass through. WithComputedOrigin< ( - // If the message is one that immediately attemps to pay for execution, then + // If the message is one that immediately attempts to pay for execution, then // allow it. AllowTopLevelPaidExecutionFrom, - // Parent, its pluralities (i.e. governance bodies) and treasury pallet get - // free execution. - AllowExplicitUnpaidExecutionFrom<(ParentOrParentsPlurality, TreasuryPallet)>, + // Parent, its pluralities (i.e. governance bodies), relay treasury pallet and + // BridgeHub get free execution. + AllowExplicitUnpaidExecutionFrom<( + ParentOrParentsPlurality, + Equals, + Equals, + )>, // Subscriptions for version tracking are OK. AllowSubscriptionsFrom, ), @@ -481,23 +511,55 @@ pub type AssetFeeAsExistentialDepositMultiplierFeeCharger = AssetFeeAsExistentia TrustBackedAssetsInstance, >; +/// Multiplier used for dedicated `TakeFirstAssetTrader` with `ForeignAssets` instance. +pub type ForeignAssetFeeAsExistentialDepositMultiplierFeeCharger = + AssetFeeAsExistentialDepositMultiplier< + Runtime, + WeightToFee, + pallet_assets::BalanceToAssetBalance, + ForeignAssetsInstance, + >; + +match_types! 
{ + pub type SystemParachains: impl Contains = { + MultiLocation { + parents: 1, + interior: X1(Parachain( + system_parachain::ASSET_HUB_ID | + system_parachain::COLLECTIVES_ID | + system_parachain::BRIDGE_HUB_ID + )), + } + }; +} + +/// Locations that will not be charged fees in the executor, +/// either execution or delivery. +/// We only waive fees for system functions, which these locations represent. +pub type WaivedLocations = + (RelayOrOtherSystemParachains, Equals); + +/// Cases where a remote origin is accepted as trusted Teleporter for a given asset: +/// +/// - WND with the parent Relay Chain and sibling system parachains; and +/// - Sibling parachains' assets from where they originate (as `ForeignCreators`). +pub type TrustedTeleporters = ( + ConcreteAssetFromSystem, + IsForeignConcreteAsset>>, +); + pub struct XcmConfig; impl xcm_executor::Config for XcmConfig { type RuntimeCall = RuntimeCall; type XcmSender = XcmRouter; type AssetTransactor = AssetTransactors; type OriginConverter = XcmOriginToTransactDispatchOrigin; - // Asset Hub Westend does not recognize a reserve location for any asset. This does not prevent - // Asset Hub acting _as_ a reserve location for WND and assets created under `pallet-assets`. - // For WND, users must use teleport where allowed (e.g. with the Relay Chain). - type IsReserve = (); - // We allow: - // - teleportation of WND - // - teleportation of sibling parachain's assets (as ForeignCreators) - type IsTeleporter = ( - NativeAsset, - IsForeignConcreteAsset>>, - ); + // Asset Hub trusts only particular, pre-configured bridged locations from a different consensus + // as reserve locations (we trust the Bridge Hub to relay the message that a reserve is being + // held). Asset Hub may _act_ as a reserve location for WND and assets created + // under `pallet-assets`. Users must use teleport where allowed (e.g. WND with the Relay Chain). + type IsReserve = (bridging::to_rococo::IsTrustedBridgedReserveLocationForConcreteAsset,); + type IsTeleporter = TrustedTeleporters; type UniversalLocation = UniversalLocation; type Barrier = Barrier; type Weigher = WeightInfoBounds< @@ -507,6 +569,8 @@ impl xcm_executor::Config for XcmConfig { >; type Trader = ( UsingComponents>, + // This trader allows to pay with `is_sufficient=true` "Trust Backed" assets from dedicated + // `pallet_assets` instance - `Assets`. cumulus_primitives_utility::TakeFirstAssetTrader< AccountId, AssetFeeAsExistentialDepositMultiplierFeeCharger, @@ -518,6 +582,19 @@ impl xcm_executor::Config for XcmConfig { XcmAssetFeesReceiver, >, >, + // This trader allows to pay with `is_sufficient=true` "Foreign" assets from dedicated + // `pallet_assets` instance - `ForeignAssets`. 
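The `Trader` tuple is evaluated in declaration order: the `UsingComponents` trader for native WND first, then the `TakeFirstAssetTrader` for sufficient trust-backed assets, and finally the one for sufficient foreign assets added below; the first trader that can handle the offered fee asset buys the weight. A simplified sketch of that ordering with toy types (asset names and prices are made up):

// Sketch only: a "first trader that succeeds wins" chain standing in for the real tuple.
struct Payment {
    asset: &'static str,
    amount: u128,
}

struct AcceptOnly {
    asset: &'static str,
    price_per_weight: u128,
}

impl AcceptOnly {
    /// Try to buy `weight_cost` units of weight; `None` means "wrong asset or not enough".
    fn buy_weight(&self, weight_cost: u128, payment: &Payment) -> Option<u128> {
        let fee = weight_cost * self.price_per_weight;
        (payment.asset == self.asset && payment.amount >= fee).then_some(fee)
    }
}

fn charge(traders: &[AcceptOnly], weight_cost: u128, payment: &Payment) -> Option<u128> {
    // Like the executor: traders are tried in the order they are declared.
    traders.iter().find_map(|t| t.buy_weight(weight_cost, payment))
}

fn main() {
    let traders = [
        AcceptOnly { asset: "WND", price_per_weight: 1 },
        AcceptOnly { asset: "TrustBackedAsset", price_per_weight: 2 },
        AcceptOnly { asset: "ForeignAsset", price_per_weight: 3 },
    ];
    let fee = charge(&traders, 10, &Payment { asset: "ForeignAsset", amount: 100 });
    assert_eq!(fee, Some(30));
}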
+ cumulus_primitives_utility::TakeFirstAssetTrader< + AccountId, + ForeignAssetFeeAsExistentialDepositMultiplierFeeCharger, + ForeignAssetsConvertedConcreteId, + ForeignAssets, + cumulus_primitives_utility::XcmFeesTo32ByteAccount< + ForeignFungiblesTransactor, + AccountId, + XcmAssetFeesReceiver, + >, + >, ); type ResponseHandler = PolkadotXcm; type AssetTrap = PolkadotXcm; @@ -527,9 +604,12 @@ impl xcm_executor::Config for XcmConfig { type MaxAssetsIntoHolding = MaxAssetsIntoHolding; type AssetLocker = (); type AssetExchanger = (); - type FeeManager = (); + type FeeManager = XcmFeeManagerFromComponents< + WaivedLocations, + XcmFeeToAccount, + >; type MessageExporter = (); - type UniversalAliases = Nothing; + type UniversalAliases = (bridging::to_rococo::UniversalAliases,); type CallDispatcher = WithOriginFilter; type SafeCallFilter = SafeCallFilter; type Aliasers = Nothing; @@ -538,19 +618,25 @@ impl xcm_executor::Config for XcmConfig { /// Local origins on this chain are allowed to dispatch XCM sends/executions. pub type LocalOriginToLocation = SignedToAccountId32; -/// The means for routing XCM messages which are not for local execution into the right message -/// queues. -pub type XcmRouter = WithUniqueTopic<( +pub type PriceForParentDelivery = + ExponentialPrice; + +/// For routing XCM messages which do not cross local consensus boundary. +type LocalXcmRouter = ( // Two routers - use UMP to communicate with the relay chain: - cumulus_primitives_utility::ParentAsUmp, + cumulus_primitives_utility::ParentAsUmp, // ..and XCMP to communicate with the sibling chains. XcmpQueue, -)>; +); -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); -} +/// The means for routing XCM messages which are not for local execution into the right message +/// queues. +pub type XcmRouter = WithUniqueTopic<( + LocalXcmRouter, + // Router which wraps and sends xcm to BridgeHub to be delivered to the Rococo + // GlobalConsensus + ToRococoXcmRouter, +)>; impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; @@ -577,8 +663,6 @@ impl pallet_xcm::Config for Runtime { type SovereignAccountOf = LocationToAccountId; type MaxLockers = ConstU32<8>; type WeightInfo = crate::weights::pallet_xcm::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); @@ -631,3 +715,141 @@ where sp_std::boxed::Box::new(Self::asset_id(asset_id)) } } + +/// All configuration related to bridging +pub mod bridging { + use super::*; + use assets_common::matching; + use sp_std::collections::btree_set::BTreeSet; + + parameter_types! { + /// Base price of every byte of the Westend -> Rococo message. Can be adjusted via + /// governance `set_storage` call. + /// + /// Default value is our estimation of the: + /// + /// 1) an approximate cost of XCM execution (`ExportMessage` and surroundings) at Westend bridge hub; + /// + /// 2) the approximate cost of Westend -> Rococo message delivery transaction on Rococo Bridge Hub, + /// converted into WNDs using 1:1 conversion rate; + /// + /// 3) the approximate cost of Westend -> Rococo message confirmation transaction on Westend Bridge Hub. 
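Both the UMP delivery price (`ExponentialPrice`) and the bridge router fees configured below reduce to roughly the same formula: a per-message base amount plus a per-byte amount, scaled by a congestion-driven fee factor. A simplified sketch with made-up numbers and a plain integer standing in for the `FixedU128` factor:

// Sketch only: simplified fee math; constants and the fixed-point scale are placeholders.
const FIXED_POINT_SCALE: u128 = 1_000_000_000_000_000_000; // 10^18, like FixedU128

/// fee ~= fee_factor * (base_fee + byte_fee * encoded_message_len)
fn delivery_fee(base_fee: u128, byte_fee: u128, encoded_len: u128, fee_factor: u128) -> u128 {
    let linear = base_fee.saturating_add(byte_fee.saturating_mul(encoded_len));
    linear.saturating_mul(fee_factor) / FIXED_POINT_SCALE
}

fn main() {
    let base_fee = 1_000_000_000; // placeholder base amount in plancks
    let byte_fee = 1_000; // placeholder per-byte amount
    let fee_factor = 13 * FIXED_POINT_SCALE / 10; // 1.3x while the channel is congested
    assert_eq!(delivery_fee(base_fee, byte_fee, 200, fee_factor), 1_300_260_000);
}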
+ pub storage XcmBridgeHubRouterBaseFee: Balance = + bp_bridge_hub_westend::BridgeHubWestendBaseXcmFeeInWnds::get() + .saturating_add(bp_bridge_hub_rococo::BridgeHubRococoBaseDeliveryFeeInRocs::get()) + .saturating_add(bp_bridge_hub_westend::BridgeHubWestendBaseConfirmationFeeInWnds::get()); + /// Price of every byte of the Westend -> Rococo message. Can be adjusted via + /// governance `set_storage` call. + pub storage XcmBridgeHubRouterByteFee: Balance = TransactionByteFee::get(); + + pub SiblingBridgeHubParaId: u32 = bp_bridge_hub_westend::BRIDGE_HUB_WESTEND_PARACHAIN_ID; + pub SiblingBridgeHub: MultiLocation = MultiLocation::new(1, X1(Parachain(SiblingBridgeHubParaId::get()))); + /// Router expects payment with this `AssetId`. + /// (`AssetId` has to be aligned with `BridgeTable`) + pub XcmBridgeHubRouterFeeAssetId: AssetId = WestendLocation::get().into(); + + pub BridgeTable: sp_std::vec::Vec = + sp_std::vec::Vec::new().into_iter() + .chain(to_rococo::BridgeTable::get()) + .collect(); + } + + pub type NetworkExportTable = xcm_builder::NetworkExportTable; + + pub mod to_rococo { + use super::*; + + parameter_types! { + pub SiblingBridgeHubWithBridgeHubRococoInstance: MultiLocation = MultiLocation::new( + 1, + X2( + Parachain(SiblingBridgeHubParaId::get()), + PalletInstance(bp_bridge_hub_westend::WITH_BRIDGE_WESTEND_TO_ROCOCO_MESSAGES_PALLET_INDEX) + ) + ); + + pub const RococoNetwork: NetworkId = NetworkId::Rococo; + pub AssetHubRococo: MultiLocation = MultiLocation::new(2, X2(GlobalConsensus(RococoNetwork::get()), Parachain(bp_asset_hub_rococo::ASSET_HUB_ROCOCO_PARACHAIN_ID))); + pub RocLocation: MultiLocation = MultiLocation::new(2, X1(GlobalConsensus(RococoNetwork::get()))); + + pub RocFromAssetHubRococo: (MultiAssetFilter, MultiLocation) = ( + Wild(AllOf { fun: WildFungible, id: Concrete(RocLocation::get()) }), + AssetHubRococo::get() + ); + + /// Set up exporters configuration. + /// `Option` represents static "base fee" which is used for total delivery fee calculation. + pub BridgeTable: sp_std::vec::Vec = sp_std::vec![ + NetworkExportTableItem::new( + RococoNetwork::get(), + Some(sp_std::vec![ + AssetHubRococo::get().interior.split_global().expect("invalid configuration for AssetHubRococo").1, + ]), + SiblingBridgeHub::get(), + // base delivery fee to local `BridgeHub` + Some(( + XcmBridgeHubRouterFeeAssetId::get(), + XcmBridgeHubRouterBaseFee::get(), + ).into()) + ) + ]; + + /// Universal aliases + pub UniversalAliases: BTreeSet<(MultiLocation, Junction)> = BTreeSet::from_iter( + sp_std::vec![ + (SiblingBridgeHubWithBridgeHubRococoInstance::get(), GlobalConsensus(RococoNetwork::get())) + ] + ); + } + + impl Contains<(MultiLocation, Junction)> for UniversalAliases { + fn contains(alias: &(MultiLocation, Junction)) -> bool { + UniversalAliases::get().contains(alias) + } + } + + /// Reserve locations filter for `xcm_executor::Config::IsReserve`. + /// Locations from which the runtime accepts reserved assets. + pub type IsTrustedBridgedReserveLocationForConcreteAsset = + matching::IsTrustedBridgedReserveLocationForConcreteAsset< + UniversalLocation, + ( + // allow receive ROC from AssetHubRococo + xcm_builder::Case, + // and nothing else + ), + >; + + impl Contains for ToRococoXcmRouter { + fn contains(call: &RuntimeCall) -> bool { + matches!( + call, + RuntimeCall::ToRococoXcmRouter( + pallet_xcm_bridge_hub_router::Call::report_bridge_status { .. } + ) + ) + } + } + } + + /// Benchmarks helper for bridging configuration. 
+ #[cfg(feature = "runtime-benchmarks")] + pub struct BridgingBenchmarksHelper; + + #[cfg(feature = "runtime-benchmarks")] + impl BridgingBenchmarksHelper { + pub fn prepare_universal_alias() -> Option<(MultiLocation, Junction)> { + let alias = + to_rococo::UniversalAliases::get().into_iter().find_map(|(location, junction)| { + match to_rococo::SiblingBridgeHubWithBridgeHubRococoInstance::get() + .eq(&location) + { + true => Some((location, junction)), + false => None, + } + }); + assert!(alias.is_some(), "we expect here BridgeHubWestend to Rococo mapping at least"); + Some(alias.unwrap()) + } + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs index 599ff90e254aa5f4fb745e153b8a3e9b9addb0e8..7922b04e8077b229eda981f9d107dca0289380c1 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs @@ -19,20 +19,19 @@ use asset_hub_westend_runtime::{ xcm_config::{ - AssetFeeAsExistentialDepositMultiplierFeeCharger, ForeignCreatorsSovereignAccountOf, - WestendLocation, + self, bridging, AssetFeeAsExistentialDepositMultiplierFeeCharger, CheckingAccount, + ForeignCreatorsSovereignAccountOf, LocationToAccountId, TrustBackedAssetsPalletLocation, + WestendLocation, XcmConfig, }, - AllPalletsWithoutSystem, MetadataDepositBase, MetadataDepositPerByte, RuntimeCall, - RuntimeEvent, + AllPalletsWithoutSystem, AssetDeposit, Assets, Balances, ExistentialDeposit, ForeignAssets, + ForeignAssetsInstance, MetadataDepositBase, MetadataDepositPerByte, ParachainSystem, Runtime, + RuntimeCall, RuntimeEvent, SessionKeys, ToRococoXcmRouterInstance, TrustBackedAssetsInstance, + XcmpQueue, }; -pub use asset_hub_westend_runtime::{ - xcm_config::{CheckingAccount, TrustBackedAssetsPalletLocation, XcmConfig}, - AllowMultiAssetPools, AssetDeposit, Assets, Balances, ExistentialDeposit, ForeignAssets, - ForeignAssetsInstance, ParachainSystem, Runtime, SessionKeys, System, - TrustBackedAssetsInstance, +use asset_test_utils::{ + test_cases_over_bridge::TestBridgingConfig, CollatorSessionKey, CollatorSessionKeys, ExtBuilder, }; -use asset_test_utils::{CollatorSessionKeys, ExtBuilder, XcmReceivedFrom}; -use codec::{Decode, DecodeLimit, Encode}; +use codec::{Decode, Encode}; use cumulus_primitives_utility::ChargeWeightInFungibles; use frame_support::{ assert_noop, assert_ok, @@ -42,14 +41,10 @@ use frame_support::{ use parachains_common::{ westend::fee::WeightToFee, AccountId, AssetIdForTrustBackedAssets, AuraId, Balance, }; -use sp_io; use sp_runtime::traits::MaybeEquivalence; use std::convert::Into; -use xcm::{latest::prelude::*, VersionedXcm, MAX_XCM_DECODE_DEPTH}; -use xcm_executor::{ - traits::{Identity, JustTry, WeightTrader}, - XcmExecutor, -}; +use xcm::latest::prelude::*; +use xcm_executor::traits::{Identity, JustTry, WeightTrader}; const ALICE: [u8; 32] = [1u8; 32]; const SOME_ASSET_ADMIN: [u8; 32] = [5u8; 32]; @@ -59,14 +54,18 @@ type AssetIdForTrustBackedAssetsConvert = type RuntimeHelper = asset_test_utils::RuntimeHelper; -fn collator_session_keys() -> CollatorSessionKeys { - CollatorSessionKeys::new( - AccountId::from(ALICE), - AccountId::from(ALICE), - SessionKeys { aura: AuraId::from(sp_core::sr25519::Public::from_raw(ALICE)) }, +fn collator_session_key(account: [u8; 32]) -> CollatorSessionKey { + CollatorSessionKey::new( + AccountId::from(account), + AccountId::from(account), + SessionKeys { aura: 
AuraId::from(sp_core::sr25519::Public::from_raw(account)) }, ) } +fn collator_session_keys() -> CollatorSessionKeys { + CollatorSessionKeys::default().add(collator_session_key(ALICE)) +} + #[test] fn test_asset_xcm_trader() { ExtBuilder::::default() @@ -253,7 +252,7 @@ fn test_asset_xcm_trader_refund_not_possible_since_amount_less_than_ed() { // Set Alice as block author, who will receive fees RuntimeHelper::run_to_block(2, AccountId::from(ALICE)); - // We are going to buy 5e9 weight + // We are going to buy small amount let bought = Weight::from_parts(500_000_000u64, 0); let asset_multilocation = AssetIdForTrustBackedAssetsConvert::convert_back(&1).unwrap(); @@ -526,12 +525,6 @@ asset_test_utils::include_teleports_for_native_asset_works!( _ => None, } }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }), 1000 ); @@ -641,28 +634,207 @@ asset_test_utils::include_create_and_manage_foreign_assets_for_local_consensus_p }) ); -#[test] -fn plain_receive_teleported_asset_works() { - ExtBuilder::::default() - .with_collators(vec![AccountId::from(ALICE)]) - .with_session_keys(vec![( - AccountId::from(ALICE), - AccountId::from(ALICE), - SessionKeys { aura: AuraId::from(sp_core::sr25519::Public::from_raw(ALICE)) }, - )]) - .build() - .execute_with(|| { - let data = hex_literal::hex!("02100204000100000b00a0724e18090a13000100000b00a0724e180901e20f5e480d010004000101001299557001f55815d3fcb53c74463acb0cf6d14d4639b340982c60877f384609").to_vec(); - let message_id = sp_io::hashing::blake2_256(&data); +fn bridging_to_asset_hub_rococo() -> TestBridgingConfig { + TestBridgingConfig { + bridged_network: bridging::to_rococo::RococoNetwork::get(), + local_bridge_hub_para_id: bridging::SiblingBridgeHubParaId::get(), + local_bridge_hub_location: bridging::SiblingBridgeHub::get(), + bridged_target_location: bridging::to_rococo::AssetHubRococo::get(), + } +} - let maybe_msg = VersionedXcm::::decode_all_with_depth_limit( - MAX_XCM_DECODE_DEPTH, - &mut data.as_ref(), +#[test] +fn limited_reserve_transfer_assets_for_native_asset_to_asset_hub_rococo_works() { + asset_test_utils::test_cases_over_bridge::limited_reserve_transfer_assets_for_native_asset_works::< + Runtime, + AllPalletsWithoutSystem, + XcmConfig, + ParachainSystem, + XcmpQueue, + LocationToAccountId, + >( + collator_session_keys(), + ExistentialDeposit::get(), + AccountId::from(ALICE), + Box::new(|runtime_event_encoded: Vec| { + match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { + Ok(RuntimeEvent::PolkadotXcm(event)) => Some(event), + _ => None, + } + }), + Box::new(|runtime_event_encoded: Vec| { + match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { + Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), + _ => None, + } + }), + bridging_to_asset_hub_rococo, + WeightLimit::Unlimited, + Some(xcm_config::bridging::XcmBridgeHubRouterFeeAssetId::get()), + Some(xcm_config::TreasuryAccount::get()), + ) +} +#[test] +fn receive_reserve_asset_deposited_roc_from_asset_hub_rococo_works() { + const BLOCK_AUTHOR_ACCOUNT: [u8; 32] = [13; 32]; + asset_test_utils::test_cases_over_bridge::receive_reserve_asset_deposited_from_different_consensus_works::< + Runtime, + AllPalletsWithoutSystem, + XcmConfig, + LocationToAccountId, + ForeignAssetsInstance, + >( + collator_session_keys().add(collator_session_key(BLOCK_AUTHOR_ACCOUNT)), + ExistentialDeposit::get(), + AccountId::from([73; 32]), + AccountId::from(BLOCK_AUTHOR_ACCOUNT), + 
// receiving ROCs + (MultiLocation { parents: 2, interior: X1(GlobalConsensus(Rococo)) }, 1000000000000, 1_000_000_000), + bridging_to_asset_hub_rococo, + ( + X1(PalletInstance(bp_bridge_hub_westend::WITH_BRIDGE_WESTEND_TO_ROCOCO_MESSAGES_PALLET_INDEX)), + GlobalConsensus(Rococo), + X1(Parachain(1000)) ) - .map(xcm::v3::Xcm::::try_from).expect("failed").expect("failed"); + ) +} +#[test] +fn report_bridge_status_from_xcm_bridge_router_for_rococo_works() { + asset_test_utils::test_cases_over_bridge::report_bridge_status_from_xcm_bridge_router_works::< + Runtime, + AllPalletsWithoutSystem, + XcmConfig, + LocationToAccountId, + ToRococoXcmRouterInstance, + >( + collator_session_keys(), + bridging_to_asset_hub_rococo, + || { + sp_std::vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + Transact { + origin_kind: OriginKind::Xcm, + require_weight_at_most: + bp_asset_hub_westend::XcmBridgeHubRouterTransactCallMaxWeight::get(), + call: bp_asset_hub_westend::Call::ToRococoXcmRouter( + bp_asset_hub_westend::XcmBridgeHubRouterCall::report_bridge_status { + bridge_id: Default::default(), + is_congested: true, + } + ) + .encode() + .into(), + } + ] + .into() + }, + || { + sp_std::vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + Transact { + origin_kind: OriginKind::Xcm, + require_weight_at_most: + bp_asset_hub_westend::XcmBridgeHubRouterTransactCallMaxWeight::get(), + call: bp_asset_hub_westend::Call::ToRococoXcmRouter( + bp_asset_hub_westend::XcmBridgeHubRouterCall::report_bridge_status { + bridge_id: Default::default(), + is_congested: false, + } + ) + .encode() + .into(), + } + ] + .into() + }, + ) +} - let outcome = - XcmExecutor::::execute_xcm(Parent, maybe_msg, message_id, RuntimeHelper::xcm_max_weight(XcmReceivedFrom::Parent)); - assert_eq!(outcome.ensure_complete(), Ok(())); +#[test] +fn test_report_bridge_status_call_compatibility() { + // if this test fails, make sure `bp_asset_hub_rococo` has valid encoding + assert_eq!( + RuntimeCall::ToRococoXcmRouter(pallet_xcm_bridge_hub_router::Call::report_bridge_status { + bridge_id: Default::default(), + is_congested: true, }) + .encode(), + bp_asset_hub_westend::Call::ToRococoXcmRouter( + bp_asset_hub_westend::XcmBridgeHubRouterCall::report_bridge_status { + bridge_id: Default::default(), + is_congested: true, + } + ) + .encode() + ) +} + +#[test] +fn check_sane_weight_report_bridge_status() { + use pallet_xcm_bridge_hub_router::WeightInfo; + let actual = >::WeightInfo::report_bridge_status(); + let max_weight = bp_asset_hub_westend::XcmBridgeHubRouterTransactCallMaxWeight::get(); + assert!( + actual.all_lte(max_weight), + "max_weight: {:?} should be adjusted to actual {:?}", + max_weight, + actual + ); +} + +#[test] +fn change_xcm_bridge_hub_router_byte_fee_by_governance_works() { + asset_test_utils::test_cases::change_storage_constant_by_governance_works::< + Runtime, + bridging::XcmBridgeHubRouterByteFee, + Balance, + >( + collator_session_keys(), + 1000, + Box::new(|call| RuntimeCall::System(call).encode()), + || { + ( + bridging::XcmBridgeHubRouterByteFee::key().to_vec(), + bridging::XcmBridgeHubRouterByteFee::get(), + ) + }, + |old_value| { + if let Some(new_value) = old_value.checked_add(1) { + new_value + } else { + old_value.checked_sub(1).unwrap() + } + }, + ) +} + +#[test] +fn reserve_transfer_native_asset_to_non_teleport_para_works() { + asset_test_utils::test_cases::reserve_transfer_native_asset_to_non_teleport_para_works::< + Runtime, + AllPalletsWithoutSystem, + XcmConfig, + 
ParachainSystem, + XcmpQueue, + LocationToAccountId, + >( + collator_session_keys(), + ExistentialDeposit::get(), + AccountId::from(ALICE), + Box::new(|runtime_event_encoded: Vec| { + match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { + Ok(RuntimeEvent::PolkadotXcm(event)) => Some(event), + _ => None, + } + }), + Box::new(|runtime_event_encoded: Vec| { + match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { + Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), + _ => None, + } + }), + WeightLimit::Unlimited, + ); } diff --git a/cumulus/parachains/runtimes/assets/common/Cargo.toml b/cumulus/parachains/runtimes/assets/common/Cargo.toml index 0cd5de2ddcd47df05cf3f9dc76c39ada47ac0cb1..49fc2a0fa5eba5cc5897c144347e3da0aa029202 100644 --- a/cumulus/parachains/runtimes/assets/common/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/common/Cargo.toml @@ -4,10 +4,11 @@ version = "0.1.0" authors.workspace = true edition.workspace = true description = "Assets common utilities" +license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } log = { version = "0.4.20", default-features = false } impl-trait-for-tuples = "0.2.2" @@ -43,6 +44,7 @@ std = [ "pallet-asset-tx-payment/std", "pallet-xcm/std", "parachains-common/std", + "scale-info/std", "sp-api/std", "sp-runtime/std", "sp-std/std", @@ -52,6 +54,7 @@ std = [ ] runtime-benchmarks = [ + "cumulus-primitives-core/runtime-benchmarks", "frame-support/runtime-benchmarks", "pallet-asset-conversion/runtime-benchmarks", "pallet-asset-tx-payment/runtime-benchmarks", diff --git a/cumulus/parachains/runtimes/assets/common/src/lib.rs b/cumulus/parachains/runtimes/assets/common/src/lib.rs index 560a89b131c03520da8a574620f8641f87e66ccf..f45c3289aab49c16e2b435671d3625f78692f07d 100644 --- a/cumulus/parachains/runtimes/assets/common/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/common/src/lib.rs @@ -21,11 +21,11 @@ pub mod local_and_foreign_assets; pub mod matching; pub mod runtime_api; -use crate::matching::{Equals, LocalMultiLocationPattern, ParentLocation, StartsWith}; -use frame_support::traits::EverythingBut; +use crate::matching::{LocalMultiLocationPattern, ParentLocation}; +use frame_support::traits::{Equals, EverythingBut}; use parachains_common::AssetIdForTrustBackedAssets; use xcm::prelude::MultiLocation; -use xcm_builder::{AsPrefixedGeneralIndex, MatchedConvertedConcreteId}; +use xcm_builder::{AsPrefixedGeneralIndex, MatchedConvertedConcreteId, StartsWith}; use xcm_executor::traits::{Identity, JustTry}; /// `MultiLocation` vs `AssetIdForTrustBackedAssets` converter for `TrustBackedAssets` @@ -96,9 +96,9 @@ pub type PoolAssetsConvertedConcreteId = #[cfg(test)] mod tests { use super::*; - use crate::matching::StartsWithExplicitGlobalConsensus; use sp_runtime::traits::MaybeEquivalence; use xcm::latest::prelude::*; + use xcm_builder::StartsWithExplicitGlobalConsensus; use xcm_executor::traits::{Error as MatchError, MatchesFungibles}; #[test] diff --git a/cumulus/parachains/runtimes/assets/common/src/local_and_foreign_assets.rs b/cumulus/parachains/runtimes/assets/common/src/local_and_foreign_assets.rs index 5c66d1cabe571a5098f85fba47a81a4058cacbd6..9f429016f5302a28a61753331126a48f77ee7565 100644 --- a/cumulus/parachains/runtimes/assets/common/src/local_and_foreign_assets.rs +++ 
b/cumulus/parachains/runtimes/assets/common/src/local_and_foreign_assets.rs @@ -407,13 +407,14 @@ where #[cfg(test)] mod tests { use crate::{ - local_and_foreign_assets::MultiLocationConverter, matching::StartsWith, - AssetIdForPoolAssetsConvert, AssetIdForTrustBackedAssetsConvert, + local_and_foreign_assets::MultiLocationConverter, AssetIdForPoolAssetsConvert, + AssetIdForTrustBackedAssetsConvert, }; use frame_support::traits::EverythingBut; use pallet_asset_conversion::{MultiAssetIdConversionResult, MultiAssetIdConverter}; use sp_runtime::traits::MaybeEquivalence; use xcm::latest::prelude::*; + use xcm_builder::StartsWith; #[test] fn test_multi_location_converter_works() { diff --git a/cumulus/parachains/runtimes/assets/common/src/matching.rs b/cumulus/parachains/runtimes/assets/common/src/matching.rs index 08d391170014cfb0958887c09372f6e368910a56..9149728125212c9c3c980b115aa7d142905baf2a 100644 --- a/cumulus/parachains/runtimes/assets/common/src/matching.rs +++ b/cumulus/parachains/runtimes/assets/common/src/matching.rs @@ -14,37 +14,12 @@ // limitations under the License. use cumulus_primitives_core::ParaId; -use frame_support::{ - pallet_prelude::Get, - traits::{Contains, ContainsPair}, -}; +use frame_support::{pallet_prelude::Get, traits::ContainsPair}; use xcm::{ latest::prelude::{MultiAsset, MultiLocation}, prelude::*, }; - -pub struct StartsWith(sp_std::marker::PhantomData); -impl> Contains for StartsWith { - fn contains(t: &MultiLocation) -> bool { - t.starts_with(&Location::get()) - } -} - -pub struct Equals(sp_std::marker::PhantomData); -impl> Contains for Equals { - fn contains(t: &MultiLocation) -> bool { - t == &Location::get() - } -} - -pub struct StartsWithExplicitGlobalConsensus(sp_std::marker::PhantomData); -impl> Contains - for StartsWithExplicitGlobalConsensus -{ - fn contains(t: &MultiLocation) -> bool { - matches!(t.interior.global_consensus(), Ok(requested_network) if requested_network.eq(&Network::get())) - } -} +use xcm_builder::ensure_is_remote; frame_support::parameter_types! { pub LocalMultiLocationPattern: MultiLocation = MultiLocation::new(0, Here); @@ -82,3 +57,41 @@ impl> ContainsPair } } } + +/// Adapter verifies if it is allowed to receive `MultiAsset` from `MultiLocation`. +/// +/// Note: `MultiLocation` has to be from a different global consensus. 
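+///
+/// Illustrative usage (the `BridgedReserves` filter named here is hypothetical and not defined
+/// in this crate): a runtime would typically plug this adapter into its `xcm_executor::Config`
+/// as the reserve filter, e.g.
+/// `type IsReserve = IsTrustedBridgedReserveLocationForConcreteAsset<UniversalLocation, BridgedReserves>;`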
+pub struct IsTrustedBridgedReserveLocationForConcreteAsset( + sp_std::marker::PhantomData<(UniversalLocation, Reserves)>, +); +impl< + UniversalLocation: Get, + Reserves: ContainsPair, + > ContainsPair + for IsTrustedBridgedReserveLocationForConcreteAsset +{ + fn contains(asset: &MultiAsset, origin: &MultiLocation) -> bool { + let universal_source = UniversalLocation::get(); + log::trace!( + target: "xcm::contains", + "IsTrustedBridgedReserveLocationForConcreteAsset asset: {:?}, origin: {:?}, universal_source: {:?}", + asset, origin, universal_source + ); + + // check remote origin + let _ = match ensure_is_remote(universal_source, *origin) { + Ok(devolved) => devolved, + Err(_) => { + log::trace!( + target: "xcm::contains", + "IsTrustedBridgedReserveLocationForConcreteAsset origin: {:?} is not remote to the universal_source: {:?}", + origin, universal_source + ); + return false + }, + }; + + // check asset according to the configured reserve locations + Reserves::contains(asset, origin) + } +} diff --git a/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml b/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml index 0a27535ed22bd447774fbf13636f1a2c09a5699c..1dc7cecbb62eec9724fd626d69fe1c3e0f0554f0 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml @@ -4,6 +4,7 @@ version = "1.0.0" authors.workspace = true edition.workspace = true description = "Test utils for Asset Hub runtimes." +license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } @@ -23,21 +24,24 @@ sp-core = { path = "../../../../../substrate/primitives/core", default-features # Cumulus cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } -cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } parachains-common = { path = "../../../common", default-features = false } assets-common = { path = "../common", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-parachain-inherent = { path = "../../../../primitives/parachain-inherent", default-features = false } cumulus-test-relay-sproof-builder = { path = "../../../../test/relay-sproof-builder", default-features = false } -parachain-info = { path = "../../../pallets/parachain-info", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-runtimes-test-utils = { path = "../../test-utils", default-features = false } # Polkadot -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false} -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false} +xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } +xcm-builder = { 
package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } +pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } +polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false } + +# Bridges +pallet-xcm-bridge-hub-router = { path = "../../../../../bridges/modules/xcm-bridge-hub-router", default-features = false } [dev-dependencies] hex-literal = "0.4.1" @@ -49,7 +53,7 @@ substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder" default = [ "std" ] std = [ "assets-common/std", - "cumulus-pallet-dmp-queue/std", + "codec/std", "cumulus-pallet-parachain-system/std", "cumulus-pallet-xcmp-queue/std", "cumulus-primitives-core/std", @@ -61,6 +65,7 @@ std = [ "pallet-balances/std", "pallet-collator-selection/std", "pallet-session/std", + "pallet-xcm-bridge-hub-router/std", "pallet-xcm/std", "parachain-info/std", "parachains-common/std", @@ -71,6 +76,7 @@ std = [ "sp-io/std", "sp-runtime/std", "sp-std/std", + "xcm-builder/std", "xcm-executor/std", "xcm/std", ] diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/lib.rs b/cumulus/parachains/runtimes/assets/test-utils/src/lib.rs index 7177726e07046e1d55a015016e88924db418077a..872ad06ddd5b063d4013b296654541c1a6e02681 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/test-utils/src/lib.rs @@ -17,4 +17,65 @@ //! Module contains predefined test-case scenarios for `Runtime` with various assets. pub mod test_cases; +pub mod test_cases_over_bridge; +pub mod xcm_helpers; + +use frame_support::traits::ProcessMessageError; pub use parachains_runtimes_test_utils::*; +use std::fmt::Debug; + +use xcm::latest::prelude::*; +use xcm_builder::{CreateMatcher, MatchXcm}; + +/// Given a message, a sender, and a destination, it returns the delivery fees +fn get_fungible_delivery_fees(destination: MultiLocation, message: Xcm<()>) -> u128 { + let Ok((_, delivery_fees)) = validate_send::(destination, message) else { + unreachable!("message can be sent; qed") + }; + if let Some(delivery_fee) = delivery_fees.inner().first() { + let Fungible(delivery_fee_amount) = delivery_fee.fun else { + unreachable!("asset is fungible; qed"); + }; + delivery_fee_amount + } else { + 0 + } +} + +/// Helper function to verify `xcm` contains all relevant instructions expected on destination +/// chain as part of a reserve-asset-transfer. +pub(crate) fn assert_matches_reserve_asset_deposited_instructions( + xcm: &mut Xcm, + expected_reserve_assets_deposited: &MultiAssets, + expected_beneficiary: &MultiLocation, +) { + let _ = xcm + .0 + .matcher() + .skip_inst_while(|inst| !matches!(inst, ReserveAssetDeposited(..))) + .expect("no instruction ReserveAssetDeposited?") + .match_next_inst(|instr| match instr { + ReserveAssetDeposited(reserve_assets) => { + assert_eq!(reserve_assets, expected_reserve_assets_deposited); + Ok(()) + }, + _ => Err(ProcessMessageError::BadFormat), + }) + .expect("expected instruction ReserveAssetDeposited") + .match_next_inst(|instr| match instr { + ClearOrigin => Ok(()), + _ => Err(ProcessMessageError::BadFormat), + }) + .expect("expected instruction ClearOrigin") + .match_next_inst(|instr| match instr { + BuyExecution { .. 
} => Ok(()), + _ => Err(ProcessMessageError::BadFormat), + }) + .expect("expected instruction BuyExecution") + .match_next_inst(|instr| match instr { + DepositAsset { assets: _, beneficiary } if beneficiary == expected_beneficiary => + Ok(()), + _ => Err(ProcessMessageError::BadFormat), + }) + .expect("expected instruction DepositAsset"); +} diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs index 7a8d571403c6b5c5f856dd30c08eaa092423cda5..f1cc76350a003937d4c429ba1f98e5e2149a8cf4 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs +++ b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs @@ -15,23 +15,29 @@ //! Module contains predefined test-case scenarios for `Runtime` with various assets. +use super::xcm_helpers; +use crate::{assert_matches_reserve_asset_deposited_instructions, get_fungible_delivery_fees}; use codec::Encode; +use cumulus_primitives_core::XcmpMessageSource; use frame_support::{ assert_noop, assert_ok, - traits::{fungibles::InspectEnumerable, Get, OnFinalize, OnInitialize, OriginTrait}, + traits::{ + fungible::Mutate, fungibles::InspectEnumerable, Currency, Get, OnFinalize, OnInitialize, + OriginTrait, + }, weights::Weight, }; use frame_system::pallet_prelude::BlockNumberFor; use parachains_common::{AccountId, Balance}; use parachains_runtimes_test_utils::{ - assert_metadata, assert_total, AccountIdOf, BalanceOf, CollatorSessionKeys, ExtBuilder, - ValidatorIdOf, XcmReceivedFrom, + assert_metadata, assert_total, mock_open_hrmp_channel, AccountIdOf, BalanceOf, + CollatorSessionKeys, ExtBuilder, ValidatorIdOf, XcmReceivedFrom, }; use sp_runtime::{ traits::{MaybeEquivalence, StaticLookup, Zero}, DispatchError, Saturating, }; -use xcm::latest::prelude::*; +use xcm::{latest::prelude::*, VersionedMultiAssets}; use xcm_executor::{traits::ConvertLocation, XcmExecutor}; type RuntimeHelper = @@ -40,8 +46,8 @@ type RuntimeHelper = // Re-export test_case from `parachains-runtimes-test-utils` pub use parachains_runtimes_test_utils::test_cases::change_storage_constant_by_governance_works; -/// Test-case makes sure that `Runtime` can receive native asset from relay chain -/// and can teleport it back and to the other parachains +/// Test-case makes sure that `Runtime` can receive native asset from relay chain and can teleport +/// it back pub fn teleports_for_native_asset_works< Runtime, AllPalletsWithoutSystem, @@ -54,9 +60,6 @@ pub fn teleports_for_native_asset_works< existential_deposit: BalanceOf, target_account: AccountIdOf, unwrap_pallet_xcm_event: Box) -> Option>>, - unwrap_xcmp_queue_event: Box< - dyn Fn(Vec) -> Option>, - >, runtime_para_id: u32, ) where Runtime: frame_system::Config @@ -78,6 +81,7 @@ pub fn teleports_for_native_asset_works< Into<<::RuntimeOrigin as OriginTrait>::AccountId>, <::Lookup as StaticLookup>::Source: From<::AccountId>, + ::AccountId: From, XcmConfig: xcm_executor::Config, CheckingAccount: Get>, HrmpChannelOpener: frame_support::inherent::ProvideInherent< @@ -89,6 +93,7 @@ pub fn teleports_for_native_asset_works< .with_session_keys(collator_session_keys.session_keys()) .with_safe_xcm_version(XCM_VERSION) .with_para_id(runtime_para_id.into()) + .with_tracing() .build() .execute_with(|| { let mut alice = [0u8; 32]; @@ -96,7 +101,7 @@ pub fn teleports_for_native_asset_works< let included_head = RuntimeHelper::::run_to_block( 2, - AccountId::from(alice), + AccountId::from(alice).into(), ); // check Balances before 
assert_eq!(>::free_balance(&target_account), 0.into()); @@ -107,7 +112,7 @@ pub fn teleports_for_native_asset_works< let native_asset_id = MultiLocation::parent(); let buy_execution_fee_amount_eta = - WeightToFee::weight_to_fee(&Weight::from_parts(90_000_000_000, 0)); + WeightToFee::weight_to_fee(&Weight::from_parts(90_000_000_000, 1024)); let native_asset_amount_unit = existential_deposit; let native_asset_amount_received = native_asset_amount_unit * 10.into() + buy_execution_fee_amount_eta.into(); @@ -124,7 +129,7 @@ pub fn teleports_for_native_asset_works< id: Concrete(native_asset_id), fun: Fungible(buy_execution_fee_amount_eta), }, - weight_limit: Limited(Weight::from_parts(303531000, 65536)), + weight_limit: Limited(Weight::from_parts(3035310000, 65536)), }, DepositAsset { assets: Wild(AllCounted(1)), @@ -159,12 +164,13 @@ pub fn teleports_for_native_asset_works< // 2. try to teleport asset back to the relaychain { let dest = MultiLocation::parent(); - let dest_beneficiary = MultiLocation::parent() + let mut dest_beneficiary = MultiLocation::parent() .appended_with(AccountId32 { network: None, id: sp_runtime::AccountId32::new([3; 32]).into(), }) .unwrap(); + dest_beneficiary.reanchor(&dest, XcmConfig::UniversalLocation::get()).unwrap(); let target_account_balance_before_teleport = >::free_balance(&target_account); @@ -174,6 +180,21 @@ pub fn teleports_for_native_asset_works< target_account_balance_before_teleport - existential_deposit ); + // Mint funds into account to ensure it has enough balance to pay delivery fees + let delivery_fees = + xcm_helpers::transfer_assets_delivery_fees::( + (native_asset_id, native_asset_to_teleport_away.into()).into(), + 0, + Unlimited, + dest_beneficiary, + dest, + ); + >::mint_into( + &target_account, + delivery_fees.into(), + ) + .unwrap(); + assert_ok!(RuntimeHelper::::do_teleport_assets::( RuntimeHelper::::origin_of(target_account.clone()), dest, @@ -183,6 +204,7 @@ pub fn teleports_for_native_asset_works< included_head.clone(), &alice, )); + // check balances assert_eq!( >::free_balance(&target_account), @@ -202,54 +224,53 @@ pub fn teleports_for_native_asset_works< ); } - // 3. try to teleport asset away to other parachain (1234) + // 3. 
try to teleport assets away to other parachain (2345): should not work as we don't + // trust `IsTeleporter` for `(relay-native-asset, para(2345))` pair { - let other_para_id = 1234; + let other_para_id = 2345; let dest = MultiLocation::new(1, X1(Parachain(other_para_id))); - let dest_beneficiary = MultiLocation::new(1, X1(Parachain(other_para_id))) + let mut dest_beneficiary = MultiLocation::new(1, X1(Parachain(other_para_id))) .appended_with(AccountId32 { network: None, id: sp_runtime::AccountId32::new([3; 32]).into(), }) .unwrap(); + dest_beneficiary.reanchor(&dest, XcmConfig::UniversalLocation::get()).unwrap(); let target_account_balance_before_teleport = >::free_balance(&target_account); + let native_asset_to_teleport_away = native_asset_amount_unit * 3.into(); assert!( native_asset_to_teleport_away < target_account_balance_before_teleport - existential_deposit ); - - assert_ok!(RuntimeHelper::::do_teleport_assets::( - RuntimeHelper::::origin_of(target_account.clone()), - dest, - dest_beneficiary, - (native_asset_id, native_asset_to_teleport_away.into()), - Some((runtime_para_id, other_para_id)), - included_head, - &alice, - )); + assert_eq!( + RuntimeHelper::::do_teleport_assets::( + RuntimeHelper::::origin_of(target_account.clone()), + dest, + dest_beneficiary, + (native_asset_id, native_asset_to_teleport_away.into()), + Some((runtime_para_id, other_para_id)), + included_head, + &alice, + ), + Err(DispatchError::Module(sp_runtime::ModuleError { + index: 31, + error: [2, 0, 0, 0,], + message: Some("Filtered",), + },),) + ); // check balances assert_eq!( >::free_balance(&target_account), - target_account_balance_before_teleport - native_asset_to_teleport_away + target_account_balance_before_teleport ); assert_eq!( >::free_balance(&CheckingAccount::get()), 0.into() ); - - // check events - RuntimeHelper::::assert_pallet_xcm_event_outcome( - &unwrap_pallet_xcm_event, - |outcome| { - assert_ok!(outcome.ensure_complete()); - }, - ); - assert!(RuntimeHelper::::xcmp_queue_message_sent(unwrap_xcmp_queue_event) - .is_some()); } }) } @@ -266,7 +287,6 @@ macro_rules! include_teleports_for_native_asset_works( $collator_session_key:expr, $existential_deposit:expr, $unwrap_pallet_xcm_event:expr, - $unwrap_xcmp_queue_event:expr, $runtime_para_id:expr ) => { #[test] @@ -286,15 +306,14 @@ macro_rules! 
include_teleports_for_native_asset_works( $existential_deposit, target_account, $unwrap_pallet_xcm_event, - $unwrap_xcmp_queue_event, $runtime_para_id ) } } ); -/// Test-case makes sure that `Runtime` can receive teleported assets from sibling parachain relay -/// chain +/// Test-case makes sure that `Runtime` can receive teleported assets from sibling parachain, and +/// can teleport it back pub fn teleports_for_foreign_assets_works< Runtime, AllPalletsWithoutSystem, @@ -346,9 +365,10 @@ pub fn teleports_for_foreign_assets_works< Into<<::RuntimeOrigin as OriginTrait>::AccountId>, <::Lookup as StaticLookup>::Source: From<::AccountId>, + ::AccountId: From, ForeignAssetsPalletInstance: 'static, { - // foreign parachain with the same consenus currency as asset + // foreign parachain with the same consensus currency as asset let foreign_para_id = 2222; let foreign_asset_id_multilocation = MultiLocation { parents: 1, @@ -368,7 +388,7 @@ pub fn teleports_for_foreign_assets_works< fun: Fungible(buy_execution_fee_amount), }; - let teleported_foreign_asset_amount = 10000000000000; + let teleported_foreign_asset_amount = 10_000_000_000_000; let runtime_para_id = 1000; ExtBuilder::::default() .with_collators(collator_session_keys.collators()) @@ -391,18 +411,18 @@ pub fn teleports_for_foreign_assets_works< let included_head = RuntimeHelper::::run_to_block( 2, - AccountId::from(alice), + AccountId::from(alice).into(), ); // checks target_account before assert_eq!( >::free_balance(&target_account), existential_deposit ); + // check `CheckingAccount` before assert_eq!( >::free_balance(&CheckingAccount::get()), existential_deposit ); - // check `CheckingAccount` before assert_eq!( >::balance( foreign_asset_id_multilocation.into(), @@ -440,7 +460,7 @@ pub fn teleports_for_foreign_assets_works< >(foreign_asset_id_multilocation, 0, 0); assert!(teleported_foreign_asset_amount > asset_minimum_asset_balance); - // 1. process received teleported assets from relaychain + // 1. process received teleported assets from sibling parachain (foreign_para_id) let xcm = Xcm(vec![ // BuyExecution with relaychain native token WithdrawAsset(buy_execution_fee.clone().into()), @@ -518,12 +538,13 @@ pub fn teleports_for_foreign_assets_works< // 2. try to teleport asset back to source parachain (foreign_para_id) { let dest = MultiLocation::new(1, X1(Parachain(foreign_para_id))); - let dest_beneficiary = MultiLocation::new(1, X1(Parachain(foreign_para_id))) + let mut dest_beneficiary = MultiLocation::new(1, X1(Parachain(foreign_para_id))) .appended_with(AccountId32 { network: None, id: sp_runtime::AccountId32::new([3; 32]).into(), }) .unwrap(); + dest_beneficiary.reanchor(&dest, XcmConfig::UniversalLocation::get()).unwrap(); let target_account_balance_before_teleport = >::balance( @@ -538,6 +559,21 @@ pub fn teleports_for_foreign_assets_works< .into() ); + // Make sure the target account has enough native asset to pay for delivery fees + let delivery_fees = + xcm_helpers::transfer_assets_delivery_fees::( + (foreign_asset_id_multilocation, asset_to_teleport_away).into(), + 0, + Unlimited, + dest_beneficiary, + dest, + ); + >::mint_into( + &target_account, + delivery_fees.into(), + ) + .unwrap(); + assert_ok!(RuntimeHelper::::do_teleport_assets::( RuntimeHelper::::origin_of(target_account.clone()), dest, @@ -1005,6 +1041,7 @@ macro_rules! 
include_asset_transactor_transfer_with_pallet_assets_instance_works } ); +/// Test-case makes sure that `Runtime` can create and manage `ForeignAssets` pub fn create_and_manage_foreign_assets_for_local_consensus_parachain_assets_works< Runtime, XcmConfig, @@ -1059,7 +1096,7 @@ pub fn create_and_manage_foreign_assets_for_local_consensus_parachain_assets_wor AssetId: Clone + Copy, AssetIdConverter: MaybeEquivalence, { - // foreign parachain with the same consenus currency as asset + // foreign parachain with the same consensus currency as asset let foreign_asset_id_multilocation = MultiLocation { parents: 1, interior: X2(Parachain(2222), GeneralIndex(1234567)) }; let asset_id = AssetIdConverter::convert(&foreign_asset_id_multilocation).unwrap(); @@ -1190,7 +1227,7 @@ pub fn create_and_manage_foreign_assets_for_local_consensus_parachain_assets_wor .is_empty()); // check update metadata - use frame_support::traits::tokens::fungibles::roles::Inspect as InspectRoles; + use frame_support::traits::fungibles::roles::Inspect as InspectRoles; assert_eq!( >::owner( asset_id.into() @@ -1339,3 +1376,199 @@ macro_rules! include_create_and_manage_foreign_assets_for_local_consensus_parach } } ); + +/// Test-case makes sure that `Runtime` can reserve-transfer asset to other parachains (where +/// teleport is not trusted) +pub fn reserve_transfer_native_asset_to_non_teleport_para_works< + Runtime, + AllPalletsWithoutSystem, + XcmConfig, + HrmpChannelOpener, + HrmpChannelSource, + LocationToAccountId, +>( + collator_session_keys: CollatorSessionKeys, + existential_deposit: BalanceOf, + alice_account: AccountIdOf, + unwrap_pallet_xcm_event: Box) -> Option>>, + unwrap_xcmp_queue_event: Box< + dyn Fn(Vec) -> Option>, + >, + weight_limit: WeightLimit, +) where + Runtime: frame_system::Config + + pallet_balances::Config + + pallet_session::Config + + pallet_xcm::Config + + parachain_info::Config + + pallet_collator_selection::Config + + cumulus_pallet_parachain_system::Config + + cumulus_pallet_xcmp_queue::Config, + AllPalletsWithoutSystem: + OnInitialize> + OnFinalize>, + AccountIdOf: Into<[u8; 32]>, + ValidatorIdOf: From>, + BalanceOf: From, + ::Balance: From + Into, + XcmConfig: xcm_executor::Config, + LocationToAccountId: ConvertLocation>, + ::AccountId: + Into<<::RuntimeOrigin as OriginTrait>::AccountId>, + <::Lookup as StaticLookup>::Source: + From<::AccountId>, + ::AccountId: From, + HrmpChannelOpener: frame_support::inherent::ProvideInherent< + Call = cumulus_pallet_parachain_system::Call, + >, + HrmpChannelSource: XcmpMessageSource, +{ + let runtime_para_id = 1000; + ExtBuilder::::default() + .with_collators(collator_session_keys.collators()) + .with_session_keys(collator_session_keys.session_keys()) + .with_tracing() + .with_safe_xcm_version(3) + .with_para_id(runtime_para_id.into()) + .build() + .execute_with(|| { + let mut alice = [0u8; 32]; + alice[0] = 1; + let included_head = RuntimeHelper::::run_to_block( + 2, + AccountId::from(alice).into(), + ); + + // reserve-transfer native asset with local reserve to remote parachain (2345) + + let other_para_id = 2345; + let native_asset = MultiLocation::parent(); + let dest = MultiLocation::new(1, X1(Parachain(other_para_id))); + let mut dest_beneficiary = MultiLocation::new(1, X1(Parachain(other_para_id))) + .appended_with(AccountId32 { + network: None, + id: sp_runtime::AccountId32::new([3; 32]).into(), + }) + .unwrap(); + dest_beneficiary.reanchor(&dest, XcmConfig::UniversalLocation::get()).unwrap(); + + let reserve_account = 
LocationToAccountId::convert_location(&dest) + .expect("Sovereign account for reserves"); + let balance_to_transfer = 1_000_000_000_000_u128; + + // open HRMP to other parachain + mock_open_hrmp_channel::( + runtime_para_id.into(), + other_para_id.into(), + included_head, + &alice, + ); + + // we calculate exact delivery fees _after_ sending the message by weighing the sent + // xcm, and this delivery fee varies for different runtimes, so just add enough buffer, + // then verify the arithmetics check out on final balance. + let delivery_fees_buffer = 40_000_000_000u128; + // drip 2xED + transfer_amount + delivery_fees_buffer to Alice account + let alice_account_init_balance = existential_deposit.saturating_mul(2.into()) + + balance_to_transfer.into() + + delivery_fees_buffer.into(); + let _ = >::deposit_creating( + &alice_account, + alice_account_init_balance, + ); + // SA of target location needs to have at least ED, otherwise making reserve fails + let _ = >::deposit_creating( + &reserve_account, + existential_deposit, + ); + + // we just check here, that user retains enough balance after withdrawal + // and also we check if `balance_to_transfer` is more than `existential_deposit`, + assert!( + (>::free_balance(&alice_account) - + balance_to_transfer.into()) >= + existential_deposit + ); + // SA has just ED + assert_eq!( + >::free_balance(&reserve_account), + existential_deposit + ); + + // local native asset (pallet_balances) + let asset_to_transfer = MultiAsset { + fun: Fungible(balance_to_transfer.into()), + id: Concrete(native_asset), + }; + + // pallet_xcm call reserve transfer + assert_ok!(>::limited_reserve_transfer_assets( + RuntimeHelper::::origin_of(alice_account.clone()), + Box::new(dest.into_versioned()), + Box::new(dest_beneficiary.into_versioned()), + Box::new(VersionedMultiAssets::from(MultiAssets::from(asset_to_transfer))), + 0, + weight_limit, + )); + + // check events + // check pallet_xcm attempted + RuntimeHelper::::assert_pallet_xcm_event_outcome( + &unwrap_pallet_xcm_event, + |outcome| { + assert_ok!(outcome.ensure_complete()); + }, + ); + + // check that xcm was sent + let xcm_sent_message_hash = >::events() + .into_iter() + .filter_map(|e| unwrap_xcmp_queue_event(e.event.encode())) + .find_map(|e| match e { + cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { message_hash } => + Some(message_hash), + _ => None, + }); + + // read xcm + let xcm_sent = RuntimeHelper::::take_xcm( + other_para_id.into(), + ) + .unwrap(); + + let delivery_fees = get_fungible_delivery_fees::< + ::XcmSender, + >(dest, Xcm::try_from(xcm_sent.clone()).unwrap()); + + assert_eq!( + xcm_sent_message_hash, + Some(xcm_sent.using_encoded(sp_io::hashing::blake2_256)) + ); + let mut xcm_sent: Xcm<()> = xcm_sent.try_into().expect("versioned xcm"); + + // check sent XCM Program to other parachain + println!("reserve_transfer_native_asset_works sent xcm: {:?}", xcm_sent); + let reserve_assets_deposited = MultiAssets::from(vec![MultiAsset { + id: Concrete(MultiLocation { parents: 1, interior: Here }), + fun: Fungible(1000000000000), + }]); + + assert_matches_reserve_asset_deposited_instructions( + &mut xcm_sent, + &reserve_assets_deposited, + &dest_beneficiary, + ); + + // check alice account decreased by balance_to_transfer ( + delivery_fees) + assert_eq!( + >::free_balance(&alice_account), + alice_account_init_balance - balance_to_transfer.into() - delivery_fees.into() + ); + + // check reserve account + // check reserve account increased by balance_to_transfer + assert_eq!( + 
>::free_balance(&reserve_account), + existential_deposit + balance_to_transfer.into() + ); + }) +} diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs new file mode 100644 index 0000000000000000000000000000000000000000..851fcd5c7d68f8cfcc8354c1d70f0dbb9c450031 --- /dev/null +++ b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs @@ -0,0 +1,598 @@ +// Copyright (C) 2023 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Module contains predefined test-case scenarios for `Runtime` with various assets transferred +//! over a bridge. + +use crate::{assert_matches_reserve_asset_deposited_instructions, get_fungible_delivery_fees}; +use codec::Encode; +use cumulus_primitives_core::XcmpMessageSource; +use frame_support::{ + assert_ok, + traits::{Currency, Get, OnFinalize, OnInitialize, OriginTrait, ProcessMessageError}, +}; +use frame_system::pallet_prelude::BlockNumberFor; +use parachains_common::{AccountId, Balance}; +use parachains_runtimes_test_utils::{ + mock_open_hrmp_channel, AccountIdOf, BalanceOf, CollatorSessionKeys, ExtBuilder, RuntimeHelper, + ValidatorIdOf, XcmReceivedFrom, +}; +use sp_runtime::{traits::StaticLookup, Saturating}; +use xcm::{latest::prelude::*, VersionedMultiAssets}; +use xcm_builder::{CreateMatcher, MatchXcm}; +use xcm_executor::{traits::ConvertLocation, XcmExecutor}; + +pub struct TestBridgingConfig { + pub bridged_network: NetworkId, + pub local_bridge_hub_para_id: u32, + pub local_bridge_hub_location: MultiLocation, + pub bridged_target_location: MultiLocation, +} + +/// Test-case makes sure that `Runtime` can initiate **reserve transfer assets** over bridge. 
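+///
+/// Rough flow of the scenario below: open an HRMP channel to the local bridge hub, fund the
+/// sender account and the target location's sovereign account, call
+/// `limited_reserve_transfer_assets`, then decode the XCM sent towards the bridge hub and assert
+/// that it carries an `ExportMessage` wrapping the expected reserve-asset-deposited program.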
+pub fn limited_reserve_transfer_assets_for_native_asset_works< + Runtime, + AllPalletsWithoutSystem, + XcmConfig, + HrmpChannelOpener, + HrmpChannelSource, + LocationToAccountId, +>( + collator_session_keys: CollatorSessionKeys, + existential_deposit: BalanceOf, + alice_account: AccountIdOf, + unwrap_pallet_xcm_event: Box) -> Option>>, + unwrap_xcmp_queue_event: Box< + dyn Fn(Vec) -> Option>, + >, + prepare_configuration: fn() -> TestBridgingConfig, + weight_limit: WeightLimit, + maybe_paid_export_message: Option, + delivery_fees_account: Option>, +) where + Runtime: frame_system::Config + + pallet_balances::Config + + pallet_session::Config + + pallet_xcm::Config + + parachain_info::Config + + pallet_collator_selection::Config + + cumulus_pallet_parachain_system::Config + + cumulus_pallet_xcmp_queue::Config, + AllPalletsWithoutSystem: + OnInitialize> + OnFinalize>, + AccountIdOf: Into<[u8; 32]>, + ValidatorIdOf: From>, + BalanceOf: From, + ::Balance: From + Into, + XcmConfig: xcm_executor::Config, + LocationToAccountId: ConvertLocation>, + ::AccountId: + Into<<::RuntimeOrigin as OriginTrait>::AccountId>, + <::Lookup as StaticLookup>::Source: + From<::AccountId>, + ::AccountId: From, + HrmpChannelOpener: frame_support::inherent::ProvideInherent< + Call = cumulus_pallet_parachain_system::Call, + >, + HrmpChannelSource: XcmpMessageSource, +{ + let runtime_para_id = 1000; + ExtBuilder::::default() + .with_collators(collator_session_keys.collators()) + .with_session_keys(collator_session_keys.session_keys()) + .with_tracing() + .with_safe_xcm_version(3) + .with_para_id(runtime_para_id.into()) + .build() + .execute_with(|| { + let mut alice = [0u8; 32]; + alice[0] = 1; + let included_head = RuntimeHelper::::run_to_block( + 2, + AccountId::from(alice).into(), + ); + + // prepare bridge config + let TestBridgingConfig { + bridged_network, + local_bridge_hub_para_id, + bridged_target_location: target_location_from_different_consensus, + .. + } = prepare_configuration(); + + let reserve_account = + LocationToAccountId::convert_location(&target_location_from_different_consensus) + .expect("Sovereign account for reserves"); + let balance_to_transfer = 1_000_000_000_000_u128; + let native_asset = MultiLocation::parent(); + + // open HRMP to bridge hub + mock_open_hrmp_channel::( + runtime_para_id.into(), + local_bridge_hub_para_id.into(), + included_head, + &alice, + ); + + // we calculate exact delivery fees _after_ sending the message by weighing the sent + // xcm, and this delivery fee varies for different runtimes, so just add enough buffer, + // then verify the arithmetics check out on final balance. 
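+			// In other words, assuming the buffer below covers the actual fees, the final balance of
+			// `alice_account` is expected to equal `alice_account_init_balance` minus
+			// `balance_to_transfer` minus the measured delivery fees (asserted at the end of this test).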
+ let delivery_fees_buffer = 8_000_000_000_000u128; + // drip ED + transfer_amount + delivery_fees_buffer to Alice account + let alice_account_init_balance = + existential_deposit + balance_to_transfer.into() + delivery_fees_buffer.into(); + let _ = >::deposit_creating( + &alice_account, + alice_account_init_balance, + ); + // SA of target location needs to have at least ED, otherwise making reserve fails + let _ = >::deposit_creating( + &reserve_account, + existential_deposit, + ); + + // we just check here, that user retains enough balance after withdrawal + // and also we check if `balance_to_transfer` is more than `existential_deposit`, + assert!( + (>::free_balance(&alice_account) - + balance_to_transfer.into()) >= + existential_deposit + ); + // SA has just ED + assert_eq!( + >::free_balance(&reserve_account), + existential_deposit + ); + + let delivery_fees_account_balance_before = delivery_fees_account + .as_ref() + .map(|dfa| >::free_balance(dfa)) + .unwrap_or(0.into()); + + // local native asset (pallet_balances) + let asset_to_transfer = MultiAsset { + fun: Fungible(balance_to_transfer.into()), + id: Concrete(native_asset), + }; + + // destination is (some) account relative to the destination different consensus + let target_destination_account = MultiLocation { + parents: 0, + interior: X1(AccountId32 { + network: Some(bridged_network), + id: sp_runtime::AccountId32::new([3; 32]).into(), + }), + }; + + let assets_to_transfer = MultiAssets::from(asset_to_transfer); + let mut expected_assets = assets_to_transfer.clone(); + let context = XcmConfig::UniversalLocation::get(); + expected_assets + .reanchor(&target_location_from_different_consensus, context) + .unwrap(); + + let expected_beneficiary = target_destination_account; + + // do pallet_xcm call reserve transfer + assert_ok!(>::limited_reserve_transfer_assets( + RuntimeHelper::::origin_of(alice_account.clone()), + Box::new(target_location_from_different_consensus.into_versioned()), + Box::new(target_destination_account.into_versioned()), + Box::new(VersionedMultiAssets::from(assets_to_transfer)), + 0, + weight_limit, + )); + + // check events + // check pallet_xcm attempted + RuntimeHelper::::assert_pallet_xcm_event_outcome( + &unwrap_pallet_xcm_event, + |outcome| { + assert_ok!(outcome.ensure_complete()); + }, + ); + + // check that xcm was sent + let xcm_sent_message_hash = >::events() + .into_iter() + .filter_map(|e| unwrap_xcmp_queue_event(e.event.encode())) + .find_map(|e| match e { + cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { message_hash } => + Some(message_hash), + _ => None, + }); + + // read xcm + let xcm_sent = RuntimeHelper::::take_xcm( + local_bridge_hub_para_id.into(), + ) + .unwrap(); + assert_eq!( + xcm_sent_message_hash, + Some(xcm_sent.using_encoded(sp_io::hashing::blake2_256)) + ); + let mut xcm_sent: Xcm<()> = xcm_sent.try_into().expect("versioned xcm"); + + // check sent XCM ExportMessage to BridgeHub + + let mut delivery_fees = 0; + // 1. check paid or unpaid + if let Some(expected_fee_asset_id) = maybe_paid_export_message { + xcm_sent + .0 + .matcher() + .match_next_inst(|instr| match instr { + WithdrawAsset(_) => Ok(()), + _ => Err(ProcessMessageError::BadFormat), + }) + .expect("contains WithdrawAsset") + .match_next_inst(|instr| match instr { + BuyExecution { fees, .. 
} if fees.id.eq(&expected_fee_asset_id) => Ok(()), + _ => Err(ProcessMessageError::BadFormat), + }) + .expect("contains BuyExecution") + } else { + xcm_sent + .0 + .matcher() + .match_next_inst(|instr| match instr { + // first instruction could be UnpaidExecution (because we could have + // explicit unpaid execution on BridgeHub) + UnpaidExecution { weight_limit, check_origin } + if weight_limit == &Unlimited && check_origin.is_none() => + Ok(()), + _ => Err(ProcessMessageError::BadFormat), + }) + .expect("contains UnpaidExecution") + } + // 2. check ExportMessage + .match_next_inst(|instr| match instr { + // next instruction is ExportMessage + ExportMessage { network, destination, xcm: inner_xcm } => { + assert_eq!(network, &bridged_network); + let (_, target_location_junctions_without_global_consensus) = + target_location_from_different_consensus + .interior + .split_global() + .expect("split works"); + assert_eq!(destination, &target_location_junctions_without_global_consensus); + // Call `SendXcm::validate` to get delivery fees. + delivery_fees = get_fungible_delivery_fees::< + ::XcmSender, + >(target_location_from_different_consensus, inner_xcm.clone()); + assert_matches_reserve_asset_deposited_instructions( + inner_xcm, + &expected_assets, + &expected_beneficiary, + ); + Ok(()) + }, + _ => Err(ProcessMessageError::BadFormat), + }) + .expect("contains ExportMessage"); + + // check alice account decreased by balance_to_transfer + assert_eq!( + >::free_balance(&alice_account), + alice_account_init_balance + .saturating_sub(balance_to_transfer.into()) + .saturating_sub(delivery_fees.into()) + ); + + // check reserve account increased by balance_to_transfer + assert_eq!( + >::free_balance(&reserve_account), + existential_deposit + balance_to_transfer.into() + ); + + // check dedicated account increased by delivery fees (if configured) + if let Some(delivery_fees_account) = delivery_fees_account { + let delivery_fees_account_balance_after = + >::free_balance(&delivery_fees_account); + assert!( + delivery_fees_account_balance_after - delivery_fees.into() >= + delivery_fees_account_balance_before + ); + } + }) +} + +pub fn receive_reserve_asset_deposited_from_different_consensus_works< + Runtime, + AllPalletsWithoutSystem, + XcmConfig, + LocationToAccountId, + ForeignAssetsPalletInstance, +>( + collator_session_keys: CollatorSessionKeys, + existential_deposit: BalanceOf, + target_account: AccountIdOf, + block_author_account: AccountIdOf, + ( + foreign_asset_id_multilocation, + transfered_foreign_asset_id_amount, + foreign_asset_id_minimum_balance, + ): (MultiLocation, u128, u128), + prepare_configuration: fn() -> TestBridgingConfig, + (bridge_instance, universal_origin, descend_origin): (Junctions, Junction, Junctions), /* bridge adds origin manipulation on the way */ +) where + Runtime: frame_system::Config + + pallet_balances::Config + + pallet_session::Config + + pallet_xcm::Config + + parachain_info::Config + + pallet_collator_selection::Config + + cumulus_pallet_parachain_system::Config + + cumulus_pallet_xcmp_queue::Config + + pallet_assets::Config, + AllPalletsWithoutSystem: + OnInitialize> + OnFinalize>, + AccountIdOf: Into<[u8; 32]>, + ValidatorIdOf: From>, + BalanceOf: From, + XcmConfig: xcm_executor::Config, + LocationToAccountId: ConvertLocation>, + >::AssetId: + From + Into, + >::AssetIdParameter: + From + Into, + >::Balance: + From + Into + From, + ::AccountId: Into<<::RuntimeOrigin as OriginTrait>::AccountId> + + Into, + <::Lookup as StaticLookup>::Source: + From<::AccountId>, 
+ ForeignAssetsPalletInstance: 'static, +{ + ExtBuilder::::default() + .with_collators(collator_session_keys.collators()) + .with_session_keys(collator_session_keys.session_keys()) + .with_tracing() + .build() + .execute_with(|| { + // Set account as block author, who will receive fees + RuntimeHelper::::run_to_block( + 2, + block_author_account.clone().into(), + ); + + // prepare bridge config + let TestBridgingConfig { local_bridge_hub_location, .. } = prepare_configuration(); + + // drip 'ED' user target account + let _ = >::deposit_creating( + &target_account, + existential_deposit, + ); + + // sovereign account as foreign asset owner (can be whoever for this scenario, doesnt + // matter) + let sovereign_account_as_owner_of_foreign_asset = + LocationToAccountId::convert_location(&MultiLocation::parent()).unwrap(); + + // staking pot account for collecting local native fees from `BuyExecution` + let staking_pot = >::account_id(); + let _ = >::deposit_creating( + &staking_pot, + existential_deposit, + ); + + // create foreign asset for wrapped/derivated representation + assert_ok!( + >::force_create( + RuntimeHelper::::root_origin(), + foreign_asset_id_multilocation.into(), + sovereign_account_as_owner_of_foreign_asset.clone().into(), + true, // is_sufficient=true + foreign_asset_id_minimum_balance.into() + ) + ); + + // Balances before + assert_eq!( + >::free_balance(&target_account), + existential_deposit.clone() + ); + assert_eq!( + >::free_balance(&block_author_account), + 0.into() + ); + assert_eq!( + >::free_balance(&staking_pot), + existential_deposit.clone() + ); + + // ForeignAssets balances before + assert_eq!( + >::balance( + foreign_asset_id_multilocation.into(), + &target_account + ), + 0.into() + ); + assert_eq!( + >::balance( + foreign_asset_id_multilocation.into(), + &block_author_account + ), + 0.into() + ); + assert_eq!( + >::balance( + foreign_asset_id_multilocation.into(), + &staking_pot + ), + 0.into() + ); + + let expected_assets = MultiAssets::from(vec![MultiAsset { + id: Concrete(foreign_asset_id_multilocation), + fun: Fungible(transfered_foreign_asset_id_amount), + }]); + let expected_beneficiary = MultiLocation { + parents: 0, + interior: X1(AccountId32 { network: None, id: target_account.clone().into() }), + }; + + // Call received XCM execution + let xcm = Xcm(vec![ + DescendOrigin(bridge_instance), + UniversalOrigin(universal_origin), + DescendOrigin(descend_origin), + ReserveAssetDeposited(expected_assets.clone()), + ClearOrigin, + BuyExecution { + fees: MultiAsset { + id: Concrete(foreign_asset_id_multilocation), + fun: Fungible(transfered_foreign_asset_id_amount), + }, + weight_limit: Unlimited, + }, + DepositAsset { assets: Wild(AllCounted(1)), beneficiary: expected_beneficiary }, + SetTopic([ + 220, 188, 144, 32, 213, 83, 111, 175, 44, 210, 111, 19, 90, 165, 191, 112, 140, + 247, 192, 124, 42, 17, 153, 141, 114, 34, 189, 20, 83, 69, 237, 173, + ]), + ]); + assert_matches_reserve_asset_deposited_instructions( + &mut xcm.clone(), + &expected_assets, + &expected_beneficiary, + ); + + let hash = xcm.using_encoded(sp_io::hashing::blake2_256); + + // execute xcm as XcmpQueue would do + let outcome = XcmExecutor::::execute_xcm( + local_bridge_hub_location, + xcm, + hash, + RuntimeHelper::::xcm_max_weight( + XcmReceivedFrom::Sibling, + ), + ); + assert_eq!(outcome.ensure_complete(), Ok(())); + + // author actual balance after (received fees from Trader for ForeignAssets) + let author_received_fees = + >::balance( + foreign_asset_id_multilocation.into(), + 
&block_author_account, + ); + + // Balances after (untouched) + assert_eq!( + >::free_balance(&target_account), + existential_deposit.clone() + ); + assert_eq!( + >::free_balance(&block_author_account), + 0.into() + ); + assert_eq!( + >::free_balance(&staking_pot), + existential_deposit.clone() + ); + + // ForeignAssets balances after + assert_eq!( + >::balance( + foreign_asset_id_multilocation.into(), + &target_account + ), + (transfered_foreign_asset_id_amount - author_received_fees.into()).into() + ); + assert_eq!( + >::balance( + foreign_asset_id_multilocation.into(), + &block_author_account + ), + author_received_fees + ); + assert_eq!( + >::balance( + foreign_asset_id_multilocation.into(), + &staking_pot + ), + 0.into() + ); + }) +} + +pub fn report_bridge_status_from_xcm_bridge_router_works< + Runtime, + AllPalletsWithoutSystem, + XcmConfig, + LocationToAccountId, + XcmBridgeHubRouterInstance, +>( + collator_session_keys: CollatorSessionKeys, + prepare_configuration: fn() -> TestBridgingConfig, + congested_message: fn() -> Xcm, + uncongested_message: fn() -> Xcm, +) where + Runtime: frame_system::Config + + pallet_balances::Config + + pallet_session::Config + + pallet_xcm::Config + + parachain_info::Config + + pallet_collator_selection::Config + + cumulus_pallet_parachain_system::Config + + cumulus_pallet_xcmp_queue::Config + + pallet_xcm_bridge_hub_router::Config, + AllPalletsWithoutSystem: + OnInitialize> + OnFinalize>, + AccountIdOf: Into<[u8; 32]>, + ValidatorIdOf: From>, + BalanceOf: From, + ::Balance: From + Into, + XcmConfig: xcm_executor::Config, + LocationToAccountId: ConvertLocation>, + ::AccountId: + Into<<::RuntimeOrigin as OriginTrait>::AccountId>, + <::Lookup as StaticLookup>::Source: + From<::AccountId>, + ::AccountId: From, + XcmBridgeHubRouterInstance: 'static, +{ + ExtBuilder::::default() + .with_collators(collator_session_keys.collators()) + .with_session_keys(collator_session_keys.session_keys()) + .with_tracing() + .build() + .execute_with(|| { + let report_bridge_status = |is_congested: bool| { + // prepare bridge config + let TestBridgingConfig { local_bridge_hub_location, .. } = prepare_configuration(); + + // Call received XCM execution + let xcm = if is_congested { congested_message() } else { uncongested_message() }; + let hash = xcm.using_encoded(sp_io::hashing::blake2_256); + + // execute xcm as XcmpQueue would do + let outcome = XcmExecutor::::execute_xcm( + local_bridge_hub_location, + xcm, + hash, + RuntimeHelper::::xcm_max_weight(XcmReceivedFrom::Sibling), + ); + assert_eq!(outcome.ensure_complete(), Ok(())); + assert_eq!(is_congested, pallet_xcm_bridge_hub_router::Pallet::::bridge().is_congested); + }; + + report_bridge_status(true); + report_bridge_status(false); + }) +} diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/xcm_helpers.rs b/cumulus/parachains/runtimes/assets/test-utils/src/xcm_helpers.rs new file mode 100644 index 0000000000000000000000000000000000000000..0aebe38fef539db79c14f2d29053fc006565da79 --- /dev/null +++ b/cumulus/parachains/runtimes/assets/test-utils/src/xcm_helpers.rs @@ -0,0 +1,108 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus. If not, see .
+
+//! Helpers for calculating XCM delivery fees.
+
+use xcm::latest::prelude::*;
+
+/// Returns the delivery fees amount for pallet xcm's `teleport_assets` and
+/// `reserve_transfer_assets` extrinsics.
+/// Because it returns only a `u128`, it assumes delivery fees are only paid
+/// in one asset and that asset is known.
+pub fn transfer_assets_delivery_fees(
+	assets: MultiAssets,
+	fee_asset_item: u32,
+	weight_limit: WeightLimit,
+	beneficiary: MultiLocation,
+	destination: MultiLocation,
+) -> u128 {
+	let message = teleport_assets_dummy_message(assets, fee_asset_item, weight_limit, beneficiary);
+	get_fungible_delivery_fees::(destination, message)
+}
+
+/// Returns the delivery fees amount for a query response as a result of the execution
+/// of an `ExpectError` instruction with no error.
+pub fn query_response_delivery_fees(querier: MultiLocation) -> u128 {
+	// Message to calculate delivery fees; its encoded size is what's important.
+	// This message reports that there was no error; if an error is reported, the encoded size
+	// would be different.
+	let message = Xcm(vec![
+		SetFeesMode { jit_withdraw: true },
+		QueryResponse {
+			query_id: 0, // Dummy query id
+			response: Response::ExecutionResult(None),
+			max_weight: Weight::zero(),
+			querier: Some(querier),
+		},
+		SetTopic([0u8; 32]), // Dummy topic
+	]);
+	get_fungible_delivery_fees::(querier, message)
+}
+
+/// Returns the delivery fees amount for the execution of `PayOverXcm`
+pub fn pay_over_xcm_delivery_fees(
+	interior: Junctions,
+	destination: MultiLocation,
+	beneficiary: MultiLocation,
+	asset: MultiAsset,
+) -> u128 {
+	// This is a dummy message.
+	// The encoded size is all that matters for delivery fees.
+	let message = Xcm(vec![
+		DescendOrigin(interior),
+		UnpaidExecution { weight_limit: Unlimited, check_origin: None },
+		SetAppendix(Xcm(vec![
+			SetFeesMode { jit_withdraw: true },
+			ReportError(QueryResponseInfo { destination, query_id: 0, max_weight: Weight::zero() }),
+		])),
+		TransferAsset { beneficiary, assets: vec![asset].into() },
+	]);
+	get_fungible_delivery_fees::(destination, message)
+}
+
+/// Approximates the actual message sent by the teleport extrinsic.
+/// The assets are not reanchored and the topic is a dummy one.
+/// However, it should have the same encoded size, which is what matters for delivery fees.
+/// It also has the same encoded size as the one created by the reserve transfer assets extrinsic. 
+fn teleport_assets_dummy_message( + assets: MultiAssets, + fee_asset_item: u32, + weight_limit: WeightLimit, + beneficiary: MultiLocation, +) -> Xcm<()> { + Xcm(vec![ + ReceiveTeleportedAsset(assets.clone()), // Same encoded size as `ReserveAssetDeposited` + ClearOrigin, + BuyExecution { fees: assets.get(fee_asset_item as usize).unwrap().clone(), weight_limit }, + DepositAsset { assets: Wild(AllCounted(assets.len() as u32)), beneficiary }, + SetTopic([0u8; 32]), // Dummy topic + ]) +} + +/// Given a message, a sender, and a destination, it returns the delivery fees +fn get_fungible_delivery_fees(destination: MultiLocation, message: Xcm<()>) -> u128 { + let Ok((_, delivery_fees)) = validate_send::(destination, message) else { + unreachable!("message can be sent; qed") + }; + if let Some(delivery_fee) = delivery_fees.inner().first() { + let Fungible(delivery_fee_amount) = delivery_fee.fun else { + unreachable!("asset is fungible; qed"); + }; + delivery_fee_amount + } else { + 0 + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/README.md b/cumulus/parachains/runtimes/bridge-hubs/README.md index 487c601ef84023310fe7e15c5642ca060c9e550b..cf617db730dd7faa19d932dbe6a0406b7a0c15f2 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/README.md +++ b/cumulus/parachains/runtimes/bridge-hubs/README.md @@ -1,13 +1,14 @@ - [Bridge-hub Parachains](#bridge-hub-parachains) - [Requirements for local run/testing](#requirements-for-local-runtesting) - - [How to test local Rococo <-> Wococo bridge](#how-to-test-local-rococo---wococo-bridge) - - [Run chains (Rococo + BridgeHub, Wococo + BridgeHub) with - zombienet](#run-chains-rococo--bridgehub-wococo--bridgehub-with-zombienet) - - [Run relayer (BridgeHubRococo, BridgeHubWococo)](#run-relayer-bridgehubrococo-bridgehubwococo) - - [Run with script (alternative 1)](#run-with-script-alternative-1) - - [Run with binary (alternative 2)](#run-with-binary-alternative-2) - - [Send messages - transfer asset over bridge](#send-messages---transfer-asset-over-bridge) - - [How to test live BridgeHubRococo/BridgeHubWococo](#how-to-test-live-bridgehubrococobridgehubwococo) + - [How to test local Rococo <-> Westend bridge](#how-to-test-local-rococo---westend-bridge) + - [Run Rococo/Westend chains with zombienet](#run-rococowestend-chains-with-zombienet) + - [Init bridge and run relayer between BridgeHubRococo and + BridgeHubWestend](#init-bridge-and-run-relayer-between-bridgehubrococo-and-bridgehubwestend) + - [Initialize configuration for transfer asset over bridge + (ROCs/WNDs)](#initialize-configuration-for-transfer-asset-over-bridge-rocswnds) + - [Send messages - transfer asset over bridge (ROCs/WNDs)](#send-messages---transfer-asset-over-bridge-rocswnds) + - [Claim relayer's rewards on BridgeHubRococo and + BridgeHubWestend](#claim-relayers-rewards-on-bridgehubrococo-and-bridgehubwestend) - [How to test local BridgeHubKusama/BridgeHubPolkadot](#how-to-test-local-bridgehubkusamabridgehubpolkadot) # Bridge-hub Parachains @@ -42,17 +43,19 @@ Copy the apropriate binary (zombienet-linux) from the latest release to ~/local_ --- # 2. 
Build polkadot binary -git clone https://github.com/paritytech/polkadot.git -cd polkadot -# if you want to test Kusama/Polkadot bridge, we need "sudo pallet + fast-runtime", -# so please, find the latest polkadot's repository branch `it/release-vX.Y.Z-fast-sudo` -# e.g: -# git checkout -b it/release-v0.9.43-fast-sudo --track origin/it/release-v0.9.43-fast-sudo +We need polkadot binary with "fast-runtime" feature: -cargo build --release --features fast-runtime +cd +cargo build --release --features fast-runtime --bin polkadot cp target/release/polkadot ~/local_bridge_testing/bin/polkadot +cargo build --release --features fast-runtime --bin polkadot-prepare-worker +cp target/release/polkadot-prepare-worker ~/local_bridge_testing/bin/polkadot-prepare-worker + +cargo build --release --features fast-runtime --bin polkadot-execute-worker +cp target/release/polkadot-execute-worker ~/local_bridge_testing/bin/polkadot-execute-worker + --- # 3. Build substrate-relay binary @@ -71,162 +74,131 @@ cp target/release/substrate-relay ~/local_bridge_testing/bin/substrate-relay --- # 4. Build cumulus polkadot-parachain binary -cd - -# checkout desired branch or use master: -# git checkout -b master --track origin/master +cd -cargo build --release --locked --bin polkadot-parachain +cargo build --release -p polkadot-parachain-bin cp target/release/polkadot-parachain ~/local_bridge_testing/bin/polkadot-parachain cp target/release/polkadot-parachain ~/local_bridge_testing/bin/polkadot-parachain-asset-hub - - - -# !!! READ HERE (TODO remove once all mentioned branches bellow are merged) -# The use case "moving assets over bridge" is not merged yet and is implemented in separate branches. -# So, if you want to try it, you need to checkout different branch and continue with these instructions there. 
- -# For Kusama/Polkadot local bridge testing: -# -# build BridgeHubs (polkadot-parachain) from branch: -# git checkout -b bridge-hub-kusama-polkadot --track origin/bridge-hub-kusama-polkadot -# cargo build --release --locked --bin polkadot-parachain -# cp target/release/polkadot-parachain ~/local_bridge_testing/bin/polkadot-parachain -# -# build AssetHubs (polkadot-parachain-asset-hub) from branch: -# git checkout -b bko-transfer-asset-via-bridge-pallet-xcm --track origin/bko-transfer-asset-via-bridge-pallet-xcm -# cargo build --release --locked --bin polkadot-parachain -# cp target/release/polkadot-parachain ~/local_bridge_testing/bin/polkadot-parachain-asset-hub - -# For Rococo/Wococo local bridge testing: -# -# build AssetHubs (polkadot-parachain-asset-hub) from branch: -# git checkout -b bko-transfer-asset-via-bridge-pallet-xcm-ro-wo --track origin/bko-transfer-asset-via-bridge-pallet-xcm-ro-wo -# cargo build --release --locked --bin polkadot-parachain -# cp target/release/polkadot-parachain ~/local_bridge_testing/bin/polkadot-parachain-asset-hub ``` -## How to test local Rococo <-> Wococo bridge +## How to test local Rococo <-> Westend bridge -### Run chains (Rococo + BridgeHub + AssetHub, Wococo + BridgeHub + AssetHub) with zombienet +### Run Rococo/Westend chains with zombienet ``` +cd + # Rococo + BridgeHubRococo + AssetHub for Rococo (mirroring Kusama) POLKADOT_BINARY_PATH=~/local_bridge_testing/bin/polkadot \ POLKADOT_PARACHAIN_BINARY_PATH=~/local_bridge_testing/bin/polkadot-parachain \ POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_ROCOCO=~/local_bridge_testing/bin/polkadot-parachain-asset-hub \ - ~/local_bridge_testing/bin/zombienet-linux --provider native spawn ./zombienet/bridge-hubs/bridge_hub_rococo_local_network.toml + ~/local_bridge_testing/bin/zombienet-linux --provider native spawn ./cumulus/zombienet/bridge-hubs/bridge_hub_rococo_local_network.toml ``` ``` -# Wococo + BridgeHubWococo + AssetHub for Wococo (mirroring Polkadot) +cd + +# Westend + BridgeHubWestend + AssetHub for Westend (mirroring Polkadot) POLKADOT_BINARY_PATH=~/local_bridge_testing/bin/polkadot \ POLKADOT_PARACHAIN_BINARY_PATH=~/local_bridge_testing/bin/polkadot-parachain \ -POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_WOCOCO=~/local_bridge_testing/bin/polkadot-parachain-asset-hub \ - ~/local_bridge_testing/bin/zombienet-linux --provider native spawn ./zombienet/bridge-hubs/bridge_hub_wococo_local_network.toml +POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_WESTEND=~/local_bridge_testing/bin/polkadot-parachain-asset-hub \ + ~/local_bridge_testing/bin/zombienet-linux --provider native spawn ./cumulus/zombienet/bridge-hubs/bridge_hub_westend_local_network.toml ``` -### Run relayer (BridgeHubRococo, BridgeHubWococo) +### Init bridge and run relayer between BridgeHubRococo and BridgeHubWestend **Accounts of BridgeHub parachains:** - `Bob` is pallet owner of all bridge pallets -#### Run with script (alternative 1) -``` -cd -./scripts/bridges_rococo_wococo.sh run-relay -``` - -#### Run with binary (alternative 2) -Need to wait for parachain activation (start producing blocks), then run: - -``` -# 1. 
Init bridges: - -# Rococo -> Wococo -RUST_LOG=runtime=trace,rpc=trace,bridge=trace \ - ~/local_bridge_testing/bin/substrate-relay init-bridge rococo-to-bridge-hub-wococo \ - --source-host localhost \ - --source-port 9942 \ - --source-version-mode Auto \ - --target-host localhost \ - --target-port 8945 \ - --target-version-mode Auto \ - --target-signer //Bob - -# Wococo -> Rococo -RUST_LOG=runtime=trace,rpc=trace,bridge=trace \ - ~/local_bridge_testing/bin/substrate-relay init-bridge wococo-to-bridge-hub-rococo \ - --source-host localhost \ - --source-port 9945 \ - --source-version-mode Auto \ - --target-host localhost \ - --target-port 8943 \ - --target-version-mode Auto \ - --target-signer //Bob - -# 2. Relay relay-chain headers, parachain headers and messages** -RUST_LOG=runtime=trace,rpc=trace,bridge=trace \ - ~/local_bridge_testing/bin/substrate-relay relay-headers-and-messages bridge-hub-rococo-bridge-hub-wococo \ - --rococo-host localhost \ - --rococo-port 9942 \ - --rococo-version-mode Auto \ - --bridge-hub-rococo-host localhost \ - --bridge-hub-rococo-port 8943 \ - --bridge-hub-rococo-version-mode Auto \ - --bridge-hub-rococo-signer //Charlie \ - --wococo-headers-to-bridge-hub-rococo-signer //Bob \ - --wococo-parachains-to-bridge-hub-rococo-signer //Bob \ - --bridge-hub-rococo-transactions-mortality 4 \ - --wococo-host localhost \ - --wococo-port 9945 \ - --wococo-version-mode Auto \ - --bridge-hub-wococo-host localhost \ - --bridge-hub-wococo-port 8945 \ - --bridge-hub-wococo-version-mode Auto \ - --bridge-hub-wococo-signer //Charlie \ - --rococo-headers-to-bridge-hub-wococo-signer //Bob \ - --rococo-parachains-to-bridge-hub-wococo-signer //Bob \ - --bridge-hub-wococo-transactions-mortality 4 \ - --lane 00000001 +#### Run with script +``` +cd + +./cumulus/scripts/bridges_rococo_westend.sh run-relay ``` **Check relay-chain headers relaying:** - Rococo parachain: - https://polkadot.js.org/apps/?rpc=ws%3A%2F%2F127.0.0.1%3A8943#/chainstate - Pallet: - **bridgeWococoGrandpa** - Keys: **bestFinalized()** -- Wococo parachain: - https://polkadot.js.org/apps/?rpc=ws%3A%2F%2F127.0.0.1%3A8945#/chainstate - Pallet: - **bridgeRococoGrandpa** - Keys: **bestFinalized()** + **bridgeWestendGrandpa** - Keys: **bestFinalized()** +- Westend parachain: - https://polkadot.js.org/apps/?rpc=ws%3A%2F%2F127.0.0.1%3A8945#/chainstate - Pallet: + **bridgeRococoGrandpa** - Keys: **bestFinalized()** **Check parachain headers relaying:** - Rococo parachain: - https://polkadot.js.org/apps/?rpc=ws%3A%2F%2F127.0.0.1%3A8943#/chainstate - Pallet: - **bridgeWococoParachain** - Keys: **bestParaHeads()** -- Wococo parachain: - https://polkadot.js.org/apps/?rpc=ws%3A%2F%2F127.0.0.1%3A8945#/chainstate - Pallet: - **bridgeRococoParachain** - Keys: **bestParaHeads()** + **bridgeWestendParachains** - Keys: **parasInfo(None)** +- Westend parachain: - https://polkadot.js.org/apps/?rpc=ws%3A%2F%2F127.0.0.1%3A8945#/chainstate - Pallet: + **bridgeRococoParachains** - Keys: **parasInfo(None)** -### Send messages - transfer asset over bridge +### Initialize configuration for transfer asset over bridge (ROCs/WNDs) -TODO: see `# !!! 
READ HERE` above
+This initialization does several things:
+- creates `ForeignAssets` entries for wrappedROCs/wrappedWNDs
+- drips (funds) the sovereign account (SA) for AssetHubRococo on AssetHubWestend (and vice versa), which holds the reserved assets on the source chains
+```
+cd
+
+./cumulus/scripts/bridges_rococo_westend.sh init-asset-hub-rococo-local
+./cumulus/scripts/bridges_rococo_westend.sh init-bridge-hub-rococo-local
+./cumulus/scripts/bridges_rococo_westend.sh init-asset-hub-westend-local
+./cumulus/scripts/bridges_rococo_westend.sh init-bridge-hub-westend-local
+```
+
+### Send messages - transfer asset over bridge (ROCs/WNDs)
+
+Do reserve-backed transfers:
+```
+cd
-## How to test live BridgeHubRococo/BridgeHubWococo
-(here is still deployed older PoC from branch `origin/bko-transfer-asset-via-bridge`, which uses custom extrinsic, which
-is going to be replaced by `pallet_xcm` usage)
-- uses account seed on Live Rococo:Rockmine2
- ```
- cd
- ./scripts/bridges_rococo_wococo.sh transfer-asset-from-asset-hub-rococo
- ```
-
-- open explorers:
- Rockmine2 (see events `xcmpQueue.XcmpMessageSent`, `bridgeTransfer.ReserveAssetsDeposited`,
- `bridgeTransfer.TransferInitiated`)
- https://polkadot.js.org/apps/?rpc=wss%3A%2F%2Fws-rococo-rockmine2-collator-node-0.parity-testnet.parity.io#/explorer
-
- BridgeHubRococo (see `bridgeWococoMessages.MessageAccepted`)
- https://polkadot.js.org/apps/?rpc=wss%3A%2F%2Frococo-bridge-hub-rpc.polkadot.io#/explorer
- BridgeHubWococo (see
- `bridgeRococoMessages.MessagesReceived`)
- https://polkadot.js.org/apps/?rpc=wss%3A%2F%2Fwococo-bridge-hub-rpc.polkadot.io#/explorer
- Wockmint (see
- `xcmpQueue.Success` for `transfer-asset` and `xcmpQueue.Fail` for `ping-via-bridge`)
- https://polkadot.js.org/apps/?rpc=wss%3A%2F%2Fwococo-wockmint-rpc.polkadot.io#/explorer
- BridgeHubRococo (see
- `bridgeWococoMessages.MessagesDelivered`)
+# ROCs from Rococo's Asset Hub to Westend's.
+./cumulus/scripts/bridges_rococo_westend.sh reserve-transfer-assets-from-asset-hub-rococo-local
+```
+```
+cd
+
+# WNDs from Westend's Asset Hub to Rococo's.
+./cumulus/scripts/bridges_rococo_westend.sh reserve-transfer-assets-from-asset-hub-westend-local
+```
+
+- open explorers (see the zombienet-spawned networks above for the ports):
+ - AssetHubRococo (see events `xcmpQueue.XcmpMessageSent`, `polkadotXcm.Attempted`) https://polkadot.js.org/apps/?rpc=ws://127.0.0.1:9910#/explorer
+ - BridgeHubRococo (see `bridgeWestendMessages.MessageAccepted`) https://polkadot.js.org/apps/?rpc=ws://127.0.0.1:8943#/explorer
+ - BridgeHubWestend (see `bridgeRococoMessages.MessagesReceived`, `xcmpQueue.XcmpMessageSent`) https://polkadot.js.org/apps/?rpc=ws://127.0.0.1:8945#/explorer
+ - AssetHubWestend (see `foreignAssets.Issued`, `xcmpQueue.Success`) https://polkadot.js.org/apps/?rpc=ws://127.0.0.1:9010#/explorer
+ - BridgeHubRococo (see `bridgeWestendMessages.MessagesDelivered`) https://polkadot.js.org/apps/?rpc=ws://127.0.0.1:8943#/explorer
+
+Do reserve-withdraw transfers (once the previous transfers have finished):
+```
+cd
+
+# wrappedWNDs from Rococo's Asset Hub to Westend's.
+./cumulus/scripts/bridges_rococo_westend.sh withdraw-reserve-assets-from-asset-hub-rococo-local
+```
+```
+cd
+
+# wrappedROCs from Westend's Asset Hub to Rococo's.
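+# (Explanatory note: a reserve-withdraw in this direction is expected to burn the wrappedROCs
+# held on Westend's Asset Hub and release the original ROCs kept in reserve on Rococo's Asset Hub.)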
+./cumulus/scripts/bridges_rococo_westend.sh withdraw-reserve-assets-from-asset-hub-westend-local
+```
+
+### Claim relayer's rewards on BridgeHubRococo and BridgeHubWestend
+
+**Accounts of BridgeHub parachains:**
+- `//Charlie` is the relayer account on BridgeHubRococo
+- `//Charlie` is the relayer account on BridgeHubWestend
+
+```
+cd
+
+# Claim rewards on BridgeHubRococo:
+./cumulus/scripts/bridges_rococo_westend.sh claim-rewards-bridge-hub-rococo-local
+
+# Claim rewards on BridgeHubWestend:
+./cumulus/scripts/bridges_rococo_westend.sh claim-rewards-bridge-hub-westend-local
+```
+- open explorers (see the zombienet-spawned networks above for the ports):
+ - BridgeHubRococo (see 2x `bridgeRelayers.RewardPaid`) https://polkadot.js.org/apps/?rpc=ws://127.0.0.1:8943#/explorer
+ - BridgeHubWestend (see 2x `bridgeRelayers.RewardPaid`) https://polkadot.js.org/apps/?rpc=ws://127.0.0.1:8945#/explorer

## How to test local BridgeHubKusama/BridgeHubPolkadot
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/Cargo.toml
index bfb0b9e712709f01da525c9b2fcf71d33bf35d38..2cd002b1c6013fe470c9be756cdfff34f01a0a45 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/Cargo.toml
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/Cargo.toml
@@ -4,6 +4,7 @@ version = "0.1.0"
authors.workspace = true
edition.workspace = true
description = "Kusama's BridgeHub parachain runtime"
+license = "Apache-2.0"
[build-dependencies]
substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true }
@@ -12,7 +13,7 @@ substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder",
codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] }
hex-literal = { version = "0.4.1" }
log = { version = "0.4.20", default-features = false }
-scale-info = { version = "2.9.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
serde = { version = "1.0.188", optional = true, features = ["derive"] }
smallvec = "1.11.0"
@@ -60,6 +61,7 @@ xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkad
# Cumulus
cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false }
+pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false }
cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue", default-features = false }
cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] }
cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false}
@@ -68,7 +70,7 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f
cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false }
cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false }
pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false }
-parachain-info = { path = "../../../pallets/parachain-info", default-features = false }
+parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false }
parachains-common = { path = "../../../common", default-features = false }
[dev-dependencies]
@@
-98,6 +100,7 @@ std = [ "pallet-authorship/std", "pallet-balances/std", "pallet-collator-selection/std", + "pallet-message-queue/std", "pallet-multisig/std", "pallet-session/std", "pallet-timestamp/std", @@ -134,9 +137,11 @@ std = [ ] runtime-benchmarks = [ + "cumulus-pallet-dmp-queue/runtime-benchmarks", "cumulus-pallet-parachain-system/runtime-benchmarks", "cumulus-pallet-session-benchmarking/runtime-benchmarks", "cumulus-pallet-xcmp-queue/runtime-benchmarks", + "cumulus-primitives-core/runtime-benchmarks", "cumulus-primitives-utility/runtime-benchmarks", "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", @@ -144,6 +149,7 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-collator-selection/runtime-benchmarks", + "pallet-message-queue/runtime-benchmarks", "pallet-multisig/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "pallet-utility/runtime-benchmarks", @@ -171,6 +177,7 @@ try-runtime = [ "pallet-authorship/try-runtime", "pallet-balances/try-runtime", "pallet-collator-selection/try-runtime", + "pallet-message-queue/try-runtime", "pallet-multisig/try-runtime", "pallet-session/try-runtime", "pallet-timestamp/try-runtime", diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs index 791751e7736851e46c26fb3056bafaed36389baf..d2db0340790e4b6792e758f5d7bba8941d644b9c 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs @@ -26,6 +26,7 @@ mod weights; pub mod xcm_config; use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; +use polkadot_runtime_common::xcm_sender::NoPriceForMessageDelivery; use sp_api::impl_runtime_apis; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ @@ -40,12 +41,15 @@ use sp_std::prelude::*; use sp_version::NativeVersion; use sp_version::RuntimeVersion; +use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; use frame_support::{ construct_runtime, dispatch::DispatchClass, genesis_builder_helper::{build_config, create_default_config}, parameter_types, - traits::{ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, Everything}, + traits::{ + ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, Everything, TransformOrigin, + }, weights::{ConstantMultiplier, Weight}, PalletId, }; @@ -56,9 +60,7 @@ use frame_system::{ use pallet_xcm::{EnsureXcm, IsVoiceOfBody}; pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; pub use sp_runtime::{MultiAddress, Perbill, Permill}; -use xcm_config::{ - FellowshipLocation, GovernanceLocation, XcmConfig, XcmOriginToTransactDispatchOrigin, -}; +use xcm_config::{FellowshipLocation, GovernanceLocation, XcmOriginToTransactDispatchOrigin}; #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; @@ -70,13 +72,13 @@ use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; use parachains_common::{ impls::DealWithFees, kusama::{consensus::*, currency::*, fee::WeightToFee}, + message_queue::{NarrowOriginToSibling, ParaIdToSibling}, AccountId, Balance, BlockNumber, Hash, Header, Nonce, Signature, AVERAGE_ON_INITIALIZE_RATIO, HOURS, MAXIMUM_BLOCK_WEIGHT, NORMAL_DISPATCH_RATIO, SLOT_DURATION, }; // XCM Imports use xcm::latest::prelude::BodyId; -use xcm_executor::XcmExecutor; /// The address format for describing accounts. 
pub type Address = MultiAddress; @@ -107,7 +109,10 @@ pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. -pub type Migrations = (pallet_collator_selection::migration::v1::MigrateToV1,); +pub type Migrations = ( + // unreleased + pallet_collator_selection::migration::v1::MigrateToV1, +); /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< @@ -247,6 +252,7 @@ impl pallet_balances::Config for Runtime { type MaxReserves = ConstU32<50>; type ReserveIdentifier = [u8; 8]; type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); type MaxHolds = ConstU32<0>; type MaxFreezes = ConstU32<0>; @@ -273,11 +279,12 @@ parameter_types! { } impl cumulus_pallet_parachain_system::Config for Runtime { + type WeightInfo = weights::cumulus_pallet_parachain_system::WeightInfo; type RuntimeEvent = RuntimeEvent; type OnSystemEvent = (); type SelfParaId = parachain_info::Pallet; type OutboundXcmpMessageSource = XcmpQueue; - type DmpMessageHandler = DmpQueue; + type DmpQueue = frame_support::traits::EnqueueWithOrigin; type ReservedDmpWeight = ReservedDmpWeight; type XcmpMessageHandler = XcmpQueue; type ReservedXcmpWeight = ReservedXcmpWeight; @@ -292,6 +299,32 @@ impl cumulus_pallet_parachain_system::Config for Runtime { impl parachain_info::Config for Runtime {} +parameter_types! { + pub MessageQueueServiceWeight: Weight = Perbill::from_percent(35) * RuntimeBlockWeights::get().max_block; +} + +impl pallet_message_queue::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = weights::pallet_message_queue::WeightInfo; + #[cfg(feature = "runtime-benchmarks")] + type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor< + cumulus_primitives_core::AggregateMessageOrigin, + >; + #[cfg(not(feature = "runtime-benchmarks"))] + type MessageProcessor = xcm_builder::ProcessXcmMessage< + AggregateMessageOrigin, + xcm_executor::XcmExecutor, + RuntimeCall, + >; + type Size = u32; + // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: + type QueueChangeHandler = NarrowOriginToSibling; + type QueuePausedQuery = NarrowOriginToSibling; + type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type MaxStale = sp_core::ConstU32<8>; + type ServiceWeight = MessageQueueServiceWeight; +} + impl cumulus_pallet_aura_ext::Config for Runtime {} parameter_types! { @@ -307,20 +340,25 @@ pub type RootOrFellows = EitherOfDiverse< impl cumulus_pallet_xcmp_queue::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type XcmExecutor = XcmExecutor; type ChannelInfo = ParachainSystem; type VersionWrapper = PolkadotXcm; - type ExecuteOverweightOrigin = EnsureRoot; + // Enqueue XCMP messages from siblings for later processing. + type XcmpQueue = TransformOrigin; + type MaxInboundSuspended = sp_core::ConstU32<1_000>; type ControllerOrigin = RootOrFellows; type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; type WeightInfo = weights::cumulus_pallet_xcmp_queue::WeightInfo; - type PriceForSiblingDelivery = (); + type PriceForSiblingDelivery = NoPriceForMessageDelivery; +} + +parameter_types! 
{ + pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; } impl cumulus_pallet_dmp_queue::Config for Runtime { + type WeightInfo = weights::cumulus_pallet_dmp_queue::WeightInfo; type RuntimeEvent = RuntimeEvent; - type XcmExecutor = XcmExecutor; - type ExecuteOverweightOrigin = EnsureRoot; + type DmpSink = frame_support::traits::EnqueueWithOrigin; } pub const PERIOD: u32 = 6 * HOURS; @@ -430,6 +468,7 @@ construct_runtime!( PolkadotXcm: pallet_xcm::{Pallet, Call, Event, Origin, Config} = 31, CumulusXcm: cumulus_pallet_xcm::{Pallet, Event, Origin} = 32, DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event} = 33, + MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 34, // Handy utilities. Utility: pallet_utility::{Pallet, Call, Event} = 40, @@ -437,23 +476,21 @@ construct_runtime!( } ); -#[cfg(feature = "runtime-benchmarks")] -#[macro_use] -extern crate frame_benchmarking; - #[cfg(feature = "runtime-benchmarks")] mod benches { - define_benchmarks!( + frame_benchmarking::define_benchmarks!( [frame_system, SystemBench::] [pallet_balances, Balances] + [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] [pallet_session, SessionBench::] [pallet_utility, Utility] [pallet_timestamp, Timestamp] [pallet_collator_selection, CollatorSelection] [cumulus_pallet_xcmp_queue, XcmpQueue] + [cumulus_pallet_dmp_queue, DmpQueue] // XCM - [pallet_xcm, PolkadotXcm] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -633,6 +670,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -668,12 +706,47 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported between BH and Relay. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + Parent.into(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Reserve transfers are disabled on BH. + None + } + } + use xcm::latest::prelude::*; use xcm_config::KsmRelayLocation; + parameter_types! { + pub ExistentialDepositMultiAsset: Option = Some(( + xcm_config::KsmRelayLocation::get(), + ExistentialDeposit::get() + ).into()); + } + impl pallet_xcm_benchmarks::Config for Runtime { type XcmConfig = xcm_config::XcmConfig; type AccountIdConverter = xcm_config::LocationToAccountId; + type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositMultiAsset, + xcm_config::PriceForParentDelivery, + >; fn valid_destination() -> Result { Ok(KsmRelayLocation::get()) } @@ -714,6 +787,7 @@ impl_runtime_apis! 
{ } impl pallet_xcm_benchmarks::generic::Config for Runtime { + type TransactAsset = Balances; type RuntimeCall = RuntimeCall; fn worst_case_response() -> (u64, Response) { diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/cumulus_pallet_dmp_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/cumulus_pallet_dmp_queue.rs new file mode 100644 index 0000000000000000000000000000000000000000..cc41dcd6cbbb62c1392ae2e7517b5dfe920a5b85 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/cumulus_pallet_dmp_queue.rs @@ -0,0 +1,131 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `cumulus_pallet_dmp_queue` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-10-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=cumulus_pallet_dmp_queue +// --chain=asset-hub-kusama-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `cumulus_pallet_dmp_queue`. 
+pub struct WeightInfo(PhantomData); +impl cumulus_pallet_dmp_queue::WeightInfo for WeightInfo { + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn on_idle_good_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65696` + // Estimated: `69161` + // Minimum execution time: 124_651_000 picoseconds. + Weight::from_parts(127_857_000, 0) + .saturating_add(Weight::from_parts(0, 69161)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + fn on_idle_large_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65659` + // Estimated: `69124` + // Minimum execution time: 65_684_000 picoseconds. 
+ Weight::from_parts(68_039_000, 0) + .saturating_add(Weight::from_parts(0, 69124)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn on_idle_overweight_good_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65726` + // Estimated: `69191` + // Minimum execution time: 117_657_000 picoseconds. + Weight::from_parts(122_035_000, 0) + .saturating_add(Weight::from_parts(0, 69191)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(6)) + } + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + fn on_idle_overweight_large_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65689` + // Estimated: `69154` + // Minimum execution time: 59_799_000 picoseconds. 
+ Weight::from_parts(61_354_000, 0) + .saturating_add(Weight::from_parts(0, 69154)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/cumulus_pallet_parachain_system.rs new file mode 100644 index 0000000000000000000000000000000000000000..a30a2ae8d4d0b50cc5ef9e17e1028e457611f31e --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/cumulus_pallet_parachain_system.rs @@ -0,0 +1,80 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `cumulus_pallet_parachain_system` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-03-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-kusama-dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --chain +// bridge-hub-kusama-dev +// --pallet +// cumulus_pallet_parachain_system +// --extrinsic +// * +// --execution +// wasm +// --wasm-execution +// compiled +// --output +// parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights +// --steps +// 50 +// --repeat +// 20 + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::Weight}; +use sp_std::marker::PhantomData; + +/// Weight functions for `cumulus_pallet_parachain_system`. +pub struct WeightInfo(PhantomData); +impl cumulus_pallet_parachain_system::WeightInfo for WeightInfo { + /// Storage: ParachainSystem LastDmqMqcHead (r:1 w:1) + /// Proof Skipped: ParachainSystem LastDmqMqcHead (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: ParachainSystem ReservedDmpWeightOverride (r:1 w:0) + /// Proof Skipped: ParachainSystem ReservedDmpWeightOverride (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: ParachainSystem ProcessedDownwardMessages (r:0 w:1) + /// Proof Skipped: ParachainSystem ProcessedDownwardMessages (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: MessageQueue Pages (r:0 w:16) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// The range of component `n` is `[0, 1000]`. 
+ fn enqueue_inbound_downward_messages(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `12` + // Estimated: `8013` + // Minimum execution time: 1_686_000 picoseconds. + Weight::from_parts(1_761_000, 0) + .saturating_add(Weight::from_parts(0, 8013)) + // Standard Error: 28_250 + .saturating_add(Weight::from_parts(24_261_433, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/cumulus_pallet_xcmp_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/cumulus_pallet_xcmp_queue.rs index 991cba573bf2a64bf917f5c50a246354c7f82355..ffd311ceecdce25e25c39eddd13aad73e60e3ae4 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/cumulus_pallet_xcmp_queue.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/cumulus_pallet_xcmp_queue.rs @@ -1,43 +1,38 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `cumulus_pallet_xcmp_queue` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-kusama-dev")`, DB CACHE: 1024 +//! HOSTNAME: `Olivers-MacBook-Pro.local`, CPU: `` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-kusama-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// ./target/release/polkadot-parachain // benchmark // pallet -// --chain=bridge-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=cumulus_pallet_xcmp_queue -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/ +// --pallet +// cumulus-pallet-xcmp-queue +// --chain +// bridge-hub-kusama-dev +// --output +// cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/cumulus_pallet_xcmp_queue.rs +// --extrinsic +// #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -56,22 +51,98 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `76` // Estimated: `1561` - // Minimum execution time: 5_129_000 picoseconds. - Weight::from_parts(5_367_000, 0) + // Minimum execution time: 5_000_000 picoseconds. + Weight::from_parts(6_000_000, 0) .saturating_add(Weight::from_parts(0, 1561)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `XcmpQueue::QueueConfig` (r:1 w:1) + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn set_config_with_weight() -> Weight { + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn enqueue_xcmp_message() -> Weight { + // Proof Size summary in bytes: + // Measured: `82` + // Estimated: `3517` + // Minimum execution time: 14_000_000 picoseconds. + Weight::from_parts(14_000_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn suspend_channel() -> Weight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `1561` - // Minimum execution time: 5_050_000 picoseconds. - Weight::from_parts(5_565_000, 0) + // Minimum execution time: 3_000_000 picoseconds. + Weight::from_parts(4_000_000, 0) .saturating_add(Weight::from_parts(0, 1561)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn resume_channel() -> Weight { + // Proof Size summary in bytes: + // Measured: `111` + // Estimated: `1596` + // Minimum execution time: 4_000_000 picoseconds. 
+ Weight::from_parts(5_000_000, 0) + .saturating_add(Weight::from_parts(0, 1596)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn take_first_concatenated_xcm() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 44_000_000 picoseconds. + Weight::from_parts(45_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Storage: `XcmpQueue::InboundXcmpMessages` (r:1 w:1) + /// Proof: `XcmpQueue::InboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) + /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn on_idle_good_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65711` + // Estimated: `69176` + // Minimum execution time: 60_000_000 picoseconds. + Weight::from_parts(63_000_000, 0) + .saturating_add(Weight::from_parts(0, 69176)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + fn on_idle_large_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65710` + // Estimated: `69175` + // Minimum execution time: 42_000_000 picoseconds. + Weight::from_parts(43_000_000, 0) + .saturating_add(Weight::from_parts(0, 69175)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/frame_system.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/frame_system.rs index 5a0a60cc9952dc8f7eb98458a848b24b9c166f3a..6b9313cdababf085c24f22e79b63acc86534bfbb 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/frame_system.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/frame_system.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `frame_system` //! diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/mod.rs index e226021e77ab58d51bda2eaa23ff732d53e7f4cb..36733d6d4a6e8f09cd21bd0f14ae8bde53a720c1 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/mod.rs @@ -18,11 +18,14 @@ //! Expose the auto generated weight files. pub mod block_weights; +pub mod cumulus_pallet_dmp_queue; +pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; pub mod pallet_balances; pub mod pallet_collator_selection; +pub mod pallet_message_queue; pub mod pallet_multisig; pub mod pallet_session; pub mod pallet_timestamp; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_balances.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_balances.rs index 51ca2e660b3b578a5d23e38eb215e2b4a5c5debc..04ceb5bed756964dd5932cf649f2b016e06326a5 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_balances.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_balances.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_balances` //! diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_collator_selection.rs index fa0ac199ca2c06d07a757e191d9a00e9e1f9cb7e..72d8ba4045a95ec6aba6d16670dfb3310c2114ff 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_collator_selection.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_collator_selection.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_collator_selection` //! @@ -124,7 +123,7 @@ impl pallet_collator_selection::WeightInfo for WeightIn } /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - fn set_candidacy_bond() -> Weight { + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` @@ -178,6 +177,30 @@ impl pallet_collator_selection::WeightInfo for WeightIn .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } + fn update_bond(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn take_candidate_slot(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. 
+ Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `System::BlockWeight` (r:1 w:1) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_message_queue.rs new file mode 100644 index 0000000000000000000000000000000000000000..c5a4235055d120f85eec6279a963b8e39c009c62 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_message_queue.rs @@ -0,0 +1,179 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_message_queue` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-03-24, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-kusama-dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --chain +// bridge-hub-kusama-dev +// --pallet +// pallet_message_queue +// --extrinsic +// * +// --execution +// wasm +// --wasm-execution +// compiled +// --output +// parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::Weight}; +use sp_std::marker::PhantomData; + +/// Weight functions for `pallet_message_queue`. +pub struct WeightInfo(PhantomData); +impl pallet_message_queue::WeightInfo for WeightInfo { + /// Storage: MessageQueue ServiceHead (r:1 w:0) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: MessageQueue BookStateFor (r:2 w:2) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn ready_ring_knit() -> Weight { + // Proof Size summary in bytes: + // Measured: `189` + // Estimated: `7534` + // Minimum execution time: 11_692_000 picoseconds. 
+ Weight::from_parts(11_692_000, 0) + .saturating_add(Weight::from_parts(0, 7534)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: MessageQueue BookStateFor (r:2 w:2) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + fn ready_ring_unknit() -> Weight { + // Proof Size summary in bytes: + // Measured: `184` + // Estimated: `7534` + // Minimum execution time: 10_614_000 picoseconds. + Weight::from_parts(10_614_000, 0) + .saturating_add(Weight::from_parts(0, 7534)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn service_queue_base() -> Weight { + // Proof Size summary in bytes: + // Measured: `6` + // Estimated: `3517` + // Minimum execution time: 7_085_000 picoseconds. + Weight::from_parts(7_085_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn service_page_base_completion() -> Weight { + // Proof Size summary in bytes: + // Measured: `72` + // Estimated: `69050` + // Minimum execution time: 5_813_000 picoseconds. + Weight::from_parts(5_813_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn service_page_base_no_completion() -> Weight { + // Proof Size summary in bytes: + // Measured: `72` + // Estimated: `69050` + // Minimum execution time: 6_090_000 picoseconds. + Weight::from_parts(6_090_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn service_page_item() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 58_905_000 picoseconds. + Weight::from_parts(58_905_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: MessageQueue BookStateFor (r:1 w:0) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn bump_service_head() -> Weight { + // Proof Size summary in bytes: + // Measured: `99` + // Estimated: `5007` + // Minimum execution time: 6_501_000 picoseconds. 
+ Weight::from_parts(6_501_000, 0) + .saturating_add(Weight::from_parts(0, 5007)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn reap_page() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `72567` + // Minimum execution time: 39_695_000 picoseconds. + Weight::from_parts(39_695_000, 0) + .saturating_add(Weight::from_parts(0, 72567)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn execute_overweight_page_removed() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `72567` + // Minimum execution time: 50_543_000 picoseconds. + Weight::from_parts(50_543_000, 0) + .saturating_add(Weight::from_parts(0, 72567)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn execute_overweight_page_updated() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `72567` + // Minimum execution time: 69_294_000 picoseconds. + Weight::from_parts(69_294_000, 0) + .saturating_add(Weight::from_parts(0, 72567)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_multisig.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_multisig.rs index 96b2d859ed864d8a368bc258038890825c8b4d36..f4135e975fbed00ea5dfd0628138b4cf7bd9c7ca 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_multisig.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_multisig.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . 
+// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_multisig` //! diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_session.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_session.rs index cc1b4aeb0ddb6718b57ebfd65c985fc4071867b1..f508e1daaef02ac7fe2d2e022e0507960fc797ee 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_session.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_session.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_session` //! diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_timestamp.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_timestamp.rs index 32f6e4a6b43598cf3b3b6a6df7a571bd38f44697..6162b1d48c5fe60e4501d40b95fcaa5e37ae9be2 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_timestamp.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_timestamp.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_timestamp` //! diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_utility.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_utility.rs index 15b06676cd364efcd9d80361f7f7437cab032efe..93d0ea596e721127fe459d6a59105ebbb5d19a7c 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_utility.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_utility.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_utility` //! diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_xcm.rs index 71bc583077100ecb9edfbca039a9c7b539ff6d66..7f4c2026f2bd8ce762dbd54ba94f55ded5b38de1 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_xcm.rs @@ -17,27 +17,25 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! 
EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-kusama-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-kusama-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=bridge-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/ +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=bridge-hub-kusama-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,6 +48,8 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -64,10 +64,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 27_523_000 picoseconds. - Weight::from_parts(28_238_000, 0) + // Minimum execution time: 22_520_000 picoseconds. + Weight::from_parts(23_167_000, 0) .saturating_add(Weight::from_parts(0, 3503)) - .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) @@ -76,8 +76,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1489` - // Minimum execution time: 24_139_000 picoseconds. - Weight::from_parts(24_806_000, 0) + // Minimum execution time: 19_639_000 picoseconds. + Weight::from_parts(20_230_000, 0) .saturating_add(Weight::from_parts(0, 1489)) .saturating_add(T::DbWeight::get().reads(1)) } @@ -107,8 +107,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_988_000 picoseconds. - Weight::from_parts(9_227_000, 0) + // Minimum execution time: 7_175_000 picoseconds. + Weight::from_parts(7_496_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -118,8 +118,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_571_000 picoseconds. - Weight::from_parts(2_667_000, 0) + // Minimum execution time: 2_126_000 picoseconds. 
+ Weight::from_parts(2_359_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -127,6 +127,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -143,14 +145,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 33_194_000 picoseconds. - Weight::from_parts(34_089_000, 0) + // Minimum execution time: 27_229_000 picoseconds. + Weight::from_parts(27_673_000, 0) .saturating_add(Weight::from_parts(0, 3503)) - .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -167,10 +171,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `255` // Estimated: `3720` - // Minimum execution time: 35_413_000 picoseconds. - Weight::from_parts(36_359_000, 0) + // Minimum execution time: 29_812_000 picoseconds. + Weight::from_parts(30_649_000, 0) .saturating_add(Weight::from_parts(0, 3720)) - .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) @@ -179,8 +183,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_679_000 picoseconds. - Weight::from_parts(2_823_000, 0) + // Minimum execution time: 2_212_000 picoseconds. + Weight::from_parts(2_367_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -190,8 +194,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `95` // Estimated: `10985` - // Minimum execution time: 15_117_000 picoseconds. - Weight::from_parts(15_603_000, 0) + // Minimum execution time: 14_768_000 picoseconds. + Weight::from_parts(15_036_000, 0) .saturating_add(Weight::from_parts(0, 10985)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -202,8 +206,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `99` // Estimated: `10989` - // Minimum execution time: 14_978_000 picoseconds. 
- Weight::from_parts(15_370_000, 0) + // Minimum execution time: 14_662_000 picoseconds. + Weight::from_parts(15_155_000, 0) .saturating_add(Weight::from_parts(0, 10989)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -214,13 +218,15 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `13471` - // Minimum execution time: 16_549_000 picoseconds. - Weight::from_parts(16_944_000, 0) + // Minimum execution time: 16_198_000 picoseconds. + Weight::from_parts(16_456_000, 0) .saturating_add(Weight::from_parts(0, 13471)) .saturating_add(T::DbWeight::get().reads(5)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -235,10 +241,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `6046` - // Minimum execution time: 30_111_000 picoseconds. - Weight::from_parts(30_795_000, 0) + // Minimum execution time: 25_825_000 picoseconds. + Weight::from_parts(26_744_000, 0) .saturating_add(Weight::from_parts(0, 6046)) - .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) @@ -248,7 +254,7 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Measured: `136` // Estimated: `8551` // Minimum execution time: 8_622_000 picoseconds. - Weight::from_parts(8_865_000, 0) + Weight::from_parts(8_931_000, 0) .saturating_add(Weight::from_parts(0, 8551)) .saturating_add(T::DbWeight::get().reads(3)) } @@ -258,14 +264,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `10996` - // Minimum execution time: 15_194_000 picoseconds. - Weight::from_parts(15_646_000, 0) + // Minimum execution time: 15_397_000 picoseconds. + Weight::from_parts(15_650_000, 0) .saturating_add(Weight::from_parts(0, 10996)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -280,10 +288,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `112` // Estimated: `11002` - // Minimum execution time: 36_625_000 picoseconds. - Weight::from_parts(37_571_000, 0) + // Minimum execution time: 32_330_000 picoseconds. 
+ Weight::from_parts(33_255_000, 0) .saturating_add(Weight::from_parts(0, 11002)) - .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn new_query() -> Weight { + // Proof Size summary in bytes: + // Measured: `32` + // Estimated: `1517` + // Minimum execution time: 4_142_000 picoseconds. + Weight::from_parts(4_308_000, 0) + .saturating_add(Weight::from_parts(0, 1517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::Queries` (r:1 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn take_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `7669` + // Estimated: `11134` + // Minimum execution time: 25_814_000 picoseconds. + Weight::from_parts(26_213_000, 0) + .saturating_add(Weight::from_parts(0, 11134)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/mod.rs index 0e740922f339de717f04f0881c165786c36ba47a..71732961d3de12bde9058665c4937426b4ed9f88 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/mod.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
mod pallet_xcm_benchmarks_fungible; mod pallet_xcm_benchmarks_generic; @@ -61,16 +60,8 @@ impl XcmWeightInfo for BridgeHubKusamaXcmWeight { fn withdraw_asset(assets: &MultiAssets) -> Weight { assets.weigh_multi_assets(XcmFungibleWeight::::withdraw_asset()) } - // Currently there is no trusted reserve (`IsReserve = ()`), - // but we need this hack for `pallet_xcm::reserve_transfer_assets` - // (TODO) fix https://github.com/paritytech/polkadot/pull/7424 - // (TODO) fix https://github.com/paritytech/polkadot/pull/7546 - fn reserve_asset_deposited(_assets: &MultiAssets) -> Weight { - // TODO: if we change `IsReserve = ...` then use this line... - // TODO: or if remote weight estimation is fixed, then remove - // TODO: hardcoded - fix https://github.com/paritytech/cumulus/issues/1974 - let hardcoded_weight = Weight::from_parts(1_000_000_000_u64, 0); - hardcoded_weight.min(XcmFungibleWeight::::reserve_asset_deposited()) + fn reserve_asset_deposited(assets: &MultiAssets) -> Weight { + assets.weigh_multi_assets(XcmFungibleWeight::::reserve_asset_deposited()) } fn receive_teleported_asset(assets: &MultiAssets) -> Weight { assets.weigh_multi_assets(XcmFungibleWeight::::receive_teleported_asset()) @@ -127,10 +118,7 @@ impl XcmWeightInfo for BridgeHubKusamaXcmWeight { } fn deposit_asset(assets: &MultiAssetFilter, _dest: &MultiLocation) -> Weight { - // Hardcoded till the XCM pallet is fixed - let hardcoded_weight = Weight::from_parts(1_000_000_000_u64, 0); - let weight = assets.weigh_multi_assets(XcmFungibleWeight::::deposit_asset()); - hardcoded_weight.min(weight) + assets.weigh_multi_assets(XcmFungibleWeight::::deposit_asset()) } fn deposit_reserve_asset( assets: &MultiAssetFilter, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs index 6c8c7ab66bbdb8e149a04e56b59f9a4fe7a75722..ff3cb452a8a4fad0ea7996080e159ae1327df538 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs @@ -1,44 +1,41 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_xcm_benchmarks::fungible` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-kusama-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-nbnwcyh-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-kusama-dev"), DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --template=./templates/xcm-bench-template.hbs -// --chain=bridge-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm_benchmarks::fungible -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm_benchmarks::fungible +// --chain=bridge-hub-kusama-dev +// --header=./cumulus/file_header.txt +// --template=./cumulus/templates/xcm-bench-template.hbs +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -56,8 +53,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `3593` - // Minimum execution time: 24_064_000 picoseconds. - Weight::from_parts(24_751_000, 3593) + // Minimum execution time: 25_447_000 picoseconds. + Weight::from_parts(25_810_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -67,8 +64,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `153` // Estimated: `6196` - // Minimum execution time: 51_097_000 picoseconds. - Weight::from_parts(51_960_000, 6196) + // Minimum execution time: 53_908_000 picoseconds. + Weight::from_parts(54_568_000, 6196) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -90,8 +87,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `223` // Estimated: `6196` - // Minimum execution time: 75_319_000 picoseconds. - Weight::from_parts(77_356_000, 6196) + // Minimum execution time: 79_923_000 picoseconds. + Weight::from_parts(80_790_000, 6196) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -101,8 +98,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 500_000_000_000 picoseconds. - Weight::from_parts(500_000_000_000, 0) + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. 
+ Weight::from_parts(18_446_744_073_709_551_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -120,8 +117,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3535` - // Minimum execution time: 29_392_000 picoseconds. - Weight::from_parts(29_943_000, 3535) + // Minimum execution time: 31_923_000 picoseconds. + Weight::from_parts(32_499_000, 3535) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -129,8 +126,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_637_000 picoseconds. - Weight::from_parts(3_720_000, 0) + // Minimum execution time: 3_903_000 picoseconds. + Weight::from_parts(4_065_000, 0) } // Storage: `System::Account` (r:1 w:1) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) @@ -138,8 +135,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `52` // Estimated: `3593` - // Minimum execution time: 25_045_000 picoseconds. - Weight::from_parts(25_546_000, 3593) + // Minimum execution time: 26_987_000 picoseconds. + Weight::from_parts(27_486_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -161,8 +158,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `122` // Estimated: `3593` - // Minimum execution time: 51_450_000 picoseconds. - Weight::from_parts(52_354_000, 3593) + // Minimum execution time: 56_012_000 picoseconds. + Weight::from_parts(58_067_000, 3593) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -182,8 +179,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3535` - // Minimum execution time: 29_711_000 picoseconds. - Weight::from_parts(30_759_000, 3535) + // Minimum execution time: 32_350_000 picoseconds. + Weight::from_parts(33_403_000, 3535) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index b1e8107b30bbddb9e30f0bf3c648627993fa4448..c5c14e6917eb244d91a5f655880199b523046a11 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . 
+// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_xcm_benchmarks::generic` //! diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/xcm_config.rs index 696462be9c45c31a8d4a6d9491ed1c9475fda00d..b3703eee8301d37299553751804a14814313beba 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/xcm_config.rs @@ -16,7 +16,8 @@ use super::{ AccountId, AllPalletsWithSystem, Balances, ParachainInfo, ParachainSystem, PolkadotXcm, - Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, WeightToFee, XcmpQueue, + Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, TransactionByteFee, WeightToFee, XcmpQueue, + CENTS, }; use frame_support::{ match_types, parameter_types, @@ -24,8 +25,9 @@ use frame_support::{ }; use frame_system::EnsureRoot; use pallet_xcm::XcmPassthrough; -use parachains_common::{impls::ToStakingPot, xcm_config::ConcreteNativeAssetFrom}; +use parachains_common::{impls::ToStakingPot, xcm_config::ConcreteAssetFromSystem}; use polkadot_parachain_primitives::primitives::Sibling; +use polkadot_runtime_common::xcm_sender::ExponentialPrice; use xcm::latest::prelude::*; use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, @@ -147,7 +149,7 @@ impl Contains for SafeCallFilter { pallet_collator_selection::Call::remove_invulnerable { .. }, ) | RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | RuntimeCall::XcmpQueue(..) | - RuntimeCall::DmpQueue(..) + RuntimeCall::MessageQueue(..) ) } } @@ -162,7 +164,7 @@ pub type Barrier = TrailingSetTopicAsId< AllowKnownQueryResponses, WithComputedOrigin< ( - // If the message is one that immediately attemps to pay for execution, then + // If the message is one that immediately attempts to pay for execution, then // allow it. AllowTopLevelPaidExecutionFrom, // Parent and its pluralities (i.e. governance bodies) get free execution. @@ -177,6 +179,10 @@ pub type Barrier = TrailingSetTopicAsId< >, >; +/// Cases where a remote origin is accepted as trusted Teleporter for a given asset: +/// - KSM with the parent Relay Chain and sibling parachains. +pub type TrustedTeleporters = ConcreteAssetFromSystem; + pub struct XcmConfig; impl xcm_executor::Config for XcmConfig { type RuntimeCall = RuntimeCall; @@ -186,8 +192,7 @@ impl xcm_executor::Config for XcmConfig { // BridgeHub does not recognize a reserve location for any asset. Users must teleport KSM // where allowed (e.g. with the Relay Chain). type IsReserve = (); - /// Only allow teleportation of KSM. - type IsTeleporter = ConcreteNativeAssetFrom; + type IsTeleporter = TrustedTeleporters; type UniversalLocation = UniversalLocation; type Barrier = Barrier; type Weigher = WeightInfoBounds< @@ -217,20 +222,25 @@ impl xcm_executor::Config for XcmConfig { /// Forms the basis for local origins sending/executing XCMs. 
pub type LocalOriginToLocation = SignedToAccountId32; +parameter_types! { + /// The asset ID for the asset that we use to pay for message delivery fees. + pub FeeAssetId: AssetId = Concrete(KsmRelayLocation::get()); + /// The base fee for the message delivery fees. + pub const BaseDeliveryFee: u128 = CENTS.saturating_mul(3); +} + +pub type PriceForParentDelivery = + ExponentialPrice; + /// The means for routing XCM messages which are not for local execution into the right message /// queues. pub type XcmRouter = WithUniqueTopic<( // Two routers - use UMP to communicate with the relay chain: - cumulus_primitives_utility::ParentAsUmp, + cumulus_primitives_utility::ParentAsUmp, // ..and XCMP to communicate with the sibling chains. XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; // We want to disallow users sending (arbitrary) XCMs from this chain. @@ -259,8 +269,6 @@ impl pallet_xcm::Config for Runtime { type SovereignAccountOf = LocationToAccountId; type MaxLockers = ConstU32<8>; type WeightInfo = crate::weights::pallet_xcm::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/tests/tests.rs index 893524e12f66230f5de2a48966344b8b93f08489..36d8f0846af28020d217a82440f5cb8bc24b0e9a 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/tests/tests.rs @@ -47,11 +47,5 @@ bridge_hub_test_utils::test_cases::include_teleports_for_native_asset_works!( _ => None, } }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }), 1002 ); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/Cargo.toml index eb0c18f5b460b0d15cb96890e219e9fe6187d8f9..3847a352e0782bda61c26ba6207677ae85e2228d 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/Cargo.toml @@ -4,6 +4,7 @@ version = "0.1.0" authors.workspace = true edition.workspace = true description = "Polkadot's BridgeHub parachain runtime" +license = "Apache-2.0" [build-dependencies] substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } @@ -12,7 +13,7 @@ substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } hex-literal = { version = "0.4.1" } log = { version = "0.4.20", default-features = false } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.188", optional = true, features = ["derive"] } smallvec = "1.11.0" @@ -27,6 +28,7 @@ frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", defau pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = 
false} pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false} pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false} +pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false} pallet-session = { path = "../../../../../substrate/frame/session", default-features = false} pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false} @@ -68,7 +70,7 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachain-info = { path = "../../../pallets/parachain-info", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } [dev-dependencies] @@ -98,6 +100,7 @@ std = [ "pallet-authorship/std", "pallet-balances/std", "pallet-collator-selection/std", + "pallet-message-queue/std", "pallet-multisig/std", "pallet-session/std", "pallet-timestamp/std", @@ -134,9 +137,11 @@ std = [ ] runtime-benchmarks = [ + "cumulus-pallet-dmp-queue/runtime-benchmarks", "cumulus-pallet-parachain-system/runtime-benchmarks", "cumulus-pallet-session-benchmarking/runtime-benchmarks", "cumulus-pallet-xcmp-queue/runtime-benchmarks", + "cumulus-primitives-core/runtime-benchmarks", "cumulus-primitives-utility/runtime-benchmarks", "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", @@ -144,6 +149,7 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-collator-selection/runtime-benchmarks", + "pallet-message-queue/runtime-benchmarks", "pallet-multisig/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "pallet-utility/runtime-benchmarks", @@ -171,6 +177,7 @@ try-runtime = [ "pallet-authorship/try-runtime", "pallet-balances/try-runtime", "pallet-collator-selection/try-runtime", + "pallet-message-queue/try-runtime", "pallet-multisig/try-runtime", "pallet-session/try-runtime", "pallet-timestamp/try-runtime", diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs index 928b9d091ec5bf85a69582794dd23e933637eccf..02f05a8bb8770530ce85d94166928683ba9b58df 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs @@ -26,6 +26,7 @@ mod weights; pub mod xcm_config; use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; +use polkadot_runtime_common::xcm_sender::NoPriceForMessageDelivery; use sp_api::impl_runtime_apis; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ @@ -40,12 +41,15 @@ use sp_std::prelude::*; use sp_version::NativeVersion; use sp_version::RuntimeVersion; +use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; use frame_support::{ construct_runtime, dispatch::DispatchClass, genesis_builder_helper::{build_config, create_default_config}, parameter_types, - 
traits::{ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, Everything}, + traits::{ + ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, Everything, TransformOrigin, + }, weights::{ConstantMultiplier, Weight}, PalletId, }; @@ -56,9 +60,7 @@ use frame_system::{ use pallet_xcm::{EnsureXcm, IsVoiceOfBody}; pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; pub use sp_runtime::{MultiAddress, Perbill, Permill}; -use xcm_config::{ - FellowshipLocation, GovernanceLocation, XcmConfig, XcmOriginToTransactDispatchOrigin, -}; +use xcm_config::{FellowshipLocation, GovernanceLocation, XcmOriginToTransactDispatchOrigin}; #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; @@ -70,13 +72,13 @@ use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; use parachains_common::{ impls::DealWithFees, + message_queue::{NarrowOriginToSibling, ParaIdToSibling}, polkadot::{consensus::*, currency::*, fee::WeightToFee}, AccountId, Balance, BlockNumber, Hash, Header, Nonce, Signature, AVERAGE_ON_INITIALIZE_RATIO, HOURS, MAXIMUM_BLOCK_WEIGHT, NORMAL_DISPATCH_RATIO, SLOT_DURATION, }; // XCM Imports use xcm::latest::prelude::BodyId; -use xcm_executor::XcmExecutor; /// The address format for describing accounts. pub type Address = MultiAddress; @@ -107,7 +109,10 @@ pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. -pub type Migrations = (pallet_collator_selection::migration::v1::MigrateToV1,); +pub type Migrations = ( + // unreleased + pallet_collator_selection::migration::v1::MigrateToV1, +); /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< @@ -247,6 +252,7 @@ impl pallet_balances::Config for Runtime { type MaxReserves = ConstU32<50>; type ReserveIdentifier = [u8; 8]; type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); type MaxHolds = ConstU32<0>; type MaxFreezes = ConstU32<0>; @@ -273,11 +279,12 @@ parameter_types! { } impl cumulus_pallet_parachain_system::Config for Runtime { + type WeightInfo = weights::cumulus_pallet_parachain_system::WeightInfo; type RuntimeEvent = RuntimeEvent; type OnSystemEvent = (); type SelfParaId = parachain_info::Pallet; type OutboundXcmpMessageSource = XcmpQueue; - type DmpMessageHandler = DmpQueue; + type DmpQueue = frame_support::traits::EnqueueWithOrigin; type ReservedDmpWeight = ReservedDmpWeight; type XcmpMessageHandler = XcmpQueue; type ReservedXcmpWeight = ReservedXcmpWeight; @@ -290,6 +297,32 @@ impl cumulus_pallet_parachain_system::Config for Runtime { >; } +parameter_types! 
{ + pub MessageQueueServiceWeight: Weight = Perbill::from_percent(35) * RuntimeBlockWeights::get().max_block; +} + +impl pallet_message_queue::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = weights::pallet_message_queue::WeightInfo; + #[cfg(feature = "runtime-benchmarks")] + type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor< + cumulus_primitives_core::AggregateMessageOrigin, + >; + #[cfg(not(feature = "runtime-benchmarks"))] + type MessageProcessor = xcm_builder::ProcessXcmMessage< + AggregateMessageOrigin, + xcm_executor::XcmExecutor, + RuntimeCall, + >; + type Size = u32; + // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: + type QueueChangeHandler = NarrowOriginToSibling; + type QueuePausedQuery = NarrowOriginToSibling; + type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type MaxStale = sp_core::ConstU32<8>; + type ServiceWeight = MessageQueueServiceWeight; +} + impl parachain_info::Config for Runtime {} impl cumulus_pallet_aura_ext::Config for Runtime {} @@ -307,20 +340,25 @@ pub type RootOrFellows = EitherOfDiverse< impl cumulus_pallet_xcmp_queue::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type XcmExecutor = XcmExecutor; type ChannelInfo = ParachainSystem; type VersionWrapper = PolkadotXcm; - type ExecuteOverweightOrigin = EnsureRoot; + // Enqueue XCMP messages from siblings for later processing. + type XcmpQueue = TransformOrigin; + type MaxInboundSuspended = sp_core::ConstU32<1_000>; type ControllerOrigin = RootOrFellows; type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; type WeightInfo = weights::cumulus_pallet_xcmp_queue::WeightInfo; - type PriceForSiblingDelivery = (); + type PriceForSiblingDelivery = NoPriceForMessageDelivery; +} + +parameter_types! { + pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; } impl cumulus_pallet_dmp_queue::Config for Runtime { + type WeightInfo = weights::cumulus_pallet_dmp_queue::WeightInfo; type RuntimeEvent = RuntimeEvent; - type XcmExecutor = XcmExecutor; - type ExecuteOverweightOrigin = EnsureRoot; + type DmpSink = frame_support::traits::EnqueueWithOrigin; } pub const PERIOD: u32 = 6 * HOURS; @@ -430,6 +468,7 @@ construct_runtime!( PolkadotXcm: pallet_xcm::{Pallet, Call, Event, Origin, Config} = 31, CumulusXcm: cumulus_pallet_xcm::{Pallet, Event, Origin} = 32, DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event} = 33, + MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 34, // Handy utilities. Utility: pallet_utility::{Pallet, Call, Event} = 40, @@ -437,23 +476,22 @@ construct_runtime!( } ); -#[cfg(feature = "runtime-benchmarks")] -#[macro_use] -extern crate frame_benchmarking; - #[cfg(feature = "runtime-benchmarks")] mod benches { - define_benchmarks!( + frame_benchmarking::define_benchmarks!( [frame_system, SystemBench::] [pallet_balances, Balances] + [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] [pallet_session, SessionBench::] [pallet_utility, Utility] [pallet_timestamp, Timestamp] [pallet_collator_selection, CollatorSelection] + [cumulus_pallet_parachain_system, ParachainSystem] [cumulus_pallet_xcmp_queue, XcmpQueue] + [cumulus_pallet_dmp_queue, DmpQueue] // XCM - [pallet_xcm, PolkadotXcm] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -633,6 +671,7 @@ impl_runtime_apis! 
{ use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -668,12 +707,47 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported between BH and Relay. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + Parent.into(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Reserve transfers are disabled on BH. + None + } + } + use xcm::latest::prelude::*; use xcm_config::DotRelayLocation; + parameter_types! { + pub ExistentialDepositMultiAsset: Option = Some(( + xcm_config::DotRelayLocation::get(), + ExistentialDeposit::get() + ).into()); + } + impl pallet_xcm_benchmarks::Config for Runtime { type XcmConfig = xcm_config::XcmConfig; type AccountIdConverter = xcm_config::LocationToAccountId; + type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositMultiAsset, + xcm_config::PriceForParentDelivery, + >; fn valid_destination() -> Result { Ok(DotRelayLocation::get()) } @@ -714,6 +788,7 @@ impl_runtime_apis! { } impl pallet_xcm_benchmarks::generic::Config for Runtime { + type TransactAsset = Balances; type RuntimeCall = RuntimeCall; fn worst_case_response() -> (u64, Response) { diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/cumulus_pallet_dmp_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/cumulus_pallet_dmp_queue.rs new file mode 100644 index 0000000000000000000000000000000000000000..cc41dcd6cbbb62c1392ae2e7517b5dfe920a5b85 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/cumulus_pallet_dmp_queue.rs @@ -0,0 +1,131 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `cumulus_pallet_dmp_queue` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-10-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=cumulus_pallet_dmp_queue +// --chain=asset-hub-kusama-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `cumulus_pallet_dmp_queue`. +pub struct WeightInfo(PhantomData); +impl cumulus_pallet_dmp_queue::WeightInfo for WeightInfo { + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn on_idle_good_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65696` + // Estimated: `69161` + // Minimum execution time: 124_651_000 picoseconds. + Weight::from_parts(127_857_000, 0) + .saturating_add(Weight::from_parts(0, 69161)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + fn on_idle_large_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65659` + // Estimated: `69124` + // Minimum execution time: 65_684_000 picoseconds. 
+ Weight::from_parts(68_039_000, 0) + .saturating_add(Weight::from_parts(0, 69124)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn on_idle_overweight_good_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65726` + // Estimated: `69191` + // Minimum execution time: 117_657_000 picoseconds. + Weight::from_parts(122_035_000, 0) + .saturating_add(Weight::from_parts(0, 69191)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(6)) + } + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + fn on_idle_overweight_large_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65689` + // Estimated: `69154` + // Minimum execution time: 59_799_000 picoseconds. 
+ Weight::from_parts(61_354_000, 0) + .saturating_add(Weight::from_parts(0, 69154)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/cumulus_pallet_parachain_system.rs new file mode 100644 index 0000000000000000000000000000000000000000..4b0cface1466a7833e3a57cbbb21efb2714e4591 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/cumulus_pallet_parachain_system.rs @@ -0,0 +1,80 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `cumulus_pallet_parachain_system` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-03-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-polkadot-dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --chain +// bridge-hub-polkadot-dev +// --pallet +// cumulus_pallet_parachain_system +// --extrinsic +// * +// --execution +// wasm +// --wasm-execution +// compiled +// --output +// parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights +// --steps +// 50 +// --repeat +// 20 + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::Weight}; +use sp_std::marker::PhantomData; + +/// Weight functions for `cumulus_pallet_parachain_system`. 
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> cumulus_pallet_parachain_system::WeightInfo for WeightInfo<T> {
+ /// Storage: ParachainSystem LastDmqMqcHead (r:1 w:1)
+ /// Proof Skipped: ParachainSystem LastDmqMqcHead (max_values: Some(1), max_size: None, mode: Measured)
+ /// Storage: ParachainSystem ReservedDmpWeightOverride (r:1 w:0)
+ /// Proof Skipped: ParachainSystem ReservedDmpWeightOverride (max_values: Some(1), max_size: None, mode: Measured)
+ /// Storage: MessageQueue BookStateFor (r:1 w:1)
+ /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen)
+ /// Storage: MessageQueue ServiceHead (r:1 w:1)
+ /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen)
+ /// Storage: ParachainSystem ProcessedDownwardMessages (r:0 w:1)
+ /// Proof Skipped: ParachainSystem ProcessedDownwardMessages (max_values: Some(1), max_size: None, mode: Measured)
+ /// Storage: MessageQueue Pages (r:0 w:16)
+ /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen)
+ /// The range of component `n` is `[0, 1000]`.
+ fn enqueue_inbound_downward_messages(n: u32, ) -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `12`
+ // Estimated: `8013`
+ // Minimum execution time: 1_686_000 picoseconds.
+ Weight::from_parts(1_729_000, 0)
+ .saturating_add(Weight::from_parts(0, 8013))
+ // Standard Error: 19_565
+ .saturating_add(Weight::from_parts(24_482_828, 0).saturating_mul(n.into()))
+ .saturating_add(T::DbWeight::get().reads(4))
+ .saturating_add(T::DbWeight::get().writes(4))
+ }
+}
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/cumulus_pallet_xcmp_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/cumulus_pallet_xcmp_queue.rs
index 98834cc44e880e432c13c4b1b3d8a5dfe3cbf624..ac6ad093faf043b9825ef4b0dd241e4d115def07 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/cumulus_pallet_xcmp_queue.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/cumulus_pallet_xcmp_queue.rs
@@ -1,43 +1,38 @@
 // Copyright (C) Parity Technologies (UK) Ltd.
-// This file is part of Cumulus.
+// SPDX-License-Identifier: Apache-2.0
-// Cumulus is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Cumulus is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Cumulus. If not, see <http://www.gnu.org/licenses/>.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 //! Autogenerated weights for `cumulus_pallet_xcmp_queue`
 //!
 //!
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-polkadot-dev")`, DB CACHE: 1024 +//! HOSTNAME: `Olivers-MacBook-Pro.local`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-polkadot-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// ./target/release/polkadot-parachain // benchmark // pallet -// --chain=bridge-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=cumulus_pallet_xcmp_queue -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/ +// --pallet +// cumulus-pallet-xcmp-queue +// --chain +// bridge-hub-polkadot-dev +// --output +// cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/cumulus_pallet_xcmp_queue.rs +// --extrinsic +// #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -56,22 +51,98 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `76` // Estimated: `1561` - // Minimum execution time: 5_043_000 picoseconds. - Weight::from_parts(5_211_000, 0) + // Minimum execution time: 6_000_000 picoseconds. + Weight::from_parts(6_000_000, 0) .saturating_add(Weight::from_parts(0, 1561)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `XcmpQueue::QueueConfig` (r:1 w:1) + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn set_config_with_weight() -> Weight { + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn enqueue_xcmp_message() -> Weight { + // Proof Size summary in bytes: + // Measured: `82` + // Estimated: `3517` + // Minimum execution time: 15_000_000 picoseconds. + Weight::from_parts(15_000_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn suspend_channel() -> Weight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `1561` - // Minimum execution time: 5_011_000 picoseconds. - Weight::from_parts(5_171_000, 0) + // Minimum execution time: 3_000_000 picoseconds. 
+ Weight::from_parts(4_000_000, 0) .saturating_add(Weight::from_parts(0, 1561)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn resume_channel() -> Weight { + // Proof Size summary in bytes: + // Measured: `111` + // Estimated: `1596` + // Minimum execution time: 4_000_000 picoseconds. + Weight::from_parts(5_000_000, 0) + .saturating_add(Weight::from_parts(0, 1596)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn take_first_concatenated_xcm() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 44_000_000 picoseconds. + Weight::from_parts(45_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Storage: `XcmpQueue::InboundXcmpMessages` (r:1 w:1) + /// Proof: `XcmpQueue::InboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) + /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn on_idle_good_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65711` + // Estimated: `69176` + // Minimum execution time: 61_000_000 picoseconds. + Weight::from_parts(64_000_000, 0) + .saturating_add(Weight::from_parts(0, 69176)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + fn on_idle_large_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65710` + // Estimated: `69175` + // Minimum execution time: 42_000_000 picoseconds. 
+ Weight::from_parts(44_000_000, 0) + .saturating_add(Weight::from_parts(0, 69175)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/frame_system.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/frame_system.rs index 4aeb4660d8786b81100e26f1309dc2be8898e874..8676be67b2f57529c091cb497b77932d9bb21c04 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/frame_system.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/frame_system.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `frame_system` //! diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/mod.rs index e226021e77ab58d51bda2eaa23ff732d53e7f4cb..36733d6d4a6e8f09cd21bd0f14ae8bde53a720c1 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/mod.rs @@ -18,11 +18,14 @@ //! Expose the auto generated weight files. pub mod block_weights; +pub mod cumulus_pallet_dmp_queue; +pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; pub mod pallet_balances; pub mod pallet_collator_selection; +pub mod pallet_message_queue; pub mod pallet_multisig; pub mod pallet_session; pub mod pallet_timestamp; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_balances.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_balances.rs index 5abe64bb411750f714e65038a2c750932240f2f7..b95ea83585f9963b15d5ba122418f4eca4ba646b 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_balances.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_balances.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. 
+// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_balances` //! diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_collator_selection.rs index e0f4156fe4d4c4d3f31229bdaa6308456a5cabb7..f7c78f7db82a0bcf55db5fbfb261e707a084cd9b 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_collator_selection.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_collator_selection.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_collator_selection` //! 
@@ -124,7 +123,7 @@ impl pallet_collator_selection::WeightInfo for WeightIn } /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - fn set_candidacy_bond() -> Weight { + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` @@ -178,6 +177,30 @@ impl pallet_collator_selection::WeightInfo for WeightIn .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } + fn update_bond(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn take_candidate_slot(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `System::BlockWeight` (r:1 w:1) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_message_queue.rs new file mode 100644 index 0000000000000000000000000000000000000000..38cc21cfad950169b062476a23b2dc1ee60bb237 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_message_queue.rs @@ -0,0 +1,179 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_message_queue` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-03-24, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` +//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-polkadot-dev"), DB CACHE: 1024
+
+// Executed Command:
+// ./target/release/polkadot-parachain
+// benchmark
+// pallet
+// --chain
+// bridge-hub-polkadot-dev
+// --pallet
+// pallet_message_queue
+// --extrinsic
+// *
+// --execution
+// wasm
+// --wasm-execution
+// compiled
+// --output
+// parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+
+use frame_support::{traits::Get, weights::Weight};
+use sp_std::marker::PhantomData;
+
+/// Weight functions for `pallet_message_queue`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_message_queue::WeightInfo for WeightInfo<T> {
+ /// Storage: MessageQueue ServiceHead (r:1 w:0)
+ /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen)
+ /// Storage: MessageQueue BookStateFor (r:2 w:2)
+ /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen)
+ fn ready_ring_knit() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `189`
+ // Estimated: `7534`
+ // Minimum execution time: 38_974_000 picoseconds.
+ Weight::from_parts(38_974_000, 0)
+ .saturating_add(Weight::from_parts(0, 7534))
+ .saturating_add(T::DbWeight::get().reads(3))
+ .saturating_add(T::DbWeight::get().writes(2))
+ }
+ /// Storage: MessageQueue BookStateFor (r:2 w:2)
+ /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen)
+ /// Storage: MessageQueue ServiceHead (r:1 w:1)
+ /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen)
+ fn ready_ring_unknit() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `184`
+ // Estimated: `7534`
+ // Minimum execution time: 11_194_000 picoseconds.
+ Weight::from_parts(11_194_000, 0)
+ .saturating_add(Weight::from_parts(0, 7534))
+ .saturating_add(T::DbWeight::get().reads(3))
+ .saturating_add(T::DbWeight::get().writes(3))
+ }
+ /// Storage: MessageQueue BookStateFor (r:1 w:1)
+ /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen)
+ fn service_queue_base() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `6`
+ // Estimated: `3517`
+ // Minimum execution time: 5_196_000 picoseconds.
+ Weight::from_parts(5_196_000, 0)
+ .saturating_add(Weight::from_parts(0, 3517))
+ .saturating_add(T::DbWeight::get().reads(1))
+ .saturating_add(T::DbWeight::get().writes(1))
+ }
+ /// Storage: MessageQueue Pages (r:1 w:1)
+ /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen)
+ fn service_page_base_completion() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `72`
+ // Estimated: `69050`
+ // Minimum execution time: 6_408_000 picoseconds.
+ Weight::from_parts(6_408_000, 0)
+ .saturating_add(Weight::from_parts(0, 69050))
+ .saturating_add(T::DbWeight::get().reads(1))
+ .saturating_add(T::DbWeight::get().writes(1))
+ }
+ /// Storage: MessageQueue Pages (r:1 w:1)
+ /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen)
+ fn service_page_base_no_completion() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `72`
+ // Estimated: `69050`
+ // Minimum execution time: 6_354_000 picoseconds.
+ Weight::from_parts(6_354_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn service_page_item() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 63_855_000 picoseconds. + Weight::from_parts(63_855_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: MessageQueue BookStateFor (r:1 w:0) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn bump_service_head() -> Weight { + // Proof Size summary in bytes: + // Measured: `99` + // Estimated: `5007` + // Minimum execution time: 6_764_000 picoseconds. + Weight::from_parts(6_764_000, 0) + .saturating_add(Weight::from_parts(0, 5007)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn reap_page() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `72567` + // Minimum execution time: 40_293_000 picoseconds. + Weight::from_parts(40_293_000, 0) + .saturating_add(Weight::from_parts(0, 72567)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn execute_overweight_page_removed() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `72567` + // Minimum execution time: 50_903_000 picoseconds. + Weight::from_parts(50_903_000, 0) + .saturating_add(Weight::from_parts(0, 72567)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn execute_overweight_page_updated() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `72567` + // Minimum execution time: 96_657_000 picoseconds. 
+ Weight::from_parts(96_657_000, 0) + .saturating_add(Weight::from_parts(0, 72567)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_multisig.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_multisig.rs index 4625c4f474ea2f807652e549898a0f38be510554..44f3da351f65fe43c04e29e45a69cb17221eba6b 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_multisig.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_multisig.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_multisig` //! diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_session.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_session.rs index 29bc576ebc80d13e02f52837a5eae57436f278ff..86ecc787e97c1c86d02c9478cf947d1af7a1a1a3 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_session.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_session.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_session` //! diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_timestamp.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_timestamp.rs index 8252834cc11e3cebb131332a92ffc80d25921dd4..a0984d72aaca375e798ee4545b3c261b8596f223 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_timestamp.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_timestamp.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_timestamp` //! diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_utility.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_utility.rs index 5205e9fff8593dda91a9f4b1433c1c5ff4fb3e55..2f04094b34787105b3ea1df10d7ce697198be792 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_utility.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_utility.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. 
If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_utility` //! diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_xcm.rs index ffc5fa2fc23ed2c4cb5a0624866dc800ab5ae1e3..b73c009cbda09645e222a339a7f187237f9f7eb3 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_xcm.rs @@ -17,27 +17,25 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-polkadot-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-polkadot-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=bridge-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/ +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=bridge-hub-polkadot-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,6 +48,8 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -64,10 +64,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 25_510_000 picoseconds. - Weight::from_parts(25_755_000, 0) + // Minimum execution time: 22_442_000 picoseconds. 
+ Weight::from_parts(23_346_000, 0) .saturating_add(Weight::from_parts(0, 3503)) - .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) @@ -76,8 +76,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1489` - // Minimum execution time: 24_125_000 picoseconds. - Weight::from_parts(25_559_000, 0) + // Minimum execution time: 19_655_000 picoseconds. + Weight::from_parts(20_086_000, 0) .saturating_add(Weight::from_parts(0, 1489)) .saturating_add(T::DbWeight::get().reads(1)) } @@ -107,8 +107,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_625_000 picoseconds. - Weight::from_parts(9_232_000, 0) + // Minimum execution time: 6_858_000 picoseconds. + Weight::from_parts(7_225_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -118,8 +118,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_690_000 picoseconds. - Weight::from_parts(2_906_000, 0) + // Minimum execution time: 2_099_000 picoseconds. + Weight::from_parts(2_190_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -127,6 +127,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -143,14 +145,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 30_131_000 picoseconds. - Weight::from_parts(31_138_000, 0) + // Minimum execution time: 27_073_000 picoseconds. 
+ Weight::from_parts(27_584_000, 0) .saturating_add(Weight::from_parts(0, 3503)) - .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -165,12 +169,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_unsubscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `220` - // Estimated: `3685` - // Minimum execution time: 32_411_000 picoseconds. - Weight::from_parts(33_009_000, 0) - .saturating_add(Weight::from_parts(0, 3685)) - .saturating_add(T::DbWeight::get().reads(6)) + // Measured: `255` + // Estimated: `3720` + // Minimum execution time: 29_949_000 picoseconds. + Weight::from_parts(30_760_000, 0) + .saturating_add(Weight::from_parts(0, 3720)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) @@ -179,8 +183,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_548_000 picoseconds. - Weight::from_parts(2_727_000, 0) + // Minimum execution time: 2_192_000 picoseconds. + Weight::from_parts(2_276_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -190,8 +194,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `95` // Estimated: `10985` - // Minimum execution time: 15_298_000 picoseconds. - Weight::from_parts(15_964_000, 0) + // Minimum execution time: 14_681_000 picoseconds. + Weight::from_parts(15_131_000, 0) .saturating_add(Weight::from_parts(0, 10985)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -202,8 +206,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `99` // Estimated: `10989` - // Minimum execution time: 14_927_000 picoseconds. - Weight::from_parts(15_528_000, 0) + // Minimum execution time: 14_523_000 picoseconds. + Weight::from_parts(15_113_000, 0) .saturating_add(Weight::from_parts(0, 10989)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -214,13 +218,15 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `13471` - // Minimum execution time: 16_409_000 picoseconds. - Weight::from_parts(16_960_000, 0) + // Minimum execution time: 15_989_000 picoseconds. 
+ Weight::from_parts(16_518_000, 0) .saturating_add(Weight::from_parts(0, 13471)) .saturating_add(T::DbWeight::get().reads(5)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -235,10 +241,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `6046` - // Minimum execution time: 28_204_000 picoseconds. - Weight::from_parts(28_641_000, 0) + // Minimum execution time: 25_127_000 picoseconds. + Weight::from_parts(25_773_000, 0) .saturating_add(Weight::from_parts(0, 6046)) - .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) @@ -247,8 +253,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `136` // Estimated: `8551` - // Minimum execution time: 8_576_000 picoseconds. - Weight::from_parts(8_895_000, 0) + // Minimum execution time: 8_352_000 picoseconds. + Weight::from_parts(8_592_000, 0) .saturating_add(Weight::from_parts(0, 8551)) .saturating_add(T::DbWeight::get().reads(3)) } @@ -258,14 +264,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `10996` - // Minimum execution time: 15_263_000 picoseconds. - Weight::from_parts(15_726_000, 0) + // Minimum execution time: 14_658_000 picoseconds. + Weight::from_parts(15_345_000, 0) .saturating_add(Weight::from_parts(0, 10996)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -280,10 +288,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `112` // Estimated: `11002` - // Minimum execution time: 34_186_000 picoseconds. - Weight::from_parts(35_204_000, 0) + // Minimum execution time: 31_478_000 picoseconds. 
+ Weight::from_parts(32_669_000, 0) .saturating_add(Weight::from_parts(0, 11002)) - .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn new_query() -> Weight { + // Proof Size summary in bytes: + // Measured: `32` + // Estimated: `1517` + // Minimum execution time: 4_066_000 picoseconds. + Weight::from_parts(4_267_000, 0) + .saturating_add(Weight::from_parts(0, 1517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::Queries` (r:1 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn take_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `7669` + // Estimated: `11134` + // Minimum execution time: 25_260_000 picoseconds. + Weight::from_parts(25_570_000, 0) + .saturating_add(Weight::from_parts(0, 11134)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/mod.rs index 4f8c2dec7a8c890f1b4cd9ccd2cf24e92cea3817..33a48f368122167c716155c21a3312d44c5e2dc7 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/mod.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
mod pallet_xcm_benchmarks_fungible; mod pallet_xcm_benchmarks_generic; @@ -61,16 +60,8 @@ impl XcmWeightInfo for BridgeHubPolkadotXcmWeight { fn withdraw_asset(assets: &MultiAssets) -> Weight { assets.weigh_multi_assets(XcmFungibleWeight::::withdraw_asset()) } - // Currently there is no trusted reserve (`IsReserve = ()`), - // but we need this hack for `pallet_xcm::reserve_transfer_assets` - // (TODO) fix https://github.com/paritytech/polkadot/pull/7424 - // (TODO) fix https://github.com/paritytech/polkadot/pull/7546 - fn reserve_asset_deposited(_assets: &MultiAssets) -> Weight { - // TODO: if we change `IsReserve = ...` then use this line... - // TODO: or if remote weight estimation is fixed, then remove - // TODO: hardcoded - fix https://github.com/paritytech/cumulus/issues/1974 - let hardcoded_weight = Weight::from_parts(1_000_000_000_u64, 0); - hardcoded_weight.min(XcmFungibleWeight::::reserve_asset_deposited()) + fn reserve_asset_deposited(assets: &MultiAssets) -> Weight { + assets.weigh_multi_assets(XcmFungibleWeight::::reserve_asset_deposited()) } fn receive_teleported_asset(assets: &MultiAssets) -> Weight { assets.weigh_multi_assets(XcmFungibleWeight::::receive_teleported_asset()) @@ -127,10 +118,7 @@ impl XcmWeightInfo for BridgeHubPolkadotXcmWeight { } fn deposit_asset(assets: &MultiAssetFilter, _dest: &MultiLocation) -> Weight { - // Hardcoded till the XCM pallet is fixed - let hardcoded_weight = Weight::from_parts(1_000_000_000_u64, 0); - let weight = assets.weigh_multi_assets(XcmFungibleWeight::::deposit_asset()); - hardcoded_weight.min(weight) + assets.weigh_multi_assets(XcmFungibleWeight::::deposit_asset()) } fn deposit_reserve_asset( assets: &MultiAssetFilter, @@ -154,10 +142,7 @@ impl XcmWeightInfo for BridgeHubPolkadotXcmWeight { _dest: &MultiLocation, _xcm: &Xcm<()>, ) -> Weight { - // Hardcoded till the XCM pallet is fixed - let hardcoded_weight = Weight::from_parts(200_000_000_u64, 0); - let weight = assets.weigh_multi_assets(XcmFungibleWeight::::initiate_teleport()); - hardcoded_weight.min(weight) + assets.weigh_multi_assets(XcmFungibleWeight::::initiate_teleport()) } fn report_holding(_response_info: &QueryResponseInfo, _assets: &MultiAssetFilter) -> Weight { XcmGeneric::::report_holding() diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs index 7c525dca051d277497e9c64395bd69d6fa5eabc2..814c416bd4c0cbf84e756392650a3a8432470428 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs @@ -1,44 +1,41 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. 
If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_xcm_benchmarks::fungible` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-polkadot-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-nbnwcyh-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-polkadot-dev"), DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --template=./templates/xcm-bench-template.hbs -// --chain=bridge-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm_benchmarks::fungible -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm_benchmarks::fungible +// --chain=bridge-hub-polkadot-dev +// --header=./cumulus/file_header.txt +// --template=./cumulus/templates/xcm-bench-template.hbs +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -56,8 +53,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `3593` - // Minimum execution time: 23_862_000 picoseconds. - Weight::from_parts(24_603_000, 3593) + // Minimum execution time: 24_237_000 picoseconds. + Weight::from_parts(24_697_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -67,8 +64,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `153` // Estimated: `6196` - // Minimum execution time: 51_101_000 picoseconds. - Weight::from_parts(51_976_000, 6196) + // Minimum execution time: 52_269_000 picoseconds. + Weight::from_parts(53_848_000, 6196) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -90,8 +87,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `223` // Estimated: `6196` - // Minimum execution time: 72_983_000 picoseconds. - Weight::from_parts(74_099_000, 6196) + // Minimum execution time: 77_611_000 picoseconds. 
+ Weight::from_parts(82_634_000, 6196) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -101,8 +98,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 500_000_000_000 picoseconds. - Weight::from_parts(500_000_000_000, 0) + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -120,8 +117,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3535` - // Minimum execution time: 27_131_000 picoseconds. - Weight::from_parts(28_062_000, 3535) + // Minimum execution time: 29_506_000 picoseconds. + Weight::from_parts(30_269_000, 3535) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -129,8 +126,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_564_000 picoseconds. - Weight::from_parts(3_738_000, 0) + // Minimum execution time: 3_541_000 picoseconds. + Weight::from_parts(3_629_000, 0) } // Storage: `System::Account` (r:1 w:1) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) @@ -138,8 +135,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `52` // Estimated: `3593` - // Minimum execution time: 24_453_000 picoseconds. - Weight::from_parts(25_216_000, 3593) + // Minimum execution time: 25_651_000 picoseconds. + Weight::from_parts(26_078_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -161,8 +158,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `122` // Estimated: `3593` - // Minimum execution time: 48_913_000 picoseconds. - Weight::from_parts(50_202_000, 3593) + // Minimum execution time: 52_050_000 picoseconds. + Weight::from_parts(53_293_000, 3593) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -182,8 +179,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3535` - // Minimum execution time: 27_592_000 picoseconds. - Weight::from_parts(28_099_000, 3535) + // Minimum execution time: 30_009_000 picoseconds. + Weight::from_parts(30_540_000, 3535) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index 7968649d143c1ca54d05285d0958379a2124fe7a..9a039a6d63b26c5d5d615980b40a0a99a2058834 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
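For readers unfamiliar with the autogenerated output above: each function returns a base `Weight::from_parts(ref_time_in_picoseconds, proof_size_in_bytes)` and then adds the cost of the storage accesses recorded during benchmarking through the runtime's `DbWeight`. An illustrative, self-contained sketch (function name hypothetical) using the `transfer_asset` figures shown above:

    use frame_support::weights::Weight;

    fn transfer_asset_weight_example<T: frame_system::Config>() -> Weight {
        // 53_848_000 ps of execution, 6196 bytes of proof size, plus 2 DB reads and 2 DB writes.
        Weight::from_parts(53_848_000, 6196)
            .saturating_add(T::DbWeight::get().reads(2))
            .saturating_add(T::DbWeight::get().writes(2))
    }
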
- -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_xcm_benchmarks::generic` //! diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/xcm_config.rs index 0965600c246810d7566bc9de08b60a787d175352..61eee1c4c5a7e5ff000b03e36ef2b7bc305d91bf 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/xcm_config.rs @@ -16,7 +16,8 @@ use super::{ AccountId, AllPalletsWithSystem, Balances, ParachainInfo, ParachainSystem, PolkadotXcm, - Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, WeightToFee, XcmpQueue, + Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, TransactionByteFee, WeightToFee, XcmpQueue, + CENTS, }; use frame_support::{ match_types, parameter_types, @@ -24,8 +25,9 @@ use frame_support::{ }; use frame_system::EnsureRoot; use pallet_xcm::XcmPassthrough; -use parachains_common::{impls::ToStakingPot, xcm_config::ConcreteNativeAssetFrom}; +use parachains_common::{impls::ToStakingPot, xcm_config::ConcreteAssetFromSystem}; use polkadot_parachain_primitives::primitives::Sibling; +use polkadot_runtime_common::xcm_sender::ExponentialPrice; use xcm::latest::prelude::*; use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, @@ -150,7 +152,7 @@ impl Contains for SafeCallFilter { pallet_collator_selection::Call::remove_invulnerable { .. }, ) | RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | RuntimeCall::XcmpQueue(..) | - RuntimeCall::DmpQueue(..) + RuntimeCall::MessageQueue(..) ) } } @@ -165,7 +167,7 @@ pub type Barrier = TrailingSetTopicAsId< AllowKnownQueryResponses, WithComputedOrigin< ( - // If the message is one that immediately attemps to pay for execution, then + // If the message is one that immediately attempts to pay for execution, then // allow it. AllowTopLevelPaidExecutionFrom, // Parent, its pluralities (i.e. governance bodies), and the Fellows plurality @@ -181,6 +183,10 @@ pub type Barrier = TrailingSetTopicAsId< >, >; +/// Cases where a remote origin is accepted as trusted Teleporter for a given asset: +/// - DOT with the parent Relay Chain and sibling parachains. +pub type TrustedTeleporters = ConcreteAssetFromSystem; + pub struct XcmConfig; impl xcm_executor::Config for XcmConfig { type RuntimeCall = RuntimeCall; @@ -190,8 +196,7 @@ impl xcm_executor::Config for XcmConfig { // BridgeHub does not recognize a reserve location for any asset. Users must teleport DOT // where allowed (e.g. with the Relay Chain). 
type IsReserve = (); - /// Only allow teleportation of DOT. - type IsTeleporter = ConcreteNativeAssetFrom; + type IsTeleporter = TrustedTeleporters; type UniversalLocation = UniversalLocation; type Barrier = Barrier; type Weigher = WeightInfoBounds< @@ -221,20 +226,25 @@ impl xcm_executor::Config for XcmConfig { /// Forms the basis for local origins sending/executing XCMs. pub type LocalOriginToLocation = SignedToAccountId32; +parameter_types! { + /// The asset ID for the asset that we use to pay for message delivery fees. + pub FeeAssetId: AssetId = Concrete(DotRelayLocation::get()); + /// The base fee for the message delivery fees. + pub const BaseDeliveryFee: u128 = CENTS.saturating_mul(3); +} + +pub type PriceForParentDelivery = + ExponentialPrice; + /// The means for routing XCM messages which are not for local execution into the right message /// queues. pub type XcmRouter = WithUniqueTopic<( // Two routers - use UMP to communicate with the relay chain: - cumulus_primitives_utility::ParentAsUmp, + cumulus_primitives_utility::ParentAsUmp, // ..and XCMP to communicate with the sibling chains. XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; // We want to disallow users sending (arbitrary) XCMs from this chain. @@ -263,8 +273,6 @@ impl pallet_xcm::Config for Runtime { type SovereignAccountOf = LocationToAccountId; type MaxLockers = ConstU32<8>; type WeightInfo = crate::weights::pallet_xcm::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/tests/tests.rs index 0be87bd46facfc079cda4a1ea4cf6b7a34114d75..3156a5fe68e52758d9432d0f19cb301a7fcd8c3c 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/tests/tests.rs @@ -47,11 +47,5 @@ bridge_hub_test_utils::test_cases::include_teleports_for_native_asset_works!( _ => None, } }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }), 1002 ); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml index fec00fe0794c5202900da8570b2c174b16769f39..c475768d5dde598b6f691b9948f8f6bc649a11e7 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml @@ -4,6 +4,7 @@ version = "0.1.0" authors.workspace = true edition.workspace = true description = "Rococo's BridgeHub parachain runtime" +license = "Apache-2.0" [build-dependencies] substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } @@ -12,7 +13,7 @@ substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } hex-literal = { version = "0.4.1" } log = { version = "0.4.20", default-features = false } -scale-info = { version = "2.9.0", default-features = false, features = 
["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.188", optional = true, features = ["derive"] } smallvec = "1.11.0" @@ -28,6 +29,7 @@ pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false} pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false} pallet-session = { path = "../../../../../substrate/frame/session", default-features = false} +pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false} pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false} pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false} @@ -65,16 +67,18 @@ cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue", default-fea cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false} cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } +cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false, features = ["bridging"] } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachain-info = { path = "../../../pallets/parachain-info", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } # Bridges +bp-asset-hub-rococo = { path = "../../../../../bridges/primitives/chain-asset-hub-rococo", default-features = false } +bp-asset-hub-westend = { path = "../../../../../bridges/primitives/chain-asset-hub-westend", default-features = false } bp-bridge-hub-rococo = { path = "../../../../../bridges/primitives/chain-bridge-hub-rococo", default-features = false } -bp-bridge-hub-wococo = { path = "../../../../../bridges/primitives/chain-bridge-hub-wococo", default-features = false } +bp-bridge-hub-westend = { path = "../../../../../bridges/primitives/chain-bridge-hub-westend", default-features = false } bp-header-chain = { path = "../../../../../bridges/primitives/header-chain", default-features = false } bp-messages = { path = "../../../../../bridges/primitives/messages", default-features = false } bp-parachains = { path = "../../../../../bridges/primitives/parachains", default-features = false } @@ -82,7 +86,7 @@ bp-polkadot-core = { path = "../../../../../bridges/primitives/polkadot-core", d bp-relayers = { path = "../../../../../bridges/primitives/relayers", default-features = false } bp-runtime = { path = "../../../../../bridges/primitives/runtime", default-features = false } bp-rococo = { path = "../../../../../bridges/primitives/chain-rococo", 
default-features = false } -bp-wococo = { path = "../../../../../bridges/primitives/chain-wococo", default-features = false } +bp-westend = { path = "../../../../../bridges/primitives/chain-westend", default-features = false } pallet-bridge-grandpa = { path = "../../../../../bridges/modules/grandpa", default-features = false } pallet-bridge-messages = { path = "../../../../../bridges/modules/messages", default-features = false } pallet-bridge-parachains = { path = "../../../../../bridges/modules/parachains", default-features = false } @@ -98,8 +102,10 @@ sp-keyring = { path = "../../../../../substrate/primitives/keyring" } [features] default = [ "std" ] std = [ + "bp-asset-hub-rococo/std", + "bp-asset-hub-westend/std", "bp-bridge-hub-rococo/std", - "bp-bridge-hub-wococo/std", + "bp-bridge-hub-westend/std", "bp-header-chain/std", "bp-messages/std", "bp-parachains/std", @@ -107,7 +113,7 @@ std = [ "bp-relayers/std", "bp-rococo/std", "bp-runtime/std", - "bp-wococo/std", + "bp-westend/std", "bridge-runtime-common/std", "codec/std", "cumulus-pallet-aura-ext/std", @@ -134,6 +140,7 @@ std = [ "pallet-bridge-parachains/std", "pallet-bridge-relayers/std", "pallet-collator-selection/std", + "pallet-message-queue/std", "pallet-multisig/std", "pallet-session/std", "pallet-timestamp/std", @@ -172,9 +179,11 @@ std = [ runtime-benchmarks = [ "bridge-runtime-common/runtime-benchmarks", + "cumulus-pallet-dmp-queue/runtime-benchmarks", "cumulus-pallet-parachain-system/runtime-benchmarks", "cumulus-pallet-session-benchmarking/runtime-benchmarks", "cumulus-pallet-xcmp-queue/runtime-benchmarks", + "cumulus-primitives-core/runtime-benchmarks", "cumulus-primitives-utility/runtime-benchmarks", "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", @@ -186,6 +195,7 @@ runtime-benchmarks = [ "pallet-bridge-parachains/runtime-benchmarks", "pallet-bridge-relayers/runtime-benchmarks", "pallet-collator-selection/runtime-benchmarks", + "pallet-message-queue/runtime-benchmarks", "pallet-multisig/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "pallet-utility/runtime-benchmarks", @@ -217,6 +227,7 @@ try-runtime = [ "pallet-bridge-parachains/try-runtime", "pallet-bridge-relayers/try-runtime", "pallet-collator-selection/try-runtime", + "pallet-message-queue/try-runtime", "pallet-multisig/try-runtime", "pallet-session/try-runtime", "pallet-timestamp/try-runtime", @@ -229,3 +240,8 @@ try-runtime = [ ] experimental = [ "pallet-aura/experimental" ] + +# A feature that should be enabled when the runtime should be built for on-chain +# deployment. This will disable stuff that shouldn't be part of the on-chain wasm +# to make it smaller like logging for example. +on-chain-release-build = [ "sp-api/disable-logging" ] diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_common_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_common_config.rs new file mode 100644 index 0000000000000000000000000000000000000000..8153e52beacc359705542a0b3e03952c975c82e0 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_common_config.rs @@ -0,0 +1,80 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Bridge definitions that can be used by multiple BridgeHub flavors. +//! All configurations here should be dedicated to a single chain; in other words, we don't need two +//! chains for a single pallet configuration. +//! +//! For example, the messaging pallet needs to know the sending and receiving chains, but the +//! GRANDPA tracking pallet only needs to be aware of one chain. + +use super::{weights, AccountId, Balance, Balances, BlockNumber, Runtime, RuntimeEvent}; +use bp_parachains::SingleParaStoredHeaderDataBuilder; +use frame_support::{parameter_types, traits::ConstU32}; + +parameter_types! { + pub const RelayChainHeadersToKeep: u32 = 1024; + pub const ParachainHeadsToKeep: u32 = 64; + + pub const WestendBridgeParachainPalletName: &'static str = "Paras"; + pub const MaxWestendParaHeadDataSize: u32 = bp_westend::MAX_NESTED_PARACHAIN_HEAD_DATA_SIZE; + + pub storage RequiredStakeForStakeAndSlash: Balance = 1_000_000; + pub const RelayerStakeLease: u32 = 8; + pub const RelayerStakeReserveId: [u8; 8] = *b"brdgrlrs"; + + pub storage DeliveryRewardInBalance: u64 = 1_000_000; +} + +/// Add GRANDPA bridge pallet to track Westend relay chain. +pub type BridgeGrandpaWestendInstance = pallet_bridge_grandpa::Instance3; +impl pallet_bridge_grandpa::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type BridgedChain = bp_westend::Westend; + type MaxFreeMandatoryHeadersPerBlock = ConstU32<4>; + type HeadersToKeep = RelayChainHeadersToKeep; + type WeightInfo = weights::pallet_bridge_grandpa::WeightInfo; +} + +/// Add parachain bridge pallet to track Westend BridgeHub parachain +pub type BridgeParachainWestendInstance = pallet_bridge_parachains::Instance3; +impl pallet_bridge_parachains::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = weights::pallet_bridge_parachains::WeightInfo; + type BridgesGrandpaPalletInstance = BridgeGrandpaWestendInstance; + type ParasPalletName = WestendBridgeParachainPalletName; + type ParaStoredHeaderDataBuilder = + SingleParaStoredHeaderDataBuilder; + type HeadsToKeep = ParachainHeadsToKeep; + type MaxParaHeadDataSize = MaxWestendParaHeadDataSize; +} + +/// Allows collect and claim rewards for relayers +impl pallet_bridge_relayers::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Reward = Balance; + type PaymentProcedure = + bp_relayers::PayRewardFromAccount, AccountId>; + type StakeAndSlash = pallet_bridge_relayers::StakeAndSlashNamed< + AccountId, + BlockNumber, + Balances, + RelayerStakeReserveId, + RequiredStakeForStakeAndSlash, + RelayerStakeLease, + >; + type WeightInfo = weights::pallet_bridge_relayers::WeightInfo; +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_hub_rococo_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_hub_rococo_config.rs deleted file mode 100644 index f59c9e238f5f276e233d357a66a9de87bb6c8fbc..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_hub_rococo_config.rs +++ /dev/null @@ -1,219 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. 
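The new `bridge_common_config.rs` above mixes two flavors of `parameter_types!`: `pub const` values (e.g. `RelayChainHeadersToKeep`) that are baked into the runtime at compile time, and `pub storage` values (e.g. `DeliveryRewardInBalance`, `RequiredStakeForStakeAndSlash`) whose getters read from a storage key, so they can be retuned without a runtime upgrade. A minimal editorial sketch with illustrative names, not taken from this diff:

    use frame_support::parameter_types;

    parameter_types! {
        // Fixed at compile time; changing it requires a runtime upgrade.
        pub const ExampleHeadersToKeep: u32 = 1024;
        // Read from storage at runtime, falling back to this default when the key is unset.
        pub storage ExampleDeliveryReward: u64 = 1_000_000;
    }
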
- -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Bridge definitions that are used on Rococo to bridge with Wococo. - -use crate::{ - BridgeParachainWococoInstance, BridgeWococoMessages, ParachainInfo, Runtime, - WithBridgeHubWococoMessagesInstance, XcmRouter, -}; -use bp_messages::LaneId; -use bridge_runtime_common::{ - messages, - messages::{ - source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, - MessageBridge, ThisChainWithMessages, UnderlyingChainProvider, - }, - messages_xcm_extension::{SenderAndLane, XcmBlobHauler, XcmBlobHaulerAdapter}, - refund_relayer_extension::{ - ActualFeeRefund, RefundBridgedParachainMessages, RefundSignedExtensionAdapter, - RefundableMessagesLane, RefundableParachain, - }, -}; -use frame_support::{parameter_types, traits::PalletInfoAccess}; -use sp_runtime::RuntimeDebug; -use xcm::{ - latest::prelude::*, - prelude::{InteriorMultiLocation, NetworkId}, -}; -use xcm_builder::{BridgeBlobDispatcher, HaulBlobExporter}; - -parameter_types! { - pub const MaxUnrewardedRelayerEntriesAtInboundLane: bp_messages::MessageNonce = - bp_bridge_hub_rococo::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX; - pub const MaxUnconfirmedMessagesAtInboundLane: bp_messages::MessageNonce = - bp_bridge_hub_rococo::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; - pub const BridgeHubWococoChainId: bp_runtime::ChainId = bp_runtime::BRIDGE_HUB_WOCOCO_CHAIN_ID; - pub BridgeWococoMessagesPalletInstance: InteriorMultiLocation = X1(PalletInstance(::index() as u8)); - pub BridgeHubRococoUniversalLocation: InteriorMultiLocation = X2(GlobalConsensus(Rococo), Parachain(ParachainInfo::parachain_id().into())); - pub WococoGlobalConsensusNetwork: NetworkId = NetworkId::Wococo; - pub ActiveOutboundLanesToBridgeHubWococo: &'static [bp_messages::LaneId] = &[DEFAULT_XCM_LANE_TO_BRIDGE_HUB_WOCOCO]; - pub PriorityBoostPerMessage: u64 = 921_900_294; - - pub FromAssetHubRococoToAssetHubWococoRoute: SenderAndLane = SenderAndLane::new( - ParentThen(X1(Parachain(1000))).into(), - DEFAULT_XCM_LANE_TO_BRIDGE_HUB_WOCOCO, - ); -} - -/// Proof of messages, coming from Wococo. -pub type FromWococoBridgeHubMessagesProof = - FromBridgedChainMessagesProof; -/// Messages delivery proof for Rococo Bridge Hub -> Wococo Bridge Hub messages. 
-pub type ToWococoBridgeHubMessagesDeliveryProof = - FromBridgedChainMessagesDeliveryProof; - -/// Dispatches received XCM messages from other bridge -pub type OnBridgeHubRococoBlobDispatcher = BridgeBlobDispatcher< - XcmRouter, - BridgeHubRococoUniversalLocation, - BridgeWococoMessagesPalletInstance, ->; - -/// Export XCM messages to be relayed to the otherside -pub type ToBridgeHubWococoHaulBlobExporter = HaulBlobExporter< - XcmBlobHaulerAdapter, - WococoGlobalConsensusNetwork, - (), ->; -pub struct ToBridgeHubWococoXcmBlobHauler; -impl XcmBlobHauler for ToBridgeHubWococoXcmBlobHauler { - type Runtime = Runtime; - type MessagesInstance = WithBridgeHubWococoMessagesInstance; - type SenderAndLane = FromAssetHubRococoToAssetHubWococoRoute; - - type ToSourceChainSender = crate::XcmRouter; - type CongestedMessage = (); - type UncongestedMessage = (); -} -pub const DEFAULT_XCM_LANE_TO_BRIDGE_HUB_WOCOCO: LaneId = LaneId([0, 0, 0, 1]); - -/// Messaging Bridge configuration for BridgeHubRococo -> BridgeHubWococo -pub struct WithBridgeHubWococoMessageBridge; -impl MessageBridge for WithBridgeHubWococoMessageBridge { - const BRIDGED_MESSAGES_PALLET_NAME: &'static str = - bp_bridge_hub_rococo::WITH_BRIDGE_HUB_ROCOCO_MESSAGES_PALLET_NAME; - type ThisChain = BridgeHubRococo; - type BridgedChain = BridgeHubWococo; - type BridgedHeaderChain = pallet_bridge_parachains::ParachainHeaders< - Runtime, - BridgeParachainWococoInstance, - bp_bridge_hub_wococo::BridgeHubWococo, - >; -} - -/// Message verifier for BridgeHubWococo messages sent from BridgeHubRococo -pub type ToBridgeHubWococoMessageVerifier = - messages::source::FromThisChainMessageVerifier; - -/// Maximal outbound payload size of BridgeHubRococo -> BridgeHubWococo messages. -pub type ToBridgeHubWococoMaximalOutboundPayloadSize = - messages::source::FromThisChainMaximalOutboundPayloadSize; - -/// BridgeHubWococo chain from message lane point of view. -#[derive(RuntimeDebug, Clone, Copy)] -pub struct BridgeHubWococo; - -impl UnderlyingChainProvider for BridgeHubWococo { - type Chain = bp_bridge_hub_wococo::BridgeHubWococo; -} - -impl messages::BridgedChainWithMessages for BridgeHubWococo {} - -/// BridgeHubRococo chain from message lane point of view. -#[derive(RuntimeDebug, Clone, Copy)] -pub struct BridgeHubRococo; - -impl UnderlyingChainProvider for BridgeHubRococo { - type Chain = bp_bridge_hub_rococo::BridgeHubRococo; -} - -impl ThisChainWithMessages for BridgeHubRococo { - type RuntimeOrigin = crate::RuntimeOrigin; -} - -/// Signed extension that refunds relayers that are delivering messages from the Wococo parachain. -pub type BridgeRefundBridgeHubWococoMessages = RefundSignedExtensionAdapter< - RefundBridgedParachainMessages< - Runtime, - RefundableParachain, - RefundableMessagesLane, - ActualFeeRefund, - PriorityBoostPerMessage, - StrBridgeRefundBridgeHubWococoMessages, - >, ->; -bp_runtime::generate_static_str_provider!(BridgeRefundBridgeHubWococoMessages); - -parameter_types! 
{ - pub const BridgeHubWococoMessagesLane: bp_messages::LaneId = DEFAULT_XCM_LANE_TO_BRIDGE_HUB_WOCOCO; -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::BridgeGrandpaWococoInstance; - use bridge_runtime_common::{ - assert_complete_bridge_types, - integrity::{ - assert_complete_bridge_constants, check_message_lane_weights, - AssertBridgeMessagesPalletConstants, AssertBridgePalletNames, AssertChainConstants, - AssertCompleteBridgeConstants, - }, - }; - - #[test] - fn ensure_bridge_hub_rococo_message_lane_weights_are_correct() { - check_message_lane_weights::< - bp_bridge_hub_rococo::BridgeHubRococo, - Runtime, - WithBridgeHubWococoMessagesInstance, - >( - bp_bridge_hub_wococo::EXTRA_STORAGE_PROOF_SIZE, - bp_bridge_hub_rococo::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, - bp_bridge_hub_rococo::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, - true, - ); - } - - #[test] - fn ensure_bridge_integrity() { - assert_complete_bridge_types!( - runtime: Runtime, - with_bridged_chain_grandpa_instance: BridgeGrandpaWococoInstance, - with_bridged_chain_messages_instance: WithBridgeHubWococoMessagesInstance, - bridge: WithBridgeHubWococoMessageBridge, - this_chain: bp_rococo::Rococo, - bridged_chain: bp_wococo::Wococo, - ); - - assert_complete_bridge_constants::< - Runtime, - BridgeGrandpaWococoInstance, - WithBridgeHubWococoMessagesInstance, - WithBridgeHubWococoMessageBridge, - >(AssertCompleteBridgeConstants { - this_chain_constants: AssertChainConstants { - block_length: bp_bridge_hub_rococo::BlockLength::get(), - block_weights: bp_bridge_hub_rococo::BlockWeights::get(), - }, - messages_pallet_constants: AssertBridgeMessagesPalletConstants { - max_unrewarded_relayers_in_bridged_confirmation_tx: - bp_bridge_hub_wococo::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, - max_unconfirmed_messages_in_bridged_confirmation_tx: - bp_bridge_hub_wococo::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, - bridged_chain_id: bp_runtime::BRIDGE_HUB_WOCOCO_CHAIN_ID, - }, - pallet_names: AssertBridgePalletNames { - with_this_chain_messages_pallet_name: - bp_bridge_hub_rococo::WITH_BRIDGE_HUB_ROCOCO_MESSAGES_PALLET_NAME, - with_bridged_chain_grandpa_pallet_name: bp_wococo::WITH_WOCOCO_GRANDPA_PALLET_NAME, - with_bridged_chain_messages_pallet_name: - bp_bridge_hub_wococo::WITH_BRIDGE_HUB_WOCOCO_MESSAGES_PALLET_NAME, - }, - }); - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_hub_wococo_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_hub_wococo_config.rs deleted file mode 100644 index a0b16bace51d0c480cc4569f2d2ab8074150af16..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_hub_wococo_config.rs +++ /dev/null @@ -1,219 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Bridge definitions that are used on Wococo to bridge with Rococo. 
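Both deleted configurations (and the Westend replacement introduced further below) pin the Asset Hub sibling, parachain 1000, to a single static lane via `SenderAndLane`. A toy editorial sketch of what that pairing expresses, using local stand-in types rather than the real `bp_messages`/`bridge_runtime_common` APIs:

    /// Stand-in for `bp_messages::LaneId`: a static 4-byte lane identifier.
    #[derive(Clone, Copy, PartialEq, Eq, Debug)]
    struct LaneId([u8; 4]);

    /// Stand-in for `SenderAndLane`: messages from this sender always travel on this lane.
    struct SenderAndLane {
        sender_para_id: u32,
        lane: LaneId,
    }

    fn main() {
        // Mirrors `SenderAndLane::new(ParentThen(X1(Parachain(1000))).into(), LaneId([0, 0, 0, 1]))`.
        let route = SenderAndLane { sender_para_id: 1000, lane: LaneId([0, 0, 0, 1]) };
        println!("messages from para {} use lane {:?}", route.sender_para_id, route.lane);
    }
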
- -use crate::{ - BridgeParachainRococoInstance, BridgeRococoMessages, ParachainInfo, Runtime, - WithBridgeHubRococoMessagesInstance, XcmRouter, -}; -use bp_messages::LaneId; -use bridge_runtime_common::{ - messages, - messages::{ - source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, - MessageBridge, ThisChainWithMessages, UnderlyingChainProvider, - }, - messages_xcm_extension::{SenderAndLane, XcmBlobHauler, XcmBlobHaulerAdapter}, - refund_relayer_extension::{ - ActualFeeRefund, RefundBridgedParachainMessages, RefundSignedExtensionAdapter, - RefundableMessagesLane, RefundableParachain, - }, -}; -use frame_support::{parameter_types, traits::PalletInfoAccess}; -use sp_runtime::RuntimeDebug; -use xcm::{ - latest::prelude::*, - prelude::{InteriorMultiLocation, NetworkId}, -}; -use xcm_builder::{BridgeBlobDispatcher, HaulBlobExporter}; - -parameter_types! { - pub const MaxUnrewardedRelayerEntriesAtInboundLane: bp_messages::MessageNonce = - bp_bridge_hub_wococo::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX; - pub const MaxUnconfirmedMessagesAtInboundLane: bp_messages::MessageNonce = - bp_bridge_hub_wococo::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; - pub const BridgeHubRococoChainId: bp_runtime::ChainId = bp_runtime::BRIDGE_HUB_ROCOCO_CHAIN_ID; - pub BridgeHubWococoUniversalLocation: InteriorMultiLocation = X2(GlobalConsensus(Wococo), Parachain(ParachainInfo::parachain_id().into())); - pub BridgeRococoMessagesPalletInstance: InteriorMultiLocation = X1(PalletInstance(::index() as u8)); - pub RococoGlobalConsensusNetwork: NetworkId = NetworkId::Rococo; - pub ActiveOutboundLanesToBridgeHubRococo: &'static [bp_messages::LaneId] = &[DEFAULT_XCM_LANE_TO_BRIDGE_HUB_ROCOCO]; - pub PriorityBoostPerMessage: u64 = 921_900_294; - - pub FromAssetHubWococoToAssetHubRococoRoute: SenderAndLane = SenderAndLane::new( - ParentThen(X1(Parachain(1000))).into(), - DEFAULT_XCM_LANE_TO_BRIDGE_HUB_ROCOCO, - ); -} - -/// Proof of messages, coming from Rococo. -pub type FromRococoBridgeHubMessagesProof = - FromBridgedChainMessagesProof; -/// Messages delivery proof for Rococo Bridge Hub -> Wococo Bridge Hub messages. 
-pub type ToRococoBridgeHubMessagesDeliveryProof = - FromBridgedChainMessagesDeliveryProof; - -/// Dispatches received XCM messages from other bridge -pub type OnBridgeHubWococoBlobDispatcher = BridgeBlobDispatcher< - XcmRouter, - BridgeHubWococoUniversalLocation, - BridgeRococoMessagesPalletInstance, ->; - -/// Export XCM messages to be relayed to the otherside -pub type ToBridgeHubRococoHaulBlobExporter = HaulBlobExporter< - XcmBlobHaulerAdapter, - RococoGlobalConsensusNetwork, - (), ->; -pub struct ToBridgeHubRococoXcmBlobHauler; -impl XcmBlobHauler for ToBridgeHubRococoXcmBlobHauler { - type Runtime = Runtime; - type MessagesInstance = WithBridgeHubRococoMessagesInstance; - type SenderAndLane = FromAssetHubWococoToAssetHubRococoRoute; - - type ToSourceChainSender = crate::XcmRouter; - type CongestedMessage = (); - type UncongestedMessage = (); -} -pub const DEFAULT_XCM_LANE_TO_BRIDGE_HUB_ROCOCO: LaneId = LaneId([0, 0, 0, 1]); - -/// Messaging Bridge configuration for BridgeHubWococo -> BridgeHubRococo -pub struct WithBridgeHubRococoMessageBridge; -impl MessageBridge for WithBridgeHubRococoMessageBridge { - const BRIDGED_MESSAGES_PALLET_NAME: &'static str = - bp_bridge_hub_wococo::WITH_BRIDGE_HUB_WOCOCO_MESSAGES_PALLET_NAME; - type ThisChain = BridgeHubWococo; - type BridgedChain = BridgeHubRococo; - type BridgedHeaderChain = pallet_bridge_parachains::ParachainHeaders< - Runtime, - BridgeParachainRococoInstance, - bp_bridge_hub_rococo::BridgeHubRococo, - >; -} - -/// Message verifier for BridgeHubRococo messages sent from BridgeHubWococo -pub type ToBridgeHubRococoMessageVerifier = - messages::source::FromThisChainMessageVerifier; - -/// Maximal outbound payload size of BridgeHubWococo -> BridgeHubRococo messages. -pub type ToBridgeHubRococoMaximalOutboundPayloadSize = - messages::source::FromThisChainMaximalOutboundPayloadSize; - -/// BridgeHubRococo chain from message lane point of view. -#[derive(RuntimeDebug, Clone, Copy)] -pub struct BridgeHubRococo; - -impl UnderlyingChainProvider for BridgeHubRococo { - type Chain = bp_bridge_hub_rococo::BridgeHubRococo; -} - -impl messages::BridgedChainWithMessages for BridgeHubRococo {} - -/// BridgeHubWococo chain from message lane point of view. -#[derive(RuntimeDebug, Clone, Copy)] -pub struct BridgeHubWococo; - -impl UnderlyingChainProvider for BridgeHubWococo { - type Chain = bp_bridge_hub_wococo::BridgeHubWococo; -} - -impl ThisChainWithMessages for BridgeHubWococo { - type RuntimeOrigin = crate::RuntimeOrigin; -} - -/// Signed extension that refunds relayers that are delivering messages from the Rococo parachain. -pub type BridgeRefundBridgeHubRococoMessages = RefundSignedExtensionAdapter< - RefundBridgedParachainMessages< - Runtime, - RefundableParachain, - RefundableMessagesLane, - ActualFeeRefund, - PriorityBoostPerMessage, - StrBridgeRefundBridgeHubRococoMessages, - >, ->; -bp_runtime::generate_static_str_provider!(BridgeRefundBridgeHubRococoMessages); - -parameter_types! 
{ - pub const BridgeHubRococoMessagesLane: bp_messages::LaneId = DEFAULT_XCM_LANE_TO_BRIDGE_HUB_ROCOCO; -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::BridgeGrandpaRococoInstance; - use bridge_runtime_common::{ - assert_complete_bridge_types, - integrity::{ - assert_complete_bridge_constants, check_message_lane_weights, - AssertBridgeMessagesPalletConstants, AssertBridgePalletNames, AssertChainConstants, - AssertCompleteBridgeConstants, - }, - }; - - #[test] - fn ensure_bridge_hub_wococo_message_lane_weights_are_correct() { - check_message_lane_weights::< - bp_bridge_hub_wococo::BridgeHubWococo, - Runtime, - WithBridgeHubRococoMessagesInstance, - >( - bp_bridge_hub_rococo::EXTRA_STORAGE_PROOF_SIZE, - bp_bridge_hub_wococo::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, - bp_bridge_hub_wococo::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, - true, - ); - } - - #[test] - fn ensure_bridge_integrity() { - assert_complete_bridge_types!( - runtime: Runtime, - with_bridged_chain_grandpa_instance: BridgeGrandpaRococoInstance, - with_bridged_chain_messages_instance: WithBridgeHubRococoMessagesInstance, - bridge: WithBridgeHubRococoMessageBridge, - this_chain: bp_wococo::Wococo, - bridged_chain: bp_rococo::Rococo, - ); - - assert_complete_bridge_constants::< - Runtime, - BridgeGrandpaRococoInstance, - WithBridgeHubRococoMessagesInstance, - WithBridgeHubRococoMessageBridge, - >(AssertCompleteBridgeConstants { - this_chain_constants: AssertChainConstants { - block_length: bp_bridge_hub_wococo::BlockLength::get(), - block_weights: bp_bridge_hub_wococo::BlockWeights::get(), - }, - messages_pallet_constants: AssertBridgeMessagesPalletConstants { - max_unrewarded_relayers_in_bridged_confirmation_tx: - bp_bridge_hub_rococo::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, - max_unconfirmed_messages_in_bridged_confirmation_tx: - bp_bridge_hub_rococo::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, - bridged_chain_id: bp_runtime::BRIDGE_HUB_ROCOCO_CHAIN_ID, - }, - pallet_names: AssertBridgePalletNames { - with_this_chain_messages_pallet_name: - bp_bridge_hub_wococo::WITH_BRIDGE_HUB_WOCOCO_MESSAGES_PALLET_NAME, - with_bridged_chain_grandpa_pallet_name: bp_rococo::WITH_ROCOCO_GRANDPA_PALLET_NAME, - with_bridged_chain_messages_pallet_name: - bp_bridge_hub_rococo::WITH_BRIDGE_HUB_ROCOCO_MESSAGES_PALLET_NAME, - }, - }); - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs new file mode 100644 index 0000000000000000000000000000000000000000..f3c1c9597b52194aa07bec1bda79664868b1a993 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs @@ -0,0 +1,322 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! 
Bridge definitions used on BridgeHub with the Rococo flavor for bridging to BridgeHubWestend. + +use crate::{ + bridge_common_config::{BridgeParachainWestendInstance, DeliveryRewardInBalance}, + weights, AccountId, BridgeWestendMessages, ParachainInfo, Runtime, RuntimeEvent, RuntimeOrigin, + XcmRouter, +}; +use bp_messages::LaneId; +use bridge_runtime_common::{ + messages, + messages::{ + source::{FromBridgedChainMessagesDeliveryProof, TargetHeaderChainAdapter}, + target::{FromBridgedChainMessagesProof, SourceHeaderChainAdapter}, + MessageBridge, ThisChainWithMessages, UnderlyingChainProvider, + }, + messages_xcm_extension::{ + SenderAndLane, XcmAsPlainPayload, XcmBlobHauler, XcmBlobHaulerAdapter, + XcmBlobMessageDispatch, + }, + refund_relayer_extension::{ + ActualFeeRefund, RefundBridgedParachainMessages, RefundSignedExtensionAdapter, + RefundableMessagesLane, RefundableParachain, + }, +}; + +use codec::Encode; +use frame_support::{parameter_types, traits::PalletInfoAccess}; +use sp_runtime::RuntimeDebug; +use xcm::{ + latest::prelude::*, + prelude::{InteriorMultiLocation, NetworkId}, +}; +use xcm_builder::{BridgeBlobDispatcher, HaulBlobExporter}; + +parameter_types! { + pub const MaxUnrewardedRelayerEntriesAtInboundLane: bp_messages::MessageNonce = + bp_bridge_hub_rococo::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX; + pub const MaxUnconfirmedMessagesAtInboundLane: bp_messages::MessageNonce = + bp_bridge_hub_rococo::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; + pub const BridgeHubWestendChainId: bp_runtime::ChainId = bp_runtime::BRIDGE_HUB_WESTEND_CHAIN_ID; + pub BridgeRococoToWestendMessagesPalletInstance: InteriorMultiLocation = X1(PalletInstance(::index() as u8)); + pub BridgeHubRococoUniversalLocation: InteriorMultiLocation = X2(GlobalConsensus(Rococo), Parachain(ParachainInfo::parachain_id().into())); + pub WestendGlobalConsensusNetwork: NetworkId = NetworkId::Westend; + pub ActiveOutboundLanesToBridgeHubWestend: &'static [bp_messages::LaneId] = &[XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WESTEND]; + pub const AssetHubRococoToAssetHubWestendMessagesLane: bp_messages::LaneId = XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WESTEND; + // see the `FEE_BOOST_PER_MESSAGE` constant to get the meaning of this value + pub PriorityBoostPerMessage: u64 = 182_044_444_444_444; + + pub AssetHubRococoParaId: cumulus_primitives_core::ParaId = bp_asset_hub_rococo::ASSET_HUB_ROCOCO_PARACHAIN_ID.into(); + pub AssetHubWestendParaId: cumulus_primitives_core::ParaId = bp_asset_hub_westend::ASSET_HUB_WESTEND_PARACHAIN_ID.into(); + + pub FromAssetHubRococoToAssetHubWestendRoute: SenderAndLane = SenderAndLane::new( + ParentThen(X1(Parachain(AssetHubRococoParaId::get().into()))).into(), + XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WESTEND, + ); + + pub CongestedMessage: Xcm<()> = build_congestion_message(true).into(); + + pub UncongestedMessage: Xcm<()> = build_congestion_message(false).into(); +} +pub const XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WESTEND: LaneId = LaneId([0, 0, 0, 2]); + +fn build_congestion_message(is_congested: bool) -> sp_std::vec::Vec> { + sp_std::vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + Transact { + origin_kind: OriginKind::Xcm, + require_weight_at_most: + bp_asset_hub_rococo::XcmBridgeHubRouterTransactCallMaxWeight::get(), + call: bp_asset_hub_rococo::Call::ToWestendXcmRouter( + bp_asset_hub_rococo::XcmBridgeHubRouterCall::report_bridge_status { + bridge_id: Default::default(), + is_congested, + } + ) + .encode() + .into(), + } + ] +} + +/// Proof of 
messages, coming from Westend. +pub type FromWestendBridgeHubMessagesProof = + FromBridgedChainMessagesProof; +/// Messages delivery proof for Rococo Bridge Hub -> Westend Bridge Hub messages. +pub type ToWestendBridgeHubMessagesDeliveryProof = + FromBridgedChainMessagesDeliveryProof; + +/// Dispatches received XCM messages from other bridge +type FromWestendMessageBlobDispatcher = BridgeBlobDispatcher< + XcmRouter, + BridgeHubRococoUniversalLocation, + BridgeRococoToWestendMessagesPalletInstance, +>; + +/// Export XCM messages to be relayed to the other side +pub type ToBridgeHubWestendHaulBlobExporter = HaulBlobExporter< + XcmBlobHaulerAdapter, + WestendGlobalConsensusNetwork, + (), +>; +pub struct ToBridgeHubWestendXcmBlobHauler; +impl XcmBlobHauler for ToBridgeHubWestendXcmBlobHauler { + type Runtime = Runtime; + type MessagesInstance = WithBridgeHubWestendMessagesInstance; + type SenderAndLane = FromAssetHubRococoToAssetHubWestendRoute; + + type ToSourceChainSender = XcmRouter; + type CongestedMessage = CongestedMessage; + type UncongestedMessage = UncongestedMessage; +} + +/// On messages delivered callback. +type OnMessagesDeliveredFromWestend = XcmBlobHaulerAdapter; + +/// Messaging Bridge configuration for BridgeHubRococo -> BridgeHubWestend +pub struct WithBridgeHubWestendMessageBridge; +impl MessageBridge for WithBridgeHubWestendMessageBridge { + const BRIDGED_MESSAGES_PALLET_NAME: &'static str = + bp_bridge_hub_rococo::WITH_BRIDGE_HUB_ROCOCO_MESSAGES_PALLET_NAME; + type ThisChain = BridgeHubRococo; + type BridgedChain = BridgeHubWestend; + type BridgedHeaderChain = pallet_bridge_parachains::ParachainHeaders< + Runtime, + BridgeParachainWestendInstance, + bp_bridge_hub_westend::BridgeHubWestend, + >; +} + +/// Message verifier for BridgeHubWestend messages sent from BridgeHubRococo +pub type ToBridgeHubWestendMessageVerifier = + messages::source::FromThisChainMessageVerifier; + +/// Maximal outbound payload size of BridgeHubRococo -> BridgeHubWestend messages. +pub type ToBridgeHubWestendMaximalOutboundPayloadSize = + messages::source::FromThisChainMaximalOutboundPayloadSize; + +/// BridgeHubWestend chain from message lane point of view. +#[derive(RuntimeDebug, Clone, Copy)] +pub struct BridgeHubWestend; + +impl UnderlyingChainProvider for BridgeHubWestend { + type Chain = bp_bridge_hub_westend::BridgeHubWestend; +} + +impl messages::BridgedChainWithMessages for BridgeHubWestend {} + +/// BridgeHubRococo chain from message lane point of view. +#[derive(RuntimeDebug, Clone, Copy)] +pub struct BridgeHubRococo; + +impl UnderlyingChainProvider for BridgeHubRococo { + type Chain = bp_bridge_hub_rococo::BridgeHubRococo; +} + +impl ThisChainWithMessages for BridgeHubRococo { + type RuntimeOrigin = RuntimeOrigin; +} + +/// Signed extension that refunds relayers that are delivering messages from the Westend parachain. 
+pub type OnBridgeHubRococoRefundBridgeHubWestendMessages = RefundSignedExtensionAdapter< + RefundBridgedParachainMessages< + Runtime, + RefundableParachain< + BridgeParachainWestendInstance, + bp_bridge_hub_westend::BridgeHubWestend, + >, + RefundableMessagesLane< + WithBridgeHubWestendMessagesInstance, + AssetHubRococoToAssetHubWestendMessagesLane, + >, + ActualFeeRefund, + PriorityBoostPerMessage, + StrOnBridgeHubRococoRefundBridgeHubWestendMessages, + >, +>; +bp_runtime::generate_static_str_provider!(OnBridgeHubRococoRefundBridgeHubWestendMessages); + +/// Add XCM messages support for BridgeHubRococo to support Rococo->Westend XCM messages +pub type WithBridgeHubWestendMessagesInstance = pallet_bridge_messages::Instance3; +impl pallet_bridge_messages::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = weights::pallet_bridge_messages::WeightInfo; + type BridgedChainId = BridgeHubWestendChainId; + type ActiveOutboundLanes = ActiveOutboundLanesToBridgeHubWestend; + type MaxUnrewardedRelayerEntriesAtInboundLane = MaxUnrewardedRelayerEntriesAtInboundLane; + type MaxUnconfirmedMessagesAtInboundLane = MaxUnconfirmedMessagesAtInboundLane; + + type MaximalOutboundPayloadSize = ToBridgeHubWestendMaximalOutboundPayloadSize; + type OutboundPayload = XcmAsPlainPayload; + + type InboundPayload = XcmAsPlainPayload; + type InboundRelayer = AccountId; + type DeliveryPayments = (); + + type TargetHeaderChain = TargetHeaderChainAdapter; + type LaneMessageVerifier = ToBridgeHubWestendMessageVerifier; + type DeliveryConfirmationPayments = pallet_bridge_relayers::DeliveryConfirmationPaymentsAdapter< + Runtime, + WithBridgeHubWestendMessagesInstance, + DeliveryRewardInBalance, + >; + + type SourceHeaderChain = SourceHeaderChainAdapter; + type MessageDispatch = XcmBlobMessageDispatch< + FromWestendMessageBlobDispatcher, + Self::WeightInfo, + cumulus_pallet_xcmp_queue::bridging::OutXcmpChannelStatusProvider< + AssetHubRococoParaId, + Runtime, + >, + >; + type OnMessagesDelivered = OnMessagesDeliveredFromWestend; +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::bridge_common_config::BridgeGrandpaWestendInstance; + use bridge_runtime_common::{ + assert_complete_bridge_types, + integrity::{ + assert_complete_bridge_constants, check_message_lane_weights, + AssertBridgeMessagesPalletConstants, AssertBridgePalletNames, AssertChainConstants, + AssertCompleteBridgeConstants, + }, + }; + use parachains_common::{rococo, Balance}; + + /// Every additional message in the message delivery transaction boosts its priority. + /// So the priority of transaction with `N+1` messages is larger than priority of + /// transaction with `N` messages by the `PriorityBoostPerMessage`. + /// + /// Economically, it is an equivalent of adding tip to the transaction with `N` messages. + /// The `FEE_BOOST_PER_MESSAGE` constant is the value of this tip. + /// + /// We want this tip to be large enough (delivery transactions with more messages = less + /// operational costs and a faster bridge), so this value should be significant. 
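The comment above describes `PriorityBoostPerMessage` as the priority equivalent of tipping `FEE_BOOST_PER_MESSAGE` (defined just below as 2 UNITS) for every additional message in a delivery transaction; the calibration itself is asserted by `ensure_priority_boost_is_sane` in the test further down. A back-of-the-envelope editorial illustration of the intended relationship — the exact tip-to-priority mapping lives in `priority_calculator` and is not reproduced here:

    /// Extra transaction priority granted to a delivery transaction carrying `n_messages`,
    /// relative to one carrying a single message (illustrative formula).
    fn priority_boost(n_messages: u64, boost_per_message: u64) -> u64 {
        boost_per_message.saturating_mul(n_messages.saturating_sub(1))
    }

    fn main() {
        let boost_per_message = 182_044_444_444_444_u64; // `PriorityBoostPerMessage` above
        // A 10-message delivery is prioritized as if it tipped roughly 9 * 2 UNITS more
        // than a single-message delivery, per the calibration checked in the tests.
        println!("{}", priority_boost(10, boost_per_message));
    }
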
+ const FEE_BOOST_PER_MESSAGE: Balance = 2 * rococo::currency::UNITS; + + #[test] + fn ensure_bridge_hub_rococo_message_lane_weights_are_correct() { + check_message_lane_weights::< + bp_bridge_hub_rococo::BridgeHubRococo, + Runtime, + WithBridgeHubWestendMessagesInstance, + >( + bp_bridge_hub_westend::EXTRA_STORAGE_PROOF_SIZE, + bp_bridge_hub_rococo::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, + bp_bridge_hub_rococo::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, + true, + ); + } + + #[test] + fn ensure_bridge_integrity() { + assert_complete_bridge_types!( + runtime: Runtime, + with_bridged_chain_grandpa_instance: BridgeGrandpaWestendInstance, + with_bridged_chain_messages_instance: WithBridgeHubWestendMessagesInstance, + bridge: WithBridgeHubWestendMessageBridge, + this_chain: bp_rococo::Rococo, + bridged_chain: bp_westend::Westend, + ); + + assert_complete_bridge_constants::< + Runtime, + BridgeGrandpaWestendInstance, + WithBridgeHubWestendMessagesInstance, + WithBridgeHubWestendMessageBridge, + >(AssertCompleteBridgeConstants { + this_chain_constants: AssertChainConstants { + block_length: bp_bridge_hub_rococo::BlockLength::get(), + block_weights: bp_bridge_hub_rococo::BlockWeights::get(), + }, + messages_pallet_constants: AssertBridgeMessagesPalletConstants { + max_unrewarded_relayers_in_bridged_confirmation_tx: + bp_bridge_hub_westend::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, + max_unconfirmed_messages_in_bridged_confirmation_tx: + bp_bridge_hub_westend::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, + bridged_chain_id: bp_runtime::BRIDGE_HUB_WESTEND_CHAIN_ID, + }, + pallet_names: AssertBridgePalletNames { + with_this_chain_messages_pallet_name: + bp_bridge_hub_rococo::WITH_BRIDGE_HUB_ROCOCO_MESSAGES_PALLET_NAME, + with_bridged_chain_grandpa_pallet_name: + bp_westend::WITH_WESTEND_GRANDPA_PALLET_NAME, + with_bridged_chain_messages_pallet_name: + bp_bridge_hub_westend::WITH_BRIDGE_HUB_WESTEND_MESSAGES_PALLET_NAME, + }, + }); + + bridge_runtime_common::priority_calculator::ensure_priority_boost_is_sane::< + Runtime, + WithBridgeHubWestendMessagesInstance, + PriorityBoostPerMessage, + >(FEE_BOOST_PER_MESSAGE); + + assert_eq!( + BridgeRococoToWestendMessagesPalletInstance::get(), + X1(PalletInstance( + bp_bridge_hub_rococo::WITH_BRIDGE_ROCOCO_TO_WESTEND_MESSAGES_PALLET_INDEX + )) + ); + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index c4e510ee4095e8f125edf9eae33de881fb17e21a..8e138822696e951443d72ee90dcbb901849e9003 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -14,6 +14,11 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . +//! # Bridge Hub Rococo Runtime +//! +//! This runtime currently supports bridging between: +//! - Rococo <> Westend + #![cfg_attr(not(feature = "std"), no_std)] // `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. 
#![recursion_limit = "256"] @@ -22,8 +27,8 @@ #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); -pub mod bridge_hub_rococo_config; -pub mod bridge_hub_wococo_config; +pub mod bridge_common_config; +pub mod bridge_to_westend_config; mod weights; pub mod xcm_config; @@ -42,12 +47,13 @@ use sp_std::prelude::*; use sp_version::NativeVersion; use sp_version::RuntimeVersion; +use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; use frame_support::{ construct_runtime, dispatch::DispatchClass, genesis_builder_helper::{build_config, create_default_config}, parameter_types, - traits::{ConstBool, ConstU32, ConstU64, ConstU8, Everything}, + traits::{ConstBool, ConstU32, ConstU64, ConstU8, Everything, TransformOrigin}, weights::{ConstantMultiplier, Weight}, PalletId, }; @@ -55,42 +61,27 @@ use frame_system::{ limits::{BlockLength, BlockWeights}, EnsureRoot, }; +use parachains_common::message_queue::{NarrowOriginToSibling, ParaIdToSibling}; pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; pub use sp_runtime::{MultiAddress, Perbill, Permill}; -use xcm_config::{XcmConfig, XcmOriginToTransactDispatchOrigin}; +use xcm_config::{XcmOriginToTransactDispatchOrigin, XcmRouter}; -use bp_parachains::SingleParaStoredHeaderDataBuilder; use bp_runtime::HeaderId; #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; +use xcm::latest::prelude::*; use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; -use crate::{ - bridge_hub_rococo_config::{ - BridgeRefundBridgeHubWococoMessages, OnBridgeHubRococoBlobDispatcher, - WithBridgeHubWococoMessageBridge, - }, - bridge_hub_wococo_config::{ - BridgeRefundBridgeHubRococoMessages, OnBridgeHubWococoBlobDispatcher, - WithBridgeHubRococoMessageBridge, - }, - xcm_config::XcmRouter, -}; -use bridge_runtime_common::{ - messages::{source::TargetHeaderChainAdapter, target::SourceHeaderChainAdapter}, - messages_xcm_extension::{XcmAsPlainPayload, XcmBlobMessageDispatch}, -}; use parachains_common::{ impls::DealWithFees, rococo::{consensus::*, currency::*, fee::WeightToFee}, AccountId, Balance, BlockNumber, Hash, Header, Nonce, Signature, AVERAGE_ON_INITIALIZE_RATIO, HOURS, MAXIMUM_BLOCK_WEIGHT, NORMAL_DISPATCH_RATIO, SLOT_DURATION, }; -use xcm_executor::XcmExecutor; /// The address format for describing accounts. pub type Address = MultiAddress; @@ -115,7 +106,7 @@ pub type SignedExtra = ( frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, BridgeRejectObsoleteHeadersAndMessages, - (BridgeRefundBridgeHubRococoMessages, BridgeRefundBridgeHubWococoMessages), + (bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages,), ); /// Unchecked extrinsic type as expected by this runtime. @@ -179,7 +170,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("bridge-hub-rococo"), impl_name: create_runtime_str!("bridge-hub-rococo"), authoring_version: 1, - spec_version: 10000, + spec_version: 1_003_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 3, @@ -297,6 +288,7 @@ impl pallet_balances::Config for Runtime { type MaxReserves = ConstU32<50>; type ReserveIdentifier = [u8; 8]; type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); type MaxHolds = ConstU32<0>; type MaxFreezes = ConstU32<0>; @@ -323,11 +315,12 @@ parameter_types! 
{ } impl cumulus_pallet_parachain_system::Config for Runtime { + type WeightInfo = weights::cumulus_pallet_parachain_system::WeightInfo; type RuntimeEvent = RuntimeEvent; type OnSystemEvent = (); type SelfParaId = parachain_info::Pallet; type OutboundXcmpMessageSource = XcmpQueue; - type DmpMessageHandler = DmpQueue; + type DmpQueue = frame_support::traits::EnqueueWithOrigin; type ReservedDmpWeight = ReservedDmpWeight; type XcmpMessageHandler = XcmpQueue; type ReservedXcmpWeight = ReservedXcmpWeight; @@ -342,24 +335,69 @@ impl cumulus_pallet_parachain_system::Config for Runtime { impl parachain_info::Config for Runtime {} +parameter_types! { + pub MessageQueueServiceWeight: Weight = Perbill::from_percent(35) * RuntimeBlockWeights::get().max_block; +} + +impl pallet_message_queue::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = weights::pallet_message_queue::WeightInfo; + #[cfg(feature = "runtime-benchmarks")] + type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor< + cumulus_primitives_core::AggregateMessageOrigin, + >; + #[cfg(not(feature = "runtime-benchmarks"))] + type MessageProcessor = xcm_builder::ProcessXcmMessage< + AggregateMessageOrigin, + xcm_executor::XcmExecutor, + RuntimeCall, + >; + type Size = u32; + // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: + type QueueChangeHandler = NarrowOriginToSibling; + type QueuePausedQuery = NarrowOriginToSibling; + type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type MaxStale = sp_core::ConstU32<8>; + type ServiceWeight = MessageQueueServiceWeight; +} + impl cumulus_pallet_aura_ext::Config for Runtime {} +parameter_types! { + /// The asset ID for the asset that we use to pay for message delivery fees. + pub FeeAssetId: AssetId = Concrete(xcm_config::TokenLocation::get()); + /// The base fee for the message delivery fees. + pub const BaseDeliveryFee: u128 = CENTS.saturating_mul(3); +} + +pub type PriceForSiblingParachainDelivery = polkadot_runtime_common::xcm_sender::ExponentialPrice< + FeeAssetId, + BaseDeliveryFee, + TransactionByteFee, + XcmpQueue, +>; + impl cumulus_pallet_xcmp_queue::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type XcmExecutor = XcmExecutor; type ChannelInfo = ParachainSystem; type VersionWrapper = PolkadotXcm; - type ExecuteOverweightOrigin = EnsureRoot; + // Enqueue XCMP messages from siblings for later processing. + type XcmpQueue = TransformOrigin; + type MaxInboundSuspended = sp_core::ConstU32<1_000>; type ControllerOrigin = EnsureRoot; type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; type WeightInfo = weights::cumulus_pallet_xcmp_queue::WeightInfo; - type PriceForSiblingDelivery = (); + type PriceForSiblingDelivery = PriceForSiblingParachainDelivery; +} + +parameter_types! 
{ + pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; } impl cumulus_pallet_dmp_queue::Config for Runtime { + type WeightInfo = weights::cumulus_pallet_dmp_queue::WeightInfo; type RuntimeEvent = RuntimeEvent; - type XcmExecutor = XcmExecutor; - type ExecuteOverweightOrigin = EnsureRoot; + type DmpSink = frame_support::traits::EnqueueWithOrigin; } pub const PERIOD: u32 = 6 * HOURS; @@ -435,155 +473,6 @@ impl pallet_utility::Config for Runtime { type WeightInfo = weights::pallet_utility::WeightInfo; } -// Add bridge pallets (GPA) - -/// Add GRANDPA bridge pallet to track Wococo relay chain on Rococo BridgeHub -pub type BridgeGrandpaWococoInstance = pallet_bridge_grandpa::Instance1; -impl pallet_bridge_grandpa::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type BridgedChain = bp_wococo::Wococo; - type MaxFreeMandatoryHeadersPerBlock = ConstU32<4>; - type HeadersToKeep = RelayChainHeadersToKeep; - type WeightInfo = weights::pallet_bridge_grandpa_bridge_wococo_grandpa::WeightInfo; -} - -/// Add GRANDPA bridge pallet to track Rococo relay chain on Wococo BridgeHub -pub type BridgeGrandpaRococoInstance = pallet_bridge_grandpa::Instance2; -impl pallet_bridge_grandpa::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type BridgedChain = bp_rococo::Rococo; - type MaxFreeMandatoryHeadersPerBlock = ConstU32<4>; - type HeadersToKeep = RelayChainHeadersToKeep; - type WeightInfo = weights::pallet_bridge_grandpa_bridge_rococo_grandpa::WeightInfo; -} - -parameter_types! { - pub const RelayChainHeadersToKeep: u32 = 1024; - pub const ParachainHeadsToKeep: u32 = 64; - pub const RelayerStakeLease: u32 = 8; - - pub const RococoBridgeParachainPalletName: &'static str = "Paras"; - pub const WococoBridgeParachainPalletName: &'static str = "Paras"; - pub const MaxRococoParaHeadDataSize: u32 = bp_rococo::MAX_NESTED_PARACHAIN_HEAD_DATA_SIZE; - pub const MaxWococoParaHeadDataSize: u32 = bp_wococo::MAX_NESTED_PARACHAIN_HEAD_DATA_SIZE; - - pub storage DeliveryRewardInBalance: u64 = 1_000_000; - pub storage RequiredStakeForStakeAndSlash: Balance = 1_000_000; - - pub const RelayerStakeReserveId: [u8; 8] = *b"brdgrlrs"; -} - -/// Add parachain bridge pallet to track Wococo bridge hub parachain -pub type BridgeParachainWococoInstance = pallet_bridge_parachains::Instance1; -impl pallet_bridge_parachains::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type WeightInfo = weights::pallet_bridge_parachains_bridge_parachains_bench_runtime_bridge_parachain_wococo_instance::WeightInfo; - type BridgesGrandpaPalletInstance = BridgeGrandpaWococoInstance; - type ParasPalletName = WococoBridgeParachainPalletName; - type ParaStoredHeaderDataBuilder = - SingleParaStoredHeaderDataBuilder; - type HeadsToKeep = ParachainHeadsToKeep; - type MaxParaHeadDataSize = MaxWococoParaHeadDataSize; -} - -/// Add parachain bridge pallet to track Rococo bridge hub parachain -pub type BridgeParachainRococoInstance = pallet_bridge_parachains::Instance2; -impl pallet_bridge_parachains::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type WeightInfo = weights::pallet_bridge_parachains_bridge_parachains_bench_runtime_bridge_parachain_rococo_instance::WeightInfo; - type BridgesGrandpaPalletInstance = BridgeGrandpaRococoInstance; - type ParasPalletName = RococoBridgeParachainPalletName; - type ParaStoredHeaderDataBuilder = - SingleParaStoredHeaderDataBuilder; - type HeadsToKeep = ParachainHeadsToKeep; - type MaxParaHeadDataSize = MaxRococoParaHeadDataSize; -} - -/// Add XCM messages 
support for BridgeHubRococo to support Rococo->Wococo XCM messages -pub type WithBridgeHubWococoMessagesInstance = pallet_bridge_messages::Instance1; -impl pallet_bridge_messages::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type WeightInfo = weights::pallet_bridge_messages_bridge_messages_bench_runtime_with_bridge_hub_wococo_messages_instance::WeightInfo; - type BridgedChainId = bridge_hub_rococo_config::BridgeHubWococoChainId; - type ActiveOutboundLanes = bridge_hub_rococo_config::ActiveOutboundLanesToBridgeHubWococo; - type MaxUnrewardedRelayerEntriesAtInboundLane = - bridge_hub_rococo_config::MaxUnrewardedRelayerEntriesAtInboundLane; - type MaxUnconfirmedMessagesAtInboundLane = - bridge_hub_rococo_config::MaxUnconfirmedMessagesAtInboundLane; - - type MaximalOutboundPayloadSize = - bridge_hub_rococo_config::ToBridgeHubWococoMaximalOutboundPayloadSize; - type OutboundPayload = XcmAsPlainPayload; - - type InboundPayload = XcmAsPlainPayload; - type InboundRelayer = AccountId; - type DeliveryPayments = (); - - type TargetHeaderChain = TargetHeaderChainAdapter; - type LaneMessageVerifier = bridge_hub_rococo_config::ToBridgeHubWococoMessageVerifier; - type DeliveryConfirmationPayments = pallet_bridge_relayers::DeliveryConfirmationPaymentsAdapter< - Runtime, - WithBridgeHubWococoMessagesInstance, - DeliveryRewardInBalance, - >; - - type SourceHeaderChain = SourceHeaderChainAdapter; - type MessageDispatch = - XcmBlobMessageDispatch; - type OnMessagesDelivered = (); -} - -/// Add XCM messages support for BridgeHubWococo to support Wococo->Rococo XCM messages -pub type WithBridgeHubRococoMessagesInstance = pallet_bridge_messages::Instance2; -impl pallet_bridge_messages::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type WeightInfo = weights::pallet_bridge_messages_bridge_messages_bench_runtime_with_bridge_hub_rococo_messages_instance::WeightInfo; - type BridgedChainId = bridge_hub_wococo_config::BridgeHubRococoChainId; - type ActiveOutboundLanes = bridge_hub_wococo_config::ActiveOutboundLanesToBridgeHubRococo; - type MaxUnrewardedRelayerEntriesAtInboundLane = - bridge_hub_wococo_config::MaxUnrewardedRelayerEntriesAtInboundLane; - type MaxUnconfirmedMessagesAtInboundLane = - bridge_hub_wococo_config::MaxUnconfirmedMessagesAtInboundLane; - - type MaximalOutboundPayloadSize = - bridge_hub_wococo_config::ToBridgeHubRococoMaximalOutboundPayloadSize; - type OutboundPayload = XcmAsPlainPayload; - - type InboundPayload = XcmAsPlainPayload; - type InboundRelayer = AccountId; - type DeliveryPayments = (); - - type TargetHeaderChain = TargetHeaderChainAdapter; - type LaneMessageVerifier = bridge_hub_wococo_config::ToBridgeHubRococoMessageVerifier; - type DeliveryConfirmationPayments = pallet_bridge_relayers::DeliveryConfirmationPaymentsAdapter< - Runtime, - WithBridgeHubRococoMessagesInstance, - DeliveryRewardInBalance, - >; - - type SourceHeaderChain = SourceHeaderChainAdapter; - type MessageDispatch = - XcmBlobMessageDispatch; - type OnMessagesDelivered = (); -} - -/// Allows collect and claim rewards for relayers -impl pallet_bridge_relayers::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type Reward = Balance; - type PaymentProcedure = - bp_relayers::PayRewardFromAccount, AccountId>; - type StakeAndSlash = pallet_bridge_relayers::StakeAndSlashNamed< - AccountId, - BlockNumber, - Balances, - RelayerStakeReserveId, - RequiredStakeForStakeAndSlash, - RelayerStakeLease, - >; - type WeightInfo = weights::pallet_bridge_relayers::WeightInfo; -} - // Create the runtime by 
composing the FRAME pallets that were previously configured. construct_runtime!( pub enum Runtime @@ -609,7 +498,7 @@ construct_runtime!( // XCM helpers. XcmpQueue: cumulus_pallet_xcmp_queue::{Pallet, Call, Storage, Event} = 30, - PolkadotXcm: pallet_xcm::{Pallet, Call, Event, Origin, Config} = 31, + PolkadotXcm: pallet_xcm::{Pallet, Call, Storage, Event, Origin, Config} = 31, CumulusXcm: cumulus_pallet_xcm::{Pallet, Event, Origin} = 32, DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event} = 33, @@ -617,63 +506,62 @@ construct_runtime!( Utility: pallet_utility::{Pallet, Call, Event} = 40, Multisig: pallet_multisig::{Pallet, Call, Storage, Event} = 36, - // Rococo and Wococo Bridge Hubs are sharing the runtime, so this runtime has two sets of - // bridge pallets. Both are deployed at both runtimes, but only one set is actually used - // at particular runtime. + // BridgeHubRococo uses: + // - BridgeWestendGrandpa + // - BridgeWestendParachains + // - BridgeWestendMessages + // - BridgeRelayers - // With-Wococo bridge modules that are active (used) at Rococo Bridge Hub runtime. - BridgeWococoGrandpa: pallet_bridge_grandpa::::{Pallet, Call, Storage, Event, Config} = 41, - BridgeWococoParachain: pallet_bridge_parachains::::{Pallet, Call, Storage, Event} = 42, - BridgeWococoMessages: pallet_bridge_messages::::{Pallet, Call, Storage, Event, Config} = 46, + // GRANDPA bridge modules. + BridgeWestendGrandpa: pallet_bridge_grandpa::::{Pallet, Call, Storage, Event, Config} = 48, - // With-Rococo bridge modules that are active (used) at Wococo Bridge Hub runtime. - BridgeRococoGrandpa: pallet_bridge_grandpa::::{Pallet, Call, Storage, Event, Config} = 43, - BridgeRococoParachain: pallet_bridge_parachains::::{Pallet, Call, Storage, Event} = 44, - BridgeRococoMessages: pallet_bridge_messages::::{Pallet, Call, Storage, Event, Config} = 45, + // Parachain bridge modules. + BridgeWestendParachains: pallet_bridge_parachains::::{Pallet, Call, Storage, Event} = 49, + + // Messaging bridge modules. + BridgeWestendMessages: pallet_bridge_messages::::{Pallet, Call, Storage, Event, Config} = 51, BridgeRelayers: pallet_bridge_relayers::{Pallet, Call, Storage, Event} = 47, + + // Message Queue. Importantly, is registered last so that messages are processed after + // the `on_initialize` hooks of bridging pallets. + MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 250, } ); bridge_runtime_common::generate_bridge_reject_obsolete_headers_and_messages! { RuntimeCall, AccountId, // Grandpa - BridgeRococoGrandpa, BridgeWococoGrandpa, + BridgeWestendGrandpa, // Parachains - BridgeRococoParachain, BridgeWococoParachain, + BridgeWestendParachains, // Messages - BridgeRococoMessages, BridgeWococoMessages + BridgeWestendMessages } -#[cfg(feature = "runtime-benchmarks")] -#[macro_use] -extern crate frame_benchmarking; - #[cfg(feature = "runtime-benchmarks")] mod benches { - define_benchmarks!( + frame_benchmarking::define_benchmarks!( [frame_system, SystemBench::] [pallet_balances, Balances] + [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] [pallet_session, SessionBench::] [pallet_utility, Utility] [pallet_timestamp, Timestamp] [pallet_collator_selection, CollatorSelection] + [cumulus_pallet_parachain_system, ParachainSystem] [cumulus_pallet_xcmp_queue, XcmpQueue] + [cumulus_pallet_dmp_queue, DmpQueue] // XCM - [pallet_xcm, PolkadotXcm] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] // NOTE: Make sure you point to the individual modules below. 
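		// NOTE: each `[pallet, Type]` pair in this list couples a benchmarked pallet with
		// the runtime type (or instance alias) its benchmarks are instantiated for. The
		// bridge entries further down (`WestendFinality`, `WithinWestend`,
		// `RococoToWestend`) are aliases declared next to the benchmark impls (see the
		// "Change weight file names" note below), so each bridge direction gets its own
		// generated weight file.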
[pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] - // Bridge pallets at Rococo - [pallet_bridge_grandpa, BridgeWococoGrandpa] - [pallet_bridge_parachains, BridgeParachainsBench::] - [pallet_bridge_messages, BridgeMessagesBench::] - // Bridge pallets at Wococo - [pallet_bridge_grandpa, BridgeRococoGrandpa] - [pallet_bridge_parachains, BridgeParachainsBench::] - [pallet_bridge_messages, BridgeMessagesBench::] - // Bridge relayer pallets + // Bridge pallets + [pallet_bridge_grandpa, WestendFinality] + [pallet_bridge_parachains, WithinWestend] + [pallet_bridge_messages, RococoToWestend] [pallet_bridge_relayers, BridgeRelayersBench::] ); } @@ -822,58 +710,39 @@ impl_runtime_apis! { } } - impl bp_rococo::RococoFinalityApi for Runtime { - fn best_finalized() -> Option> { - BridgeRococoGrandpa::best_finalized() + impl bp_westend::WestendFinalityApi for Runtime { + fn best_finalized() -> Option> { + BridgeWestendGrandpa::best_finalized() } fn synced_headers_grandpa_info( - ) -> Vec> { - BridgeRococoGrandpa::synced_headers_grandpa_info() + ) -> Vec> { + BridgeWestendGrandpa::synced_headers_grandpa_info() } } - impl bp_wococo::WococoFinalityApi for Runtime { - fn best_finalized() -> Option> { - BridgeWococoGrandpa::best_finalized() - } - fn synced_headers_grandpa_info( - ) -> Vec> { - BridgeWococoGrandpa::synced_headers_grandpa_info() - } - } - - - impl bp_bridge_hub_rococo::BridgeHubRococoFinalityApi for Runtime { + impl bp_bridge_hub_westend::BridgeHubWestendFinalityApi for Runtime { fn best_finalized() -> Option> { - BridgeRococoParachain::best_parachain_head_id::< - bp_bridge_hub_rococo::BridgeHubRococo + BridgeWestendParachains::best_parachain_head_id::< + bp_bridge_hub_westend::BridgeHubWestend >().unwrap_or(None) } } - impl bp_bridge_hub_wococo::BridgeHubWococoFinalityApi for Runtime { - fn best_finalized() -> Option> { - BridgeWococoParachain::best_parachain_head_id::< - bp_bridge_hub_wococo::BridgeHubWococo - >().unwrap_or(None) - } - } - - // This exposed by BridgeHubRococo - impl bp_bridge_hub_wococo::FromBridgeHubWococoInboundLaneApi for Runtime { + // This is exposed by BridgeHubRococo + impl bp_bridge_hub_westend::FromBridgeHubWestendInboundLaneApi for Runtime { fn message_details( lane: bp_messages::LaneId, messages: Vec<(bp_messages::MessagePayload, bp_messages::OutboundMessageDetails)>, ) -> Vec { bridge_runtime_common::messages_api::inbound_message_details::< Runtime, - WithBridgeHubWococoMessagesInstance, + bridge_to_westend_config::WithBridgeHubWestendMessagesInstance, >(lane, messages) } } - // This exposed by BridgeHubRococo - impl bp_bridge_hub_wococo::ToBridgeHubWococoOutboundLaneApi for Runtime { + // This is exposed by BridgeHubRococo + impl bp_bridge_hub_westend::ToBridgeHubWestendOutboundLaneApi for Runtime { fn message_details( lane: bp_messages::LaneId, begin: bp_messages::MessageNonce, @@ -881,34 +750,7 @@ impl_runtime_apis! 
{ ) -> Vec { bridge_runtime_common::messages_api::outbound_message_details::< Runtime, - WithBridgeHubWococoMessagesInstance, - >(lane, begin, end) - } - } - - // This is exposed by BridgeHubWococo - impl bp_bridge_hub_rococo::FromBridgeHubRococoInboundLaneApi for Runtime { - fn message_details( - lane: bp_messages::LaneId, - messages: Vec<(bp_messages::MessagePayload, bp_messages::OutboundMessageDetails)>, - ) -> Vec { - bridge_runtime_common::messages_api::inbound_message_details::< - Runtime, - WithBridgeHubRococoMessagesInstance, - >(lane, messages) - } - } - - // This is exposed by BridgeHubWococo - impl bp_bridge_hub_rococo::ToBridgeHubRococoOutboundLaneApi for Runtime { - fn message_details( - lane: bp_messages::LaneId, - begin: bp_messages::MessageNonce, - end: bp_messages::MessageNonce, - ) -> Vec { - bridge_runtime_common::messages_api::outbound_message_details::< - Runtime, - WithBridgeHubRococoMessagesInstance, + bridge_to_westend_config::WithBridgeHubWestendMessagesInstance, >(lane, begin, end) } } @@ -942,6 +784,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -949,9 +792,11 @@ impl_runtime_apis! { type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::; type XcmGeneric = pallet_xcm_benchmarks::generic::Pallet::; - use pallet_bridge_parachains::benchmarking::Pallet as BridgeParachainsBench; - use pallet_bridge_messages::benchmarking::Pallet as BridgeMessagesBench; use pallet_bridge_relayers::benchmarking::Pallet as BridgeRelayersBench; + // Change weight file names. + type WestendFinality = BridgeWestendGrandpa; + type WithinWestend = pallet_bridge_parachains::benchmarking::Pallet::; + type RococoToWestend = pallet_bridge_messages::benchmarking::Pallet ::; let mut list = Vec::::new(); list_benchmarks!(list, extra); @@ -981,20 +826,55 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported between BH and Relay. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + Parent.into(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Reserve transfers are disabled on BH. + None + } + } + use xcm::latest::prelude::*; - use xcm_config::RelayLocation; + use xcm_config::TokenLocation; + + parameter_types! 
{ + pub ExistentialDepositMultiAsset: Option = Some(( + TokenLocation::get(), + ExistentialDeposit::get() + ).into()); + } impl pallet_xcm_benchmarks::Config for Runtime { type XcmConfig = xcm_config::XcmConfig; type AccountIdConverter = xcm_config::LocationToAccountId; + type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositMultiAsset, + xcm_config::PriceForParentDelivery, + >; fn valid_destination() -> Result { - Ok(RelayLocation::get()) + Ok(TokenLocation::get()) } fn worst_case_holding(_depositable_count: u32) -> MultiAssets { // just concrete assets according to relay chain. let assets: Vec = vec![ MultiAsset { - id: Concrete(RelayLocation::get()), + id: Concrete(TokenLocation::get()), fun: Fungible(1_000_000 * UNITS), } ]; @@ -1004,8 +884,8 @@ impl_runtime_apis! { parameter_types! { pub const TrustedTeleporter: Option<(MultiLocation, MultiAsset)> = Some(( - RelayLocation::get(), - MultiAsset { fun: Fungible(UNITS), id: Concrete(RelayLocation::get()) }, + TokenLocation::get(), + MultiAsset { fun: Fungible(UNITS), id: Concrete(TokenLocation::get()) }, )); pub const CheckedAccount: Option<(AccountId, xcm_builder::MintLocation)> = None; pub const TrustedReserve: Option<(MultiLocation, MultiAsset)> = None; @@ -1020,13 +900,14 @@ impl_runtime_apis! { fn get_multi_asset() -> MultiAsset { MultiAsset { - id: Concrete(RelayLocation::get()), + id: Concrete(TokenLocation::get()), fun: Fungible(UNITS), } } } impl pallet_xcm_benchmarks::generic::Config for Runtime { + type TransactAsset = Balances; type RuntimeCall = RuntimeCall; fn worst_case_response() -> (u64, Response) { @@ -1042,16 +923,16 @@ impl_runtime_apis! { } fn transact_origin_and_runtime_call() -> Result<(MultiLocation, RuntimeCall), BenchmarkError> { - Ok((RelayLocation::get(), frame_system::Call::remark_with_event { remark: vec![] }.into())) + Ok((TokenLocation::get(), frame_system::Call::remark_with_event { remark: vec![] }.into())) } fn subscribe_origin() -> Result { - Ok(RelayLocation::get()) + Ok(TokenLocation::get()) } fn claimable_asset() -> Result<(MultiLocation, MultiLocation, MultiAssets), BenchmarkError> { - let origin = RelayLocation::get(); - let assets: MultiAssets = (Concrete(RelayLocation::get()), 1_000 * UNITS).into(); + let origin = TokenLocation::get(); + let assets: MultiAssets = (Concrete(TokenLocation::get()), 1_000 * UNITS).into(); let ticket = MultiLocation { parents: 0, interior: Here }; Ok((origin, ticket, assets)) } @@ -1062,7 +943,7 @@ impl_runtime_apis! { fn export_message_origin_and_destination( ) -> Result<(MultiLocation, NetworkId, InteriorMultiLocation), BenchmarkError> { - Ok((RelayLocation::get(), NetworkId::Wococo, X1(Parachain(100)))) + Ok((TokenLocation::get(), NetworkId::Westend, X1(Parachain(100)))) } fn alias_origin() -> Result<(MultiLocation, MultiLocation), BenchmarkError> { @@ -1073,6 +954,10 @@ impl_runtime_apis! { type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::; type XcmGeneric = pallet_xcm_benchmarks::generic::Pallet::; + type WestendFinality = BridgeWestendGrandpa; + type WithinWestend = pallet_bridge_parachains::benchmarking::Pallet::; + type RococoToWestend = pallet_bridge_messages::benchmarking::Pallet ::; + use bridge_runtime_common::messages_benchmarking::{ prepare_message_delivery_proof_from_parachain, prepare_message_proof_from_parachain, @@ -1080,15 +965,14 @@ impl_runtime_apis! 
{ }; use pallet_bridge_messages::benchmarking::{ Config as BridgeMessagesConfig, - Pallet as BridgeMessagesBench, MessageDeliveryProofParams, MessageProofParams, }; - impl BridgeMessagesConfig for Runtime { + impl BridgeMessagesConfig for Runtime { fn is_relayer_rewarded(relayer: &Self::AccountId) -> bool { - let bench_lane_id = >::bench_lane_id(); - let bridged_chain_id = bp_runtime::BRIDGE_HUB_WOCOCO_CHAIN_ID; + let bench_lane_id = >::bench_lane_id(); + let bridged_chain_id = bp_runtime::BRIDGE_HUB_WESTEND_CHAIN_ID; pallet_bridge_relayers::Pallet::::relayer_reward( relayer, bp_relayers::RewardsAccountParams::new( @@ -1101,67 +985,24 @@ impl_runtime_apis! { fn prepare_message_proof( params: MessageProofParams, - ) -> (bridge_hub_rococo_config::FromWococoBridgeHubMessagesProof, Weight) { + ) -> (bridge_to_westend_config::FromWestendBridgeHubMessagesProof, Weight) { use cumulus_primitives_core::XcmpMessageSource; assert!(XcmpQueue::take_outbound_messages(usize::MAX).is_empty()); ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests(42.into()); prepare_message_proof_from_parachain::< Runtime, - BridgeGrandpaWococoInstance, - bridge_hub_rococo_config::WithBridgeHubWococoMessageBridge, + bridge_common_config::BridgeGrandpaWestendInstance, + bridge_to_westend_config::WithBridgeHubWestendMessageBridge, >(params, generate_xcm_builder_bridge_message_sample(X2(GlobalConsensus(Rococo), Parachain(42)))) } fn prepare_message_delivery_proof( params: MessageDeliveryProofParams, - ) -> bridge_hub_rococo_config::ToWococoBridgeHubMessagesDeliveryProof { - prepare_message_delivery_proof_from_parachain::< - Runtime, - BridgeGrandpaWococoInstance, - bridge_hub_rococo_config::WithBridgeHubWococoMessageBridge, - >(params) - } - - fn is_message_successfully_dispatched(_nonce: bp_messages::MessageNonce) -> bool { - use cumulus_primitives_core::XcmpMessageSource; - !XcmpQueue::take_outbound_messages(usize::MAX).is_empty() - } - } - - impl BridgeMessagesConfig for Runtime { - fn is_relayer_rewarded(relayer: &Self::AccountId) -> bool { - let bench_lane_id = >::bench_lane_id(); - let bridged_chain_id = bp_runtime::BRIDGE_HUB_ROCOCO_CHAIN_ID; - pallet_bridge_relayers::Pallet::::relayer_reward( - relayer, - bp_relayers::RewardsAccountParams::new( - bench_lane_id, - bridged_chain_id, - bp_relayers::RewardsAccountOwner::BridgedChain - ) - ).is_some() - } - - fn prepare_message_proof( - params: MessageProofParams, - ) -> (bridge_hub_wococo_config::FromRococoBridgeHubMessagesProof, Weight) { - use cumulus_primitives_core::XcmpMessageSource; - assert!(XcmpQueue::take_outbound_messages(usize::MAX).is_empty()); - ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests(42.into()); - prepare_message_proof_from_parachain::< - Runtime, - BridgeGrandpaRococoInstance, - bridge_hub_wococo_config::WithBridgeHubRococoMessageBridge, - >(params, generate_xcm_builder_bridge_message_sample(X2(GlobalConsensus(Wococo), Parachain(42)))) - } - - fn prepare_message_delivery_proof( - params: MessageDeliveryProofParams, - ) -> bridge_hub_wococo_config::ToRococoBridgeHubMessagesDeliveryProof { + ) -> bridge_to_westend_config::ToWestendBridgeHubMessagesDeliveryProof { prepare_message_delivery_proof_from_parachain::< Runtime, - BridgeGrandpaRococoInstance, - bridge_hub_wococo_config::WithBridgeHubRococoMessageBridge, + bridge_common_config::BridgeGrandpaWestendInstance, + bridge_to_westend_config::WithBridgeHubWestendMessageBridge, >(params) } @@ -1172,19 +1013,16 @@ impl_runtime_apis! 
{ } use bridge_runtime_common::parachains_benchmarking::prepare_parachain_heads_proof; - use pallet_bridge_parachains::benchmarking::{ - Config as BridgeParachainsConfig, - Pallet as BridgeParachainsBench, - }; + use pallet_bridge_parachains::benchmarking::Config as BridgeParachainsConfig; use pallet_bridge_relayers::benchmarking::{ Pallet as BridgeRelayersBench, Config as BridgeRelayersConfig, }; - impl BridgeParachainsConfig for Runtime { + impl BridgeParachainsConfig for Runtime { fn parachains() -> Vec { use bp_runtime::Parachain; - vec![bp_polkadot_core::parachains::ParaId(bp_bridge_hub_wococo::BridgeHubWococo::PARACHAIN_ID)] + vec![bp_polkadot_core::parachains::ParaId(bp_bridge_hub_westend::BridgeHubWestend::PARACHAIN_ID)] } fn prepare_parachain_heads_proof( @@ -1197,31 +1035,7 @@ impl_runtime_apis! { bp_polkadot_core::parachains::ParaHeadsProof, Vec<(bp_polkadot_core::parachains::ParaId, bp_polkadot_core::parachains::ParaHash)>, ) { - prepare_parachain_heads_proof::( - parachains, - parachain_head_size, - proof_size, - ) - } - } - - impl BridgeParachainsConfig for Runtime { - fn parachains() -> Vec { - use bp_runtime::Parachain; - vec![bp_polkadot_core::parachains::ParaId(bp_bridge_hub_rococo::BridgeHubRococo::PARACHAIN_ID)] - } - - fn prepare_parachain_heads_proof( - parachains: &[bp_polkadot_core::parachains::ParaId], - parachain_head_size: u32, - proof_size: bp_runtime::StorageProofSize, - ) -> ( - pallet_bridge_parachains::RelayBlockNumber, - pallet_bridge_parachains::RelayBlockHash, - bp_polkadot_core::parachains::ParaHeadsProof, - Vec<(bp_polkadot_core::parachains::ParaId, bp_polkadot_core::parachains::ParaHash)>, - ) { - prepare_parachain_heads_proof::( + prepare_parachain_heads_proof::( parachains, parachain_head_size, proof_size, @@ -1309,12 +1123,10 @@ mod tests { frame_system::CheckWeight::new(), pallet_transaction_payment::ChargeTransactionPayment::from(10), BridgeRejectObsoleteHeadersAndMessages, - ( - BridgeRefundBridgeHubRococoMessages::default(), - BridgeRefundBridgeHubWococoMessages::default(), - ), + (bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages::default(),) ); + // for BridgeHubRococo { let bhr_indirect_payload = bp_bridge_hub_rococo::SignedExtension::from_params( VERSION.spec_version, @@ -1331,23 +1143,6 @@ mod tests { bhr_indirect_payload.additional_signed().unwrap().encode() ) } - - { - let bhw_indirect_payload = bp_bridge_hub_rococo::SignedExtension::from_params( - VERSION.spec_version, - VERSION.transaction_version, - bp_runtime::TransactionEra::Immortal, - System::block_hash(BlockNumber::zero()), - 10, - 10, - (((), ()), ((), ())), - ); - assert_eq!(payload.encode(), bhw_indirect_payload.encode()); - assert_eq!( - payload.additional_signed().unwrap().encode(), - bhw_indirect_payload.additional_signed().unwrap().encode() - ) - } }); } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/cumulus_pallet_dmp_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/cumulus_pallet_dmp_queue.rs new file mode 100644 index 0000000000000000000000000000000000000000..cc41dcd6cbbb62c1392ae2e7517b5dfe920a5b85 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/cumulus_pallet_dmp_queue.rs @@ -0,0 +1,131 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. 
+ +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `cumulus_pallet_dmp_queue` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-10-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=cumulus_pallet_dmp_queue +// --chain=asset-hub-kusama-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `cumulus_pallet_dmp_queue`. +pub struct WeightInfo(PhantomData); +impl cumulus_pallet_dmp_queue::WeightInfo for WeightInfo { + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn on_idle_good_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65696` + // Estimated: `69161` + // Minimum execution time: 124_651_000 picoseconds. 
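	// How to read these autogenerated summaries: `Measured` is the storage-proof size
	// observed during the benchmark run, `Estimated` is the worst-case bound charged as
	// the weight's `proof_size` component (the `69161` added below), and the picosecond
	// figure passed to `Weight::from_parts` is the benchmarked execution time that
	// becomes the `ref_time` component.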
+ Weight::from_parts(127_857_000, 0) + .saturating_add(Weight::from_parts(0, 69161)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + fn on_idle_large_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65659` + // Estimated: `69124` + // Minimum execution time: 65_684_000 picoseconds. + Weight::from_parts(68_039_000, 0) + .saturating_add(Weight::from_parts(0, 69124)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn on_idle_overweight_good_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65726` + // Estimated: `69191` + // Minimum execution time: 117_657_000 picoseconds. 
+ Weight::from_parts(122_035_000, 0) + .saturating_add(Weight::from_parts(0, 69191)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(6)) + } + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + fn on_idle_overweight_large_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65689` + // Estimated: `69154` + // Minimum execution time: 59_799_000 picoseconds. + Weight::from_parts(61_354_000, 0) + .saturating_add(Weight::from_parts(0, 69154)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/cumulus_pallet_parachain_system.rs new file mode 100644 index 0000000000000000000000000000000000000000..dc480c391636a92aad6e303a515524b5ce7ff2b1 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/cumulus_pallet_parachain_system.rs @@ -0,0 +1,80 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `cumulus_pallet_parachain_system` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-03-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` +//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --chain +// bridge-hub-rococo-dev +// --pallet +// cumulus_pallet_parachain_system +// --extrinsic +// * +// --execution +// wasm +// --wasm-execution +// compiled +// --output +// parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights +// --steps +// 50 +// --repeat +// 20 + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::Weight}; +use sp_std::marker::PhantomData; + +/// Weight functions for `cumulus_pallet_parachain_system`. +pub struct WeightInfo(PhantomData); +impl cumulus_pallet_parachain_system::WeightInfo for WeightInfo { + /// Storage: ParachainSystem LastDmqMqcHead (r:1 w:1) + /// Proof Skipped: ParachainSystem LastDmqMqcHead (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: ParachainSystem ReservedDmpWeightOverride (r:1 w:0) + /// Proof Skipped: ParachainSystem ReservedDmpWeightOverride (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: ParachainSystem ProcessedDownwardMessages (r:0 w:1) + /// Proof Skipped: ParachainSystem ProcessedDownwardMessages (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: MessageQueue Pages (r:0 w:16) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// The range of component `n` is `[0, 1000]`. + fn enqueue_inbound_downward_messages(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `12` + // Estimated: `8013` + // Minimum execution time: 1_645_000 picoseconds. + Weight::from_parts(1_717_000, 0) + .saturating_add(Weight::from_parts(0, 8013)) + // Standard Error: 12_258 + .saturating_add(Weight::from_parts(24_890_934, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/cumulus_pallet_xcmp_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/cumulus_pallet_xcmp_queue.rs index 0106d6398f45938ae89603622db23abf300ec3ca..70fc3617bce93e2fa9246a649cd3adf81c1cda23 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/cumulus_pallet_xcmp_queue.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/cumulus_pallet_xcmp_queue.rs @@ -1,43 +1,38 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `cumulus_pallet_xcmp_queue` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 +//! HOSTNAME: `Olivers-MacBook-Pro.local`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// ./target/release/polkadot-parachain // benchmark // pallet -// --chain=bridge-hub-rococo-dev -// --wasm-execution=compiled -// --pallet=cumulus_pallet_xcmp_queue -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ +// --pallet +// cumulus-pallet-xcmp-queue +// --chain +// bridge-hub-rococo-dev +// --output +// cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/cumulus_pallet_xcmp_queue.rs +// --extrinsic +// #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -56,22 +51,98 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `76` // Estimated: `1561` - // Minimum execution time: 4_930_000 picoseconds. - Weight::from_parts(5_292_000, 0) + // Minimum execution time: 5_000_000 picoseconds. 
+ Weight::from_parts(6_000_000, 0) .saturating_add(Weight::from_parts(0, 1561)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `XcmpQueue::QueueConfig` (r:1 w:1) + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn set_config_with_weight() -> Weight { + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn enqueue_xcmp_message() -> Weight { + // Proof Size summary in bytes: + // Measured: `82` + // Estimated: `3517` + // Minimum execution time: 14_000_000 picoseconds. + Weight::from_parts(15_000_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn suspend_channel() -> Weight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `1561` - // Minimum execution time: 5_012_000 picoseconds. - Weight::from_parts(5_282_000, 0) + // Minimum execution time: 3_000_000 picoseconds. + Weight::from_parts(3_000_000, 0) .saturating_add(Weight::from_parts(0, 1561)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn resume_channel() -> Weight { + // Proof Size summary in bytes: + // Measured: `111` + // Estimated: `1596` + // Minimum execution time: 4_000_000 picoseconds. + Weight::from_parts(4_000_000, 0) + .saturating_add(Weight::from_parts(0, 1596)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn take_first_concatenated_xcm() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 44_000_000 picoseconds. 
+ Weight::from_parts(45_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Storage: `XcmpQueue::InboundXcmpMessages` (r:1 w:1) + /// Proof: `XcmpQueue::InboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) + /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn on_idle_good_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65711` + // Estimated: `69176` + // Minimum execution time: 67_000_000 picoseconds. + Weight::from_parts(73_000_000, 0) + .saturating_add(Weight::from_parts(0, 69176)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + fn on_idle_large_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65710` + // Estimated: `69175` + // Minimum execution time: 49_000_000 picoseconds. + Weight::from_parts(55_000_000, 0) + .saturating_add(Weight::from_parts(0, 69175)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/frame_system.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/frame_system.rs index 3dec4cc7f182c9aede28084122747dca63b24431..b0f7806be8ee7b3509895652a94f10a272913d09 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/frame_system.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/frame_system.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `frame_system` //! diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs index 54c7c03fb6085dca07eebd9ddfb052d9a5dbb969..a615f5395470064cf9366fa0e7b0fd5b258f9476 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs @@ -17,19 +17,22 @@ //! Expose the auto generated weight files. +use ::pallet_bridge_messages::WeightInfoExt as MessagesWeightInfoExt; +use ::pallet_bridge_parachains::WeightInfoExt as ParachainsWeightInfoExt; + pub mod block_weights; +pub mod cumulus_pallet_dmp_queue; +pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; pub mod pallet_balances; -pub mod pallet_bridge_grandpa_bridge_rococo_grandpa; -pub mod pallet_bridge_grandpa_bridge_wococo_grandpa; -pub mod pallet_bridge_messages_bridge_messages_bench_runtime_with_bridge_hub_rococo_messages_instance; -pub mod pallet_bridge_messages_bridge_messages_bench_runtime_with_bridge_hub_wococo_messages_instance; -pub mod pallet_bridge_parachains_bridge_parachains_bench_runtime_bridge_parachain_rococo_instance; -pub mod pallet_bridge_parachains_bridge_parachains_bench_runtime_bridge_parachain_wococo_instance; +pub mod pallet_bridge_grandpa; +pub mod pallet_bridge_messages; +pub mod pallet_bridge_parachains; pub mod pallet_bridge_relayers; pub mod pallet_collator_selection; +pub mod pallet_message_queue; pub mod pallet_multisig; pub mod pallet_session; pub mod pallet_timestamp; @@ -50,27 +53,14 @@ use frame_support::weights::Weight; // import trait from dependency module use ::pallet_bridge_relayers::WeightInfoExt as _; -impl pallet_bridge_messages::WeightInfoExt for pallet_bridge_messages_bridge_messages_bench_runtime_with_bridge_hub_rococo_messages_instance::WeightInfo { - fn expected_extra_storage_proof_size() -> u32 { - bp_bridge_hub_rococo::EXTRA_STORAGE_PROOF_SIZE - } - - fn receive_messages_proof_overhead_from_runtime() -> Weight { - pallet_bridge_relayers::WeightInfo::::receive_messages_proof_overhead_from_runtime() - } - - fn receive_messages_delivery_proof_overhead_from_runtime() -> Weight { - pallet_bridge_relayers::WeightInfo::::receive_messages_delivery_proof_overhead_from_runtime() - } -} - -impl pallet_bridge_messages::WeightInfoExt for pallet_bridge_messages_bridge_messages_bench_runtime_with_bridge_hub_wococo_messages_instance::WeightInfo { +impl MessagesWeightInfoExt for pallet_bridge_messages::WeightInfo { fn expected_extra_storage_proof_size() -> u32 { - bp_bridge_hub_wococo::EXTRA_STORAGE_PROOF_SIZE + bp_bridge_hub_westend::EXTRA_STORAGE_PROOF_SIZE } fn receive_messages_proof_overhead_from_runtime() -> Weight { - pallet_bridge_relayers::WeightInfo::::receive_messages_proof_overhead_from_runtime() + pallet_bridge_relayers::WeightInfo::::receive_messages_proof_overhead_from_runtime( + ) } fn receive_messages_delivery_proof_overhead_from_runtime() -> Weight { @@ -78,14 
+68,8 @@ impl pallet_bridge_messages::WeightInfoExt for pallet_bridge_messages_bridge_mes } } -impl pallet_bridge_parachains::WeightInfoExt for pallet_bridge_parachains_bridge_parachains_bench_runtime_bridge_parachain_rococo_instance::WeightInfo { - fn expected_extra_storage_proof_size() -> u32 { - bp_bridge_hub_rococo::EXTRA_STORAGE_PROOF_SIZE - } -} - -impl pallet_bridge_parachains::WeightInfoExt for pallet_bridge_parachains_bridge_parachains_bench_runtime_bridge_parachain_wococo_instance::WeightInfo { +impl ParachainsWeightInfoExt for pallet_bridge_parachains::WeightInfo { fn expected_extra_storage_proof_size() -> u32 { - bp_bridge_hub_wococo::EXTRA_STORAGE_PROOF_SIZE + bp_bridge_hub_westend::EXTRA_STORAGE_PROOF_SIZE } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_balances.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_balances.rs index 26a188a98610a7ecc822eb5e7e448ad7f1867eb8..ba8551a5ebb96948e46598bc8325e9949080388b 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_balances.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_balances.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_balances` //! diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa.rs index 5fbe2da8eaa31ae2689a389709fa468ee2d975a7..aaa6a3e06221e18d4837175f3040d464607bc003 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa.rs @@ -17,25 +17,25 @@ //! Autogenerated weights for `pallet_bridge_grandpa` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm4`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./artifacts/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=bridge-hub-rococo-dev -// --execution=wasm -// --wasm-execution=compiled -// --pallet=pallet_bridge_grandpa -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa.rs +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_bridge_grandpa +// --chain=bridge-hub-rococo-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -48,33 +48,31 @@ use core::marker::PhantomData; /// Weight functions for `pallet_bridge_grandpa`. pub struct WeightInfo(PhantomData); impl pallet_bridge_grandpa::WeightInfo for WeightInfo { - /// Storage: BridgeRococoGrandpa PalletOperatingMode (r:1 w:0) - /// Proof: BridgeRococoGrandpa PalletOperatingMode (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) - /// Storage: BridgeRococoGrandpa BestFinalized (r:1 w:1) - /// Proof: BridgeRococoGrandpa BestFinalized (max_values: Some(1), max_size: Some(36), added: 531, mode: MaxEncodedLen) - /// Storage: BridgeRococoGrandpa CurrentAuthoritySet (r:1 w:0) - /// Proof: BridgeRococoGrandpa CurrentAuthoritySet (max_values: Some(1), max_size: Some(50250), added: 50745, mode: MaxEncodedLen) - /// Storage: BridgeRococoGrandpa ImportedHashesPointer (r:1 w:1) - /// Proof: BridgeRococoGrandpa ImportedHashesPointer (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: BridgeRococoGrandpa ImportedHashes (r:1 w:1) - /// Proof: BridgeRococoGrandpa ImportedHashes (max_values: Some(1024), max_size: Some(36), added: 1521, mode: MaxEncodedLen) - /// Storage: BridgeRococoGrandpa ImportedHeaders (r:0 w:2) - /// Proof: BridgeRococoGrandpa ImportedHeaders (max_values: Some(1024), max_size: Some(68), added: 1553, mode: MaxEncodedLen) - /// The range of component `p` is `[1, 838]`. - /// The range of component `v` is `[50, 100]`. 
+ /// Storage: `BridgeWestendGrandpa::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendGrandpa::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::BestFinalized` (r:1 w:1) + /// Proof: `BridgeWestendGrandpa::BestFinalized` (`max_values`: Some(1), `max_size`: Some(36), added: 531, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::CurrentAuthoritySet` (r:1 w:0) + /// Proof: `BridgeWestendGrandpa::CurrentAuthoritySet` (`max_values`: Some(1), `max_size`: Some(50250), added: 50745, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::ImportedHashesPointer` (r:1 w:1) + /// Proof: `BridgeWestendGrandpa::ImportedHashesPointer` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::ImportedHashes` (r:1 w:1) + /// Proof: `BridgeWestendGrandpa::ImportedHashes` (`max_values`: Some(1024), `max_size`: Some(36), added: 1521, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::ImportedHeaders` (r:0 w:2) + /// Proof: `BridgeWestendGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 838]`. /// The range of component `v` is `[50, 100]`. fn submit_finality_proof(p: u32, v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `231 + p * (60 ±0)` + // Measured: `335 + p * (60 ±0)` // Estimated: `51735` - // Minimum execution time: 241_332_000 picoseconds. - Weight::from_parts(69_790_821, 0) + // Minimum execution time: 311_267_000 picoseconds. + Weight::from_parts(313_903_000, 0) .saturating_add(Weight::from_parts(0, 51735)) - // Standard Error: 6_013 - .saturating_add(Weight::from_parts(47_580_554, 0).saturating_mul(p.into())) - // Standard Error: 100_298 - .saturating_add(Weight::from_parts(1_213_475, 0).saturating_mul(v.into())) + // Standard Error: 4_779 + .saturating_add(Weight::from_parts(55_265_953, 0).saturating_mul(p.into())) + // Standard Error: 36_883 + .saturating_add(Weight::from_parts(153_660, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(5)) } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa_bridge_wococo_grandpa.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa_bridge_wococo_grandpa.rs deleted file mode 100644 index 5c7c824624748cf1df16cd3c8e198ae52c0fa5fc..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa_bridge_wococo_grandpa.rs +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `pallet_bridge_grandpa` -//! -//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=bridge-hub-rococo-dev -// --wasm-execution=compiled -// --pallet=pallet_bridge_grandpa -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_bridge_grandpa`. -pub struct WeightInfo(PhantomData); -impl pallet_bridge_grandpa::WeightInfo for WeightInfo { - /// Storage: `BridgeWococoGrandpa::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoGrandpa::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoGrandpa::BestFinalized` (r:1 w:1) - /// Proof: `BridgeWococoGrandpa::BestFinalized` (`max_values`: Some(1), `max_size`: Some(36), added: 531, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoGrandpa::CurrentAuthoritySet` (r:1 w:0) - /// Proof: `BridgeWococoGrandpa::CurrentAuthoritySet` (`max_values`: Some(1), `max_size`: Some(50250), added: 50745, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoGrandpa::ImportedHashesPointer` (r:1 w:1) - /// Proof: `BridgeWococoGrandpa::ImportedHashesPointer` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoGrandpa::ImportedHashes` (r:1 w:1) - /// Proof: `BridgeWococoGrandpa::ImportedHashes` (`max_values`: Some(1024), `max_size`: Some(36), added: 1521, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoGrandpa::ImportedHeaders` (r:0 w:2) - /// Proof: `BridgeWococoGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 838]`. - /// The range of component `v` is `[50, 100]`. - /// The range of component `p` is `[1, 838]`. - /// The range of component `v` is `[50, 100]`. - fn submit_finality_proof(p: u32, v: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `268 + p * (60 ±0)` - // Estimated: `51735` - // Minimum execution time: 260_423_000 picoseconds. 
- Weight::from_parts(46_213_879, 0) - .saturating_add(Weight::from_parts(0, 51735)) - // Standard Error: 4_794 - .saturating_add(Weight::from_parts(55_195_440, 0).saturating_mul(p.into())) - // Standard Error: 79_973 - .saturating_add(Weight::from_parts(1_710_302, 0).saturating_mul(v.into())) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages.rs index ec40615dc133949d8c880e00c687cb106d55cdc3..17a45df5bfb3c51d8ce42d262bbff4a496b72acf 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages.rs @@ -17,25 +17,25 @@ //! Autogenerated weights for `pallet_bridge_messages` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm4`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./artifacts/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=bridge-hub-rococo-dev -// --execution=wasm -// --wasm-execution=compiled -// --pallet=pallet_bridge_messages -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages.rs +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_bridge_messages +// --chain=bridge-hub-rococo-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -48,184 +48,195 @@ use core::marker::PhantomData; /// Weight functions for `pallet_bridge_messages`. 
pub struct WeightInfo(PhantomData); impl pallet_bridge_messages::WeightInfo for WeightInfo { - /// Storage: BridgeRococoMessages PalletOperatingMode (r:1 w:0) - /// Proof: BridgeRococoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), added: 497, mode: MaxEncodedLen) - /// Storage: BridgeRococoParachain ImportedParaHeads (r:1 w:0) - /// Proof: BridgeRococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) - /// Storage: BridgeRococoMessages InboundLanes (r:1 w:1) - /// Proof: BridgeRococoMessages InboundLanes (max_values: None, max_size: Some(49180), added: 51655, mode: MaxEncodedLen) - /// Storage: ParachainInfo ParachainId (r:1 w:0) - /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::InboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn receive_single_message_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `367` + // Measured: `538` // Estimated: `52645` - // Minimum execution time: 43_187_000 picoseconds. - Weight::from_parts(43_681_000, 0) + // Minimum execution time: 41_577_000 picoseconds. 
+ Weight::from_parts(42_621_000, 0) .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: BridgeRococoMessages PalletOperatingMode (r:1 w:0) - /// Proof: BridgeRococoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), added: 497, mode: MaxEncodedLen) - /// Storage: BridgeRococoParachain ImportedParaHeads (r:1 w:0) - /// Proof: BridgeRococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) - /// Storage: BridgeRococoMessages InboundLanes (r:1 w:1) - /// Proof: BridgeRococoMessages InboundLanes (max_values: None, max_size: Some(49180), added: 51655, mode: MaxEncodedLen) - /// Storage: ParachainInfo ParachainId (r:1 w:0) - /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::InboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn receive_two_messages_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `367` + // Measured: `538` // Estimated: `52645` - // Minimum execution time: 54_131_000 picoseconds. - Weight::from_parts(54_813_000, 0) + // Minimum execution time: 52_880_000 picoseconds. 
+ Weight::from_parts(53_697_000, 0) .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: BridgeRococoMessages PalletOperatingMode (r:1 w:0) - /// Proof: BridgeRococoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), added: 497, mode: MaxEncodedLen) - /// Storage: BridgeRococoParachain ImportedParaHeads (r:1 w:0) - /// Proof: BridgeRococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) - /// Storage: BridgeRococoMessages InboundLanes (r:1 w:1) - /// Proof: BridgeRococoMessages InboundLanes (max_values: None, max_size: Some(49180), added: 51655, mode: MaxEncodedLen) - /// Storage: ParachainInfo ParachainId (r:1 w:0) - /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::InboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn receive_single_message_proof_with_outbound_lane_state() -> Weight { // Proof Size summary in bytes: - // Measured: `367` + // Measured: `538` // Estimated: `52645` - // Minimum execution time: 48_120_000 picoseconds. - Weight::from_parts(48_733_000, 0) + // Minimum execution time: 47_424_000 picoseconds. 
+ Weight::from_parts(48_445_000, 0) .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: BridgeRococoMessages PalletOperatingMode (r:1 w:0) - /// Proof: BridgeRococoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), added: 497, mode: MaxEncodedLen) - /// Storage: BridgeRococoParachain ImportedParaHeads (r:1 w:0) - /// Proof: BridgeRococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) - /// Storage: BridgeRococoMessages InboundLanes (r:1 w:1) - /// Proof: BridgeRococoMessages InboundLanes (max_values: None, max_size: Some(49180), added: 51655, mode: MaxEncodedLen) + /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::InboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) fn receive_single_message_proof_1_kb() -> Weight { // Proof Size summary in bytes: - // Measured: `335` + // Measured: `506` // Estimated: `52645` - // Minimum execution time: 41_028_000 picoseconds. - Weight::from_parts(41_635_000, 0) + // Minimum execution time: 40_619_000 picoseconds. 
+ Weight::from_parts(42_262_000, 0) .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: BridgeRococoMessages PalletOperatingMode (r:1 w:0) - /// Proof: BridgeRococoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), added: 497, mode: MaxEncodedLen) - /// Storage: BridgeRococoParachain ImportedParaHeads (r:1 w:0) - /// Proof: BridgeRococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) - /// Storage: BridgeRococoMessages InboundLanes (r:1 w:1) - /// Proof: BridgeRococoMessages InboundLanes (max_values: None, max_size: Some(49180), added: 51655, mode: MaxEncodedLen) + /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::InboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) fn receive_single_message_proof_16_kb() -> Weight { // Proof Size summary in bytes: - // Measured: `335` + // Measured: `506` // Estimated: `52645` - // Minimum execution time: 68_499_000 picoseconds. - Weight::from_parts(69_263_000, 0) + // Minimum execution time: 74_603_000 picoseconds. 
+ Weight::from_parts(78_209_000, 0) .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: BridgeRococoMessages PalletOperatingMode (r:1 w:0) - /// Proof: BridgeRococoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), added: 497, mode: MaxEncodedLen) - /// Storage: BridgeRococoParachain ImportedParaHeads (r:1 w:0) - /// Proof: BridgeRococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) - /// Storage: BridgeRococoMessages OutboundLanes (r:1 w:1) - /// Proof: BridgeRococoMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: 539, mode: MaxEncodedLen) - /// Storage: unknown `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Proof Skipped: unknown `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Storage: BridgeRelayers RelayerRewards (r:1 w:1) - /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) + /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::OutboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) + /// Storage: `BridgeRelayers::RelayerRewards` (r:1 w:1) + /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn receive_delivery_proof_for_single_message() -> Weight { // Proof Size summary in bytes: - // Measured: `339` - // Estimated: `3804` - // Minimum execution time: 32_277_000 picoseconds. - Weight::from_parts(32_880_000, 0) - .saturating_add(Weight::from_parts(0, 3804)) + // Measured: `377` + // Estimated: `3842` + // Minimum execution time: 33_762_000 picoseconds. 
+ Weight::from_parts(34_405_000, 0) + .saturating_add(Weight::from_parts(0, 3842)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: BridgeRococoMessages PalletOperatingMode (r:1 w:0) - /// Proof: BridgeRococoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), added: 497, mode: MaxEncodedLen) - /// Storage: BridgeRococoParachain ImportedParaHeads (r:1 w:0) - /// Proof: BridgeRococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) - /// Storage: BridgeRococoMessages OutboundLanes (r:1 w:1) - /// Proof: BridgeRococoMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: 539, mode: MaxEncodedLen) - /// Storage: unknown `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Proof Skipped: unknown `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Storage: BridgeRelayers RelayerRewards (r:1 w:1) - /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) + /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::OutboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) + /// Storage: `BridgeRelayers::RelayerRewards` (r:1 w:1) + /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { // Proof Size summary in bytes: - // Measured: `339` - // Estimated: `3804` - // Minimum execution time: 32_504_000 picoseconds. - Weight::from_parts(33_085_000, 0) - .saturating_add(Weight::from_parts(0, 3804)) + // Measured: `377` + // Estimated: `3842` + // Minimum execution time: 33_805_000 picoseconds. 
+ Weight::from_parts(35_051_000, 0) + .saturating_add(Weight::from_parts(0, 3842)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: BridgeRococoMessages PalletOperatingMode (r:1 w:0) - /// Proof: BridgeRococoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), added: 497, mode: MaxEncodedLen) - /// Storage: BridgeRococoParachain ImportedParaHeads (r:1 w:0) - /// Proof: BridgeRococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) - /// Storage: BridgeRococoMessages OutboundLanes (r:1 w:1) - /// Proof: BridgeRococoMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: 539, mode: MaxEncodedLen) - /// Storage: unknown `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Proof Skipped: unknown `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Storage: BridgeRelayers RelayerRewards (r:2 w:2) - /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) + /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::OutboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) + /// Storage: `BridgeRelayers::RelayerRewards` (r:2 w:2) + /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { // Proof Size summary in bytes: - // Measured: `339` + // Measured: `377` // Estimated: `6086` - // Minimum execution time: 34_963_000 picoseconds. - Weight::from_parts(35_473_000, 0) + // Minimum execution time: 38_612_000 picoseconds. 
+ Weight::from_parts(39_412_000, 0) .saturating_add(Weight::from_parts(0, 6086)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: BridgeRococoMessages PalletOperatingMode (r:1 w:0) - /// Proof: BridgeRococoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), added: 497, mode: MaxEncodedLen) - /// Storage: BridgeRococoParachain ImportedParaHeads (r:1 w:0) - /// Proof: BridgeRococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) - /// Storage: BridgeRococoMessages InboundLanes (r:1 w:1) - /// Proof: BridgeRococoMessages InboundLanes (max_values: None, max_size: Some(49180), added: 51655, mode: MaxEncodedLen) - /// Storage: ParachainInfo ParachainId (r:1 w:0) - /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: PolkadotXcm SupportedVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem RelevantMessagingState (r:1 w:0) - /// Proof Skipped: ParachainSystem RelevantMessagingState (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: XcmpQueue OutboundXcmpStatus (r:1 w:1) - /// Proof Skipped: XcmpQueue OutboundXcmpStatus (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: XcmpQueue OutboundXcmpMessages (r:0 w:1) - /// Proof Skipped: XcmpQueue OutboundXcmpMessages (max_values: None, max_size: None, mode: Measured) - /// The range of component `i` is `[128, 2048]`. 
+ /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::InboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) + /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) + /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `i` is `[128, 2048]`. fn receive_single_message_proof_with_dispatch(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `635` + // Measured: `669` // Estimated: `52645` - // Minimum execution time: 129_978_000 picoseconds. - Weight::from_parts(98_246_356, 0) + // Minimum execution time: 69_285_000 picoseconds. + Weight::from_parts(70_867_498, 0) .saturating_add(Weight::from_parts(0, 52645)) - // Standard Error: 2_554 - .saturating_add(Weight::from_parts(544_728, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().reads(9)) + // Standard Error: 111 + .saturating_add(Weight::from_parts(7_489, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_bridge_messages_bench_runtime_with_bridge_hub_wococo_messages_instance.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_bridge_messages_bench_runtime_with_bridge_hub_wococo_messages_instance.rs deleted file mode 100644 index c3dbc19518bcd420338dbc810c3c8ed4889517f0..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_bridge_messages_bench_runtime_with_bridge_hub_wococo_messages_instance.rs +++ /dev/null @@ -1,233 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. 
- -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `pallet_bridge_messages` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=bridge-hub-rococo-dev -// --wasm-execution=compiled -// --pallet=pallet_bridge_messages -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_bridge_messages`. -pub struct WeightInfo(PhantomData); -impl pallet_bridge_messages::WeightInfo for WeightInfo { - /// Storage: `BridgeWococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeWococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn receive_single_message_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `404` - // Estimated: `52645` - // Minimum execution time: 43_991_000 picoseconds. 
- Weight::from_parts(45_465_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeWococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeWococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn receive_two_messages_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `404` - // Estimated: `52645` - // Minimum execution time: 54_906_000 picoseconds. - Weight::from_parts(56_412_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeWococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeWococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn receive_single_message_proof_with_outbound_lane_state() -> Weight { - // Proof Size summary in bytes: - // Measured: `404` - // Estimated: `52645` - // Minimum execution time: 48_906_000 picoseconds. - Weight::from_parts(51_665_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeWococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeWococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - fn receive_single_message_proof_1_kb() -> Weight { - // Proof Size summary in bytes: - // Measured: `372` - // Estimated: `52645` - // Minimum execution time: 42_784_000 picoseconds. 
- Weight::from_parts(44_788_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeWococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeWococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - fn receive_single_message_proof_16_kb() -> Weight { - // Proof Size summary in bytes: - // Measured: `372` - // Estimated: `52645` - // Minimum execution time: 75_805_000 picoseconds. - Weight::from_parts(77_731_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeWococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoMessages::OutboundLanes` (r:1 w:1) - /// Proof: `BridgeWococoMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Storage: `BridgeRelayers::RelayerRewards` (r:1 w:1) - /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - fn receive_delivery_proof_for_single_message() -> Weight { - // Proof Size summary in bytes: - // Measured: `376` - // Estimated: `3841` - // Minimum execution time: 32_012_000 picoseconds. 
- Weight::from_parts(32_653_000, 0) - .saturating_add(Weight::from_parts(0, 3841)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `BridgeWococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoMessages::OutboundLanes` (r:1 w:1) - /// Proof: `BridgeWococoMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Storage: `BridgeRelayers::RelayerRewards` (r:1 w:1) - /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { - // Proof Size summary in bytes: - // Measured: `376` - // Estimated: `3841` - // Minimum execution time: 31_851_000 picoseconds. - Weight::from_parts(33_017_000, 0) - .saturating_add(Weight::from_parts(0, 3841)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `BridgeWococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoMessages::OutboundLanes` (r:1 w:1) - /// Proof: `BridgeWococoMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Storage: `BridgeRelayers::RelayerRewards` (r:2 w:2) - /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { - // Proof Size summary in bytes: - // Measured: `376` - // Estimated: `6086` - // Minimum execution time: 34_818_000 picoseconds. 
- Weight::from_parts(35_728_000, 0) - .saturating_add(Weight::from_parts(0, 6086)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `BridgeWococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeWococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) - /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) - /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `i` is `[128, 2048]`. - /// The range of component `i` is `[128, 2048]`. - fn receive_single_message_proof_with_dispatch(i: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `672` - // Estimated: `52645` - // Minimum execution time: 172_737_000 picoseconds. - Weight::from_parts(175_172_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - // Standard Error: 3_669 - .saturating_add(Weight::from_parts(1_013_545, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().reads(9)) - .saturating_add(T::DbWeight::get().writes(4)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains.rs index c9f1d7e05d34e00fcb1f7b28939e93a01dece2de..5c7c4a63682d50dc6da06f4e878b16e567d73039 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains.rs @@ -17,25 +17,25 @@ //! Autogenerated weights for `pallet_bridge_parachains` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm4`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./artifacts/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=bridge-hub-rococo-dev -// --execution=wasm -// --wasm-execution=compiled -// --pallet=pallet_bridge_parachains -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains.rs +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_bridge_parachains +// --chain=bridge-hub-rococo-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -48,64 +48,63 @@ use core::marker::PhantomData; /// Weight functions for `pallet_bridge_parachains`. pub struct WeightInfo(PhantomData); impl pallet_bridge_parachains::WeightInfo for WeightInfo { - /// Storage: BridgeWococoParachain PalletOperatingMode (r:1 w:0) - /// Proof: BridgeWococoParachain PalletOperatingMode (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) - /// Storage: BridgeWococoGrandpa ImportedHeaders (r:1 w:0) - /// Proof: BridgeWococoGrandpa ImportedHeaders (max_values: Some(1024), max_size: Some(68), added: 1553, mode: MaxEncodedLen) - /// Storage: BridgeWococoParachain ParasInfo (r:1 w:1) - /// Proof: BridgeWococoParachain ParasInfo (max_values: Some(1), max_size: Some(60), added: 555, mode: MaxEncodedLen) - /// Storage: BridgeWococoParachain ImportedParaHashes (r:1 w:1) - /// Proof: BridgeWococoParachain ImportedParaHashes (max_values: Some(64), max_size: Some(64), added: 1054, mode: MaxEncodedLen) - /// Storage: BridgeWococoParachain ImportedParaHeads (r:0 w:1) - /// Proof: BridgeWococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) - /// The range of component `p` is `[1, 2]`. + /// Storage: `BridgeWestendParachains::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendParachains::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::ImportedHeaders` (r:1 w:0) + /// Proof: `BridgeWestendGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ParasInfo` (r:1 w:1) + /// Proof: `BridgeWestendParachains::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ImportedParaHashes` (r:1 w:1) + /// Proof: `BridgeWestendParachains::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:0 w:1) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 2]`. 
fn submit_parachain_heads_with_n_parachains(_p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `367` + // Measured: `434` // Estimated: `2543` - // Minimum execution time: 34_759_000 picoseconds. - Weight::from_parts(35_709_034, 0) + // Minimum execution time: 31_987_000 picoseconds. + Weight::from_parts(33_060_534, 0) .saturating_add(Weight::from_parts(0, 2543)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: BridgeWococoParachain PalletOperatingMode (r:1 w:0) - /// Proof: BridgeWococoParachain PalletOperatingMode (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) - /// Storage: BridgeWococoGrandpa ImportedHeaders (r:1 w:0) - /// Proof: BridgeWococoGrandpa ImportedHeaders (max_values: Some(1024), max_size: Some(68), added: 1553, mode: MaxEncodedLen) - /// Storage: BridgeWococoParachain ParasInfo (r:1 w:1) - /// Proof: BridgeWococoParachain ParasInfo (max_values: Some(1), max_size: Some(60), added: 555, mode: MaxEncodedLen) - /// Storage: BridgeWococoParachain ImportedParaHashes (r:1 w:1) - /// Proof: BridgeWococoParachain ImportedParaHashes (max_values: Some(64), max_size: Some(64), added: 1054, mode: MaxEncodedLen) - /// Storage: BridgeWococoParachain ImportedParaHeads (r:0 w:1) - /// Proof: BridgeWococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) + /// Storage: `BridgeWestendParachains::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendParachains::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::ImportedHeaders` (r:1 w:0) + /// Proof: `BridgeWestendGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ParasInfo` (r:1 w:1) + /// Proof: `BridgeWestendParachains::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ImportedParaHashes` (r:1 w:1) + /// Proof: `BridgeWestendParachains::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:0 w:1) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) fn submit_parachain_heads_with_1kb_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `367` + // Measured: `434` // Estimated: `2543` - // Minimum execution time: 36_005_000 picoseconds. - Weight::from_parts(36_492_000, 0) + // Minimum execution time: 33_360_000 picoseconds. 
+ Weight::from_parts(34_182_000, 0) .saturating_add(Weight::from_parts(0, 2543)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: BridgeWococoParachain PalletOperatingMode (r:1 w:0) - /// Proof: BridgeWococoParachain PalletOperatingMode (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) - /// Storage: BridgeWococoGrandpa ImportedHeaders (r:1 w:0) - /// Proof: BridgeWococoGrandpa ImportedHeaders (max_values: Some(1024), max_size: Some(68), added: 1553, mode: MaxEncodedLen) - /// Storage: BridgeWococoParachain ParasInfo (r:1 w:1) - /// Proof: BridgeWococoParachain ParasInfo (max_values: Some(1), max_size: Some(60), added: 555, mode: MaxEncodedLen) - /// Storage: BridgeWococoParachain ImportedParaHashes (r:1 w:1) - /// Proof: BridgeWococoParachain ImportedParaHashes (max_values: Some(64), max_size: Some(64), added: 1054, mode: MaxEncodedLen) - /// Storage: BridgeWococoParachain ImportedParaHeads (r:0 w:1) - /// Proof: BridgeWococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) + /// Storage: `BridgeWestendParachains::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendParachains::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::ImportedHeaders` (r:1 w:0) + /// Proof: `BridgeWestendGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ParasInfo` (r:1 w:1) + /// Proof: `BridgeWestendParachains::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ImportedParaHashes` (r:1 w:1) + /// Proof: `BridgeWestendParachains::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:0 w:1) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) fn submit_parachain_heads_with_16kb_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `367` + // Measured: `434` // Estimated: `2543` - // Minimum execution time: 62_374_000 picoseconds. - Weight::from_parts(62_977_000, 0) + // Minimum execution time: 65_246_000 picoseconds. + Weight::from_parts(65_985_000, 0) .saturating_add(Weight::from_parts(0, 2543)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains_bridge_parachains_bench_runtime_bridge_parachain_wococo_instance.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains_bridge_parachains_bench_runtime_bridge_parachain_wococo_instance.rs deleted file mode 100644 index 432f9f9969db11ccd9f38f51473bccc700bb9133..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains_bridge_parachains_bench_runtime_bridge_parachain_wococo_instance.rs +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. 
- -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `pallet_bridge_parachains` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=bridge-hub-rococo-dev -// --wasm-execution=compiled -// --pallet=pallet_bridge_parachains -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_bridge_parachains`. -pub struct WeightInfo(PhantomData); -impl pallet_bridge_parachains::WeightInfo for WeightInfo { - /// Storage: `BridgeWococoParachain::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoParachain::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoGrandpa::ImportedHeaders` (r:1 w:0) - /// Proof: `BridgeWococoGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ParasInfo` (r:1 w:1) - /// Proof: `BridgeWococoParachain::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ImportedParaHashes` (r:1 w:1) - /// Proof: `BridgeWococoParachain::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:0 w:1) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 2]`. - /// The range of component `p` is `[1, 2]`. - fn submit_parachain_heads_with_n_parachains(_p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `367` - // Estimated: `2543` - // Minimum execution time: 34_968_000 picoseconds. 
- Weight::from_parts(36_202_569, 0) - .saturating_add(Weight::from_parts(0, 2543)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `BridgeWococoParachain::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoParachain::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoGrandpa::ImportedHeaders` (r:1 w:0) - /// Proof: `BridgeWococoGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ParasInfo` (r:1 w:1) - /// Proof: `BridgeWococoParachain::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ImportedParaHashes` (r:1 w:1) - /// Proof: `BridgeWococoParachain::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:0 w:1) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - fn submit_parachain_heads_with_1kb_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `367` - // Estimated: `2543` - // Minimum execution time: 36_607_000 picoseconds. - Weight::from_parts(37_304_000, 0) - .saturating_add(Weight::from_parts(0, 2543)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `BridgeWococoParachain::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoParachain::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoGrandpa::ImportedHeaders` (r:1 w:0) - /// Proof: `BridgeWococoGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ParasInfo` (r:1 w:1) - /// Proof: `BridgeWococoParachain::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ImportedParaHashes` (r:1 w:1) - /// Proof: `BridgeWococoParachain::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:0 w:1) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - fn submit_parachain_heads_with_16kb_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `367` - // Estimated: `2543` - // Minimum execution time: 68_548_000 picoseconds. - Weight::from_parts(69_402_000, 0) - .saturating_add(Weight::from_parts(0, 2543)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_relayers.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_relayers.rs index a934a1be58271fa610733c597d1fce9bc97f588d..70af694645da2d55336b4aa92a2ccf9450d2d92d 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_relayers.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_relayers.rs @@ -17,27 +17,25 @@ //! Autogenerated weights for `pallet_bridge_relayers` //! //! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=bridge-hub-rococo-dev -// --wasm-execution=compiled -// --pallet=pallet_bridge_relayers -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_bridge_relayers +// --chain=bridge-hub-rococo-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -58,8 +56,8 @@ impl pallet_bridge_relayers::WeightInfo for WeightInfo< // Proof Size summary in bytes: // Measured: `207` // Estimated: `3593` - // Minimum execution time: 54_291_000 picoseconds. - Weight::from_parts(55_145_000, 0) + // Minimum execution time: 46_579_000 picoseconds. + Weight::from_parts(48_298_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -74,8 +72,8 @@ impl pallet_bridge_relayers::WeightInfo for WeightInfo< // Proof Size summary in bytes: // Measured: `61` // Estimated: `4714` - // Minimum execution time: 28_143_000 picoseconds. - Weight::from_parts(28_920_000, 0) + // Minimum execution time: 24_219_000 picoseconds. + Weight::from_parts(24_993_000, 0) .saturating_add(Weight::from_parts(0, 4714)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -88,8 +86,8 @@ impl pallet_bridge_relayers::WeightInfo for WeightInfo< // Proof Size summary in bytes: // Measured: `160` // Estimated: `4714` - // Minimum execution time: 30_329_000 picoseconds. - Weight::from_parts(30_646_000, 0) + // Minimum execution time: 26_279_000 picoseconds. + Weight::from_parts(26_810_000, 0) .saturating_add(Weight::from_parts(0, 4714)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -104,8 +102,8 @@ impl pallet_bridge_relayers::WeightInfo for WeightInfo< // Proof Size summary in bytes: // Measured: `263` // Estimated: `4714` - // Minimum execution time: 29_704_000 picoseconds. - Weight::from_parts(30_269_000, 0) + // Minimum execution time: 27_672_000 picoseconds. 
+ Weight::from_parts(28_946_000, 0) .saturating_add(Weight::from_parts(0, 4714)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) @@ -116,8 +114,8 @@ impl pallet_bridge_relayers::WeightInfo for WeightInfo< // Proof Size summary in bytes: // Measured: `6` // Estimated: `3538` - // Minimum execution time: 2_793_000 picoseconds. - Weight::from_parts(2_999_000, 0) + // Minimum execution time: 5_487_000 picoseconds. + Weight::from_parts(5_725_000, 0) .saturating_add(Weight::from_parts(0, 3538)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_collator_selection.rs index 9cbfa6ce80e3e4b45338786c3705500bf27a220f..f7e233189abb4443320c7bee6f28ee4856302452 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_collator_selection.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_collator_selection.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_collator_selection` //! @@ -124,7 +123,7 @@ impl pallet_collator_selection::WeightInfo for WeightIn } /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - fn set_candidacy_bond() -> Weight { + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` @@ -178,6 +177,30 @@ impl pallet_collator_selection::WeightInfo for WeightIn .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } + fn update_bond(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. 
+ Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn take_candidate_slot(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `System::BlockWeight` (r:1 w:1) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_message_queue.rs new file mode 100644 index 0000000000000000000000000000000000000000..2fcd573ceb277116bda67180da8e0701593ab453 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_message_queue.rs @@ -0,0 +1,179 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_message_queue` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-03-24, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --chain +// bridge-hub-rococo-dev +// --pallet +// pallet_message_queue +// --extrinsic +// * +// --execution +// wasm +// --wasm-execution +// compiled +// --output +// parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::Weight}; +use sp_std::marker::PhantomData; + +/// Weight functions for `pallet_message_queue`. 
+pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> pallet_message_queue::WeightInfo for WeightInfo<T> { + /// Storage: MessageQueue ServiceHead (r:1 w:0) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: MessageQueue BookStateFor (r:2 w:2) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn ready_ring_knit() -> Weight { + // Proof Size summary in bytes: + // Measured: `189` + // Estimated: `7534` + // Minimum execution time: 11_446_000 picoseconds. + Weight::from_parts(11_446_000, 0) + .saturating_add(Weight::from_parts(0, 7534)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: MessageQueue BookStateFor (r:2 w:2) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + fn ready_ring_unknit() -> Weight { + // Proof Size summary in bytes: + // Measured: `184` + // Estimated: `7534` + // Minimum execution time: 10_613_000 picoseconds. + Weight::from_parts(10_613_000, 0) + .saturating_add(Weight::from_parts(0, 7534)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn service_queue_base() -> Weight { + // Proof Size summary in bytes: + // Measured: `6` + // Estimated: `3517` + // Minimum execution time: 4_854_000 picoseconds. + Weight::from_parts(4_854_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn service_page_base_completion() -> Weight { + // Proof Size summary in bytes: + // Measured: `72` + // Estimated: `69050` + // Minimum execution time: 5_748_000 picoseconds. + Weight::from_parts(5_748_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn service_page_base_no_completion() -> Weight { + // Proof Size summary in bytes: + // Measured: `72` + // Estimated: `69050` + // Minimum execution time: 6_136_000 picoseconds. + Weight::from_parts(6_136_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn service_page_item() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 59_505_000 picoseconds.
+ Weight::from_parts(59_505_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: MessageQueue BookStateFor (r:1 w:0) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn bump_service_head() -> Weight { + // Proof Size summary in bytes: + // Measured: `99` + // Estimated: `5007` + // Minimum execution time: 6_506_000 picoseconds. + Weight::from_parts(6_506_000, 0) + .saturating_add(Weight::from_parts(0, 5007)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn reap_page() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `72567` + // Minimum execution time: 40_646_000 picoseconds. + Weight::from_parts(40_646_000, 0) + .saturating_add(Weight::from_parts(0, 72567)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn execute_overweight_page_removed() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `72567` + // Minimum execution time: 51_424_000 picoseconds. + Weight::from_parts(51_424_000, 0) + .saturating_add(Weight::from_parts(0, 72567)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn execute_overweight_page_updated() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `72567` + // Minimum execution time: 81_153_000 picoseconds. + Weight::from_parts(81_153_000, 0) + .saturating_add(Weight::from_parts(0, 72567)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_multisig.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_multisig.rs index 91840ae0c6d77cfb4507d5f7d7b8f2aca7c84b35..832380d3876bc3d19339779c83bba031304d17db 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_multisig.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_multisig.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. 
+// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_multisig` //! diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_session.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_session.rs index c9d04f9c6df65892243220531ac9d42dc885702d..06b2f3bcd2718107ac30dd9c78299e963002ae28 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_session.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_session.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_session` //! 
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_timestamp.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_timestamp.rs index 0a5bf9b9f9c1f8c483c2e2f86ae471b28e7f609a..1c54e9519b3c4b7e175f72a577ba6de5b67556dc 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_timestamp.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_timestamp.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_timestamp` //! diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_utility.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_utility.rs index 44cd0cf91e7998cb504587c481d13b110c9db072..d96b9e88840fd39e8553275ffd5d1a01cf310a0e 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_utility.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_utility.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_utility` //! diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs index 72bdb282585dec0533662bad5620f2434bcc15b1..5aa4999c624cf1b516ef0930d869dac460680396 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs @@ -17,27 +17,25 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=bridge-hub-rococo-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=bridge-hub-rococo-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,6 +48,8 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -64,22 +64,24 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `75` // Estimated: `3540` - // Minimum execution time: 29_724_000 picoseconds. - Weight::from_parts(30_440_000, 0) + // Minimum execution time: 24_179_000 picoseconds. 
+ Weight::from_parts(24_684_000, 0) .saturating_add(Weight::from_parts(0, 3540)) - .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn teleport_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `32` - // Estimated: `1489` - // Minimum execution time: 26_779_000 picoseconds. - Weight::from_parts(27_249_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - .saturating_add(T::DbWeight::get().reads(1)) + // Measured: `38` + // Estimated: `3503` + // Minimum execution time: 21_093_000 picoseconds. + Weight::from_parts(21_523_000, 0) + .saturating_add(Weight::from_parts(0, 3503)) + .saturating_add(T::DbWeight::get().reads(2)) } /// Storage: `Benchmark::Override` (r:0 w:0) /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -107,8 +109,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_170_000 picoseconds. - Weight::from_parts(9_629_000, 0) + // Minimum execution time: 6_938_000 picoseconds. + Weight::from_parts(7_243_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -118,8 +120,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_769_000 picoseconds. - Weight::from_parts(2_933_000, 0) + // Minimum execution time: 2_159_000 picoseconds. + Weight::from_parts(2_290_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -127,6 +129,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -143,14 +147,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `75` // Estimated: `3540` - // Minimum execution time: 34_547_000 picoseconds. - Weight::from_parts(35_653_000, 0) + // Minimum execution time: 28_337_000 picoseconds. 
+ Weight::from_parts(29_265_000, 0) .saturating_add(Weight::from_parts(0, 3540)) - .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -167,10 +173,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `292` // Estimated: `3757` - // Minimum execution time: 36_274_000 picoseconds. - Weight::from_parts(37_281_000, 0) + // Minimum execution time: 30_599_000 picoseconds. + Weight::from_parts(31_272_000, 0) .saturating_add(Weight::from_parts(0, 3757)) - .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) @@ -179,8 +185,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_749_000 picoseconds. - Weight::from_parts(2_917_000, 0) + // Minimum execution time: 2_132_000 picoseconds. + Weight::from_parts(2_280_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -190,8 +196,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `187` // Estimated: `11077` - // Minimum execution time: 17_649_000 picoseconds. - Weight::from_parts(17_964_000, 0) + // Minimum execution time: 18_262_000 picoseconds. + Weight::from_parts(18_640_000, 0) .saturating_add(Weight::from_parts(0, 11077)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -202,8 +208,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `191` // Estimated: `11081` - // Minimum execution time: 17_551_000 picoseconds. - Weight::from_parts(18_176_000, 0) + // Minimum execution time: 18_512_000 picoseconds. + Weight::from_parts(18_888_000, 0) .saturating_add(Weight::from_parts(0, 11081)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -214,13 +220,15 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `198` // Estimated: `13563` - // Minimum execution time: 19_261_000 picoseconds. - Weight::from_parts(19_714_000, 0) + // Minimum execution time: 19_362_000 picoseconds. 
+ Weight::from_parts(20_056_000, 0) .saturating_add(Weight::from_parts(0, 13563)) .saturating_add(T::DbWeight::get().reads(5)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -235,10 +243,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `142` // Estimated: `6082` - // Minimum execution time: 31_630_000 picoseconds. - Weight::from_parts(32_340_000, 0) + // Minimum execution time: 27_318_000 picoseconds. + Weight::from_parts(28_075_000, 0) .saturating_add(Weight::from_parts(0, 6082)) - .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) @@ -247,8 +255,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `172` // Estimated: `8587` - // Minimum execution time: 9_218_000 picoseconds. - Weight::from_parts(9_558_000, 0) + // Minimum execution time: 9_930_000 picoseconds. + Weight::from_parts(10_192_000, 0) .saturating_add(Weight::from_parts(0, 8587)) .saturating_add(T::DbWeight::get().reads(3)) } @@ -258,14 +266,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `198` // Estimated: `11088` - // Minimum execution time: 18_133_000 picoseconds. - Weight::from_parts(18_663_000, 0) + // Minimum execution time: 18_305_000 picoseconds. + Weight::from_parts(18_738_000, 0) .saturating_add(Weight::from_parts(0, 11088)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -280,10 +290,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `204` // Estimated: `11094` - // Minimum execution time: 38_878_000 picoseconds. - Weight::from_parts(39_779_000, 0) + // Minimum execution time: 34_559_000 picoseconds. 
+ Weight::from_parts(35_241_000, 0) .saturating_add(Weight::from_parts(0, 11094)) - .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn new_query() -> Weight { + // Proof Size summary in bytes: + // Measured: `69` + // Estimated: `1554` + // Minimum execution time: 4_512_000 picoseconds. + Weight::from_parts(4_671_000, 0) + .saturating_add(Weight::from_parts(0, 1554)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::Queries` (r:1 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn take_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `7706` + // Estimated: `11171` + // Minimum execution time: 26_473_000 picoseconds. + Weight::from_parts(26_960_000, 0) + .saturating_add(Weight::from_parts(0, 11171)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/mod.rs index 40a2036fb49a9b3ec7c121c5c1dd2a8451118497..1c2334a89e252f85484c1786a7698c49d808d010 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/mod.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
mod pallet_xcm_benchmarks_fungible; mod pallet_xcm_benchmarks_generic; @@ -62,16 +61,8 @@ impl<Call> XcmWeightInfo<Call> for BridgeHubRococoXcmWeight<Call> { fn withdraw_asset(assets: &MultiAssets) -> Weight { assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::withdraw_asset()) } - // Currently there is no trusted reserve (`IsReserve = ()`), - // but we need this hack for `pallet_xcm::reserve_transfer_assets` - // (TODO) fix https://github.com/paritytech/polkadot/pull/7424 - // (TODO) fix https://github.com/paritytech/polkadot/pull/7546 - fn reserve_asset_deposited(_assets: &MultiAssets) -> Weight { - // TODO: if we change `IsReserve = ...` then use this line... - // TODO: or if remote weight estimation is fixed, then remove - // TODO: hardcoded - fix https://github.com/paritytech/cumulus/issues/1974 - let hardcoded_weight = Weight::from_parts(1_000_000_000_u64, 0); - hardcoded_weight.min(XcmFungibleWeight::<Runtime>::reserve_asset_deposited()) + fn reserve_asset_deposited(assets: &MultiAssets) -> Weight { + assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::reserve_asset_deposited()) } fn receive_teleported_asset(assets: &MultiAssets) -> Weight { assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::receive_teleported_asset()) @@ -128,10 +119,7 @@ impl<Call> XcmWeightInfo<Call> for BridgeHubRococoXcmWeight<Call> { } fn deposit_asset(assets: &MultiAssetFilter, _dest: &MultiLocation) -> Weight { - // Hardcoded till the XCM pallet is fixed - let hardcoded_weight = Weight::from_parts(1_000_000_000_u64, 0); - let weight = assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::deposit_asset()); - hardcoded_weight.min(weight) + assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::deposit_asset()) } fn deposit_reserve_asset( assets: &MultiAssetFilter, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs index 8f9fbc912454b5c89759fad5faf440c3275cbcf1..d7e8c41ff8ac41acfeb60f30774e8282939b6c1c 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs @@ -17,28 +17,26 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::fungible` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//!
WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --template=./templates/xcm-bench-template.hbs -// --chain=bridge-hub-rococo-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm_benchmarks::fungible -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm_benchmarks::fungible +// --chain=bridge-hub-rococo-dev +// --header=./cumulus/file_header.txt +// --template=./cumulus/templates/xcm-bench-template.hbs +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -56,8 +54,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `3593` - // Minimum execution time: 24_521_000 picoseconds. - Weight::from_parts(25_005_000, 3593) + // Minimum execution time: 19_610_000 picoseconds. + Weight::from_parts(19_980_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -67,15 +65,17 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `153` // Estimated: `6196` - // Minimum execution time: 52_274_000 picoseconds. - Weight::from_parts(53_374_000, 6196) + // Minimum execution time: 44_411_000 picoseconds. + Weight::from_parts(45_110_000, 6196) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - // Storage: `System::Account` (r:2 w:2) + // Storage: `System::Account` (r:3 w:3) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -88,12 +88,12 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn transfer_reserve_asset() -> Weight { // Proof Size summary in bytes: - // Measured: `260` - // Estimated: `6196` - // Minimum execution time: 77_625_000 picoseconds. - Weight::from_parts(78_530_000, 6196) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(4)) + // Measured: `223` + // Estimated: `8799` + // Minimum execution time: 89_739_000 picoseconds. 
+ Weight::from_parts(91_256_000, 8799) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().writes(5)) } // Storage: `Benchmark::Override` (r:0 w:0) // Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -101,36 +101,40 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 500_000_000_000 picoseconds. - Weight::from_parts(500_000_000_000, 0) + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn initiate_reserve_withdraw() -> Weight { // Proof Size summary in bytes: - // Measured: `107` - // Estimated: `3572` - // Minimum execution time: 32_804_000 picoseconds. - Weight::from_parts(33_462_000, 3572) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `171` + // Estimated: `6196` + // Minimum execution time: 60_045_000 picoseconds. + Weight::from_parts(60_710_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) } pub fn receive_teleported_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_921_000 picoseconds. - Weight::from_parts(4_050_000, 0) + // Minimum execution time: 3_257_000 picoseconds. + Weight::from_parts(3_392_000, 0) } // Storage: `System::Account` (r:1 w:1) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) @@ -138,15 +142,17 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `52` // Estimated: `3593` - // Minimum execution time: 25_436_000 picoseconds. - Weight::from_parts(25_789_000, 3593) + // Minimum execution time: 19_423_000 picoseconds. 
+ Weight::from_parts(19_823_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - // Storage: `System::Account` (r:1 w:1) + // Storage: `System::Account` (r:2 w:2) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -159,32 +165,36 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn deposit_reserve_asset() -> Weight { // Proof Size summary in bytes: - // Measured: `159` - // Estimated: `3624` - // Minimum execution time: 53_846_000 picoseconds. - Weight::from_parts(54_684_000, 3624) - .saturating_add(T::DbWeight::get().reads(7)) - .saturating_add(T::DbWeight::get().writes(3)) + // Measured: `122` + // Estimated: `6196` + // Minimum execution time: 60_484_000 picoseconds. + Weight::from_parts(61_634_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:1 w:1) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn initiate_teleport() -> Weight { // Proof Size summary in bytes: - // Measured: `107` - // Estimated: `3572` - // Minimum execution time: 33_052_000 picoseconds. - Weight::from_parts(33_897_000, 3572) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `70` + // Estimated: `3593` + // Minimum execution time: 44_863_000 picoseconds. 
+ Weight::from_parts(45_549_000, 3593) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(3)) } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index da3404909f31ca9418a9f2d21117178833bb1a11..0ae6d0b5623fbfefb3e55c8a5bf3f43370d21a4b 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -17,28 +17,26 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::generic` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --template=./templates/xcm-bench-template.hbs -// --chain=bridge-hub-rococo-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm_benchmarks::generic -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm_benchmarks::generic +// --chain=bridge-hub-rococo-dev +// --header=./cumulus/file_header.txt +// --template=./cumulus/templates/xcm-bench-template.hbs +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -52,120 +50,128 @@ pub struct WeightInfo(PhantomData); impl WeightInfo { // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) // Storage: 
`ParachainSystem::HostConfiguration` (r:1 w:0) // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn report_holding() -> Weight { // Proof Size summary in bytes: - // Measured: `107` - // Estimated: `3572` - // Minimum execution time: 37_350_000 picoseconds. - Weight::from_parts(38_105_000, 3572) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `171` + // Estimated: `6196` + // Minimum execution time: 63_453_000 picoseconds. + Weight::from_parts(64_220_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) } pub fn buy_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_042_000 picoseconds. - Weight::from_parts(3_117_000, 0) + // Minimum execution time: 2_238_000 picoseconds. + Weight::from_parts(2_351_000, 0) } // Storage: `PolkadotXcm::Queries` (r:1 w:0) // Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) pub fn query_response() -> Weight { // Proof Size summary in bytes: - // Measured: `69` - // Estimated: `3534` - // Minimum execution time: 11_037_000 picoseconds. - Weight::from_parts(11_465_000, 3534) + // Measured: `32` + // Estimated: `3497` + // Minimum execution time: 7_953_000 picoseconds. + Weight::from_parts(8_162_000, 3497) .saturating_add(T::DbWeight::get().reads(1)) } pub fn transact() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 12_359_000 picoseconds. - Weight::from_parts(12_741_000, 0) + // Minimum execution time: 9_080_000 picoseconds. + Weight::from_parts(9_333_000, 0) } pub fn refund_surplus() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_165_000 picoseconds. - Weight::from_parts(3_295_000, 0) + // Minimum execution time: 2_415_000 picoseconds. + Weight::from_parts(2_519_000, 0) } pub fn set_error_handler() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_847_000 picoseconds. - Weight::from_parts(2_893_000, 0) + // Minimum execution time: 2_045_000 picoseconds. + Weight::from_parts(2_184_000, 0) } pub fn set_appendix() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_847_000 picoseconds. - Weight::from_parts(2_936_000, 0) + // Minimum execution time: 2_065_000 picoseconds. + Weight::from_parts(2_125_000, 0) } pub fn clear_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_856_000 picoseconds. - Weight::from_parts(2_933_000, 0) + // Minimum execution time: 2_077_000 picoseconds. + Weight::from_parts(2_164_000, 0) } pub fn descend_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_635_000 picoseconds. - Weight::from_parts(3_710_000, 0) + // Minimum execution time: 2_868_000 picoseconds. + Weight::from_parts(2_933_000, 0) } pub fn clear_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_822_000 picoseconds. 
- Weight::from_parts(2_899_000, 0) + // Minimum execution time: 2_058_000 picoseconds. + Weight::from_parts(2_164_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn report_error() -> Weight { // Proof Size summary in bytes: - // Measured: `107` - // Estimated: `3572` - // Minimum execution time: 29_399_000 picoseconds. - Weight::from_parts(30_284_000, 3572) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `171` + // Estimated: `6196` + // Minimum execution time: 55_971_000 picoseconds. + Weight::from_parts(56_869_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) } // Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) // Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) pub fn claim_asset() -> Weight { // Proof Size summary in bytes: - // Measured: `126` - // Estimated: `3591` - // Minimum execution time: 16_173_000 picoseconds. - Weight::from_parts(16_576_000, 3591) + // Measured: `90` + // Estimated: `3555` + // Minimum execution time: 11_382_000 picoseconds. + Weight::from_parts(11_672_000, 3555) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -173,11 +179,13 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_882_000 picoseconds. - Weight::from_parts(3_017_000, 0) + // Minimum execution time: 2_071_000 picoseconds. 
+ Weight::from_parts(2_193_000, 0) } // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -190,11 +198,11 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn subscribe_version() -> Weight { // Proof Size summary in bytes: - // Measured: `75` - // Estimated: `3540` - // Minimum execution time: 29_839_000 picoseconds. - Weight::from_parts(30_519_000, 3540) - .saturating_add(T::DbWeight::get().reads(6)) + // Measured: `38` + // Estimated: `3503` + // Minimum execution time: 22_573_000 picoseconds. + Weight::from_parts(23_423_000, 3503) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } // Storage: `PolkadotXcm::VersionNotifyTargets` (r:0 w:1) @@ -203,147 +211,157 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_806_000 picoseconds. - Weight::from_parts(5_042_000, 0) + // Minimum execution time: 3_870_000 picoseconds. + Weight::from_parts(3_993_000, 0) .saturating_add(T::DbWeight::get().writes(1)) } pub fn burn_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_407_000 picoseconds. - Weight::from_parts(4_548_000, 0) + // Minimum execution time: 3_483_000 picoseconds. + Weight::from_parts(3_598_000, 0) } pub fn expect_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_930_000 picoseconds. - Weight::from_parts(3_042_000, 0) + // Minimum execution time: 2_241_000 picoseconds. + Weight::from_parts(2_297_000, 0) } pub fn expect_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_915_000 picoseconds. - Weight::from_parts(3_052_000, 0) + // Minimum execution time: 2_230_000 picoseconds. + Weight::from_parts(2_318_000, 0) } pub fn expect_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_823_000 picoseconds. - Weight::from_parts(2_912_000, 0) + // Minimum execution time: 2_051_000 picoseconds. + Weight::from_parts(2_153_000, 0) } pub fn expect_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_119_000 picoseconds. - Weight::from_parts(3_205_000, 0) + // Minimum execution time: 2_306_000 picoseconds. 
+ Weight::from_parts(2_380_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn query_pallet() -> Weight { // Proof Size summary in bytes: - // Measured: `107` - // Estimated: `3572` - // Minimum execution time: 33_394_000 picoseconds. - Weight::from_parts(34_497_000, 3572) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `171` + // Estimated: `6196` + // Minimum execution time: 60_201_000 picoseconds. + Weight::from_parts(61_132_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) } pub fn expect_pallet() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_471_000 picoseconds. - Weight::from_parts(5_640_000, 0) + // Minimum execution time: 4_554_000 picoseconds. 
+ Weight::from_parts(4_704_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn report_transact_status() -> Weight { // Proof Size summary in bytes: - // Measured: `107` - // Estimated: `3572` - // Minimum execution time: 29_932_000 picoseconds. - Weight::from_parts(30_478_000, 3572) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `171` + // Estimated: `6196` + // Minimum execution time: 56_071_000 picoseconds. + Weight::from_parts(56_889_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) } pub fn clear_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_807_000 picoseconds. - Weight::from_parts(2_941_000, 0) + // Minimum execution time: 2_093_000 picoseconds. + Weight::from_parts(2_169_000, 0) } pub fn set_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_785_000 picoseconds. - Weight::from_parts(2_894_000, 0) + // Minimum execution time: 2_027_000 picoseconds. + Weight::from_parts(2_172_000, 0) } pub fn clear_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_844_000 picoseconds. - Weight::from_parts(2_943_000, 0) + // Minimum execution time: 2_035_000 picoseconds. 
+ Weight::from_parts(2_164_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `BridgeWococoMessages::PalletOperatingMode` (r:1 w:0) - // Proof: `BridgeWococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - // Storage: `BridgeWococoMessages::OutboundLanes` (r:1 w:1) - // Proof: `BridgeWococoMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) - // Storage: `BridgeWococoMessages::OutboundMessages` (r:0 w:1) - // Proof: `BridgeWococoMessages::OutboundMessages` (`max_values`: None, `max_size`: Some(2621472), added: 2623947, mode: `MaxEncodedLen`) + // Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) + // Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + // Storage: `BridgeWestendMessages::OutboundLanes` (r:1 w:1) + // Proof: `BridgeWestendMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) + // Storage: `BridgeWestendMessages::OutboundLanesCongestedSignals` (r:1 w:0) + // Proof: `BridgeWestendMessages::OutboundLanesCongestedSignals` (`max_values`: Some(1), `max_size`: Some(21), added: 516, mode: `MaxEncodedLen`) + // Storage: `BridgeWestendMessages::OutboundMessages` (r:0 w:1) + // Proof: `BridgeWestendMessages::OutboundMessages` (`max_values`: None, `max_size`: Some(2621472), added: 2623947, mode: `MaxEncodedLen`) /// The range of component `x` is `[1, 1000]`. pub fn export_message(x: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `95` + // Measured: `96` // Estimated: `1529` - // Minimum execution time: 28_427_000 picoseconds. - Weight::from_parts(28_755_860, 1529) - // Standard Error: 383 - .saturating_add(Weight::from_parts(393_744, 0).saturating_mul(x.into())) - .saturating_add(T::DbWeight::get().reads(3)) + // Minimum execution time: 25_636_000 picoseconds. + Weight::from_parts(25_405_640, 1529) + // Standard Error: 321 + .saturating_add(Weight::from_parts(365_002, 0).saturating_mul(x.into())) + .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } pub fn set_fees_mode() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_781_000 picoseconds. - Weight::from_parts(2_907_000, 0) + // Minimum execution time: 2_036_000 picoseconds. + Weight::from_parts(2_136_000, 0) } pub fn unpaid_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_001_000 picoseconds. - Weight::from_parts(3_117_000, 0) + // Minimum execution time: 2_147_000 picoseconds. + Weight::from_parts(2_276_000, 0) } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs index e3d8645d49e7e6e6b0d6a7dd82a244c5cca90ee1..de7b5315c883d68e486f1de4d567a67dbcd05756 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs @@ -15,62 +15,59 @@ // along with Cumulus. If not, see . 
use super::{ - AccountId, AllPalletsWithSystem, Balances, BridgeGrandpaRococoInstance, - BridgeGrandpaWococoInstance, DeliveryRewardInBalance, ParachainInfo, ParachainSystem, - PolkadotXcm, RequiredStakeForStakeAndSlash, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, - WeightToFee, XcmpQueue, + AccountId, AllPalletsWithSystem, Balances, BaseDeliveryFee, FeeAssetId, ParachainInfo, + ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, + TransactionByteFee, WeightToFee, XcmpQueue, }; -use crate::{ - bridge_hub_rococo_config::ToBridgeHubWococoHaulBlobExporter, - bridge_hub_wococo_config::ToBridgeHubRococoHaulBlobExporter, +use crate::bridge_common_config::{ + BridgeGrandpaWestendInstance, DeliveryRewardInBalance, RequiredStakeForStakeAndSlash, }; +use bp_messages::LaneId; +use bp_relayers::{PayRewardFromAccount, RewardsAccountOwner, RewardsAccountParams}; +use bp_runtime::ChainId; use frame_support::{ match_types, parameter_types, - traits::{ConstU32, Contains, Everything, Nothing}, + traits::{ConstU32, Contains, Equals, Everything, Nothing}, }; use frame_system::EnsureRoot; use pallet_xcm::XcmPassthrough; -use parachains_common::{impls::ToStakingPot, xcm_config::ConcreteNativeAssetFrom}; +use parachains_common::{ + impls::ToStakingPot, + xcm_config::{ConcreteAssetFromSystem, RelayOrOtherSystemParachains}, + TREASURY_PALLET_ID, +}; use polkadot_parachain_primitives::primitives::Sibling; +use polkadot_runtime_common::xcm_sender::ExponentialPrice; +use rococo_runtime_constants::system_parachain; use sp_core::Get; +use sp_runtime::traits::AccountIdConversion; +use sp_std::marker::PhantomData; use xcm::latest::prelude::*; use xcm_builder::{ - AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, - AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, AllowUnpaidExecutionFrom, - CurrencyAdapter, DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, IsConcrete, - ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, + deposit_or_burn_fee, AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, + AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, + CurrencyAdapter, DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, HandleFee, + IsConcrete, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, - WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, + WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, + XcmFeeToAccount, }; use xcm_executor::{ - traits::{ExportXcm, WithOriginFilter}, + traits::{FeeReason, TransactAsset, WithOriginFilter}, XcmExecutor, }; parameter_types! 
{ - pub const RelayLocation: MultiLocation = MultiLocation::parent(); + pub const TokenLocation: MultiLocation = MultiLocation::parent(); pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); + pub RelayNetwork: NetworkId = NetworkId::Rococo; pub UniversalLocation: InteriorMultiLocation = X2(GlobalConsensus(RelayNetwork::get()), Parachain(ParachainInfo::parachain_id().into())); pub const MaxInstructions: u32 = 100; pub const MaxAssetsIntoHolding: u32 = 64; -} - -pub struct RelayNetwork; -impl Get> for RelayNetwork { - fn get() -> Option { - Some(Self::get()) - } -} -impl Get for RelayNetwork { - fn get() -> NetworkId { - match u32::from(ParachainInfo::parachain_id()) { - bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID => NetworkId::Rococo, - bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID => NetworkId::Wococo, - para_id => unreachable!("Not supported for para_id: {}", para_id), - } - } + pub TreasuryAccount: AccountId = TREASURY_PALLET_ID.into_account_truncating(); + pub RelayTreasuryLocation: MultiLocation = (Parent, PalletInstance(rococo_runtime_constants::TREASURY_PALLET_ID)).into(); } /// Type for specifying how a `MultiLocation` can be converted into an `AccountId`. This is used @@ -90,7 +87,7 @@ pub type CurrencyTransactor = CurrencyAdapter< // Use this currency: Balances, // Use this currency when it is a fungible asset matching the given location or name: - IsConcrete, + IsConcrete, // Do a simple punn to convert an AccountId32 MultiLocation into a native chain account ID: LocationToAccountId, // Our chain's account ID type (we can't get away without mentioning it explicitly): @@ -154,7 +151,7 @@ impl Contains for SafeCallFilter { // Allow to change dedicated storage items (called by governance-like) match call { RuntimeCall::System(frame_system::Call::set_storage { items }) - if items.iter().any(|(k, _)| { + if items.iter().all(|(k, _)| { k.eq(&DeliveryRewardInBalance::key()) | k.eq(&RequiredStakeForStakeAndSlash::key()) }) => @@ -164,13 +161,15 @@ impl Contains for SafeCallFilter { matches!( call, - RuntimeCall::PolkadotXcm(pallet_xcm::Call::force_xcm_version { .. }) | - RuntimeCall::System( - frame_system::Call::set_heap_pages { .. } | - frame_system::Call::set_code { .. } | - frame_system::Call::set_code_without_checks { .. } | - frame_system::Call::kill_prefix { .. }, - ) | RuntimeCall::ParachainSystem(..) | + RuntimeCall::PolkadotXcm( + pallet_xcm::Call::force_xcm_version { .. } | + pallet_xcm::Call::force_default_xcm_version { .. } + ) | RuntimeCall::System( + frame_system::Call::set_heap_pages { .. } | + frame_system::Call::set_code { .. } | + frame_system::Call::set_code_without_checks { .. } | + frame_system::Call::kill_prefix { .. }, + ) | RuntimeCall::ParachainSystem(..) | RuntimeCall::Timestamp(..) | RuntimeCall::Balances(..) | RuntimeCall::CollatorSelection( @@ -183,14 +182,10 @@ impl Contains for SafeCallFilter { pallet_collator_selection::Call::remove_invulnerable { .. }, ) | RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | RuntimeCall::XcmpQueue(..) | - RuntimeCall::DmpQueue(..) | - RuntimeCall::BridgeRococoGrandpa(pallet_bridge_grandpa::Call::< - Runtime, - BridgeGrandpaRococoInstance, - >::initialize { .. }) | - RuntimeCall::BridgeWococoGrandpa(pallet_bridge_grandpa::Call::< + RuntimeCall::MessageQueue(..) | + RuntimeCall::BridgeWestendGrandpa(pallet_bridge_grandpa::Call::< Runtime, - BridgeGrandpaWococoInstance, + BridgeGrandpaWestendInstance, >::initialize { .. 
}) ) } @@ -206,24 +201,49 @@ pub type Barrier = TrailingSetTopicAsId< AllowKnownQueryResponses, WithComputedOrigin< ( - // If the message is one that immediately attemps to pay for execution, then + // If the message is one that immediately attempts to pay for execution, then // allow it. AllowTopLevelPaidExecutionFrom, - // Parent and its pluralities (i.e. governance bodies) get free execution. - AllowExplicitUnpaidExecutionFrom, + // Parent, its pluralities (i.e. governance bodies) and relay treasury pallet + // get free execution. + AllowExplicitUnpaidExecutionFrom<( + ParentOrParentsPlurality, + Equals, + )>, // Subscriptions for version tracking are OK. AllowSubscriptionsFrom, ), UniversalLocation, ConstU32<8>, >, - // TODO:check-parameter - (https://github.com/paritytech/parity-bridges-common/issues/2084) - // remove this and extend `AllowExplicitUnpaidExecutionFrom` with "or SystemParachains" once merged https://github.com/paritytech/polkadot/pull/7005 - AllowUnpaidExecutionFrom, ), >, >; +match_types! { + pub type SystemParachains: impl Contains = { + MultiLocation { + parents: 1, + interior: X1(Parachain( + system_parachain::ASSET_HUB_ID | + system_parachain::BRIDGE_HUB_ID | + system_parachain::CONTRACTS_ID | + system_parachain::ENCOINTER_ID + )), + } + }; +} + +/// Locations that will not be charged fees in the executor, +/// either execution or delivery. +/// We only waive fees for system functions, which these locations represent. +pub type WaivedLocations = + (RelayOrOtherSystemParachains, Equals); + +/// Cases where a remote origin is accepted as trusted Teleporter for a given asset: +/// - NativeToken with the parent Relay Chain and sibling parachains. +pub type TrustedTeleporters = ConcreteAssetFromSystem; + pub struct XcmConfig; impl xcm_executor::Config for XcmConfig { type RuntimeCall = RuntimeCall; @@ -233,8 +253,7 @@ impl xcm_executor::Config for XcmConfig { // BridgeHub does not recognize a reserve location for any asset. Users must teleport Native // token where allowed (e.g. with the Relay Chain). type IsReserve = (); - /// Only allow teleportation of NativeToken of relay chain. 
- type IsTeleporter = ConcreteNativeAssetFrom; + type IsTeleporter = TrustedTeleporters; type UniversalLocation = UniversalLocation; type Barrier = Barrier; type Weigher = WeightInfoBounds< @@ -243,7 +262,7 @@ impl xcm_executor::Config for XcmConfig { MaxInstructions, >; type Trader = - UsingComponents>; + UsingComponents>; type ResponseHandler = PolkadotXcm; type AssetTrap = PolkadotXcm; type AssetLocker = (); @@ -252,14 +271,29 @@ impl xcm_executor::Config for XcmConfig { type SubscriptionService = PolkadotXcm; type PalletInstancesInfo = AllPalletsWithSystem; type MaxAssetsIntoHolding = MaxAssetsIntoHolding; - type FeeManager = (); - type MessageExporter = BridgeHubRococoOrBridgeHubWococoSwitchExporter; + type FeeManager = XcmFeeManagerFromComponents< + WaivedLocations, + ( + XcmExportFeeToRelayerRewardAccounts< + Self::AssetTransactor, + crate::bridge_to_westend_config::WestendGlobalConsensusNetwork, + crate::bridge_to_westend_config::AssetHubWestendParaId, + crate::bridge_to_westend_config::BridgeHubWestendChainId, + crate::bridge_to_westend_config::AssetHubRococoToAssetHubWestendMessagesLane, + >, + XcmFeeToAccount, + ), + >; + type MessageExporter = (crate::bridge_to_westend_config::ToBridgeHubWestendHaulBlobExporter,); type UniversalAliases = Nothing; type CallDispatcher = WithOriginFilter; type SafeCallFilter = SafeCallFilter; type Aliasers = Nothing; } +pub type PriceForParentDelivery = + ExponentialPrice; + /// Converts a local signed origin into an XCM multilocation. /// Forms the basis for local origins sending/executing XCMs. pub type LocalOriginToLocation = SignedToAccountId32; @@ -268,16 +302,11 @@ pub type LocalOriginToLocation = SignedToAccountId32, + cumulus_primitives_utility::ParentAsUmp, // ..and XCMP to communicate with the sibling chains. XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type XcmRouter = XcmRouter; @@ -305,8 +334,6 @@ impl pallet_xcm::Config for Runtime { type SovereignAccountOf = LocationToAccountId; type MaxLockers = ConstU32<8>; type WeightInfo = crate::weights::pallet_xcm::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); @@ -317,46 +344,95 @@ impl cumulus_pallet_xcm::Config for Runtime { type XcmExecutor = XcmExecutor; } -/// Hacky switch implementation, because we have just one runtime for Rococo and Wococo BridgeHub, -/// so it means we have just one XcmConfig -pub struct BridgeHubRococoOrBridgeHubWococoSwitchExporter; -impl ExportXcm for BridgeHubRococoOrBridgeHubWococoSwitchExporter { - type Ticket = (NetworkId, (sp_std::prelude::Vec, XcmHash)); +/// A `HandleFee` implementation that simply deposits the fees for `ExportMessage` XCM instructions +/// into the accounts that are used for paying the relayer rewards. +/// Burns the fees in case of a failure. 
+pub struct XcmExportFeeToRelayerRewardAccounts< + AssetTransactor, + DestNetwork, + DestParaId, + DestBridgedChainId, + BridgeLaneId, +>(PhantomData<(AssetTransactor, DestNetwork, DestParaId, DestBridgedChainId, BridgeLaneId)>); - fn validate( - network: NetworkId, - channel: u32, - universal_source: &mut Option, - destination: &mut Option, - message: &mut Option>, - ) -> SendResult { - match network { - Rococo => ToBridgeHubRococoHaulBlobExporter::validate( - network, - channel, - universal_source, - destination, - message, - ) - .map(|result| ((Rococo, result.0), result.1)), - Wococo => ToBridgeHubWococoHaulBlobExporter::validate( - network, - channel, - universal_source, - destination, - message, - ) - .map(|result| ((Wococo, result.0), result.1)), - _ => unimplemented!("Unsupported network: {:?}", network), - } - } +impl< + AssetTransactor: TransactAsset, + DestNetwork: Get, + DestParaId: Get, + DestBridgedChainId: Get, + BridgeLaneId: Get, + > HandleFee + for XcmExportFeeToRelayerRewardAccounts< + AssetTransactor, + DestNetwork, + DestParaId, + DestBridgedChainId, + BridgeLaneId, + > +{ + fn handle_fee( + fee: MultiAssets, + maybe_context: Option<&XcmContext>, + reason: FeeReason, + ) -> MultiAssets { + if matches!(reason, FeeReason::Export { network: bridged_network, destination } + if bridged_network == DestNetwork::get() && + destination == X1(Parachain(DestParaId::get().into()))) + { + // We have 2 relayer rewards accounts: + // - the SA of the source parachain on this BH: this pays the relayers for delivering + // Source para -> Target Para message delivery confirmations + // - the SA of the destination parachain on this BH: this pays the relayers for + // delivering Target para -> Source Para messages + // We split the `ExportMessage` fee between these 2 accounts. 
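+			// Note on the arithmetic below: fungible fees are split with integer division, the
+			// source-parachain account gets `total_fee / 2` and the destination-parachain account
+			// gets the remainder (`total_fee - total_fee / 2`), so a fee of e.g. 1_000_001 units
+			// becomes 500_000 + 500_001 and nothing is lost to rounding. Non-fungible assets are
+			// not split; they are deposited to the source-parachain account as a whole.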
+ let source_para_account = PayRewardFromAccount::< + pallet_balances::Pallet, + AccountId, + >::rewards_account(RewardsAccountParams::new( + BridgeLaneId::get(), + DestBridgedChainId::get(), + RewardsAccountOwner::ThisChain, + )); + + let dest_para_account = PayRewardFromAccount::< + pallet_balances::Pallet, + AccountId, + >::rewards_account(RewardsAccountParams::new( + BridgeLaneId::get(), + DestBridgedChainId::get(), + RewardsAccountOwner::BridgedChain, + )); + + for asset in fee.into_inner() { + match asset.fun { + Fungible(total_fee) => { + let source_fee = total_fee / 2; + deposit_or_burn_fee::( + MultiAsset { id: asset.id, fun: Fungible(source_fee) }.into(), + maybe_context, + source_para_account.clone(), + ); - fn deliver(ticket: Self::Ticket) -> Result { - let (network, ticket) = ticket; - match network { - Rococo => ToBridgeHubRococoHaulBlobExporter::deliver(ticket), - Wococo => ToBridgeHubWococoHaulBlobExporter::deliver(ticket), - _ => unimplemented!("Unsupported network: {:?}", network), + let dest_fee = total_fee - source_fee; + deposit_or_burn_fee::( + MultiAsset { id: asset.id, fun: Fungible(dest_fee) }.into(), + maybe_context, + dest_para_account.clone(), + ); + }, + NonFungible(_) => { + deposit_or_burn_fee::( + asset.into(), + maybe_context, + source_para_account.clone(), + ); + }, + } + } + + return MultiAssets::new() } + + fee } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs index e5fe67f2a8e5b29a46adcf2a133ce9e956df9d5b..9597d71f6b279ec209069939c3b0d339d452faa9 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs @@ -18,14 +18,14 @@ use bp_polkadot_core::Signature; use bridge_hub_rococo_runtime::{ - bridge_hub_rococo_config, bridge_hub_wococo_config, - xcm_config::{RelayNetwork, XcmConfig}, - AllPalletsWithoutSystem, BridgeRejectObsoleteHeadersAndMessages, DeliveryRewardInBalance, - Executive, ExistentialDeposit, ParachainSystem, PolkadotXcm, RequiredStakeForStakeAndSlash, - Runtime, RuntimeCall, RuntimeEvent, SessionKeys, SignedExtra, UncheckedExtrinsic, + bridge_common_config, bridge_to_westend_config, + xcm_config::{RelayNetwork, TokenLocation, XcmConfig}, + AllPalletsWithoutSystem, BridgeRejectObsoleteHeadersAndMessages, Executive, ExistentialDeposit, + ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, SessionKeys, SignedExtra, + TransactionPayment, UncheckedExtrinsic, }; use codec::{Decode, Encode}; -use frame_support::parameter_types; +use frame_support::{dispatch::GetDispatchInfo, parameter_types}; use frame_system::pallet_prelude::HeaderFor; use parachains_common::{rococo::fee::WeightToFee, AccountId, AuraId, Balance}; use sp_keyring::AccountKeyring::Alice; @@ -35,7 +35,7 @@ use sp_runtime::{ }; use xcm::latest::prelude::*; -// Para id of sibling chain (Rockmine/Wockmint) used in tests. +// Para id of sibling chain used in tests. pub const SIBLING_PARACHAIN_ID: u32 = 1000; parameter_types! 
{ @@ -55,11 +55,8 @@ fn construct_extrinsic( frame_system::CheckNonce::::from(0), frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(0), - BridgeRejectObsoleteHeadersAndMessages {}, - ( - bridge_hub_wococo_config::BridgeRefundBridgeHubRococoMessages::default(), - bridge_hub_rococo_config::BridgeRefundBridgeHubWococoMessages::default(), - ), + BridgeRejectObsoleteHeadersAndMessages::default(), + (bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages::default(),), ); let payload = SignedPayload::new(call.clone(), extra.clone()).unwrap(); let signature = payload.using_encoded(|e| sender.sign(e)); @@ -81,6 +78,13 @@ fn construct_and_apply_extrinsic( r.unwrap() } +fn construct_and_estimate_extrinsic_fee(batch: pallet_utility::Call) -> Balance { + let batch_call = RuntimeCall::Utility(batch); + let batch_info = batch_call.get_dispatch_info(); + let xt = construct_extrinsic(Alice, batch_call); + TransactionPayment::compute_fee(xt.encoded_size() as _, &batch_info, 0) +} + fn executive_init_block(header: &HeaderFor) { Executive::initialize_block(header) } @@ -95,12 +99,13 @@ fn collator_session_keys() -> bridge_hub_test_utils::CollatorSessionKeys None, } }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }), bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID ); #[test] fn initialize_bridge_by_governance_works() { + // for Westend finality bridge_hub_test_utils::test_cases::initialize_bridge_by_governance_works::< Runtime, - BridgeGrandpaWococoInstance, + BridgeGrandpaWestendInstance, >( collator_session_keys(), bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, - Box::new(|call| RuntimeCall::BridgeWococoGrandpa(call).encode()), + Box::new(|call| RuntimeCall::BridgeWestendGrandpa(call).encode()), ) } @@ -176,35 +176,41 @@ mod bridge_hub_rococo_tests { #[test] fn handle_export_message_from_system_parachain_add_to_outbound_queue_works() { + // for Westend bridge_hub_test_utils::test_cases::handle_export_message_from_system_parachain_to_outbound_queue_works::< Runtime, XcmConfig, - WithBridgeHubWococoMessagesInstance, + WithBridgeHubWestendMessagesInstance, >( collator_session_keys(), bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, SIBLING_PARACHAIN_ID, Box::new(|runtime_event_encoded: Vec| { match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::BridgeWococoMessages(event)) => Some(event), + Ok(RuntimeEvent::BridgeWestendMessages(event)) => Some(event), _ => None, } }), - || ExportMessage { network: Wococo, destination: X1(Parachain(1234)), xcm: Xcm(vec![]) }, - bridge_hub_rococo_config::DEFAULT_XCM_LANE_TO_BRIDGE_HUB_WOCOCO + || ExportMessage { network: Westend, destination: X1(Parachain(1234)), xcm: Xcm(vec![]) }, + XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WESTEND, + Some((TokenLocation::get(), ExistentialDeposit::get()).into()), + // value should be >= than value generated by `can_calculate_weight_for_paid_export_message_with_reserve_transfer` + Some((TokenLocation::get(), bp_bridge_hub_rococo::BridgeHubRococoBaseXcmFeeInRocs::get()).into()), + || (), ) } #[test] fn message_dispatch_routing_works() { + // from Westend bridge_hub_test_utils::test_cases::message_dispatch_routing_works::< Runtime, AllPalletsWithoutSystem, XcmConfig, ParachainSystem, - WithBridgeHubWococoMessagesInstance, + WithBridgeHubWestendMessagesInstance, RelayNetwork, - 
bridge_hub_rococo_config::WococoGlobalConsensusNetwork, + WestendGlobalConsensusNetwork, >( collator_session_keys(), bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, @@ -221,232 +227,122 @@ mod bridge_hub_rococo_tests { _ => None, } }), - bridge_hub_rococo_config::DEFAULT_XCM_LANE_TO_BRIDGE_HUB_WOCOCO, + XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WESTEND, + || (), ) } #[test] fn relayed_incoming_message_works() { + // from Westend bridge_hub_test_utils::test_cases::relayed_incoming_message_works::< Runtime, AllPalletsWithoutSystem, XcmConfig, ParachainSystem, - BridgeGrandpaWococoInstance, - BridgeParachainWococoInstance, - WithBridgeHubWococoMessagesInstance, - WithBridgeHubWococoMessageBridge, + BridgeGrandpaWestendInstance, + BridgeParachainWestendInstance, + WithBridgeHubWestendMessagesInstance, + WithBridgeHubWestendMessageBridge, >( collator_session_keys(), bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, - bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID, + bp_bridge_hub_westend::BRIDGE_HUB_WESTEND_PARACHAIN_ID, SIBLING_PARACHAIN_ID, Rococo, - DEFAULT_XCM_LANE_TO_BRIDGE_HUB_WOCOCO, + XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WESTEND, + || (), ) } #[test] pub fn complex_relay_extrinsic_works() { + // for Westend bridge_hub_test_utils::test_cases::complex_relay_extrinsic_works::< Runtime, AllPalletsWithoutSystem, XcmConfig, ParachainSystem, - BridgeGrandpaWococoInstance, - BridgeParachainWococoInstance, - WithBridgeHubWococoMessagesInstance, - WithBridgeHubWococoMessageBridge, + BridgeGrandpaWestendInstance, + BridgeParachainWestendInstance, + WithBridgeHubWestendMessagesInstance, + WithBridgeHubWestendMessageBridge, >( collator_session_keys(), bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, - bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID, + bp_bridge_hub_westend::BRIDGE_HUB_WESTEND_PARACHAIN_ID, SIBLING_PARACHAIN_ID, - bridge_hub_rococo_config::BridgeHubWococoChainId::get(), + BridgeHubWestendChainId::get(), Rococo, - DEFAULT_XCM_LANE_TO_BRIDGE_HUB_WOCOCO, + XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WESTEND, ExistentialDeposit::get(), executive_init_block, construct_and_apply_extrinsic, + || (), ); } -} - -mod bridge_hub_wococo_tests { - use super::*; - use bridge_hub_rococo_runtime::{ - BridgeGrandpaRococoInstance, BridgeParachainRococoInstance, - WithBridgeHubRococoMessagesInstance, - }; - use bridge_hub_wococo_config::{ - WithBridgeHubRococoMessageBridge, DEFAULT_XCM_LANE_TO_BRIDGE_HUB_ROCOCO, - }; - - bridge_hub_test_utils::test_cases::include_teleports_for_native_asset_works!( - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - CheckingAccount, - WeightToFee, - ParachainSystem, - collator_session_keys(), - ExistentialDeposit::get(), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::PolkadotXcm(event)) => Some(event), - _ => None, - } - }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }), - bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID - ); - - #[test] - fn initialize_bridge_by_governance_works() { - bridge_hub_test_utils::test_cases::initialize_bridge_by_governance_works::< - Runtime, - BridgeGrandpaRococoInstance, - >( - collator_session_keys(), - bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID, - Box::new(|call| RuntimeCall::BridgeRococoGrandpa(call).encode()), - ) - } #[test] - fn change_delivery_reward_by_governance_works() { - 
bridge_hub_test_utils::test_cases::change_storage_constant_by_governance_works::< + pub fn can_calculate_weight_for_paid_export_message_with_reserve_transfer() { + let estimated = bridge_hub_test_utils::test_cases::can_calculate_weight_for_paid_export_message_with_reserve_transfer::< Runtime, - DeliveryRewardInBalance, - u64, - >( - collator_session_keys(), - bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID, - Box::new(|call| RuntimeCall::System(call).encode()), - || (DeliveryRewardInBalance::key().to_vec(), DeliveryRewardInBalance::get()), - |old_value| old_value.checked_mul(2).unwrap(), - ) - } + XcmConfig, + WeightToFee, + >(); - #[test] - fn change_required_stake_by_governance_works() { - bridge_hub_test_utils::test_cases::change_storage_constant_by_governance_works::< - Runtime, - RequiredStakeForStakeAndSlash, - Balance, - >( - collator_session_keys(), - bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID, - Box::new(|call| RuntimeCall::System(call).encode()), - || { - ( - RequiredStakeForStakeAndSlash::key().to_vec(), - RequiredStakeForStakeAndSlash::get(), - ) - }, - |old_value| old_value.checked_mul(2).unwrap(), - ) + // check if estimated value is sane + let max_expected = bp_bridge_hub_rococo::BridgeHubRococoBaseXcmFeeInRocs::get(); + assert!( + estimated <= max_expected, + "calculated: {:?}, max_expected: {:?}, please adjust `bp_bridge_hub_rococo::BridgeHubRococoBaseXcmFeeInRocs` value", + estimated, + max_expected + ); } #[test] - fn handle_export_message_from_system_parachain_add_to_outbound_queue_works() { - bridge_hub_test_utils::test_cases::handle_export_message_from_system_parachain_to_outbound_queue_works::< + pub fn can_calculate_fee_for_complex_message_delivery_transaction() { + let estimated = bridge_hub_test_utils::test_cases::can_calculate_fee_for_complex_message_delivery_transaction::< Runtime, - XcmConfig, - WithBridgeHubRococoMessagesInstance, + BridgeGrandpaWestendInstance, + BridgeParachainWestendInstance, + WithBridgeHubWestendMessagesInstance, + WithBridgeHubWestendMessageBridge, >( collator_session_keys(), - bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID, - SIBLING_PARACHAIN_ID, - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::BridgeRococoMessages(event)) => Some(event), - _ => None, - } - }), - || ExportMessage { network: Rococo, destination: X1(Parachain(4321)), xcm: Xcm(vec![]) }, - bridge_hub_wococo_config::DEFAULT_XCM_LANE_TO_BRIDGE_HUB_ROCOCO - ) - } + construct_and_estimate_extrinsic_fee + ); - #[test] - fn message_dispatch_routing_works() { - bridge_hub_test_utils::test_cases::message_dispatch_routing_works::< - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - ParachainSystem, - WithBridgeHubRococoMessagesInstance, - RelayNetwork, - bridge_hub_wococo_config::RococoGlobalConsensusNetwork, - >( - collator_session_keys(), - bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID, - SIBLING_PARACHAIN_ID, - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::ParachainSystem(event)) => Some(event), - _ => None, - } - }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }), - bridge_hub_wococo_config::DEFAULT_XCM_LANE_TO_BRIDGE_HUB_ROCOCO, - ) + // check if estimated value is sane + let max_expected = bp_bridge_hub_rococo::BridgeHubRococoBaseDeliveryFeeInRocs::get(); + 
assert!( + estimated <= max_expected, + "calculated: {:?}, max_expected: {:?}, please adjust `bp_bridge_hub_rococo::BridgeHubRococoBaseDeliveryFeeInRocs` value", + estimated, + max_expected + ); } #[test] - fn relayed_incoming_message_works() { - bridge_hub_test_utils::test_cases::relayed_incoming_message_works::< + pub fn can_calculate_fee_for_complex_message_confirmation_transaction() { + let estimated = bridge_hub_test_utils::test_cases::can_calculate_fee_for_complex_message_confirmation_transaction::< Runtime, - AllPalletsWithoutSystem, - XcmConfig, - ParachainSystem, - BridgeGrandpaRococoInstance, - BridgeParachainRococoInstance, - WithBridgeHubRococoMessagesInstance, - WithBridgeHubRococoMessageBridge, + BridgeGrandpaWestendInstance, + BridgeParachainWestendInstance, + WithBridgeHubWestendMessagesInstance, + WithBridgeHubWestendMessageBridge, >( collator_session_keys(), - bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID, - bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, - SIBLING_PARACHAIN_ID, - Wococo, - DEFAULT_XCM_LANE_TO_BRIDGE_HUB_ROCOCO, - ) - } + construct_and_estimate_extrinsic_fee + ); - #[test] - pub fn complex_relay_extrinsic_works() { - bridge_hub_test_utils::test_cases::complex_relay_extrinsic_works::< - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - ParachainSystem, - BridgeGrandpaRococoInstance, - BridgeParachainRococoInstance, - WithBridgeHubRococoMessagesInstance, - WithBridgeHubRococoMessageBridge, - >( - collator_session_keys(), - bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID, - bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, - SIBLING_PARACHAIN_ID, - bridge_hub_wococo_config::BridgeHubRococoChainId::get(), - Wococo, - DEFAULT_XCM_LANE_TO_BRIDGE_HUB_ROCOCO, - ExistentialDeposit::get(), - executive_init_block, - construct_and_apply_extrinsic, + // check if estimated value is sane + let max_expected = bp_bridge_hub_rococo::BridgeHubRococoBaseConfirmationFeeInRocs::get(); + assert!( + estimated <= max_expected, + "calculated: {:?}, max_expected: {:?}, please adjust `bp_bridge_hub_rococo::BridgeHubRococoBaseConfirmationFeeInRocs` value", + estimated, + max_expected ); } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..7e384126ab670148f8435bac01e9743fc2a72016 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml @@ -0,0 +1,245 @@ +[package] +name = "bridge-hub-westend-runtime" +version = "0.1.0" +authors.workspace = true +edition.workspace = true +description = "Westend's BridgeHub parachain runtime" +license = "Apache-2.0" + +[build-dependencies] +substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +hex-literal = { version = "0.4.1" } +log = { version = "0.4.20", default-features = false } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +serde = { version = "1.0.188", optional = true, features = ["derive"] } +smallvec = "1.11.0" + +# Substrate +frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true} +frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} +frame-support = { path = 
"../../../../../substrate/frame/support", default-features = false} +frame-system = { path = "../../../../../substrate/frame/system", default-features = false} +frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true} +frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false} +frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true} +pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} +pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false} +pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false} +pallet-session = { path = "../../../../../substrate/frame/session", default-features = false} +pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false} +pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false} +pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false} +pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false} +pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false} +pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false} +sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} +sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} +sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } +sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} +sp-io = { path = "../../../../../substrate/primitives/io", default-features = false} +sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} +sp-session = { path = "../../../../../substrate/primitives/session", default-features = false} +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false} +sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false} +sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false} +sp-version = { path = "../../../../../substrate/primitives/version", default-features = false} + +# Polkadot +westend-runtime-constants = { path = "../../../../../polkadot/runtime/westend/constants", default-features = false} +pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false} +pallet-xcm-benchmarks = { path = "../../../../../polkadot/xcm/pallet-xcm-benchmarks", default-features = false, optional = true } +polkadot-core-primitives = { path = "../../../../../polkadot/core-primitives", default-features = false} +polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false} 
+polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false} +xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} +xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false} +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} + +# Cumulus +cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } +cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue", default-features = false } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } +cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false} +cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } +cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false, features = ["bridging"] } +cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } +cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } +pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } +parachains-common = { path = "../../../common", default-features = false } + +# Bridges +bp-asset-hub-westend = { path = "../../../../../bridges/primitives/chain-asset-hub-westend", default-features = false } +bp-bridge-hub-rococo = { path = "../../../../../bridges/primitives/chain-bridge-hub-rococo", default-features = false } +bp-bridge-hub-westend = { path = "../../../../../bridges/primitives/chain-bridge-hub-westend", default-features = false } +bp-header-chain = { path = "../../../../../bridges/primitives/header-chain", default-features = false } +bp-messages = { path = "../../../../../bridges/primitives/messages", default-features = false } +bp-parachains = { path = "../../../../../bridges/primitives/parachains", default-features = false } +bp-polkadot-core = { path = "../../../../../bridges/primitives/polkadot-core", default-features = false } +bp-relayers = { path = "../../../../../bridges/primitives/relayers", default-features = false } +bp-runtime = { path = "../../../../../bridges/primitives/runtime", default-features = false } +bp-rococo = { path = "../../../../../bridges/primitives/chain-rococo", default-features = false } +bp-westend = { path = "../../../../../bridges/primitives/chain-westend", default-features = false } +pallet-bridge-grandpa = { path = "../../../../../bridges/modules/grandpa", default-features = false } +pallet-bridge-messages = { path = "../../../../../bridges/modules/messages", default-features = false } +pallet-bridge-parachains = { path = "../../../../../bridges/modules/parachains", default-features = false } +pallet-bridge-relayers = { path = "../../../../../bridges/modules/relayers", default-features = false } +bridge-runtime-common = { path = "../../../../../bridges/bin/runtime-common", default-features = false } + +[dev-dependencies] +static_assertions = "1.1" +bridge-hub-test-utils = { path = "../test-utils" } +bridge-runtime-common = { path = "../../../../../bridges/bin/runtime-common", features = 
["integrity-test"] } +sp-keyring = { path = "../../../../../substrate/primitives/keyring" } + +[features] +default = [ "std" ] +std = [ + "bp-asset-hub-westend/std", + "bp-bridge-hub-rococo/std", + "bp-bridge-hub-westend/std", + "bp-header-chain/std", + "bp-messages/std", + "bp-parachains/std", + "bp-polkadot-core/std", + "bp-relayers/std", + "bp-rococo/std", + "bp-runtime/std", + "bp-westend/std", + "bridge-runtime-common/std", + "codec/std", + "cumulus-pallet-aura-ext/std", + "cumulus-pallet-dmp-queue/std", + "cumulus-pallet-parachain-system/std", + "cumulus-pallet-session-benchmarking/std", + "cumulus-pallet-xcm/std", + "cumulus-pallet-xcmp-queue/std", + "cumulus-primitives-core/std", + "cumulus-primitives-utility/std", + "frame-benchmarking/std", + "frame-executive/std", + "frame-support/std", + "frame-system-benchmarking?/std", + "frame-system-rpc-runtime-api/std", + "frame-system/std", + "frame-try-runtime?/std", + "log/std", + "pallet-aura/std", + "pallet-authorship/std", + "pallet-balances/std", + "pallet-bridge-grandpa/std", + "pallet-bridge-messages/std", + "pallet-bridge-parachains/std", + "pallet-bridge-relayers/std", + "pallet-collator-selection/std", + "pallet-message-queue/std", + "pallet-multisig/std", + "pallet-session/std", + "pallet-timestamp/std", + "pallet-transaction-payment-rpc-runtime-api/std", + "pallet-transaction-payment/std", + "pallet-utility/std", + "pallet-xcm-benchmarks?/std", + "pallet-xcm/std", + "parachain-info/std", + "parachains-common/std", + "polkadot-core-primitives/std", + "polkadot-parachain-primitives/std", + "polkadot-runtime-common/std", + "scale-info/std", + "serde", + "sp-api/std", + "sp-block-builder/std", + "sp-consensus-aura/std", + "sp-core/std", + "sp-genesis-builder/std", + "sp-inherents/std", + "sp-io/std", + "sp-offchain/std", + "sp-runtime/std", + "sp-session/std", + "sp-std/std", + "sp-storage/std", + "sp-transaction-pool/std", + "sp-version/std", + "substrate-wasm-builder", + "westend-runtime-constants/std", + "xcm-builder/std", + "xcm-executor/std", + "xcm/std", +] + +runtime-benchmarks = [ + "bridge-runtime-common/runtime-benchmarks", + "cumulus-pallet-dmp-queue/runtime-benchmarks", + "cumulus-pallet-parachain-system/runtime-benchmarks", + "cumulus-pallet-session-benchmarking/runtime-benchmarks", + "cumulus-pallet-xcmp-queue/runtime-benchmarks", + "cumulus-primitives-core/runtime-benchmarks", + "cumulus-primitives-utility/runtime-benchmarks", + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system-benchmarking/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-bridge-grandpa/runtime-benchmarks", + "pallet-bridge-messages/runtime-benchmarks", + "pallet-bridge-parachains/runtime-benchmarks", + "pallet-bridge-relayers/runtime-benchmarks", + "pallet-collator-selection/runtime-benchmarks", + "pallet-message-queue/runtime-benchmarks", + "pallet-multisig/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks", + "pallet-utility/runtime-benchmarks", + "pallet-xcm-benchmarks/runtime-benchmarks", + "pallet-xcm/runtime-benchmarks", + "parachains-common/runtime-benchmarks", + "polkadot-parachain-primitives/runtime-benchmarks", + "polkadot-runtime-common/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", + "xcm-builder/runtime-benchmarks", + "xcm-executor/runtime-benchmarks", +] + +try-runtime = [ + "cumulus-pallet-aura-ext/try-runtime", + "cumulus-pallet-dmp-queue/try-runtime", + "cumulus-pallet-parachain-system/try-runtime", + 
"cumulus-pallet-xcm/try-runtime", + "cumulus-pallet-xcmp-queue/try-runtime", + "frame-executive/try-runtime", + "frame-support/try-runtime", + "frame-system/try-runtime", + "frame-try-runtime/try-runtime", + "pallet-aura/try-runtime", + "pallet-authorship/try-runtime", + "pallet-balances/try-runtime", + "pallet-bridge-grandpa/try-runtime", + "pallet-bridge-messages/try-runtime", + "pallet-bridge-parachains/try-runtime", + "pallet-bridge-relayers/try-runtime", + "pallet-collator-selection/try-runtime", + "pallet-message-queue/try-runtime", + "pallet-multisig/try-runtime", + "pallet-session/try-runtime", + "pallet-timestamp/try-runtime", + "pallet-transaction-payment/try-runtime", + "pallet-utility/try-runtime", + "pallet-xcm/try-runtime", + "parachain-info/try-runtime", + "polkadot-runtime-common/try-runtime", + "sp-runtime/try-runtime", +] + +experimental = [ "pallet-aura/experimental" ] + +# A feature that should be enabled when the runtime should be built for on-chain +# deployment. This will disable stuff that shouldn't be part of the on-chain wasm +# to make it smaller like logging for example. +on-chain-release-build = [ "sp-api/disable-logging" ] diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/build.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/build.rs new file mode 100644 index 0000000000000000000000000000000000000000..60f8a125129ff1344a1799246e931acdb1d139d5 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/build.rs @@ -0,0 +1,26 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#[cfg(feature = "std")] +fn main() { + substrate_wasm_builder::WasmBuilder::new() + .with_current_project() + .export_heap_base() + .import_memory() + .build() +} + +#[cfg(not(feature = "std"))] +fn main() {} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_common_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_common_config.rs new file mode 100644 index 0000000000000000000000000000000000000000..9bae106395a62a9d1cb6861ed3099b5f3bf963cc --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_common_config.rs @@ -0,0 +1,50 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! 
Bridge definitions that can be used by multiple BridgeHub flavors. +//! All configurations here should be dedicated to a single chain; in other words, we don't need two +//! chains for a single pallet configuration. +//! +//! For example, the messaging pallet needs to know the sending and receiving chains, but the +//! GRANDPA tracking pallet only needs to be aware of one chain. + +use super::{weights, AccountId, Balance, Balances, BlockNumber, Runtime, RuntimeEvent}; +use frame_support::parameter_types; + +parameter_types! { + pub storage RequiredStakeForStakeAndSlash: Balance = 1_000_000; + pub const RelayerStakeLease: u32 = 8; + pub const RelayerStakeReserveId: [u8; 8] = *b"brdgrlrs"; + + pub storage DeliveryRewardInBalance: u64 = 1_000_000; +} + +/// Allows collect and claim rewards for relayers +impl pallet_bridge_relayers::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Reward = Balance; + type PaymentProcedure = + bp_relayers::PayRewardFromAccount, AccountId>; + type StakeAndSlash = pallet_bridge_relayers::StakeAndSlashNamed< + AccountId, + BlockNumber, + Balances, + RelayerStakeReserveId, + RequiredStakeForStakeAndSlash, + RelayerStakeLease, + >; + type WeightInfo = weights::pallet_bridge_relayers::WeightInfo; +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs new file mode 100644 index 0000000000000000000000000000000000000000..70ff43c09e3f22ba746966136aa457ebd9c82b4f --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs @@ -0,0 +1,347 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Bridge definitions used on BridgeHub with the Westend flavor. 
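+//!
+//! A rough sketch of how the pieces configured below fit together (see the individual `impl`
+//! blocks for the authoritative wiring):
+//!
+//! - `pallet_bridge_grandpa` follows finality of the bridged Rococo relay chain;
+//! - `pallet_bridge_parachains` tracks BridgeHub Rococo heads against those finalized relay
+//!   headers;
+//! - `pallet_bridge_messages` carries XCM blobs over the tracked header chain, with relayer
+//!   rewards and refunds handled via `pallet_bridge_relayers` and the signed extension defined
+//!   in this module.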
+ +use crate::{ + bridge_common_config::DeliveryRewardInBalance, weights, AccountId, BridgeRococoMessages, + ParachainInfo, Runtime, RuntimeEvent, RuntimeOrigin, XcmRouter, +}; +use bp_messages::LaneId; +use bp_parachains::SingleParaStoredHeaderDataBuilder; +use bridge_runtime_common::{ + messages, + messages::{ + source::{FromBridgedChainMessagesDeliveryProof, TargetHeaderChainAdapter}, + target::{FromBridgedChainMessagesProof, SourceHeaderChainAdapter}, + MessageBridge, ThisChainWithMessages, UnderlyingChainProvider, + }, + messages_xcm_extension::{ + SenderAndLane, XcmAsPlainPayload, XcmBlobHauler, XcmBlobHaulerAdapter, + XcmBlobMessageDispatch, + }, + refund_relayer_extension::{ + ActualFeeRefund, RefundBridgedParachainMessages, RefundSignedExtensionAdapter, + RefundableMessagesLane, RefundableParachain, + }, +}; +use codec::Encode; +use frame_support::{ + parameter_types, + traits::{ConstU32, PalletInfoAccess}, +}; +use sp_runtime::RuntimeDebug; +use xcm::{ + latest::prelude::*, + prelude::{InteriorMultiLocation, NetworkId}, +}; +use xcm_builder::{BridgeBlobDispatcher, HaulBlobExporter}; + +parameter_types! { + pub const RelayChainHeadersToKeep: u32 = 1024; + pub const ParachainHeadsToKeep: u32 = 64; + + pub const RococoBridgeParachainPalletName: &'static str = "Paras"; + pub const MaxRococoParaHeadDataSize: u32 = bp_rococo::MAX_NESTED_PARACHAIN_HEAD_DATA_SIZE; + + pub const MaxUnrewardedRelayerEntriesAtInboundLane: bp_messages::MessageNonce = + bp_bridge_hub_westend::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX; + pub const MaxUnconfirmedMessagesAtInboundLane: bp_messages::MessageNonce = + bp_bridge_hub_westend::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; + pub const BridgeHubRococoChainId: bp_runtime::ChainId = bp_runtime::BRIDGE_HUB_ROCOCO_CHAIN_ID; + pub BridgeHubWestendUniversalLocation: InteriorMultiLocation = X2(GlobalConsensus(Westend), Parachain(ParachainInfo::parachain_id().into())); + pub BridgeWestendToRococoMessagesPalletInstance: InteriorMultiLocation = X1(PalletInstance(::index() as u8)); + pub RococoGlobalConsensusNetwork: NetworkId = NetworkId::Rococo; + pub ActiveOutboundLanesToBridgeHubRococo: &'static [bp_messages::LaneId] = &[XCM_LANE_FOR_ASSET_HUB_WESTEND_TO_ASSET_HUB_ROCOCO]; + pub const AssetHubWestendToAssetHubRococoMessagesLane: bp_messages::LaneId = XCM_LANE_FOR_ASSET_HUB_WESTEND_TO_ASSET_HUB_ROCOCO; + // see the `FEE_BOOST_PER_MESSAGE` constant to get the meaning of this value + pub PriorityBoostPerMessage: u64 = 182_044_444_444_444; + + pub AssetHubWestendParaId: cumulus_primitives_core::ParaId = bp_asset_hub_westend::ASSET_HUB_WESTEND_PARACHAIN_ID.into(); + + pub FromAssetHubWestendToAssetHubRococoRoute: SenderAndLane = SenderAndLane::new( + ParentThen(X1(Parachain(AssetHubWestendParaId::get().into()))).into(), + XCM_LANE_FOR_ASSET_HUB_WESTEND_TO_ASSET_HUB_ROCOCO, + ); + + pub CongestedMessage: Xcm<()> = build_congestion_message(true).into(); + + pub UncongestedMessage: Xcm<()> = build_congestion_message(false).into(); +} +pub const XCM_LANE_FOR_ASSET_HUB_WESTEND_TO_ASSET_HUB_ROCOCO: LaneId = LaneId([0, 0, 0, 2]); + +fn build_congestion_message(is_congested: bool) -> sp_std::vec::Vec> { + sp_std::vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + Transact { + origin_kind: OriginKind::Xcm, + require_weight_at_most: + bp_asset_hub_westend::XcmBridgeHubRouterTransactCallMaxWeight::get(), + call: bp_asset_hub_westend::Call::ToRococoXcmRouter( + bp_asset_hub_westend::XcmBridgeHubRouterCall::report_bridge_status { + bridge_id: 
Default::default(), + is_congested, + } + ) + .encode() + .into(), + } + ] +} + +/// Proof of messages, coming from Rococo. +pub type FromRococoBridgeHubMessagesProof = + FromBridgedChainMessagesProof; +/// Messages delivery proof for Rococo Bridge Hub -> Westend Bridge Hub messages. +pub type ToRococoBridgeHubMessagesDeliveryProof = + FromBridgedChainMessagesDeliveryProof; + +/// Dispatches received XCM messages from other bridge +type FromRococoMessageBlobDispatcher = BridgeBlobDispatcher< + XcmRouter, + BridgeHubWestendUniversalLocation, + BridgeWestendToRococoMessagesPalletInstance, +>; + +/// Export XCM messages to be relayed to the other side +pub type ToBridgeHubRococoHaulBlobExporter = HaulBlobExporter< + XcmBlobHaulerAdapter, + RococoGlobalConsensusNetwork, + (), +>; +pub struct ToBridgeHubRococoXcmBlobHauler; +impl XcmBlobHauler for ToBridgeHubRococoXcmBlobHauler { + type Runtime = Runtime; + type MessagesInstance = WithBridgeHubRococoMessagesInstance; + type SenderAndLane = FromAssetHubWestendToAssetHubRococoRoute; + + type ToSourceChainSender = XcmRouter; + type CongestedMessage = CongestedMessage; + type UncongestedMessage = UncongestedMessage; +} + +/// On messages delivered callback. +type OnMessagesDelivered = XcmBlobHaulerAdapter; + +/// Messaging Bridge configuration for BridgeHubWestend -> BridgeHubRococo +pub struct WithBridgeHubRococoMessageBridge; +impl MessageBridge for WithBridgeHubRococoMessageBridge { + const BRIDGED_MESSAGES_PALLET_NAME: &'static str = + bp_bridge_hub_westend::WITH_BRIDGE_HUB_WESTEND_MESSAGES_PALLET_NAME; + type ThisChain = BridgeHubWestend; + type BridgedChain = BridgeHubRococo; + type BridgedHeaderChain = pallet_bridge_parachains::ParachainHeaders< + Runtime, + BridgeParachainRococoInstance, + bp_bridge_hub_rococo::BridgeHubRococo, + >; +} + +/// Message verifier for BridgeHubRococo messages sent from BridgeHubWestend +type ToBridgeHubRococoMessageVerifier = + messages::source::FromThisChainMessageVerifier; + +/// Maximal outbound payload size of BridgeHubWestend -> BridgeHubRococo messages. +type ToBridgeHubRococoMaximalOutboundPayloadSize = + messages::source::FromThisChainMaximalOutboundPayloadSize; + +/// BridgeHubRococo chain from message lane point of view. +#[derive(RuntimeDebug, Clone, Copy)] +pub struct BridgeHubRococo; + +impl UnderlyingChainProvider for BridgeHubRococo { + type Chain = bp_bridge_hub_rococo::BridgeHubRococo; +} + +impl messages::BridgedChainWithMessages for BridgeHubRococo {} + +/// BridgeHubWestend chain from message lane point of view. +#[derive(RuntimeDebug, Clone, Copy)] +pub struct BridgeHubWestend; + +impl UnderlyingChainProvider for BridgeHubWestend { + type Chain = bp_bridge_hub_westend::BridgeHubWestend; +} + +impl ThisChainWithMessages for BridgeHubWestend { + type RuntimeOrigin = RuntimeOrigin; +} + +/// Signed extension that refunds relayers that are delivering messages from the Rococo parachain. +pub type OnBridgeHubWestendRefundBridgeHubRococoMessages = RefundSignedExtensionAdapter< + RefundBridgedParachainMessages< + Runtime, + RefundableParachain, + RefundableMessagesLane< + WithBridgeHubRococoMessagesInstance, + AssetHubWestendToAssetHubRococoMessagesLane, + >, + ActualFeeRefund, + PriorityBoostPerMessage, + StrOnBridgeHubWestendRefundBridgeHubRococoMessages, + >, +>; +bp_runtime::generate_static_str_provider!(OnBridgeHubWestendRefundBridgeHubRococoMessages); + +/// Add GRANDPA bridge pallet to track Rococo relay chain. 
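+///
+/// Only the bridged relay chain (Rococo) needs a GRANDPA instance here: BridgeHub Rococo heads
+/// are then verified against these finalized relay headers by the parachains pallet instance
+/// configured right below. (This is a summary of the wiring, not additional behaviour.)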
+pub type BridgeGrandpaRococoInstance = pallet_bridge_grandpa::Instance1; +impl pallet_bridge_grandpa::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type BridgedChain = bp_rococo::Rococo; + type MaxFreeMandatoryHeadersPerBlock = ConstU32<4>; + type HeadersToKeep = RelayChainHeadersToKeep; + type WeightInfo = weights::pallet_bridge_grandpa::WeightInfo; +} + +/// Add parachain bridge pallet to track Rococo BridgeHub parachain +pub type BridgeParachainRococoInstance = pallet_bridge_parachains::Instance1; +impl pallet_bridge_parachains::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = weights::pallet_bridge_parachains::WeightInfo; + type BridgesGrandpaPalletInstance = BridgeGrandpaRococoInstance; + type ParasPalletName = RococoBridgeParachainPalletName; + type ParaStoredHeaderDataBuilder = + SingleParaStoredHeaderDataBuilder; + type HeadsToKeep = ParachainHeadsToKeep; + type MaxParaHeadDataSize = MaxRococoParaHeadDataSize; +} + +/// Add XCM messages support for BridgeHubWestend to support Westend->Rococo XCM messages +pub type WithBridgeHubRococoMessagesInstance = pallet_bridge_messages::Instance1; +impl pallet_bridge_messages::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = weights::pallet_bridge_messages::WeightInfo; + type BridgedChainId = BridgeHubRococoChainId; + type ActiveOutboundLanes = ActiveOutboundLanesToBridgeHubRococo; + type MaxUnrewardedRelayerEntriesAtInboundLane = MaxUnrewardedRelayerEntriesAtInboundLane; + type MaxUnconfirmedMessagesAtInboundLane = MaxUnconfirmedMessagesAtInboundLane; + + type MaximalOutboundPayloadSize = ToBridgeHubRococoMaximalOutboundPayloadSize; + type OutboundPayload = XcmAsPlainPayload; + + type InboundPayload = XcmAsPlainPayload; + type InboundRelayer = AccountId; + type DeliveryPayments = (); + + type TargetHeaderChain = TargetHeaderChainAdapter; + type LaneMessageVerifier = ToBridgeHubRococoMessageVerifier; + type DeliveryConfirmationPayments = pallet_bridge_relayers::DeliveryConfirmationPaymentsAdapter< + Runtime, + WithBridgeHubRococoMessagesInstance, + DeliveryRewardInBalance, + >; + + type SourceHeaderChain = SourceHeaderChainAdapter; + type MessageDispatch = XcmBlobMessageDispatch< + FromRococoMessageBlobDispatcher, + Self::WeightInfo, + cumulus_pallet_xcmp_queue::bridging::OutXcmpChannelStatusProvider< + AssetHubWestendParaId, + Runtime, + >, + >; + type OnMessagesDelivered = OnMessagesDelivered; +} + +#[cfg(test)] +mod tests { + use super::*; + use bridge_runtime_common::{ + assert_complete_bridge_types, + integrity::{ + assert_complete_bridge_constants, check_message_lane_weights, + AssertBridgeMessagesPalletConstants, AssertBridgePalletNames, AssertChainConstants, + AssertCompleteBridgeConstants, + }, + }; + use parachains_common::{westend, Balance}; + + /// Every additional message in the message delivery transaction boosts its priority. + /// So the priority of transaction with `N+1` messages is larger than priority of + /// transaction with `N` messages by the `PriorityBoostPerMessage`. + /// + /// Economically, it is an equivalent of adding tip to the transaction with `N` messages. + /// The `FEE_BOOST_PER_MESSAGE` constant is the value of this tip. + /// + /// We want this tip to be large enough (delivery transactions with more messages = less + /// operational costs and a faster bridge), so this value should be significant. 
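+ ///
+ /// As a rough illustration (numbers are only indicative): with `FEE_BOOST_PER_MESSAGE` set
+ /// to 2 WND, a delivery transaction carrying 11 messages is prioritised as if it paid about
+ /// 10 * 2 WND more tip than a single-message transaction. The
+ /// `ensure_priority_boost_is_sane` call below checks that `PriorityBoostPerMessage` indeed
+ /// corresponds to this tip value for this runtime.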
+ const FEE_BOOST_PER_MESSAGE: Balance = 2 * westend::currency::UNITS; + + #[test] + fn ensure_bridge_hub_westend_message_lane_weights_are_correct() { + check_message_lane_weights::< + bp_bridge_hub_westend::BridgeHubWestend, + Runtime, + WithBridgeHubRococoMessagesInstance, + >( + bp_bridge_hub_rococo::EXTRA_STORAGE_PROOF_SIZE, + bp_bridge_hub_westend::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, + bp_bridge_hub_westend::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, + true, + ); + } + + #[test] + fn ensure_bridge_integrity() { + assert_complete_bridge_types!( + runtime: Runtime, + with_bridged_chain_grandpa_instance: BridgeGrandpaRococoInstance, + with_bridged_chain_messages_instance: WithBridgeHubRococoMessagesInstance, + bridge: WithBridgeHubRococoMessageBridge, + this_chain: bp_westend::Westend, + bridged_chain: bp_rococo::Rococo, + ); + + assert_complete_bridge_constants::< + Runtime, + BridgeGrandpaRococoInstance, + WithBridgeHubRococoMessagesInstance, + WithBridgeHubRococoMessageBridge, + >(AssertCompleteBridgeConstants { + this_chain_constants: AssertChainConstants { + block_length: bp_bridge_hub_westend::BlockLength::get(), + block_weights: bp_bridge_hub_westend::BlockWeights::get(), + }, + messages_pallet_constants: AssertBridgeMessagesPalletConstants { + max_unrewarded_relayers_in_bridged_confirmation_tx: + bp_bridge_hub_rococo::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, + max_unconfirmed_messages_in_bridged_confirmation_tx: + bp_bridge_hub_rococo::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, + bridged_chain_id: bp_runtime::BRIDGE_HUB_ROCOCO_CHAIN_ID, + }, + pallet_names: AssertBridgePalletNames { + with_this_chain_messages_pallet_name: + bp_bridge_hub_westend::WITH_BRIDGE_HUB_WESTEND_MESSAGES_PALLET_NAME, + with_bridged_chain_grandpa_pallet_name: bp_rococo::WITH_ROCOCO_GRANDPA_PALLET_NAME, + with_bridged_chain_messages_pallet_name: + bp_bridge_hub_rococo::WITH_BRIDGE_HUB_ROCOCO_MESSAGES_PALLET_NAME, + }, + }); + + bridge_runtime_common::priority_calculator::ensure_priority_boost_is_sane::< + Runtime, + WithBridgeHubRococoMessagesInstance, + PriorityBoostPerMessage, + >(FEE_BOOST_PER_MESSAGE); + + assert_eq!( + BridgeWestendToRococoMessagesPalletInstance::get(), + X1(PalletInstance( + bp_bridge_hub_westend::WITH_BRIDGE_WESTEND_TO_ROCOCO_MESSAGES_PALLET_INDEX + )) + ); + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..9c97728058f762bec46ef5a373b0d628113abefc --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -0,0 +1,1137 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! # Bridge Hub Westend Runtime +//! +//! This runtime currently supports bridging between: +//! 
- Rococo <> Westend + +#![cfg_attr(not(feature = "std"), no_std)] +// `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. +#![recursion_limit = "256"] + +// Make the WASM binary available. +#[cfg(feature = "std")] +include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); + +pub mod bridge_common_config; +pub mod bridge_to_rococo_config; +mod weights; +pub mod xcm_config; + +use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; +use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; +use parachains_common::message_queue::{NarrowOriginToSibling, ParaIdToSibling}; +use sp_api::impl_runtime_apis; +use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; +use sp_runtime::{ + create_runtime_str, generic, impl_opaque_keys, + traits::{AccountIdLookup, BlakeTwo256, Block as BlockT}, + transaction_validity::{TransactionSource, TransactionValidity}, + ApplyExtrinsicResult, +}; + +use sp_std::prelude::*; +#[cfg(feature = "std")] +use sp_version::NativeVersion; +use sp_version::RuntimeVersion; + +use frame_support::{ + construct_runtime, + dispatch::DispatchClass, + genesis_builder_helper::{build_config, create_default_config}, + parameter_types, + traits::{ConstBool, ConstU32, ConstU64, ConstU8, Everything, TransformOrigin}, + weights::{ConstantMultiplier, Weight}, + PalletId, +}; +use frame_system::{ + limits::{BlockLength, BlockWeights}, + EnsureRoot, +}; +pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; +pub use sp_runtime::{MultiAddress, Perbill, Permill}; +use xcm_config::{XcmOriginToTransactDispatchOrigin, XcmRouter}; + +use bp_runtime::HeaderId; + +#[cfg(any(feature = "std", test))] +pub use sp_runtime::BuildStorage; + +use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; +use xcm::latest::prelude::*; + +use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; + +use parachains_common::{ + impls::DealWithFees, + westend::{consensus::*, currency::*, fee::WeightToFee}, + AccountId, Balance, BlockNumber, Hash, Header, Nonce, Signature, AVERAGE_ON_INITIALIZE_RATIO, + HOURS, MAXIMUM_BLOCK_WEIGHT, NORMAL_DISPATCH_RATIO, SLOT_DURATION, +}; + +/// The address format for describing accounts. +pub type Address = MultiAddress; + +/// Block type as expected by this runtime. +pub type Block = generic::Block; + +/// A Block signed with a Justification +pub type SignedBlock = generic::SignedBlock; + +/// BlockId type as expected by this runtime. +pub type BlockId = generic::BlockId; + +/// The SignedExtension to the basic transaction logic. +pub type SignedExtra = ( + frame_system::CheckNonZeroSender, + frame_system::CheckSpecVersion, + frame_system::CheckTxVersion, + frame_system::CheckGenesis, + frame_system::CheckEra, + frame_system::CheckNonce, + frame_system::CheckWeight, + pallet_transaction_payment::ChargeTransactionPayment, + BridgeRejectObsoleteHeadersAndMessages, + (bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages,), +); + +/// Unchecked extrinsic type as expected by this runtime. +pub type UncheckedExtrinsic = + generic::UncheckedExtrinsic; + +/// Migrations to apply on runtime upgrade. +pub type Migrations = ( + pallet_collator_selection::migration::v1::MigrateToV1, + pallet_multisig::migrations::v1::MigrateToV1, + InitStorageVersions, +); + +/// Migration to initialize storage versions for pallets added after genesis. 
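+/// Concretely, the `on_runtime_upgrade` implementation below sets the on-chain storage versions
+/// of `PolkadotXcm` and `Balances` to their current versions if they are still at 0.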
+/// +/// Ideally this would be done automatically (see +/// ), but it probably won't be ready for some +/// time and it's beneficial to get try-runtime-cli on-runtime-upgrade checks into the CI, so we're +/// doing it manually. +pub struct InitStorageVersions; + +impl frame_support::traits::OnRuntimeUpgrade for InitStorageVersions { + fn on_runtime_upgrade() -> Weight { + use frame_support::traits::{GetStorageVersion, StorageVersion}; + use sp_runtime::traits::Saturating; + + let mut writes = 0; + + if PolkadotXcm::on_chain_storage_version() == StorageVersion::new(0) { + PolkadotXcm::current_storage_version().put::(); + writes.saturating_inc(); + } + + if Balances::on_chain_storage_version() == StorageVersion::new(0) { + Balances::current_storage_version().put::(); + writes.saturating_inc(); + } + + ::DbWeight::get().reads_writes(2, writes) + } +} + +/// Executive: handles dispatch to the various modules. +pub type Executive = frame_executive::Executive< + Runtime, + Block, + frame_system::ChainContext, + Runtime, + AllPalletsWithSystem, + Migrations, +>; + +impl_opaque_keys! { + pub struct SessionKeys { + pub aura: Aura, + } +} + +#[sp_version::runtime_version] +pub const VERSION: RuntimeVersion = RuntimeVersion { + spec_name: create_runtime_str!("bridge-hub-westend"), + impl_name: create_runtime_str!("bridge-hub-westend"), + authoring_version: 1, + spec_version: 1_003_000, + impl_version: 0, + apis: RUNTIME_API_VERSIONS, + transaction_version: 3, + state_version: 1, +}; + +/// The version information used to identify this runtime when compiled natively. +#[cfg(feature = "std")] +pub fn native_version() -> NativeVersion { + NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } +} + +parameter_types! { + pub const Version: RuntimeVersion = VERSION; + pub RuntimeBlockLength: BlockLength = + BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); + pub RuntimeBlockWeights: BlockWeights = BlockWeights::builder() + .base_block(BlockExecutionWeight::get()) + .for_class(DispatchClass::all(), |weights| { + weights.base_extrinsic = ExtrinsicBaseWeight::get(); + }) + .for_class(DispatchClass::Normal, |weights| { + weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); + }) + .for_class(DispatchClass::Operational, |weights| { + weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); + // Operational transactions have some extra reserved space, so that they + // are included even if block reached `MAXIMUM_BLOCK_WEIGHT`. + weights.reserved = Some( + MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT + ); + }) + .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) + .build_or_panic(); + pub const SS58Prefix: u16 = 42; +} + +// Configure FRAME pallets to include in runtime. + +impl frame_system::Config for Runtime { + /// The identifier used to distinguish between accounts. + type AccountId = AccountId; + /// The aggregated dispatch type that is available for extrinsics. + type RuntimeCall = RuntimeCall; + /// The lookup mechanism to get account ID from whatever is passed in dispatchers. + type Lookup = AccountIdLookup; + /// The index type for storing how many extrinsics an account has signed. + type Nonce = Nonce; + /// The type for hashing blocks and tries. + type Hash = Hash; + /// The hashing algorithm used. + type Hashing = BlakeTwo256; + /// The block type. + type Block = Block; + /// The ubiquitous event type. + type RuntimeEvent = RuntimeEvent; + /// The ubiquitous origin type. 
+ type RuntimeOrigin = RuntimeOrigin; + /// Maximum number of block number to block hash mappings to keep (oldest pruned first). + type BlockHashCount = BlockHashCount; + /// Runtime version. + type Version = Version; + /// Converts a module to an index of this module in the runtime. + type PalletInfo = PalletInfo; + /// The data to be stored in an account. + type AccountData = pallet_balances::AccountData; + /// What to do if a new account is created. + type OnNewAccount = (); + /// What to do if an account is fully reaped from the system. + type OnKilledAccount = (); + /// The weight of database operations that the runtime can invoke. + type DbWeight = RocksDbWeight; + /// The basic call filter to use in dispatchable. + type BaseCallFilter = Everything; + /// Weight information for the extrinsics of this pallet. + type SystemWeightInfo = weights::frame_system::WeightInfo; + /// Block & extrinsics weights: base values and limits. + type BlockWeights = RuntimeBlockWeights; + /// The maximum length of a block (in bytes). + type BlockLength = RuntimeBlockLength; + /// This is used as an identifier of the chain. 42 is the generic substrate prefix. + type SS58Prefix = SS58Prefix; + /// The action to take on a Runtime Upgrade + type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +impl pallet_timestamp::Config for Runtime { + /// A timestamp: milliseconds since the unix epoch. + type Moment = u64; + type OnTimestampSet = Aura; + type MinimumPeriod = ConstU64<{ SLOT_DURATION / 2 }>; + type WeightInfo = weights::pallet_timestamp::WeightInfo; +} + +impl pallet_authorship::Config for Runtime { + type FindAuthor = pallet_session::FindAccountFromAuthorIndex; + type EventHandler = (CollatorSelection,); +} + +parameter_types! { + pub const ExistentialDeposit: Balance = EXISTENTIAL_DEPOSIT; +} + +impl pallet_balances::Config for Runtime { + /// The type for recording an account's balance. + type Balance = Balance; + type DustRemoval = (); + /// The ubiquitous event type. + type RuntimeEvent = RuntimeEvent; + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = weights::pallet_balances::WeightInfo; + type MaxLocks = ConstU32<50>; + type MaxReserves = ConstU32<50>; + type ReserveIdentifier = [u8; 8]; + type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; + type FreezeIdentifier = (); + type MaxHolds = ConstU32<0>; + type MaxFreezes = ConstU32<0>; +} + +parameter_types! { + /// Relay Chain `TransactionByteFee` / 10 + pub const TransactionByteFee: Balance = MILLICENTS; +} + +impl pallet_transaction_payment::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type OnChargeTransaction = + pallet_transaction_payment::CurrencyAdapter>; + type OperationalFeeMultiplier = ConstU8<5>; + type WeightToFee = WeightToFee; + type LengthToFee = ConstantMultiplier; + type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; +} + +parameter_types! 
{ + pub const ReservedXcmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); + pub const ReservedDmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); +} + +impl cumulus_pallet_parachain_system::Config for Runtime { + type WeightInfo = weights::cumulus_pallet_parachain_system::WeightInfo; + type RuntimeEvent = RuntimeEvent; + type OnSystemEvent = (); + type SelfParaId = parachain_info::Pallet; + type OutboundXcmpMessageSource = XcmpQueue; + type DmpQueue = frame_support::traits::EnqueueWithOrigin; + type ReservedDmpWeight = ReservedDmpWeight; + type XcmpMessageHandler = XcmpQueue; + type ReservedXcmpWeight = ReservedXcmpWeight; + type CheckAssociatedRelayNumber = RelayNumberStrictlyIncreases; + type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< + Runtime, + RELAY_CHAIN_SLOT_DURATION_MILLIS, + BLOCK_PROCESSING_VELOCITY, + UNINCLUDED_SEGMENT_CAPACITY, + >; +} + +impl parachain_info::Config for Runtime {} + +parameter_types! { + pub MessageQueueServiceWeight: Weight = Perbill::from_percent(35) * RuntimeBlockWeights::get().max_block; +} + +impl pallet_message_queue::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = weights::pallet_message_queue::WeightInfo; + #[cfg(feature = "runtime-benchmarks")] + type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor< + cumulus_primitives_core::AggregateMessageOrigin, + >; + #[cfg(not(feature = "runtime-benchmarks"))] + type MessageProcessor = xcm_builder::ProcessXcmMessage< + AggregateMessageOrigin, + xcm_executor::XcmExecutor, + RuntimeCall, + >; + type Size = u32; + // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: + type QueueChangeHandler = NarrowOriginToSibling; + type QueuePausedQuery = NarrowOriginToSibling; + type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type MaxStale = sp_core::ConstU32<8>; + type ServiceWeight = MessageQueueServiceWeight; +} + +impl cumulus_pallet_aura_ext::Config for Runtime {} + +parameter_types! { + /// The asset ID for the asset that we use to pay for message delivery fees. + pub FeeAssetId: AssetId = Concrete(xcm_config::WestendLocation::get()); + /// The base fee for the message delivery fees. + pub const BaseDeliveryFee: u128 = CENTS.saturating_mul(3); +} + +pub type PriceForSiblingParachainDelivery = polkadot_runtime_common::xcm_sender::ExponentialPrice< + FeeAssetId, + BaseDeliveryFee, + TransactionByteFee, + XcmpQueue, +>; + +impl cumulus_pallet_xcmp_queue::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type ChannelInfo = ParachainSystem; + type VersionWrapper = PolkadotXcm; + type XcmpQueue = TransformOrigin; + type MaxInboundSuspended = sp_core::ConstU32<1_000>; + type ControllerOrigin = EnsureRoot; + type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; + type WeightInfo = weights::cumulus_pallet_xcmp_queue::WeightInfo; + type PriceForSiblingDelivery = PriceForSiblingParachainDelivery; +} + +parameter_types! { + pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; +} + +impl cumulus_pallet_dmp_queue::Config for Runtime { + type WeightInfo = weights::cumulus_pallet_dmp_queue::WeightInfo; + type RuntimeEvent = RuntimeEvent; + type DmpSink = frame_support::traits::EnqueueWithOrigin; +} + +pub const PERIOD: u32 = 6 * HOURS; +pub const OFFSET: u32 = 0; + +impl pallet_session::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type ValidatorId = ::AccountId; + // we don't have stash and controller, thus we don't need the convert as well. 
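+ // (`IdentityCollator` simply uses the account id itself as the validator id.)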
+ type ValidatorIdOf = pallet_collator_selection::IdentityCollator; + type ShouldEndSession = pallet_session::PeriodicSessions, ConstU32>; + type NextSessionRotation = pallet_session::PeriodicSessions, ConstU32>; + type SessionManager = CollatorSelection; + // Essentially just Aura, but let's be pedantic. + type SessionHandler = ::KeyTypeIdProviders; + type Keys = SessionKeys; + type WeightInfo = weights::pallet_session::WeightInfo; +} + +impl pallet_aura::Config for Runtime { + type AuthorityId = AuraId; + type DisabledValidators = (); + type MaxAuthorities = ConstU32<100_000>; + type AllowMultipleBlocksPerSlot = ConstBool; + #[cfg(feature = "experimental")] + type SlotDuration = pallet_aura::MinimumPeriodTimesTwo; +} + +parameter_types! { + pub const PotId: PalletId = PalletId(*b"PotStake"); + pub const SessionLength: BlockNumber = 6 * HOURS; +} + +pub type CollatorSelectionUpdateOrigin = EnsureRoot; + +impl pallet_collator_selection::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type UpdateOrigin = CollatorSelectionUpdateOrigin; + type PotId = PotId; + type MaxCandidates = ConstU32<100>; + type MinEligibleCollators = ConstU32<4>; + type MaxInvulnerables = ConstU32<20>; + // should be a multiple of session or things will get inconsistent + type KickThreshold = ConstU32; + type ValidatorId = ::AccountId; + type ValidatorIdOf = pallet_collator_selection::IdentityCollator; + type ValidatorRegistration = Session; + type WeightInfo = weights::pallet_collator_selection::WeightInfo; +} + +parameter_types! { + // One storage item; key size is 32; value is size 4+4+16+32 bytes = 56 bytes. + pub const DepositBase: Balance = deposit(1, 88); + // Additional storage item size of 32 bytes. + pub const DepositFactor: Balance = deposit(0, 32); +} + +impl pallet_multisig::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; + type Currency = Balances; + type DepositBase = DepositBase; + type DepositFactor = DepositFactor; + type MaxSignatories = ConstU32<100>; + type WeightInfo = weights::pallet_multisig::WeightInfo; +} + +impl pallet_utility::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; + type PalletsOrigin = OriginCaller; + type WeightInfo = weights::pallet_utility::WeightInfo; +} + +// Create the runtime by composing the FRAME pallets that were previously configured. +construct_runtime!( + pub enum Runtime + { + // System support stuff. + System: frame_system::{Pallet, Call, Config, Storage, Event} = 0, + ParachainSystem: cumulus_pallet_parachain_system::{ + Pallet, Call, Config, Storage, Inherent, Event, ValidateUnsigned, + } = 1, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent} = 2, + ParachainInfo: parachain_info::{Pallet, Storage, Config} = 3, + + // Monetary stuff. + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event} = 10, + TransactionPayment: pallet_transaction_payment::{Pallet, Storage, Event} = 11, + + // Collator support. The order of these 4 are important and shall not change. + Authorship: pallet_authorship::{Pallet, Storage} = 20, + CollatorSelection: pallet_collator_selection::{Pallet, Call, Storage, Event, Config} = 21, + Session: pallet_session::{Pallet, Call, Storage, Event, Config} = 22, + Aura: pallet_aura::{Pallet, Storage, Config} = 23, + AuraExt: cumulus_pallet_aura_ext::{Pallet, Storage, Config} = 24, + + // XCM helpers. 
+ XcmpQueue: cumulus_pallet_xcmp_queue::{Pallet, Call, Storage, Event} = 30, + PolkadotXcm: pallet_xcm::{Pallet, Call, Storage, Event, Origin, Config} = 31, + CumulusXcm: cumulus_pallet_xcm::{Pallet, Event, Origin} = 32, + DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event} = 33, + + // Handy utilities. + Utility: pallet_utility::{Pallet, Call, Event} = 40, + Multisig: pallet_multisig::{Pallet, Call, Storage, Event} = 36, + + // Bridging stuff. + BridgeRelayers: pallet_bridge_relayers::{Pallet, Call, Storage, Event} = 41, + BridgeRococoGrandpa: pallet_bridge_grandpa::::{Pallet, Call, Storage, Event, Config} = 42, + BridgeRococoParachains: pallet_bridge_parachains::::{Pallet, Call, Storage, Event} = 43, + BridgeRococoMessages: pallet_bridge_messages::::{Pallet, Call, Storage, Event, Config} = 44, + + // Message Queue. Importantly, is registered last so that messages are processed after + // the `on_initialize` hooks of bridging pallets. + MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 250, + } +); + +bridge_runtime_common::generate_bridge_reject_obsolete_headers_and_messages! { + RuntimeCall, AccountId, + // Grandpa + BridgeRococoGrandpa, + // Parachains + BridgeRococoParachains, + // Messages + BridgeRococoMessages +} + +#[cfg(feature = "runtime-benchmarks")] +#[macro_use] +extern crate frame_benchmarking; + +#[cfg(feature = "runtime-benchmarks")] +mod benches { + define_benchmarks!( + [frame_system, SystemBench::] + [pallet_balances, Balances] + [pallet_message_queue, MessageQueue] + [pallet_multisig, Multisig] + [pallet_session, SessionBench::] + [pallet_utility, Utility] + [pallet_timestamp, Timestamp] + [pallet_collator_selection, CollatorSelection] + [cumulus_pallet_xcmp_queue, XcmpQueue] + // XCM + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] + // NOTE: Make sure you point to the individual modules below. + [pallet_xcm_benchmarks::fungible, XcmBalances] + [pallet_xcm_benchmarks::generic, XcmGeneric] + // Bridge pallets + [pallet_bridge_relayers, BridgeRelayersBench::] + [pallet_bridge_grandpa, RococoFinality] + [pallet_bridge_parachains, WithinRococo] + [pallet_bridge_messages, WestendToRococo] + ); +} + +impl_runtime_apis! 
{ + impl sp_consensus_aura::AuraApi for Runtime { + fn slot_duration() -> sp_consensus_aura::SlotDuration { + sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) + } + + fn authorities() -> Vec { + Aura::authorities().into_inner() + } + } + + impl sp_api::Core for Runtime { + fn version() -> RuntimeVersion { + VERSION + } + + fn execute_block(block: Block) { + Executive::execute_block(block) + } + + fn initialize_block(header: &::Header) { + Executive::initialize_block(header) + } + } + + impl sp_api::Metadata for Runtime { + fn metadata() -> OpaqueMetadata { + OpaqueMetadata::new(Runtime::metadata().into()) + } + + fn metadata_at_version(version: u32) -> Option { + Runtime::metadata_at_version(version) + } + + fn metadata_versions() -> sp_std::vec::Vec { + Runtime::metadata_versions() + } + } + + impl sp_block_builder::BlockBuilder for Runtime { + fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { + Executive::apply_extrinsic(extrinsic) + } + + fn finalize_block() -> ::Header { + Executive::finalize_block() + } + + fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { + data.create_extrinsics() + } + + fn check_inherents( + block: Block, + data: sp_inherents::InherentData, + ) -> sp_inherents::CheckInherentsResult { + data.check_extrinsics(&block) + } + } + + impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { + fn validate_transaction( + source: TransactionSource, + tx: ::Extrinsic, + block_hash: ::Hash, + ) -> TransactionValidity { + Executive::validate_transaction(source, tx, block_hash) + } + } + + impl sp_offchain::OffchainWorkerApi for Runtime { + fn offchain_worker(header: &::Header) { + Executive::offchain_worker(header) + } + } + + impl sp_session::SessionKeys for Runtime { + fn generate_session_keys(seed: Option>) -> Vec { + SessionKeys::generate(seed) + } + + fn decode_session_keys( + encoded: Vec, + ) -> Option, KeyTypeId)>> { + SessionKeys::decode_into_raw_public_keys(&encoded) + } + } + + impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { + fn account_nonce(account: AccountId) -> Nonce { + System::account_nonce(account) + } + } + + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi for Runtime { + fn query_info( + uxt: ::Extrinsic, + len: u32, + ) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo { + TransactionPayment::query_info(uxt, len) + } + fn query_fee_details( + uxt: ::Extrinsic, + len: u32, + ) -> pallet_transaction_payment::FeeDetails { + TransactionPayment::query_fee_details(uxt, len) + } + fn query_weight_to_fee(weight: Weight) -> Balance { + TransactionPayment::weight_to_fee(weight) + } + fn query_length_to_fee(length: u32) -> Balance { + TransactionPayment::length_to_fee(length) + } + } + + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentCallApi + for Runtime + { + fn query_call_info( + call: RuntimeCall, + len: u32, + ) -> pallet_transaction_payment::RuntimeDispatchInfo { + TransactionPayment::query_call_info(call, len) + } + fn query_call_fee_details( + call: RuntimeCall, + len: u32, + ) -> pallet_transaction_payment::FeeDetails { + TransactionPayment::query_call_fee_details(call, len) + } + fn query_weight_to_fee(weight: Weight) -> Balance { + TransactionPayment::weight_to_fee(weight) + } + fn query_length_to_fee(length: u32) -> Balance { + TransactionPayment::length_to_fee(length) + } + } + + impl cumulus_primitives_core::CollectCollationInfo for Runtime { + fn collect_collation_info(header: &::Header) -> 
cumulus_primitives_core::CollationInfo { + ParachainSystem::collect_collation_info(header) + } + } + + impl bp_rococo::RococoFinalityApi for Runtime { + fn best_finalized() -> Option> { + BridgeRococoGrandpa::best_finalized() + } + fn synced_headers_grandpa_info( + ) -> Vec> { + BridgeRococoGrandpa::synced_headers_grandpa_info() + } + } + + impl bp_bridge_hub_rococo::BridgeHubRococoFinalityApi for Runtime { + fn best_finalized() -> Option> { + BridgeRococoParachains::best_parachain_head_id::< + bp_bridge_hub_rococo::BridgeHubRococo + >().unwrap_or(None) + } + } + + impl bp_bridge_hub_rococo::FromBridgeHubRococoInboundLaneApi for Runtime { + fn message_details( + lane: bp_messages::LaneId, + messages: Vec<(bp_messages::MessagePayload, bp_messages::OutboundMessageDetails)>, + ) -> Vec { + bridge_runtime_common::messages_api::inbound_message_details::< + Runtime, + bridge_to_rococo_config::WithBridgeHubRococoMessagesInstance, + >(lane, messages) + } + } + + impl bp_bridge_hub_rococo::ToBridgeHubRococoOutboundLaneApi for Runtime { + fn message_details( + lane: bp_messages::LaneId, + begin: bp_messages::MessageNonce, + end: bp_messages::MessageNonce, + ) -> Vec { + bridge_runtime_common::messages_api::outbound_message_details::< + Runtime, + bridge_to_rococo_config::WithBridgeHubRococoMessagesInstance, + >(lane, begin, end) + } + } + + #[cfg(feature = "try-runtime")] + impl frame_try_runtime::TryRuntime for Runtime { + fn on_runtime_upgrade(checks: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) { + let weight = Executive::try_runtime_upgrade(checks).unwrap(); + (weight, RuntimeBlockWeights::get().max_block) + } + + fn execute_block( + block: Block, + state_root_check: bool, + signature_check: bool, + select: frame_try_runtime::TryStateSelect, + ) -> Weight { + // NOTE: intentional unwrap: we don't want to propagate the error backwards, and want to + // have a backtrace here. + Executive::try_execute_block(block, state_root_check, signature_check, select).unwrap() + } + } + + #[cfg(feature = "runtime-benchmarks")] + impl frame_benchmarking::Benchmark for Runtime { + fn benchmark_metadata(extra: bool) -> ( + Vec, + Vec, + ) { + use frame_benchmarking::{Benchmarking, BenchmarkList}; + use frame_support::traits::StorageInfoTrait; + use frame_system_benchmarking::Pallet as SystemBench; + use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; + + // This is defined once again in dispatch_benchmark, because list_benchmarks! + // and add_benchmarks! are macros exported by define_benchmarks! macros and those types + // are referenced in that call. + type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::; + type XcmGeneric = pallet_xcm_benchmarks::generic::Pallet::; + + use pallet_bridge_relayers::benchmarking::Pallet as BridgeRelayersBench; + // Change weight file names. 
+ type RococoFinality = BridgeRococoGrandpa; + type WithinRococo = pallet_bridge_parachains::benchmarking::Pallet::; + type WestendToRococo = pallet_bridge_messages::benchmarking::Pallet ::; + + let mut list = Vec::::new(); + list_benchmarks!(list, extra); + + let storage_info = AllPalletsWithSystem::storage_info(); + (list, storage_info) + } + + fn dispatch_benchmark( + config: frame_benchmarking::BenchmarkConfig + ) -> Result, sp_runtime::RuntimeString> { + use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; + use sp_storage::TrackedStorageKey; + + use frame_system_benchmarking::Pallet as SystemBench; + impl frame_system_benchmarking::Config for Runtime { + fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { + ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); + Ok(()) + } + + fn verify_set_code() { + System::assert_last_event(cumulus_pallet_parachain_system::Event::::ValidationFunctionStored.into()); + } + } + + use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + impl cumulus_pallet_session_benchmarking::Config for Runtime {} + + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported between BH and Relay. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + Parent.into(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Reserve transfers are disabled on BH. + None + } + } + + use xcm::latest::prelude::*; + use xcm_config::WestendLocation; + + parameter_types! { + pub ExistentialDepositMultiAsset: Option = Some(( + WestendLocation::get(), + ExistentialDeposit::get() + ).into()); + } + + impl pallet_xcm_benchmarks::Config for Runtime { + type XcmConfig = xcm_config::XcmConfig; + type AccountIdConverter = xcm_config::LocationToAccountId; + type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositMultiAsset, + xcm_config::PriceForParentDelivery, + >; + fn valid_destination() -> Result { + Ok(WestendLocation::get()) + } + fn worst_case_holding(_depositable_count: u32) -> MultiAssets { + // just concrete assets according to relay chain. + let assets: Vec = vec![ + MultiAsset { + id: Concrete(WestendLocation::get()), + fun: Fungible(1_000_000 * UNITS), + } + ]; + assets.into() + } + } + + parameter_types! 
{ + pub const TrustedTeleporter: Option<(MultiLocation, MultiAsset)> = Some(( + WestendLocation::get(), + MultiAsset { fun: Fungible(UNITS), id: Concrete(WestendLocation::get()) }, + )); + pub const CheckedAccount: Option<(AccountId, xcm_builder::MintLocation)> = None; + pub const TrustedReserve: Option<(MultiLocation, MultiAsset)> = None; + } + + impl pallet_xcm_benchmarks::fungible::Config for Runtime { + type TransactAsset = Balances; + + type CheckedAccount = CheckedAccount; + type TrustedTeleporter = TrustedTeleporter; + type TrustedReserve = TrustedReserve; + + fn get_multi_asset() -> MultiAsset { + MultiAsset { + id: Concrete(WestendLocation::get()), + fun: Fungible(UNITS), + } + } + } + + impl pallet_xcm_benchmarks::generic::Config for Runtime { + type TransactAsset = Balances; + type RuntimeCall = RuntimeCall; + + fn worst_case_response() -> (u64, Response) { + (0u64, Response::Version(Default::default())) + } + + fn worst_case_asset_exchange() -> Result<(MultiAssets, MultiAssets), BenchmarkError> { + Err(BenchmarkError::Skip) + } + + fn universal_alias() -> Result<(MultiLocation, Junction), BenchmarkError> { + Err(BenchmarkError::Skip) + } + + fn transact_origin_and_runtime_call() -> Result<(MultiLocation, RuntimeCall), BenchmarkError> { + Ok((WestendLocation::get(), frame_system::Call::remark_with_event { remark: vec![] }.into())) + } + + fn subscribe_origin() -> Result { + Ok(WestendLocation::get()) + } + + fn claimable_asset() -> Result<(MultiLocation, MultiLocation, MultiAssets), BenchmarkError> { + let origin = WestendLocation::get(); + let assets: MultiAssets = (Concrete(WestendLocation::get()), 1_000 * UNITS).into(); + let ticket = MultiLocation { parents: 0, interior: Here }; + Ok((origin, ticket, assets)) + } + + fn unlockable_asset() -> Result<(MultiLocation, MultiLocation, MultiAsset), BenchmarkError> { + Err(BenchmarkError::Skip) + } + + fn export_message_origin_and_destination( + ) -> Result<(MultiLocation, NetworkId, InteriorMultiLocation), BenchmarkError> { + Ok((WestendLocation::get(), NetworkId::Rococo, X1(Parachain(100)))) + } + + fn alias_origin() -> Result<(MultiLocation, MultiLocation), BenchmarkError> { + Err(BenchmarkError::Skip) + } + } + + type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::; + type XcmGeneric = pallet_xcm_benchmarks::generic::Pallet::; + + type RococoFinality = BridgeRococoGrandpa; + type WithinRococo = pallet_bridge_parachains::benchmarking::Pallet::; + type WestendToRococo = pallet_bridge_messages::benchmarking::Pallet ::; + + use bridge_runtime_common::messages_benchmarking::{ + prepare_message_delivery_proof_from_parachain, + prepare_message_proof_from_parachain, + generate_xcm_builder_bridge_message_sample, + }; + use pallet_bridge_messages::benchmarking::{ + Config as BridgeMessagesConfig, + MessageDeliveryProofParams, + MessageProofParams, + }; + + impl BridgeMessagesConfig for Runtime { + fn is_relayer_rewarded(relayer: &Self::AccountId) -> bool { + let bench_lane_id = >::bench_lane_id(); + let bridged_chain_id = bp_runtime::BRIDGE_HUB_ROCOCO_CHAIN_ID; + pallet_bridge_relayers::Pallet::::relayer_reward( + relayer, + bp_relayers::RewardsAccountParams::new( + bench_lane_id, + bridged_chain_id, + bp_relayers::RewardsAccountOwner::BridgedChain + ) + ).is_some() + } + + fn prepare_message_proof( + params: MessageProofParams, + ) -> (bridge_to_rococo_config::FromRococoBridgeHubMessagesProof, Weight) { + use cumulus_primitives_core::XcmpMessageSource; + assert!(XcmpQueue::take_outbound_messages(usize::MAX).is_empty()); + 
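+ // Open an outbound HRMP channel to sibling para 42 (the same para id used in the sample
+ // message below), so that the proved message can be forwarded over XCMP during the
+ // benchmark; `is_message_successfully_dispatched` below checks that this queue is non-empty.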
ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests(42.into()); + prepare_message_proof_from_parachain::< + Runtime, + bridge_to_rococo_config::BridgeGrandpaRococoInstance, + bridge_to_rococo_config::WithBridgeHubRococoMessageBridge, + >(params, generate_xcm_builder_bridge_message_sample(X2(GlobalConsensus(Westend), Parachain(42)))) + } + + fn prepare_message_delivery_proof( + params: MessageDeliveryProofParams, + ) -> bridge_to_rococo_config::ToRococoBridgeHubMessagesDeliveryProof { + prepare_message_delivery_proof_from_parachain::< + Runtime, + bridge_to_rococo_config::BridgeGrandpaRococoInstance, + bridge_to_rococo_config::WithBridgeHubRococoMessageBridge, + >(params) + } + + fn is_message_successfully_dispatched(_nonce: bp_messages::MessageNonce) -> bool { + use cumulus_primitives_core::XcmpMessageSource; + !XcmpQueue::take_outbound_messages(usize::MAX).is_empty() + } + } + + use bridge_runtime_common::parachains_benchmarking::prepare_parachain_heads_proof; + use pallet_bridge_parachains::benchmarking::Config as BridgeParachainsConfig; + use pallet_bridge_relayers::benchmarking::{ + Pallet as BridgeRelayersBench, + Config as BridgeRelayersConfig, + }; + + impl BridgeParachainsConfig for Runtime { + fn parachains() -> Vec { + use bp_runtime::Parachain; + vec![bp_polkadot_core::parachains::ParaId(bp_bridge_hub_rococo::BridgeHubRococo::PARACHAIN_ID)] + } + + fn prepare_parachain_heads_proof( + parachains: &[bp_polkadot_core::parachains::ParaId], + parachain_head_size: u32, + proof_size: bp_runtime::StorageProofSize, + ) -> ( + pallet_bridge_parachains::RelayBlockNumber, + pallet_bridge_parachains::RelayBlockHash, + bp_polkadot_core::parachains::ParaHeadsProof, + Vec<(bp_polkadot_core::parachains::ParaId, bp_polkadot_core::parachains::ParaHash)>, + ) { + prepare_parachain_heads_proof::( + parachains, + parachain_head_size, + proof_size, + ) + } + } + + impl BridgeRelayersConfig for Runtime { + fn prepare_rewards_account( + account_params: bp_relayers::RewardsAccountParams, + reward: Balance, + ) { + let rewards_account = bp_relayers::PayRewardFromAccount::< + Balances, + AccountId + >::rewards_account(account_params); + Self::deposit_account(rewards_account, reward); + } + + fn deposit_account(account: AccountId, balance: Balance) { + use frame_support::traits::fungible::Mutate; + Balances::mint_into(&account, balance.saturating_add(ExistentialDeposit::get())).unwrap(); + } + } + + let whitelist: Vec = vec![ + // Block Number + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), + // Total Issuance + hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), + // Execution Phase + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), + // Event Count + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), + // System Events + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), + ]; + + let mut batches = Vec::::new(); + let params = (&config, &whitelist); + add_benchmarks!(params, batches); + + Ok(batches) + } + } + + impl sp_genesis_builder::GenesisBuilder for Runtime { + fn create_default_config() -> Vec { + create_default_config::() + } + + fn build_config(config: Vec) -> sp_genesis_builder::Result { + build_config::(config) + } + } +} + +cumulus_pallet_parachain_system::register_validate_block! 
{ + Runtime = Runtime, + BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::, +} + +#[cfg(test)] +mod tests { + use super::*; + use codec::Encode; + use sp_runtime::{ + generic::Era, + traits::{SignedExtension, Zero}, + }; + + #[test] + fn ensure_signed_extension_definition_is_compatible_with_relay() { + use bp_polkadot_core::SuffixedCommonSignedExtensionExt; + + sp_io::TestExternalities::default().execute_with(|| { + frame_system::BlockHash::::insert(BlockNumber::zero(), Hash::default()); + let payload: SignedExtra = ( + frame_system::CheckNonZeroSender::new(), + frame_system::CheckSpecVersion::new(), + frame_system::CheckTxVersion::new(), + frame_system::CheckGenesis::new(), + frame_system::CheckEra::from(Era::Immortal), + frame_system::CheckNonce::from(10), + frame_system::CheckWeight::new(), + pallet_transaction_payment::ChargeTransactionPayment::from(10), + BridgeRejectObsoleteHeadersAndMessages, + ( + bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages::default(), + ), + ); + + { + let bh_indirect_payload = bp_bridge_hub_westend::SignedExtension::from_params( + VERSION.spec_version, + VERSION.transaction_version, + bp_runtime::TransactionEra::Immortal, + System::block_hash(BlockNumber::zero()), + 10, + 10, + (((), ()), ((), ())), + ); + assert_eq!(payload.encode(), bh_indirect_payload.encode()); + assert_eq!( + payload.additional_signed().unwrap().encode(), + bh_indirect_payload.additional_signed().unwrap().encode() + ) + } + }); + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/block_weights.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/block_weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..e7fdb2aae2a01ec06076de83d94817e540e205dd --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/block_weights.rs @@ -0,0 +1,53 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod constants { + use frame_support::{ + parameter_types, + weights::{constants, Weight}, + }; + + parameter_types! { + /// Importing a block with 0 Extrinsics. + pub const BlockExecutionWeight: Weight = + Weight::from_parts(constants::WEIGHT_REF_TIME_PER_NANOS.saturating_mul(5_000_000), 0); + } + + #[cfg(test)] + mod test_weights { + use frame_support::weights::constants; + + /// Checks that the weight exists and is sane. + // NOTE: If this test fails but you are sure that the generated values are fine, + // you can delete it. + #[test] + fn sane() { + let w = super::constants::BlockExecutionWeight::get(); + + // At least 100 µs. + assert!( + w.ref_time() >= 100u64 * constants::WEIGHT_REF_TIME_PER_MICROS, + "Weight should be at least 100 µs." + ); + // At most 50 ms. + assert!( + w.ref_time() <= 50u64 * constants::WEIGHT_REF_TIME_PER_MILLIS, + "Weight should be at most 50 ms." 
+ ); + } + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/cumulus_pallet_dmp_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/cumulus_pallet_dmp_queue.rs new file mode 100644 index 0000000000000000000000000000000000000000..cc41dcd6cbbb62c1392ae2e7517b5dfe920a5b85 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/cumulus_pallet_dmp_queue.rs @@ -0,0 +1,131 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `cumulus_pallet_dmp_queue` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-10-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=cumulus_pallet_dmp_queue +// --chain=asset-hub-kusama-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `cumulus_pallet_dmp_queue`. 
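// Illustrative aside (not part of the generated weight file): the hard-coded
// whitelist keys in the runtime's benchmark setup above are twox_128 hashes of
// the pallet prefix concatenated with the storage item prefix. A minimal,
// hypothetical sketch (assuming the `sp-core` crate) that can be checked
// against the hex literals in that whitelist:
fn well_known_storage_key(pallet: &str, item: &str) -> Vec<u8> {
    use sp_core::hashing::twox_128;
    let mut key = twox_128(pallet.as_bytes()).to_vec();
    key.extend_from_slice(&twox_128(item.as_bytes()));
    key
}

#[test]
fn block_number_whitelist_key_matches_its_hashed_form() {
    // twox_128("System") ++ twox_128("Number"), i.e. the "Block Number" entry above.
    let expected = "26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac";
    let actual: String = well_known_storage_key("System", "Number")
        .iter()
        .map(|b| format!("{b:02x}"))
        .collect();
    assert_eq!(actual, expected);
}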
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> cumulus_pallet_dmp_queue::WeightInfo for WeightInfo<T> {
+ /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1)
+ /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`)
+ /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0)
+ /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0)
+ /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1)
+ /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1)
+ /// Storage: `MessageQueue::BookStateFor` (r:1 w:1)
+ /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`)
+ /// Storage: `MessageQueue::ServiceHead` (r:1 w:1)
+ /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+ /// Storage: `MessageQueue::Pages` (r:0 w:1)
+ /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`)
+ fn on_idle_good_msg() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `65696`
+ // Estimated: `69161`
+ // Minimum execution time: 124_651_000 picoseconds.
+ Weight::from_parts(127_857_000, 0)
+ .saturating_add(Weight::from_parts(0, 69161))
+ .saturating_add(T::DbWeight::get().reads(5))
+ .saturating_add(T::DbWeight::get().writes(5))
+ }
+ /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1)
+ /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`)
+ /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0)
+ /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0)
+ /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1)
+ /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1)
+ fn on_idle_large_msg() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `65659`
+ // Estimated: `69124`
+ // Minimum execution time: 65_684_000 picoseconds.
+ Weight::from_parts(68_039_000, 0) + .saturating_add(Weight::from_parts(0, 69124)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn on_idle_overweight_good_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65726` + // Estimated: `69191` + // Minimum execution time: 117_657_000 picoseconds. + Weight::from_parts(122_035_000, 0) + .saturating_add(Weight::from_parts(0, 69191)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(6)) + } + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + fn on_idle_overweight_large_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65689` + // Estimated: `69154` + // Minimum execution time: 59_799_000 picoseconds. 
+ Weight::from_parts(61_354_000, 0) + .saturating_add(Weight::from_parts(0, 69154)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/cumulus_pallet_parachain_system.rs new file mode 100644 index 0000000000000000000000000000000000000000..dc480c391636a92aad6e303a515524b5ce7ff2b1 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/cumulus_pallet_parachain_system.rs @@ -0,0 +1,80 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `cumulus_pallet_parachain_system` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-03-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --chain +// bridge-hub-rococo-dev +// --pallet +// cumulus_pallet_parachain_system +// --extrinsic +// * +// --execution +// wasm +// --wasm-execution +// compiled +// --output +// parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights +// --steps +// 50 +// --repeat +// 20 + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::Weight}; +use sp_std::marker::PhantomData; + +/// Weight functions for `cumulus_pallet_parachain_system`. +pub struct WeightInfo(PhantomData); +impl cumulus_pallet_parachain_system::WeightInfo for WeightInfo { + /// Storage: ParachainSystem LastDmqMqcHead (r:1 w:1) + /// Proof Skipped: ParachainSystem LastDmqMqcHead (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: ParachainSystem ReservedDmpWeightOverride (r:1 w:0) + /// Proof Skipped: ParachainSystem ReservedDmpWeightOverride (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: ParachainSystem ProcessedDownwardMessages (r:0 w:1) + /// Proof Skipped: ParachainSystem ProcessedDownwardMessages (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: MessageQueue Pages (r:0 w:16) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// The range of component `n` is `[0, 1000]`. 
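// Illustrative aside (not part of the generated weight file): every generated
// function here layers `T::DbWeight::get().reads(..)` / `.writes(..)` on top of
// a measured base execution time. A minimal sketch of how those components
// combine, using a hypothetical `RuntimeDbWeight` rather than this runtime's
// real per-operation costs:
#[test]
fn db_weight_components_add_up() {
    use frame_support::weights::{RuntimeDbWeight, Weight};

    // Hypothetical per-read / per-write costs, in picoseconds of ref_time.
    let db = RuntimeDbWeight { read: 25_000_000, write: 100_000_000 };

    // Shape of a generated formula: measured base plus 4 reads plus 4 writes.
    let total = Weight::from_parts(1_717_000, 0)
        .saturating_add(db.reads(4))
        .saturating_add(db.writes(4));

    assert_eq!(total.ref_time(), 1_717_000 + 4 * 25_000_000 + 4 * 100_000_000);
}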
+ fn enqueue_inbound_downward_messages(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `12` + // Estimated: `8013` + // Minimum execution time: 1_645_000 picoseconds. + Weight::from_parts(1_717_000, 0) + .saturating_add(Weight::from_parts(0, 8013)) + // Standard Error: 12_258 + .saturating_add(Weight::from_parts(24_890_934, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/cumulus_pallet_xcmp_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/cumulus_pallet_xcmp_queue.rs new file mode 100644 index 0000000000000000000000000000000000000000..70fc3617bce93e2fa9246a649cd3adf81c1cda23 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/cumulus_pallet_xcmp_queue.rs @@ -0,0 +1,148 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `cumulus_pallet_xcmp_queue` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-09-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `Olivers-MacBook-Pro.local`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --pallet +// cumulus-pallet-xcmp-queue +// --chain +// bridge-hub-rococo-dev +// --output +// cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/cumulus_pallet_xcmp_queue.rs +// --extrinsic +// + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `cumulus_pallet_xcmp_queue`. +pub struct WeightInfo(PhantomData); +impl cumulus_pallet_xcmp_queue::WeightInfo for WeightInfo { + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:1) + /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn set_config_with_u32() -> Weight { + // Proof Size summary in bytes: + // Measured: `76` + // Estimated: `1561` + // Minimum execution time: 5_000_000 picoseconds. 
+ Weight::from_parts(6_000_000, 0) + .saturating_add(Weight::from_parts(0, 1561)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) + /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn enqueue_xcmp_message() -> Weight { + // Proof Size summary in bytes: + // Measured: `82` + // Estimated: `3517` + // Minimum execution time: 14_000_000 picoseconds. + Weight::from_parts(15_000_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn suspend_channel() -> Weight { + // Proof Size summary in bytes: + // Measured: `76` + // Estimated: `1561` + // Minimum execution time: 3_000_000 picoseconds. + Weight::from_parts(3_000_000, 0) + .saturating_add(Weight::from_parts(0, 1561)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn resume_channel() -> Weight { + // Proof Size summary in bytes: + // Measured: `111` + // Estimated: `1596` + // Minimum execution time: 4_000_000 picoseconds. + Weight::from_parts(4_000_000, 0) + .saturating_add(Weight::from_parts(0, 1596)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn take_first_concatenated_xcm() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 44_000_000 picoseconds. 
+ Weight::from_parts(45_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Storage: `XcmpQueue::InboundXcmpMessages` (r:1 w:1) + /// Proof: `XcmpQueue::InboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) + /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn on_idle_good_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65711` + // Estimated: `69176` + // Minimum execution time: 67_000_000 picoseconds. + Weight::from_parts(73_000_000, 0) + .saturating_add(Weight::from_parts(0, 69176)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + fn on_idle_large_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65710` + // Estimated: `69175` + // Minimum execution time: 49_000_000 picoseconds. + Weight::from_parts(55_000_000, 0) + .saturating_add(Weight::from_parts(0, 69175)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/extrinsic_weights.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/extrinsic_weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..1a4adb968bb7195428ea00d59cd92dcd3b6eea5f --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/extrinsic_weights.rs @@ -0,0 +1,53 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod constants { + use frame_support::{ + parameter_types, + weights::{constants, Weight}, + }; + + parameter_types! { + /// Executing a NO-OP `System::remarks` Extrinsic. 
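// Illustrative aside (not part of the generated weight file): the base weights
// in this runtime are written as a nanosecond count scaled by
// `WEIGHT_REF_TIME_PER_NANOS`, and the accompanying sanity tests only assert a
// broad plausibility window rather than an exact value. A minimal sketch of
// that arithmetic, mirroring the 125 µs `ExtrinsicBaseWeight` defined below:
#[test]
fn nanosecond_scaled_base_weight_sits_in_the_sanity_window() {
    use frame_support::weights::constants::{
        WEIGHT_REF_TIME_PER_MICROS, WEIGHT_REF_TIME_PER_MILLIS, WEIGHT_REF_TIME_PER_NANOS,
    };

    // 125_000 ns of ref_time, i.e. 125 µs.
    let ref_time = WEIGHT_REF_TIME_PER_NANOS.saturating_mul(125_000);

    assert!(ref_time >= 10 * WEIGHT_REF_TIME_PER_MICROS, "at least 10 µs");
    assert!(ref_time <= WEIGHT_REF_TIME_PER_MILLIS, "at most 1 ms");
}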
+ pub const ExtrinsicBaseWeight: Weight = + Weight::from_parts(constants::WEIGHT_REF_TIME_PER_NANOS.saturating_mul(125_000), 0); + } + + #[cfg(test)] + mod test_weights { + use frame_support::weights::constants; + + /// Checks that the weight exists and is sane. + // NOTE: If this test fails but you are sure that the generated values are fine, + // you can delete it. + #[test] + fn sane() { + let w = super::constants::ExtrinsicBaseWeight::get(); + + // At least 10 µs. + assert!( + w.ref_time() >= 10u64 * constants::WEIGHT_REF_TIME_PER_MICROS, + "Weight should be at least 10 µs." + ); + // At most 1 ms. + assert!( + w.ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, + "Weight should be at most 1 ms." + ); + } + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/frame_system.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/frame_system.rs new file mode 100644 index 0000000000000000000000000000000000000000..3dec4cc7f182c9aede28084122747dca63b24431 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/frame_system.rs @@ -0,0 +1,155 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `frame_system` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=bridge-hub-rococo-dev +// --wasm-execution=compiled +// --pallet=frame_system +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system`. +pub struct WeightInfo(PhantomData); +impl frame_system::WeightInfo for WeightInfo { + /// The range of component `b` is `[0, 3932160]`. + fn remark(b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_956_000 picoseconds. + Weight::from_parts(2_974_450, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 0 + .saturating_add(Weight::from_parts(388, 0).saturating_mul(b.into())) + } + /// The range of component `b` is `[0, 3932160]`. 
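// Illustrative aside (not part of the generated weight file): component-dependent
// functions such as `remark(b)` are affine, a measured base plus a per-unit
// slope, and callers take the worst case at the top of the recorded component
// range. A minimal sketch reusing the base, slope and range shown for `remark`
// above purely as example numbers:
#[test]
fn affine_weight_formula_worst_case() {
    use frame_support::weights::Weight;

    let base = Weight::from_parts(2_974_450, 0);
    let per_byte = Weight::from_parts(388, 0);
    let max_bytes: u64 = 3_932_160; // top of the `b` component range

    let worst_case = base.saturating_add(per_byte.saturating_mul(max_bytes));
    assert_eq!(worst_case.ref_time(), 2_974_450 + 388 * 3_932_160);
}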
+ fn remark_with_event(b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_432_000 picoseconds. + Weight::from_parts(7_686_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 3 + .saturating_add(Weight::from_parts(1_767, 0).saturating_mul(b.into())) + } + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) + fn set_heap_pages() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `1485` + // Minimum execution time: 3_715_000 picoseconds. + Weight::from_parts(3_983_000, 0) + .saturating_add(Weight::from_parts(0, 1485)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) + /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpgradeRestrictionSignal` (r:1 w:0) + /// Proof: `ParachainSystem::UpgradeRestrictionSignal` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingValidationCode` (r:1 w:1) + /// Proof: `ParachainSystem::PendingValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::NewValidationCode` (r:0 w:1) + /// Proof: `ParachainSystem::NewValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::DidSetValidationCode` (r:0 w:1) + /// Proof: `ParachainSystem::DidSetValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn set_code() -> Weight { + // Proof Size summary in bytes: + // Measured: `119` + // Estimated: `1604` + // Minimum execution time: 99_688_458_000 picoseconds. + Weight::from_parts(103_623_061_000, 0) + .saturating_add(Weight::from_parts(0, 1604)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `i` is `[0, 1000]`. + fn set_storage(i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_318_000 picoseconds. + Weight::from_parts(2_421_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 2_168 + .saturating_add(Weight::from_parts(765_555, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) + } + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `i` is `[0, 1000]`. + fn kill_storage(i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_162_000 picoseconds. 
+ Weight::from_parts(2_228_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 951 + .saturating_add(Weight::from_parts(569_773, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) + } + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `p` is `[0, 1000]`. + fn kill_prefix(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `68 + p * (69 ±0)` + // Estimated: `71 + p * (70 ±0)` + // Minimum execution time: 3_795_000 picoseconds. + Weight::from_parts(3_895_000, 0) + .saturating_add(Weight::from_parts(0, 71)) + // Standard Error: 1_869 + .saturating_add(Weight::from_parts(1_209_251, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) + .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..833944ebfa52eb3def20271dc129b0b314aa8198 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs @@ -0,0 +1,75 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Expose the auto generated weight files. 
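// Illustrative aside (not part of the module that follows): the module body
// below plugs an `expected_extra_storage_proof_size()` allowance into the
// bridge message weights; conceptually such an allowance only grows the
// proof-size half of a `Weight`, not its ref_time. A minimal sketch with a
// hypothetical constant standing in for `bp_bridge_hub_rococo::EXTRA_STORAGE_PROOF_SIZE`:
#[test]
fn extra_storage_proof_size_only_grows_proof_size() {
    use frame_support::weights::Weight;

    const EXTRA_STORAGE_PROOF_SIZE: u32 = 1024; // hypothetical value

    let base = Weight::from_parts(42_332_000, 52_645);
    let padded = base.saturating_add(Weight::from_parts(0, EXTRA_STORAGE_PROOF_SIZE as u64));

    assert_eq!(padded.ref_time(), base.ref_time());
    assert_eq!(padded.proof_size(), base.proof_size() + 1_024);
}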
+ +use ::pallet_bridge_messages::WeightInfoExt as MessagesWeightInfoExt; +use ::pallet_bridge_parachains::WeightInfoExt as ParachainsWeightInfoExt; + +pub mod block_weights; +pub mod cumulus_pallet_dmp_queue; +pub mod cumulus_pallet_parachain_system; +pub mod cumulus_pallet_xcmp_queue; +pub mod extrinsic_weights; +pub mod frame_system; +pub mod pallet_balances; +pub mod pallet_bridge_grandpa; +pub mod pallet_bridge_messages; +pub mod pallet_bridge_parachains; +pub mod pallet_bridge_relayers; +pub mod pallet_collator_selection; +pub mod pallet_message_queue; +pub mod pallet_multisig; +pub mod pallet_session; +pub mod pallet_timestamp; +pub mod pallet_utility; +pub mod pallet_xcm; +pub mod paritydb_weights; +pub mod rocksdb_weights; +pub mod xcm; + +pub use block_weights::constants::BlockExecutionWeight; +pub use extrinsic_weights::constants::ExtrinsicBaseWeight; +pub use paritydb_weights::constants::ParityDbWeight; +pub use rocksdb_weights::constants::RocksDbWeight; + +use crate::Runtime; +use frame_support::weights::Weight; + +// import trait from dependency module +use ::pallet_bridge_relayers::WeightInfoExt as _; + +impl MessagesWeightInfoExt for pallet_bridge_messages::WeightInfo { + fn expected_extra_storage_proof_size() -> u32 { + bp_bridge_hub_rococo::EXTRA_STORAGE_PROOF_SIZE + } + + fn receive_messages_proof_overhead_from_runtime() -> Weight { + pallet_bridge_relayers::WeightInfo::::receive_messages_proof_overhead_from_runtime( + ) + } + + fn receive_messages_delivery_proof_overhead_from_runtime() -> Weight { + pallet_bridge_relayers::WeightInfo::::receive_messages_delivery_proof_overhead_from_runtime() + } +} + +impl ParachainsWeightInfoExt for pallet_bridge_parachains::WeightInfo { + fn expected_extra_storage_proof_size() -> u32 { + bp_bridge_hub_rococo::EXTRA_STORAGE_PROOF_SIZE + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_balances.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_balances.rs new file mode 100644 index 0000000000000000000000000000000000000000..26a188a98610a7ecc822eb5e7e448ad7f1867eb8 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_balances.rs @@ -0,0 +1,153 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_balances` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=bridge-hub-rococo-dev +// --wasm-execution=compiled +// --pallet=pallet_balances +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_balances`. +pub struct WeightInfo(PhantomData); +impl pallet_balances::WeightInfo for WeightInfo { + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn transfer_allow_death() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `3593` + // Minimum execution time: 56_219_000 picoseconds. + Weight::from_parts(56_763_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn transfer_keep_alive() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `3593` + // Minimum execution time: 41_515_000 picoseconds. + Weight::from_parts(42_186_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn force_set_balance_creating() -> Weight { + // Proof Size summary in bytes: + // Measured: `174` + // Estimated: `3593` + // Minimum execution time: 16_274_000 picoseconds. + Weight::from_parts(16_898_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn force_set_balance_killing() -> Weight { + // Proof Size summary in bytes: + // Measured: `174` + // Estimated: `3593` + // Minimum execution time: 23_847_000 picoseconds. + Weight::from_parts(24_343_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn force_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `6196` + // Minimum execution time: 57_564_000 picoseconds. 
+ Weight::from_parts(58_172_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn transfer_all() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `3593` + // Minimum execution time: 52_131_000 picoseconds. + Weight::from_parts(52_662_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn force_unreserve() -> Weight { + // Proof Size summary in bytes: + // Measured: `174` + // Estimated: `3593` + // Minimum execution time: 19_005_000 picoseconds. + Weight::from_parts(19_594_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:999 w:999) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `u` is `[1, 1000]`. + fn upgrade_accounts(u: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0 + u * (136 ±0)` + // Estimated: `990 + u * (2603 ±0)` + // Minimum execution time: 17_275_000 picoseconds. + Weight::from_parts(17_901_000, 0) + .saturating_add(Weight::from_parts(0, 990)) + // Standard Error: 15_775 + .saturating_add(Weight::from_parts(15_448_147, 0).saturating_mul(u.into())) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(u.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(u.into()))) + .saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into())) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa_bridge_rococo_grandpa.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_grandpa.rs similarity index 78% rename from cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa_bridge_rococo_grandpa.rs rename to cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_grandpa.rs index f646ddc3a387b1cf8a0b712e98a94c0dbea42492..b0634ff2ccf499687ed14b9a833a02ea29f38019 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa_bridge_rococo_grandpa.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_grandpa.rs @@ -17,27 +17,25 @@ //! Autogenerated weights for `pallet_bridge_grandpa` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-10-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=bridge-hub-rococo-dev -// --wasm-execution=compiled -// --pallet=pallet_bridge_grandpa -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_bridge_grandpa +// --chain=bridge-hub-rococo-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -66,17 +64,19 @@ impl pallet_bridge_grandpa::WeightInfo for WeightInfo Weight { // Proof Size summary in bytes: - // Measured: `231 + p * (60 ±0)` + // Measured: `268 + p * (60 ±0)` // Estimated: `51735` - // Minimum execution time: 258_708_000 picoseconds. - Weight::from_parts(36_816_946, 0) + // Minimum execution time: 304_726_000 picoseconds. + Weight::from_parts(16_868_060, 0) .saturating_add(Weight::from_parts(0, 51735)) - // Standard Error: 3_891 - .saturating_add(Weight::from_parts(55_186_206, 0).saturating_mul(p.into())) - // Standard Error: 64_911 - .saturating_add(Weight::from_parts(1_803_772, 0).saturating_mul(v.into())) + // Standard Error: 2_802 + .saturating_add(Weight::from_parts(55_200_017, 0).saturating_mul(p.into())) + // Standard Error: 46_745 + .saturating_add(Weight::from_parts(2_689_151, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(5)) } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_bridge_messages_bench_runtime_with_bridge_hub_rococo_messages_instance.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_messages.rs similarity index 54% rename from cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_bridge_messages_bench_runtime_with_bridge_hub_rococo_messages_instance.rs rename to cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_messages.rs index 7c25ae337ad6e731d97ed887bbf7e22e95354e0e..5d229497f3eb477912dec9304dbca1aed38b7881 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_bridge_messages_bench_runtime_with_bridge_hub_rococo_messages_instance.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_messages.rs @@ -17,27 +17,25 @@ //! Autogenerated weights for `pallet_bridge_messages` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=bridge-hub-rococo-dev -// --wasm-execution=compiled -// --pallet=pallet_bridge_messages -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_bridge_messages +// --chain=bridge-hub-rococo-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,160 +48,174 @@ use core::marker::PhantomData; /// Weight functions for `pallet_bridge_messages`. pub struct WeightInfo(PhantomData); impl pallet_bridge_messages::WeightInfo for WeightInfo { - /// Storage: `BridgeRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendToRococoMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendToRococoMessages::InboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendToRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn receive_single_message_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `367` + // Measured: `575` // Estimated: `52645` - // Minimum execution time: 43_473_000 picoseconds. - Weight::from_parts(44_789_000, 0) + // Minimum execution time: 42_332_000 picoseconds. 
+ Weight::from_parts(43_375_000, 0) .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `BridgeRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendToRococoMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendToRococoMessages::InboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendToRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn receive_two_messages_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `367` + // Measured: `575` // Estimated: `52645` - // Minimum execution time: 53_826_000 picoseconds. - Weight::from_parts(61_945_000, 0) + // Minimum execution time: 53_139_000 picoseconds. 
+ Weight::from_parts(54_236_000, 0) .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `BridgeRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendToRococoMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendToRococoMessages::InboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendToRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn receive_single_message_proof_with_outbound_lane_state() -> Weight { // Proof Size summary in bytes: - // Measured: `367` + // Measured: `575` // Estimated: `52645` - // Minimum execution time: 48_194_000 picoseconds. - Weight::from_parts(50_311_000, 0) + // Minimum execution time: 47_466_000 picoseconds. + Weight::from_parts(48_724_000, 0) .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `BridgeRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendToRococoMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendToRococoMessages::InboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendToRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) fn receive_single_message_proof_1_kb() -> Weight { // Proof Size summary in bytes: - // Measured: `335` + // Measured: `543` // Estimated: `52645` - // Minimum execution time: 40_783_000 picoseconds. 
- Weight::from_parts(43_019_000, 0) + // Minimum execution time: 40_962_000 picoseconds. + Weight::from_parts(42_002_000, 0) .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `BridgeRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendToRococoMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendToRococoMessages::InboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendToRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) fn receive_single_message_proof_16_kb() -> Weight { // Proof Size summary in bytes: - // Measured: `335` + // Measured: `543` // Estimated: `52645` - // Minimum execution time: 74_259_000 picoseconds. - Weight::from_parts(76_009_000, 0) + // Minimum execution time: 71_599_000 picoseconds. 
+ Weight::from_parts(74_307_000, 0) .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `BridgeRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendToRococoMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoMessages::OutboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendToRococoMessages::OutboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendToRococoMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) /// Storage: `BridgeRelayers::RelayerRewards` (r:1 w:1) /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn receive_delivery_proof_for_single_message() -> Weight { // Proof Size summary in bytes: - // Measured: `339` - // Estimated: `3804` - // Minimum execution time: 31_323_000 picoseconds. - Weight::from_parts(32_282_000, 0) - .saturating_add(Weight::from_parts(0, 3804)) + // Measured: `414` + // Estimated: `3879` + // Minimum execution time: 31_206_000 picoseconds. 
+ Weight::from_parts(32_045_000, 0) + .saturating_add(Weight::from_parts(0, 3879)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `BridgeRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendToRococoMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoMessages::OutboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendToRococoMessages::OutboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendToRococoMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) /// Storage: `BridgeRelayers::RelayerRewards` (r:1 w:1) /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { // Proof Size summary in bytes: - // Measured: `339` - // Estimated: `3804` - // Minimum execution time: 31_725_000 picoseconds. - Weight::from_parts(32_250_000, 0) - .saturating_add(Weight::from_parts(0, 3804)) + // Measured: `414` + // Estimated: `3879` + // Minimum execution time: 31_211_000 picoseconds. 
+ Weight::from_parts(32_171_000, 0) + .saturating_add(Weight::from_parts(0, 3879)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `BridgeRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendToRococoMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoMessages::OutboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendToRococoMessages::OutboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendToRococoMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) /// Storage: `BridgeRelayers::RelayerRewards` (r:2 w:2) /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { // Proof Size summary in bytes: - // Measured: `339` + // Measured: `414` // Estimated: `6086` - // Minimum execution time: 34_244_000 picoseconds. - Weight::from_parts(35_033_000, 0) + // Minimum execution time: 33_790_000 picoseconds. 
+ Weight::from_parts(34_708_000, 0) .saturating_add(Weight::from_parts(0, 6086)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `BridgeRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendToRococoMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendToRococoMessages::InboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendToRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -212,22 +224,22 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `i` is `[128, 2048]`. /// The range of component `i` is `[128, 2048]`. + /// The range of component `i` is `[128, 2048]`. + /// The range of component `i` is `[128, 2048]`. fn receive_single_message_proof_with_dispatch(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `635` + // Measured: `777` // Estimated: `52645` - // Minimum execution time: 172_521_000 picoseconds. - Weight::from_parts(173_742_000, 0) + // Minimum execution time: 61_938_000 picoseconds. 
+ Weight::from_parts(63_009_714, 0) .saturating_add(Weight::from_parts(0, 52645)) - // Standard Error: 3_678 - .saturating_add(Weight::from_parts(1_012_559, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().reads(9)) + // Standard Error: 23 + .saturating_add(Weight::from_parts(6_677, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains_bridge_parachains_bench_runtime_bridge_parachain_rococo_instance.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_parachains.rs similarity index 84% rename from cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains_bridge_parachains_bench_runtime_bridge_parachain_rococo_instance.rs rename to cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_parachains.rs index 147e8447ee87ed33740393299ffee3d8d92d772e..81cb0a66b7d277731a5d87f386301058d2634588 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains_bridge_parachains_bench_runtime_bridge_parachain_rococo_instance.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_parachains.rs @@ -17,27 +17,25 @@ //! Autogenerated weights for `pallet_bridge_parachains` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=bridge-hub-rococo-dev -// --wasm-execution=compiled -// --pallet=pallet_bridge_parachains -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_bridge_parachains +// --chain=bridge-hub-rococo-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -62,15 +60,14 @@ impl pallet_bridge_parachains::WeightInfo for WeightInf /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 2]`. /// The range of component `p` is `[1, 2]`. - fn submit_parachain_heads_with_n_parachains(p: u32, ) -> Weight { + /// The range of component `p` is `[1, 2]`. 
+ fn submit_parachain_heads_with_n_parachains(_p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `294` + // Measured: `367` // Estimated: `2543` - // Minimum execution time: 33_519_000 picoseconds. - Weight::from_parts(34_527_779, 0) + // Minimum execution time: 31_241_000 picoseconds. + Weight::from_parts(32_488_584, 0) .saturating_add(Weight::from_parts(0, 2543)) - // Standard Error: 45_887 - .saturating_add(Weight::from_parts(120_010, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -86,10 +83,10 @@ impl pallet_bridge_parachains::WeightInfo for WeightInf /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) fn submit_parachain_heads_with_1kb_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `294` + // Measured: `367` // Estimated: `2543` - // Minimum execution time: 35_140_000 picoseconds. - Weight::from_parts(35_801_000, 0) + // Minimum execution time: 32_962_000 picoseconds. + Weight::from_parts(33_658_000, 0) .saturating_add(Weight::from_parts(0, 2543)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) @@ -106,10 +103,10 @@ impl pallet_bridge_parachains::WeightInfo for WeightInf /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) fn submit_parachain_heads_with_16kb_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `294` + // Measured: `367` // Estimated: `2543` - // Minimum execution time: 67_110_000 picoseconds. - Weight::from_parts(67_951_000, 0) + // Minimum execution time: 62_685_000 picoseconds. + Weight::from_parts(64_589_000, 0) .saturating_add(Weight::from_parts(0, 2543)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_relayers.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_relayers.rs new file mode 100644 index 0000000000000000000000000000000000000000..fde670ab927ce8d64cb7d8a2146cd90954a8f903 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_relayers.rs @@ -0,0 +1,123 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_bridge_relayers` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_bridge_relayers +// --chain=bridge-hub-rococo-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_bridge_relayers`. +pub struct WeightInfo(PhantomData); +impl pallet_bridge_relayers::WeightInfo for WeightInfo { + /// Storage: `BridgeRelayers::RelayerRewards` (r:1 w:1) + /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn claim_rewards() -> Weight { + // Proof Size summary in bytes: + // Measured: `207` + // Estimated: `3593` + // Minimum execution time: 45_338_000 picoseconds. + Weight::from_parts(45_836_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `BridgeRelayers::RegisteredRelayers` (r:1 w:1) + /// Proof: `BridgeRelayers::RegisteredRelayers` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x1e8445dc201eeb8560e5579a5dd54655` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x1e8445dc201eeb8560e5579a5dd54655` (r:1 w:0) + /// Storage: `Balances::Reserves` (r:1 w:1) + /// Proof: `Balances::Reserves` (`max_values`: None, `max_size`: Some(1249), added: 3724, mode: `MaxEncodedLen`) + fn register() -> Weight { + // Proof Size summary in bytes: + // Measured: `61` + // Estimated: `4714` + // Minimum execution time: 23_561_000 picoseconds. + Weight::from_parts(24_012_000, 0) + .saturating_add(Weight::from_parts(0, 4714)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `BridgeRelayers::RegisteredRelayers` (r:1 w:1) + /// Proof: `BridgeRelayers::RegisteredRelayers` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) + /// Storage: `Balances::Reserves` (r:1 w:1) + /// Proof: `Balances::Reserves` (`max_values`: None, `max_size`: Some(1249), added: 3724, mode: `MaxEncodedLen`) + fn deregister() -> Weight { + // Proof Size summary in bytes: + // Measured: `160` + // Estimated: `4714` + // Minimum execution time: 25_133_000 picoseconds. 
+ Weight::from_parts(25_728_000, 0) + .saturating_add(Weight::from_parts(0, 4714)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `BridgeRelayers::RegisteredRelayers` (r:1 w:1) + /// Proof: `BridgeRelayers::RegisteredRelayers` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) + /// Storage: `Balances::Reserves` (r:1 w:1) + /// Proof: `Balances::Reserves` (`max_values`: None, `max_size`: Some(1249), added: 3724, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn slash_and_deregister() -> Weight { + // Proof Size summary in bytes: + // Measured: `263` + // Estimated: `4714` + // Minimum execution time: 27_356_000 picoseconds. + Weight::from_parts(27_828_000, 0) + .saturating_add(Weight::from_parts(0, 4714)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `BridgeRelayers::RelayerRewards` (r:1 w:1) + /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + fn register_relayer_reward() -> Weight { + // Proof Size summary in bytes: + // Measured: `6` + // Estimated: `3538` + // Minimum execution time: 2_955_000 picoseconds. + Weight::from_parts(3_084_000, 0) + .saturating_add(Weight::from_parts(0, 3538)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_collator_selection.rs new file mode 100644 index 0000000000000000000000000000000000000000..9dcee77082b99f586707a77a540cf6b13bd2be16 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_collator_selection.rs @@ -0,0 +1,249 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_collator_selection` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=bridge-hub-rococo-dev +// --wasm-execution=compiled +// --pallet=pallet_collator_selection +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_collator_selection`. +pub struct WeightInfo(PhantomData); +impl pallet_collator_selection::WeightInfo for WeightInfo { + /// Storage: `Session::NextKeys` (r:20 w:0) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `CollatorSelection::Invulnerables` (r:0 w:1) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// The range of component `b` is `[1, 20]`. + fn set_invulnerables(b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `196 + b * (79 ±0)` + // Estimated: `1187 + b * (2555 ±0)` + // Minimum execution time: 14_728_000 picoseconds. + Weight::from_parts(11_562_750, 0) + .saturating_add(Weight::from_parts(0, 1187)) + // Standard Error: 7_121 + .saturating_add(Weight::from_parts(3_300_884, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(Weight::from_parts(0, 2555).saturating_mul(b.into())) + } + /// Storage: `Session::NextKeys` (r:1 w:0) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `CollatorSelection::Invulnerables` (r:1 w:1) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::Candidates` (r:1 w:1) + /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `b` is `[1, 19]`. + /// The range of component `c` is `[1, 99]`. + fn add_invulnerable(b: u32, c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `757 + b * (32 ±0) + c * (53 ±0)` + // Estimated: `6287 + b * (37 ±0) + c * (53 ±0)` + // Minimum execution time: 47_549_000 picoseconds. 
+ Weight::from_parts(45_432_273, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 11_457 + .saturating_add(Weight::from_parts(216_469, 0).saturating_mul(b.into())) + // Standard Error: 2_171 + .saturating_add(Weight::from_parts(197_614, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(Weight::from_parts(0, 37).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(0, 53).saturating_mul(c.into())) + } + /// Storage: `CollatorSelection::Candidates` (r:1 w:0) + /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::Invulnerables` (r:1 w:1) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// The range of component `b` is `[5, 20]`. + fn remove_invulnerable(b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `82 + b * (32 ±0)` + // Estimated: `6287` + // Minimum execution time: 15_417_000 picoseconds. + Weight::from_parts(15_357_487, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 4_074 + .saturating_add(Weight::from_parts(187_410, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `CollatorSelection::DesiredCandidates` (r:0 w:1) + /// Proof: `CollatorSelection::DesiredCandidates` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn set_desired_candidates() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_407_000 picoseconds. + Weight::from_parts(7_657_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) + /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_514_000 picoseconds. 
+ Weight::from_parts(7_695_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `CollatorSelection::Candidates` (r:1 w:1) + /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::DesiredCandidates` (r:1 w:0) + /// Proof: `CollatorSelection::DesiredCandidates` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// Storage: `Session::NextKeys` (r:1 w:0) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `CollatorSelection::CandidacyBond` (r:1 w:0) + /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) + /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + /// The range of component `c` is `[1, 99]`. + fn register_as_candidate(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `740 + c * (52 ±0)` + // Estimated: `6287 + c * (54 ±0)` + // Minimum execution time: 41_711_000 picoseconds. + Weight::from_parts(45_690_780, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_800 + .saturating_add(Weight::from_parts(194_907, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(Weight::from_parts(0, 54).saturating_mul(c.into())) + } + /// Storage: `CollatorSelection::Candidates` (r:1 w:1) + /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) + /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + /// The range of component `c` is `[3, 100]`. + fn leave_intent(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `334 + c * (49 ±0)` + // Estimated: `6287` + // Minimum execution time: 33_901_000 picoseconds. + Weight::from_parts(35_875_905, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 1_968 + .saturating_add(Weight::from_parts(200_283, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn update_bond(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. 
+ Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn take_candidate_slot(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) + /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn note_author() -> Weight { + // Proof Size summary in bytes: + // Measured: `155` + // Estimated: `6196` + // Minimum execution time: 47_475_000 picoseconds. + Weight::from_parts(48_265_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `CollatorSelection::Candidates` (r:1 w:0) + /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LastAuthoredBlock` (r:100 w:0) + /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:97 w:97) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `r` is `[1, 100]`. + /// The range of component `c` is `[1, 100]`. + fn new_session(r: u32, c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `2263 + c * (97 ±0) + r * (115 ±0)` + // Estimated: `6287 + c * (2519 ±0) + r * (2603 ±0)` + // Minimum execution time: 16_907_000 picoseconds. 
+ Weight::from_parts(17_203_000, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 354_098 + .saturating_add(Weight::from_parts(15_341_462, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(c.into()))) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(c.into()))) + .saturating_add(Weight::from_parts(0, 2519).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(0, 2603).saturating_mul(r.into())) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_message_queue.rs new file mode 100644 index 0000000000000000000000000000000000000000..2fcd573ceb277116bda67180da8e0701593ab453 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_message_queue.rs @@ -0,0 +1,179 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_message_queue` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-03-24, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --chain +// bridge-hub-rococo-dev +// --pallet +// pallet_message_queue +// --extrinsic +// * +// --execution +// wasm +// --wasm-execution +// compiled +// --output +// parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::Weight}; +use sp_std::marker::PhantomData; + +/// Weight functions for `pallet_message_queue`. +pub struct WeightInfo(PhantomData); +impl pallet_message_queue::WeightInfo for WeightInfo { + /// Storage: MessageQueue ServiceHead (r:1 w:0) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: MessageQueue BookStateFor (r:2 w:2) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn ready_ring_knit() -> Weight { + // Proof Size summary in bytes: + // Measured: `189` + // Estimated: `7534` + // Minimum execution time: 11_446_000 picoseconds. 
+ Weight::from_parts(11_446_000, 0) + .saturating_add(Weight::from_parts(0, 7534)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: MessageQueue BookStateFor (r:2 w:2) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + fn ready_ring_unknit() -> Weight { + // Proof Size summary in bytes: + // Measured: `184` + // Estimated: `7534` + // Minimum execution time: 10_613_000 picoseconds. + Weight::from_parts(10_613_000, 0) + .saturating_add(Weight::from_parts(0, 7534)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn service_queue_base() -> Weight { + // Proof Size summary in bytes: + // Measured: `6` + // Estimated: `3517` + // Minimum execution time: 4_854_000 picoseconds. + Weight::from_parts(4_854_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn service_page_base_completion() -> Weight { + // Proof Size summary in bytes: + // Measured: `72` + // Estimated: `69050` + // Minimum execution time: 5_748_000 picoseconds. + Weight::from_parts(5_748_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn service_page_base_no_completion() -> Weight { + // Proof Size summary in bytes: + // Measured: `72` + // Estimated: `69050` + // Minimum execution time: 6_136_000 picoseconds. + Weight::from_parts(6_136_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn service_page_item() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 59_505_000 picoseconds. + Weight::from_parts(59_505_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: MessageQueue BookStateFor (r:1 w:0) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn bump_service_head() -> Weight { + // Proof Size summary in bytes: + // Measured: `99` + // Estimated: `5007` + // Minimum execution time: 6_506_000 picoseconds. 
+ Weight::from_parts(6_506_000, 0) + .saturating_add(Weight::from_parts(0, 5007)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn reap_page() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `72567` + // Minimum execution time: 40_646_000 picoseconds. + Weight::from_parts(40_646_000, 0) + .saturating_add(Weight::from_parts(0, 72567)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn execute_overweight_page_removed() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `72567` + // Minimum execution time: 51_424_000 picoseconds. + Weight::from_parts(51_424_000, 0) + .saturating_add(Weight::from_parts(0, 72567)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn execute_overweight_page_updated() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `72567` + // Minimum execution time: 81_153_000 picoseconds. + Weight::from_parts(81_153_000, 0) + .saturating_add(Weight::from_parts(0, 72567)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_multisig.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_multisig.rs new file mode 100644 index 0000000000000000000000000000000000000000..91840ae0c6d77cfb4507d5f7d7b8f2aca7c84b35 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_multisig.rs @@ -0,0 +1,165 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_multisig` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! 
DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=bridge-hub-rococo-dev +// --wasm-execution=compiled +// --pallet=pallet_multisig +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_multisig`. +pub struct WeightInfo(PhantomData); +impl pallet_multisig::WeightInfo for WeightInfo { + /// The range of component `z` is `[0, 10000]`. + fn as_multi_threshold_1(z: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 13_958_000 picoseconds. + Weight::from_parts(14_501_711, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 4 + .saturating_add(Weight::from_parts(626, 0).saturating_mul(z.into())) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// The range of component `s` is `[2, 100]`. + /// The range of component `z` is `[0, 10000]`. + fn as_multi_create(s: u32, z: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `263 + s * (2 ±0)` + // Estimated: `6811` + // Minimum execution time: 44_067_000 picoseconds. + Weight::from_parts(33_432_998, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 1_250 + .saturating_add(Weight::from_parts(131_851, 0).saturating_mul(s.into())) + // Standard Error: 12 + .saturating_add(Weight::from_parts(1_459, 0).saturating_mul(z.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// The range of component `s` is `[3, 100]`. + /// The range of component `z` is `[0, 10000]`. + fn as_multi_approve(s: u32, z: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `282` + // Estimated: `6811` + // Minimum execution time: 29_373_000 picoseconds. + Weight::from_parts(19_409_201, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 725 + .saturating_add(Weight::from_parts(110_824, 0).saturating_mul(s.into())) + // Standard Error: 7 + .saturating_add(Weight::from_parts(1_502, 0).saturating_mul(z.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `s` is `[2, 100]`. + /// The range of component `z` is `[0, 10000]`. 
+ fn as_multi_complete(s: u32, z: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `388 + s * (33 ±0)` + // Estimated: `6811` + // Minimum execution time: 49_724_000 picoseconds. + Weight::from_parts(34_153_321, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 1_376 + .saturating_add(Weight::from_parts(174_634, 0).saturating_mul(s.into())) + // Standard Error: 13 + .saturating_add(Weight::from_parts(1_753, 0).saturating_mul(z.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// The range of component `s` is `[2, 100]`. + fn approve_as_multi_create(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `263 + s * (2 ±0)` + // Estimated: `6811` + // Minimum execution time: 31_081_000 picoseconds. + Weight::from_parts(31_552_702, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 1_066 + .saturating_add(Weight::from_parts(135_081, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// The range of component `s` is `[2, 100]`. + fn approve_as_multi_approve(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `282` + // Estimated: `6811` + // Minimum execution time: 17_807_000 picoseconds. + Weight::from_parts(18_241_044, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 768 + .saturating_add(Weight::from_parts(112_957, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// The range of component `s` is `[2, 100]`. + fn cancel_as_multi(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `454 + s * (1 ±0)` + // Estimated: `6811` + // Minimum execution time: 32_421_000 picoseconds. + Weight::from_parts(32_554_061, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 1_157 + .saturating_add(Weight::from_parts(141_221, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_session.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_session.rs new file mode 100644 index 0000000000000000000000000000000000000000..c9d04f9c6df65892243220531ac9d42dc885702d --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_session.rs @@ -0,0 +1,81 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_session` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=bridge-hub-rococo-dev +// --wasm-execution=compiled +// --pallet=pallet_session +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_session`. +pub struct WeightInfo(PhantomData); +impl pallet_session::WeightInfo for WeightInfo { + /// Storage: `Session::NextKeys` (r:1 w:1) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Session::KeyOwner` (r:1 w:1) + /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn set_keys() -> Weight { + // Proof Size summary in bytes: + // Measured: `297` + // Estimated: `3762` + // Minimum execution time: 16_965_000 picoseconds. + Weight::from_parts(17_384_000, 0) + .saturating_add(Weight::from_parts(0, 3762)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Session::NextKeys` (r:1 w:1) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Session::KeyOwner` (r:0 w:1) + /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn purge_keys() -> Weight { + // Proof Size summary in bytes: + // Measured: `279` + // Estimated: `3744` + // Minimum execution time: 12_444_000 picoseconds. + Weight::from_parts(12_832_000, 0) + .saturating_add(Weight::from_parts(0, 3744)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_timestamp.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_timestamp.rs new file mode 100644 index 0000000000000000000000000000000000000000..0a5bf9b9f9c1f8c483c2e2f86ae471b28e7f609a --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_timestamp.rs @@ -0,0 +1,75 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. 
+ +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_timestamp` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=bridge-hub-rococo-dev +// --wasm-execution=compiled +// --pallet=pallet_timestamp +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_timestamp`. +pub struct WeightInfo(PhantomData); +impl pallet_timestamp::WeightInfo for WeightInfo { + /// Storage: `Timestamp::Now` (r:1 w:1) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Aura::CurrentSlot` (r:1 w:0) + /// Proof: `Aura::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + fn set() -> Weight { + // Proof Size summary in bytes: + // Measured: `85` + // Estimated: `1493` + // Minimum execution time: 9_231_000 picoseconds. + Weight::from_parts(9_595_000, 0) + .saturating_add(Weight::from_parts(0, 1493)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn on_finalize() -> Weight { + // Proof Size summary in bytes: + // Measured: `94` + // Estimated: `0` + // Minimum execution time: 3_869_000 picoseconds. + Weight::from_parts(4_041_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_utility.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_utility.rs new file mode 100644 index 0000000000000000000000000000000000000000..44cd0cf91e7998cb504587c481d13b110c9db072 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_utility.rs @@ -0,0 +1,102 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_utility` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=bridge-hub-rococo-dev +// --wasm-execution=compiled +// --pallet=pallet_utility +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_utility`. +pub struct WeightInfo(PhantomData); +impl pallet_utility::WeightInfo for WeightInfo { + /// The range of component `c` is `[0, 1000]`. + fn batch(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_831_000 picoseconds. + Weight::from_parts(12_945_569, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 1_949 + .saturating_add(Weight::from_parts(5_125_189, 0).saturating_mul(c.into())) + } + fn as_derivative() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 4_790_000 picoseconds. + Weight::from_parts(5_063_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// The range of component `c` is `[0, 1000]`. + fn batch_all(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_894_000 picoseconds. + Weight::from_parts(14_201_341, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 2_501 + .saturating_add(Weight::from_parts(5_466_047, 0).saturating_mul(c.into())) + } + fn dispatch_as() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 8_624_000 picoseconds. + Weight::from_parts(9_064_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// The range of component `c` is `[0, 1000]`. + fn force_batch(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_912_000 picoseconds. 
+ Weight::from_parts(9_228_121, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 1_601 + .saturating_add(Weight::from_parts(5_138_293, 0).saturating_mul(c.into())) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs new file mode 100644 index 0000000000000000000000000000000000000000..9f17d327024c4f792d0f61d4001067fece87f7f5 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs @@ -0,0 +1,316 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_xcm` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=bridge-hub-rococo-dev +// --wasm-execution=compiled +// --pallet=pallet_xcm +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_xcm`. +pub struct WeightInfo(PhantomData); +impl pallet_xcm::WeightInfo for WeightInfo { + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn send() -> Weight { + // Proof Size summary in bytes: + // Measured: `75` + // Estimated: `3540` + // Minimum execution time: 29_724_000 picoseconds. 
+ Weight::from_parts(30_440_000, 0) + .saturating_add(Weight::from_parts(0, 3540)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn teleport_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `32` + // Estimated: `1489` + // Minimum execution time: 26_779_000 picoseconds. + Weight::from_parts(27_249_000, 0) + .saturating_add(Weight::from_parts(0, 1489)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn reserve_transfer_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn execute() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn force_xcm_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_170_000 picoseconds. + Weight::from_parts(9_629_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:0 w:1) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn force_default_xcm_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_769_000 picoseconds. 
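// Hedged aside (illustrative sketch, not taken from the generated output): each entry
// carries two dimensions, `ref_time` (picoseconds) and `proof_size` (the "Estimated"
// PoV bytes), and storage traffic is charged on top through the runtime's `DbWeight`.
// Folding the two `from_parts` calls of `send()` together, and using the stock RocksDB
// constant purely for illustration in place of whatever `T::DbWeight` resolves to:
use frame_support::weights::{constants::RocksDbWeight, Weight};

fn send_weight_example() -> Weight {
	Weight::from_parts(30_440_000, 3_540) // measured ref_time + estimated proof size
		.saturating_add(RocksDbWeight::get().reads(5)) // 5 storage reads
		.saturating_add(RocksDbWeight::get().writes(2)) // 2 storage writes
}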
+ Weight::from_parts(2_933_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn force_subscribe_version_notify() -> Weight { + // Proof Size summary in bytes: + // Measured: `75` + // Estimated: `3540` + // Minimum execution time: 34_547_000 picoseconds. + Weight::from_parts(35_653_000, 0) + .saturating_add(Weight::from_parts(0, 3540)) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn force_unsubscribe_version_notify() -> Weight { + // Proof Size summary in bytes: + // Measured: `292` + // Estimated: `3757` + // Minimum execution time: 36_274_000 picoseconds. 
+ Weight::from_parts(37_281_000, 0) + .saturating_add(Weight::from_parts(0, 3757)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) + /// Proof: `PolkadotXcm::XcmExecutionSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn force_suspension() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_749_000 picoseconds. + Weight::from_parts(2_917_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PolkadotXcm::SupportedVersion` (r:4 w:2) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn migrate_supported_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `187` + // Estimated: `11077` + // Minimum execution time: 17_649_000 picoseconds. + Weight::from_parts(17_964_000, 0) + .saturating_add(Weight::from_parts(0, 11077)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::VersionNotifiers` (r:4 w:2) + /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn migrate_version_notifiers() -> Weight { + // Proof Size summary in bytes: + // Measured: `191` + // Estimated: `11081` + // Minimum execution time: 17_551_000 picoseconds. + Weight::from_parts(18_176_000, 0) + .saturating_add(Weight::from_parts(0, 11081)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn already_notified_target() -> Weight { + // Proof Size summary in bytes: + // Measured: `198` + // Estimated: `13563` + // Minimum execution time: 19_261_000 picoseconds. + Weight::from_parts(19_714_000, 0) + .saturating_add(Weight::from_parts(0, 13563)) + .saturating_add(T::DbWeight::get().reads(5)) + } + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn notify_current_targets() -> Weight { + // Proof Size summary in bytes: + // Measured: `142` + // Estimated: `6082` + // Minimum execution time: 31_630_000 picoseconds. 
+ Weight::from_parts(32_340_000, 0) + .saturating_add(Weight::from_parts(0, 6082)) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn notify_target_migration_fail() -> Weight { + // Proof Size summary in bytes: + // Measured: `172` + // Estimated: `8587` + // Minimum execution time: 9_218_000 picoseconds. + Weight::from_parts(9_558_000, 0) + .saturating_add(Weight::from_parts(0, 8587)) + .saturating_add(T::DbWeight::get().reads(3)) + } + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn migrate_version_notify_targets() -> Weight { + // Proof Size summary in bytes: + // Measured: `198` + // Estimated: `11088` + // Minimum execution time: 18_133_000 picoseconds. + Weight::from_parts(18_663_000, 0) + .saturating_add(Weight::from_parts(0, 11088)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn migrate_and_notify_old_targets() -> Weight { + // Proof Size summary in bytes: + // Measured: `204` + // Estimated: `11094` + // Minimum execution time: 38_878_000 picoseconds. + Weight::from_parts(39_779_000, 0) + .saturating_add(Weight::from_parts(0, 11094)) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn new_query() -> Weight { + // Proof Size summary in bytes: + // Measured: `32` + // Estimated: `1517` + // Minimum execution time: 4_142_000 picoseconds. + Weight::from_parts(4_308_000, 0) + .saturating_add(Weight::from_parts(0, 1517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::Queries` (r:1 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn take_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `7669` + // Estimated: `11134` + // Minimum execution time: 25_814_000 picoseconds. 
+ Weight::from_parts(26_213_000, 0) + .saturating_add(Weight::from_parts(0, 11134)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} + diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/paritydb_weights.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/paritydb_weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..25679703831a13b8d1bb7fb7dd4d92fa84b1f255 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/paritydb_weights.rs @@ -0,0 +1,63 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod constants { + use frame_support::{ + parameter_types, + weights::{constants, RuntimeDbWeight}, + }; + + parameter_types! { + /// `ParityDB` can be enabled with a feature flag, but is still experimental. These weights + /// are available for brave runtime engineers who may want to try this out as default. + pub const ParityDbWeight: RuntimeDbWeight = RuntimeDbWeight { + read: 8_000 * constants::WEIGHT_REF_TIME_PER_NANOS, + write: 50_000 * constants::WEIGHT_REF_TIME_PER_NANOS, + }; + } + + #[cfg(test)] + mod test_db_weights { + use super::constants::ParityDbWeight as W; + use frame_support::weights::constants; + + /// Checks that all weights exist and have sane values. + // NOTE: If this test fails but you are sure that the generated values are fine, + // you can delete it. + #[test] + fn sane() { + // At least 1 µs. + assert!( + W::get().reads(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, + "Read weight should be at least 1 µs." + ); + assert!( + W::get().writes(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, + "Write weight should be at least 1 µs." + ); + // At most 1 ms. + assert!( + W::get().reads(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, + "Read weight should be at most 1 ms." + ); + assert!( + W::get().writes(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, + "Write weight should be at most 1 ms." + ); + } + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/rocksdb_weights.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/rocksdb_weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..3dd817aa6f137085b0e5fdf2b11b7f50e5c8b002 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/rocksdb_weights.rs @@ -0,0 +1,63 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod constants { + use frame_support::{ + parameter_types, + weights::{constants, RuntimeDbWeight}, + }; + + parameter_types! { + /// By default, Substrate uses `RocksDB`, so this will be the weight used throughout + /// the runtime. + pub const RocksDbWeight: RuntimeDbWeight = RuntimeDbWeight { + read: 25_000 * constants::WEIGHT_REF_TIME_PER_NANOS, + write: 100_000 * constants::WEIGHT_REF_TIME_PER_NANOS, + }; + } + + #[cfg(test)] + mod test_db_weights { + use super::constants::RocksDbWeight as W; + use frame_support::weights::constants; + + /// Checks that all weights exist and have sane values. + // NOTE: If this test fails but you are sure that the generated values are fine, + // you can delete it. + #[test] + fn sane() { + // At least 1 µs. + assert!( + W::get().reads(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, + "Read weight should be at least 1 µs." + ); + assert!( + W::get().writes(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, + "Write weight should be at least 1 µs." + ); + // At most 1 ms. + assert!( + W::get().reads(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, + "Read weight should be at most 1 ms." + ); + assert!( + W::get().writes(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, + "Write weight should be at most 1 ms." + ); + } + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..7269fa84f84a31943af3999298b16e49fddc2a8a --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/mod.rs @@ -0,0 +1,247 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . 
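// Hedged aside (arithmetic illustration only, mirroring the constants above): the
// RocksDB entries put one read at 25_000 ns = 25 µs of ref_time and one write at
// 100_000 ns = 100 µs, both inside the [1 µs, 1 ms] window asserted by the `sane` test.
use frame_support::weights::constants::{WEIGHT_REF_TIME_PER_MICROS, WEIGHT_REF_TIME_PER_NANOS};

fn rocksdb_constants_in_micros() {
	assert_eq!(25_000 * WEIGHT_REF_TIME_PER_NANOS, 25 * WEIGHT_REF_TIME_PER_MICROS);
	assert_eq!(100_000 * WEIGHT_REF_TIME_PER_NANOS, 100 * WEIGHT_REF_TIME_PER_MICROS);
}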
+ +mod pallet_xcm_benchmarks_fungible; +mod pallet_xcm_benchmarks_generic; + +use crate::{xcm_config::MaxAssetsIntoHolding, Runtime}; +use codec::Encode; +use frame_support::weights::Weight; +use pallet_xcm_benchmarks_fungible::WeightInfo as XcmFungibleWeight; +use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric; +use sp_std::prelude::*; +use xcm::{latest::prelude::*, DoubleEncoded}; + +trait WeighMultiAssets { + fn weigh_multi_assets(&self, weight: Weight) -> Weight; +} + +const MAX_ASSETS: u64 = 100; + +impl WeighMultiAssets for MultiAssetFilter { + fn weigh_multi_assets(&self, weight: Weight) -> Weight { + match self { + Self::Definite(assets) => weight.saturating_mul(assets.inner().iter().count() as u64), + Self::Wild(asset) => match asset { + All => weight.saturating_mul(MAX_ASSETS), + AllOf { fun, .. } => match fun { + WildFungibility::Fungible => weight, + // Magic number 2 has to do with the fact that we could have up to 2 times + // MaxAssetsIntoHolding in the worst-case scenario. + WildFungibility::NonFungible => + weight.saturating_mul((MaxAssetsIntoHolding::get() * 2) as u64), + }, + AllCounted(count) => weight.saturating_mul(MAX_ASSETS.min(*count as u64)), + AllOfCounted { count, .. } => weight.saturating_mul(MAX_ASSETS.min(*count as u64)), + }, + } + } +} + +impl WeighMultiAssets for MultiAssets { + fn weigh_multi_assets(&self, weight: Weight) -> Weight { + weight.saturating_mul(self.inner().iter().count() as u64) + } +} + +pub struct BridgeHubWestendXcmWeight(core::marker::PhantomData); +impl XcmWeightInfo for BridgeHubWestendXcmWeight { + fn withdraw_asset(assets: &MultiAssets) -> Weight { + assets.weigh_multi_assets(XcmFungibleWeight::::withdraw_asset()) + } + fn reserve_asset_deposited(assets: &MultiAssets) -> Weight { + assets.weigh_multi_assets(XcmFungibleWeight::::reserve_asset_deposited()) + } + fn receive_teleported_asset(assets: &MultiAssets) -> Weight { + assets.weigh_multi_assets(XcmFungibleWeight::::receive_teleported_asset()) + } + fn query_response( + _query_id: &u64, + _response: &Response, + _max_weight: &Weight, + _querier: &Option, + ) -> Weight { + XcmGeneric::::query_response() + } + fn transfer_asset(assets: &MultiAssets, _dest: &MultiLocation) -> Weight { + assets.weigh_multi_assets(XcmFungibleWeight::::transfer_asset()) + } + fn transfer_reserve_asset( + assets: &MultiAssets, + _dest: &MultiLocation, + _xcm: &Xcm<()>, + ) -> Weight { + assets.weigh_multi_assets(XcmFungibleWeight::::transfer_reserve_asset()) + } + fn transact( + _origin_type: &OriginKind, + _require_weight_at_most: &Weight, + _call: &DoubleEncoded, + ) -> Weight { + XcmGeneric::::transact() + } + fn hrmp_new_channel_open_request( + _sender: &u32, + _max_message_size: &u32, + _max_capacity: &u32, + ) -> Weight { + // XCM Executor does not currently support HRMP channel operations + Weight::MAX + } + fn hrmp_channel_accepted(_recipient: &u32) -> Weight { + // XCM Executor does not currently support HRMP channel operations + Weight::MAX + } + fn hrmp_channel_closing(_initiator: &u32, _sender: &u32, _recipient: &u32) -> Weight { + // XCM Executor does not currently support HRMP channel operations + Weight::MAX + } + fn clear_origin() -> Weight { + XcmGeneric::::clear_origin() + } + fn descend_origin(_who: &InteriorMultiLocation) -> Weight { + XcmGeneric::::descend_origin() + } + fn report_error(_query_response_info: &QueryResponseInfo) -> Weight { + XcmGeneric::::report_error() + } + + fn deposit_asset(assets: &MultiAssetFilter, _dest: &MultiLocation) -> Weight { + 
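// Hedged aside on the scaling rule implemented by `WeighMultiAssets` above: a
// `Definite` filter is charged once per listed asset, while a fully wild filter is
// charged for the `MAX_ASSETS` (100) worst case. `per_asset` here is a stand-in for
// whichever fungible benchmark entry is being scaled; sketch only.
use frame_support::weights::Weight;

fn definite_filter_cost(per_asset: Weight, listed_assets: u64) -> Weight {
	per_asset.saturating_mul(listed_assets)
}

fn wild_all_cost(per_asset: Weight) -> Weight {
	per_asset.saturating_mul(100) // the MAX_ASSETS bound used above
}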
assets.weigh_multi_assets(XcmFungibleWeight::::deposit_asset()) + } + fn deposit_reserve_asset( + assets: &MultiAssetFilter, + _dest: &MultiLocation, + _xcm: &Xcm<()>, + ) -> Weight { + assets.weigh_multi_assets(XcmFungibleWeight::::deposit_reserve_asset()) + } + fn exchange_asset(_give: &MultiAssetFilter, _receive: &MultiAssets, _maximal: &bool) -> Weight { + Weight::MAX + } + fn initiate_reserve_withdraw( + assets: &MultiAssetFilter, + _reserve: &MultiLocation, + _xcm: &Xcm<()>, + ) -> Weight { + assets.weigh_multi_assets(XcmFungibleWeight::::initiate_reserve_withdraw()) + } + fn initiate_teleport( + assets: &MultiAssetFilter, + _dest: &MultiLocation, + _xcm: &Xcm<()>, + ) -> Weight { + assets.weigh_multi_assets(XcmFungibleWeight::::initiate_teleport()) + } + fn report_holding(_response_info: &QueryResponseInfo, _assets: &MultiAssetFilter) -> Weight { + XcmGeneric::::report_holding() + } + fn buy_execution(_fees: &MultiAsset, _weight_limit: &WeightLimit) -> Weight { + XcmGeneric::::buy_execution() + } + fn refund_surplus() -> Weight { + XcmGeneric::::refund_surplus() + } + fn set_error_handler(_xcm: &Xcm) -> Weight { + XcmGeneric::::set_error_handler() + } + fn set_appendix(_xcm: &Xcm) -> Weight { + XcmGeneric::::set_appendix() + } + fn clear_error() -> Weight { + XcmGeneric::::clear_error() + } + fn claim_asset(_assets: &MultiAssets, _ticket: &MultiLocation) -> Weight { + XcmGeneric::::claim_asset() + } + fn trap(_code: &u64) -> Weight { + XcmGeneric::::trap() + } + fn subscribe_version(_query_id: &QueryId, _max_response_weight: &Weight) -> Weight { + XcmGeneric::::subscribe_version() + } + fn unsubscribe_version() -> Weight { + XcmGeneric::::unsubscribe_version() + } + fn burn_asset(assets: &MultiAssets) -> Weight { + assets.weigh_multi_assets(XcmGeneric::::burn_asset()) + } + fn expect_asset(assets: &MultiAssets) -> Weight { + assets.weigh_multi_assets(XcmGeneric::::expect_asset()) + } + fn expect_origin(_origin: &Option) -> Weight { + XcmGeneric::::expect_origin() + } + fn expect_error(_error: &Option<(u32, XcmError)>) -> Weight { + XcmGeneric::::expect_error() + } + fn expect_transact_status(_transact_status: &MaybeErrorCode) -> Weight { + XcmGeneric::::expect_transact_status() + } + fn query_pallet(_module_name: &Vec, _response_info: &QueryResponseInfo) -> Weight { + XcmGeneric::::query_pallet() + } + fn expect_pallet( + _index: &u32, + _name: &Vec, + _module_name: &Vec, + _crate_major: &u32, + _min_crate_minor: &u32, + ) -> Weight { + XcmGeneric::::expect_pallet() + } + fn report_transact_status(_response_info: &QueryResponseInfo) -> Weight { + XcmGeneric::::report_transact_status() + } + fn clear_transact_status() -> Weight { + XcmGeneric::::clear_transact_status() + } + fn universal_origin(_: &Junction) -> Weight { + Weight::MAX + } + fn export_message(_: &NetworkId, _: &Junctions, inner: &Xcm<()>) -> Weight { + let inner_encoded_len = inner.encode().len() as u32; + XcmGeneric::::export_message(inner_encoded_len) + } + fn lock_asset(_: &MultiAsset, _: &MultiLocation) -> Weight { + Weight::MAX + } + fn unlock_asset(_: &MultiAsset, _: &MultiLocation) -> Weight { + Weight::MAX + } + fn note_unlockable(_: &MultiAsset, _: &MultiLocation) -> Weight { + Weight::MAX + } + fn request_unlock(_: &MultiAsset, _: &MultiLocation) -> Weight { + Weight::MAX + } + fn set_fees_mode(_: &bool) -> Weight { + XcmGeneric::::set_fees_mode() + } + fn set_topic(_topic: &[u8; 32]) -> Weight { + XcmGeneric::::set_topic() + } + fn clear_topic() -> Weight { + XcmGeneric::::clear_topic() + } + fn 
alias_origin(_: &MultiLocation) -> Weight { + // XCM Executor does not currently support alias origin operations + Weight::MAX + } + fn unpaid_execution(_: &WeightLimit, _: &Option) -> Weight { + XcmGeneric::::unpaid_execution() + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs new file mode 100644 index 0000000000000000000000000000000000000000..295abd481d7dc6decd39032bf6b0972cdc7ccf20 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs @@ -0,0 +1,208 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_xcm_benchmarks::fungible` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm_benchmarks::fungible +// --chain=bridge-hub-rococo-dev +// --header=./cumulus/file_header.txt +// --template=./cumulus/templates/xcm-bench-template.hbs +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::Weight}; +use sp_std::marker::PhantomData; + +/// Weights for `pallet_xcm_benchmarks::fungible`. +pub struct WeightInfo(PhantomData); +impl WeightInfo { + // Storage: `System::Account` (r:1 w:1) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + pub fn withdraw_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `101` + // Estimated: `3593` + // Minimum execution time: 19_037_000 picoseconds. + Weight::from_parts(19_602_000, 3593) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + pub fn transfer_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `153` + // Estimated: `6196` + // Minimum execution time: 43_115_000 picoseconds. 
+ Weight::from_parts(43_897_000, 6196) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + // Storage: `System::Account` (r:3 w:3) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) + // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn transfer_reserve_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `294` + // Estimated: `8799` + // Minimum execution time: 90_267_000 picoseconds. + Weight::from_parts(91_460_000, 8799) + .saturating_add(T::DbWeight::get().reads(11)) + .saturating_add(T::DbWeight::get().writes(5)) + } + // Storage: `Benchmark::Override` (r:0 w:0) + // Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + pub fn reserve_asset_deposited() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. 
+ Weight::from_parts(18_446_744_073_709_551_000, 0) + } + // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) + // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn initiate_reserve_withdraw() -> Weight { + // Proof Size summary in bytes: + // Measured: `242` + // Estimated: `6196` + // Minimum execution time: 60_477_000 picoseconds. + Weight::from_parts(61_314_000, 6196) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().writes(4)) + } + pub fn receive_teleported_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_996_000 picoseconds. + Weight::from_parts(3_107_000, 0) + } + // Storage: `System::Account` (r:1 w:1) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + pub fn deposit_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `52` + // Estimated: `3593` + // Minimum execution time: 18_907_000 picoseconds. 
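// Hedged aside on the sentinel above: entries gated on the `Benchmark::Override` key
// are not real measurements; the generator emits an effectively unpayable figure
// instead. The printed value is u64::MAX rounded down to the nearest thousand:
fn override_sentinel_arithmetic() {
	assert_eq!(u64::MAX, 18_446_744_073_709_551_615);
	assert_eq!((u64::MAX / 1_000) * 1_000, 18_446_744_073_709_551_000);
}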
+ Weight::from_parts(19_475_000, 3593) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) + // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn deposit_reserve_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `193` + // Estimated: `6196` + // Minimum execution time: 59_143_000 picoseconds. 
+ Weight::from_parts(60_316_000, 6196) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().writes(4)) + } + // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) + // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:1 w:1) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn initiate_teleport() -> Weight { + // Proof Size summary in bytes: + // Measured: `141` + // Estimated: `3606` + // Minimum execution time: 44_459_000 picoseconds. + Weight::from_parts(45_365_000, 3606) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(3)) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs new file mode 100644 index 0000000000000000000000000000000000000000..7c686190208fd2fec4a00b7b34ef25038b4815b1 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -0,0 +1,377 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_xcm_benchmarks::generic` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm_benchmarks::generic +// --chain=bridge-hub-rococo-dev +// --header=./cumulus/file_header.txt +// --template=./cumulus/templates/xcm-bench-template.hbs +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::Weight}; +use sp_std::marker::PhantomData; + +/// Weights for `pallet_xcm_benchmarks::generic`. +pub struct WeightInfo(PhantomData); +impl WeightInfo { + // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) + // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn report_holding() -> Weight { + // Proof Size summary in bytes: + // Measured: `242` + // Estimated: `6196` + // Minimum execution time: 62_732_000 picoseconds. + Weight::from_parts(64_581_000, 6196) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().writes(4)) + } + pub fn buy_execution() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_987_000 picoseconds. + Weight::from_parts(2_107_000, 0) + } + // Storage: `PolkadotXcm::Queries` (r:1 w:0) + // Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + pub fn query_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `3568` + // Minimum execution time: 8_098_000 picoseconds. + Weight::from_parts(8_564_000, 3568) + .saturating_add(T::DbWeight::get().reads(1)) + } + pub fn transact() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 8_539_000 picoseconds. 
+ Weight::from_parts(9_085_000, 0) + } + pub fn refund_surplus() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_205_000 picoseconds. + Weight::from_parts(2_369_000, 0) + } + pub fn set_error_handler() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_828_000 picoseconds. + Weight::from_parts(1_994_000, 0) + } + pub fn set_appendix() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_869_000 picoseconds. + Weight::from_parts(1_946_000, 0) + } + pub fn clear_error() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_842_000 picoseconds. + Weight::from_parts(1_949_000, 0) + } + pub fn descend_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_460_000 picoseconds. + Weight::from_parts(2_593_000, 0) + } + pub fn clear_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_868_000 picoseconds. + Weight::from_parts(2_003_000, 0) + } + // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) + // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn report_error() -> Weight { + // Proof Size summary in bytes: + // Measured: `242` + // Estimated: `6196` + // Minimum execution time: 56_813_000 picoseconds. + Weight::from_parts(57_728_000, 6196) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().writes(4)) + } + // Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) + // Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) + pub fn claim_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `160` + // Estimated: `3625` + // Minimum execution time: 11_364_000 picoseconds. 
+ Weight::from_parts(11_872_000, 3625) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + pub fn trap() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_821_000 picoseconds. + Weight::from_parts(1_936_000, 0) + } + // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) + // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn subscribe_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `109` + // Estimated: `3574` + // Minimum execution time: 23_081_000 picoseconds. + Weight::from_parts(23_512_000, 3574) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(3)) + } + // Storage: `PolkadotXcm::VersionNotifyTargets` (r:0 w:1) + // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + pub fn unsubscribe_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_747_000 picoseconds. + Weight::from_parts(4_068_000, 0) + .saturating_add(T::DbWeight::get().writes(1)) + } + pub fn burn_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_045_000 picoseconds. + Weight::from_parts(3_208_000, 0) + } + pub fn expect_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_962_000 picoseconds. + Weight::from_parts(2_284_000, 0) + } + pub fn expect_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_951_000 picoseconds. + Weight::from_parts(2_026_000, 0) + } + pub fn expect_error() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_837_000 picoseconds. + Weight::from_parts(2_084_000, 0) + } + pub fn expect_transact_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_042_000 picoseconds. 
+ Weight::from_parts(2_145_000, 0) + } + // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) + // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn query_pallet() -> Weight { + // Proof Size summary in bytes: + // Measured: `242` + // Estimated: `6196` + // Minimum execution time: 61_350_000 picoseconds. + Weight::from_parts(62_440_000, 6196) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().writes(4)) + } + pub fn expect_pallet() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 4_993_000 picoseconds. 
+ Weight::from_parts(5_309_000, 0) + } + // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) + // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn report_transact_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `242` + // Estimated: `6196` + // Minimum execution time: 57_133_000 picoseconds. + Weight::from_parts(58_100_000, 6196) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().writes(4)) + } + pub fn clear_transact_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_899_000 picoseconds. + Weight::from_parts(2_153_000, 0) + } + pub fn set_topic() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_880_000 picoseconds. + Weight::from_parts(1_960_000, 0) + } + pub fn clear_topic() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_825_000 picoseconds. 
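// Hedged composition example using figures from these tables (upper bounds only; the
// runtime's actual weigher is `WeightInfoBounds`, and the DbWeight components of the
// asset-bearing instructions are omitted here for brevity):
use frame_support::weights::Weight;

fn four_instruction_program_weight() -> Weight {
	let withdraw = Weight::from_parts(19_602_000, 3_593); // fungible::withdraw_asset, one asset
	let clear_origin = Weight::from_parts(2_003_000, 0);
	let buy_execution = Weight::from_parts(2_107_000, 0);
	let deposit = Weight::from_parts(19_475_000, 3_593); // fungible::deposit_asset, one asset
	withdraw
		.saturating_add(clear_origin)
		.saturating_add(buy_execution)
		.saturating_add(deposit)
}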
+ Weight::from_parts(1_960_000, 0) + } + // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) + // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `BridgeRococoToWococoMessages::PalletOperatingMode` (r:1 w:0) + // Proof: `BridgeRococoToWococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + // Storage: `BridgeRococoToWococoMessages::OutboundLanes` (r:1 w:1) + // Proof: `BridgeRococoToWococoMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) + // Storage: `BridgeRococoToWococoMessages::OutboundLanesCongestedSignals` (r:1 w:0) + // Proof: `BridgeRococoToWococoMessages::OutboundLanesCongestedSignals` (`max_values`: Some(1), `max_size`: Some(21), added: 516, mode: `MaxEncodedLen`) + // Storage: `BridgeRococoToWococoMessages::OutboundMessages` (r:0 w:1) + // Proof: `BridgeRococoToWococoMessages::OutboundMessages` (`max_values`: None, `max_size`: Some(2621472), added: 2623947, mode: `MaxEncodedLen`) + /// The range of component `x` is `[1, 1000]`. + pub fn export_message(x: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `139` + // Estimated: `3604` + // Minimum execution time: 28_419_000 picoseconds. + Weight::from_parts(29_387_791, 3604) + // Standard Error: 552 + .saturating_add(Weight::from_parts(316_277, 0).saturating_mul(x.into())) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(2)) + } + pub fn set_fees_mode() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_903_000 picoseconds. + Weight::from_parts(2_023_000, 0) + } + pub fn unpaid_execution() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_963_000 picoseconds. + Weight::from_parts(2_143_000, 0) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs new file mode 100644 index 0000000000000000000000000000000000000000..c89ee91c5e447db55841ca6c82816823bb82b106 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs @@ -0,0 +1,324 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . 
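// Hedged reading of the `export_message(x)` entry above: the cost grows linearly with
// the encoded byte length of the inner (bridged) message, on top of a base cost and
// the 5 reads / 2 writes of outbound-lane bookkeeping. `db` stands in for the
// runtime's configured `DbWeight`; sketch only.
use frame_support::weights::{RuntimeDbWeight, Weight};

fn export_message_cost(db: RuntimeDbWeight, encoded_len: u32) -> Weight {
	Weight::from_parts(29_387_791, 3_604)
		.saturating_add(Weight::from_parts(316_277, 0).saturating_mul(encoded_len as u64))
		.saturating_add(db.reads(5))
		.saturating_add(db.writes(2))
}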
+ +use super::{ + AccountId, AllPalletsWithSystem, Balances, BaseDeliveryFee, FeeAssetId, ParachainInfo, + ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, + TransactionByteFee, WeightToFee, XcmpQueue, +}; +use crate::bridge_common_config::{DeliveryRewardInBalance, RequiredStakeForStakeAndSlash}; +use frame_support::{ + match_types, parameter_types, + traits::{ConstU32, Contains, Equals, Everything, Nothing}, +}; +use frame_system::EnsureRoot; +use pallet_xcm::XcmPassthrough; +use parachains_common::{ + impls::ToStakingPot, + xcm_config::{ConcreteAssetFromSystem, RelayOrOtherSystemParachains}, + TREASURY_PALLET_ID, +}; +use polkadot_parachain_primitives::primitives::Sibling; +use polkadot_runtime_common::xcm_sender::ExponentialPrice; +use sp_runtime::traits::AccountIdConversion; +use westend_runtime_constants::system_parachain; +use xcm::latest::prelude::*; +use xcm_builder::{ + AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, + AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, CurrencyAdapter, + DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, IsConcrete, ParentAsSuperuser, + ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, + SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, + TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, + XcmFeeManagerFromComponents, XcmFeeToAccount, +}; +use xcm_executor::{traits::WithOriginFilter, XcmExecutor}; + +parameter_types! { + pub const WestendLocation: MultiLocation = MultiLocation::parent(); + pub const RelayNetwork: NetworkId = NetworkId::Westend; + pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); + pub UniversalLocation: InteriorMultiLocation = + X2(GlobalConsensus(RelayNetwork::get()), Parachain(ParachainInfo::parachain_id().into())); + pub const MaxInstructions: u32 = 100; + pub const MaxAssetsIntoHolding: u32 = 64; + pub TreasuryAccount: AccountId = TREASURY_PALLET_ID.into_account_truncating(); + pub RelayTreasuryLocation: MultiLocation = (Parent, PalletInstance(westend_runtime_constants::TREASURY_PALLET_ID)).into(); +} + +/// Type for specifying how a `MultiLocation` can be converted into an `AccountId`. This is used +/// when determining ownership of accounts for asset transacting and when attempting to use XCM +/// `Transact` in order to determine the dispatch Origin. +pub type LocationToAccountId = ( + // The parent (Relay-chain) origin converts to the parent `AccountId`. + ParentIsPreset, + // Sibling parachain origins convert to AccountId via the `ParaId::into`. + SiblingParachainConvertsVia, + // Straight up local `AccountId32` origins just alias directly to `AccountId`. + AccountId32Aliases, +); + +/// Means for transacting the native currency on this chain. +pub type CurrencyTransactor = CurrencyAdapter< + // Use this currency: + Balances, + // Use this currency when it is a fungible asset matching the given location or name: + IsConcrete, + // Do a simple punn to convert an AccountId32 MultiLocation into a native chain account ID: + LocationToAccountId, + // Our chain's account ID type (we can't get away without mentioning it explicitly): + AccountId, + // We don't track any teleports of `Balances`. + (), +>; + +/// This is the type we use to convert an (incoming) XCM origin into a local `Origin` instance, +/// ready for dispatching a transaction with Xcm's `Transact`. 
There is an `OriginKind` which can +/// biases the kind of local `Origin` it will become. +pub type XcmOriginToTransactDispatchOrigin = ( + // Sovereign account converter; this attempts to derive an `AccountId` from the origin location + // using `LocationToAccountId` and then turn that into the usual `Signed` origin. Useful for + // foreign chains who want to have a local sovereign account on this chain which they control. + SovereignSignedViaLocation, + // Native converter for Relay-chain (Parent) location; will convert to a `Relay` origin when + // recognized. + RelayChainAsNative, + // Native converter for sibling Parachains; will convert to a `SiblingPara` origin when + // recognized. + SiblingParachainAsNative, + // Superuser converter for the Relay-chain (Parent) location. This will allow it to issue a + // transaction from the Root origin. + ParentAsSuperuser, + // Native signed account converter; this just converts an `AccountId32` origin into a normal + // `RuntimeOrigin::Signed` origin of the same 32-byte value. + SignedAccountId32AsNative, + // Xcm origins can be represented natively under the Xcm pallet's Xcm origin. + XcmPassthrough, +); + +match_types! { + pub type ParentOrParentsPlurality: impl Contains = { + MultiLocation { parents: 1, interior: Here } | + MultiLocation { parents: 1, interior: X1(Plurality { .. }) } + }; + pub type ParentOrSiblings: impl Contains = { + MultiLocation { parents: 1, interior: Here } | + MultiLocation { parents: 1, interior: X1(_) } + }; +} + +/// A call filter for the XCM Transact instruction. This is a temporary measure until we properly +/// account for proof size weights. +/// +/// Calls that are allowed through this filter must: +/// 1. Have a fixed weight; +/// 2. Cannot lead to another call being made; +/// 3. Have a defined proof size weight, e.g. no unbounded vecs in call parameters. +pub struct SafeCallFilter; +impl Contains for SafeCallFilter { + fn contains(call: &RuntimeCall) -> bool { + #[cfg(feature = "runtime-benchmarks")] + { + if matches!(call, RuntimeCall::System(frame_system::Call::remark_with_event { .. })) { + return true + } + } + + // Allow to change dedicated storage items (called by governance-like) + match call { + RuntimeCall::System(frame_system::Call::set_storage { items }) + if items.iter().all(|(k, _)| { + k.eq(&DeliveryRewardInBalance::key()) | + k.eq(&RequiredStakeForStakeAndSlash::key()) + }) => + return true, + _ => (), + }; + + matches!( + call, + RuntimeCall::PolkadotXcm( + pallet_xcm::Call::force_xcm_version { .. } | + pallet_xcm::Call::force_default_xcm_version { .. } + ) | RuntimeCall::System( + frame_system::Call::set_heap_pages { .. } | + frame_system::Call::set_code { .. } | + frame_system::Call::set_code_without_checks { .. } | + frame_system::Call::kill_prefix { .. }, + ) | RuntimeCall::ParachainSystem(..) | + RuntimeCall::Timestamp(..) | + RuntimeCall::Balances(..) | + RuntimeCall::CollatorSelection( + pallet_collator_selection::Call::set_desired_candidates { .. } | + pallet_collator_selection::Call::set_candidacy_bond { .. } | + pallet_collator_selection::Call::register_as_candidate { .. } | + pallet_collator_selection::Call::leave_intent { .. } | + pallet_collator_selection::Call::set_invulnerables { .. } | + pallet_collator_selection::Call::add_invulnerable { .. } | + pallet_collator_selection::Call::remove_invulnerable { .. }, + ) | RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | + RuntimeCall::XcmpQueue(..) | + RuntimeCall::MessageQueue(..) 
| + RuntimeCall::BridgeRococoGrandpa(pallet_bridge_grandpa::Call::< + Runtime, + crate::bridge_to_rococo_config::BridgeGrandpaRococoInstance, + >::initialize { .. }) + ) + } +} + +pub type Barrier = TrailingSetTopicAsId< + DenyThenTry< + DenyReserveTransferToRelayChain, + ( + // Allow local users to buy weight credit. + TakeWeightCredit, + // Expected responses are OK. + AllowKnownQueryResponses, + WithComputedOrigin< + ( + // If the message is one that immediately attempts to pay for execution, then + // allow it. + AllowTopLevelPaidExecutionFrom, + // Parent, its pluralities (i.e. governance bodies) and relay treasury pallet + // get free execution. + AllowExplicitUnpaidExecutionFrom<( + ParentOrParentsPlurality, + Equals, + )>, + // Subscriptions for version tracking are OK. + AllowSubscriptionsFrom, + ), + UniversalLocation, + ConstU32<8>, + >, + ), + >, +>; + +match_types! { + pub type SystemParachains: impl Contains = { + MultiLocation { + parents: 1, + interior: X1(Parachain( + system_parachain::ASSET_HUB_ID | + system_parachain::BRIDGE_HUB_ID | + system_parachain::COLLECTIVES_ID + )), + } + }; +} + +/// Locations that will not be charged fees in the executor, +/// either execution or delivery. +/// We only waive fees for system functions, which these locations represent. +pub type WaivedLocations = + (RelayOrOtherSystemParachains, Equals); + +/// Cases where a remote origin is accepted as trusted Teleporter for a given asset: +/// - NativeToken with the parent Relay Chain and sibling parachains. +pub type TrustedTeleporters = ConcreteAssetFromSystem; + +pub struct XcmConfig; +impl xcm_executor::Config for XcmConfig { + type RuntimeCall = RuntimeCall; + type XcmSender = XcmRouter; + type AssetTransactor = CurrencyTransactor; + type OriginConverter = XcmOriginToTransactDispatchOrigin; + // BridgeHub does not recognize a reserve location for any asset. Users must teleport Native + // token where allowed (e.g. with the Relay Chain). + type IsReserve = (); + type IsTeleporter = TrustedTeleporters; + type UniversalLocation = UniversalLocation; + type Barrier = Barrier; + type Weigher = WeightInfoBounds< + crate::weights::xcm::BridgeHubWestendXcmWeight, + RuntimeCall, + MaxInstructions, + >; + type Trader = + UsingComponents>; + type ResponseHandler = PolkadotXcm; + type AssetTrap = PolkadotXcm; + type AssetLocker = (); + type AssetExchanger = (); + type AssetClaims = PolkadotXcm; + type SubscriptionService = PolkadotXcm; + type PalletInstancesInfo = AllPalletsWithSystem; + type MaxAssetsIntoHolding = MaxAssetsIntoHolding; + type FeeManager = XcmFeeManagerFromComponents< + WaivedLocations, + XcmFeeToAccount, + >; + type MessageExporter = (crate::bridge_to_rococo_config::ToBridgeHubRococoHaulBlobExporter,); + type UniversalAliases = Nothing; + type CallDispatcher = WithOriginFilter; + type SafeCallFilter = SafeCallFilter; + type Aliasers = Nothing; +} + +pub type PriceForParentDelivery = + ExponentialPrice; + +/// Converts a local signed origin into an XCM multilocation. +/// Forms the basis for local origins sending/executing XCMs. +pub type LocalOriginToLocation = SignedToAccountId32; + +/// The means for routing XCM messages which are not for local execution into the right message +/// queues. +pub type XcmRouter = WithUniqueTopic<( + // Two routers - use UMP to communicate with the relay chain: + cumulus_primitives_utility::ParentAsUmp, + // ..and XCMP to communicate with the sibling chains. 
+ XcmpQueue, +)>; + +impl pallet_xcm::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type XcmRouter = XcmRouter; + // We want to disallow users sending (arbitrary) XCMs from this chain. + type SendXcmOrigin = EnsureXcmOrigin; + // We support local origins dispatching XCM executions in principle... + type ExecuteXcmOrigin = EnsureXcmOrigin; + type XcmExecuteFilter = Nothing; + type XcmExecutor = XcmExecutor; + type XcmTeleportFilter = Everything; + type XcmReserveTransferFilter = Nothing; // This parachain is not meant as a reserve location. + type Weigher = WeightInfoBounds< + crate::weights::xcm::BridgeHubWestendXcmWeight, + RuntimeCall, + MaxInstructions, + >; + type UniversalLocation = UniversalLocation; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100; + type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion; + type Currency = Balances; + type CurrencyMatcher = (); + type TrustedLockers = (); + type SovereignAccountOf = LocationToAccountId; + type MaxLockers = ConstU32<8>; + type WeightInfo = crate::weights::pallet_xcm::WeightInfo; + type AdminOrigin = EnsureRoot; + type MaxRemoteLockConsumers = ConstU32<0>; + type RemoteLockConsumerIdentifier = (); +} + +impl cumulus_pallet_xcm::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type XcmExecutor = XcmExecutor; +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..4d477e1413e4c2506b09a94d06960b09978eb390 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs @@ -0,0 +1,332 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . 
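For orientation before the test file below: the `XcmRouter` configured in `xcm_config.rs` above sends upward via UMP and sideways via XCMP, with `WithUniqueTopic` appending a `SetTopic` to every routed program. A hedged sketch of dispatching through such a router, assuming the `send_xcm` helper re-exported by the xcm prelude and an illustrative function name:

use xcm::latest::prelude::*;

// Illustrative only: route an empty program to the relay chain through a router
// such as the `XcmRouter` defined above (UMP for the parent, XCMP for siblings).
fn example_send<Router: SendXcm>() {
    let dest = MultiLocation::parent();
    let message = Xcm::<()>(vec![ClearOrigin]);
    // `send_xcm` validates the destination against each router in the tuple,
    // computes the delivery fee and hands the encoded blob to the transport.
    match send_xcm::<Router>(dest, message) {
        Ok((message_id, _delivery_fees)) => log::info!("sent XCM with id {:?}", message_id),
        Err(e) => log::warn!("routing failed: {:?}", e),
    }
}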
+ +#![cfg(test)] + +use bp_polkadot_core::Signature; +use bridge_common_config::{DeliveryRewardInBalance, RequiredStakeForStakeAndSlash}; +use bridge_hub_westend_runtime::{ + bridge_common_config, bridge_to_rococo_config, + xcm_config::{RelayNetwork, WestendLocation, XcmConfig}, + AllPalletsWithoutSystem, BridgeRejectObsoleteHeadersAndMessages, Executive, ExistentialDeposit, + ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, SessionKeys, SignedExtra, + TransactionPayment, UncheckedExtrinsic, +}; +use bridge_to_rococo_config::{ + BridgeGrandpaRococoInstance, BridgeHubRococoChainId, BridgeParachainRococoInstance, + WithBridgeHubRococoMessageBridge, WithBridgeHubRococoMessagesInstance, + XCM_LANE_FOR_ASSET_HUB_WESTEND_TO_ASSET_HUB_ROCOCO, +}; +use codec::{Decode, Encode}; +use frame_support::{dispatch::GetDispatchInfo, parameter_types}; +use frame_system::pallet_prelude::HeaderFor; +use parachains_common::{westend::fee::WeightToFee, AccountId, AuraId, Balance}; +use sp_keyring::AccountKeyring::Alice; +use sp_runtime::{ + generic::{Era, SignedPayload}, + AccountId32, +}; +use xcm::latest::prelude::*; + +// Para id of sibling chain used in tests. +pub const SIBLING_PARACHAIN_ID: u32 = 1000; + +parameter_types! { + pub CheckingAccount: AccountId = PolkadotXcm::check_account(); +} + +fn construct_extrinsic( + sender: sp_keyring::AccountKeyring, + call: RuntimeCall, +) -> UncheckedExtrinsic { + let extra: SignedExtra = ( + frame_system::CheckNonZeroSender::::new(), + frame_system::CheckSpecVersion::::new(), + frame_system::CheckTxVersion::::new(), + frame_system::CheckGenesis::::new(), + frame_system::CheckEra::::from(Era::immortal()), + frame_system::CheckNonce::::from(0), + frame_system::CheckWeight::::new(), + pallet_transaction_payment::ChargeTransactionPayment::::from(0), + BridgeRejectObsoleteHeadersAndMessages::default(), + (bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages::default(),), + ); + let payload = SignedPayload::new(call.clone(), extra.clone()).unwrap(); + let signature = payload.using_encoded(|e| sender.sign(e)); + UncheckedExtrinsic::new_signed( + call, + AccountId32::from(sender.public()).into(), + Signature::Sr25519(signature.clone()), + extra, + ) +} + +fn construct_and_apply_extrinsic( + relayer_at_target: sp_keyring::AccountKeyring, + batch: pallet_utility::Call, +) -> sp_runtime::DispatchOutcome { + let batch_call = RuntimeCall::Utility(batch); + let xt = construct_extrinsic(relayer_at_target, batch_call); + let r = Executive::apply_extrinsic(xt); + r.unwrap() +} + +fn construct_and_estimate_extrinsic_fee(batch: pallet_utility::Call) -> Balance { + let batch_call = RuntimeCall::Utility(batch); + let batch_info = batch_call.get_dispatch_info(); + let xt = construct_extrinsic(Alice, batch_call); + TransactionPayment::compute_fee(xt.encoded_size() as _, &batch_info, 0) +} + +fn executive_init_block(header: &HeaderFor) { + Executive::initialize_block(header) +} + +fn collator_session_keys() -> bridge_hub_test_utils::CollatorSessionKeys { + bridge_hub_test_utils::CollatorSessionKeys::new( + AccountId::from(Alice), + AccountId::from(Alice), + SessionKeys { aura: AuraId::from(Alice.public()) }, + ) +} + +bridge_hub_test_utils::test_cases::include_teleports_for_native_asset_works!( + Runtime, + AllPalletsWithoutSystem, + XcmConfig, + CheckingAccount, + WeightToFee, + ParachainSystem, + collator_session_keys(), + ExistentialDeposit::get(), + Box::new(|runtime_event_encoded: Vec| { + match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) 
{ + Ok(RuntimeEvent::PolkadotXcm(event)) => Some(event), + _ => None, + } + }), + bp_bridge_hub_westend::BRIDGE_HUB_WESTEND_PARACHAIN_ID +); + +#[test] +fn initialize_bridge_by_governance_works() { + bridge_hub_test_utils::test_cases::initialize_bridge_by_governance_works::< + Runtime, + BridgeGrandpaRococoInstance, + >( + collator_session_keys(), + bp_bridge_hub_westend::BRIDGE_HUB_WESTEND_PARACHAIN_ID, + Box::new(|call| RuntimeCall::BridgeRococoGrandpa(call).encode()), + ) +} + +#[test] +fn change_delivery_reward_by_governance_works() { + bridge_hub_test_utils::test_cases::change_storage_constant_by_governance_works::< + Runtime, + DeliveryRewardInBalance, + u64, + >( + collator_session_keys(), + bp_bridge_hub_westend::BRIDGE_HUB_WESTEND_PARACHAIN_ID, + Box::new(|call| RuntimeCall::System(call).encode()), + || (DeliveryRewardInBalance::key().to_vec(), DeliveryRewardInBalance::get()), + |old_value| old_value.checked_mul(2).unwrap(), + ) +} + +#[test] +fn change_required_stake_by_governance_works() { + bridge_hub_test_utils::test_cases::change_storage_constant_by_governance_works::< + Runtime, + RequiredStakeForStakeAndSlash, + Balance, + >( + collator_session_keys(), + bp_bridge_hub_westend::BRIDGE_HUB_WESTEND_PARACHAIN_ID, + Box::new(|call| RuntimeCall::System(call).encode()), + || (RequiredStakeForStakeAndSlash::key().to_vec(), RequiredStakeForStakeAndSlash::get()), + |old_value| old_value.checked_mul(2).unwrap(), + ) +} + +#[test] +fn handle_export_message_from_system_parachain_add_to_outbound_queue_works() { + bridge_hub_test_utils::test_cases::handle_export_message_from_system_parachain_to_outbound_queue_works::< + Runtime, + XcmConfig, + WithBridgeHubRococoMessagesInstance, + >( + collator_session_keys(), + bp_bridge_hub_westend::BRIDGE_HUB_WESTEND_PARACHAIN_ID, + SIBLING_PARACHAIN_ID, + Box::new(|runtime_event_encoded: Vec| { + match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { + Ok(RuntimeEvent::BridgeRococoMessages(event)) => Some(event), + _ => None, + } + }), + || ExportMessage { network: Rococo, destination: X1(Parachain(4321)), xcm: Xcm(vec![]) }, + XCM_LANE_FOR_ASSET_HUB_WESTEND_TO_ASSET_HUB_ROCOCO, + Some((WestendLocation::get(), ExistentialDeposit::get()).into()), + // value should be >= than value generated by `can_calculate_weight_for_paid_export_message_with_reserve_transfer` + Some((WestendLocation::get(), bp_bridge_hub_westend::BridgeHubWestendBaseXcmFeeInWnds::get()).into()), + || (), + ) +} + +#[test] +fn message_dispatch_routing_works() { + bridge_hub_test_utils::test_cases::message_dispatch_routing_works::< + Runtime, + AllPalletsWithoutSystem, + XcmConfig, + ParachainSystem, + WithBridgeHubRococoMessagesInstance, + RelayNetwork, + bridge_to_rococo_config::RococoGlobalConsensusNetwork, + >( + collator_session_keys(), + bp_bridge_hub_westend::BRIDGE_HUB_WESTEND_PARACHAIN_ID, + SIBLING_PARACHAIN_ID, + Box::new(|runtime_event_encoded: Vec| { + match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { + Ok(RuntimeEvent::ParachainSystem(event)) => Some(event), + _ => None, + } + }), + Box::new(|runtime_event_encoded: Vec| { + match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { + Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), + _ => None, + } + }), + XCM_LANE_FOR_ASSET_HUB_WESTEND_TO_ASSET_HUB_ROCOCO, + || (), + ) +} + +#[test] +fn relayed_incoming_message_works() { + bridge_hub_test_utils::test_cases::relayed_incoming_message_works::< + Runtime, + AllPalletsWithoutSystem, + XcmConfig, + ParachainSystem, + BridgeGrandpaRococoInstance, + 
BridgeParachainRococoInstance, + WithBridgeHubRococoMessagesInstance, + WithBridgeHubRococoMessageBridge, + >( + collator_session_keys(), + bp_bridge_hub_westend::BRIDGE_HUB_WESTEND_PARACHAIN_ID, + bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, + SIBLING_PARACHAIN_ID, + Westend, + XCM_LANE_FOR_ASSET_HUB_WESTEND_TO_ASSET_HUB_ROCOCO, + || (), + ) +} + +#[test] +pub fn complex_relay_extrinsic_works() { + bridge_hub_test_utils::test_cases::complex_relay_extrinsic_works::< + Runtime, + AllPalletsWithoutSystem, + XcmConfig, + ParachainSystem, + BridgeGrandpaRococoInstance, + BridgeParachainRococoInstance, + WithBridgeHubRococoMessagesInstance, + WithBridgeHubRococoMessageBridge, + >( + collator_session_keys(), + bp_bridge_hub_westend::BRIDGE_HUB_WESTEND_PARACHAIN_ID, + bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, + SIBLING_PARACHAIN_ID, + BridgeHubRococoChainId::get(), + Westend, + XCM_LANE_FOR_ASSET_HUB_WESTEND_TO_ASSET_HUB_ROCOCO, + ExistentialDeposit::get(), + executive_init_block, + construct_and_apply_extrinsic, + || (), + ); +} + +#[test] +pub fn can_calculate_weight_for_paid_export_message_with_reserve_transfer() { + let estimated = bridge_hub_test_utils::test_cases::can_calculate_weight_for_paid_export_message_with_reserve_transfer::< + Runtime, + XcmConfig, + WeightToFee, + >(); + + // check if estimated value is sane + let max_expected = bp_bridge_hub_westend::BridgeHubWestendBaseXcmFeeInWnds::get(); + assert!( + estimated <= max_expected, + "calculated: {:?}, max_expected: {:?}, please adjust `bp_bridge_hub_westend::BridgeHubWestendBaseXcmFeeInWnds` value", + estimated, + max_expected + ); +} + +#[test] +pub fn can_calculate_fee_for_complex_message_delivery_transaction() { + let estimated = bridge_hub_test_utils::test_cases::can_calculate_fee_for_complex_message_delivery_transaction::< + Runtime, + BridgeGrandpaRococoInstance, + BridgeParachainRococoInstance, + WithBridgeHubRococoMessagesInstance, + WithBridgeHubRococoMessageBridge, + >( + collator_session_keys(), + construct_and_estimate_extrinsic_fee + ); + + // check if estimated value is sane + let max_expected = bp_bridge_hub_westend::BridgeHubWestendBaseDeliveryFeeInWnds::get(); + assert!( + estimated <= max_expected, + "calculated: {:?}, max_expected: {:?}, please adjust `bp_bridge_hub_westend::BridgeHubWestendBaseDeliveryFeeInWnds` value", + estimated, + max_expected + ); +} + +#[test] +pub fn can_calculate_fee_for_complex_message_confirmation_transaction() { + let estimated = bridge_hub_test_utils::test_cases::can_calculate_fee_for_complex_message_confirmation_transaction::< + Runtime, + BridgeGrandpaRococoInstance, + BridgeParachainRococoInstance, + WithBridgeHubRococoMessagesInstance, + WithBridgeHubRococoMessageBridge, + >( + collator_session_keys(), + construct_and_estimate_extrinsic_fee + ); + + // check if estimated value is sane + let max_expected = bp_bridge_hub_westend::BridgeHubWestendBaseConfirmationFeeInWnds::get(); + assert!( + estimated <= max_expected, + "calculated: {:?}, max_expected: {:?}, please adjust `bp_bridge_hub_westend::BridgeHubWestendBaseConfirmationFeeInWnds` value", + estimated, + max_expected + ); +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml index d56c32d8afabb3c40eb861f028c5f954889cf60d..bd171be53bf299e83fa127771adddc76f0b0d6c3 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml @@ -4,11 +4,11 
@@ version = "0.1.0" authors.workspace = true edition.workspace = true description = "Utils for BridgeHub testing" +license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } log = { version = "0.4.20", default-features = false } -assert_matches = "1.4.0" # Substrate frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } @@ -19,17 +19,17 @@ sp-core = { path = "../../../../../substrate/primitives/core", default-features sp-io = { path = "../../../../../substrate/primitives/io", default-features = false} sp-keyring = { path = "../../../../../substrate/primitives/keyring" } sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} +sp-tracing = { path = "../../../../../substrate/primitives/tracing" } pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false} pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false} pallet-session = { path = "../../../../../substrate/frame/session", default-features = false} # Cumulus asset-test-utils = { path = "../../assets/test-utils" } -cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue", default-features = false } cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachain-info = { path = "../../../pallets/parachain-info", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } parachains-runtimes-test-utils = { path = "../../test-utils", default-features = false } @@ -41,8 +41,6 @@ xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} # Bridges -bp-bridge-hub-rococo = { path = "../../../../../bridges/primitives/chain-bridge-hub-rococo", default-features = false } -bp-bridge-hub-wococo = { path = "../../../../../bridges/primitives/chain-bridge-hub-wococo", default-features = false } bp-header-chain = { path = "../../../../../bridges/primitives/header-chain", default-features = false } bp-messages = { path = "../../../../../bridges/primitives/messages", default-features = false } bp-parachains = { path = "../../../../../bridges/primitives/parachains", default-features = false } @@ -60,8 +58,6 @@ bridge-runtime-common = { path = "../../../../../bridges/bin/runtime-common", de default = [ "std" ] std = [ "asset-test-utils/std", - "bp-bridge-hub-rococo/std", - "bp-bridge-hub-wococo/std", "bp-header-chain/std", "bp-messages/std", "bp-parachains/std", @@ -71,7 +67,6 @@ std = [ "bp-test-utils/std", "bridge-runtime-common/std", "codec/std", - "cumulus-pallet-dmp-queue/std", "cumulus-pallet-parachain-system/std", "cumulus-pallet-xcmp-queue/std", "frame-benchmarking/std", diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases.rs index 
aa7fa16a9791c626eaa4473f6de5914e6f5867ca..7a86d85c86fc26f0d5610700fa13735cb885f2cb 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases.rs @@ -16,10 +16,10 @@ //! Module contains predefined test-case scenarios for `Runtime` with bridging capabilities. -use assert_matches::assert_matches; use bp_messages::{ + source_chain::TargetHeaderChain, target_chain::{DispatchMessage, DispatchMessageData, MessageDispatch, SourceHeaderChain}, - LaneId, MessageKey, OutboundLaneData, Weight, + LaneId, MessageKey, OutboundLaneData, UnrewardedRelayersState, Weight, }; use bp_parachains::{BestParaHeadHash, ParaInfo}; use bp_polkadot_core::parachains::{ParaHash, ParaId}; @@ -28,9 +28,13 @@ use bp_runtime::{HeaderOf, Parachain, StorageProofSize, UnderlyingChainOf}; use bp_test_utils::{make_default_justification, prepare_parachain_heads_proof}; use bridge_runtime_common::{ messages::{ - target::FromBridgedChainMessagesProof, BridgedChain as MessageBridgedChain, MessageBridge, + source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, + BridgedChain as MessageBridgedChain, MessageBridge, + }, + messages_generation::{ + encode_all_messages, encode_lane_data, prepare_message_delivery_storage_proof, + prepare_messages_storage_proof, }, - messages_generation::{encode_all_messages, encode_lane_data, prepare_messages_storage_proof}, messages_xcm_extension::{XcmAsPlainPayload, XcmBlobMessageDispatchResult}, }; use codec::Encode; @@ -47,10 +51,16 @@ use parachains_runtimes_test_utils::{ }; use sp_core::H256; use sp_keyring::AccountKeyring::*; -use sp_runtime::{traits::Header as HeaderT, AccountId32}; +use sp_runtime::{ + traits::{Header as HeaderT, Zero}, + AccountId32, +}; use xcm::latest::prelude::*; use xcm_builder::DispatchBlobError; -use xcm_executor::XcmExecutor; +use xcm_executor::{ + traits::{TransactAsset, WeightBounds}, + XcmExecutor, +}; // Re-export test_case from assets pub use asset_test_utils::include_teleports_for_native_asset_works; @@ -75,7 +85,6 @@ pub fn initialize_bridge_by_governance_works( + pallet_xcm::Config + parachain_info::Config + pallet_collator_selection::Config - + cumulus_pallet_dmp_queue::Config + cumulus_pallet_parachain_system::Config + pallet_bridge_grandpa::Config, GrandpaPalletInstance: 'static, @@ -137,6 +146,9 @@ pub fn handle_export_message_from_system_parachain_to_outbound_queue_works< >, export_message_instruction: fn() -> Instruction, expected_lane_id: LaneId, + existential_deposit: Option, + maybe_paid_export_message: Option, + prepare_configuration: impl Fn(), ) where Runtime: frame_system::Config + pallet_balances::Config @@ -144,7 +156,6 @@ pub fn handle_export_message_from_system_parachain_to_outbound_queue_works< + pallet_xcm::Config + parachain_info::Config + pallet_collator_selection::Config - + cumulus_pallet_dmp_queue::Config + cumulus_pallet_parachain_system::Config + pallet_bridge_messages::Config, XcmConfig: xcm_executor::Config, @@ -161,6 +172,8 @@ pub fn handle_export_message_from_system_parachain_to_outbound_queue_works< .with_tracing() .build() .execute_with(|| { + prepare_configuration(); + // check queue before assert_eq!( pallet_bridge_messages::OutboundLanes::::try_get( @@ -170,10 +183,35 @@ pub fn handle_export_message_from_system_parachain_to_outbound_queue_works< ); // prepare `ExportMessage` - let xcm = Xcm(vec![ - UnpaidExecution { weight_limit: Unlimited, check_origin: None }, - export_message_instruction(), - ]); + let 
xcm = if let Some(fee) = maybe_paid_export_message { + // deposit ED to origin (if needed) + if let Some(ed) = existential_deposit { + XcmConfig::AssetTransactor::deposit_asset( + &ed, + &sibling_parachain_location, + Some(&XcmContext::with_message_id([0; 32])), + ) + .expect("deposited ed"); + } + // deposit fee to origin + XcmConfig::AssetTransactor::deposit_asset( + &fee, + &sibling_parachain_location, + Some(&XcmContext::with_message_id([0; 32])), + ) + .expect("deposited fee"); + + Xcm(vec![ + WithdrawAsset(MultiAssets::from(vec![fee.clone()])), + BuyExecution { fees: fee, weight_limit: Unlimited }, + export_message_instruction(), + ]) + } else { + Xcm(vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + export_message_instruction(), + ]) + }; // execute XCM let hash = xcm.using_encoded(sp_io::hashing::blake2_256); @@ -231,6 +269,7 @@ pub fn message_dispatch_routing_works< dyn Fn(Vec) -> Option>, >, expected_lane_id: LaneId, + prepare_configuration: impl Fn(), ) where Runtime: frame_system::Config + pallet_balances::Config @@ -238,7 +277,6 @@ pub fn message_dispatch_routing_works< + pallet_xcm::Config + parachain_info::Config + pallet_collator_selection::Config - + cumulus_pallet_dmp_queue::Config + cumulus_pallet_parachain_system::Config + cumulus_pallet_xcmp_queue::Config + pallet_bridge_messages::Config, @@ -249,6 +287,7 @@ pub fn message_dispatch_routing_works< XcmConfig: xcm_executor::Config, MessagesPalletInstance: 'static, ValidatorIdOf: From>, + ::AccountId: From, HrmpChannelOpener: frame_support::inherent::ProvideInherent< Call = cumulus_pallet_parachain_system::Call, >, @@ -267,12 +306,14 @@ pub fn message_dispatch_routing_works< .with_tracing() .build() .execute_with(|| { + prepare_configuration(); + let mut alice = [0u8; 32]; alice[0] = 1; let included_head = RuntimeHelper::::run_to_block( 2, - AccountId::from(alice), + AccountId::from(alice).into(), ); // 1. this message is sent from other global consensus with destination of this Runtime relay chain (UMP) let bridging_message = @@ -343,6 +384,7 @@ pub fn relayed_incoming_message_works @@ -363,6 +404,7 @@ pub fn relayed_incoming_message_works::BridgedChain: Send + Sync + 'static, + ::ThisChain: Send + Sync + 'static, UnderlyingChainOf>: bp_runtime::Chain + Parachain, XcmConfig: xcm_executor::Config, HrmpChannelOpener: frame_support::inherent::ProvideInherent< @@ -374,11 +416,11 @@ pub fn relayed_incoming_message_works>::BridgedChain as bp_runtime::Chain>::Hash>, ::AccountId: Into<<::RuntimeOrigin as OriginTrait>::AccountId>, + ::AccountId: From, AccountIdOf: From, >::InboundRelayer: From, { assert_ne!(runtime_para_id, sibling_parachain_id); - assert_ne!(runtime_para_id, bridged_para_id); ExtBuilder::::default() .with_collators(collator_session_key.collators()) @@ -388,12 +430,14 @@ pub fn relayed_incoming_message_works::run_to_block( 2, - AccountId::from(alice), + AccountId::from(alice).into(), ); mock_open_hrmp_channel::( runtime_para_id.into(), @@ -446,7 +490,11 @@ pub fn relayed_incoming_message_works, MB, ()>( + ) = test_data::make_complex_relayer_delivery_proofs::< + >::BridgedChain, + MB, + (), + >( lane_id, xcm.into(), message_nonce, @@ -528,15 +576,24 @@ pub fn relayed_incoming_message_works>::take_xcm( sibling_parachain_id.into(), ) .unwrap(); - let mut dispatched = xcm::latest::Xcm::<()>::try_from(dispatched).unwrap(); - // We use `WithUniqueTopic`, so expect a trailing `SetTopic`. 
- assert_matches!(dispatched.0.pop(), Some(SetTopic(..))); - assert_eq!(dispatched, expected_dispatch); + // verify contains original message + let dispatched = xcm::latest::Xcm::<()>::try_from(dispatched).unwrap(); + let mut dispatched_clone = dispatched.clone(); + for (idx, expected_instr) in expected_dispatch.0.iter().enumerate() { + assert_eq!(expected_instr, &dispatched.0[idx]); + assert_eq!(expected_instr, &dispatched_clone.0.remove(0)); + } + match dispatched_clone.0.len() { + 0 => (), + 1 => assert!(matches!(dispatched_clone.0[0], SetTopic(_))), + count => assert!(false, "Unexpected messages count: {:?}", count), + } }) } @@ -557,6 +614,7 @@ pub fn complex_relay_extrinsic_works ) -> sp_runtime::DispatchOutcome, + prepare_configuration: impl Fn(), ) where Runtime: frame_system::Config + pallet_balances::Config @@ -565,7 +623,6 @@ pub fn complex_relay_extrinsic_works @@ -579,6 +636,7 @@ pub fn complex_relay_extrinsic_works::BridgedChain: Send + Sync + 'static, + ::ThisChain: Send + Sync + 'static, UnderlyingChainOf>: bp_runtime::Chain + Parachain, XcmConfig: xcm_executor::Config, HrmpChannelOpener: frame_support::inherent::ProvideInherent< @@ -591,6 +649,7 @@ pub fn complex_relay_extrinsic_works::AccountId: Into<<::RuntimeOrigin as OriginTrait>::AccountId>, AccountIdOf: From, + ::AccountId: From, >::InboundRelayer: From, ::RuntimeCall: From> @@ -598,7 +657,6 @@ pub fn complex_relay_extrinsic_works> { assert_ne!(runtime_para_id, sibling_parachain_id); - assert_ne!(runtime_para_id, bridged_para_id); // Relayer account at local/this BH. let relayer_at_target = Bob; @@ -617,12 +675,14 @@ pub fn complex_relay_extrinsic_works::run_to_block( 2, - AccountId::from(alice), + AccountId::from(alice).into(), ); let zero: BlockNumberFor = 0u32.into(); let genesis_hash = frame_system::Pallet::::block_hash(zero); @@ -676,9 +736,13 @@ pub fn complex_relay_extrinsic_works, MB, ()>( + ) = test_data::make_complex_relayer_delivery_proofs::< + >::BridgedChain, + MB, + (), + >( lane_id, - xcm.into(), + xcm.clone().into(), message_nonce, message_destination, para_header_number, @@ -686,27 +750,15 @@ pub fn complex_relay_extrinsic_works::submit_finality_proof { - finality_target: Box::new(relay_chain_header.clone()), - justification: grandpa_justification, - }; - let submit_para_head = - pallet_bridge_parachains::Call::::submit_parachain_heads { - at_relay_block: (relay_header_number, relay_chain_header.hash().into()), - parachains: parachain_heads, - parachain_heads_proof: para_heads_proof, - }; - let submit_message = - pallet_bridge_messages::Call::::receive_messages_proof { - relayer_id_at_bridged_chain: relayer_id_on_source.into(), - proof: message_proof.into(), - messages_count: 1, - dispatch_weight: Weight::from_parts(1000000000, 0), - }; - let batch = pallet_utility::Call::::batch_all { - calls: vec![submit_grandpa.into(), submit_para_head.into(), submit_message.into()], - }; + let relay_chain_header_hash = relay_chain_header.hash(); + let batch = test_data::make_complex_relayer_delivery_batch::( + relay_chain_header, + grandpa_justification, + parachain_heads, + para_heads_proof, + message_proof, + relayer_id_on_source, + ); // sanity checks - before relayer extrinsic assert!(RuntimeHelper::>::take_xcm( @@ -741,10 +793,10 @@ pub fn complex_relay_extrinsic_works>::get().unwrap().1, - relay_chain_header.hash() + relay_chain_header_hash ); assert!(>::contains_key( - relay_chain_header.hash() + relay_chain_header_hash )); // verify parachain head proof correctly imported assert_eq!( @@ -769,25 +821,334 
@@ pub fn complex_relay_extrinsic_works>::take_xcm( sibling_parachain_id.into(), ) .unwrap(); - let mut dispatched = xcm::latest::Xcm::<()>::try_from(dispatched).unwrap(); - // We use `WithUniqueTopic`, so expect a trailing `SetTopic`. - assert_matches!(dispatched.0.pop(), Some(SetTopic(..))); - assert_eq!(dispatched, expected_dispatch); + // verify contains original message + let dispatched = xcm::latest::Xcm::<()>::try_from(dispatched).unwrap(); + let mut dispatched_clone = dispatched.clone(); + for (idx, expected_instr) in expected_dispatch.0.iter().enumerate() { + assert_eq!(expected_instr, &dispatched.0[idx]); + assert_eq!(expected_instr, &dispatched_clone.0.remove(0)); + } + match dispatched_clone.0.len() { + 0 => (), + 1 => assert!(matches!(dispatched_clone.0[0], SetTopic(_))), + count => assert!(false, "Unexpected messages count: {:?}", count), + } + }) +} + +/// Estimates XCM execution fee for paid `ExportMessage` processing. +pub fn can_calculate_weight_for_paid_export_message_with_reserve_transfer< + Runtime, + XcmConfig, + WeightToFee, +>() -> u128 +where + Runtime: frame_system::Config + pallet_balances::Config, + XcmConfig: xcm_executor::Config, + WeightToFee: frame_support::weights::WeightToFee>, + ::Balance: From + Into, +{ + // data here are not relevant for weighing + let mut xcm = Xcm(vec![ + WithdrawAsset(MultiAssets::from(vec![MultiAsset { + id: Concrete(MultiLocation { parents: 1, interior: Here }), + fun: Fungible(34333299), + }])), + BuyExecution { + fees: MultiAsset { + id: Concrete(MultiLocation { parents: 1, interior: Here }), + fun: Fungible(34333299), + }, + weight_limit: Unlimited, + }, + ExportMessage { + network: Polkadot, + destination: X1(Parachain(1000)), + xcm: Xcm(vec![ + ReserveAssetDeposited(MultiAssets::from(vec![MultiAsset { + id: Concrete(MultiLocation { + parents: 2, + interior: X1(GlobalConsensus(Kusama)), + }), + fun: Fungible(1000000000000), + }])), + ClearOrigin, + BuyExecution { + fees: MultiAsset { + id: Concrete(MultiLocation { + parents: 2, + interior: X1(GlobalConsensus(Kusama)), + }), + fun: Fungible(1000000000000), + }, + weight_limit: Unlimited, + }, + DepositAsset { + assets: Wild(AllCounted(1)), + beneficiary: MultiLocation { + parents: 0, + interior: X1(xcm::latest::prelude::AccountId32 { + network: None, + id: [ + 212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, + 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, + 109, 162, 125, + ], + }), + }, + }, + SetTopic([ + 116, 82, 194, 132, 171, 114, 217, 165, 23, 37, 161, 177, 165, 179, 247, 114, + 137, 101, 147, 70, 28, 157, 168, 32, 154, 63, 74, 228, 152, 180, 5, 63, + ]), + ]), + }, + DepositAsset { + assets: Wild(All), + beneficiary: MultiLocation { parents: 1, interior: X1(Parachain(1000)) }, + }, + SetTopic([ + 36, 224, 250, 165, 82, 195, 67, 110, 160, 170, 140, 87, 217, 62, 201, 164, 42, 98, 219, + 157, 124, 105, 248, 25, 131, 218, 199, 36, 109, 173, 100, 122, + ]), + ]); + + // get weight + let weight = XcmConfig::Weigher::weight(&mut xcm); + assert_ok!(weight); + let weight = weight.unwrap(); + // check if sane + let max_expected = Runtime::BlockWeights::get().max_block / 10; + assert!( + weight.all_lte(max_expected), + "calculated weight: {:?}, max_expected: {:?}", + weight, + max_expected + ); + + // check fee, should not be 0 + let estimated_fee = WeightToFee::weight_to_fee(&weight); + assert!(estimated_fee > BalanceOf::::zero()); + + sp_tracing::try_init_simple(); + log::error!( + target: "bridges::estimate", + "Estimate fee: {:?} for 
`ExportMessage` for runtime: {:?}", + estimated_fee, + Runtime::Version::get(), + ); + + estimated_fee.into() +} + +/// Estimates transaction fee for default message delivery transaction (batched with required +/// proofs) from bridged parachain. +pub fn can_calculate_fee_for_complex_message_delivery_transaction( + collator_session_key: CollatorSessionKeys, + compute_extrinsic_fee: fn(pallet_utility::Call::) -> u128, +) -> u128 +where + Runtime: frame_system::Config + + pallet_balances::Config + + pallet_session::Config + + pallet_xcm::Config + + parachain_info::Config + + pallet_collator_selection::Config + + cumulus_pallet_parachain_system::Config + + pallet_bridge_grandpa::Config + + pallet_bridge_parachains::Config + + pallet_bridge_messages::Config + + pallet_utility::Config, + GPI: 'static, + PPI: 'static, + MPI: 'static, + MB: MessageBridge, + ::BridgedChain: Send + Sync + 'static, + ::ThisChain: Send + Sync + 'static, + UnderlyingChainOf>: bp_runtime::Chain + Parachain, + ValidatorIdOf: From>, + <>::SourceHeaderChain as SourceHeaderChain>::MessagesProof: + From>, + <>::BridgedChain as bp_runtime::Chain>::Hash: From, + ParaHash: From<<>::BridgedChain as bp_runtime::Chain>::Hash>, + ::AccountId: + Into<<::RuntimeOrigin as OriginTrait>::AccountId>, + ::AccountId: From, + AccountIdOf: From, + >::InboundRelayer: From, + ::RuntimeCall: + From> + + From> + + From>, +{ + ExtBuilder::::default() + .with_collators(collator_session_key.collators()) + .with_session_keys(collator_session_key.session_keys()) + .with_safe_xcm_version(XCM_VERSION) + .with_para_id(1000.into()) + .with_tracing() + .build() + .execute_with(|| { + // generate bridged relay chain finality, parachain heads and message proofs, + // to be submitted by relayer to this chain. + // + // we don't care about parameter values here, apart from the XCM message size. But we + // do not need to have a large message here, because we're charging for every byte of + // the message additionally + let ( + relay_chain_header, + grandpa_justification, + _, + parachain_heads, + para_heads_proof, + message_proof, + ) = test_data::make_complex_relayer_delivery_proofs::< + >::BridgedChain, + MB, + (), + >( + LaneId::default(), + vec![xcm::v3::Instruction::<()>::ClearOrigin; 1_024].into(), + 1, + X2(GlobalConsensus(Polkadot), Parachain(1_000)), + 1, + 5, + 1_000, + ); + + // generate batch call that provides finality for bridged relay and parachains + message + // proof + let batch = test_data::make_complex_relayer_delivery_batch::( + relay_chain_header, + grandpa_justification, + parachain_heads, + para_heads_proof, + message_proof, + Dave.public().into(), + ); + let estimated_fee = compute_extrinsic_fee(batch); + + log::error!( + target: "bridges::estimate", + "Estimate fee: {:?} for single message delivery for runtime: {:?}", + estimated_fee, + Runtime::Version::get(), + ); + + estimated_fee + }) +} + +/// Estimates transaction fee for default message confirmation transaction (batched with required +/// proofs) from bridged parachain. 
+pub fn can_calculate_fee_for_complex_message_confirmation_transaction( + collator_session_key: CollatorSessionKeys, + compute_extrinsic_fee: fn(pallet_utility::Call::) -> u128, +) -> u128 +where + Runtime: frame_system::Config + + pallet_balances::Config + + pallet_session::Config + + pallet_xcm::Config + + parachain_info::Config + + pallet_collator_selection::Config + + cumulus_pallet_parachain_system::Config + + pallet_bridge_grandpa::Config + + pallet_bridge_parachains::Config + + pallet_bridge_messages::Config + + pallet_utility::Config, + GPI: 'static, + PPI: 'static, + MPI: 'static, + MB: MessageBridge, + ::BridgedChain: Send + Sync + 'static, + ::ThisChain: Send + Sync + 'static, + <::ThisChain as bp_runtime::Chain>::AccountId: From, + UnderlyingChainOf>: bp_runtime::Chain + Parachain, + ValidatorIdOf: From>, + <>::SourceHeaderChain as SourceHeaderChain>::MessagesProof: + From>, + <>::BridgedChain as bp_runtime::Chain>::Hash: From, + ParaHash: From<<>::BridgedChain as bp_runtime::Chain>::Hash>, + ::AccountId: + Into<<::RuntimeOrigin as OriginTrait>::AccountId>, + ::AccountId: From, + AccountIdOf: From, + >::InboundRelayer: From, + <>::TargetHeaderChain as TargetHeaderChain< + XcmAsPlainPayload, + Runtime::AccountId, + >>::MessagesDeliveryProof: From>, + ::RuntimeCall: + From> + + From> + + From>, +{ + ExtBuilder::::default() + .with_collators(collator_session_key.collators()) + .with_session_keys(collator_session_key.session_keys()) + .with_safe_xcm_version(XCM_VERSION) + .with_para_id(1000.into()) + .with_tracing() + .build() + .execute_with(|| { + // generate bridged relay chain finality, parachain heads and message proofs, + // to be submitted by relayer to this chain. + let unrewarded_relayers = UnrewardedRelayersState { + unrewarded_relayer_entries: 1, + total_messages: 1, + ..Default::default() + }; + let ( + relay_chain_header, + grandpa_justification, + _, + parachain_heads, + para_heads_proof, + message_delivery_proof, + ) = test_data::make_complex_relayer_confirmation_proofs::< + >::BridgedChain, + MB, + (), + >(LaneId::default(), 1, 5, 1_000, Alice.public().into(), unrewarded_relayers.clone()); + + // generate batch call that provides finality for bridged relay and parachains + message + // proof + let batch = test_data::make_complex_relayer_confirmation_batch::( + relay_chain_header, + grandpa_justification, + parachain_heads, + para_heads_proof, + message_delivery_proof, + unrewarded_relayers, + ); + let estimated_fee = compute_extrinsic_fee(batch); + + log::error!( + target: "bridges::estimate", + "Estimate fee: {:?} for single message confirmation for runtime: {:?}", + estimated_fee, + Runtime::Version::get(), + ); + + estimated_fee }) } pub mod test_data { use super::*; - use bp_header_chain::justification::GrandpaJustification; - use bp_messages::MessageNonce; + use bp_header_chain::{justification::GrandpaJustification, ChainWithGrandpa}; + use bp_messages::{DeliveredMessages, InboundLaneData, MessageNonce, UnrewardedRelayer}; use bp_polkadot_core::parachains::{ParaHash, ParaHead, ParaHeadsProof, ParaId}; - use bp_runtime::BasicOperatingMode; + use bp_runtime::{BasicOperatingMode, HashOf}; use bp_test_utils::authority_list; + use sp_runtime::{DigestItem, SaturatedConversion}; use xcm_builder::{HaulBlob, HaulBlobError, HaulBlobExporter}; use xcm_executor::traits::{validate_export, ExportXcm}; @@ -804,7 +1165,117 @@ pub mod test_data { (location, xcm).encode().encode() } - pub fn make_complex_relayer_proofs( + /// Prepare a batch call with relay finality proof, 
parachain head proof and message proof. + pub fn make_complex_relayer_delivery_batch( + relay_chain_header: BridgedHeader, + grandpa_justification: GrandpaJustification>, + parachain_heads: Vec<(ParaId, ParaHash)>, + para_heads_proof: ParaHeadsProof, + message_proof: FromBridgedChainMessagesProof, + relayer_id_at_bridged_chain: AccountId32, + ) -> pallet_utility::Call where + Runtime:pallet_bridge_grandpa::Config + + pallet_bridge_parachains::Config + + pallet_bridge_messages::Config + + pallet_utility::Config, + GPI: 'static, + PPI: 'static, + MPI: 'static, + ParaHash: From<<>::BridgedChain as bp_runtime::Chain>::Hash>, + <>::BridgedChain as bp_runtime::Chain>::Hash: From, + <>::SourceHeaderChain as SourceHeaderChain>::MessagesProof: + From>, + >::InboundRelayer: From, + ::RuntimeCall: + From> + + From> + + From>, + { + let relay_chain_header_hash = relay_chain_header.hash(); + let relay_chain_header_number = *relay_chain_header.number(); + let submit_grandpa = pallet_bridge_grandpa::Call::::submit_finality_proof { + finality_target: Box::new(relay_chain_header), + justification: grandpa_justification, + }; + let submit_para_head = + pallet_bridge_parachains::Call::::submit_parachain_heads { + at_relay_block: ( + relay_chain_header_number.saturated_into(), + relay_chain_header_hash.into(), + ), + parachains: parachain_heads, + parachain_heads_proof: para_heads_proof, + }; + let submit_message = pallet_bridge_messages::Call::::receive_messages_proof { + relayer_id_at_bridged_chain: relayer_id_at_bridged_chain.into(), + proof: message_proof.into(), + messages_count: 1, + dispatch_weight: Weight::from_parts(1000000000, 0), + }; + pallet_utility::Call::::batch_all { + calls: vec![submit_grandpa.into(), submit_para_head.into(), submit_message.into()], + } + } + + /// Prepare a batch call with relay finality proof, parachain head proof and message delivery + /// proof. 
+ pub fn make_complex_relayer_confirmation_batch( + relay_chain_header: BridgedHeader, + grandpa_justification: GrandpaJustification>, + parachain_heads: Vec<(ParaId, ParaHash)>, + para_heads_proof: ParaHeadsProof, + message_delivery_proof: FromBridgedChainMessagesDeliveryProof, + relayers_state: UnrewardedRelayersState, + ) -> pallet_utility::Call where + Runtime:pallet_bridge_grandpa::Config + + pallet_bridge_parachains::Config + + pallet_bridge_messages::Config + + pallet_utility::Config, + GPI: 'static, + PPI: 'static, + MPI: 'static, + ParaHash: From<<>::BridgedChain as bp_runtime::Chain>::Hash>, + <>::BridgedChain as bp_runtime::Chain>::Hash: From, + <>::TargetHeaderChain as TargetHeaderChain< + XcmAsPlainPayload, + Runtime::AccountId, + >>::MessagesDeliveryProof: From>, + ::RuntimeCall: + From> + + From> + + From>, + { + let relay_chain_header_hash = relay_chain_header.hash(); + let relay_chain_header_number = *relay_chain_header.number(); + let submit_grandpa = pallet_bridge_grandpa::Call::::submit_finality_proof { + finality_target: Box::new(relay_chain_header), + justification: grandpa_justification, + }; + let submit_para_head = + pallet_bridge_parachains::Call::::submit_parachain_heads { + at_relay_block: ( + relay_chain_header_number.saturated_into(), + relay_chain_header_hash.into(), + ), + parachains: parachain_heads, + parachain_heads_proof: para_heads_proof, + }; + let submit_message_delivery_proof = + pallet_bridge_messages::Call::::receive_messages_delivery_proof { + proof: message_delivery_proof.into(), + relayers_state, + }; + pallet_utility::Call::::batch_all { + calls: vec![ + submit_grandpa.into(), + submit_para_head.into(), + submit_message_delivery_proof.into(), + ], + } + } + + /// Prepare storage proofs of messages, stored at the source chain. 
+ pub fn make_complex_relayer_delivery_proofs( lane_id: LaneId, xcm_message: Xcm, message_nonce: MessageNonce, @@ -813,18 +1284,19 @@ pub mod test_data { relay_header_number: u32, bridged_para_id: u32, ) -> ( - BridgedRelayHeader, - GrandpaJustification, + HeaderOf, + GrandpaJustification>, ParaHead, Vec<(ParaId, ParaHash)>, ParaHeadsProof, FromBridgedChainMessagesProof, ) where - BridgedRelayHeader: HeaderT, - ::Hash: From, + BridgedRelayChain: ChainWithGrandpa, + HashOf: From, MB: MessageBridge, ::BridgedChain: Send + Sync + 'static, + ::ThisChain: Send + Sync + 'static, UnderlyingChainOf>: bp_runtime::Chain + Parachain, { let message_payload = prepare_inbound_xcm(xcm_message, message_destination); @@ -840,19 +1312,18 @@ pub mod test_data { encode_lane_data, ); - let bridged_para_head = ParaHead( - bp_test_utils::test_header_with_root::>( - para_header_number.into(), - para_state_root.into(), - ) - .encode(), + let ( + relay_chain_header, + justification, + bridged_para_head, + parachain_heads, + para_heads_proof, + ) = make_complex_bridged_heads_proof::( + para_state_root, + para_header_number, + relay_header_number, + bridged_para_id, ); - let (relay_state_root, para_heads_proof, parachain_heads) = - prepare_parachain_heads_proof::>(vec![( - bridged_para_id, - bridged_para_head.clone(), - )]); - assert_eq!(bridged_para_head.hash(), parachain_heads[0].1); let message_proof = FromBridgedChainMessagesProof { bridged_header_hash: bridged_para_head.hash(), @@ -862,12 +1333,6 @@ pub mod test_data { nonces_end: message_nonce, }; - // import bridged relay chain block#1 with state root containing head#5 of bridged parachain - let relay_chain_header: BridgedRelayHeader = bp_test_utils::test_header_with_root( - relay_header_number.into(), - relay_state_root.into(), - ); - let justification = make_default_justification(&relay_chain_header); ( relay_chain_header, justification, @@ -878,6 +1343,134 @@ pub mod test_data { ) } + /// Prepare storage proofs of message confirmations, stored at the target chain. 
+ pub fn make_complex_relayer_confirmation_proofs( + lane_id: LaneId, + para_header_number: u32, + relay_header_number: u32, + bridged_para_id: u32, + relayer_id_at_this_chain: AccountId32, + relayers_state: UnrewardedRelayersState, + ) -> ( + HeaderOf, + GrandpaJustification>, + ParaHead, + Vec<(ParaId, ParaHash)>, + ParaHeadsProof, + FromBridgedChainMessagesDeliveryProof, + ) + where + BridgedRelayChain: ChainWithGrandpa, + HashOf: From, + MB: MessageBridge, + ::BridgedChain: Send + Sync + 'static, + ::ThisChain: Send + Sync + 'static, + <::ThisChain as bp_runtime::Chain>::AccountId: From, + UnderlyingChainOf>: bp_runtime::Chain + Parachain, + { + // prepare para storage proof containing message delivery proof + let (para_state_root, para_storage_proof) = prepare_message_delivery_storage_proof::( + lane_id, + InboundLaneData { + relayers: vec![ + UnrewardedRelayer { + relayer: relayer_id_at_this_chain.into(), + messages: DeliveredMessages::new(1) + }; + relayers_state.unrewarded_relayer_entries as usize + ] + .into(), + last_confirmed_nonce: 1, + }, + StorageProofSize::Minimal(0), + ); + + let ( + relay_chain_header, + justification, + bridged_para_head, + parachain_heads, + para_heads_proof, + ) = make_complex_bridged_heads_proof::( + para_state_root, + para_header_number, + relay_header_number, + bridged_para_id, + ); + + let message_delivery_proof = FromBridgedChainMessagesDeliveryProof { + bridged_header_hash: bridged_para_head.hash(), + storage_proof: para_storage_proof, + lane: lane_id, + }; + + ( + relay_chain_header, + justification, + bridged_para_head, + parachain_heads, + para_heads_proof, + message_delivery_proof, + ) + } + + /// Make bridged parachain header with given state root and relay header that is finalizing it. + pub fn make_complex_bridged_heads_proof( + para_state_root: ParaHash, + para_header_number: u32, + relay_header_number: u32, + bridged_para_id: u32, + ) -> ( + HeaderOf, + GrandpaJustification>, + ParaHead, + Vec<(ParaId, ParaHash)>, + ParaHeadsProof, + ) + where + BridgedRelayChain: ChainWithGrandpa, + HashOf: From, + MB: MessageBridge, + ::BridgedChain: Send + Sync + 'static, + ::ThisChain: Send + Sync + 'static, + UnderlyingChainOf>: bp_runtime::Chain + Parachain, + { + let bridged_para_head = ParaHead( + bp_test_utils::test_header_with_root::>( + para_header_number.into(), + para_state_root.into(), + ) + .encode(), + ); + let (relay_state_root, para_heads_proof, parachain_heads) = + prepare_parachain_heads_proof::>(vec![( + bridged_para_id, + bridged_para_head.clone(), + )]); + assert_eq!(bridged_para_head.hash(), parachain_heads[0].1); + + // import bridged relay chain block#1 with state root containing head#5 of bridged parachain + let mut relay_chain_header: BridgedRelayChain::Header = + bp_test_utils::test_header_with_root( + relay_header_number.into(), + relay_state_root.into(), + ); + // to compute proper cost of GRANDPA call, let's add some dummy bytes to header, so that the + // `submit_finality_proof` call size would be close to maximal expected (and refundable) + let expected_bytes_in_grandpa_call = BridgedRelayChain::AVERAGE_HEADER_SIZE_IN_JUSTIFICATION + .saturating_mul(BridgedRelayChain::REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY) + .saturating_add(BridgedRelayChain::MAX_HEADER_SIZE) + as usize; + let extra_bytes_required = + expected_bytes_in_grandpa_call.saturating_sub(relay_chain_header.encoded_size()); + relay_chain_header + .digest_mut() + .push(DigestItem::Other(vec![42; extra_bytes_required])); + + let justification = 
make_default_justification(&relay_chain_header); + (relay_chain_header, justification, bridged_para_head, parachain_heads, para_heads_proof) + } + /// Helper that creates InitializationData mock data, that can be used to initialize bridge /// GRANDPA pallet pub fn initialization_data< @@ -928,7 +1521,7 @@ pub mod test_data { ); /// Simulates `HaulBlobExporter` and all its wrapping and captures generated plain bytes, - /// which are transfered over bridge. + /// which are transferred over bridge. pub(crate) fn simulate_message_exporter_on_bridged_chain< SourceNetwork: Get, DestinationNetwork: Get, diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/Cargo.toml b/cumulus/parachains/runtimes/collectives/collectives-polkadot/Cargo.toml index e66cef31e56f4817fa31def359d5c7e132ea9c49..ca83b84cd8fa60f4078fd6b341bb69850e55c27b 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/Cargo.toml +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/Cargo.toml @@ -4,12 +4,13 @@ version = "1.0.0" authors.workspace = true edition.workspace = true description = "Polkadot Collectives Parachain Runtime" +license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } hex-literal = { version = "0.4.1" } log = { version = "0.4.20", default-features = false } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } smallvec = "1.11.0" # Substrate @@ -64,6 +65,7 @@ xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkad # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } +pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue", default-features = false } cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false} @@ -73,7 +75,7 @@ cumulus-primitives-core = { path = "../../../../primitives/core", default-featur cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } pallet-collective-content = { path = "../../../pallets/collective-content", default-features = false } -parachain-info = { path = "../../../pallets/parachain-info", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } [build-dependencies] @@ -85,9 +87,11 @@ sp-io = { path = "../../../../../substrate/primitives/io", default-features = fa [features] default = [ "std" ] runtime-benchmarks = [ + "cumulus-pallet-dmp-queue/runtime-benchmarks", "cumulus-pallet-parachain-system/runtime-benchmarks", "cumulus-pallet-session-benchmarking/runtime-benchmarks", "cumulus-pallet-xcmp-queue/runtime-benchmarks", + "cumulus-primitives-core/runtime-benchmarks", "cumulus-primitives-utility/runtime-benchmarks", "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", 
@@ -99,6 +103,7 @@ runtime-benchmarks = [ "pallet-collective-content/runtime-benchmarks", "pallet-collective/runtime-benchmarks", "pallet-core-fellowship/runtime-benchmarks", + "pallet-message-queue/runtime-benchmarks", "pallet-multisig/runtime-benchmarks", "pallet-preimage/runtime-benchmarks", "pallet-proxy/runtime-benchmarks", @@ -134,6 +139,7 @@ try-runtime = [ "pallet-collective-content/try-runtime", "pallet-collective/try-runtime", "pallet-core-fellowship/try-runtime", + "pallet-message-queue/try-runtime", "pallet-multisig/try-runtime", "pallet-preimage/try-runtime", "pallet-proxy/try-runtime", @@ -176,6 +182,7 @@ std = [ "pallet-collective-content/std", "pallet-collective/std", "pallet-core-fellowship/std", + "pallet-message-queue/std", "pallet-multisig/std", "pallet-preimage/std", "pallet-proxy/std", diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs index ff16f93d8f54b39be8a553883949165acf653521..c3d671c9085509cf2cc70b7be0eac8973e2c3d66 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs @@ -65,6 +65,7 @@ use sp_version::NativeVersion; use sp_version::RuntimeVersion; use codec::{Decode, Encode, MaxEncodedLen}; +use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; use frame_support::{ construct_runtime, dispatch::DispatchClass, @@ -72,7 +73,7 @@ use frame_support::{ parameter_types, traits::{ fungible::HoldConsideration, ConstBool, ConstU16, ConstU32, ConstU64, ConstU8, - EitherOfDiverse, InstanceFilter, LinearStoragePrice, + EitherOfDiverse, InstanceFilter, LinearStoragePrice, TransformOrigin, }, weights::{ConstantMultiplier, Weight}, PalletId, @@ -84,13 +85,14 @@ use frame_system::{ pub use parachains_common as common; use parachains_common::{ impls::DealWithFees, + message_queue::*, polkadot::{account::*, consensus::*, currency::*, fee::WeightToFee}, AccountId, AuraId, Balance, BlockNumber, Hash, Header, Nonce, Signature, AVERAGE_ON_INITIALIZE_RATIO, DAYS, HOURS, MAXIMUM_BLOCK_WEIGHT, MINUTES, NORMAL_DISPATCH_RATIO, SLOT_DURATION, }; use sp_runtime::RuntimeDebug; -use xcm_config::{GovernanceLocation, XcmConfig, XcmOriginToTransactDispatchOrigin}; +use xcm_config::{GovernanceLocation, XcmOriginToTransactDispatchOrigin}; #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; @@ -98,8 +100,7 @@ pub use sp_runtime::BuildStorage; // Polkadot imports use pallet_xcm::{EnsureXcm, IsVoiceOfBody}; use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; -use xcm::latest::BodyId; -use xcm_executor::XcmExecutor; +use xcm::latest::{prelude::*, BodyId}; use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; @@ -214,6 +215,7 @@ impl pallet_balances::Config for Runtime { type MaxReserves = ConstU32<50>; type ReserveIdentifier = [u8; 8]; type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); type MaxHolds = ConstU32<1>; type MaxFreezes = ConstU32<0>; @@ -380,10 +382,11 @@ parameter_types! 
{
 }
 
 impl cumulus_pallet_parachain_system::Config for Runtime {
+	type WeightInfo = weights::cumulus_pallet_parachain_system::WeightInfo<Runtime>;
 	type RuntimeEvent = RuntimeEvent;
 	type OnSystemEvent = ();
 	type SelfParaId = parachain_info::Pallet<Runtime>;
-	type DmpMessageHandler = DmpQueue;
+	type DmpQueue = frame_support::traits::EnqueueWithOrigin<MessageQueue, RelayOrigin>;
 	type ReservedDmpWeight = ReservedDmpWeight;
 	type OutboundXcmpMessageSource = XcmpQueue;
 	type XcmpMessageHandler = XcmpQueue;
@@ -399,24 +402,70 @@ impl cumulus_pallet_parachain_system::Config for Runtime {
 
 impl parachain_info::Config for Runtime {}
 
+parameter_types! {
+	pub MessageQueueServiceWeight: Weight = Perbill::from_percent(35) * RuntimeBlockWeights::get().max_block;
+}
+
+impl pallet_message_queue::Config for Runtime {
+	type RuntimeEvent = RuntimeEvent;
+	type WeightInfo = weights::pallet_message_queue::WeightInfo<Runtime>;
+	#[cfg(feature = "runtime-benchmarks")]
+	type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor<
+		cumulus_primitives_core::AggregateMessageOrigin,
+	>;
+	#[cfg(not(feature = "runtime-benchmarks"))]
+	type MessageProcessor = xcm_builder::ProcessXcmMessage<
+		AggregateMessageOrigin,
+		xcm_executor::XcmExecutor<xcm_config::XcmConfig>,
+		RuntimeCall,
+	>;
+	type Size = u32;
+	// The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin:
+	type QueueChangeHandler = NarrowOriginToSibling<XcmpQueue>;
+	type QueuePausedQuery = NarrowOriginToSibling<XcmpQueue>;
+	type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>;
+	type MaxStale = sp_core::ConstU32<8>;
+	type ServiceWeight = MessageQueueServiceWeight;
+}
+
 impl cumulus_pallet_aura_ext::Config for Runtime {}
 
+parameter_types! {
+	/// The asset ID for the asset that we use to pay for message delivery fees.
+	pub FeeAssetId: AssetId = Concrete(xcm_config::DotLocation::get());
+	/// The base fee for the message delivery fees.
+	pub const BaseDeliveryFee: u128 = CENTS.saturating_mul(3);
+}
+
+pub type PriceForSiblingParachainDelivery = polkadot_runtime_common::xcm_sender::ExponentialPrice<
+	FeeAssetId,
+	BaseDeliveryFee,
+	TransactionByteFee,
+	XcmpQueue,
+>;
+
 impl cumulus_pallet_xcmp_queue::Config for Runtime {
 	type RuntimeEvent = RuntimeEvent;
-	type XcmExecutor = XcmExecutor<XcmConfig>;
 	type ChannelInfo = ParachainSystem;
 	type VersionWrapper = PolkadotXcm;
-	type ExecuteOverweightOrigin = EnsureRoot<AccountId>;
+	// Enqueue XCMP messages from siblings for later processing.
+	type XcmpQueue = TransformOrigin<MessageQueue, AggregateMessageOrigin, ParaId, ParaIdToSibling>;
+	type MaxInboundSuspended = sp_core::ConstU32<1_000>;
 	type ControllerOrigin = EitherOfDiverse<EnsureRoot<AccountId>, Fellows>;
 	type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin;
 	type WeightInfo = weights::cumulus_pallet_xcmp_queue::WeightInfo<Runtime>;
-	type PriceForSiblingDelivery = ();
+	type PriceForSiblingDelivery =
+		polkadot_runtime_common::xcm_sender::NoPriceForMessageDelivery<ParaId>;
+}
+
+parameter_types! {
+	pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent;
 }
 
 impl cumulus_pallet_dmp_queue::Config for Runtime {
+	type WeightInfo = weights::cumulus_pallet_dmp_queue::WeightInfo<Runtime>;
 	type RuntimeEvent = RuntimeEvent;
-	type XcmExecutor = XcmExecutor<XcmConfig>;
-	type ExecuteOverweightOrigin = EnsureRoot<AccountId>;
+	type DmpSink = frame_support::traits::EnqueueWithOrigin<MessageQueue, RelayOrigin>;
 }
 
 pub const PERIOD: u32 = 6 * HOURS;
@@ -609,6 +658,7 @@ construct_runtime!(
 		PolkadotXcm: pallet_xcm::{Pallet, Call, Storage, Event<T>, Origin, Config<T>} = 31,
 		CumulusXcm: cumulus_pallet_xcm::{Pallet, Event<T>, Origin} = 32,
 		DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event<T>} = 33,
+		MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event<T>} = 34,
 
 		// Handy utilities.
 		Utility: pallet_utility::{Pallet, Call, Event} = 40,
@@ -684,25 +734,24 @@ pub type Executive = frame_executive::Executive<
 	Migrations,
 >;
 
-#[cfg(feature = "runtime-benchmarks")]
-#[macro_use]
-extern crate frame_benchmarking;
-
 #[cfg(feature = "runtime-benchmarks")]
 mod benches {
-	define_benchmarks!(
+	frame_benchmarking::define_benchmarks!(
 		[frame_system, SystemBench::<Runtime>]
 		[pallet_balances, Balances]
+		[pallet_message_queue, MessageQueue]
 		[pallet_multisig, Multisig]
 		[pallet_proxy, Proxy]
 		[pallet_session, SessionBench::<Runtime>]
 		[pallet_utility, Utility]
 		[pallet_timestamp, Timestamp]
 		[pallet_collator_selection, CollatorSelection]
+		[cumulus_pallet_parachain_system, ParachainSystem]
 		[cumulus_pallet_xcmp_queue, XcmpQueue]
+		[cumulus_pallet_dmp_queue, DmpQueue]
 		[pallet_alliance, Alliance]
 		[pallet_collective, AllianceMotion]
-		[pallet_xcm, PolkadotXcm]
+		[pallet_xcm, PalletXcmExtrinsicsBenchmark::<Runtime>]
 		[pallet_preimage, Preimage]
 		[pallet_scheduler, Scheduler]
 		[pallet_referenda, FellowshipReferenda]
@@ -890,6 +939,7 @@ impl_runtime_apis! {
 			use frame_support::traits::StorageInfoTrait;
 			use frame_system_benchmarking::Pallet as SystemBench;
 			use cumulus_pallet_session_benchmarking::Pallet as SessionBench;
+			use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark;
 
 			let mut list = Vec::<BenchmarkList>::new();
 			list_benchmarks!(list, extra);
@@ -919,6 +969,29 @@ impl_runtime_apis! {
 			use cumulus_pallet_session_benchmarking::Pallet as SessionBench;
 			impl cumulus_pallet_session_benchmarking::Config for Runtime {}
 
+			use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark;
+			impl pallet_xcm::benchmarking::Config for Runtime {
+				fn reachable_dest() -> Option<MultiLocation> {
+					Some(Parent.into())
+				}
+
+				fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> {
+					// Relay/native token can be teleported between Collectives and Relay.
+					Some((
+						MultiAsset {
+							fun: Fungible(EXISTENTIAL_DEPOSIT),
+							id: Concrete(Parent.into())
+						}.into(),
+						Parent.into(),
+					))
+				}
+
+				fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> {
+					// Reserve transfers are disabled on Collectives.
+					None
+				}
+			}
+
 			let whitelist: Vec<TrackedStorageKey> = vec![
 				// Block Number
 				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(),
diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/cumulus_pallet_dmp_queue.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/cumulus_pallet_dmp_queue.rs
new file mode 100644
index 0000000000000000000000000000000000000000..cc41dcd6cbbb62c1392ae2e7517b5dfe920a5b85
--- /dev/null
+++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/cumulus_pallet_dmp_queue.rs
@@ -0,0 +1,131 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus. If not, see <http://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `cumulus_pallet_dmp_queue`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2023-10-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// target/production/polkadot-parachain
+// benchmark
+// pallet
+// --steps=50
+// --repeat=20
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=cumulus_pallet_dmp_queue
+// --chain=asset-hub-kusama-dev
+// --header=./cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `cumulus_pallet_dmp_queue`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> cumulus_pallet_dmp_queue::WeightInfo for WeightInfo<T> {
+	/// Storage: `DmpQueue::MigrationStatus` (r:1 w:1)
+	/// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`)
+	/// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0)
+	/// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0)
+	/// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1)
+	/// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1)
+	/// Storage: `MessageQueue::BookStateFor` (r:1 w:1)
+	/// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`)
+	/// Storage: `MessageQueue::ServiceHead` (r:1 w:1)
+	/// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `MessageQueue::Pages` (r:0 w:1)
+	/// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`)
+	fn on_idle_good_msg() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `65696`
+		//  Estimated: `69161`
+		// Minimum execution time: 124_651_000 picoseconds.
+ Weight::from_parts(127_857_000, 0) + .saturating_add(Weight::from_parts(0, 69161)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + fn on_idle_large_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65659` + // Estimated: `69124` + // Minimum execution time: 65_684_000 picoseconds. + Weight::from_parts(68_039_000, 0) + .saturating_add(Weight::from_parts(0, 69124)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn on_idle_overweight_good_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65726` + // Estimated: `69191` + // Minimum execution time: 117_657_000 picoseconds. 
+ Weight::from_parts(122_035_000, 0) + .saturating_add(Weight::from_parts(0, 69191)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(6)) + } + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + fn on_idle_overweight_large_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65689` + // Estimated: `69154` + // Minimum execution time: 59_799_000 picoseconds. + Weight::from_parts(61_354_000, 0) + .saturating_add(Weight::from_parts(0, 69154)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/cumulus_pallet_parachain_system.rs new file mode 100644 index 0000000000000000000000000000000000000000..0b7a2fc21cde4f12c821a0e89982db4813f3f832 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/cumulus_pallet_parachain_system.rs @@ -0,0 +1,80 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `cumulus_pallet_parachain_system` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-03-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` +//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("collectives-polkadot-dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --chain +// collectives-polkadot-dev +// --pallet +// cumulus_pallet_parachain_system +// --extrinsic +// * +// --execution +// wasm +// --wasm-execution +// compiled +// --output +// parachains/runtimes/collectives/collectives-polkadot/src/weights +// --steps +// 50 +// --repeat +// 20 + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::Weight}; +use sp_std::marker::PhantomData; + +/// Weight functions for `cumulus_pallet_parachain_system`. +pub struct WeightInfo(PhantomData); +impl cumulus_pallet_parachain_system::WeightInfo for WeightInfo { + /// Storage: ParachainSystem LastDmqMqcHead (r:1 w:1) + /// Proof Skipped: ParachainSystem LastDmqMqcHead (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: ParachainSystem ReservedDmpWeightOverride (r:1 w:0) + /// Proof Skipped: ParachainSystem ReservedDmpWeightOverride (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: ParachainSystem ProcessedDownwardMessages (r:0 w:1) + /// Proof Skipped: ParachainSystem ProcessedDownwardMessages (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: MessageQueue Pages (r:0 w:16) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// The range of component `n` is `[0, 1000]`. + fn enqueue_inbound_downward_messages(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `48` + // Estimated: `8121` + // Minimum execution time: 1_988_000 picoseconds. + Weight::from_parts(2_039_000, 0) + .saturating_add(Weight::from_parts(0, 8121)) + // Standard Error: 30_660 + .saturating_add(Weight::from_parts(24_419_204, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/cumulus_pallet_xcmp_queue.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/cumulus_pallet_xcmp_queue.rs index ccd9478bf10eab85eef8b80c0354b59e33e08d3d..e68c075bffc63e46668ffa84ce37143e848439fb 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/cumulus_pallet_xcmp_queue.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/cumulus_pallet_xcmp_queue.rs @@ -1,43 +1,38 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `cumulus_pallet_xcmp_queue` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 +//! HOSTNAME: `Olivers-MacBook-Pro.local`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// ./target/release/polkadot-parachain // benchmark // pallet -// --chain=collectives-polkadot-dev -// --wasm-execution=compiled -// --pallet=cumulus_pallet_xcmp_queue -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ +// --pallet +// cumulus-pallet-xcmp-queue +// --chain +// collectives-polkadot-dev +// --output +// cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/cumulus_pallet_xcmp_queue.rs +// --extrinsic +// #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -56,22 +51,98 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 5_136_000 picoseconds. - Weight::from_parts(5_399_000, 0) + // Minimum execution time: 5_000_000 picoseconds. 
+ Weight::from_parts(6_000_000, 0) .saturating_add(Weight::from_parts(0, 1627)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `XcmpQueue::QueueConfig` (r:1 w:1) + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn set_config_with_weight() -> Weight { + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn enqueue_xcmp_message() -> Weight { + // Proof Size summary in bytes: + // Measured: `148` + // Estimated: `3517` + // Minimum execution time: 14_000_000 picoseconds. + Weight::from_parts(14_000_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn suspend_channel() -> Weight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 5_056_000 picoseconds. - Weight::from_parts(5_301_000, 0) + // Minimum execution time: 3_000_000 picoseconds. + Weight::from_parts(3_000_000, 0) .saturating_add(Weight::from_parts(0, 1627)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn resume_channel() -> Weight { + // Proof Size summary in bytes: + // Measured: `177` + // Estimated: `1662` + // Minimum execution time: 4_000_000 picoseconds. + Weight::from_parts(5_000_000, 0) + .saturating_add(Weight::from_parts(0, 1662)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn take_first_concatenated_xcm() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 44_000_000 picoseconds. 
+ Weight::from_parts(45_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Storage: `XcmpQueue::InboundXcmpMessages` (r:1 w:1) + /// Proof: `XcmpQueue::InboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) + /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn on_idle_good_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65777` + // Estimated: `69242` + // Minimum execution time: 60_000_000 picoseconds. + Weight::from_parts(63_000_000, 0) + .saturating_add(Weight::from_parts(0, 69242)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + fn on_idle_large_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65776` + // Estimated: `69241` + // Minimum execution time: 41_000_000 picoseconds. + Weight::from_parts(43_000_000, 0) + .saturating_add(Weight::from_parts(0, 69241)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } } diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/frame_system.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/frame_system.rs index 31cd502d1927b0cfeac64774567c937d7ee33050..b6f1dc8dc08038a8c614f10914f9fd8c14fb10ca 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/frame_system.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/frame_system.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . 
+// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `frame_system` //! diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/mod.rs index b5a3a892f79a56f034b60cbb3c4e142a5ff6ef16..1d877fdbd2bbe2b18fbefbc6bd39357c8a0e21a6 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/mod.rs @@ -1,20 +1,21 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. pub mod block_weights; +pub mod cumulus_pallet_dmp_queue; +pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; @@ -25,6 +26,7 @@ pub mod pallet_collective; pub mod pallet_collective_content; pub mod pallet_core_fellowship_ambassador_core; pub mod pallet_core_fellowship_fellowship_core; +pub mod pallet_message_queue; pub mod pallet_multisig; pub mod pallet_preimage; pub mod pallet_proxy; diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_alliance.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_alliance.rs index 9e3acac46a4c9e7bc39d55cc5d928150aab3aaad..d8ede609a67c6df5c707504206b72e7382d2f7b4 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_alliance.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_alliance.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. 
+// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_alliance` //! diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_balances.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_balances.rs index dd0c02ab87306ac8d311c223a0c4b297b67552c4..6c1cf072257f0e0e7033bdbeaa1af5dcaec5a5f1 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_balances.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_balances.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_balances` //! 
diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_collator_selection.rs index ea237d602a9b5a1190b89aaf4fdf955bdbc9df58..03f3ff602a5b3e91c4e2ff90a4a3433d513079a1 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_collator_selection.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_collator_selection.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_collator_selection` //! @@ -122,7 +121,7 @@ impl pallet_collator_selection::WeightInfo for WeightIn } /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - fn set_candidacy_bond() -> Weight { + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` @@ -176,6 +175,30 @@ impl pallet_collator_selection::WeightInfo for WeightIn .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } + fn update_bond(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn take_candidate_slot(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. 
+ Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `System::BlockWeight` (r:1 w:1) diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_collective.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_collective.rs index 2d344ad0db7ba9e7b98303624e4f3f4a11c4087a..9133baa6120cff6f13bf3a3f45ec5db4b3fe5f3f 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_collective.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_collective.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_collective` //! diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_collective_content.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_collective_content.rs index e66907b9453060a29b5e6635fa03a31e0dc782ad..6be94db22db9f88db97f60db4d850fcd4179aa26 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_collective_content.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_collective_content.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_collective_content` //! diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_core_fellowship_fellowship_core.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_core_fellowship_fellowship_core.rs index 434986b03bba84c8ef43250c2192a946de09b286..471ee82ead729ea5abff616f0c9fe3a86704fd91 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_core_fellowship_fellowship_core.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_core_fellowship_fellowship_core.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_core_fellowship` //! diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_message_queue.rs new file mode 100644 index 0000000000000000000000000000000000000000..4bd71c4e7d497b84f9a5b910015e439b69e41ef9 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_message_queue.rs @@ -0,0 +1,179 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_message_queue` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-03-24, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("collectives-polkadot-dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --chain +// collectives-polkadot-dev +// --pallet +// pallet_message_queue +// --extrinsic +// * +// --execution +// wasm +// --wasm-execution +// compiled +// --output +// parachains/runtimes/collectives/collectives-polkadot/src/weights + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::Weight}; +use sp_std::marker::PhantomData; + +/// Weight functions for `pallet_message_queue`. +pub struct WeightInfo(PhantomData); +impl pallet_message_queue::WeightInfo for WeightInfo { + /// Storage: MessageQueue ServiceHead (r:1 w:0) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: MessageQueue BookStateFor (r:2 w:2) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn ready_ring_knit() -> Weight { + // Proof Size summary in bytes: + // Measured: `189` + // Estimated: `7534` + // Minimum execution time: 11_440_000 picoseconds. + Weight::from_parts(11_440_000, 0) + .saturating_add(Weight::from_parts(0, 7534)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: MessageQueue BookStateFor (r:2 w:2) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + fn ready_ring_unknit() -> Weight { + // Proof Size summary in bytes: + // Measured: `184` + // Estimated: `7534` + // Minimum execution time: 11_077_000 picoseconds. + Weight::from_parts(11_077_000, 0) + .saturating_add(Weight::from_parts(0, 7534)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn service_queue_base() -> Weight { + // Proof Size summary in bytes: + // Measured: `6` + // Estimated: `3517` + // Minimum execution time: 3_977_000 picoseconds. 
+ Weight::from_parts(3_977_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn service_page_base_completion() -> Weight { + // Proof Size summary in bytes: + // Measured: `72` + // Estimated: `69050` + // Minimum execution time: 4_831_000 picoseconds. + Weight::from_parts(4_831_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn service_page_base_no_completion() -> Weight { + // Proof Size summary in bytes: + // Measured: `72` + // Estimated: `69050` + // Minimum execution time: 5_192_000 picoseconds. + Weight::from_parts(5_192_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn service_page_item() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 58_750_000 picoseconds. + Weight::from_parts(58_750_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: MessageQueue BookStateFor (r:1 w:0) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn bump_service_head() -> Weight { + // Proof Size summary in bytes: + // Measured: `99` + // Estimated: `5007` + // Minimum execution time: 5_107_000 picoseconds. + Weight::from_parts(5_107_000, 0) + .saturating_add(Weight::from_parts(0, 5007)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn reap_page() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `72567` + // Minimum execution time: 46_814_000 picoseconds. + Weight::from_parts(46_814_000, 0) + .saturating_add(Weight::from_parts(0, 72567)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn execute_overweight_page_removed() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `72567` + // Minimum execution time: 52_510_000 picoseconds. 
+ Weight::from_parts(52_510_000, 0) + .saturating_add(Weight::from_parts(0, 72567)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn execute_overweight_page_updated() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `72567` + // Minimum execution time: 71_930_000 picoseconds. + Weight::from_parts(71_930_000, 0) + .saturating_add(Weight::from_parts(0, 72567)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_multisig.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_multisig.rs index a8dd58320cc204382c0160f9629e386ddd7851fb..a7827b7200906370039d245b02fcaec6d22430d2 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_multisig.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_multisig.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_multisig` //! diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_preimage.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_preimage.rs index e9f565d9387da08f35419baa196ce7ef77e265be..9b45c8758186d5b36ae5af158b35e4349cb7a1ed 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_preimage.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_preimage.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. 
+// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_preimage` //! diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_proxy.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_proxy.rs index faf100d23bb0d5c68b1da4d4421b05315ddfd3de..59d9f912bf11d92a9c214e9ff83aa880482d8faa 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_proxy.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_proxy.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_proxy` //! 
diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_ranked_collective_fellowship_collective.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_ranked_collective_fellowship_collective.rs index 7515aecbb525a4b4de59bd05e8329166a5515ff1..9c773c56ac398a7563427d5d33f837d3977028e1 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_ranked_collective_fellowship_collective.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_ranked_collective_fellowship_collective.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_ranked_collective` //! diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_referenda_fellowship_referenda.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_referenda_fellowship_referenda.rs index 5b4aed06899fcca022895488477ffb058ae76399..63f68833795f20023d8e9040f105d98bc161ef4d 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_referenda_fellowship_referenda.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_referenda_fellowship_referenda.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_referenda` //! diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_salary_fellowship_salary.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_salary_fellowship_salary.rs index 9bb7e68d3145c339505f9c6a729eca1217c02537..37680b4e5df719e61f174c3260bde023fd685623 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_salary_fellowship_salary.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_salary_fellowship_salary.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_salary` //! diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_scheduler.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_scheduler.rs index d30ac82bf059bc8a2c75b1e8bce75024da38bea3..cf5610df66574a38a388b7e36a56f87e41ff59c2 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_scheduler.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_scheduler.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_scheduler` //! diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_session.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_session.rs index 2af8ce29a196ef21d8f203ccb5d62a1bfeb9a6b6..2ac0804df8903dd5ab4184e21b49cb13b32b48dc 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_session.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_session.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_session` //! diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_timestamp.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_timestamp.rs index bc149ec63a176f09e226988d3344f29f9cf3e530..ca06f43f92e425b8d93b8bd394f0897f71627252 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_timestamp.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_timestamp.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_timestamp` //! diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_utility.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_utility.rs index 5d6b0cb8285c91a98c99e97c7eb948086d1fb2b8..c60a79d91da32c863604b1d220b2e6c8c4159063 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_utility.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_utility.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_utility` //! diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_xcm.rs index 738742b6108b79bb11520fdca1d191735cee1a0c..57e502841473bcfb0b92a7f8b43fa7b19ed7cfd8 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_xcm.rs @@ -17,27 +17,25 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=collectives-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=collectives-polkadot-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,6 +48,8 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -62,22 +62,22 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn send() -> Weight { // Proof Size summary in bytes: - // Measured: `111` - // Estimated: `3576` - // Minimum execution time: 27_795_000 picoseconds. - Weight::from_parts(28_215_000, 0) - .saturating_add(Weight::from_parts(0, 3576)) - .saturating_add(T::DbWeight::get().reads(5)) + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 25_050_000 picoseconds. + Weight::from_parts(26_382_000, 0) + .saturating_add(Weight::from_parts(0, 3610)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn teleport_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `32` + // Measured: `69` // Estimated: `1489` - // Minimum execution time: 23_847_000 picoseconds. - Weight::from_parts(24_332_000, 0) + // Minimum execution time: 21_625_000 picoseconds. + Weight::from_parts(22_076_000, 0) .saturating_add(Weight::from_parts(0, 1489)) .saturating_add(T::DbWeight::get().reads(1)) } @@ -107,8 +107,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_885_000 picoseconds. - Weight::from_parts(9_128_000, 0) + // Minimum execution time: 7_076_000 picoseconds. 
+ Weight::from_parts(7_378_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -118,8 +118,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_670_000 picoseconds. - Weight::from_parts(2_815_000, 0) + // Minimum execution time: 2_327_000 picoseconds. + Weight::from_parts(2_454_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -127,6 +127,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -141,16 +143,18 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_subscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `111` - // Estimated: `3576` - // Minimum execution time: 32_214_000 picoseconds. - Weight::from_parts(32_989_000, 0) - .saturating_add(Weight::from_parts(0, 3576)) - .saturating_add(T::DbWeight::get().reads(7)) + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 29_080_000 picoseconds. + Weight::from_parts(29_886_000, 0) + .saturating_add(Weight::from_parts(0, 3610)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -165,12 +169,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_unsubscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `294` - // Estimated: `3759` - // Minimum execution time: 33_638_000 picoseconds. - Weight::from_parts(34_206_000, 0) - .saturating_add(Weight::from_parts(0, 3759)) - .saturating_add(T::DbWeight::get().reads(6)) + // Measured: `363` + // Estimated: `3828` + // Minimum execution time: 30_746_000 picoseconds. + Weight::from_parts(31_631_000, 0) + .saturating_add(Weight::from_parts(0, 3828)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) @@ -179,8 +183,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_602_000 picoseconds. 
- Weight::from_parts(2_730_000, 0) + // Minimum execution time: 2_208_000 picoseconds. + Weight::from_parts(2_341_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -188,11 +192,11 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: - // Measured: `129` - // Estimated: `11019` - // Minimum execution time: 16_199_000 picoseconds. - Weight::from_parts(16_833_000, 0) - .saturating_add(Weight::from_parts(0, 11019)) + // Measured: `162` + // Estimated: `11052` + // Minimum execution time: 16_239_000 picoseconds. + Weight::from_parts(16_881_000, 0) + .saturating_add(Weight::from_parts(0, 11052)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -200,11 +204,11 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: - // Measured: `133` - // Estimated: `11023` - // Minimum execution time: 16_561_000 picoseconds. - Weight::from_parts(16_872_000, 0) - .saturating_add(Weight::from_parts(0, 11023)) + // Measured: `166` + // Estimated: `11056` + // Minimum execution time: 16_711_000 picoseconds. + Weight::from_parts(16_944_000, 0) + .saturating_add(Weight::from_parts(0, 11056)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -212,15 +216,17 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: - // Measured: `140` - // Estimated: `13505` - // Minimum execution time: 17_812_000 picoseconds. - Weight::from_parts(20_036_000, 0) - .saturating_add(Weight::from_parts(0, 13505)) + // Measured: `173` + // Estimated: `13538` + // Minimum execution time: 18_142_000 picoseconds. + Weight::from_parts(18_470_000, 0) + .saturating_add(Weight::from_parts(0, 13538)) .saturating_add(T::DbWeight::get().reads(5)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -233,39 +239,41 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn notify_current_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `178` - // Estimated: `6118` - // Minimum execution time: 30_153_000 picoseconds. - Weight::from_parts(31_366_000, 0) - .saturating_add(Weight::from_parts(0, 6118)) - .saturating_add(T::DbWeight::get().reads(7)) + // Measured: `212` + // Estimated: `6152` + // Minimum execution time: 27_687_000 picoseconds. 
+ Weight::from_parts(28_250_000, 0) + .saturating_add(Weight::from_parts(0, 6152)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: - // Measured: `172` - // Estimated: `8587` - // Minimum execution time: 9_465_000 picoseconds. - Weight::from_parts(9_743_000, 0) - .saturating_add(Weight::from_parts(0, 8587)) + // Measured: `206` + // Estimated: `8621` + // Minimum execution time: 9_675_000 picoseconds. + Weight::from_parts(9_992_000, 0) + .saturating_add(Weight::from_parts(0, 8621)) .saturating_add(T::DbWeight::get().reads(3)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `140` - // Estimated: `11030` - // Minimum execution time: 16_954_000 picoseconds. - Weight::from_parts(19_772_000, 0) - .saturating_add(Weight::from_parts(0, 11030)) + // Measured: `173` + // Estimated: `11063` + // Minimum execution time: 16_597_000 picoseconds. + Weight::from_parts(17_248_000, 0) + .saturating_add(Weight::from_parts(0, 11063)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -278,12 +286,38 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `182` - // Estimated: `11072` - // Minimum execution time: 37_302_000 picoseconds. - Weight::from_parts(38_124_000, 0) - .saturating_add(Weight::from_parts(0, 11072)) - .saturating_add(T::DbWeight::get().reads(9)) + // Measured: `215` + // Estimated: `11105` + // Minimum execution time: 34_649_000 picoseconds. + Weight::from_parts(35_475_000, 0) + .saturating_add(Weight::from_parts(0, 11105)) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn new_query() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `1588` + // Minimum execution time: 4_619_000 picoseconds. 
+ Weight::from_parts(4_756_000, 0) + .saturating_add(Weight::from_parts(0, 1588)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::Queries` (r:1 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn take_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `7740` + // Estimated: `11205` + // Minimum execution time: 26_721_000 picoseconds. + Weight::from_parts(27_412_000, 0) + .saturating_add(Weight::from_parts(0, 11205)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/xcm_config.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/xcm_config.rs index f802073bfbbb62bf662e0377da638fe426632ae3..71845650bd6cea163ed1c5715a6319614006401f 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/xcm_config.rs @@ -14,8 +14,9 @@ // limitations under the License. use super::{ - AccountId, AllPalletsWithSystem, Balances, Fellows, ParachainInfo, ParachainSystem, - PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, WeightToFee, XcmpQueue, + AccountId, AllPalletsWithSystem, Balances, BaseDeliveryFee, FeeAssetId, Fellows, ParachainInfo, + ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, + TransactionByteFee, WeightToFee, XcmpQueue, }; use frame_support::{ match_types, parameter_types, @@ -24,8 +25,9 @@ use frame_support::{ }; use frame_system::EnsureRoot; use pallet_xcm::XcmPassthrough; -use parachains_common::{impls::ToStakingPot, xcm_config::ConcreteNativeAssetFrom}; +use parachains_common::{impls::ToStakingPot, xcm_config::ConcreteAssetFromSystem}; use polkadot_parachain_primitives::primitives::Sibling; +use polkadot_runtime_common::xcm_sender::ExponentialPrice; use xcm::latest::prelude::*; use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, @@ -172,7 +174,7 @@ impl Contains for SafeCallFilter { ) | RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | RuntimeCall::PolkadotXcm(pallet_xcm::Call::force_xcm_version { .. }) | RuntimeCall::XcmpQueue(..) | - RuntimeCall::DmpQueue(..) | + RuntimeCall::MessageQueue(..) | RuntimeCall::Alliance( // `init_members` accepts unbounded vecs as arguments, // but the call can be initiated only by root origin. @@ -225,7 +227,7 @@ pub type Barrier = TrailingSetTopicAsId< // Allow XCMs with some computed origins to pass through. WithComputedOrigin< ( - // If the message is one that immediately attemps to pay for execution, then + // If the message is one that immediately attempts to pay for execution, then // allow it. AllowTopLevelPaidExecutionFrom, // Parent and its pluralities (i.e. governance bodies) get free execution. @@ -240,6 +242,10 @@ pub type Barrier = TrailingSetTopicAsId< >, >; +/// Cases where a remote origin is accepted as trusted Teleporter for a given asset: +/// - DOT with the parent Relay Chain and sibling parachains. +pub type TrustedTeleporters = ConcreteAssetFromSystem; + pub struct XcmConfig; impl xcm_executor::Config for XcmConfig { type RuntimeCall = RuntimeCall; @@ -249,8 +255,7 @@ impl xcm_executor::Config for XcmConfig { // Collectives does not recognize a reserve location for any asset. Users must teleport DOT // where allowed (e.g. 
with the Relay Chain). type IsReserve = (); - /// Only allow teleportation of DOT. - type IsTeleporter = ConcreteNativeAssetFrom; + type IsTeleporter = TrustedTeleporters; type UniversalLocation = UniversalLocation; type Barrier = Barrier; type Weigher = FixedWeightBounds; @@ -276,20 +281,18 @@ impl xcm_executor::Config for XcmConfig { /// Forms the basis for local origins sending/executing XCMs. pub type LocalOriginToLocation = SignedToAccountId32; +pub type PriceForParentDelivery = + ExponentialPrice; + /// The means for routing XCM messages which are not for local execution into the right message /// queues. pub type XcmRouter = WithUniqueTopic<( // Two routers - use UMP to communicate with the relay chain: - cumulus_primitives_utility::ParentAsUmp, + cumulus_primitives_utility::ParentAsUmp, // ..and XCMP to communicate with the sibling chains. XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); -} - /// Type to convert the Fellows origin to a Plurality `MultiLocation` value. pub type FellowsToPlurality = OriginToPluralityVoice; @@ -317,8 +320,6 @@ impl pallet_xcm::Config for Runtime { type SovereignAccountOf = LocationToAccountId; type MaxLockers = ConstU32<8>; type WeightInfo = crate::weights::pallet_xcm::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..94f2de33e90864c4933c34d4c4885fb947a85672 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml @@ -0,0 +1,230 @@ +[package] +name = "collectives-westend-runtime" +version = "1.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +description = "Westend Collectives Parachain Runtime" + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } +hex-literal = { version = "0.4.1" } +log = { version = "0.4.20", default-features = false } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +smallvec = "1.11.0" + +# Substrate +frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true} +frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} +frame-support = { path = "../../../../../substrate/frame/support", default-features = false} +frame-system = { path = "../../../../../substrate/frame/system", default-features = false} +frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true} +frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false} +frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true} +pallet-alliance = { path = "../../../../../substrate/frame/alliance", default-features = false} +pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} +pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false} +pallet-balances = { path = 
"../../../../../substrate/frame/balances", default-features = false} +pallet-collective = { path = "../../../../../substrate/frame/collective", default-features = false} +pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false} +pallet-preimage = { path = "../../../../../substrate/frame/preimage", default-features = false } +pallet-proxy = { path = "../../../../../substrate/frame/proxy", default-features = false} +pallet-scheduler = { path = "../../../../../substrate/frame/scheduler", default-features = false } +pallet-session = { path = "../../../../../substrate/frame/session", default-features = false} +pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false} +pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false} +pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false} +pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false} +pallet-referenda = { path = "../../../../../substrate/frame/referenda", default-features = false} +pallet-ranked-collective = { path = "../../../../../substrate/frame/ranked-collective", default-features = false} +pallet-core-fellowship = { path = "../../../../../substrate/frame/core-fellowship", default-features = false} +pallet-salary = { path = "../../../../../substrate/frame/salary", default-features = false} +sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} +sp-arithmetic = { path = "../../../../../substrate/primitives/arithmetic", default-features = false } +sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} +sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } +sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} +sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} +sp-session = { path = "../../../../../substrate/primitives/session", default-features = false} +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false} +sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false} +sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false} +sp-version = { path = "../../../../../substrate/primitives/version", default-features = false} + +# Polkadot +pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false} +polkadot-core-primitives = { path = "../../../../../polkadot/core-primitives", default-features = false} +polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false} +polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false} +xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} +xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", 
default-features = false} +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} +westend-runtime-constants = { path = "../../../../../polkadot/runtime/westend/constants", default-features = false} + +# Cumulus +cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } +pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } +cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue", default-features = false } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } +cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false} +cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } +cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } +cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } +cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } +pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } +pallet-collective-content = { path = "../../../pallets/collective-content", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } +parachains-common = { path = "../../../common", default-features = false } +testnets-common = { path = "../../../testnets-common", default-features = false } + +[build-dependencies] +substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } + +[dev-dependencies] +sp-io = { path = "../../../../../substrate/primitives/io", features = [ "std" ]} + +[features] +default = [ "std" ] +runtime-benchmarks = [ + "cumulus-pallet-dmp-queue/runtime-benchmarks", + "cumulus-pallet-parachain-system/runtime-benchmarks", + "cumulus-pallet-session-benchmarking/runtime-benchmarks", + "cumulus-pallet-xcmp-queue/runtime-benchmarks", + "cumulus-primitives-core/runtime-benchmarks", + "cumulus-primitives-utility/runtime-benchmarks", + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system-benchmarking/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-alliance/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-collator-selection/runtime-benchmarks", + "pallet-collective-content/runtime-benchmarks", + "pallet-collective/runtime-benchmarks", + "pallet-core-fellowship/runtime-benchmarks", + "pallet-message-queue/runtime-benchmarks", + "pallet-multisig/runtime-benchmarks", + "pallet-preimage/runtime-benchmarks", + "pallet-proxy/runtime-benchmarks", + "pallet-ranked-collective/runtime-benchmarks", + "pallet-referenda/runtime-benchmarks", + "pallet-salary/runtime-benchmarks", + "pallet-scheduler/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks", + "pallet-utility/runtime-benchmarks", + "pallet-xcm/runtime-benchmarks", + "parachains-common/runtime-benchmarks", + "polkadot-parachain-primitives/runtime-benchmarks", + "polkadot-runtime-common/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", + "testnets-common/runtime-benchmarks", + "xcm-builder/runtime-benchmarks", + "xcm-executor/runtime-benchmarks", +] +try-runtime = [ + 
"cumulus-pallet-aura-ext/try-runtime", + "cumulus-pallet-dmp-queue/try-runtime", + "cumulus-pallet-parachain-system/try-runtime", + "cumulus-pallet-xcm/try-runtime", + "cumulus-pallet-xcmp-queue/try-runtime", + "frame-executive/try-runtime", + "frame-support/try-runtime", + "frame-system/try-runtime", + "frame-try-runtime/try-runtime", + "pallet-alliance/try-runtime", + "pallet-aura/try-runtime", + "pallet-authorship/try-runtime", + "pallet-balances/try-runtime", + "pallet-collator-selection/try-runtime", + "pallet-collective-content/try-runtime", + "pallet-collective/try-runtime", + "pallet-core-fellowship/try-runtime", + "pallet-message-queue/try-runtime", + "pallet-multisig/try-runtime", + "pallet-preimage/try-runtime", + "pallet-proxy/try-runtime", + "pallet-ranked-collective/try-runtime", + "pallet-referenda/try-runtime", + "pallet-salary/try-runtime", + "pallet-scheduler/try-runtime", + "pallet-session/try-runtime", + "pallet-timestamp/try-runtime", + "pallet-transaction-payment/try-runtime", + "pallet-utility/try-runtime", + "pallet-xcm/try-runtime", + "parachain-info/try-runtime", + "polkadot-runtime-common/try-runtime", + "sp-runtime/try-runtime", +] +std = [ + "codec/std", + "cumulus-pallet-aura-ext/std", + "cumulus-pallet-dmp-queue/std", + "cumulus-pallet-parachain-system/std", + "cumulus-pallet-session-benchmarking/std", + "cumulus-pallet-xcm/std", + "cumulus-pallet-xcmp-queue/std", + "cumulus-primitives-core/std", + "cumulus-primitives-utility/std", + "frame-benchmarking?/std", + "frame-executive/std", + "frame-support/std", + "frame-system-benchmarking?/std", + "frame-system-rpc-runtime-api/std", + "frame-system/std", + "frame-try-runtime?/std", + "log/std", + "pallet-alliance/std", + "pallet-aura/std", + "pallet-authorship/std", + "pallet-balances/std", + "pallet-collator-selection/std", + "pallet-collective-content/std", + "pallet-collective/std", + "pallet-core-fellowship/std", + "pallet-message-queue/std", + "pallet-multisig/std", + "pallet-preimage/std", + "pallet-proxy/std", + "pallet-ranked-collective/std", + "pallet-referenda/std", + "pallet-salary/std", + "pallet-scheduler/std", + "pallet-session/std", + "pallet-timestamp/std", + "pallet-transaction-payment-rpc-runtime-api/std", + "pallet-transaction-payment/std", + "pallet-utility/std", + "pallet-xcm/std", + "parachain-info/std", + "parachains-common/std", + "polkadot-core-primitives/std", + "polkadot-parachain-primitives/std", + "polkadot-runtime-common/std", + "scale-info/std", + "sp-api/std", + "sp-arithmetic/std", + "sp-block-builder/std", + "sp-consensus-aura/std", + "sp-core/std", + "sp-genesis-builder/std", + "sp-inherents/std", + "sp-offchain/std", + "sp-runtime/std", + "sp-session/std", + "sp-std/std", + "sp-storage/std", + "sp-transaction-pool/std", + "sp-version/std", + "substrate-wasm-builder", + "testnets-common/std", + "westend-runtime-constants/std", + "xcm-builder/std", + "xcm-executor/std", + "xcm/std", +] + +experimental = [ "pallet-aura/experimental" ] diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/build.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/build.rs new file mode 100644 index 0000000000000000000000000000000000000000..60f8a125129ff1344a1799246e931acdb1d139d5 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/build.rs @@ -0,0 +1,26 @@ +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#[cfg(feature = "std")] +fn main() { + substrate_wasm_builder::WasmBuilder::new() + .with_current_project() + .export_heap_base() + .import_memory() + .build() +} + +#[cfg(not(feature = "std"))] +fn main() {} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..18c1466bf3624088ded34e4442691d65e6d59afd --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/mod.rs @@ -0,0 +1,262 @@ +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The Ambassador Program. +//! +//! The module defines the following on-chain functionality of the Ambassador Program: +//! +//! - Managed set of program members, where every member has a [rank](ranks) +//! (via [AmbassadorCollective](pallet_ranked_collective)). +//! - Referendum functionality for the program members to propose, vote on, and execute +//! proposals on behalf of the members of a certain [rank](Origin) +//! (via [AmbassadorReferenda](pallet_referenda)). +//! - Managed content (charter, announcements) (via [pallet_collective_content]). +//! - Promotion and demotion periods, register of members' activity, and rank based salaries +//! (via [AmbassadorCore](pallet_core_fellowship)). +//! - Members' salaries (via [AmbassadorSalary](pallet_salary), requiring a member to be +//! imported or inducted into [AmbassadorCore](pallet_core_fellowship)). + +pub mod origins; +mod tracks; + +use super::*; +use crate::xcm_config::{FellowshipAdminBodyId, WndAssetHub}; +use frame_support::traits::{EitherOf, MapSuccess, TryMapSuccess}; +pub use origins::pallet_origins as pallet_ambassador_origins; +use origins::pallet_origins::{ + EnsureAmbassadorsVoice, EnsureAmbassadorsVoiceFrom, EnsureHeadAmbassadorsVoice, Origin, +}; +use parachains_common::polkadot::account; +use sp_core::ConstU128; +use sp_runtime::traits::{CheckedReduceBy, ConstU16, ConvertToValue, Replace}; +use xcm::prelude::*; +use xcm_builder::{AliasesIntoAccountId32, PayOverXcm}; + +/// The Ambassador Program's member ranks. 
+pub mod ranks { + use pallet_ranked_collective::Rank; + + #[allow(dead_code)] + pub const CANDIDATE: Rank = 0; + pub const AMBASSADOR_TIER_1: Rank = 1; + pub const AMBASSADOR_TIER_2: Rank = 2; + pub const SENIOR_AMBASSADOR_TIER_3: Rank = 3; + pub const SENIOR_AMBASSADOR_TIER_4: Rank = 4; + pub const HEAD_AMBASSADOR_TIER_5: Rank = 5; + pub const HEAD_AMBASSADOR_TIER_6: Rank = 6; + pub const HEAD_AMBASSADOR_TIER_7: Rank = 7; + pub const MASTER_AMBASSADOR_TIER_8: Rank = 8; + pub const MASTER_AMBASSADOR_TIER_9: Rank = 9; +} + +impl pallet_ambassador_origins::Config for Runtime {} + +pub type AmbassadorCollectiveInstance = pallet_ranked_collective::Instance2; + +/// Demotion is by any of: +/// - Root can demote arbitrarily. +/// - the FellowshipAdmin origin (i.e. token holder referendum); +/// - a senior members vote by the rank two above the current rank. +pub type DemoteOrigin = EitherOf< + frame_system::EnsureRootWithSuccess>, + EitherOf< + MapSuccess< + EnsureXcm>, + Replace>, + >, + TryMapSuccess< + EnsureAmbassadorsVoiceFrom>, + CheckedReduceBy>, + >, + >, +>; + +/// Promotion and approval (rank-retention) is by any of: +/// - Root can promote arbitrarily. +/// - the FellowshipAdmin origin (i.e. token holder referendum); +/// - a senior members vote by the rank two above the new/current rank. +/// - a member of rank `5` or above can add a candidate (rank `0`). +pub type PromoteOrigin = EitherOf< + DemoteOrigin, + TryMapSuccess< + pallet_ranked_collective::EnsureMember< + Runtime, + AmbassadorCollectiveInstance, + { ranks::HEAD_AMBASSADOR_TIER_5 }, + >, + Replace>, + >, +>; + +impl pallet_ranked_collective::Config for Runtime { + type WeightInfo = weights::pallet_ranked_collective_ambassador_collective::WeightInfo; + type RuntimeEvent = RuntimeEvent; + type PromoteOrigin = PromoteOrigin; + type DemoteOrigin = DemoteOrigin; + type Polls = AmbassadorReferenda; + type MinRankOfClass = sp_runtime::traits::Identity; + type VoteWeight = pallet_ranked_collective::Linear; +} + +parameter_types! { + pub const AlarmInterval: BlockNumber = 1; + pub const SubmissionDeposit: Balance = 0; + pub const UndecidingTimeout: BlockNumber = 7 * DAYS; + // The Ambassador Referenda pallet account, used as a temporary place to deposit a slashed + // imbalance before teleport to the treasury. + pub AmbassadorPalletAccount: AccountId = account::AMBASSADOR_REFERENDA_PALLET_ID.into_account_truncating(); +} + +pub type AmbassadorReferendaInstance = pallet_referenda::Instance2; + +impl pallet_referenda::Config for Runtime { + type WeightInfo = weights::pallet_referenda_ambassador_referenda::WeightInfo; + type RuntimeCall = RuntimeCall; + type RuntimeEvent = RuntimeEvent; + type Scheduler = Scheduler; + type Currency = Balances; + // A proposal can be submitted by a member of the Ambassador Program of + // [ranks::SENIOR_AMBASSADOR_TIER_3] rank or higher. + type SubmitOrigin = pallet_ranked_collective::EnsureMember< + Runtime, + AmbassadorCollectiveInstance, + { ranks::SENIOR_AMBASSADOR_TIER_3 }, + >; + type CancelOrigin = EitherOf, EnsureHeadAmbassadorsVoice>; + type KillOrigin = EitherOf, EnsureHeadAmbassadorsVoice>; + type Slash = ToParentTreasury; + type Votes = pallet_ranked_collective::Votes; + type Tally = pallet_ranked_collective::TallyOf; + type SubmissionDeposit = SubmissionDeposit; + type MaxQueued = ConstU32<20>; + type UndecidingTimeout = UndecidingTimeout; + type AlarmInterval = AlarmInterval; + type Tracks = tracks::TracksInfo; + type Preimages = Preimage; +} + +parameter_types! 
{ + pub const AnnouncementLifetime: BlockNumber = 180 * DAYS; + pub const MaxAnnouncements: u32 = 50; +} + +pub type AmbassadorContentInstance = pallet_collective_content::Instance1; + +impl pallet_collective_content::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type CharterOrigin = EitherOf, EnsureHeadAmbassadorsVoice>; + type AnnouncementLifetime = AnnouncementLifetime; + // An announcement can be submitted by a Senior Ambassador member or an ambassador plurality + // voice taken via referendum. + type AnnouncementOrigin = EitherOfDiverse< + pallet_ranked_collective::EnsureMember< + Runtime, + AmbassadorCollectiveInstance, + { ranks::SENIOR_AMBASSADOR_TIER_3 }, + >, + EnsureAmbassadorsVoice, + >; + type MaxAnnouncements = MaxAnnouncements; + type WeightInfo = weights::pallet_collective_content::WeightInfo; +} + +pub type AmbassadorCoreInstance = pallet_core_fellowship::Instance2; + +impl pallet_core_fellowship::Config for Runtime { + type WeightInfo = weights::pallet_core_fellowship_ambassador_core::WeightInfo; + type RuntimeEvent = RuntimeEvent; + type Members = pallet_ranked_collective::Pallet; + type Balance = Balance; + // Parameters are set by any of: + // - Root; + // - the FellowshipAdmin origin (i.e. token holder referendum); + // - a vote among all Head Ambassadors. + type ParamsOrigin = EitherOfDiverse< + EnsureRoot, + EitherOfDiverse< + EnsureXcm>, + EnsureHeadAmbassadorsVoice, + >, + >; + // Induction (creating a candidate) is by any of: + // - Root; + // - the FellowshipAdmin origin (i.e. token holder referendum); + // - a single Head Ambassador; + // - a vote among all senior members. + type InductOrigin = EitherOfDiverse< + EnsureRoot, + EitherOfDiverse< + EnsureXcm>, + EitherOfDiverse< + pallet_ranked_collective::EnsureMember< + Runtime, + AmbassadorCollectiveInstance, + { ranks::HEAD_AMBASSADOR_TIER_5 }, + >, + EnsureAmbassadorsVoiceFrom>, + >, + >, + >; + type ApproveOrigin = PromoteOrigin; + type PromoteOrigin = PromoteOrigin; + type EvidenceSize = ConstU32<65536>; +} + +pub type AmbassadorSalaryInstance = pallet_salary::Instance2; + +parameter_types! { + // The interior location on AssetHub for the paying account. This is the Ambassador Salary + // pallet instance (which sits at index 74). This sovereign account will need funding. + pub AmbassadorSalaryLocation: InteriorMultiLocation = PalletInstance(74).into(); +} + +/// [`PayOverXcm`] setup to pay the Ambassador salary on the AssetHub in WND. +pub type AmbassadorSalaryPaymaster = PayOverXcm< + AmbassadorSalaryLocation, + crate::xcm_config::XcmRouter, + crate::PolkadotXcm, + ConstU32<{ 6 * HOURS }>, + AccountId, + (), + ConvertToValue, + AliasesIntoAccountId32<(), AccountId>, +>; + +impl pallet_salary::Config for Runtime { + type WeightInfo = weights::pallet_salary_ambassador_salary::WeightInfo; + type RuntimeEvent = RuntimeEvent; + + #[cfg(not(feature = "runtime-benchmarks"))] + type Paymaster = AmbassadorSalaryPaymaster; + #[cfg(feature = "runtime-benchmarks")] + type Paymaster = crate::impls::benchmarks::PayWithEnsure< + AmbassadorSalaryPaymaster, + crate::impls::benchmarks::OpenHrmpChannel>, + >; + type Members = pallet_ranked_collective::Pallet; + + #[cfg(not(feature = "runtime-benchmarks"))] + type Salary = pallet_core_fellowship::Pallet; + #[cfg(feature = "runtime-benchmarks")] + type Salary = frame_support::traits::tokens::ConvertRank< + crate::impls::benchmarks::RankToSalary, + >; + // 15 days to register for a salary payment. 
+ type RegistrationPeriod = ConstU32<{ 15 * DAYS }>; + // 15 days to claim the salary payment. + type PayoutPeriod = ConstU32<{ 15 * DAYS }>; + // Total monthly salary budget. + type Budget = ConstU128<{ 10_000 * DOLLARS }>; +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/origins.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/origins.rs new file mode 100644 index 0000000000000000000000000000000000000000..3ce8a6b9e5d1bf7f70dda395f1998fd66d20cbeb --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/origins.rs @@ -0,0 +1,135 @@ +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The Ambassador Program's origins. + +#[frame_support::pallet] +pub mod pallet_origins { + use crate::ambassador::ranks; + use frame_support::pallet_prelude::*; + use pallet_ranked_collective::Rank; + + #[pallet::pallet] + pub struct Pallet(PhantomData); + + /// The pallet configuration trait. + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[derive(PartialEq, Eq, Clone, MaxEncodedLen, Encode, Decode, TypeInfo, RuntimeDebug)] + #[pallet::origin] + pub enum Origin { + /// Plurality voice of the [ranks::AMBASSADOR_TIER_1] members or above given via + /// referendum. + Ambassadors, + /// Plurality voice of the [ranks::AMBASSADOR_TIER_2] members or above given via + /// referendum. + AmbassadorsTier2, + /// Plurality voice of the [ranks::SENIOR_AMBASSADOR_TIER_3] members or above given via + /// referendum. + SeniorAmbassadors, + /// Plurality voice of the [ranks::SENIOR_AMBASSADOR_TIER_4] members or above given via + /// referendum. + SeniorAmbassadorsTier4, + /// Plurality voice of the [ranks::HEAD_AMBASSADOR_TIER_5] members or above given via + /// referendum. + HeadAmbassadors, + /// Plurality voice of the [ranks::HEAD_AMBASSADOR_TIER_6] members or above given via + /// referendum. + HeadAmbassadorsTier6, + /// Plurality voice of the [ranks::HEAD_AMBASSADOR_TIER_7] members or above given via + /// referendum. + HeadAmbassadorsTier7, + /// Plurality voice of the [ranks::MASTER_AMBASSADOR_TIER_8] members or above given via + /// referendum. + MasterAmbassadors, + /// Plurality voice of the [ranks::MASTER_AMBASSADOR_TIER_9] members or above given via + /// referendum. + MasterAmbassadorsTier9, + } + + impl Origin { + /// Returns the rank that the origin `self` speaks for, or `None` if it doesn't speak for + /// any. 
+ pub fn as_voice(&self) -> Option { + Some(match &self { + Origin::Ambassadors => ranks::AMBASSADOR_TIER_1, + Origin::AmbassadorsTier2 => ranks::AMBASSADOR_TIER_2, + Origin::SeniorAmbassadors => ranks::SENIOR_AMBASSADOR_TIER_3, + Origin::SeniorAmbassadorsTier4 => ranks::SENIOR_AMBASSADOR_TIER_4, + Origin::HeadAmbassadors => ranks::HEAD_AMBASSADOR_TIER_5, + Origin::HeadAmbassadorsTier6 => ranks::HEAD_AMBASSADOR_TIER_6, + Origin::HeadAmbassadorsTier7 => ranks::HEAD_AMBASSADOR_TIER_7, + Origin::MasterAmbassadors => ranks::MASTER_AMBASSADOR_TIER_8, + Origin::MasterAmbassadorsTier9 => ranks::MASTER_AMBASSADOR_TIER_9, + }) + } + } + + /// Implementation of the [EnsureOrigin] trait for the [Origin::HeadAmbassadors] origin. + pub struct EnsureHeadAmbassadorsVoice; + impl> + From> EnsureOrigin for EnsureHeadAmbassadorsVoice { + type Success = (); + fn try_origin(o: O) -> Result { + o.into().and_then(|o| match o { + Origin::HeadAmbassadors => Ok(()), + r => Err(O::from(r)), + }) + } + + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin() -> Result { + Ok(O::from(Origin::HeadAmbassadors)) + } + } + + /// Implementation of the [EnsureOrigin] trait for the plurality voice [Origin]s + /// from a given rank `R` with the success result of the corresponding [Rank]. + pub struct EnsureAmbassadorsVoiceFrom(PhantomData); + impl, O: Into> + From> EnsureOrigin + for EnsureAmbassadorsVoiceFrom + { + type Success = Rank; + fn try_origin(o: O) -> Result { + o.into().and_then(|o| match Origin::as_voice(&o) { + Some(r) if r >= R::get() => Ok(r), + _ => Err(O::from(o)), + }) + } + + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin() -> Result { + ranks::MASTER_AMBASSADOR_TIER_9 + .ge(&R::get()) + .then(|| O::from(Origin::MasterAmbassadorsTier9)) + .ok_or(()) + } + } + + /// Implementation of the [EnsureOrigin] trait for the plurality voice [Origin]s with the + /// success result of the corresponding [Rank]. + pub struct EnsureAmbassadorsVoice; + impl> + From> EnsureOrigin for EnsureAmbassadorsVoice { + type Success = Rank; + fn try_origin(o: O) -> Result { + o.into().and_then(|o| Origin::as_voice(&o).ok_or(O::from(o))) + } + + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin() -> Result { + Ok(O::from(Origin::MasterAmbassadorsTier9)) + } + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/tracks.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/tracks.rs new file mode 100644 index 0000000000000000000000000000000000000000..d4a2d3bbf1c7becba8ea6ed560496a0a8114b185 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/tracks.rs @@ -0,0 +1,282 @@ +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The Ambassador Program's referenda voting tracks. 
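+//!
+//! One track is defined per ambassador rank, nine in total. All nine tracks currently share the
+//! same parameters (decision deposit, periods and approval/support curves); they differ only in
+//! the plurality origin that is routed to them (see `track_for` below).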
+ +use super::Origin; +use crate::{Balance, BlockNumber, RuntimeOrigin, DAYS, DOLLARS, HOURS}; +use sp_runtime::Perbill; + +/// Referendum `TrackId` type. +pub type TrackId = u16; + +/// Referendum track IDs. +pub mod constants { + use super::TrackId; + + pub const AMBASSADOR_TIER_1: TrackId = 1; + pub const AMBASSADOR_TIER_2: TrackId = 2; + pub const SENIOR_AMBASSADOR_TIER_3: TrackId = 3; + pub const SENIOR_AMBASSADOR_TIER_4: TrackId = 4; + pub const HEAD_AMBASSADOR_TIER_5: TrackId = 5; + pub const HEAD_AMBASSADOR_TIER_6: TrackId = 6; + pub const HEAD_AMBASSADOR_TIER_7: TrackId = 7; + pub const MASTER_AMBASSADOR_TIER_8: TrackId = 8; + pub const MASTER_AMBASSADOR_TIER_9: TrackId = 9; +} + +/// The type implementing the [`pallet_referenda::TracksInfo`] trait for referenda pallet. +pub struct TracksInfo; + +/// Information on the voting tracks. +impl pallet_referenda::TracksInfo for TracksInfo { + type Id = TrackId; + + type RuntimeOrigin = ::PalletsOrigin; + + /// Return the array of available tracks and their information. + fn tracks() -> &'static [(Self::Id, pallet_referenda::TrackInfo)] { + static DATA: [(TrackId, pallet_referenda::TrackInfo); 9] = [ + ( + constants::AMBASSADOR_TIER_1, + pallet_referenda::TrackInfo { + name: "ambassador tier 1", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 24 * HOURS, + decision_period: 1 * DAYS, + confirm_period: 24 * HOURS, + min_enactment_period: 1 * HOURS, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(10), + ceil: Perbill::from_percent(50), + }, + }, + ), + ( + constants::AMBASSADOR_TIER_2, + pallet_referenda::TrackInfo { + name: "ambassador tier 2", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 24 * HOURS, + decision_period: 1 * DAYS, + confirm_period: 24 * HOURS, + min_enactment_period: 1 * HOURS, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(10), + ceil: Perbill::from_percent(50), + }, + }, + ), + ( + constants::SENIOR_AMBASSADOR_TIER_3, + pallet_referenda::TrackInfo { + name: "senior ambassador tier 3", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 24 * HOURS, + decision_period: 1 * DAYS, + confirm_period: 24 * HOURS, + min_enactment_period: 1 * HOURS, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(10), + ceil: Perbill::from_percent(50), + }, + }, + ), + ( + constants::SENIOR_AMBASSADOR_TIER_4, + pallet_referenda::TrackInfo { + name: "senior ambassador tier 4", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 24 * HOURS, + decision_period: 1 * DAYS, + confirm_period: 24 * HOURS, + min_enactment_period: 1 * HOURS, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + 
min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(10), + ceil: Perbill::from_percent(50), + }, + }, + ), + ( + constants::HEAD_AMBASSADOR_TIER_5, + pallet_referenda::TrackInfo { + name: "head ambassador tier 5", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 24 * HOURS, + decision_period: 1 * DAYS, + confirm_period: 24 * HOURS, + min_enactment_period: 1 * HOURS, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(10), + ceil: Perbill::from_percent(50), + }, + }, + ), + ( + constants::HEAD_AMBASSADOR_TIER_6, + pallet_referenda::TrackInfo { + name: "head ambassador tier 6", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 24 * HOURS, + decision_period: 1 * DAYS, + confirm_period: 24 * HOURS, + min_enactment_period: 1 * HOURS, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(10), + ceil: Perbill::from_percent(50), + }, + }, + ), + ( + constants::HEAD_AMBASSADOR_TIER_7, + pallet_referenda::TrackInfo { + name: "head ambassador tier 7", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 24 * HOURS, + decision_period: 1 * DAYS, + confirm_period: 24 * HOURS, + min_enactment_period: 1 * HOURS, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(10), + ceil: Perbill::from_percent(50), + }, + }, + ), + ( + constants::MASTER_AMBASSADOR_TIER_8, + pallet_referenda::TrackInfo { + name: "master ambassador tier 8", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 24 * HOURS, + decision_period: 1 * DAYS, + confirm_period: 24 * HOURS, + min_enactment_period: 1 * HOURS, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(10), + ceil: Perbill::from_percent(50), + }, + }, + ), + ( + constants::MASTER_AMBASSADOR_TIER_9, + pallet_referenda::TrackInfo { + name: "master ambassador tier 9", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 24 * HOURS, + decision_period: 1 * DAYS, + confirm_period: 24 * HOURS, + min_enactment_period: 1 * HOURS, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(10), + ceil: Perbill::from_percent(50), + }, + }, + ), + ]; + &DATA[..] + } + + /// Determine the voting track for the given `origin`. 
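+	///
+	/// Each plurality `Origin` maps to the track whose ID equals the rank it speaks for, e.g.
+	/// `Origin::SeniorAmbassadors` is decided on the `SENIOR_AMBASSADOR_TIER_3` track. Under the
+	/// `runtime-benchmarks` feature the root origin is additionally routed to the highest track.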
+ fn track_for(id: &Self::RuntimeOrigin) -> Result { + #[cfg(feature = "runtime-benchmarks")] + { + // For benchmarks, we enable a root origin. + // It is important that this is not available in production! + let root: Self::RuntimeOrigin = frame_system::RawOrigin::Root.into(); + if &root == id { + return Ok(constants::MASTER_AMBASSADOR_TIER_9) + } + } + + match Origin::try_from(id.clone()) { + Ok(Origin::Ambassadors) => Ok(constants::AMBASSADOR_TIER_1), + Ok(Origin::AmbassadorsTier2) => Ok(constants::AMBASSADOR_TIER_2), + Ok(Origin::SeniorAmbassadors) => Ok(constants::SENIOR_AMBASSADOR_TIER_3), + Ok(Origin::SeniorAmbassadorsTier4) => Ok(constants::SENIOR_AMBASSADOR_TIER_4), + Ok(Origin::HeadAmbassadors) => Ok(constants::HEAD_AMBASSADOR_TIER_5), + Ok(Origin::HeadAmbassadorsTier6) => Ok(constants::HEAD_AMBASSADOR_TIER_6), + Ok(Origin::HeadAmbassadorsTier7) => Ok(constants::HEAD_AMBASSADOR_TIER_7), + Ok(Origin::MasterAmbassadors) => Ok(constants::MASTER_AMBASSADOR_TIER_8), + Ok(Origin::MasterAmbassadorsTier9) => Ok(constants::MASTER_AMBASSADOR_TIER_9), + _ => Err(()), + } + } +} + +// implements [`frame_support::traits::Get`] for [`TracksInfo`] +pallet_referenda::impl_tracksinfo_get!(TracksInfo, Balance, BlockNumber); diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..b7412705dde79df9af77b56dad99c420b8b5bc58 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs @@ -0,0 +1,238 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! The Westend Technical Fellowship. + +mod origins; +mod tracks; +use crate::{ + impls::ToParentTreasury, + weights, + xcm_config::{FellowshipAdminBodyId, UsdtAssetHub}, + AccountId, Balance, Balances, FellowshipReferenda, GovernanceLocation, Preimage, Runtime, + RuntimeCall, RuntimeEvent, RuntimeOrigin, Scheduler, WestendTreasuryAccount, DAYS, +}; +use frame_support::{ + parameter_types, + traits::{EitherOf, EitherOfDiverse, MapSuccess, OriginTrait, TryWithMorphedArg}, +}; +use frame_system::EnsureRootWithSuccess; +pub use origins::{ + pallet_origins as pallet_fellowship_origins, Architects, EnsureCanPromoteTo, EnsureCanRetainAt, + EnsureFellowship, Fellows, Masters, Members, ToVoice, +}; +use pallet_ranked_collective::EnsureOfRank; +use pallet_xcm::{EnsureXcm, IsVoiceOfBody}; +use parachains_common::{polkadot::account, HOURS}; +use sp_core::{ConstU128, ConstU32}; +use sp_runtime::traits::{AccountIdConversion, ConstU16, ConvertToValue, Replace, TakeFirst}; +use xcm_builder::{AliasesIntoAccountId32, PayOverXcm}; + +#[cfg(feature = "runtime-benchmarks")] +use crate::impls::benchmarks::{OpenHrmpChannel, PayWithEnsure}; + +/// The Fellowship members' ranks. 
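+///
+/// Ranks follow the Dan grades I through IX; several have named aliases used by the origins
+/// and origin-checkers below: Members (I Dan), Fellows (III Dan), Architects (IV Dan) and
+/// Masters (VII Dan).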
+pub mod ranks { + use pallet_ranked_collective::Rank; + + pub const DAN_1: Rank = 1; // aka Members. + pub const DAN_2: Rank = 2; + pub const DAN_3: Rank = 3; // aka Fellows. + pub const DAN_4: Rank = 4; // aka Architects. + pub const DAN_5: Rank = 5; + pub const DAN_6: Rank = 6; + pub const DAN_7: Rank = 7; // aka Masters. + pub const DAN_8: Rank = 8; + pub const DAN_9: Rank = 9; +} + +parameter_types! { + // Referenda pallet account, used to temporarily deposit slashed imbalance before teleporting. + pub ReferendaPalletAccount: AccountId = account::REFERENDA_PALLET_ID.into_account_truncating(); +} + +impl pallet_fellowship_origins::Config for Runtime {} + +pub type FellowshipReferendaInstance = pallet_referenda::Instance1; + +impl pallet_referenda::Config for Runtime { + type WeightInfo = weights::pallet_referenda_fellowship_referenda::WeightInfo; + type RuntimeCall = RuntimeCall; + type RuntimeEvent = RuntimeEvent; + type Scheduler = Scheduler; + type Currency = Balances; + // Fellows can submit proposals. + type SubmitOrigin = EitherOf< + pallet_ranked_collective::EnsureMember, + MapSuccess< + TryWithMorphedArg< + RuntimeOrigin, + ::PalletsOrigin, + ToVoice, + EnsureOfRank, + (AccountId, u16), + >, + TakeFirst, + >, + >; + type CancelOrigin = Architects; + type KillOrigin = Masters; + type Slash = ToParentTreasury; + type Votes = pallet_ranked_collective::Votes; + type Tally = pallet_ranked_collective::TallyOf; + type SubmissionDeposit = ConstU128<0>; + type MaxQueued = ConstU32<100>; + type UndecidingTimeout = ConstU32<{ 7 * DAYS }>; + type AlarmInterval = ConstU32<1>; + type Tracks = tracks::TracksInfo; + type Preimages = Preimage; +} + +pub type FellowshipCollectiveInstance = pallet_ranked_collective::Instance1; + +impl pallet_ranked_collective::Config for Runtime { + type WeightInfo = weights::pallet_ranked_collective_fellowship_collective::WeightInfo; + type RuntimeEvent = RuntimeEvent; + + #[cfg(not(feature = "runtime-benchmarks"))] + // Promotions and the induction of new members are serviced by `FellowshipCore` pallet instance. + type PromoteOrigin = frame_system::EnsureNever; + #[cfg(feature = "runtime-benchmarks")] + // The maximum value of `u16` set as a success value for the root to ensure the benchmarks will + // pass. + type PromoteOrigin = EnsureRootWithSuccess>; + + // Demotion is by any of: + // - Root can demote arbitrarily. + // - the FellowshipAdmin origin (i.e. token holder referendum); + // + // The maximum value of `u16` set as a success value for the root to ensure the benchmarks will + // pass. + type DemoteOrigin = EitherOf< + EnsureRootWithSuccess>, + MapSuccess< + EnsureXcm>, + Replace>, + >, + >; + type Polls = FellowshipReferenda; + type MinRankOfClass = tracks::MinRankOfClass; + type VoteWeight = pallet_ranked_collective::Geometric; +} + +pub type FellowshipCoreInstance = pallet_core_fellowship::Instance1; + +impl pallet_core_fellowship::Config for Runtime { + type WeightInfo = weights::pallet_core_fellowship_fellowship_core::WeightInfo; + type RuntimeEvent = RuntimeEvent; + type Members = pallet_ranked_collective::Pallet; + type Balance = Balance; + // Parameters are set by any of: + // - Root; + // - the FellowshipAdmin origin (i.e. token holder referendum); + // - a vote among all Fellows. + type ParamsOrigin = EitherOfDiverse< + EnsureXcm>, + Fellows, + >; + // Induction (creating a candidate) is by any of: + // - Root; + // - the FellowshipAdmin origin (i.e. token holder referendum); + // - a single Fellow; + // - a vote among all Members. 
+ type InductOrigin = EitherOfDiverse< + EnsureXcm>, + EitherOfDiverse< + pallet_ranked_collective::EnsureMember< + Runtime, + FellowshipCollectiveInstance, + { ranks::DAN_3 }, + >, + Members, + >, + >; + // Approval (rank-retention) of a Member's current rank is by any of: + // - Root; + // - the FellowshipAdmin origin (i.e. token holder referendum); + // - a vote by the rank two above the current rank for all retention up to the Master rank. + type ApproveOrigin = EitherOf< + MapSuccess< + EnsureXcm>, + Replace>, + >, + EnsureCanRetainAt, + >; + // Promotion is by any of: + // - Root can promote arbitrarily. + // - the FellowshipAdmin origin (i.e. token holder referendum); + // - a vote by the rank two above the new rank for all promotions up to the Master rank. + type PromoteOrigin = EitherOf< + MapSuccess< + EnsureXcm>, + Replace>, + >, + EnsureCanPromoteTo, + >; + type EvidenceSize = ConstU32<65536>; +} + +pub type FellowshipSalaryInstance = pallet_salary::Instance1; + +use xcm::prelude::*; + +parameter_types! { + // The interior location on AssetHub for the paying account. This is the Fellowship Salary + // pallet instance (which sits at index 64). This sovereign account will need funding. + pub Interior: InteriorMultiLocation = PalletInstance(64).into(); +} + +const USDT_UNITS: u128 = 1_000_000; + +/// [`PayOverXcm`] setup to pay the Fellowship salary on the AssetHub in USDT. +pub type FellowshipSalaryPaymaster = PayOverXcm< + Interior, + crate::xcm_config::XcmRouter, + crate::PolkadotXcm, + ConstU32<{ 6 * HOURS }>, + AccountId, + (), + ConvertToValue, + AliasesIntoAccountId32<(), AccountId>, +>; + +impl pallet_salary::Config for Runtime { + type WeightInfo = weights::pallet_salary_fellowship_salary::WeightInfo; + type RuntimeEvent = RuntimeEvent; + + #[cfg(not(feature = "runtime-benchmarks"))] + type Paymaster = FellowshipSalaryPaymaster; + #[cfg(feature = "runtime-benchmarks")] + type Paymaster = PayWithEnsure>>; + type Members = pallet_ranked_collective::Pallet; + + #[cfg(not(feature = "runtime-benchmarks"))] + type Salary = pallet_core_fellowship::Pallet; + #[cfg(feature = "runtime-benchmarks")] + type Salary = frame_support::traits::tokens::ConvertRank< + crate::impls::benchmarks::RankToSalary, + >; + // 15 days to register for a salary payment. + type RegistrationPeriod = ConstU32<{ 15 * DAYS }>; + // 15 days to claim the salary payment. + type PayoutPeriod = ConstU32<{ 15 * DAYS }>; + // Total monthly salary budget. + type Budget = ConstU128<{ 100_000 * USDT_UNITS }>; +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/origins.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/origins.rs new file mode 100644 index 0000000000000000000000000000000000000000..5ed2c19f79e6464fc09a349d8dce3c09afad2d55 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/origins.rs @@ -0,0 +1,247 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Fellowship custom origins. + +use super::ranks; +pub use pallet_origins::*; + +#[frame_support::pallet] +pub mod pallet_origins { + use super::ranks; + use frame_support::pallet_prelude::*; + use pallet_ranked_collective::Rank; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[derive(PartialEq, Eq, Clone, MaxEncodedLen, Encode, Decode, TypeInfo, RuntimeDebug)] + #[pallet::origin] + pub enum Origin { + /// Origin aggregated through weighted votes of those with rank 1 or above; `Success` is 1. + /// Aka the "voice" of all Members. + Members, + /// Origin aggregated through weighted votes of those with rank 2 or above; `Success` is 2. + /// Aka the "voice" of members at least II Dan. + Fellowship2Dan, + /// Origin aggregated through weighted votes of those with rank 3 or above; `Success` is 3. + /// Aka the "voice" of all Fellows. + Fellows, + /// Origin aggregated through weighted votes of those with rank 4 or above; `Success` is 4. + /// Aka the "voice" of members at least IV Dan. + Architects, + /// Origin aggregated through weighted votes of those with rank 5 or above; `Success` is 5. + /// Aka the "voice" of members at least V Dan. + Fellowship5Dan, + /// Origin aggregated through weighted votes of those with rank 6 or above; `Success` is 6. + /// Aka the "voice" of members at least VI Dan. + Fellowship6Dan, + /// Origin aggregated through weighted votes of those with rank 7 or above; `Success` is 7. + /// Aka the "voice" of all Masters. + Masters, + /// Origin aggregated through weighted votes of those with rank 8 or above; `Success` is 8. + /// Aka the "voice" of members at least VIII Dan. + Fellowship8Dan, + /// Origin aggregated through weighted votes of those with rank 9 or above; `Success` is 9. + /// Aka the "voice" of members at least IX Dan. + Fellowship9Dan, + + /// Origin aggregated through weighted votes of those with rank 3 or above when voting on + /// a fortnight-long track; `Success` is 1. + RetainAt1Dan, + /// Origin aggregated through weighted votes of those with rank 4 or above when voting on + /// a fortnight-long track; `Success` is 2. + RetainAt2Dan, + /// Origin aggregated through weighted votes of those with rank 5 or above when voting on + /// a fortnight-long track; `Success` is 3. + RetainAt3Dan, + /// Origin aggregated through weighted votes of those with rank 6 or above when voting on + /// a fortnight-long track; `Success` is 4. + RetainAt4Dan, + /// Origin aggregated through weighted votes of those with rank 7 or above when voting on + /// a fortnight-long track; `Success` is 5. + RetainAt5Dan, + /// Origin aggregated through weighted votes of those with rank 8 or above when voting on + /// a fortnight-long track; `Success` is 6. + RetainAt6Dan, + + /// Origin aggregated through weighted votes of those with rank 3 or above when voting on + /// a month-long track; `Success` is 1. + PromoteTo1Dan, + /// Origin aggregated through weighted votes of those with rank 4 or above when voting on + /// a month-long track; `Success` is 2. + PromoteTo2Dan, + /// Origin aggregated through weighted votes of those with rank 5 or above when voting on + /// a month-long track; `Success` is 3. + PromoteTo3Dan, + /// Origin aggregated through weighted votes of those with rank 6 or above when voting on + /// a month-long track; `Success` is 4. 
+ PromoteTo4Dan, + /// Origin aggregated through weighted votes of those with rank 7 or above when voting on + /// a month-long track; `Success` is 5. + PromoteTo5Dan, + /// Origin aggregated through weighted votes of those with rank 8 or above when voting on + /// a month-long track; `Success` is 6. + PromoteTo6Dan, + } + + impl Origin { + /// Returns the rank that the origin `self` speaks for, or `None` if it doesn't speak for + /// any. + /// + /// `Some` will be returned only for the first 9 elements of [Origin]. + pub fn as_voice(&self) -> Option { + Some(match &self { + Origin::Members => ranks::DAN_1, + Origin::Fellowship2Dan => ranks::DAN_2, + Origin::Fellows => ranks::DAN_3, + Origin::Architects => ranks::DAN_4, + Origin::Fellowship5Dan => ranks::DAN_5, + Origin::Fellowship6Dan => ranks::DAN_6, + Origin::Masters => ranks::DAN_7, + Origin::Fellowship8Dan => ranks::DAN_8, + Origin::Fellowship9Dan => ranks::DAN_9, + _ => return None, + }) + } + } + + /// A `TryMorph` implementation which is designed to convert an aggregate `RuntimeOrigin` + /// value into the Fellowship voice it represents if it is a Fellowship pallet origin an + /// appropriate variant. See also [Origin::as_voice]. + pub struct ToVoice; + impl<'a, O: 'a + TryInto<&'a Origin>> sp_runtime::traits::TryMorph for ToVoice { + type Outcome = pallet_ranked_collective::Rank; + fn try_morph(o: O) -> Result { + o.try_into().ok().and_then(Origin::as_voice).ok_or(()) + } + } + + macro_rules! decl_unit_ensures { + ( $name:ident: $success_type:ty = $success:expr ) => { + pub struct $name; + impl> + From> + EnsureOrigin for $name + { + type Success = $success_type; + fn try_origin(o: O) -> Result { + o.into().and_then(|o| match o { + Origin::$name => Ok($success), + r => Err(O::from(r)), + }) + } + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin() -> Result { + Ok(O::from(Origin::$name)) + } + } + }; + ( $name:ident ) => { decl_unit_ensures! { $name : () = () } }; + ( $name:ident: $success_type:ty = $success:expr, $( $rest:tt )* ) => { + decl_unit_ensures! { $name: $success_type = $success } + decl_unit_ensures! { $( $rest )* } + }; + ( $name:ident, $( $rest:tt )* ) => { + decl_unit_ensures! { $name } + decl_unit_ensures! { $( $rest )* } + }; + () => {} + } + decl_unit_ensures!( + Members: Rank = ranks::DAN_1, + Fellows: Rank = ranks::DAN_3, + Architects: Rank = ranks::DAN_4, + Masters: Rank = ranks::DAN_7, + ); + + macro_rules! decl_ensure { + ( + $vis:vis type $name:ident: EnsureOrigin { + $( $item:ident = $success:expr, )* + } + ) => { + $vis struct $name; + impl> + From> + EnsureOrigin for $name + { + type Success = $success_type; + fn try_origin(o: O) -> Result { + o.into().and_then(|o| match o { + $( + Origin::$item => Ok($success), + )* + r => Err(O::from(r)), + }) + } + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin() -> Result { + // By convention the more privileged origins go later, so for greatest chance + // of success, we want the last one. + let _result: Result = Err(()); + $( + let _result: Result = Ok(O::from(Origin::$item)); + )* + _result + } + } + } + } + + // Fellowship origin indicating weighted voting from at least the rank of `Success` on a + // week-long track. + decl_ensure! 
{ + pub type EnsureFellowship: EnsureOrigin { + Members = ranks::DAN_1, + Fellowship2Dan = ranks::DAN_2, + Fellows = ranks::DAN_3, + Architects = ranks::DAN_4, + Fellowship5Dan = ranks::DAN_5, + Fellowship6Dan = ranks::DAN_6, + Masters = ranks::DAN_7, + Fellowship8Dan = ranks::DAN_8, + Fellowship9Dan = ranks::DAN_9, + } + } + + // Fellowship origin indicating weighted voting from at least the rank of `Success + 2` on + // a fortnight-long track; needed for Fellowship retention voting. + decl_ensure! { + pub type EnsureCanRetainAt: EnsureOrigin { + RetainAt1Dan = ranks::DAN_1, + RetainAt2Dan = ranks::DAN_2, + RetainAt3Dan = ranks::DAN_3, + RetainAt4Dan = ranks::DAN_4, + RetainAt5Dan = ranks::DAN_5, + RetainAt6Dan = ranks::DAN_6, + } + } + + // Fellowship origin indicating weighted voting from at least the rank of `Success + 2` on + // a month-long track; needed for Fellowship promotion voting. + decl_ensure! { + pub type EnsureCanPromoteTo: EnsureOrigin { + PromoteTo1Dan = ranks::DAN_1, + PromoteTo2Dan = ranks::DAN_2, + PromoteTo3Dan = ranks::DAN_3, + PromoteTo4Dan = ranks::DAN_4, + PromoteTo5Dan = ranks::DAN_5, + PromoteTo6Dan = ranks::DAN_6, + } + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/tracks.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/tracks.rs new file mode 100644 index 0000000000000000000000000000000000000000..099bdf4cf7539ed27c2c5af4178113d8cbbd2d1e --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/tracks.rs @@ -0,0 +1,532 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Track configurations for Fellowship. + +use crate::{Balance, BlockNumber, RuntimeOrigin, DAYS, DOLLARS, HOURS, MINUTES}; +use pallet_ranked_collective::Rank; +use sp_runtime::{traits::Convert, Perbill}; + +/// Referendum `TrackId` type. +pub type TrackId = u16; + +/// Referendum track IDs. +pub mod constants { + use super::TrackId; + + // Regular tracks (7 days) used for general operations. The required rank for voting is the + // same as that which is named (and also the track ID). + pub const MEMBERS: TrackId = 1; + pub const PROFICIENTS: TrackId = 2; + pub const FELLOWS: TrackId = 3; + pub const ARCHITECTS: TrackId = 4; + pub const ARCHITECTS_ADEPT: TrackId = 5; + pub const GRAND_ARCHITECTS: TrackId = 6; + pub const MASTERS: TrackId = 7; + pub const MASTERS_CONSTANT: TrackId = 8; + pub const GRAND_MASTERS: TrackId = 9; + + // Longer tracks (14 days) used for rank retention. These require a rank of two more than the + // grade at which they retain (as per the whitepaper). This works out as the track ID minus 8. 
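+	// For example, `RETAIN_AT_3DAN` (ID 13) requires a plurality voice of at least rank
+	// 5 = 13 - 8, i.e. two ranks above the III Dan grade being retained.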
+ pub const RETAIN_AT_1DAN: TrackId = 11; + pub const RETAIN_AT_2DAN: TrackId = 12; + pub const RETAIN_AT_3DAN: TrackId = 13; + pub const RETAIN_AT_4DAN: TrackId = 14; + pub const RETAIN_AT_5DAN: TrackId = 15; + pub const RETAIN_AT_6DAN: TrackId = 16; + + // Longest tracks (30 days) used for promotions. These require a rank of two more than the + // grade to which they promote (as per the whitepaper). This works out as the track ID minus 18. + pub const PROMOTE_TO_1DAN: TrackId = 21; + pub const PROMOTE_TO_2DAN: TrackId = 22; + pub const PROMOTE_TO_3DAN: TrackId = 23; + pub const PROMOTE_TO_4DAN: TrackId = 24; + pub const PROMOTE_TO_5DAN: TrackId = 25; + pub const PROMOTE_TO_6DAN: TrackId = 26; +} + +/// Convert the track ID (defined above) into the minimum rank (i.e. fellowship Dan grade) required +/// to vote on the track. +pub struct MinRankOfClass; +impl Convert for MinRankOfClass { + fn convert(a: TrackId) -> Rank { + match a { + // Just a regular vote: the track ID is conveniently the same as the minimum rank. + regular @ 1..=9 => regular, + // A retention vote; the track ID turns out to be 8 more than the minimum required rank. + retention @ 11..=16 => retention - 8, + // A promotion vote; the track ID turns out to be 18 more than the minimum required + // rank. + promotion @ 21..=26 => promotion - 18, + _ => Rank::max_value(), + } + } +} + +const RETAIN_MAX_DECIDING: u32 = 25; +const RETAIN_DECISION_DEPOSIT: Balance = 5 * DOLLARS; +const RETAIN_PREPARE_PERIOD: BlockNumber = 0; +const RETAIN_DECISION_PERIOD: BlockNumber = 14 * DAYS; +const RETAIN_CONFIRM_PERIOD: BlockNumber = 1 * HOURS; +const RETAIN_MIN_ENACTMENT_PERIOD: BlockNumber = 0; +const RETAIN_MIN_APPROVAL: pallet_referenda::Curve = pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(60), + ceil: Perbill::from_percent(100), +}; +const RETAIN_MIN_SUPPORT: pallet_referenda::Curve = pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(10), + ceil: Perbill::from_percent(100), +}; + +const PROMOTE_MAX_DECIDING: u32 = 10; +const PROMOTE_DECISION_DEPOSIT: Balance = 5 * DOLLARS; +const PROMOTE_PREPARE_PERIOD: BlockNumber = 0; +const PROMOTE_DECISION_PERIOD: BlockNumber = 30 * DAYS; +const PROMOTE_CONFIRM_PERIOD: BlockNumber = 1 * HOURS; +const PROMOTE_MIN_ENACTMENT_PERIOD: BlockNumber = 0; +const PROMOTE_MIN_APPROVAL: pallet_referenda::Curve = pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(60), + ceil: Perbill::from_percent(100), +}; +const PROMOTE_MIN_SUPPORT: pallet_referenda::Curve = pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(10), + ceil: Perbill::from_percent(100), +}; + +pub struct TracksInfo; +impl pallet_referenda::TracksInfo for TracksInfo { + type Id = TrackId; + type RuntimeOrigin = ::PalletsOrigin; + fn tracks() -> &'static [(Self::Id, pallet_referenda::TrackInfo)] { + use constants as tracks; + static DATA: [(TrackId, pallet_referenda::TrackInfo); 21] = [ + ( + tracks::MEMBERS, + pallet_referenda::TrackInfo { + name: "members", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 30 * MINUTES, + decision_period: 1 * DAYS, + confirm_period: 30 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: 
Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(100), + }, + }, + ), + ( + tracks::PROFICIENTS, + pallet_referenda::TrackInfo { + name: "proficient members", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 30 * MINUTES, + decision_period: 1 * DAYS, + confirm_period: 30 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(100), + }, + }, + ), + ( + tracks::FELLOWS, + pallet_referenda::TrackInfo { + name: "fellows", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 30 * MINUTES, + decision_period: 1 * DAYS, + confirm_period: 30 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(100), + }, + }, + ), + ( + tracks::ARCHITECTS, + pallet_referenda::TrackInfo { + name: "architects", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 30 * MINUTES, + decision_period: 1 * DAYS, + confirm_period: 30 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(100), + }, + }, + ), + ( + tracks::ARCHITECTS_ADEPT, + pallet_referenda::TrackInfo { + name: "architects adept", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 30 * MINUTES, + decision_period: 1 * DAYS, + confirm_period: 30 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(100), + }, + }, + ), + ( + tracks::GRAND_ARCHITECTS, + pallet_referenda::TrackInfo { + name: "grand architects", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 30 * MINUTES, + decision_period: 1 * DAYS, + confirm_period: 30 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(100), + }, + }, + ), + ( + tracks::MASTERS, + pallet_referenda::TrackInfo { + name: "masters", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 30 * MINUTES, + decision_period: 1 * DAYS, + confirm_period: 
30 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(100), + }, + }, + ), + ( + tracks::MASTERS_CONSTANT, + pallet_referenda::TrackInfo { + name: "masters constant", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 30 * MINUTES, + decision_period: 1 * DAYS, + confirm_period: 30 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(100), + }, + }, + ), + ( + tracks::GRAND_MASTERS, + pallet_referenda::TrackInfo { + name: "grand masters", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 30 * MINUTES, + decision_period: 1 * DAYS, + confirm_period: 30 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(100), + }, + }, + ), + ( + tracks::RETAIN_AT_1DAN, + pallet_referenda::TrackInfo { + name: "retain at I Dan", + max_deciding: RETAIN_MAX_DECIDING, + decision_deposit: RETAIN_DECISION_DEPOSIT, + prepare_period: RETAIN_PREPARE_PERIOD, + decision_period: RETAIN_DECISION_PERIOD, + confirm_period: RETAIN_CONFIRM_PERIOD, + min_enactment_period: RETAIN_MIN_ENACTMENT_PERIOD, + min_approval: RETAIN_MIN_APPROVAL, + min_support: RETAIN_MIN_SUPPORT, + }, + ), + ( + tracks::RETAIN_AT_2DAN, + pallet_referenda::TrackInfo { + name: "retain at II Dan", + max_deciding: RETAIN_MAX_DECIDING, + decision_deposit: RETAIN_DECISION_DEPOSIT, + prepare_period: RETAIN_PREPARE_PERIOD, + decision_period: RETAIN_DECISION_PERIOD, + confirm_period: RETAIN_CONFIRM_PERIOD, + min_enactment_period: RETAIN_MIN_ENACTMENT_PERIOD, + min_approval: RETAIN_MIN_APPROVAL, + min_support: RETAIN_MIN_SUPPORT, + }, + ), + ( + tracks::RETAIN_AT_3DAN, + pallet_referenda::TrackInfo { + name: "retain at III Dan", + max_deciding: RETAIN_MAX_DECIDING, + decision_deposit: RETAIN_DECISION_DEPOSIT, + prepare_period: RETAIN_PREPARE_PERIOD, + decision_period: RETAIN_DECISION_PERIOD, + confirm_period: RETAIN_CONFIRM_PERIOD, + min_enactment_period: RETAIN_MIN_ENACTMENT_PERIOD, + min_approval: RETAIN_MIN_APPROVAL, + min_support: RETAIN_MIN_SUPPORT, + }, + ), + ( + tracks::RETAIN_AT_4DAN, + pallet_referenda::TrackInfo { + name: "retain at IV Dan", + max_deciding: RETAIN_MAX_DECIDING, + decision_deposit: RETAIN_DECISION_DEPOSIT, + prepare_period: RETAIN_PREPARE_PERIOD, + decision_period: RETAIN_DECISION_PERIOD, + confirm_period: RETAIN_CONFIRM_PERIOD, + min_enactment_period: RETAIN_MIN_ENACTMENT_PERIOD, + min_approval: RETAIN_MIN_APPROVAL, + min_support: RETAIN_MIN_SUPPORT, + }, + ), + ( + tracks::RETAIN_AT_5DAN, + pallet_referenda::TrackInfo { + name: "retain at V Dan", + max_deciding: RETAIN_MAX_DECIDING, + decision_deposit: 
RETAIN_DECISION_DEPOSIT, + prepare_period: RETAIN_PREPARE_PERIOD, + decision_period: RETAIN_DECISION_PERIOD, + confirm_period: RETAIN_CONFIRM_PERIOD, + min_enactment_period: RETAIN_MIN_ENACTMENT_PERIOD, + min_approval: RETAIN_MIN_APPROVAL, + min_support: RETAIN_MIN_SUPPORT, + }, + ), + ( + tracks::RETAIN_AT_6DAN, + pallet_referenda::TrackInfo { + name: "retain at VI Dan", + max_deciding: RETAIN_MAX_DECIDING, + decision_deposit: RETAIN_DECISION_DEPOSIT, + prepare_period: RETAIN_PREPARE_PERIOD, + decision_period: RETAIN_DECISION_PERIOD, + confirm_period: RETAIN_CONFIRM_PERIOD, + min_enactment_period: RETAIN_MIN_ENACTMENT_PERIOD, + min_approval: RETAIN_MIN_APPROVAL, + min_support: RETAIN_MIN_SUPPORT, + }, + ), + ( + tracks::PROMOTE_TO_1DAN, + pallet_referenda::TrackInfo { + name: "promote to I Dan", + max_deciding: PROMOTE_MAX_DECIDING, + decision_deposit: PROMOTE_DECISION_DEPOSIT, + prepare_period: PROMOTE_PREPARE_PERIOD, + decision_period: PROMOTE_DECISION_PERIOD, + confirm_period: PROMOTE_CONFIRM_PERIOD, + min_enactment_period: PROMOTE_MIN_ENACTMENT_PERIOD, + min_approval: PROMOTE_MIN_APPROVAL, + min_support: PROMOTE_MIN_SUPPORT, + }, + ), + ( + tracks::PROMOTE_TO_2DAN, + pallet_referenda::TrackInfo { + name: "promote to II Dan", + max_deciding: PROMOTE_MAX_DECIDING, + decision_deposit: PROMOTE_DECISION_DEPOSIT, + prepare_period: PROMOTE_PREPARE_PERIOD, + decision_period: PROMOTE_DECISION_PERIOD, + confirm_period: PROMOTE_CONFIRM_PERIOD, + min_enactment_period: PROMOTE_MIN_ENACTMENT_PERIOD, + min_approval: PROMOTE_MIN_APPROVAL, + min_support: PROMOTE_MIN_SUPPORT, + }, + ), + ( + tracks::PROMOTE_TO_3DAN, + pallet_referenda::TrackInfo { + name: "promote to III Dan", + max_deciding: PROMOTE_MAX_DECIDING, + decision_deposit: PROMOTE_DECISION_DEPOSIT, + prepare_period: PROMOTE_PREPARE_PERIOD, + decision_period: PROMOTE_DECISION_PERIOD, + confirm_period: PROMOTE_CONFIRM_PERIOD, + min_enactment_period: PROMOTE_MIN_ENACTMENT_PERIOD, + min_approval: PROMOTE_MIN_APPROVAL, + min_support: PROMOTE_MIN_SUPPORT, + }, + ), + ( + tracks::PROMOTE_TO_4DAN, + pallet_referenda::TrackInfo { + name: "promote to IV Dan", + max_deciding: PROMOTE_MAX_DECIDING, + decision_deposit: PROMOTE_DECISION_DEPOSIT, + prepare_period: PROMOTE_PREPARE_PERIOD, + decision_period: PROMOTE_DECISION_PERIOD, + confirm_period: PROMOTE_CONFIRM_PERIOD, + min_enactment_period: PROMOTE_MIN_ENACTMENT_PERIOD, + min_approval: PROMOTE_MIN_APPROVAL, + min_support: PROMOTE_MIN_SUPPORT, + }, + ), + ( + tracks::PROMOTE_TO_5DAN, + pallet_referenda::TrackInfo { + name: "promote to V Dan", + max_deciding: PROMOTE_MAX_DECIDING, + decision_deposit: PROMOTE_DECISION_DEPOSIT, + prepare_period: PROMOTE_PREPARE_PERIOD, + decision_period: PROMOTE_DECISION_PERIOD, + confirm_period: PROMOTE_CONFIRM_PERIOD, + min_enactment_period: PROMOTE_MIN_ENACTMENT_PERIOD, + min_approval: PROMOTE_MIN_APPROVAL, + min_support: PROMOTE_MIN_SUPPORT, + }, + ), + ( + tracks::PROMOTE_TO_6DAN, + pallet_referenda::TrackInfo { + name: "promote to VI Dan", + max_deciding: PROMOTE_MAX_DECIDING, + decision_deposit: PROMOTE_DECISION_DEPOSIT, + prepare_period: PROMOTE_PREPARE_PERIOD, + decision_period: PROMOTE_DECISION_PERIOD, + confirm_period: PROMOTE_CONFIRM_PERIOD, + min_enactment_period: PROMOTE_MIN_ENACTMENT_PERIOD, + min_approval: PROMOTE_MIN_APPROVAL, + min_support: PROMOTE_MIN_SUPPORT, + }, + ), + ]; + &DATA[..] 
+ } + fn track_for(id: &Self::RuntimeOrigin) -> Result { + use super::origins::Origin; + use constants as tracks; + + #[cfg(feature = "runtime-benchmarks")] + { + // For benchmarks, we enable a root origin. + // It is important that this is not available in production! + let root: Self::RuntimeOrigin = frame_system::RawOrigin::Root.into(); + if &root == id { + return Ok(tracks::GRAND_MASTERS) + } + } + + match Origin::try_from(id.clone()) { + Ok(Origin::Members) => Ok(tracks::MEMBERS), + Ok(Origin::Fellowship2Dan) => Ok(tracks::PROFICIENTS), + Ok(Origin::Fellows) => Ok(tracks::FELLOWS), + Ok(Origin::Architects) => Ok(tracks::ARCHITECTS), + Ok(Origin::Fellowship5Dan) => Ok(tracks::ARCHITECTS_ADEPT), + Ok(Origin::Fellowship6Dan) => Ok(tracks::GRAND_ARCHITECTS), + Ok(Origin::Masters) => Ok(tracks::MASTERS), + Ok(Origin::Fellowship8Dan) => Ok(tracks::MASTERS_CONSTANT), + Ok(Origin::Fellowship9Dan) => Ok(tracks::GRAND_MASTERS), + + Ok(Origin::RetainAt1Dan) => Ok(tracks::RETAIN_AT_1DAN), + Ok(Origin::RetainAt2Dan) => Ok(tracks::RETAIN_AT_2DAN), + Ok(Origin::RetainAt3Dan) => Ok(tracks::RETAIN_AT_3DAN), + Ok(Origin::RetainAt4Dan) => Ok(tracks::RETAIN_AT_4DAN), + Ok(Origin::RetainAt5Dan) => Ok(tracks::RETAIN_AT_5DAN), + Ok(Origin::RetainAt6Dan) => Ok(tracks::RETAIN_AT_6DAN), + + Ok(Origin::PromoteTo1Dan) => Ok(tracks::PROMOTE_TO_1DAN), + Ok(Origin::PromoteTo2Dan) => Ok(tracks::PROMOTE_TO_2DAN), + Ok(Origin::PromoteTo3Dan) => Ok(tracks::PROMOTE_TO_3DAN), + Ok(Origin::PromoteTo4Dan) => Ok(tracks::PROMOTE_TO_4DAN), + Ok(Origin::PromoteTo5Dan) => Ok(tracks::PROMOTE_TO_5DAN), + Ok(Origin::PromoteTo6Dan) => Ok(tracks::PROMOTE_TO_6DAN), + + _ => Err(()), + } + } +} +pallet_referenda::impl_tracksinfo_get!(TracksInfo, Balance, BlockNumber); diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/impls.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/impls.rs new file mode 100644 index 0000000000000000000000000000000000000000..9f4c2a6a4c94a5d28fce09dbe800b2d7226723cf --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/impls.rs @@ -0,0 +1,229 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::OriginCaller; +use frame_support::{ + dispatch::DispatchResultWithPostInfo, + traits::{Currency, Get, Imbalance, OnUnbalanced, OriginTrait, PrivilegeCmp}, + weights::Weight, +}; +use log; +use pallet_alliance::{ProposalIndex, ProposalProvider}; +use parachains_common::impls::NegativeImbalance; +use sp_runtime::DispatchError; +use sp_std::{cmp::Ordering, marker::PhantomData, prelude::*}; +use xcm::latest::{Fungibility, Junction, Parent}; + +type AccountIdOf = ::AccountId; + +type ProposalOf = >::Proposal; + +type HashOf = ::Hash; + +/// Type alias to conveniently refer to the `Currency::Balance` associated type. 
+pub type BalanceOf = + as Currency<::AccountId>>::Balance; + +/// Implements `OnUnbalanced::on_unbalanced` to teleport slashed assets to relay chain treasury +/// account. +pub struct ToParentTreasury( + PhantomData<(TreasuryAccount, PalletAccount, T)>, +); + +impl OnUnbalanced> + for ToParentTreasury +where + T: pallet_balances::Config + pallet_xcm::Config + frame_system::Config, + <::RuntimeOrigin as OriginTrait>::AccountId: From>, + [u8; 32]: From<::AccountId>, + TreasuryAccount: Get>, + PalletAccount: Get>, + BalanceOf: Into, +{ + fn on_unbalanced(amount: NegativeImbalance) { + let amount = match amount.drop_zero() { + Ok(..) => return, + Err(amount) => amount, + }; + let imbalance = amount.peek(); + let pallet_acc: AccountIdOf = PalletAccount::get(); + let treasury_acc: AccountIdOf = TreasuryAccount::get(); + + >::resolve_creating(&pallet_acc, amount); + + let result = >::teleport_assets( + <::RuntimeOrigin>::signed(pallet_acc.into()), + Box::new(Parent.into()), + Box::new( + Junction::AccountId32 { network: None, id: treasury_acc.into() } + .into_location() + .into(), + ), + Box::new((Parent, imbalance).into()), + 0, + ); + + if let Err(err) = result { + log::warn!("Failed to teleport slashed assets: {:?}", err); + } + } +} + +/// Proposal provider for alliance pallet. +/// Adapter from collective pallet to alliance proposal provider trait. +pub struct AllianceProposalProvider(PhantomData<(T, I)>); + +impl ProposalProvider, HashOf, ProposalOf> + for AllianceProposalProvider +where + T: pallet_collective::Config + frame_system::Config, + I: 'static, +{ + fn propose_proposal( + who: AccountIdOf, + threshold: u32, + proposal: Box>, + length_bound: u32, + ) -> Result<(u32, u32), DispatchError> { + pallet_collective::Pallet::::do_propose_proposed( + who, + threshold, + proposal, + length_bound, + ) + } + + fn vote_proposal( + who: AccountIdOf, + proposal: HashOf, + index: ProposalIndex, + approve: bool, + ) -> Result { + pallet_collective::Pallet::::do_vote(who, proposal, index, approve) + } + + fn close_proposal( + proposal_hash: HashOf, + proposal_index: ProposalIndex, + proposal_weight_bound: Weight, + length_bound: u32, + ) -> DispatchResultWithPostInfo { + pallet_collective::Pallet::::do_close( + proposal_hash, + proposal_index, + proposal_weight_bound, + length_bound, + ) + } + + fn proposal_of(proposal_hash: HashOf) -> Option> { + pallet_collective::Pallet::::proposal_of(proposal_hash) + } +} + +/// Used to compare the privilege of an origin inside the scheduler. +pub struct EqualOrGreatestRootCmp; + +impl PrivilegeCmp for EqualOrGreatestRootCmp { + fn cmp_privilege(left: &OriginCaller, right: &OriginCaller) -> Option { + if left == right { + return Some(Ordering::Equal) + } + match (left, right) { + // Root is greater than anything. + (OriginCaller::system(frame_system::RawOrigin::Root), _) => Some(Ordering::Greater), + _ => None, + } + } +} + +#[cfg(feature = "runtime-benchmarks")] +pub mod benchmarks { + use super::*; + use crate::ParachainSystem; + use cumulus_primitives_core::{ChannelStatus, GetChannelInfo}; + use frame_support::traits::{ + fungible, + tokens::{Pay, PaymentStatus}, + }; + use pallet_ranked_collective::Rank; + use parachains_common::{AccountId, Balance}; + use sp_runtime::traits::Convert; + + /// Rank to salary conversion helper type. 
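+	///
+	/// The salary is taken to be the member's rank multiplied by the minimum balance
+	/// (existential deposit) of the underlying fungible; see `convert` below.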
+ pub struct RankToSalary(PhantomData); + impl Convert for RankToSalary + where + Fungible: fungible::Inspect, + { + fn convert(r: Rank) -> Balance { + Balance::from(r).saturating_mul(Fungible::minimum_balance()) + } + } + + /// Trait for setting up any prerequisites for successful execution of benchmarks. + pub trait EnsureSuccessful { + fn ensure_successful(); + } + + /// Implementation of the [`EnsureSuccessful`] trait which opens an HRMP channel between + /// the Collectives and a parachain with a given ID. + pub struct OpenHrmpChannel(PhantomData); + impl> EnsureSuccessful for OpenHrmpChannel { + fn ensure_successful() { + if let ChannelStatus::Closed = ParachainSystem::get_channel_status(I::get().into()) { + ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests(I::get().into()) + } + } + } + + /// Type that wraps a type implementing the [`Pay`] trait to decorate its + /// [`Pay::ensure_successful`] function with a provided implementation of the + /// [`EnsureSuccessful`] trait. + pub struct PayWithEnsure(PhantomData<(O, E)>); + impl Pay for PayWithEnsure + where + O: Pay, + E: EnsureSuccessful, + { + type AssetKind = O::AssetKind; + type Balance = O::Balance; + type Beneficiary = O::Beneficiary; + type Error = O::Error; + type Id = O::Id; + + fn pay( + who: &Self::Beneficiary, + asset_kind: Self::AssetKind, + amount: Self::Balance, + ) -> Result { + O::pay(who, asset_kind, amount) + } + fn check_payment(id: Self::Id) -> PaymentStatus { + O::check_payment(id) + } + fn ensure_successful( + who: &Self::Beneficiary, + asset_kind: Self::AssetKind, + amount: Self::Balance, + ) { + E::ensure_successful(); + O::ensure_successful(who, asset_kind, amount) + } + fn ensure_concluded(id: Self::Id) { + O::ensure_concluded(id) + } + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..8c5593e154d7a752282f2fa0982f7f7a7ac8c8b3 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -0,0 +1,1023 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Collectives Parachain +//! +//! This parachain is for collectives that serve the Westend network. +//! Each collective is defined by a specialized (possibly instanced) pallet. +//! +//! ### Governance +//! +//! As a system parachain, Collectives defers its governance (namely, its `Root` origin), to +//! its Relay Chain parent, Westend. +//! +//! ### Collator Selection +//! +//! Collectives uses `pallet-collator-selection`, a simple first-come-first-served registration +//! system where collators can reserve a small bond to join the block producer set. There is no +//! slashing. Collective members are generally expected to run collators. 
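+//!
+//! ### Collectives
+//!
+//! The collectives hosted here currently include the Alliance, the Westend Technical
+//! Fellowship and the Ambassador Program.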
+ +#![cfg_attr(not(feature = "std"), no_std)] +#![recursion_limit = "256"] + +// Make the WASM binary available. +#[cfg(feature = "std")] +include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); + +pub mod ambassador; +pub mod impls; +mod weights; +pub mod xcm_config; +// Fellowship configurations. +pub mod fellowship; +pub use ambassador::pallet_ambassador_origins; + +use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; +use fellowship::{pallet_fellowship_origins, Fellows}; +use impls::{AllianceProposalProvider, EqualOrGreatestRootCmp, ToParentTreasury}; +use sp_api::impl_runtime_apis; +use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; +use sp_runtime::{ + create_runtime_str, generic, impl_opaque_keys, + traits::{AccountIdConversion, AccountIdLookup, BlakeTwo256, Block as BlockT}, + transaction_validity::{TransactionSource, TransactionValidity}, + ApplyExtrinsicResult, Perbill, +}; + +use sp_std::prelude::*; +#[cfg(feature = "std")] +use sp_version::NativeVersion; +use sp_version::RuntimeVersion; + +use codec::{Decode, Encode, MaxEncodedLen}; +use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; +use frame_support::{ + construct_runtime, + dispatch::DispatchClass, + genesis_builder_helper::{build_config, create_default_config}, + parameter_types, + traits::{ + fungible::HoldConsideration, ConstBool, ConstU16, ConstU32, ConstU64, ConstU8, + EitherOfDiverse, InstanceFilter, LinearStoragePrice, TransformOrigin, + }, + weights::{ConstantMultiplier, Weight}, + PalletId, +}; +use frame_system::{ + limits::{BlockLength, BlockWeights}, + EnsureRoot, +}; +pub use parachains_common as common; +use parachains_common::{ + impls::DealWithFees, message_queue::*, AccountId, AuraId, Balance, BlockNumber, Hash, Header, + Nonce, Signature, AVERAGE_ON_INITIALIZE_RATIO, DAYS, HOURS, MAXIMUM_BLOCK_WEIGHT, MINUTES, + NORMAL_DISPATCH_RATIO, SLOT_DURATION, +}; +use sp_runtime::RuntimeDebug; +use testnets_common::westend::{account::*, consensus::*, currency::*, fee::WeightToFee}; +use xcm_config::{GovernanceLocation, XcmOriginToTransactDispatchOrigin}; + +#[cfg(any(feature = "std", test))] +pub use sp_runtime::BuildStorage; + +// Polkadot imports +use pallet_xcm::{EnsureXcm, IsVoiceOfBody}; +use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; +use xcm::latest::{prelude::*, BodyId}; + +use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; + +impl_opaque_keys! { + pub struct SessionKeys { + pub aura: Aura, + } +} + +#[sp_version::runtime_version] +pub const VERSION: RuntimeVersion = RuntimeVersion { + spec_name: create_runtime_str!("collectives-westend"), + impl_name: create_runtime_str!("collectives-westend"), + authoring_version: 1, + spec_version: 1_003_000, + impl_version: 0, + apis: RUNTIME_API_VERSIONS, + transaction_version: 5, + state_version: 0, +}; + +/// The version information used to identify this runtime when compiled natively. +#[cfg(feature = "std")] +pub fn native_version() -> NativeVersion { + NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } +} + +/// Privileged origin that represents Root or more than two thirds of the Alliance. +pub type RootOrAllianceTwoThirdsMajority = EitherOfDiverse< + EnsureRoot, + pallet_collective::EnsureProportionMoreThan, +>; + +parameter_types! 
{ + pub const Version: RuntimeVersion = VERSION; + pub RuntimeBlockLength: BlockLength = + BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); + pub RuntimeBlockWeights: BlockWeights = BlockWeights::builder() + .base_block(BlockExecutionWeight::get()) + .for_class(DispatchClass::all(), |weights| { + weights.base_extrinsic = ExtrinsicBaseWeight::get(); + }) + .for_class(DispatchClass::Normal, |weights| { + weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); + }) + .for_class(DispatchClass::Operational, |weights| { + weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); + // Operational transactions have some extra reserved space, so that they + // are included even if block reached `MAXIMUM_BLOCK_WEIGHT`. + weights.reserved = Some( + MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT + ); + }) + .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) + .build_or_panic(); +} + +// Configure FRAME pallets to include in runtime. +impl frame_system::Config for Runtime { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = RuntimeBlockWeights; + type BlockLength = RuntimeBlockLength; + type AccountId = AccountId; + type RuntimeCall = RuntimeCall; + type Lookup = AccountIdLookup; + type Nonce = Nonce; + type Hash = Hash; + type Hashing = BlakeTwo256; + type Block = Block; + type RuntimeEvent = RuntimeEvent; + type RuntimeOrigin = RuntimeOrigin; + type BlockHashCount = BlockHashCount; + type DbWeight = RocksDbWeight; + type Version = Version; + type PalletInfo = PalletInfo; + type OnNewAccount = (); + type OnKilledAccount = (); + type AccountData = pallet_balances::AccountData; + type SystemWeightInfo = weights::frame_system::WeightInfo; + type SS58Prefix = ConstU16<0>; + type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +impl pallet_timestamp::Config for Runtime { + /// A timestamp: milliseconds since the unix epoch. + type Moment = u64; + type OnTimestampSet = Aura; + type MinimumPeriod = ConstU64<{ SLOT_DURATION / 2 }>; + type WeightInfo = weights::pallet_timestamp::WeightInfo; +} + +impl pallet_authorship::Config for Runtime { + type FindAuthor = pallet_session::FindAccountFromAuthorIndex; + type EventHandler = (CollatorSelection,); +} + +parameter_types! { + pub const ExistentialDeposit: Balance = EXISTENTIAL_DEPOSIT; +} + +impl pallet_balances::Config for Runtime { + type MaxLocks = ConstU32<50>; + /// The type for recording an account's balance. + type Balance = Balance; + /// The ubiquitous event type. + type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = weights::pallet_balances::WeightInfo; + type MaxReserves = ConstU32<50>; + type ReserveIdentifier = [u8; 8]; + type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; + type FreezeIdentifier = (); + type MaxHolds = ConstU32<1>; + type MaxFreezes = ConstU32<0>; +} + +parameter_types! 
{ + /// Relay Chain `TransactionByteFee` / 10 + pub const TransactionByteFee: Balance = MILLICENTS; +} + +impl pallet_transaction_payment::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type OnChargeTransaction = + pallet_transaction_payment::CurrencyAdapter>; + type WeightToFee = WeightToFee; + type LengthToFee = ConstantMultiplier; + type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type OperationalFeeMultiplier = ConstU8<5>; +} + +parameter_types! { + // One storage item; key size is 32; value is size 4+4+16+32 bytes = 56 bytes. + pub const DepositBase: Balance = deposit(1, 88); + // Additional storage item size of 32 bytes. + pub const DepositFactor: Balance = deposit(0, 32); +} + +impl pallet_multisig::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; + type Currency = Balances; + type DepositBase = DepositBase; + type DepositFactor = DepositFactor; + type MaxSignatories = ConstU32<100>; + type WeightInfo = weights::pallet_multisig::WeightInfo; +} + +impl pallet_utility::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; + type PalletsOrigin = OriginCaller; + type WeightInfo = weights::pallet_utility::WeightInfo; +} + +parameter_types! { + // One storage item; key size 32, value size 8; . + pub const ProxyDepositBase: Balance = deposit(1, 40); + // Additional storage item size of 33 bytes. + pub const ProxyDepositFactor: Balance = deposit(0, 33); + // One storage item; key size 32, value size 16 + pub const AnnouncementDepositBase: Balance = deposit(1, 48); + pub const AnnouncementDepositFactor: Balance = deposit(0, 66); +} + +/// The type used to represent the kinds of proxying allowed. +#[derive( + Copy, + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + Encode, + Decode, + RuntimeDebug, + MaxEncodedLen, + scale_info::TypeInfo, +)] +pub enum ProxyType { + /// Fully permissioned proxy. Can execute any call on behalf of _proxied_. + Any, + /// Can execute any call that does not transfer funds. + NonTransfer, + /// Proxy with the ability to reject time-delay proxy announcements. + CancelProxy, + /// Collator selection proxy. Can execute calls related to collator selection mechanism. + Collator, + /// Alliance proxy. Allows calls related to the Alliance. + Alliance, + /// Fellowship proxy. Allows calls related to the Fellowship. + Fellowship, + /// Ambassador proxy. Allows calls related to the Ambassador Program. + Ambassador, +} +impl Default for ProxyType { + fn default() -> Self { + Self::Any + } +} +impl InstanceFilter for ProxyType { + fn filter(&self, c: &RuntimeCall) -> bool { + match self { + ProxyType::Any => true, + ProxyType::NonTransfer => !matches!(c, RuntimeCall::Balances { .. }), + ProxyType::CancelProxy => matches!( + c, + RuntimeCall::Proxy(pallet_proxy::Call::reject_announcement { .. }) | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. } + ), + ProxyType::Collator => matches!( + c, + RuntimeCall::CollatorSelection { .. } | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. } + ), + ProxyType::Alliance => matches!( + c, + RuntimeCall::AllianceMotion { .. } | + RuntimeCall::Alliance { .. } | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. } + ), + ProxyType::Fellowship => matches!( + c, + RuntimeCall::FellowshipCollective { .. } | + RuntimeCall::FellowshipReferenda { .. } | + RuntimeCall::FellowshipCore { .. } | + RuntimeCall::FellowshipSalary { .. } | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. 
} + ), + ProxyType::Ambassador => matches!( + c, + RuntimeCall::AmbassadorCollective { .. } | + RuntimeCall::AmbassadorReferenda { .. } | + RuntimeCall::AmbassadorContent { .. } | + RuntimeCall::AmbassadorCore { .. } | + RuntimeCall::AmbassadorSalary { .. } | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. } + ), + } + } + fn is_superset(&self, o: &Self) -> bool { + match (self, o) { + (x, y) if x == y => true, + (ProxyType::Any, _) => true, + (_, ProxyType::Any) => false, + (ProxyType::NonTransfer, _) => true, + _ => false, + } + } +} + +impl pallet_proxy::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; + type Currency = Balances; + type ProxyType = ProxyType; + type ProxyDepositBase = ProxyDepositBase; + type ProxyDepositFactor = ProxyDepositFactor; + type MaxProxies = ConstU32<32>; + type WeightInfo = weights::pallet_proxy::WeightInfo; + type MaxPending = ConstU32<32>; + type CallHasher = BlakeTwo256; + type AnnouncementDepositBase = AnnouncementDepositBase; + type AnnouncementDepositFactor = AnnouncementDepositFactor; +} + +parameter_types! { + pub const ReservedXcmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); + pub const ReservedDmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); +} + +impl cumulus_pallet_parachain_system::Config for Runtime { + type WeightInfo = weights::cumulus_pallet_parachain_system::WeightInfo; + type RuntimeEvent = RuntimeEvent; + type OnSystemEvent = (); + type SelfParaId = parachain_info::Pallet; + type DmpQueue = frame_support::traits::EnqueueWithOrigin; + type ReservedDmpWeight = ReservedDmpWeight; + type OutboundXcmpMessageSource = XcmpQueue; + type XcmpMessageHandler = XcmpQueue; + type ReservedXcmpWeight = ReservedXcmpWeight; + type CheckAssociatedRelayNumber = RelayNumberStrictlyIncreases; + type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< + Runtime, + RELAY_CHAIN_SLOT_DURATION_MILLIS, + BLOCK_PROCESSING_VELOCITY, + UNINCLUDED_SEGMENT_CAPACITY, + >; +} + +impl parachain_info::Config for Runtime {} + +parameter_types! { + pub MessageQueueServiceWeight: Weight = Perbill::from_percent(35) * RuntimeBlockWeights::get().max_block; +} + +impl pallet_message_queue::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = weights::pallet_message_queue::WeightInfo; + #[cfg(feature = "runtime-benchmarks")] + type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor< + cumulus_primitives_core::AggregateMessageOrigin, + >; + #[cfg(not(feature = "runtime-benchmarks"))] + type MessageProcessor = xcm_builder::ProcessXcmMessage< + AggregateMessageOrigin, + xcm_executor::XcmExecutor, + RuntimeCall, + >; + type Size = u32; + // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: + type QueueChangeHandler = NarrowOriginToSibling; + type QueuePausedQuery = NarrowOriginToSibling; + type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type MaxStale = sp_core::ConstU32<8>; + type ServiceWeight = MessageQueueServiceWeight; +} + +impl cumulus_pallet_aura_ext::Config for Runtime {} + +parameter_types! { + /// The asset ID for the asset that we use to pay for message delivery fees. + pub FeeAssetId: AssetId = Concrete(xcm_config::WndLocation::get()); + /// The base fee for the message delivery fees. 
+ pub const BaseDeliveryFee: u128 = CENTS.saturating_mul(3); +} + +pub type PriceForSiblingParachainDelivery = polkadot_runtime_common::xcm_sender::ExponentialPrice< + FeeAssetId, + BaseDeliveryFee, + TransactionByteFee, + XcmpQueue, +>; + +impl cumulus_pallet_xcmp_queue::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type ChannelInfo = ParachainSystem; + type VersionWrapper = PolkadotXcm; + // Enqueue XCMP messages from siblings for later processing. + type XcmpQueue = TransformOrigin; + type MaxInboundSuspended = sp_core::ConstU32<1_000>; + type ControllerOrigin = EitherOfDiverse, Fellows>; + type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; + type WeightInfo = weights::cumulus_pallet_xcmp_queue::WeightInfo; + type PriceForSiblingDelivery = PriceForSiblingParachainDelivery; +} + +parameter_types! { + pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; +} + +impl cumulus_pallet_dmp_queue::Config for Runtime { + type WeightInfo = weights::cumulus_pallet_dmp_queue::WeightInfo; + type RuntimeEvent = RuntimeEvent; + type DmpSink = frame_support::traits::EnqueueWithOrigin; +} + +pub const PERIOD: u32 = 6 * HOURS; +pub const OFFSET: u32 = 0; + +impl pallet_session::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type ValidatorId = ::AccountId; + // we don't have stash and controller, thus we don't need the convert as well. + type ValidatorIdOf = pallet_collator_selection::IdentityCollator; + type ShouldEndSession = pallet_session::PeriodicSessions, ConstU32>; + type NextSessionRotation = pallet_session::PeriodicSessions, ConstU32>; + type SessionManager = CollatorSelection; + // Essentially just Aura, but let's be pedantic. + type SessionHandler = ::KeyTypeIdProviders; + type Keys = SessionKeys; + type WeightInfo = weights::pallet_session::WeightInfo; +} + +impl pallet_aura::Config for Runtime { + type AuthorityId = AuraId; + type DisabledValidators = (); + type MaxAuthorities = ConstU32<100_000>; + type AllowMultipleBlocksPerSlot = ConstBool; + #[cfg(feature = "experimental")] + type SlotDuration = pallet_aura::MinimumPeriodTimesTwo; +} + +parameter_types! { + pub const PotId: PalletId = PalletId(*b"PotStake"); + pub const SessionLength: BlockNumber = 6 * HOURS; + // `StakingAdmin` pluralistic body. + pub const StakingAdminBodyId: BodyId = BodyId::Defense; +} + +/// We allow root and the `StakingAdmin` to execute privileged collator selection operations. +pub type CollatorSelectionUpdateOrigin = EitherOfDiverse< + EnsureRoot, + EnsureXcm>, +>; + +impl pallet_collator_selection::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type UpdateOrigin = CollatorSelectionUpdateOrigin; + type PotId = PotId; + type MaxCandidates = ConstU32<100>; + type MinEligibleCollators = ConstU32<4>; + type MaxInvulnerables = ConstU32<20>; + // should be a multiple of session or things will get inconsistent + type KickThreshold = ConstU32; + type ValidatorId = ::AccountId; + type ValidatorIdOf = pallet_collator_selection::IdentityCollator; + type ValidatorRegistration = Session; + type WeightInfo = weights::pallet_collator_selection::WeightInfo; +} + +pub const ALLIANCE_MOTION_DURATION: BlockNumber = 5 * DAYS; + +parameter_types! 
{ + pub const AllianceMotionDuration: BlockNumber = ALLIANCE_MOTION_DURATION; + pub MaxProposalWeight: Weight = Perbill::from_percent(50) * RuntimeBlockWeights::get().max_block; +} +pub const ALLIANCE_MAX_PROPOSALS: u32 = 100; +pub const ALLIANCE_MAX_MEMBERS: u32 = 100; + +type AllianceCollective = pallet_collective::Instance1; +impl pallet_collective::Config for Runtime { + type RuntimeOrigin = RuntimeOrigin; + type Proposal = RuntimeCall; + type RuntimeEvent = RuntimeEvent; + type MotionDuration = AllianceMotionDuration; + type MaxProposals = ConstU32; + type MaxMembers = ConstU32; + type DefaultVote = pallet_collective::MoreThanMajorityThenPrimeDefaultVote; + type SetMembersOrigin = EnsureRoot; + type WeightInfo = weights::pallet_collective::WeightInfo; + type MaxProposalWeight = MaxProposalWeight; +} + +pub const MAX_FELLOWS: u32 = ALLIANCE_MAX_MEMBERS; +pub const MAX_ALLIES: u32 = 100; + +parameter_types! { + pub const AllyDeposit: Balance = 1_000 * UNITS; // 1,000 WND bond to join as an Ally + // The Alliance pallet account, used as a temporary place to deposit a slashed imbalance + // before the teleport to the Treasury. + pub AlliancePalletAccount: AccountId = ALLIANCE_PALLET_ID.into_account_truncating(); + pub WestendTreasuryAccount: AccountId = WESTEND_TREASURY_PALLET_ID.into_account_truncating(); + // The number of blocks a member must wait between giving a retirement notice and retiring. + // Supposed to be greater than time required to `kick_member` with alliance motion. + pub const AllianceRetirementPeriod: BlockNumber = (90 * DAYS) + ALLIANCE_MOTION_DURATION; +} + +impl pallet_alliance::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Proposal = RuntimeCall; + type AdminOrigin = RootOrAllianceTwoThirdsMajority; + type MembershipManager = RootOrAllianceTwoThirdsMajority; + type AnnouncementOrigin = RootOrAllianceTwoThirdsMajority; + type Currency = Balances; + type Slashed = ToParentTreasury; + type InitializeMembers = AllianceMotion; + type MembershipChanged = AllianceMotion; + type RetirementPeriod = AllianceRetirementPeriod; + type IdentityVerifier = (); // Don't block accounts on identity criteria + type ProposalProvider = AllianceProposalProvider; + type MaxProposals = ConstU32; + type MaxFellows = ConstU32; + type MaxAllies = ConstU32; + type MaxUnscrupulousItems = ConstU32<100>; + type MaxWebsiteUrlLength = ConstU32<255>; + type MaxAnnouncementsCount = ConstU32<100>; + type MaxMembersCount = ConstU32; + type AllyDeposit = AllyDeposit; + type WeightInfo = weights::pallet_alliance::WeightInfo; +} + +parameter_types! { + pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * RuntimeBlockWeights::get().max_block; +} + +#[cfg(not(feature = "runtime-benchmarks"))] +parameter_types! { + pub const MaxScheduledPerBlock: u32 = 50; +} + +#[cfg(feature = "runtime-benchmarks")] +parameter_types! { + pub const MaxScheduledPerBlock: u32 = 200; +} + +impl pallet_scheduler::Config for Runtime { + type RuntimeOrigin = RuntimeOrigin; + type RuntimeEvent = RuntimeEvent; + type PalletsOrigin = OriginCaller; + type RuntimeCall = RuntimeCall; + type MaximumWeight = MaximumSchedulerWeight; + type ScheduleOrigin = EnsureRoot; + type MaxScheduledPerBlock = MaxScheduledPerBlock; + type WeightInfo = weights::pallet_scheduler::WeightInfo; + type OriginPrivilegeCmp = EqualOrGreatestRootCmp; + type Preimages = Preimage; +} + +parameter_types! 
{ + pub const PreimageBaseDeposit: Balance = deposit(2, 64); + pub const PreimageByteDeposit: Balance = deposit(0, 1); + pub const PreimageHoldReason: RuntimeHoldReason = RuntimeHoldReason::Preimage(pallet_preimage::HoldReason::Preimage); +} + +impl pallet_preimage::Config for Runtime { + type WeightInfo = weights::pallet_preimage::WeightInfo; + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type ManagerOrigin = EnsureRoot; + type Consideration = HoldConsideration< + AccountId, + Balances, + PreimageHoldReason, + LinearStoragePrice, + >; +} + +// Create the runtime by composing the FRAME pallets that were previously configured. +construct_runtime!( + pub enum Runtime + { + // System support stuff. + System: frame_system::{Pallet, Call, Config, Storage, Event} = 0, + ParachainSystem: cumulus_pallet_parachain_system::{ + Pallet, Call, Config, Storage, Inherent, Event, ValidateUnsigned, + } = 1, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent} = 2, + ParachainInfo: parachain_info::{Pallet, Storage, Config} = 3, + + // Monetary stuff. + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event} = 10, + TransactionPayment: pallet_transaction_payment::{Pallet, Storage, Event} = 11, + + // Collator support. the order of these 5 are important and shall not change. + Authorship: pallet_authorship::{Pallet, Storage} = 20, + CollatorSelection: pallet_collator_selection::{Pallet, Call, Storage, Event, Config} = 21, + Session: pallet_session::{Pallet, Call, Storage, Event, Config} = 22, + Aura: pallet_aura::{Pallet, Storage, Config} = 23, + AuraExt: cumulus_pallet_aura_ext::{Pallet, Storage, Config} = 24, + + // XCM helpers. + XcmpQueue: cumulus_pallet_xcmp_queue::{Pallet, Call, Storage, Event} = 30, + PolkadotXcm: pallet_xcm::{Pallet, Call, Storage, Event, Origin, Config} = 31, + CumulusXcm: cumulus_pallet_xcm::{Pallet, Event, Origin} = 32, + DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event} = 33, + MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 34, + + // Handy utilities. + Utility: pallet_utility::{Pallet, Call, Event} = 40, + Multisig: pallet_multisig::{Pallet, Call, Storage, Event} = 41, + Proxy: pallet_proxy::{Pallet, Call, Storage, Event} = 42, + Preimage: pallet_preimage::{Pallet, Call, Storage, Event, HoldReason} = 43, + Scheduler: pallet_scheduler::{Pallet, Call, Storage, Event} = 44, + + // The main stage. + + // The Alliance. + Alliance: pallet_alliance::{Pallet, Call, Storage, Event, Config} = 50, + AllianceMotion: pallet_collective::::{Pallet, Call, Storage, Origin, Event, Config} = 51, + + // The Fellowship. + // pub type FellowshipCollectiveInstance = pallet_ranked_collective::Instance1; + FellowshipCollective: pallet_ranked_collective::::{Pallet, Call, Storage, Event} = 60, + // pub type FellowshipReferendaInstance = pallet_referenda::Instance1; + FellowshipReferenda: pallet_referenda::::{Pallet, Call, Storage, Event} = 61, + FellowshipOrigins: pallet_fellowship_origins::{Origin} = 62, + // pub type FellowshipCoreInstance = pallet_core_fellowship::Instance1; + FellowshipCore: pallet_core_fellowship::::{Pallet, Call, Storage, Event} = 63, + // pub type FellowshipSalaryInstance = pallet_salary::Instance1; + FellowshipSalary: pallet_salary::::{Pallet, Call, Storage, Event} = 64, + + // Ambassador Program. 
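		// Note on this block's layout: the trailing `= <index>` pins each pallet to
		// an explicit index in the runtime, which is what gets encoded into
		// `RuntimeCall`/`RuntimeEvent` (and used by tools and XCM `Transact`
		// payloads), so the indices are expected to stay stable across upgrades.
		// Collectives sharing a pallet implementation are distinguished by
		// instance: the Fellowship uses `pallet_ranked_collective::Instance1`
		// (see the alias commented above at index 60), while the Ambassador
		// Program below uses a second instance of the same pallets.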
+ AmbassadorCollective: pallet_ranked_collective::::{Pallet, Call, Storage, Event} = 70, + AmbassadorReferenda: pallet_referenda::::{Pallet, Call, Storage, Event} = 71, + AmbassadorOrigins: pallet_ambassador_origins::{Origin} = 72, + AmbassadorCore: pallet_core_fellowship::::{Pallet, Call, Storage, Event} = 73, + AmbassadorSalary: pallet_salary::::{Pallet, Call, Storage, Event} = 74, + AmbassadorContent: pallet_collective_content::::{Pallet, Call, Storage, Event} = 75, + } +); + +/// The address format for describing accounts. +pub type Address = sp_runtime::MultiAddress; +/// Block type as expected by this runtime. +pub type Block = generic::Block; +/// A Block signed with a Justification +pub type SignedBlock = generic::SignedBlock; +/// BlockId type as expected by this runtime. +pub type BlockId = generic::BlockId; +/// The SignedExtension to the basic transaction logic. +pub type SignedExtra = ( + frame_system::CheckNonZeroSender, + frame_system::CheckSpecVersion, + frame_system::CheckTxVersion, + frame_system::CheckGenesis, + frame_system::CheckEra, + frame_system::CheckNonce, + frame_system::CheckWeight, +); +/// Unchecked extrinsic type as expected by this runtime. +pub type UncheckedExtrinsic = + generic::UncheckedExtrinsic; +/// All migrations executed on runtime upgrade as a nested tuple of types implementing +/// `OnRuntimeUpgrade`. Included migrations must be idempotent. +type Migrations = ( + // unreleased + pallet_collator_selection::migration::v1::MigrateToV1, +); + +/// Executive: handles dispatch to the various modules. +pub type Executive = frame_executive::Executive< + Runtime, + Block, + frame_system::ChainContext, + Runtime, + AllPalletsWithSystem, + Migrations, +>; + +#[cfg(feature = "runtime-benchmarks")] +mod benches { + frame_benchmarking::define_benchmarks!( + [frame_system, SystemBench::] + [pallet_balances, Balances] + [pallet_message_queue, MessageQueue] + [pallet_multisig, Multisig] + [pallet_proxy, Proxy] + [pallet_session, SessionBench::] + [pallet_utility, Utility] + [pallet_timestamp, Timestamp] + [pallet_collator_selection, CollatorSelection] + [cumulus_pallet_parachain_system, ParachainSystem] + [cumulus_pallet_xcmp_queue, XcmpQueue] + [cumulus_pallet_dmp_queue, DmpQueue] + [pallet_alliance, Alliance] + [pallet_collective, AllianceMotion] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] + [pallet_preimage, Preimage] + [pallet_scheduler, Scheduler] + [pallet_referenda, FellowshipReferenda] + [pallet_ranked_collective, FellowshipCollective] + [pallet_core_fellowship, FellowshipCore] + [pallet_salary, FellowshipSalary] + [pallet_referenda, AmbassadorReferenda] + [pallet_ranked_collective, AmbassadorCollective] + [pallet_collective_content, AmbassadorContent] + [pallet_core_fellowship, AmbassadorCore] + [pallet_salary, AmbassadorSalary] + ); +} + +impl_runtime_apis! 
{ + impl sp_consensus_aura::AuraApi for Runtime { + fn slot_duration() -> sp_consensus_aura::SlotDuration { + sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) + } + + fn authorities() -> Vec { + Aura::authorities().into_inner() + } + } + + impl sp_api::Core for Runtime { + fn version() -> RuntimeVersion { + VERSION + } + + fn execute_block(block: Block) { + Executive::execute_block(block) + } + + fn initialize_block(header: &::Header) { + Executive::initialize_block(header) + } + } + + impl sp_api::Metadata for Runtime { + fn metadata() -> OpaqueMetadata { + OpaqueMetadata::new(Runtime::metadata().into()) + } + + fn metadata_at_version(version: u32) -> Option { + Runtime::metadata_at_version(version) + } + + fn metadata_versions() -> sp_std::vec::Vec { + Runtime::metadata_versions() + } + } + + impl sp_block_builder::BlockBuilder for Runtime { + fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { + Executive::apply_extrinsic(extrinsic) + } + + fn finalize_block() -> ::Header { + Executive::finalize_block() + } + + fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { + data.create_extrinsics() + } + + fn check_inherents( + block: Block, + data: sp_inherents::InherentData, + ) -> sp_inherents::CheckInherentsResult { + data.check_extrinsics(&block) + } + } + + impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { + fn validate_transaction( + source: TransactionSource, + tx: ::Extrinsic, + block_hash: ::Hash, + ) -> TransactionValidity { + Executive::validate_transaction(source, tx, block_hash) + } + } + + impl sp_offchain::OffchainWorkerApi for Runtime { + fn offchain_worker(header: &::Header) { + Executive::offchain_worker(header) + } + } + + impl sp_session::SessionKeys for Runtime { + fn generate_session_keys(seed: Option>) -> Vec { + SessionKeys::generate(seed) + } + + fn decode_session_keys( + encoded: Vec, + ) -> Option, KeyTypeId)>> { + SessionKeys::decode_into_raw_public_keys(&encoded) + } + } + + impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { + fn account_nonce(account: AccountId) -> Nonce { + System::account_nonce(account) + } + } + + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi for Runtime { + fn query_info( + uxt: ::Extrinsic, + len: u32, + ) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo { + TransactionPayment::query_info(uxt, len) + } + fn query_fee_details( + uxt: ::Extrinsic, + len: u32, + ) -> pallet_transaction_payment::FeeDetails { + TransactionPayment::query_fee_details(uxt, len) + } + fn query_weight_to_fee(weight: Weight) -> Balance { + TransactionPayment::weight_to_fee(weight) + } + fn query_length_to_fee(length: u32) -> Balance { + TransactionPayment::length_to_fee(length) + } + } + + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentCallApi + for Runtime + { + fn query_call_info( + call: RuntimeCall, + len: u32, + ) -> pallet_transaction_payment::RuntimeDispatchInfo { + TransactionPayment::query_call_info(call, len) + } + fn query_call_fee_details( + call: RuntimeCall, + len: u32, + ) -> pallet_transaction_payment::FeeDetails { + TransactionPayment::query_call_fee_details(call, len) + } + fn query_weight_to_fee(weight: Weight) -> Balance { + TransactionPayment::weight_to_fee(weight) + } + fn query_length_to_fee(length: u32) -> Balance { + TransactionPayment::length_to_fee(length) + } + } + + impl cumulus_primitives_core::CollectCollationInfo for Runtime { + fn collect_collation_info(header: &::Header) -> 
cumulus_primitives_core::CollationInfo { + ParachainSystem::collect_collation_info(header) + } + } + + #[cfg(feature = "try-runtime")] + impl frame_try_runtime::TryRuntime for Runtime { + fn on_runtime_upgrade(checks: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) { + let weight = Executive::try_runtime_upgrade(checks).unwrap(); + (weight, RuntimeBlockWeights::get().max_block) + } + + fn execute_block( + block: Block, + state_root_check: bool, + signature_check: bool, + select: frame_try_runtime::TryStateSelect, + ) -> Weight { + // NOTE: intentional unwrap: we don't want to propagate the error backwards, and want to + // have a backtrace here. + Executive::try_execute_block(block, state_root_check, signature_check, select).unwrap() + } + } + + #[cfg(feature = "runtime-benchmarks")] + impl frame_benchmarking::Benchmark for Runtime { + fn benchmark_metadata(extra: bool) -> ( + Vec, + Vec, + ) { + use frame_benchmarking::{Benchmarking, BenchmarkList}; + use frame_support::traits::StorageInfoTrait; + use frame_system_benchmarking::Pallet as SystemBench; + use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; + + let mut list = Vec::::new(); + list_benchmarks!(list, extra); + + let storage_info = AllPalletsWithSystem::storage_info(); + (list, storage_info) + } + + fn dispatch_benchmark( + config: frame_benchmarking::BenchmarkConfig + ) -> Result, sp_runtime::RuntimeString> { + use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; + use sp_storage::TrackedStorageKey; + + use frame_system_benchmarking::Pallet as SystemBench; + impl frame_system_benchmarking::Config for Runtime { + fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { + ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); + Ok(()) + } + + fn verify_set_code() { + System::assert_last_event(cumulus_pallet_parachain_system::Event::::ValidationFunctionStored.into()); + } + } + + use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + impl cumulus_pallet_session_benchmarking::Config for Runtime {} + + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported between Collectives and Relay. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }.into(), + Parent.into(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Reserve transfers are disabled on Collectives. 
+ None + } + } + + let whitelist: Vec = vec![ + // Block Number + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), + // Total Issuance + hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), + // Execution Phase + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), + // Event Count + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), + // System Events + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), + ]; + + let mut batches = Vec::::new(); + let params = (&config, &whitelist); + add_benchmarks!(params, batches); + + if batches.is_empty() { return Err("Benchmark not found for this pallet.".into()) } + Ok(batches) + } + } + + impl sp_genesis_builder::GenesisBuilder for Runtime { + fn create_default_config() -> Vec { + create_default_config::() + } + + fn build_config(config: Vec) -> sp_genesis_builder::Result { + build_config::(config) + } + } +} + +cumulus_pallet_parachain_system::register_validate_block! { + Runtime = Runtime, + BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::, +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/block_weights.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/block_weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..e7fdb2aae2a01ec06076de83d94817e540e205dd --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/block_weights.rs @@ -0,0 +1,53 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod constants { + use frame_support::{ + parameter_types, + weights::{constants, Weight}, + }; + + parameter_types! { + /// Importing a block with 0 Extrinsics. + pub const BlockExecutionWeight: Weight = + Weight::from_parts(constants::WEIGHT_REF_TIME_PER_NANOS.saturating_mul(5_000_000), 0); + } + + #[cfg(test)] + mod test_weights { + use frame_support::weights::constants; + + /// Checks that the weight exists and is sane. + // NOTE: If this test fails but you are sure that the generated values are fine, + // you can delete it. + #[test] + fn sane() { + let w = super::constants::BlockExecutionWeight::get(); + + // At least 100 µs. + assert!( + w.ref_time() >= 100u64 * constants::WEIGHT_REF_TIME_PER_MICROS, + "Weight should be at least 100 µs." + ); + // At most 50 ms. + assert!( + w.ref_time() <= 50u64 * constants::WEIGHT_REF_TIME_PER_MILLIS, + "Weight should be at most 50 ms." 
+ ); + } + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_dmp_queue.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_dmp_queue.rs new file mode 100644 index 0000000000000000000000000000000000000000..cc41dcd6cbbb62c1392ae2e7517b5dfe920a5b85 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_dmp_queue.rs @@ -0,0 +1,131 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `cumulus_pallet_dmp_queue` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-10-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=cumulus_pallet_dmp_queue +// --chain=asset-hub-kusama-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `cumulus_pallet_dmp_queue`. 
+pub struct WeightInfo(PhantomData); +impl cumulus_pallet_dmp_queue::WeightInfo for WeightInfo { + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn on_idle_good_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65696` + // Estimated: `69161` + // Minimum execution time: 124_651_000 picoseconds. + Weight::from_parts(127_857_000, 0) + .saturating_add(Weight::from_parts(0, 69161)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + fn on_idle_large_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65659` + // Estimated: `69124` + // Minimum execution time: 65_684_000 picoseconds. 
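		// Every function in these autogenerated weight files follows the same
		// recipe: a constant reference-time part in picoseconds (measured above),
		// a proof-size part taken from the `Estimated` PoV bytes, and the storage
		// accesses priced through the runtime's configured `T::DbWeight`
		// (`RocksDbWeight` for this runtime). Roughly:
		// `from_parts(ref_time, 0) + from_parts(0, proof_size) + reads(r) + writes(w)`.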
+ Weight::from_parts(68_039_000, 0) + .saturating_add(Weight::from_parts(0, 69124)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn on_idle_overweight_good_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65726` + // Estimated: `69191` + // Minimum execution time: 117_657_000 picoseconds. + Weight::from_parts(122_035_000, 0) + .saturating_add(Weight::from_parts(0, 69191)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(6)) + } + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + fn on_idle_overweight_large_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65689` + // Estimated: `69154` + // Minimum execution time: 59_799_000 picoseconds. 
+ Weight::from_parts(61_354_000, 0) + .saturating_add(Weight::from_parts(0, 69154)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_parachain_system.rs new file mode 100644 index 0000000000000000000000000000000000000000..0b7a2fc21cde4f12c821a0e89982db4813f3f832 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_parachain_system.rs @@ -0,0 +1,80 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `cumulus_pallet_parachain_system` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-03-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("collectives-polkadot-dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --chain +// collectives-polkadot-dev +// --pallet +// cumulus_pallet_parachain_system +// --extrinsic +// * +// --execution +// wasm +// --wasm-execution +// compiled +// --output +// parachains/runtimes/collectives/collectives-polkadot/src/weights +// --steps +// 50 +// --repeat +// 20 + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::Weight}; +use sp_std::marker::PhantomData; + +/// Weight functions for `cumulus_pallet_parachain_system`. 
+pub struct WeightInfo(PhantomData); +impl cumulus_pallet_parachain_system::WeightInfo for WeightInfo { + /// Storage: ParachainSystem LastDmqMqcHead (r:1 w:1) + /// Proof Skipped: ParachainSystem LastDmqMqcHead (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: ParachainSystem ReservedDmpWeightOverride (r:1 w:0) + /// Proof Skipped: ParachainSystem ReservedDmpWeightOverride (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: ParachainSystem ProcessedDownwardMessages (r:0 w:1) + /// Proof Skipped: ParachainSystem ProcessedDownwardMessages (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: MessageQueue Pages (r:0 w:16) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// The range of component `n` is `[0, 1000]`. + fn enqueue_inbound_downward_messages(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `48` + // Estimated: `8121` + // Minimum execution time: 1_988_000 picoseconds. + Weight::from_parts(2_039_000, 0) + .saturating_add(Weight::from_parts(0, 8121)) + // Standard Error: 30_660 + .saturating_add(Weight::from_parts(24_419_204, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_xcmp_queue.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_xcmp_queue.rs new file mode 100644 index 0000000000000000000000000000000000000000..e68c075bffc63e46668ffa84ce37143e848439fb --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_xcmp_queue.rs @@ -0,0 +1,148 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `cumulus_pallet_xcmp_queue` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-09-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `Olivers-MacBook-Pro.local`, CPU: `` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --pallet +// cumulus-pallet-xcmp-queue +// --chain +// collectives-polkadot-dev +// --output +// cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/cumulus_pallet_xcmp_queue.rs +// --extrinsic +// + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `cumulus_pallet_xcmp_queue`. +pub struct WeightInfo(PhantomData); +impl cumulus_pallet_xcmp_queue::WeightInfo for WeightInfo { + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:1) + /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn set_config_with_u32() -> Weight { + // Proof Size summary in bytes: + // Measured: `142` + // Estimated: `1627` + // Minimum execution time: 5_000_000 picoseconds. + Weight::from_parts(6_000_000, 0) + .saturating_add(Weight::from_parts(0, 1627)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) + /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn enqueue_xcmp_message() -> Weight { + // Proof Size summary in bytes: + // Measured: `148` + // Estimated: `3517` + // Minimum execution time: 14_000_000 picoseconds. + Weight::from_parts(14_000_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn suspend_channel() -> Weight { + // Proof Size summary in bytes: + // Measured: `142` + // Estimated: `1627` + // Minimum execution time: 3_000_000 picoseconds. + Weight::from_parts(3_000_000, 0) + .saturating_add(Weight::from_parts(0, 1627)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn resume_channel() -> Weight { + // Proof Size summary in bytes: + // Measured: `177` + // Estimated: `1662` + // Minimum execution time: 4_000_000 picoseconds. 
+ Weight::from_parts(5_000_000, 0) + .saturating_add(Weight::from_parts(0, 1662)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn take_first_concatenated_xcm() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 44_000_000 picoseconds. + Weight::from_parts(45_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Storage: `XcmpQueue::InboundXcmpMessages` (r:1 w:1) + /// Proof: `XcmpQueue::InboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) + /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn on_idle_good_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65777` + // Estimated: `69242` + // Minimum execution time: 60_000_000 picoseconds. + Weight::from_parts(63_000_000, 0) + .saturating_add(Weight::from_parts(0, 69242)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + fn on_idle_large_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65776` + // Estimated: `69241` + // Minimum execution time: 41_000_000 picoseconds. + Weight::from_parts(43_000_000, 0) + .saturating_add(Weight::from_parts(0, 69241)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/extrinsic_weights.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/extrinsic_weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..1a4adb968bb7195428ea00d59cd92dcd3b6eea5f --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/extrinsic_weights.rs @@ -0,0 +1,53 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod constants { + use frame_support::{ + parameter_types, + weights::{constants, Weight}, + }; + + parameter_types! { + /// Executing a NO-OP `System::remarks` Extrinsic. + pub const ExtrinsicBaseWeight: Weight = + Weight::from_parts(constants::WEIGHT_REF_TIME_PER_NANOS.saturating_mul(125_000), 0); + } + + #[cfg(test)] + mod test_weights { + use frame_support::weights::constants; + + /// Checks that the weight exists and is sane. + // NOTE: If this test fails but you are sure that the generated values are fine, + // you can delete it. + #[test] + fn sane() { + let w = super::constants::ExtrinsicBaseWeight::get(); + + // At least 10 µs. + assert!( + w.ref_time() >= 10u64 * constants::WEIGHT_REF_TIME_PER_MICROS, + "Weight should be at least 10 µs." + ); + // At most 1 ms. + assert!( + w.ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, + "Weight should be at most 1 ms." + ); + } + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/frame_system.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/frame_system.rs new file mode 100644 index 0000000000000000000000000000000000000000..b6f1dc8dc08038a8c614f10914f9fd8c14fb10ca --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/frame_system.rs @@ -0,0 +1,154 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `frame_system` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=frame_system +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system`. 
+pub struct WeightInfo(PhantomData); +impl frame_system::WeightInfo for WeightInfo { + /// The range of component `b` is `[0, 3932160]`. + fn remark(b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_926_000 picoseconds. + Weight::from_parts(1_929_666, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 0 + .saturating_add(Weight::from_parts(387, 0).saturating_mul(b.into())) + } + /// The range of component `b` is `[0, 3932160]`. + fn remark_with_event(b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_221_000 picoseconds. + Weight::from_parts(34_449_539, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 7 + .saturating_add(Weight::from_parts(1_706, 0).saturating_mul(b.into())) + } + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) + fn set_heap_pages() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `1485` + // Minimum execution time: 3_681_000 picoseconds. + Weight::from_parts(3_857_000, 0) + .saturating_add(Weight::from_parts(0, 1485)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) + /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpgradeRestrictionSignal` (r:1 w:0) + /// Proof: `ParachainSystem::UpgradeRestrictionSignal` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingValidationCode` (r:1 w:1) + /// Proof: `ParachainSystem::PendingValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::NewValidationCode` (r:0 w:1) + /// Proof: `ParachainSystem::NewValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::DidSetValidationCode` (r:0 w:1) + /// Proof: `ParachainSystem::DidSetValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn set_code() -> Weight { + // Proof Size summary in bytes: + // Measured: `156` + // Estimated: `1641` + // Minimum execution time: 101_899_621_000 picoseconds. + Weight::from_parts(106_377_672_000, 0) + .saturating_add(Weight::from_parts(0, 1641)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `i` is `[0, 1000]`. + fn set_storage(i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_039_000 picoseconds. 
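		// Functions with a component range (here `i`, the number of storage items
		// touched) also add a measured per-unit slope on top of the base weight.
		// For example, with `i = 10` the reference time below comes to roughly
		// 2_094_000 + 10 * 754_465 = 9_638_650 picoseconds, plus one DB write per
		// item priced through `T::DbWeight`.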
+ Weight::from_parts(2_094_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 2_124 + .saturating_add(Weight::from_parts(754_465, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) + } + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `i` is `[0, 1000]`. + fn kill_storage(i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_103_000 picoseconds. + Weight::from_parts(2_182_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 1_031 + .saturating_add(Weight::from_parts(570_563, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) + } + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `p` is `[0, 1000]`. + fn kill_prefix(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `82 + p * (69 ±0)` + // Estimated: `78 + p * (70 ±0)` + // Minimum execution time: 3_728_000 picoseconds. + Weight::from_parts(3_836_000, 0) + .saturating_add(Weight::from_parts(0, 78)) + // Standard Error: 1_802 + .saturating_add(Weight::from_parts(1_199_345, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) + .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..1d877fdbd2bbe2b18fbefbc6bd39357c8a0e21a6 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs @@ -0,0 +1,50 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
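+
+//! Weight modules and re-exported weight constants for the collectives-westend
+//! runtime. As a rough, illustrative sketch (assuming the conventional `Runtime`
+//! type; the concrete `Config` impls live in the runtime's `lib.rs` and may
+//! differ), a pallet is pointed at these types like so:
+//!
+//! ```ignore
+//! impl frame_system::Config for Runtime {
+//!     // ...
+//!     type SystemWeightInfo = weights::frame_system::WeightInfo<Runtime>;
+//! }
+//! impl pallet_balances::Config for Runtime {
+//!     // ...
+//!     type WeightInfo = weights::pallet_balances::WeightInfo<Runtime>;
+//! }
+//! ```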
+ +pub mod block_weights; +pub mod cumulus_pallet_dmp_queue; +pub mod cumulus_pallet_parachain_system; +pub mod cumulus_pallet_xcmp_queue; +pub mod extrinsic_weights; +pub mod frame_system; +pub mod pallet_alliance; +pub mod pallet_balances; +pub mod pallet_collator_selection; +pub mod pallet_collective; +pub mod pallet_collective_content; +pub mod pallet_core_fellowship_ambassador_core; +pub mod pallet_core_fellowship_fellowship_core; +pub mod pallet_message_queue; +pub mod pallet_multisig; +pub mod pallet_preimage; +pub mod pallet_proxy; +pub mod pallet_ranked_collective_ambassador_collective; +pub mod pallet_ranked_collective_fellowship_collective; +pub mod pallet_referenda_ambassador_referenda; +pub mod pallet_referenda_fellowship_referenda; +pub mod pallet_salary_ambassador_salary; +pub mod pallet_salary_fellowship_salary; +pub mod pallet_scheduler; +pub mod pallet_session; +pub mod pallet_timestamp; +pub mod pallet_utility; +pub mod pallet_xcm; +pub mod paritydb_weights; +pub mod rocksdb_weights; + +pub use block_weights::constants::BlockExecutionWeight; +pub use extrinsic_weights::constants::ExtrinsicBaseWeight; +pub use paritydb_weights::constants::ParityDbWeight; +pub use rocksdb_weights::constants::RocksDbWeight; diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_alliance.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_alliance.rs new file mode 100644 index 0000000000000000000000000000000000000000..d8ede609a67c6df5c707504206b72e7382d2f7b4 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_alliance.rs @@ -0,0 +1,494 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_alliance` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_alliance +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_alliance`. 
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_alliance::WeightInfo for WeightInfo<T> {
+	/// Storage: `Alliance::Members` (r:1 w:0)
+	/// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`)
+	/// Storage: `AllianceMotion::ProposalOf` (r:1 w:1)
+	/// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `AllianceMotion::Proposals` (r:1 w:1)
+	/// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `AllianceMotion::ProposalCount` (r:1 w:1)
+	/// Proof: `AllianceMotion::ProposalCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `AllianceMotion::Voting` (r:0 w:1)
+	/// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// The range of component `b` is `[1, 1024]`.
+	/// The range of component `m` is `[2, 100]`.
+	/// The range of component `p` is `[1, 100]`.
+	fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `439 + m * (32 ±0) + p * (36 ±0)`
+		// Estimated: `6676 + m * (32 ±0) + p * (36 ±0)`
+		// Minimum execution time: 32_783_000 picoseconds.
+		Weight::from_parts(32_174_037, 0)
+			.saturating_add(Weight::from_parts(0, 6676))
+			// Standard Error: 198
+			.saturating_add(Weight::from_parts(1_220, 0).saturating_mul(b.into()))
+			// Standard Error: 2_074
+			.saturating_add(Weight::from_parts(40_945, 0).saturating_mul(m.into()))
+			// Standard Error: 2_048
+			.saturating_add(Weight::from_parts(181_087, 0).saturating_mul(p.into()))
+			.saturating_add(T::DbWeight::get().reads(4))
+			.saturating_add(T::DbWeight::get().writes(4))
+			.saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into()))
+			.saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into()))
+	}
+	/// Storage: `Alliance::Members` (r:1 w:0)
+	/// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`)
+	/// Storage: `AllianceMotion::Voting` (r:1 w:1)
+	/// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// The range of component `m` is `[5, 100]`.
+	fn vote(m: u32, ) -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `868 + m * (64 ±0)`
+		// Estimated: `6676 + m * (64 ±0)`
+		// Minimum execution time: 28_520_000 picoseconds.
+ Weight::from_parts(29_661_024, 0) + .saturating_add(Weight::from_parts(0, 6676)) + // Standard Error: 2_336 + .saturating_add(Weight::from_parts(89_873, 0).saturating_mul(m.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) + } + /// Storage: `Alliance::Members` (r:1 w:0) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Voting` (r:1 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalOf` (r:0 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `m` is `[4, 100]`. + /// The range of component `p` is `[1, 100]`. + fn close_early_disapproved(m: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `312 + m * (96 ±0) + p * (36 ±0)` + // Estimated: `6676 + m * (97 ±0) + p * (36 ±0)` + // Minimum execution time: 39_353_000 picoseconds. + Weight::from_parts(33_028_008, 0) + .saturating_add(Weight::from_parts(0, 6676)) + // Standard Error: 2_137 + .saturating_add(Weight::from_parts(90_946, 0).saturating_mul(m.into())) + // Standard Error: 2_084 + .saturating_add(Weight::from_parts(175_827, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) + } + /// Storage: `Alliance::Members` (r:1 w:0) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Voting` (r:1 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalOf` (r:1 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// The range of component `b` is `[1, 1024]`. + /// The range of component `m` is `[4, 100]`. + /// The range of component `p` is `[1, 100]`. + fn close_early_approved(_b: u32, m: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `762 + m * (96 ±0) + p * (41 ±0)` + // Estimated: `6676 + m * (97 ±0) + p * (40 ±0)` + // Minimum execution time: 52_835_000 picoseconds. 
+ Weight::from_parts(45_963_292, 0) + .saturating_add(Weight::from_parts(0, 6676)) + // Standard Error: 3_189 + .saturating_add(Weight::from_parts(111_627, 0).saturating_mul(m.into())) + // Standard Error: 3_109 + .saturating_add(Weight::from_parts(207_923, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 40).saturating_mul(p.into())) + } + /// Storage: `Alliance::Members` (r:1 w:0) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Voting` (r:1 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Prime` (r:1 w:0) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalOf` (r:1 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Alliance::Rule` (r:0 w:1) + /// Proof: `Alliance::Rule` (`max_values`: Some(1), `max_size`: Some(87), added: 582, mode: `MaxEncodedLen`) + /// The range of component `m` is `[2, 100]`. + /// The range of component `p` is `[1, 100]`. + fn close_disapproved(m: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `518 + m * (96 ±0) + p * (41 ±0)` + // Estimated: `6676 + m * (109 ±0) + p * (43 ±0)` + // Minimum execution time: 49_980_000 picoseconds. + Weight::from_parts(48_110_301, 0) + .saturating_add(Weight::from_parts(0, 6676)) + // Standard Error: 5_057 + .saturating_add(Weight::from_parts(169_065, 0).saturating_mul(m.into())) + // Standard Error: 4_995 + .saturating_add(Weight::from_parts(201_349, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(Weight::from_parts(0, 109).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 43).saturating_mul(p.into())) + } + /// Storage: `Alliance::Members` (r:1 w:0) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Voting` (r:1 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Prime` (r:1 w:0) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalOf` (r:0 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `b` is `[1, 1024]`. + /// The range of component `m` is `[5, 100]`. + /// The range of component `p` is `[1, 100]`. 
+ fn close_approved(_b: u32, m: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `417 + m * (96 ±0) + p * (36 ±0)` + // Estimated: `6676 + m * (96 ±0) + p * (36 ±0)` + // Minimum execution time: 40_646_000 picoseconds. + Weight::from_parts(36_865_909, 0) + .saturating_add(Weight::from_parts(0, 6676)) + // Standard Error: 2_136 + .saturating_add(Weight::from_parts(74_341, 0).saturating_mul(m.into())) + // Standard Error: 2_059 + .saturating_add(Weight::from_parts(170_035, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(Weight::from_parts(0, 96).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) + } + /// Storage: `Alliance::Members` (r:2 w:2) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Members` (r:1 w:1) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// The range of component `m` is `[1, 100]`. + /// The range of component `z` is `[0, 100]`. + fn init_members(m: u32, z: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `12` + // Estimated: `12362` + // Minimum execution time: 29_710_000 picoseconds. + Weight::from_parts(17_762_170, 0) + .saturating_add(Weight::from_parts(0, 12362)) + // Standard Error: 1_652 + .saturating_add(Weight::from_parts(156_967, 0).saturating_mul(m.into())) + // Standard Error: 1_632 + .saturating_add(Weight::from_parts(130_352, 0).saturating_mul(z.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Alliance::Members` (r:2 w:2) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:0) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Alliance::DepositOf` (r:200 w:50) + /// Proof: `Alliance::DepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:50 w:50) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Members` (r:0 w:1) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Prime` (r:0 w:1) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// The range of component `x` is `[1, 100]`. + /// The range of component `y` is `[0, 100]`. + /// The range of component `z` is `[0, 50]`. + fn disband(x: u32, y: u32, z: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0 + x * (52 ±0) + y * (53 ±0) + z * (250 ±0)` + // Estimated: `12362 + x * (2539 ±0) + y * (2539 ±0) + z * (2603 ±1)` + // Minimum execution time: 294_258_000 picoseconds. 
+ Weight::from_parts(295_116_000, 0) + .saturating_add(Weight::from_parts(0, 12362)) + // Standard Error: 23_663 + .saturating_add(Weight::from_parts(553_978, 0).saturating_mul(x.into())) + // Standard Error: 23_549 + .saturating_add(Weight::from_parts(567_024, 0).saturating_mul(y.into())) + // Standard Error: 47_055 + .saturating_add(Weight::from_parts(15_439_056, 0).saturating_mul(z.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(x.into()))) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(y.into()))) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(z.into()))) + .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(z.into()))) + .saturating_add(Weight::from_parts(0, 2539).saturating_mul(x.into())) + .saturating_add(Weight::from_parts(0, 2539).saturating_mul(y.into())) + .saturating_add(Weight::from_parts(0, 2603).saturating_mul(z.into())) + } + /// Storage: `Alliance::Rule` (r:0 w:1) + /// Proof: `Alliance::Rule` (`max_values`: Some(1), `max_size`: Some(87), added: 582, mode: `MaxEncodedLen`) + fn set_rule() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 8_538_000 picoseconds. + Weight::from_parts(8_752_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Alliance::Announcements` (r:1 w:1) + /// Proof: `Alliance::Announcements` (`max_values`: Some(1), `max_size`: Some(8702), added: 9197, mode: `MaxEncodedLen`) + fn announce() -> Weight { + // Proof Size summary in bytes: + // Measured: `76` + // Estimated: `10187` + // Minimum execution time: 11_213_000 picoseconds. + Weight::from_parts(11_792_000, 0) + .saturating_add(Weight::from_parts(0, 10187)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Alliance::Announcements` (r:1 w:1) + /// Proof: `Alliance::Announcements` (`max_values`: Some(1), `max_size`: Some(8702), added: 9197, mode: `MaxEncodedLen`) + fn remove_announcement() -> Weight { + // Proof Size summary in bytes: + // Measured: `149` + // Estimated: `10187` + // Minimum execution time: 12_477_000 picoseconds. + Weight::from_parts(12_942_000, 0) + .saturating_add(Weight::from_parts(0, 10187)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Alliance::Members` (r:3 w:1) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `Alliance::UnscrupulousAccounts` (r:1 w:0) + /// Proof: `Alliance::UnscrupulousAccounts` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Alliance::DepositOf` (r:0 w:1) + /// Proof: `Alliance::DepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`) + fn join_alliance() -> Weight { + // Proof Size summary in bytes: + // Measured: `294` + // Estimated: `18048` + // Minimum execution time: 41_517_000 picoseconds. 
+ Weight::from_parts(42_433_000, 0) + .saturating_add(Weight::from_parts(0, 18048)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Alliance::Members` (r:3 w:1) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `Alliance::UnscrupulousAccounts` (r:1 w:0) + /// Proof: `Alliance::UnscrupulousAccounts` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + fn nominate_ally() -> Weight { + // Proof Size summary in bytes: + // Measured: `193` + // Estimated: `18048` + // Minimum execution time: 25_950_000 picoseconds. + Weight::from_parts(26_631_000, 0) + .saturating_add(Weight::from_parts(0, 18048)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Alliance::Members` (r:2 w:2) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:0) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:0 w:1) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Prime` (r:0 w:1) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn elevate_ally() -> Weight { + // Proof Size summary in bytes: + // Measured: `236` + // Estimated: `12362` + // Minimum execution time: 24_470_000 picoseconds. + Weight::from_parts(25_222_000, 0) + .saturating_add(Weight::from_parts(0, 12362)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Alliance::Members` (r:4 w:2) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:0) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:0 w:1) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Prime` (r:0 w:1) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Alliance::RetiringMembers` (r:0 w:1) + /// Proof: `Alliance::RetiringMembers` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn give_retirement_notice() -> Weight { + // Proof Size summary in bytes: + // Measured: `236` + // Estimated: `23734` + // Minimum execution time: 31_519_000 picoseconds. 
+ Weight::from_parts(32_827_000, 0) + .saturating_add(Weight::from_parts(0, 23734)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `Alliance::RetiringMembers` (r:1 w:1) + /// Proof: `Alliance::RetiringMembers` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Alliance::Members` (r:1 w:1) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `Alliance::DepositOf` (r:1 w:1) + /// Proof: `Alliance::DepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn retire() -> Weight { + // Proof Size summary in bytes: + // Measured: `517` + // Estimated: `6676` + // Minimum execution time: 38_799_000 picoseconds. + Weight::from_parts(39_634_000, 0) + .saturating_add(Weight::from_parts(0, 6676)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Alliance::Members` (r:3 w:1) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:0) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Alliance::DepositOf` (r:1 w:1) + /// Proof: `Alliance::DepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:0 w:1) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Prime` (r:0 w:1) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn kick_member() -> Weight { + // Proof Size summary in bytes: + // Measured: `643` + // Estimated: `18048` + // Minimum execution time: 137_442_000 picoseconds. 
+ Weight::from_parts(142_142_000, 0) + .saturating_add(Weight::from_parts(0, 18048)) + .saturating_add(T::DbWeight::get().reads(13)) + .saturating_add(T::DbWeight::get().writes(8)) + } + /// Storage: `Alliance::UnscrupulousAccounts` (r:1 w:1) + /// Proof: `Alliance::UnscrupulousAccounts` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `Alliance::UnscrupulousWebsites` (r:1 w:1) + /// Proof: `Alliance::UnscrupulousWebsites` (`max_values`: Some(1), `max_size`: Some(25702), added: 26197, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 100]`. + /// The range of component `l` is `[0, 255]`. + fn add_unscrupulous_items(n: u32, l: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `76` + // Estimated: `27187` + // Minimum execution time: 7_189_000 picoseconds. + Weight::from_parts(7_387_000, 0) + .saturating_add(Weight::from_parts(0, 27187)) + // Standard Error: 3_417 + .saturating_add(Weight::from_parts(1_581_413, 0).saturating_mul(n.into())) + // Standard Error: 1_338 + .saturating_add(Weight::from_parts(67_739, 0).saturating_mul(l.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Alliance::UnscrupulousAccounts` (r:1 w:1) + /// Proof: `Alliance::UnscrupulousAccounts` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `Alliance::UnscrupulousWebsites` (r:1 w:1) + /// Proof: `Alliance::UnscrupulousWebsites` (`max_values`: Some(1), `max_size`: Some(25702), added: 26197, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 100]`. + /// The range of component `l` is `[0, 255]`. + fn remove_unscrupulous_items(n: u32, l: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0 + l * (100 ±0) + n * (289 ±0)` + // Estimated: `27187` + // Minimum execution time: 7_201_000 picoseconds. + Weight::from_parts(7_325_000, 0) + .saturating_add(Weight::from_parts(0, 27187)) + // Standard Error: 183_302 + .saturating_add(Weight::from_parts(16_886_382, 0).saturating_mul(n.into())) + // Standard Error: 71_789 + .saturating_add(Weight::from_parts(352_937, 0).saturating_mul(l.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Alliance::Members` (r:3 w:2) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:0) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:0 w:1) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Prime` (r:0 w:1) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn abdicate_fellow_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `236` + // Estimated: `18048` + // Minimum execution time: 29_653_000 picoseconds. 
+		Weight::from_parts(30_365_000, 0)
+			.saturating_add(Weight::from_parts(0, 18048))
+			.saturating_add(T::DbWeight::get().reads(4))
+			.saturating_add(T::DbWeight::get().writes(4))
+	}
+}
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_balances.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_balances.rs
new file mode 100644
index 0000000000000000000000000000000000000000..6c1cf072257f0e0e7033bdbeaa1af5dcaec5a5f1
--- /dev/null
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_balances.rs
@@ -0,0 +1,152 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Autogenerated weights for `pallet_balances`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// ./target/production/polkadot-parachain
+// benchmark
+// pallet
+// --chain=collectives-polkadot-dev
+// --wasm-execution=compiled
+// --pallet=pallet_balances
+// --no-storage-info
+// --no-median-slopes
+// --no-min-squares
+// --extrinsic=*
+// --steps=50
+// --repeat=20
+// --json
+// --header=./file_header.txt
+// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `pallet_balances`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_balances::WeightInfo for WeightInfo<T> {
+	/// Storage: `System::Account` (r:1 w:1)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	fn transfer_allow_death() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `0`
+		// Estimated: `3593`
+		// Minimum execution time: 55_696_000 picoseconds.
+		Weight::from_parts(56_582_000, 0)
+			.saturating_add(Weight::from_parts(0, 3593))
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
+	/// Storage: `System::Account` (r:1 w:1)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	fn transfer_keep_alive() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `0`
+		// Estimated: `3593`
+		// Minimum execution time: 40_885_000 picoseconds.
+ Weight::from_parts(41_993_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn force_set_balance_creating() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `3593` + // Minimum execution time: 14_565_000 picoseconds. + Weight::from_parts(15_080_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn force_set_balance_killing() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `3593` + // Minimum execution time: 22_158_000 picoseconds. + Weight::from_parts(22_715_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn force_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `6196` + // Minimum execution time: 57_957_000 picoseconds. + Weight::from_parts(58_618_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn transfer_all() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `3593` + // Minimum execution time: 52_018_000 picoseconds. + Weight::from_parts(52_795_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn force_unreserve() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `3593` + // Minimum execution time: 17_469_000 picoseconds. + Weight::from_parts(18_030_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:999 w:999) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `u` is `[1, 1000]`. + fn upgrade_accounts(u: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0 + u * (136 ±0)` + // Estimated: `990 + u * (2603 ±0)` + // Minimum execution time: 17_223_000 picoseconds. 
+		Weight::from_parts(17_587_000, 0)
+			.saturating_add(Weight::from_parts(0, 990))
+			// Standard Error: 16_201
+			.saturating_add(Weight::from_parts(15_360_967, 0).saturating_mul(u.into()))
+			.saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(u.into())))
+			.saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(u.into())))
+			.saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into()))
+	}
+}
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_collator_selection.rs
new file mode 100644
index 0000000000000000000000000000000000000000..03f3ff602a5b3e91c4e2ff90a4a3433d513079a1
--- /dev/null
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_collator_selection.rs
@@ -0,0 +1,246 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Autogenerated weights for `pallet_collator_selection`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// ./target/production/polkadot-parachain
+// benchmark
+// pallet
+// --chain=collectives-polkadot-dev
+// --wasm-execution=compiled
+// --pallet=pallet_collator_selection
+// --no-storage-info
+// --no-median-slopes
+// --no-min-squares
+// --extrinsic=*
+// --steps=50
+// --repeat=20
+// --json
+// --header=./file_header.txt
+// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `pallet_collator_selection`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_collator_selection::WeightInfo for WeightInfo<T> {
+	/// Storage: `Session::NextKeys` (r:20 w:0)
+	/// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `CollatorSelection::Invulnerables` (r:0 w:1)
+	/// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`)
+	/// The range of component `b` is `[1, 20]`.
+	fn set_invulnerables(b: u32, ) -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `163 + b * (79 ±0)`
+		// Estimated: `1154 + b * (2555 ±0)`
+		// Minimum execution time: 14_616_000 picoseconds.
+ Weight::from_parts(12_150_410, 0) + .saturating_add(Weight::from_parts(0, 1154)) + // Standard Error: 6_270 + .saturating_add(Weight::from_parts(3_256_932, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(Weight::from_parts(0, 2555).saturating_mul(b.into())) + } + /// Storage: `Session::NextKeys` (r:1 w:0) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `CollatorSelection::Invulnerables` (r:1 w:1) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::Candidates` (r:1 w:1) + /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `b` is `[1, 19]`. + /// The range of component `c` is `[1, 99]`. + fn add_invulnerable(b: u32, c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `756 + b * (32 ±0) + c * (53 ±0)` + // Estimated: `6287 + b * (37 ±0) + c * (53 ±0)` + // Minimum execution time: 48_450_000 picoseconds. + Weight::from_parts(51_166_679, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_588 + .saturating_add(Weight::from_parts(167_219, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(Weight::from_parts(0, 37).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(0, 53).saturating_mul(c.into())) + } + /// Storage: `CollatorSelection::Candidates` (r:1 w:0) + /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::Invulnerables` (r:1 w:1) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// The range of component `b` is `[5, 20]`. + fn remove_invulnerable(b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `119 + b * (32 ±0)` + // Estimated: `6287` + // Minimum execution time: 15_830_000 picoseconds. + Weight::from_parts(15_792_847, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 5_343 + .saturating_add(Weight::from_parts(167_955, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `CollatorSelection::DesiredCandidates` (r:0 w:1) + /// Proof: `CollatorSelection::DesiredCandidates` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn set_desired_candidates() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_424_000 picoseconds. 
+ Weight::from_parts(7_767_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) + /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_388_000 picoseconds. + Weight::from_parts(7_677_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `CollatorSelection::Candidates` (r:1 w:1) + /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::DesiredCandidates` (r:1 w:0) + /// Proof: `CollatorSelection::DesiredCandidates` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// Storage: `Session::NextKeys` (r:1 w:0) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `CollatorSelection::CandidacyBond` (r:1 w:0) + /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) + /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + /// The range of component `c` is `[1, 99]`. + fn register_as_candidate(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `736 + c * (52 ±0)` + // Estimated: `6287 + c * (54 ±0)` + // Minimum execution time: 41_241_000 picoseconds. + Weight::from_parts(46_090_319, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_918 + .saturating_add(Weight::from_parts(161_140, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(Weight::from_parts(0, 54).saturating_mul(c.into())) + } + /// Storage: `CollatorSelection::Candidates` (r:1 w:1) + /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) + /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + /// The range of component `c` is `[4, 100]`. + fn leave_intent(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_221_000 picoseconds. 
+ Weight::from_parts(36_183_872, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_766 + .saturating_add(Weight::from_parts(168_742, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn update_bond(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn take_candidate_slot(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) + /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn note_author() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `6196` + // Minimum execution time: 43_910_000 picoseconds. + Weight::from_parts(44_796_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `CollatorSelection::Candidates` (r:1 w:0) + /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LastAuthoredBlock` (r:100 w:0) + /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:97 w:97) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `r` is `[1, 100]`. + /// The range of component `c` is `[1, 100]`. + fn new_session(r: u32, c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `2243 + c * (97 ±0) + r * (112 ±0)` + // Estimated: `6287 + c * (2519 ±0) + r * (2603 ±0)` + // Minimum execution time: 17_092_000 picoseconds. 
+ Weight::from_parts(17_635_000, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 351_635 + .saturating_add(Weight::from_parts(15_162_192, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(c.into()))) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(c.into()))) + .saturating_add(Weight::from_parts(0, 2519).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(0, 2603).saturating_mul(r.into())) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_collective.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_collective.rs new file mode 100644 index 0000000000000000000000000000000000000000..9133baa6120cff6f13bf3a3f45ec5db4b3fe5f3f --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_collective.rs @@ -0,0 +1,304 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_collective` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_collective +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_collective`. 
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_collective::WeightInfo for WeightInfo<T> {
+	/// Storage: `AllianceMotion::Members` (r:1 w:1)
+	/// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `AllianceMotion::Proposals` (r:1 w:0)
+	/// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `AllianceMotion::Voting` (r:100 w:100)
+	/// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `AllianceMotion::Prime` (r:0 w:1)
+	/// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// The range of component `m` is `[0, 100]`.
+	/// The range of component `n` is `[0, 100]`.
+	/// The range of component `p` is `[0, 100]`.
+	fn set_members(m: u32, _n: u32, p: u32, ) -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `0 + m * (3232 ±0) + p * (3190 ±0)`
+		// Estimated: `15691 + m * (1967 ±23) + p * (4332 ±23)`
+		// Minimum execution time: 16_410_000 picoseconds.
+		Weight::from_parts(16_816_000, 0)
+			.saturating_add(Weight::from_parts(0, 15691))
+			// Standard Error: 59_812
+			.saturating_add(Weight::from_parts(4_516_537, 0).saturating_mul(m.into()))
+			// Standard Error: 59_812
+			.saturating_add(Weight::from_parts(7_992_168, 0).saturating_mul(p.into()))
+			.saturating_add(T::DbWeight::get().reads(2))
+			.saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into())))
+			.saturating_add(T::DbWeight::get().writes(2))
+			.saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into())))
+			.saturating_add(Weight::from_parts(0, 1967).saturating_mul(m.into()))
+			.saturating_add(Weight::from_parts(0, 4332).saturating_mul(p.into()))
+	}
+	/// Storage: `AllianceMotion::Members` (r:1 w:0)
+	/// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// The range of component `b` is `[2, 1024]`.
+	/// The range of component `m` is `[1, 100]`.
+	fn execute(b: u32, m: u32, ) -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `32 + m * (32 ±0)`
+		// Estimated: `1518 + m * (32 ±0)`
+		// Minimum execution time: 14_418_000 picoseconds.
+		Weight::from_parts(13_588_617, 0)
+			.saturating_add(Weight::from_parts(0, 1518))
+			// Standard Error: 21
+			.saturating_add(Weight::from_parts(1_711, 0).saturating_mul(b.into()))
+			// Standard Error: 223
+			.saturating_add(Weight::from_parts(13_836, 0).saturating_mul(m.into()))
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into()))
+	}
+	/// Storage: `AllianceMotion::Members` (r:1 w:0)
+	/// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `AllianceMotion::ProposalOf` (r:1 w:0)
+	/// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// The range of component `b` is `[2, 1024]`.
+	/// The range of component `m` is `[1, 100]`.
+	fn propose_execute(b: u32, m: u32, ) -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `32 + m * (32 ±0)`
+		// Estimated: `3498 + m * (32 ±0)`
+		// Minimum execution time: 17_174_000 picoseconds.
+ Weight::from_parts(16_192_764, 0) + .saturating_add(Weight::from_parts(0, 3498)) + // Standard Error: 27 + .saturating_add(Weight::from_parts(1_672, 0).saturating_mul(b.into())) + // Standard Error: 280 + .saturating_add(Weight::from_parts(24_343, 0).saturating_mul(m.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) + } + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalOf` (r:1 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalCount` (r:1 w:1) + /// Proof: `AllianceMotion::ProposalCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Voting` (r:0 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `b` is `[2, 1024]`. + /// The range of component `m` is `[2, 100]`. + /// The range of component `p` is `[1, 100]`. + fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `322 + m * (32 ±0) + p * (36 ±0)` + // Estimated: `3714 + m * (33 ±0) + p * (36 ±0)` + // Minimum execution time: 23_970_000 picoseconds. + Weight::from_parts(23_004_052, 0) + .saturating_add(Weight::from_parts(0, 3714)) + // Standard Error: 123 + .saturating_add(Weight::from_parts(2_728, 0).saturating_mul(b.into())) + // Standard Error: 1_291 + .saturating_add(Weight::from_parts(32_731, 0).saturating_mul(m.into())) + // Standard Error: 1_275 + .saturating_add(Weight::from_parts(199_537, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(Weight::from_parts(0, 33).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) + } + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Voting` (r:1 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `m` is `[5, 100]`. + fn vote(m: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `771 + m * (64 ±0)` + // Estimated: `4235 + m * (64 ±0)` + // Minimum execution time: 25_843_000 picoseconds. 
+ Weight::from_parts(26_092_578, 0) + .saturating_add(Weight::from_parts(0, 4235)) + // Standard Error: 1_785 + .saturating_add(Weight::from_parts(67_298, 0).saturating_mul(m.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) + } + /// Storage: `AllianceMotion::Voting` (r:1 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalOf` (r:0 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `m` is `[4, 100]`. + /// The range of component `p` is `[1, 100]`. + fn close_early_disapproved(m: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `360 + m * (64 ±0) + p * (36 ±0)` + // Estimated: `3805 + m * (65 ±0) + p * (36 ±0)` + // Minimum execution time: 27_543_000 picoseconds. + Weight::from_parts(26_505_473, 0) + .saturating_add(Weight::from_parts(0, 3805)) + // Standard Error: 1_054 + .saturating_add(Weight::from_parts(35_295, 0).saturating_mul(m.into())) + // Standard Error: 1_028 + .saturating_add(Weight::from_parts(190_508, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(Weight::from_parts(0, 65).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) + } + /// Storage: `AllianceMotion::Voting` (r:1 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalOf` (r:1 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// The range of component `b` is `[2, 1024]`. + /// The range of component `m` is `[4, 100]`. + /// The range of component `p` is `[1, 100]`. + fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `662 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` + // Estimated: `3979 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` + // Minimum execution time: 40_375_000 picoseconds. 
+ Weight::from_parts(34_081_294, 0) + .saturating_add(Weight::from_parts(0, 3979)) + // Standard Error: 196 + .saturating_add(Weight::from_parts(3_796, 0).saturating_mul(b.into())) + // Standard Error: 2_072 + .saturating_add(Weight::from_parts(50_954, 0).saturating_mul(m.into())) + // Standard Error: 2_020 + .saturating_add(Weight::from_parts(246_000, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(0, 66).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 40).saturating_mul(p.into())) + } + /// Storage: `AllianceMotion::Voting` (r:1 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Prime` (r:1 w:0) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalOf` (r:0 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `m` is `[4, 100]`. + /// The range of component `p` is `[1, 100]`. + fn close_disapproved(m: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `458 + m * (48 ±0) + p * (36 ±0)` + // Estimated: `3898 + m * (49 ±0) + p * (36 ±0)` + // Minimum execution time: 28_793_000 picoseconds. + Weight::from_parts(29_656_832, 0) + .saturating_add(Weight::from_parts(0, 3898)) + // Standard Error: 1_214 + .saturating_add(Weight::from_parts(22_148, 0).saturating_mul(m.into())) + // Standard Error: 1_184 + .saturating_add(Weight::from_parts(189_860, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(Weight::from_parts(0, 49).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) + } + /// Storage: `AllianceMotion::Voting` (r:1 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Prime` (r:1 w:0) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalOf` (r:1 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// The range of component `b` is `[2, 1024]`. + /// The range of component `m` is `[4, 100]`. + /// The range of component `p` is `[1, 100]`. + fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `682 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` + // Estimated: `3999 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` + // Minimum execution time: 40_887_000 picoseconds. 
+ Weight::from_parts(39_529_567, 0) + .saturating_add(Weight::from_parts(0, 3999)) + // Standard Error: 191 + .saturating_add(Weight::from_parts(2_802, 0).saturating_mul(b.into())) + // Standard Error: 2_021 + .saturating_add(Weight::from_parts(35_956, 0).saturating_mul(m.into())) + // Standard Error: 1_970 + .saturating_add(Weight::from_parts(235_154, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(0, 66).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 40).saturating_mul(p.into())) + } + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Voting` (r:0 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalOf` (r:0 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `p` is `[1, 100]`. + fn disapprove_proposal(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `189 + p * (32 ±0)` + // Estimated: `1674 + p * (32 ±0)` + // Minimum execution time: 14_040_000 picoseconds. + Weight::from_parts(15_075_964, 0) + .saturating_add(Weight::from_parts(0, 1674)) + // Standard Error: 854 + .saturating_add(Weight::from_parts(159_597, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(Weight::from_parts(0, 32).saturating_mul(p.into())) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_collective_content.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_collective_content.rs new file mode 100644 index 0000000000000000000000000000000000000000..6be94db22db9f88db97f60db4d850fcd4179aa26 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_collective_content.rs @@ -0,0 +1,93 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_collective_content` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-08-18, STEPS: `10`, REPEAT: `3`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `cob`, CPU: `` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/debug/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --steps=10 +// --repeat=3 +// --pallet=pallet_collective_content +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_collective_content`. +pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> pallet_collective_content::WeightInfo for WeightInfo<T> { + /// Storage: `AmbassadorContent::Charter` (r:0 w:1) + /// Proof: `AmbassadorContent::Charter` (`max_values`: Some(1), `max_size`: Some(70), added: 565, mode: `MaxEncodedLen`) + fn set_charter() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 99_000_000 picoseconds. + Weight::from_parts(99_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorCollective::Members` (r:1 w:0) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorContent::AnnouncementsCount` (r:1 w:1) + /// Proof: `AmbassadorContent::AnnouncementsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorContent::NextAnnouncementExpireAt` (r:1 w:1) + /// Proof: `AmbassadorContent::NextAnnouncementExpireAt` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorContent::Announcements` (r:0 w:1) + /// Proof: `AmbassadorContent::Announcements` (`max_values`: None, `max_size`: Some(90), added: 2565, mode: `MaxEncodedLen`) + fn announce() -> Weight { + // Proof Size summary in bytes: + // Measured: `285` + // Estimated: `3507` + // Minimum execution time: 273_000_000 picoseconds. + Weight::from_parts(278_000_000, 0) + .saturating_add(Weight::from_parts(0, 3507)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `AmbassadorCollective::Members` (r:1 w:0) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorContent::Announcements` (r:1 w:1) + /// Proof: `AmbassadorContent::Announcements` (`max_values`: None, `max_size`: Some(90), added: 2565, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorContent::AnnouncementsCount` (r:1 w:1) + /// Proof: `AmbassadorContent::AnnouncementsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn remove_announcement() -> Weight { + // Proof Size summary in bytes: + // Measured: `450` + // Estimated: `3555` + // Minimum execution time: 326_000_000 picoseconds. 
+ Weight::from_parts(338_000_000, 0) + .saturating_add(Weight::from_parts(0, 3555)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_ambassador_core.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_ambassador_core.rs new file mode 100644 index 0000000000000000000000000000000000000000..f40940a8b25faa7c441b1ac9b237cb34e671cf17 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_ambassador_core.rs @@ -0,0 +1,223 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_core_fellowship` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-08-11, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `cob`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/release/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_core_fellowship +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_core_fellowship`. +pub struct WeightInfo(PhantomData); +impl pallet_core_fellowship::WeightInfo for WeightInfo { + /// Storage: `AmbassadorCore::Params` (r:0 w:1) + /// Proof: `AmbassadorCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + fn set_params() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 11_000_000 picoseconds. 
+ Weight::from_parts(11_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorCore::Member` (r:1 w:1) + /// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::Members` (r:1 w:1) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCore::Params` (r:1 w:0) + /// Proof: `AmbassadorCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:1) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IdToIndex` (r:1 w:0) + /// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCore::MemberEvidence` (r:1 w:1) + /// Proof: `AmbassadorCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) + fn bump_offboard() -> Weight { + // Proof Size summary in bytes: + // Measured: `66011` + // Estimated: `69046` + // Minimum execution time: 96_000_000 picoseconds. + Weight::from_parts(111_000_000, 0) + .saturating_add(Weight::from_parts(0, 69046)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `AmbassadorCore::Member` (r:1 w:1) + /// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::Members` (r:1 w:1) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCore::Params` (r:1 w:0) + /// Proof: `AmbassadorCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:1) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IdToIndex` (r:1 w:0) + /// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCore::MemberEvidence` (r:1 w:1) + /// Proof: `AmbassadorCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) + fn bump_demote() -> Weight { + // Proof Size summary in bytes: + // Measured: `66121` + // Estimated: `69046` + // Minimum execution time: 99_000_000 picoseconds. + Weight::from_parts(116_000_000, 0) + .saturating_add(Weight::from_parts(0, 69046)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `AmbassadorCollective::Members` (r:1 w:0) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCore::Member` (r:1 w:1) + /// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + fn set_active() -> Weight { + // Proof Size summary in bytes: + // Measured: `360` + // Estimated: `3514` + // Minimum execution time: 21_000_000 picoseconds. 
+ Weight::from_parts(22_000_000, 0) + .saturating_add(Weight::from_parts(0, 3514)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorCore::Member` (r:1 w:1) + /// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::Members` (r:1 w:1) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:1) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IndexToId` (r:0 w:1) + /// Proof: `AmbassadorCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IdToIndex` (r:0 w:1) + /// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + fn induct() -> Weight { + // Proof Size summary in bytes: + // Measured: `118` + // Estimated: `3514` + // Minimum execution time: 36_000_000 picoseconds. + Weight::from_parts(36_000_000, 0) + .saturating_add(Weight::from_parts(0, 3514)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `AmbassadorCollective::Members` (r:1 w:1) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCore::Member` (r:1 w:1) + /// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCore::Params` (r:1 w:0) + /// Proof: `AmbassadorCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:1) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCore::MemberEvidence` (r:1 w:1) + /// Proof: `AmbassadorCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IndexToId` (r:0 w:1) + /// Proof: `AmbassadorCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IdToIndex` (r:0 w:1) + /// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + fn promote() -> Weight { + // Proof Size summary in bytes: + // Measured: `65989` + // Estimated: `69046` + // Minimum execution time: 95_000_000 picoseconds. 
+ Weight::from_parts(110_000_000, 0) + .saturating_add(Weight::from_parts(0, 69046)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(6)) + } + /// Storage: `AmbassadorCollective::Members` (r:1 w:0) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCore::Member` (r:1 w:1) + /// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCore::MemberEvidence` (r:0 w:1) + /// Proof: `AmbassadorCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) + fn offboard() -> Weight { + // Proof Size summary in bytes: + // Measured: `331` + // Estimated: `3514` + // Minimum execution time: 21_000_000 picoseconds. + Weight::from_parts(22_000_000, 0) + .saturating_add(Weight::from_parts(0, 3514)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorCore::Member` (r:1 w:1) + /// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::Members` (r:1 w:0) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + fn import() -> Weight { + // Proof Size summary in bytes: + // Measured: `285` + // Estimated: `3514` + // Minimum execution time: 20_000_000 picoseconds. + Weight::from_parts(21_000_000, 0) + .saturating_add(Weight::from_parts(0, 3514)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorCollective::Members` (r:1 w:0) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCore::Member` (r:1 w:1) + /// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCore::MemberEvidence` (r:1 w:1) + /// Proof: `AmbassadorCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) + fn approve() -> Weight { + // Proof Size summary in bytes: + // Measured: `65967` + // Estimated: `69046` + // Minimum execution time: 78_000_000 picoseconds. + Weight::from_parts(104_000_000, 0) + .saturating_add(Weight::from_parts(0, 69046)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorCore::Member` (r:1 w:0) + /// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCore::MemberEvidence` (r:1 w:1) + /// Proof: `AmbassadorCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) + fn submit_evidence() -> Weight { + // Proof Size summary in bytes: + // Measured: `151` + // Estimated: `69046` + // Minimum execution time: 43_000_000 picoseconds. 
+ Weight::from_parts(44_000_000, 0) + .saturating_add(Weight::from_parts(0, 69046)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_fellowship_core.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_fellowship_core.rs new file mode 100644 index 0000000000000000000000000000000000000000..471ee82ead729ea5abff616f0c9fe3a86704fd91 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_fellowship_core.rs @@ -0,0 +1,222 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_core_fellowship` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-08-11, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `cob`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/release/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_core_fellowship +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_core_fellowship`. +pub struct WeightInfo(PhantomData); +impl pallet_core_fellowship::WeightInfo for WeightInfo { + /// Storage: `FellowshipCore::Params` (r:0 w:1) + /// Proof: `FellowshipCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + fn set_params() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 11_000_000 picoseconds. 
+ Weight::from_parts(12_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipCore::Member` (r:1 w:1) + /// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::Members` (r:1 w:1) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCore::Params` (r:1 w:0) + /// Proof: `FellowshipCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:1) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IdToIndex` (r:1 w:0) + /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCore::MemberEvidence` (r:1 w:1) + /// Proof: `FellowshipCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) + fn bump_offboard() -> Weight { + // Proof Size summary in bytes: + // Measured: `66144` + // Estimated: `69046` + // Minimum execution time: 109_000_000 picoseconds. + Weight::from_parts(125_000_000, 0) + .saturating_add(Weight::from_parts(0, 69046)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `FellowshipCore::Member` (r:1 w:1) + /// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::Members` (r:1 w:1) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCore::Params` (r:1 w:0) + /// Proof: `FellowshipCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:1) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IdToIndex` (r:1 w:0) + /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCore::MemberEvidence` (r:1 w:1) + /// Proof: `FellowshipCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) + fn bump_demote() -> Weight { + // Proof Size summary in bytes: + // Measured: `66254` + // Estimated: `69046` + // Minimum execution time: 112_000_000 picoseconds. + Weight::from_parts(114_000_000, 0) + .saturating_add(Weight::from_parts(0, 69046)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `FellowshipCollective::Members` (r:1 w:0) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCore::Member` (r:1 w:1) + /// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + fn set_active() -> Weight { + // Proof Size summary in bytes: + // Measured: `493` + // Estimated: `3514` + // Minimum execution time: 22_000_000 picoseconds. 
+ Weight::from_parts(27_000_000, 0) + .saturating_add(Weight::from_parts(0, 3514)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipCore::Member` (r:1 w:1) + /// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::Members` (r:1 w:1) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:1) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IndexToId` (r:0 w:1) + /// Proof: `FellowshipCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IdToIndex` (r:0 w:1) + /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + fn induct() -> Weight { + // Proof Size summary in bytes: + // Measured: `251` + // Estimated: `3514` + // Minimum execution time: 35_000_000 picoseconds. + Weight::from_parts(36_000_000, 0) + .saturating_add(Weight::from_parts(0, 3514)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `FellowshipCollective::Members` (r:1 w:1) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCore::Member` (r:1 w:1) + /// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCore::Params` (r:1 w:0) + /// Proof: `FellowshipCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:1) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCore::MemberEvidence` (r:1 w:1) + /// Proof: `FellowshipCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IndexToId` (r:0 w:1) + /// Proof: `FellowshipCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IdToIndex` (r:0 w:1) + /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + fn promote() -> Weight { + // Proof Size summary in bytes: + // Measured: `66122` + // Estimated: `69046` + // Minimum execution time: 97_000_000 picoseconds. 
+ Weight::from_parts(129_000_000, 0) + .saturating_add(Weight::from_parts(0, 69046)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(6)) + } + /// Storage: `FellowshipCollective::Members` (r:1 w:0) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCore::Member` (r:1 w:1) + /// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCore::MemberEvidence` (r:0 w:1) + /// Proof: `FellowshipCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) + fn offboard() -> Weight { + // Proof Size summary in bytes: + // Measured: `464` + // Estimated: `3514` + // Minimum execution time: 22_000_000 picoseconds. + Weight::from_parts(22_000_000, 0) + .saturating_add(Weight::from_parts(0, 3514)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipCore::Member` (r:1 w:1) + /// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::Members` (r:1 w:0) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + fn import() -> Weight { + // Proof Size summary in bytes: + // Measured: `418` + // Estimated: `3514` + // Minimum execution time: 20_000_000 picoseconds. + Weight::from_parts(24_000_000, 0) + .saturating_add(Weight::from_parts(0, 3514)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipCollective::Members` (r:1 w:0) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCore::Member` (r:1 w:1) + /// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCore::MemberEvidence` (r:1 w:1) + /// Proof: `FellowshipCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) + fn approve() -> Weight { + // Proof Size summary in bytes: + // Measured: `66100` + // Estimated: `69046` + // Minimum execution time: 89_000_000 picoseconds. + Weight::from_parts(119_000_000, 0) + .saturating_add(Weight::from_parts(0, 69046)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipCore::Member` (r:1 w:0) + /// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCore::MemberEvidence` (r:1 w:1) + /// Proof: `FellowshipCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) + fn submit_evidence() -> Weight { + // Proof Size summary in bytes: + // Measured: `184` + // Estimated: `69046` + // Minimum execution time: 43_000_000 picoseconds. 
+ Weight::from_parts(52_000_000, 0) + .saturating_add(Weight::from_parts(0, 69046)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_message_queue.rs new file mode 100644 index 0000000000000000000000000000000000000000..4bd71c4e7d497b84f9a5b910015e439b69e41ef9 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_message_queue.rs @@ -0,0 +1,179 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_message_queue` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-03-24, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("collectives-polkadot-dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --chain +// collectives-polkadot-dev +// --pallet +// pallet_message_queue +// --extrinsic +// * +// --execution +// wasm +// --wasm-execution +// compiled +// --output +// parachains/runtimes/collectives/collectives-polkadot/src/weights + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::Weight}; +use sp_std::marker::PhantomData; + +/// Weight functions for `pallet_message_queue`. +pub struct WeightInfo(PhantomData); +impl pallet_message_queue::WeightInfo for WeightInfo { + /// Storage: MessageQueue ServiceHead (r:1 w:0) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: MessageQueue BookStateFor (r:2 w:2) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn ready_ring_knit() -> Weight { + // Proof Size summary in bytes: + // Measured: `189` + // Estimated: `7534` + // Minimum execution time: 11_440_000 picoseconds. + Weight::from_parts(11_440_000, 0) + .saturating_add(Weight::from_parts(0, 7534)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: MessageQueue BookStateFor (r:2 w:2) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + fn ready_ring_unknit() -> Weight { + // Proof Size summary in bytes: + // Measured: `184` + // Estimated: `7534` + // Minimum execution time: 11_077_000 picoseconds. 
+ Weight::from_parts(11_077_000, 0) + .saturating_add(Weight::from_parts(0, 7534)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn service_queue_base() -> Weight { + // Proof Size summary in bytes: + // Measured: `6` + // Estimated: `3517` + // Minimum execution time: 3_977_000 picoseconds. + Weight::from_parts(3_977_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn service_page_base_completion() -> Weight { + // Proof Size summary in bytes: + // Measured: `72` + // Estimated: `69050` + // Minimum execution time: 4_831_000 picoseconds. + Weight::from_parts(4_831_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn service_page_base_no_completion() -> Weight { + // Proof Size summary in bytes: + // Measured: `72` + // Estimated: `69050` + // Minimum execution time: 5_192_000 picoseconds. + Weight::from_parts(5_192_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn service_page_item() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 58_750_000 picoseconds. + Weight::from_parts(58_750_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: MessageQueue BookStateFor (r:1 w:0) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn bump_service_head() -> Weight { + // Proof Size summary in bytes: + // Measured: `99` + // Estimated: `5007` + // Minimum execution time: 5_107_000 picoseconds. + Weight::from_parts(5_107_000, 0) + .saturating_add(Weight::from_parts(0, 5007)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn reap_page() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `72567` + // Minimum execution time: 46_814_000 picoseconds. 
+ Weight::from_parts(46_814_000, 0) + .saturating_add(Weight::from_parts(0, 72567)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn execute_overweight_page_removed() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `72567` + // Minimum execution time: 52_510_000 picoseconds. + Weight::from_parts(52_510_000, 0) + .saturating_add(Weight::from_parts(0, 72567)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn execute_overweight_page_updated() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `72567` + // Minimum execution time: 71_930_000 picoseconds. + Weight::from_parts(71_930_000, 0) + .saturating_add(Weight::from_parts(0, 72567)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_multisig.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_multisig.rs new file mode 100644 index 0000000000000000000000000000000000000000..a7827b7200906370039d245b02fcaec6d22430d2 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_multisig.rs @@ -0,0 +1,164 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_multisig` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_multisig +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_multisig`. +pub struct WeightInfo(PhantomData); +impl pallet_multisig::WeightInfo for WeightInfo { + /// The range of component `z` is `[0, 10000]`. + fn as_multi_threshold_1(z: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 13_288_000 picoseconds. + Weight::from_parts(14_235_741, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 5 + .saturating_add(Weight::from_parts(500, 0).saturating_mul(z.into())) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// The range of component `s` is `[2, 100]`. + /// The range of component `z` is `[0, 10000]`. + fn as_multi_create(s: u32, z: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `328 + s * (2 ±0)` + // Estimated: `6811` + // Minimum execution time: 44_865_000 picoseconds. + Weight::from_parts(33_468_056, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 1_513 + .saturating_add(Weight::from_parts(130_544, 0).saturating_mul(s.into())) + // Standard Error: 14 + .saturating_add(Weight::from_parts(1_422, 0).saturating_mul(z.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// The range of component `s` is `[3, 100]`. + /// The range of component `z` is `[0, 10000]`. + fn as_multi_approve(s: u32, z: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `348` + // Estimated: `6811` + // Minimum execution time: 29_284_000 picoseconds. + Weight::from_parts(18_708_967, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 916 + .saturating_add(Weight::from_parts(119_202, 0).saturating_mul(s.into())) + // Standard Error: 8 + .saturating_add(Weight::from_parts(1_447, 0).saturating_mul(z.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `s` is `[2, 100]`. + /// The range of component `z` is `[0, 10000]`. + fn as_multi_complete(s: u32, z: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `451 + s * (33 ±0)` + // Estimated: `6811` + // Minimum execution time: 49_462_000 picoseconds. 
+ Weight::from_parts(34_470_286, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 1_738 + .saturating_add(Weight::from_parts(178_227, 0).saturating_mul(s.into())) + // Standard Error: 17 + .saturating_add(Weight::from_parts(1_644, 0).saturating_mul(z.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// The range of component `s` is `[2, 100]`. + fn approve_as_multi_create(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `329 + s * (2 ±0)` + // Estimated: `6811` + // Minimum execution time: 30_749_000 picoseconds. + Weight::from_parts(31_841_438, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 1_033 + .saturating_add(Weight::from_parts(123_126, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// The range of component `s` is `[2, 100]`. + fn approve_as_multi_approve(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `348` + // Estimated: `6811` + // Minimum execution time: 17_436_000 picoseconds. + Weight::from_parts(18_036_002, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 829 + .saturating_add(Weight::from_parts(109_450, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// The range of component `s` is `[2, 100]`. + fn cancel_as_multi(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `520 + s * (1 ±0)` + // Estimated: `6811` + // Minimum execution time: 31_532_000 picoseconds. + Weight::from_parts(32_818_015, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 977 + .saturating_add(Weight::from_parts(123_121, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_preimage.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_preimage.rs new file mode 100644 index 0000000000000000000000000000000000000000..9b45c8758186d5b36ae5af158b35e4349cb7a1ed --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_preimage.rs @@ -0,0 +1,232 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Autogenerated weights for `pallet_preimage` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_preimage +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_preimage`. +pub struct WeightInfo(PhantomData); +impl pallet_preimage::WeightInfo for WeightInfo { + fn ensure_updated(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `193 + n * (91 ±0)` + // Estimated: `3593 + n * (2566 ±0)` + // Minimum execution time: 2_000_000 picoseconds. + Weight::from_parts(2_000_000, 3593) + // Standard Error: 13_720 + .saturating_add(Weight::from_parts(17_309_199, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes(1_u64)) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2566).saturating_mul(n.into())) + } + + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) + /// The range of component `s` is `[0, 4194304]`. + fn note_preimage(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `177` + // Estimated: `3556` + // Minimum execution time: 29_323_000 picoseconds. + Weight::from_parts(29_793_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + // Standard Error: 5 + .saturating_add(Weight::from_parts(2_504, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) + /// The range of component `s` is `[0, 4194304]`. + fn note_requested_preimage(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `140` + // Estimated: `3556` + // Minimum execution time: 15_581_000 picoseconds. 
+ Weight::from_parts(15_659_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + // Standard Error: 4 + .saturating_add(Weight::from_parts(2_500, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) + /// The range of component `s` is `[0, 4194304]`. + fn note_no_deposit_preimage(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `140` + // Estimated: `3556` + // Minimum execution time: 15_028_000 picoseconds. + Weight::from_parts(15_150_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + // Standard Error: 6 + .saturating_add(Weight::from_parts(2_560, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) + fn unnote_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `323` + // Estimated: `3556` + // Minimum execution time: 55_113_000 picoseconds. + Weight::from_parts(59_127_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) + fn unnote_no_deposit_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `178` + // Estimated: `3556` + // Minimum execution time: 38_033_000 picoseconds. + Weight::from_parts(41_203_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + fn request_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `222` + // Estimated: `3556` + // Minimum execution time: 31_482_000 picoseconds. + Weight::from_parts(34_726_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + fn request_no_deposit_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `178` + // Estimated: `3556` + // Minimum execution time: 20_724_000 picoseconds. 
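+ // No `s` component here: only the `StatusFor` entry is touched, so the cost is
+ // independent of the preimage length.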
+ Weight::from_parts(22_928_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + fn request_unnoted_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `76` + // Estimated: `3556` + // Minimum execution time: 27_015_000 picoseconds. + Weight::from_parts(29_240_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + fn request_requested_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `140` + // Estimated: `3556` + // Minimum execution time: 10_712_000 picoseconds. + Weight::from_parts(11_317_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) + fn unrequest_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `178` + // Estimated: `3556` + // Minimum execution time: 34_528_000 picoseconds. + Weight::from_parts(35_982_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + fn unrequest_unnoted_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `140` + // Estimated: `3556` + // Minimum execution time: 11_059_000 picoseconds. + Weight::from_parts(12_458_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + fn unrequest_multi_referenced_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `140` + // Estimated: `3556` + // Minimum execution time: 11_502_000 picoseconds. + Weight::from_parts(12_180_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_proxy.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_proxy.rs new file mode 100644 index 0000000000000000000000000000000000000000..59d9f912bf11d92a9c214e9ff83aa880482d8faa --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_proxy.rs @@ -0,0 +1,225 @@ +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_proxy` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_proxy +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_proxy`. +pub struct WeightInfo(PhantomData); +impl pallet_proxy::WeightInfo for WeightInfo { + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn proxy(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `127 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 15_597_000 picoseconds. + Weight::from_parts(16_231_993, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 1_665 + .saturating_add(Weight::from_parts(29_818, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn proxy_announced(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `454 + a * (68 ±0) + p * (37 ±0)` + // Estimated: `5698` + // Minimum execution time: 36_685_000 picoseconds. 
+ Weight::from_parts(36_376_358, 0) + .saturating_add(Weight::from_parts(0, 5698)) + // Standard Error: 3_003 + .saturating_add(Weight::from_parts(133_776, 0).saturating_mul(a.into())) + // Standard Error: 3_103 + .saturating_add(Weight::from_parts(60_315, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn remove_announcement(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `369 + a * (68 ±0)` + // Estimated: `5698` + // Minimum execution time: 23_835_000 picoseconds. + Weight::from_parts(24_154_219, 0) + .saturating_add(Weight::from_parts(0, 5698)) + // Standard Error: 1_580 + .saturating_add(Weight::from_parts(125_884, 0).saturating_mul(a.into())) + // Standard Error: 1_632 + .saturating_add(Weight::from_parts(21_563, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn reject_announcement(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `369 + a * (68 ±0)` + // Estimated: `5698` + // Minimum execution time: 23_997_000 picoseconds. + Weight::from_parts(24_301_638, 0) + .saturating_add(Weight::from_parts(0, 5698)) + // Standard Error: 1_658 + .saturating_add(Weight::from_parts(133_005, 0).saturating_mul(a.into())) + // Standard Error: 1_713 + .saturating_add(Weight::from_parts(20_237, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn announce(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `386 + a * (68 ±0) + p * (37 ±0)` + // Estimated: `5698` + // Minimum execution time: 33_604_000 picoseconds. 
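+ // Two-component fit: weight grows with both the number of pending announcements (`a`)
+ // and the proxy count (`p`); each slope's standard error is given in the comment
+ // directly above it.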
+ Weight::from_parts(33_322_880, 0) + .saturating_add(Weight::from_parts(0, 5698)) + // Standard Error: 1_840 + .saturating_add(Weight::from_parts(114_037, 0).saturating_mul(a.into())) + // Standard Error: 1_901 + .saturating_add(Weight::from_parts(45_629, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn add_proxy(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `127 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 24_634_000 picoseconds. + Weight::from_parts(25_509_118, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 2_278 + .saturating_add(Weight::from_parts(38_401, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn remove_proxy(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `127 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 24_855_000 picoseconds. + Weight::from_parts(25_753_505, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 1_819 + .saturating_add(Weight::from_parts(44_357, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn remove_proxies(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `127 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 22_211_000 picoseconds. + Weight::from_parts(23_094_124, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 2_597 + .saturating_add(Weight::from_parts(36_725, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn create_pure(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `139` + // Estimated: `4706` + // Minimum execution time: 26_764_000 picoseconds. + Weight::from_parts(27_667_535, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 1_111 + .saturating_add(Weight::from_parts(3_422, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[0, 30]`. + fn kill_pure(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `164 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 22_632_000 picoseconds. 
+ Weight::from_parts(23_678_772, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 2_136 + .saturating_add(Weight::from_parts(26_492, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_ranked_collective_ambassador_collective.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_ranked_collective_ambassador_collective.rs new file mode 100644 index 0000000000000000000000000000000000000000..a6372c4b89dc222ca6a5a6acd36306d79ea3ffb2 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_ranked_collective_ambassador_collective.rs @@ -0,0 +1,177 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_ranked_collective` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-08-11, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `cob`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/release/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_ranked_collective +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_ranked_collective`. +pub struct WeightInfo(PhantomData); +impl pallet_ranked_collective::WeightInfo for WeightInfo { + /// Storage: `AmbassadorCollective::Members` (r:1 w:1) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:1) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IndexToId` (r:0 w:1) + /// Proof: `AmbassadorCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IdToIndex` (r:0 w:1) + /// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + fn add_member() -> Weight { + // Proof Size summary in bytes: + // Measured: `42` + // Estimated: `3507` + // Minimum execution time: 21_000_000 picoseconds. 
+ Weight::from_parts(23_000_000, 0) + .saturating_add(Weight::from_parts(0, 3507)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `AmbassadorCollective::Members` (r:1 w:1) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:11 w:11) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IdToIndex` (r:11 w:11) + /// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IndexToId` (r:11 w:11) + /// Proof: `AmbassadorCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// The range of component `r` is `[0, 10]`. + /// The range of component `r` is `[0, 10]`. + fn remove_member(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `508 + r * (281 ±0)` + // Estimated: `3519 + r * (2529 ±0)` + // Minimum execution time: 34_000_000 picoseconds. + Weight::from_parts(36_500_000, 0) + .saturating_add(Weight::from_parts(0, 3519)) + // Standard Error: 158_113 + .saturating_add(Weight::from_parts(16_000_000, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 2529).saturating_mul(r.into())) + } + /// Storage: `AmbassadorCollective::Members` (r:1 w:1) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:1) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IndexToId` (r:0 w:1) + /// Proof: `AmbassadorCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IdToIndex` (r:0 w:1) + /// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// The range of component `r` is `[0, 10]`. + /// The range of component `r` is `[0, 10]`. + fn promote_member(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `210 + r * (17 ±0)` + // Estimated: `3507` + // Minimum execution time: 25_000_000 picoseconds. 
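+ // This run used STEPS: 2 / REPEAT: 2, so the per-`r` standard error below is large;
+ // the slope itself is small next to the fixed cost of two reads and four writes.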
+ Weight::from_parts(26_000_000, 0) + .saturating_add(Weight::from_parts(0, 3507)) + // Standard Error: 180_277 + .saturating_add(Weight::from_parts(650_000, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `AmbassadorCollective::Members` (r:1 w:1) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:1) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IdToIndex` (r:1 w:1) + /// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IndexToId` (r:1 w:1) + /// Proof: `AmbassadorCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// The range of component `r` is `[0, 10]`. + /// The range of component `r` is `[0, 10]`. + fn demote_member(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `508 + r * (71 ±0)` + // Estimated: `3519` + // Minimum execution time: 34_000_000 picoseconds. + Weight::from_parts(36_500_000, 0) + .saturating_add(Weight::from_parts(0, 3519)) + // Standard Error: 335_410 + .saturating_add(Weight::from_parts(550_000, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `AmbassadorCollective::Members` (r:1 w:0) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::Voting` (r:1 w:1) + /// Proof: `AmbassadorCollective::Voting` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn vote() -> Weight { + // Proof Size summary in bytes: + // Measured: `566` + // Estimated: `317568` + // Minimum execution time: 57_000_000 picoseconds. + Weight::from_parts(60_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::VotingCleanup` (r:1 w:0) + /// Proof: `AmbassadorCollective::VotingCleanup` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::Voting` (r:100 w:100) + /// Proof: `AmbassadorCollective::Voting` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 100]`. + /// The range of component `n` is `[0, 100]`. + fn cleanup_poll(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `209 + n * (52 ±0)` + // Estimated: `4365 + n * (2550 ±0)` + // Minimum execution time: 18_000_000 picoseconds. 
+ Weight::from_parts(18_500_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + // Standard Error: 11_180 + .saturating_add(Weight::from_parts(1_335_000, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2550).saturating_mul(n.into())) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_ranked_collective_fellowship_collective.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_ranked_collective_fellowship_collective.rs new file mode 100644 index 0000000000000000000000000000000000000000..9c773c56ac398a7563427d5d33f837d3977028e1 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_ranked_collective_fellowship_collective.rs @@ -0,0 +1,176 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_ranked_collective` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-08-11, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `cob`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/release/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_ranked_collective +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_ranked_collective`. 
+pub struct WeightInfo(PhantomData); +impl pallet_ranked_collective::WeightInfo for WeightInfo { + /// Storage: `FellowshipCollective::Members` (r:1 w:1) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:1) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IndexToId` (r:0 w:1) + /// Proof: `FellowshipCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IdToIndex` (r:0 w:1) + /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + fn add_member() -> Weight { + // Proof Size summary in bytes: + // Measured: `142` + // Estimated: `3507` + // Minimum execution time: 21_000_000 picoseconds. + Weight::from_parts(22_000_000, 0) + .saturating_add(Weight::from_parts(0, 3507)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `FellowshipCollective::Members` (r:1 w:1) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:11 w:11) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IdToIndex` (r:11 w:11) + /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IndexToId` (r:11 w:11) + /// Proof: `FellowshipCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// The range of component `r` is `[0, 10]`. + /// The range of component `r` is `[0, 10]`. + fn remove_member(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `608 + r * (281 ±0)` + // Estimated: `3519 + r * (2529 ±0)` + // Minimum execution time: 35_000_000 picoseconds. + Weight::from_parts(36_500_000, 0) + .saturating_add(Weight::from_parts(0, 3519)) + // Standard Error: 254_950 + .saturating_add(Weight::from_parts(15_900_000, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 2529).saturating_mul(r.into())) + } + /// Storage: `FellowshipCollective::Members` (r:1 w:1) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:1) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IndexToId` (r:0 w:1) + /// Proof: `FellowshipCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IdToIndex` (r:0 w:1) + /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// The range of component `r` is `[0, 10]`. 
+ /// The range of component `r` is `[0, 10]`. + fn promote_member(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `310 + r * (17 ±0)` + // Estimated: `3507` + // Minimum execution time: 25_000_000 picoseconds. + Weight::from_parts(25_500_000, 0) + .saturating_add(Weight::from_parts(0, 3507)) + // Standard Error: 70_710 + .saturating_add(Weight::from_parts(400_000, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `FellowshipCollective::Members` (r:1 w:1) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:1) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IdToIndex` (r:1 w:1) + /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IndexToId` (r:1 w:1) + /// Proof: `FellowshipCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// The range of component `r` is `[0, 10]`. + /// The range of component `r` is `[0, 10]`. + fn demote_member(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `608 + r * (71 ±0)` + // Estimated: `3519` + // Minimum execution time: 35_000_000 picoseconds. + Weight::from_parts(37_500_000, 0) + .saturating_add(Weight::from_parts(0, 3519)) + // Standard Error: 150_000 + .saturating_add(Weight::from_parts(350_000, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `FellowshipCollective::Members` (r:1 w:0) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::Voting` (r:1 w:1) + /// Proof: `FellowshipCollective::Voting` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn vote() -> Weight { + // Proof Size summary in bytes: + // Measured: `700` + // Estimated: `317568` + // Minimum execution time: 57_000_000 picoseconds. + Weight::from_parts(57_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::VotingCleanup` (r:1 w:0) + /// Proof: `FellowshipCollective::VotingCleanup` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::Voting` (r:100 w:100) + /// Proof: `FellowshipCollective::Voting` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 100]`. 
+ /// The range of component `n` is `[0, 100]`. + fn cleanup_poll(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `343 + n * (52 ±0)` + // Estimated: `4365 + n * (2550 ±0)` + // Minimum execution time: 18_000_000 picoseconds. + Weight::from_parts(19_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + // Standard Error: 25_000 + .saturating_add(Weight::from_parts(1_395_000, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2550).saturating_mul(n.into())) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_referenda_ambassador_referenda.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_referenda_ambassador_referenda.rs new file mode 100644 index 0000000000000000000000000000000000000000..fdc451c5d31ccae8a1195482ef32e99163c22442 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_referenda_ambassador_referenda.rs @@ -0,0 +1,536 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_referenda` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-08-11, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `cob`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/release/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_referenda +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_referenda`. 
+pub struct WeightInfo(PhantomData); +impl pallet_referenda::WeightInfo for WeightInfo { + /// Storage: `AmbassadorCollective::Members` (r:1 w:0) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::ReferendumCount` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:0 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + fn submit() -> Weight { + // Proof Size summary in bytes: + // Measured: `255` + // Estimated: `159279` + // Minimum execution time: 32_000_000 picoseconds. + Weight::from_parts(34_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn place_decision_deposit_preparing() -> Weight { + // Proof Size summary in bytes: + // Measured: `366` + // Estimated: `317568` + // Minimum execution time: 63_000_000 picoseconds. + Weight::from_parts(68_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:0) + /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:1) + /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn place_decision_deposit_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `1165` + // Estimated: `159279` + // Minimum execution time: 97_000_000 picoseconds. 
+ Weight::from_parts(123_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:0) + /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:1) + /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn place_decision_deposit_not_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `1173` + // Estimated: `159279` + // Minimum execution time: 104_000_000 picoseconds. + Weight::from_parts(111_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:1) + /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn place_decision_deposit_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `702` + // Estimated: `317568` + // Minimum execution time: 140_000_000 picoseconds. + Weight::from_parts(150_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:1) + /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn place_decision_deposit_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `601` + // Estimated: `317568` + // Minimum execution time: 81_000_000 picoseconds. 
+ Weight::from_parts(82_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + fn refund_decision_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `317` + // Estimated: `4365` + // Minimum execution time: 38_000_000 picoseconds. + Weight::from_parts(38_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + fn refund_submission_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `167` + // Estimated: `4365` + // Minimum execution time: 17_000_000 picoseconds. + Weight::from_parts(18_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn cancel() -> Weight { + // Proof Size summary in bytes: + // Measured: `311` + // Estimated: `317568` + // Minimum execution time: 44_000_000 picoseconds. 
+ Weight::from_parts(45_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AmbassadorReferenda::MetadataOf` (r:1 w:0) + /// Proof: `AmbassadorReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn kill() -> Weight { + // Proof Size summary in bytes: + // Measured: `626` + // Estimated: `317568` + // Minimum execution time: 183_000_000 picoseconds. + Weight::from_parts(187_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(11)) + .saturating_add(T::DbWeight::get().writes(6)) + } + /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:0) + /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:1) + /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + fn one_fewer_deciding_queue_empty() -> Weight { + // Proof Size summary in bytes: + // Measured: `140` + // Estimated: `3636` + // Minimum execution time: 12_000_000 picoseconds. 
+ Weight::from_parts(12_000_000, 0) + .saturating_add(Weight::from_parts(0, 3636)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:1) + /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn one_fewer_deciding_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `1412` + // Estimated: `159279` + // Minimum execution time: 88_000_000 picoseconds. + Weight::from_parts(97_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:1) + /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn one_fewer_deciding_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `1412` + // Estimated: `159279` + // Minimum execution time: 87_000_000 picoseconds. + Weight::from_parts(92_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:1) + /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`) + fn nudge_referendum_requeued_insertion() -> Weight { + // Proof Size summary in bytes: + // Measured: `935` + // Estimated: `4365` + // Minimum execution time: 43_000_000 picoseconds. 
+ Weight::from_parts(46_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:1) + /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`) + fn nudge_referendum_requeued_slide() -> Weight { + // Proof Size summary in bytes: + // Measured: `935` + // Estimated: `4365` + // Minimum execution time: 39_000_000 picoseconds. + Weight::from_parts(43_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:0) + /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:1) + /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`) + fn nudge_referendum_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `951` + // Estimated: `4365` + // Minimum execution time: 48_000_000 picoseconds. + Weight::from_parts(50_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:0) + /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:1) + /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`) + fn nudge_referendum_not_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `959` + // Estimated: `4365` + // Minimum execution time: 42_000_000 picoseconds. + Weight::from_parts(48_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_no_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `263` + // Estimated: `159279` + // Minimum execution time: 28_000_000 picoseconds. 
+ Weight::from_parts(30_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_preparing() -> Weight { + // Proof Size summary in bytes: + // Measured: `311` + // Estimated: `159279` + // Minimum execution time: 26_000_000 picoseconds. + Weight::from_parts(28_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + fn nudge_referendum_timed_out() -> Weight { + // Proof Size summary in bytes: + // Measured: `208` + // Estimated: `4365` + // Minimum execution time: 19_000_000 picoseconds. + Weight::from_parts(20_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:1) + /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_begin_deciding_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `546` + // Estimated: `159279` + // Minimum execution time: 42_000_000 picoseconds. + Weight::from_parts(46_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:1) + /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_begin_deciding_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `647` + // Estimated: `159279` + // Minimum execution time: 87_000_000 picoseconds. 
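+ // The `Estimated` proof size of 159_279 bytes is dominated by a single worst-case
+ // `Scheduler::Agenda` entry (`added: 158_289`); functions touching two agenda entries
+ // report 317_568 instead.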
+ Weight::from_parts(93_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_begin_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `700` + // Estimated: `159279` + // Minimum execution time: 100_000_000 picoseconds. + Weight::from_parts(120_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_end_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `683` + // Estimated: `159279` + // Minimum execution time: 90_000_000 picoseconds. + Weight::from_parts(100_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_continue_not_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `700` + // Estimated: `159279` + // Minimum execution time: 77_000_000 picoseconds. 
+ Weight::from_parts(82_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_continue_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `704` + // Estimated: `159279` + // Minimum execution time: 68_000_000 picoseconds. + Weight::from_parts(77_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Lookup` (r:1 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + fn nudge_referendum_approved() -> Weight { + // Proof Size summary in bytes: + // Measured: `704` + // Estimated: `317568` + // Minimum execution time: 99_000_000 picoseconds. + Weight::from_parts(104_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_rejected() -> Weight { + // Proof Size summary in bytes: + // Measured: `700` + // Estimated: `159279` + // Minimum execution time: 87_000_000 picoseconds. 
+ Weight::from_parts(100_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::MetadataOf` (r:0 w:1) + /// Proof: `AmbassadorReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn set_some_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `419` + // Estimated: `4365` + // Minimum execution time: 23_000_000 picoseconds. + Weight::from_parts(25_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::MetadataOf` (r:1 w:1) + /// Proof: `AmbassadorReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn clear_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `285` + // Estimated: `4365` + // Minimum execution time: 20_000_000 picoseconds. + Weight::from_parts(21_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_referenda_fellowship_referenda.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_referenda_fellowship_referenda.rs new file mode 100644 index 0000000000000000000000000000000000000000..63f68833795f20023d8e9040f105d98bc161ef4d --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_referenda_fellowship_referenda.rs @@ -0,0 +1,535 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_referenda` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-08-11, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `cob`, CPU: `` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/release/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_referenda +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_referenda`. +pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> pallet_referenda::WeightInfo for WeightInfo<T> { + /// Storage: `FellowshipCollective::Members` (r:1 w:0) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::ReferendumCount` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:0 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + fn submit() -> Weight { + // Proof Size summary in bytes: + // Measured: `389` + // Estimated: `159279` + // Minimum execution time: 34_000_000 picoseconds. + Weight::from_parts(36_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn place_decision_deposit_preparing() -> Weight { + // Proof Size summary in bytes: + // Measured: `400` + // Estimated: `317568` + // Minimum execution time: 64_000_000 picoseconds. + Weight::from_parts(67_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:0) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn place_decision_deposit_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `2038` + // Estimated: `159279` + // Minimum execution time: 99_000_000 picoseconds.
+ Weight::from_parts(109_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:0) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn place_decision_deposit_not_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `2079` + // Estimated: `159279` + // Minimum execution time: 101_000_000 picoseconds. + Weight::from_parts(111_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:1) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn place_decision_deposit_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `836` + // Estimated: `317568` + // Minimum execution time: 135_000_000 picoseconds. + Weight::from_parts(153_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:1) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn place_decision_deposit_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `735` + // Estimated: `317568` + // Minimum execution time: 78_000_000 picoseconds. 
+ Weight::from_parts(82_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + fn refund_decision_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `351` + // Estimated: `4365` + // Minimum execution time: 38_000_000 picoseconds. + Weight::from_parts(39_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + fn refund_submission_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `201` + // Estimated: `4365` + // Minimum execution time: 18_000_000 picoseconds. + Weight::from_parts(19_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn cancel() -> Weight { + // Proof Size summary in bytes: + // Measured: `345` + // Estimated: `317568` + // Minimum execution time: 45_000_000 picoseconds. 
+ Weight::from_parts(46_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `FellowshipReferenda::MetadataOf` (r:1 w:0) + /// Proof: `FellowshipReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn kill() -> Weight { + // Proof Size summary in bytes: + // Measured: `587` + // Estimated: `317568` + // Minimum execution time: 185_000_000 picoseconds. + Weight::from_parts(196_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(11)) + .saturating_add(T::DbWeight::get().writes(6)) + } + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:0) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:1) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + fn one_fewer_deciding_queue_empty() -> Weight { + // Proof Size summary in bytes: + // Measured: `174` + // Estimated: `4277` + // Minimum execution time: 12_000_000 picoseconds. 
+ Weight::from_parts(15_000_000, 0) + .saturating_add(Weight::from_parts(0, 4277)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn one_fewer_deciding_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `2452` + // Estimated: `159279` + // Minimum execution time: 82_000_000 picoseconds. + Weight::from_parts(90_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn one_fewer_deciding_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `2452` + // Estimated: `159279` + // Minimum execution time: 91_000_000 picoseconds. + Weight::from_parts(99_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + fn nudge_referendum_requeued_insertion() -> Weight { + // Proof Size summary in bytes: + // Measured: `1841` + // Estimated: `4365` + // Minimum execution time: 41_000_000 picoseconds. 
+ Weight::from_parts(44_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + fn nudge_referendum_requeued_slide() -> Weight { + // Proof Size summary in bytes: + // Measured: `1808` + // Estimated: `4365` + // Minimum execution time: 46_000_000 picoseconds. + Weight::from_parts(55_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:0) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + fn nudge_referendum_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `1824` + // Estimated: `4365` + // Minimum execution time: 49_000_000 picoseconds. + Weight::from_parts(53_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:0) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + fn nudge_referendum_not_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `1865` + // Estimated: `4365` + // Minimum execution time: 51_000_000 picoseconds. + Weight::from_parts(54_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_no_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `297` + // Estimated: `159279` + // Minimum execution time: 28_000_000 picoseconds. 
+ Weight::from_parts(30_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_preparing() -> Weight { + // Proof Size summary in bytes: + // Measured: `345` + // Estimated: `159279` + // Minimum execution time: 28_000_000 picoseconds. + Weight::from_parts(29_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + fn nudge_referendum_timed_out() -> Weight { + // Proof Size summary in bytes: + // Measured: `242` + // Estimated: `4365` + // Minimum execution time: 20_000_000 picoseconds. + Weight::from_parts(21_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:1) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_begin_deciding_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `680` + // Estimated: `159279` + // Minimum execution time: 42_000_000 picoseconds. + Weight::from_parts(47_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:1) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_begin_deciding_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `781` + // Estimated: `159279` + // Minimum execution time: 90_000_000 picoseconds. 
+ Weight::from_parts(95_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_begin_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `834` + // Estimated: `159279` + // Minimum execution time: 84_000_000 picoseconds. + Weight::from_parts(93_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_end_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `817` + // Estimated: `159279` + // Minimum execution time: 88_000_000 picoseconds. + Weight::from_parts(98_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_continue_not_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `834` + // Estimated: `159279` + // Minimum execution time: 81_000_000 picoseconds. 
+ Weight::from_parts(93_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_continue_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `838` + // Estimated: `159279` + // Minimum execution time: 74_000_000 picoseconds. + Weight::from_parts(77_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Lookup` (r:1 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + fn nudge_referendum_approved() -> Weight { + // Proof Size summary in bytes: + // Measured: `838` + // Estimated: `317568` + // Minimum execution time: 105_000_000 picoseconds. + Weight::from_parts(123_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_rejected() -> Weight { + // Proof Size summary in bytes: + // Measured: `834` + // Estimated: `159279` + // Minimum execution time: 90_000_000 picoseconds. 
+ Weight::from_parts(100_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::MetadataOf` (r:0 w:1) + /// Proof: `FellowshipReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn set_some_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `453` + // Estimated: `4365` + // Minimum execution time: 24_000_000 picoseconds. + Weight::from_parts(24_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::MetadataOf` (r:1 w:1) + /// Proof: `FellowshipReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn clear_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `319` + // Estimated: `4365` + // Minimum execution time: 21_000_000 picoseconds. + Weight::from_parts(23_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_salary_ambassador_salary.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_salary_ambassador_salary.rs new file mode 100644 index 0000000000000000000000000000000000000000..0522420f2f5172ecc2840b70ecb9c74796bbddc3 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_salary_ambassador_salary.rs @@ -0,0 +1,190 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_salary` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-08-11, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `cob`, CPU: `` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/release/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_salary +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_salary`. +pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> pallet_salary::WeightInfo for WeightInfo<T> { + /// Storage: `AmbassadorSalary::Status` (r:1 w:1) + /// Proof: `AmbassadorSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + fn init() -> Weight { + // Proof Size summary in bytes: + // Measured: `109` + // Estimated: `1541` + // Minimum execution time: 12_000_000 picoseconds. + Weight::from_parts(14_000_000, 0) + .saturating_add(Weight::from_parts(0, 1541)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorSalary::Status` (r:1 w:1) + /// Proof: `AmbassadorSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + fn bump() -> Weight { + // Proof Size summary in bytes: + // Measured: `191` + // Estimated: `1541` + // Minimum execution time: 15_000_000 picoseconds. + Weight::from_parts(16_000_000, 0) + .saturating_add(Weight::from_parts(0, 1541)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorSalary::Status` (r:1 w:0) + /// Proof: `AmbassadorSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::Members` (r:1 w:0) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorSalary::Claimant` (r:1 w:1) + /// Proof: `AmbassadorSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) + fn induct() -> Weight { + // Proof Size summary in bytes: + // Measured: `400` + // Estimated: `3551` + // Minimum execution time: 23_000_000 picoseconds. + Weight::from_parts(23_000_000, 0) + .saturating_add(Weight::from_parts(0, 3551)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorCollective::Members` (r:1 w:0) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorSalary::Status` (r:1 w:1) + /// Proof: `AmbassadorSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorSalary::Claimant` (r:1 w:1) + /// Proof: `AmbassadorSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) + fn register() -> Weight { + // Proof Size summary in bytes: + // Measured: `467` + // Estimated: `3551` + // Minimum execution time: 27_000_000 picoseconds.
+ Weight::from_parts(28_000_000, 0) + .saturating_add(Weight::from_parts(0, 3551)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorSalary::Status` (r:1 w:1) + /// Proof: `AmbassadorSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorSalary::Claimant` (r:1 w:1) + /// Proof: `AmbassadorSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::Members` (r:1 w:0) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) + /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) + /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn payout() -> Weight { + // Proof Size summary in bytes: + // Measured: `879` + // Estimated: `4344` + // Minimum execution time: 68_000_000 picoseconds. 
+ Weight::from_parts(72_000_000, 0) + .saturating_add(Weight::from_parts(0, 4344)) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().writes(7)) + } + /// Storage: `AmbassadorSalary::Status` (r:1 w:1) + /// Proof: `AmbassadorSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorSalary::Claimant` (r:1 w:1) + /// Proof: `AmbassadorSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::Members` (r:1 w:0) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) + /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) + /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn payout_other() -> Weight { + // Proof Size summary in bytes: + // Measured: `879` + // Estimated: `4344` + // Minimum execution time: 69_000_000 picoseconds. + Weight::from_parts(70_000_000, 0) + .saturating_add(Weight::from_parts(0, 4344)) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().writes(7)) + } + /// Storage: `AmbassadorSalary::Status` (r:1 w:1) + /// Proof: `AmbassadorSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorSalary::Claimant` (r:1 w:1) + /// Proof: `AmbassadorSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::Queries` (r:1 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn check_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `479` + // Estimated: `3944` + // Minimum execution time: 27_000_000 picoseconds. 
+ Weight::from_parts(28_000_000, 0) + .saturating_add(Weight::from_parts(0, 3944)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_salary_fellowship_salary.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_salary_fellowship_salary.rs new file mode 100644 index 0000000000000000000000000000000000000000..37680b4e5df719e61f174c3260bde023fd685623 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_salary_fellowship_salary.rs @@ -0,0 +1,189 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_salary` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-08-11, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `cob`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/release/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_salary +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_salary`. +pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> pallet_salary::WeightInfo for WeightInfo<T> { + /// Storage: `FellowshipSalary::Status` (r:1 w:1) + /// Proof: `FellowshipSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + fn init() -> Weight { + // Proof Size summary in bytes: + // Measured: `142` + // Estimated: `1541` + // Minimum execution time: 13_000_000 picoseconds. + Weight::from_parts(17_000_000, 0) + .saturating_add(Weight::from_parts(0, 1541)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipSalary::Status` (r:1 w:1) + /// Proof: `FellowshipSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + fn bump() -> Weight { + // Proof Size summary in bytes: + // Measured: `224` + // Estimated: `1541` + // Minimum execution time: 15_000_000 picoseconds.
+ Weight::from_parts(18_000_000, 0) + .saturating_add(Weight::from_parts(0, 1541)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipSalary::Status` (r:1 w:0) + /// Proof: `FellowshipSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::Members` (r:1 w:0) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipSalary::Claimant` (r:1 w:1) + /// Proof: `FellowshipSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) + fn induct() -> Weight { + // Proof Size summary in bytes: + // Measured: `395` + // Estimated: `3551` + // Minimum execution time: 22_000_000 picoseconds. + Weight::from_parts(25_000_000, 0) + .saturating_add(Weight::from_parts(0, 3551)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipCollective::Members` (r:1 w:0) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipSalary::Status` (r:1 w:1) + /// Proof: `FellowshipSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `FellowshipSalary::Claimant` (r:1 w:1) + /// Proof: `FellowshipSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) + fn register() -> Weight { + // Proof Size summary in bytes: + // Measured: `462` + // Estimated: `3551` + // Minimum execution time: 26_000_000 picoseconds. + Weight::from_parts(29_000_000, 0) + .saturating_add(Weight::from_parts(0, 3551)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipSalary::Status` (r:1 w:1) + /// Proof: `FellowshipSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `FellowshipSalary::Claimant` (r:1 w:1) + /// Proof: `FellowshipSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::Members` (r:1 w:0) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) + /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` 
(`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) + /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn payout() -> Weight { + // Proof Size summary in bytes: + // Measured: `774` + // Estimated: `4239` + // Minimum execution time: 67_000_000 picoseconds. + Weight::from_parts(74_000_000, 0) + .saturating_add(Weight::from_parts(0, 4239)) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().writes(7)) + } + /// Storage: `FellowshipSalary::Status` (r:1 w:1) + /// Proof: `FellowshipSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `FellowshipSalary::Claimant` (r:1 w:1) + /// Proof: `FellowshipSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::Members` (r:1 w:0) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) + /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) + /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn payout_other() -> Weight { + // Proof Size summary in bytes: + // Measured: `774` + // Estimated: `4239` + // Minimum execution time: 66_000_000 picoseconds. 
+ Weight::from_parts(71_000_000, 0) + .saturating_add(Weight::from_parts(0, 4239)) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().writes(7)) + } + /// Storage: `FellowshipSalary::Status` (r:1 w:1) + /// Proof: `FellowshipSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `FellowshipSalary::Claimant` (r:1 w:1) + /// Proof: `FellowshipSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::Queries` (r:1 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn check_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `512` + // Estimated: `3977` + // Minimum execution time: 26_000_000 picoseconds. + Weight::from_parts(27_000_000, 0) + .saturating_add(Weight::from_parts(0, 3977)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_scheduler.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_scheduler.rs new file mode 100644 index 0000000000000000000000000000000000000000..cf5610df66574a38a388b7e36a56f87e41ff59c2 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_scheduler.rs @@ -0,0 +1,206 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_scheduler` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_scheduler +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_scheduler`. 
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_scheduler::WeightInfo for WeightInfo<T> {
+	/// Storage: `Scheduler::IncompleteSince` (r:1 w:1)
+	/// Proof: `Scheduler::IncompleteSince` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	fn service_agendas_base() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `31`
+		// Estimated: `1489`
+		// Minimum execution time: 3_441_000 picoseconds.
+		Weight::from_parts(3_604_000, 0)
+			.saturating_add(Weight::from_parts(0, 1489))
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
+	/// Storage: `Scheduler::Agenda` (r:1 w:1)
+	/// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`)
+	/// The range of component `s` is `[0, 200]`.
+	fn service_agenda_base(s: u32, ) -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `77 + s * (177 ±0)`
+		// Estimated: `159279`
+		// Minimum execution time: 2_879_000 picoseconds.
+		Weight::from_parts(2_963_000, 0)
+			.saturating_add(Weight::from_parts(0, 159279))
+			// Standard Error: 3_764
+			.saturating_add(Weight::from_parts(909_557, 0).saturating_mul(s.into()))
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
+	fn service_task_base() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `0`
+		// Estimated: `0`
+		// Minimum execution time: 5_172_000 picoseconds.
+		Weight::from_parts(5_294_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
+	}
+	/// Storage: `Preimage::PreimageFor` (r:1 w:1)
+	/// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `Measured`)
+	/// Storage: `Preimage::StatusFor` (r:1 w:1)
+	/// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`)
+	/// The range of component `s` is `[128, 4194304]`.
+	fn service_task_fetched(s: u32, ) -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `213 + s * (1 ±0)`
+		// Estimated: `3678 + s * (1 ±0)`
+		// Minimum execution time: 19_704_000 picoseconds.
+		Weight::from_parts(19_903_000, 0)
+			.saturating_add(Weight::from_parts(0, 3678))
+			// Standard Error: 5
+			.saturating_add(Weight::from_parts(1_394, 0).saturating_mul(s.into()))
+			.saturating_add(T::DbWeight::get().reads(2))
+			.saturating_add(T::DbWeight::get().writes(2))
+			.saturating_add(Weight::from_parts(0, 1).saturating_mul(s.into()))
+	}
+	/// Storage: `Scheduler::Lookup` (r:0 w:1)
+	/// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`)
+	fn service_task_named() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `0`
+		// Estimated: `0`
+		// Minimum execution time: 6_359_000 picoseconds.
+		Weight::from_parts(6_599_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
+	fn service_task_periodic() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `0`
+		// Estimated: `0`
+		// Minimum execution time: 5_217_000 picoseconds.
+		Weight::from_parts(5_333_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
+	}
+	fn execute_dispatch_signed() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `0`
+		// Estimated: `0`
+		// Minimum execution time: 2_406_000 picoseconds.
+ Weight::from_parts(2_541_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn execute_dispatch_unsigned() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_370_000 picoseconds. + Weight::from_parts(2_561_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + /// The range of component `s` is `[0, 199]`. + fn schedule(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `77 + s * (177 ±0)` + // Estimated: `159279` + // Minimum execution time: 11_784_000 picoseconds. + Weight::from_parts(5_574_404, 0) + .saturating_add(Weight::from_parts(0, 159279)) + // Standard Error: 7_217 + .saturating_add(Weight::from_parts(1_035_248, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Lookup` (r:0 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// The range of component `s` is `[1, 200]`. + fn cancel(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `77 + s * (177 ±0)` + // Estimated: `159279` + // Minimum execution time: 16_373_000 picoseconds. + Weight::from_parts(3_088_135, 0) + .saturating_add(Weight::from_parts(0, 159279)) + // Standard Error: 7_095 + .saturating_add(Weight::from_parts(1_745_270, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Scheduler::Lookup` (r:1 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + /// The range of component `s` is `[0, 199]`. + fn schedule_named(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `468 + s * (179 ±0)` + // Estimated: `159279` + // Minimum execution time: 14_822_000 picoseconds. + Weight::from_parts(9_591_402, 0) + .saturating_add(Weight::from_parts(0, 159279)) + // Standard Error: 7_151 + .saturating_add(Weight::from_parts(1_058_408, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Scheduler::Lookup` (r:1 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + /// The range of component `s` is `[1, 200]`. + fn cancel_named(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `509 + s * (179 ±0)` + // Estimated: `159279` + // Minimum execution time: 18_541_000 picoseconds. 
+ Weight::from_parts(6_522_239, 0) + .saturating_add(Weight::from_parts(0, 159279)) + // Standard Error: 8_349 + .saturating_add(Weight::from_parts(1_760_431, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_session.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_session.rs new file mode 100644 index 0000000000000000000000000000000000000000..2ac0804df8903dd5ab4184e21b49cb13b32b48dc --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_session.rs @@ -0,0 +1,80 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_session` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_session +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_session`. +pub struct WeightInfo(PhantomData); +impl pallet_session::WeightInfo for WeightInfo { + /// Storage: `Session::NextKeys` (r:1 w:1) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Session::KeyOwner` (r:1 w:1) + /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn set_keys() -> Weight { + // Proof Size summary in bytes: + // Measured: `270` + // Estimated: `3735` + // Minimum execution time: 16_663_000 picoseconds. 
+ Weight::from_parts(17_246_000, 0) + .saturating_add(Weight::from_parts(0, 3735)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Session::NextKeys` (r:1 w:1) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Session::KeyOwner` (r:0 w:1) + /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn purge_keys() -> Weight { + // Proof Size summary in bytes: + // Measured: `242` + // Estimated: `3707` + // Minimum execution time: 11_850_000 picoseconds. + Weight::from_parts(12_204_000, 0) + .saturating_add(Weight::from_parts(0, 3707)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_timestamp.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_timestamp.rs new file mode 100644 index 0000000000000000000000000000000000000000..ca06f43f92e425b8d93b8bd394f0897f71627252 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_timestamp.rs @@ -0,0 +1,74 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_timestamp` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_timestamp +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_timestamp`. 
+pub struct WeightInfo(PhantomData); +impl pallet_timestamp::WeightInfo for WeightInfo { + /// Storage: `Timestamp::Now` (r:1 w:1) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Aura::CurrentSlot` (r:1 w:0) + /// Proof: `Aura::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + fn set() -> Weight { + // Proof Size summary in bytes: + // Measured: `49` + // Estimated: `1493` + // Minimum execution time: 7_863_000 picoseconds. + Weight::from_parts(8_183_000, 0) + .saturating_add(Weight::from_parts(0, 1493)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn on_finalize() -> Weight { + // Proof Size summary in bytes: + // Measured: `57` + // Estimated: `0` + // Minimum execution time: 3_460_000 picoseconds. + Weight::from_parts(3_577_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_utility.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_utility.rs new file mode 100644 index 0000000000000000000000000000000000000000..c60a79d91da32c863604b1d220b2e6c8c4159063 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_utility.rs @@ -0,0 +1,101 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_utility` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_utility +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_utility`. +pub struct WeightInfo(PhantomData); +impl pallet_utility::WeightInfo for WeightInfo { + /// The range of component `c` is `[0, 1000]`. + fn batch(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_650_000 picoseconds. 
+ Weight::from_parts(7_474_437, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 1_625 + .saturating_add(Weight::from_parts(4_996_146, 0).saturating_mul(c.into())) + } + fn as_derivative() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 4_612_000 picoseconds. + Weight::from_parts(4_774_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// The range of component `c` is `[0, 1000]`. + fn batch_all(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_744_000 picoseconds. + Weight::from_parts(10_889_913, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 1_281 + .saturating_add(Weight::from_parts(5_218_293, 0).saturating_mul(c.into())) + } + fn dispatch_as() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 8_673_000 picoseconds. + Weight::from_parts(8_980_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// The range of component `c` is `[0, 1000]`. + fn force_batch(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_744_000 picoseconds. + Weight::from_parts(7_801_721, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 1_395 + .saturating_add(Weight::from_parts(5_000_971, 0).saturating_mul(c.into())) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs new file mode 100644 index 0000000000000000000000000000000000000000..a3b42cb86c407a05ae205062ffff5a41a4cb7392 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs @@ -0,0 +1,323 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_xcm` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=collectives-westend-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_xcm`. +pub struct WeightInfo(PhantomData); +impl pallet_xcm::WeightInfo for WeightInfo { + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn send() -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 25_746_000 picoseconds. + Weight::from_parts(26_349_000, 0) + .saturating_add(Weight::from_parts(0, 3610)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn teleport_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `69` + // Estimated: `1489` + // Minimum execution time: 22_660_000 picoseconds. + Weight::from_parts(23_173_000, 0) + .saturating_add(Weight::from_parts(0, 1489)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn reserve_transfer_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn execute() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. 
+ Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn force_xcm_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_321_000 picoseconds. + Weight::from_parts(7_542_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:0 w:1) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn force_default_xcm_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_232_000 picoseconds. + Weight::from_parts(2_395_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn force_subscribe_version_notify() -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 29_006_000 picoseconds. 
+ Weight::from_parts(29_777_000, 0) + .saturating_add(Weight::from_parts(0, 3610)) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn force_unsubscribe_version_notify() -> Weight { + // Proof Size summary in bytes: + // Measured: `363` + // Estimated: `3828` + // Minimum execution time: 31_245_000 picoseconds. + Weight::from_parts(32_125_000, 0) + .saturating_add(Weight::from_parts(0, 3828)) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) + /// Proof: `PolkadotXcm::XcmExecutionSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn force_suspension() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_255_000 picoseconds. + Weight::from_parts(2_399_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PolkadotXcm::SupportedVersion` (r:4 w:2) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn migrate_supported_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `162` + // Estimated: `11052` + // Minimum execution time: 16_521_000 picoseconds. + Weight::from_parts(17_001_000, 0) + .saturating_add(Weight::from_parts(0, 11052)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::VersionNotifiers` (r:4 w:2) + /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn migrate_version_notifiers() -> Weight { + // Proof Size summary in bytes: + // Measured: `166` + // Estimated: `11056` + // Minimum execution time: 16_486_000 picoseconds. 
+ Weight::from_parts(16_729_000, 0) + .saturating_add(Weight::from_parts(0, 11056)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn already_notified_target() -> Weight { + // Proof Size summary in bytes: + // Measured: `173` + // Estimated: `13538` + // Minimum execution time: 18_037_000 picoseconds. + Weight::from_parts(18_310_000, 0) + .saturating_add(Weight::from_parts(0, 13538)) + .saturating_add(T::DbWeight::get().reads(5)) + } + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn notify_current_targets() -> Weight { + // Proof Size summary in bytes: + // Measured: `212` + // Estimated: `6152` + // Minimum execution time: 27_901_000 picoseconds. + Weight::from_parts(28_566_000, 0) + .saturating_add(Weight::from_parts(0, 6152)) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn notify_target_migration_fail() -> Weight { + // Proof Size summary in bytes: + // Measured: `206` + // Estimated: `8621` + // Minimum execution time: 9_299_000 picoseconds. + Weight::from_parts(9_547_000, 0) + .saturating_add(Weight::from_parts(0, 8621)) + .saturating_add(T::DbWeight::get().reads(3)) + } + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn migrate_version_notify_targets() -> Weight { + // Proof Size summary in bytes: + // Measured: `173` + // Estimated: `11063` + // Minimum execution time: 16_768_000 picoseconds. 
+ Weight::from_parts(17_215_000, 0) + .saturating_add(Weight::from_parts(0, 11063)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn migrate_and_notify_old_targets() -> Weight { + // Proof Size summary in bytes: + // Measured: `215` + // Estimated: `11105` + // Minimum execution time: 35_134_000 picoseconds. + Weight::from_parts(35_883_000, 0) + .saturating_add(Weight::from_parts(0, 11105)) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn new_query() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `1588` + // Minimum execution time: 4_562_000 picoseconds. + Weight::from_parts(4_802_000, 0) + .saturating_add(Weight::from_parts(0, 1588)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::Queries` (r:1 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn take_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `7740` + // Estimated: `11205` + // Minimum execution time: 26_865_000 picoseconds. + Weight::from_parts(27_400_000, 0) + .saturating_add(Weight::from_parts(0, 11205)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/paritydb_weights.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/paritydb_weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..25679703831a13b8d1bb7fb7dd4d92fa84b1f255 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/paritydb_weights.rs @@ -0,0 +1,63 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod constants { + use frame_support::{ + parameter_types, + weights::{constants, RuntimeDbWeight}, + }; + + parameter_types! { + /// `ParityDB` can be enabled with a feature flag, but is still experimental. These weights + /// are available for brave runtime engineers who may want to try this out as default. + pub const ParityDbWeight: RuntimeDbWeight = RuntimeDbWeight { + read: 8_000 * constants::WEIGHT_REF_TIME_PER_NANOS, + write: 50_000 * constants::WEIGHT_REF_TIME_PER_NANOS, + }; + } + + #[cfg(test)] + mod test_db_weights { + use super::constants::ParityDbWeight as W; + use frame_support::weights::constants; + + /// Checks that all weights exist and have sane values. + // NOTE: If this test fails but you are sure that the generated values are fine, + // you can delete it. + #[test] + fn sane() { + // At least 1 µs. + assert!( + W::get().reads(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, + "Read weight should be at least 1 µs." + ); + assert!( + W::get().writes(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, + "Write weight should be at least 1 µs." + ); + // At most 1 ms. + assert!( + W::get().reads(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, + "Read weight should be at most 1 ms." + ); + assert!( + W::get().writes(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, + "Write weight should be at most 1 ms." + ); + } + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/rocksdb_weights.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/rocksdb_weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..3dd817aa6f137085b0e5fdf2b11b7f50e5c8b002 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/rocksdb_weights.rs @@ -0,0 +1,63 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod constants { + use frame_support::{ + parameter_types, + weights::{constants, RuntimeDbWeight}, + }; + + parameter_types! { + /// By default, Substrate uses `RocksDB`, so this will be the weight used throughout + /// the runtime. 
+ pub const RocksDbWeight: RuntimeDbWeight = RuntimeDbWeight { + read: 25_000 * constants::WEIGHT_REF_TIME_PER_NANOS, + write: 100_000 * constants::WEIGHT_REF_TIME_PER_NANOS, + }; + } + + #[cfg(test)] + mod test_db_weights { + use super::constants::RocksDbWeight as W; + use frame_support::weights::constants; + + /// Checks that all weights exist and have sane values. + // NOTE: If this test fails but you are sure that the generated values are fine, + // you can delete it. + #[test] + fn sane() { + // At least 1 µs. + assert!( + W::get().reads(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, + "Read weight should be at least 1 µs." + ); + assert!( + W::get().writes(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, + "Write weight should be at least 1 µs." + ); + // At most 1 ms. + assert!( + W::get().reads(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, + "Read weight should be at most 1 ms." + ); + assert!( + W::get().writes(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, + "Write weight should be at most 1 ms." + ); + } + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs new file mode 100644 index 0000000000000000000000000000000000000000..cefc099c96f9f3031448fa3f7b810a6c9c7aaba8 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs @@ -0,0 +1,366 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
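The generated weight functions in the files above all follow the same recipe: a measured base weight plus `T::DbWeight::get().reads(..)` / `writes(..)`, where the database constants come from files like `rocksdb_weights.rs`. A minimal, self-contained sketch of that composition (illustrative only, not part of the patch; the function name is ours, the numbers are copied from the `pallet_session::set_keys` entry above, and `frame_support`'s stock `RocksDbWeight` constant stands in for the runtime's `T::DbWeight`):

use frame_support::weights::{constants::RocksDbWeight, Weight};

/// Rebuild the shape of a generated weight function by hand: a measured base
/// ref_time/proof_size pair plus two storage reads and two writes.
fn example_set_keys_weight() -> Weight {
    Weight::from_parts(17_246_000, 3735)
        .saturating_add(RocksDbWeight::get().reads(2))
        .saturating_add(RocksDbWeight::get().writes(2))
}

fn main() {
    let w = example_set_keys_weight();
    // With the 25 µs read / 100 µs write constants above, the DB accesses add
    // 250_000_000 picoseconds of ref_time on top of the measured base.
    println!("ref_time = {} ps, proof_size = {} bytes", w.ref_time(), w.proof_size());
}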
+
+use super::{
+	AccountId, AllPalletsWithSystem, Balances, BaseDeliveryFee, FeeAssetId, Fellows, ParachainInfo,
+	ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin,
+	TransactionByteFee, WeightToFee, WestendTreasuryAccount, XcmpQueue,
+};
+use frame_support::{
+	match_types, parameter_types,
+	traits::{ConstU32, Contains, Equals, Everything, Nothing},
+	weights::Weight,
+};
+use frame_system::EnsureRoot;
+use pallet_xcm::XcmPassthrough;
+use parachains_common::{
+	impls::ToStakingPot,
+	xcm_config::{ConcreteAssetFromSystem, RelayOrOtherSystemParachains},
+};
+use polkadot_parachain_primitives::primitives::Sibling;
+use polkadot_runtime_common::xcm_sender::ExponentialPrice;
+use westend_runtime_constants::system_parachain;
+use xcm::latest::prelude::*;
+use xcm_builder::{
+	AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses,
+	AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, CurrencyAdapter,
+	DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, FixedWeightBounds, IsConcrete,
+	LocatableAssetId, OriginToPluralityVoice, ParentAsSuperuser, ParentIsPreset,
+	RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia,
+	SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit,
+	TrailingSetTopicAsId, UsingComponents, WithComputedOrigin, WithUniqueTopic,
+	XcmFeeManagerFromComponents, XcmFeeToAccount,
+};
+use xcm_executor::{traits::WithOriginFilter, XcmExecutor};
+
+const FELLOWSHIP_ADMIN_INDEX: u32 = 1;
+
+parameter_types! {
+	pub const WndLocation: MultiLocation = MultiLocation::parent();
+	pub const RelayNetwork: Option<NetworkId> = Some(NetworkId::Westend);
+	pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into();
+	pub UniversalLocation: InteriorMultiLocation =
+		X2(GlobalConsensus(RelayNetwork::get().unwrap()), Parachain(ParachainInfo::parachain_id().into()));
+	pub RelayTreasuryLocation: MultiLocation = (Parent, PalletInstance(westend_runtime_constants::TREASURY_PALLET_ID)).into();
+	pub CheckingAccount: AccountId = PolkadotXcm::check_account();
+	pub const GovernanceLocation: MultiLocation = MultiLocation::parent();
+	pub const FellowshipAdminBodyId: BodyId = BodyId::Index(FELLOWSHIP_ADMIN_INDEX);
+	pub AssetHub: MultiLocation = (Parent, Parachain(1000)).into();
+	pub AssetHubUsdtId: AssetId = (PalletInstance(50), GeneralIndex(1984)).into();
+	pub UsdtAssetHub: LocatableAssetId = LocatableAssetId {
+		location: AssetHub::get(),
+		asset_id: AssetHubUsdtId::get(),
+	};
+	pub WndAssetHub: LocatableAssetId = LocatableAssetId {
+		location: AssetHub::get(),
+		asset_id: WndLocation::get().into(),
+	};
+}
+
+/// Type for specifying how a `MultiLocation` can be converted into an `AccountId`. This is used
+/// when determining ownership of accounts for asset transacting and when attempting to use XCM
+/// `Transact` in order to determine the dispatch Origin.
+pub type LocationToAccountId = (
+	// The parent (Relay-chain) origin converts to the parent `AccountId`.
+	ParentIsPreset<AccountId>,
+	// Sibling parachain origins convert to AccountId via the `ParaId::into`.
+	SiblingParachainConvertsVia<Sibling, AccountId>,
+	// Straight up local `AccountId32` origins just alias directly to `AccountId`.
+	AccountId32Aliases<RelayNetwork, AccountId>,
+);
+
+/// Means for transacting the native currency on this chain.
+pub type CurrencyTransactor = CurrencyAdapter<
+	// Use this currency:
+	Balances,
+	// Use this currency when it is a fungible asset matching the given location or name:
+	IsConcrete<WndLocation>,
+	// Convert an XCM MultiLocation into a local account id:
+	LocationToAccountId,
+	// Our chain's account ID type (we can't get away without mentioning it explicitly):
+	AccountId,
+	// We don't track any teleports of `Balances`.
+	(),
+>;
+
+/// This is the type we use to convert an (incoming) XCM origin into a local `Origin` instance,
+/// ready for dispatching a transaction with Xcm's `Transact`. There is an `OriginKind` which can
+/// bias the kind of local `Origin` it will become.
+pub type XcmOriginToTransactDispatchOrigin = (
+	// Sovereign account converter; this attempts to derive an `AccountId` from the origin location
+	// using `LocationToAccountId` and then turn that into the usual `Signed` origin. Useful for
+	// foreign chains who want to have a local sovereign account on this chain which they control.
+	SovereignSignedViaLocation<LocationToAccountId, RuntimeOrigin>,
+	// Native converter for Relay-chain (Parent) location; will convert to a `Relay` origin when
+	// recognised.
+	RelayChainAsNative<RelayChainOrigin, RuntimeOrigin>,
+	// Native converter for sibling Parachains; will convert to a `SiblingPara` origin when
+	// recognised.
+	SiblingParachainAsNative<cumulus_pallet_xcm::Origin, RuntimeOrigin>,
+	// Superuser converter for the Relay-chain (Parent) location. This will allow it to issue a
+	// transaction from the Root origin.
+	ParentAsSuperuser<RuntimeOrigin>,
+	// Native signed account converter; this just converts an `AccountId32` origin into a normal
+	// `RuntimeOrigin::Signed` origin of the same 32-byte value.
+	SignedAccountId32AsNative<RelayNetwork, RuntimeOrigin>,
+	// Xcm origins can be represented natively under the Xcm pallet's Xcm origin.
+	XcmPassthrough<RuntimeOrigin>,
+);
+
+parameter_types! {
+	/// The amount of weight an XCM operation takes. This is a safe overestimate.
+	pub const BaseXcmWeight: Weight = Weight::from_parts(1_000_000_000, 1024);
+	/// A temporary weight value for each XCM instruction.
+	/// NOTE: This should be removed after we account for PoV weights.
+	pub const TempFixedXcmWeight: Weight = Weight::from_parts(1_000_000_000, 0);
+	pub const MaxInstructions: u32 = 100;
+	pub const MaxAssetsIntoHolding: u32 = 64;
+	// Fellows pluralistic body.
+	pub const FellowsBodyId: BodyId = BodyId::Technical;
+}
+
+match_types! {
+	pub type ParentOrParentsPlurality: impl Contains<MultiLocation> = {
+		MultiLocation { parents: 1, interior: Here } |
+		MultiLocation { parents: 1, interior: X1(Plurality { .. }) }
+	};
+	pub type ParentOrSiblings: impl Contains<MultiLocation> = {
+		MultiLocation { parents: 1, interior: Here } |
+		MultiLocation { parents: 1, interior: X1(_) }
+	};
+}
+
+/// A call filter for the XCM Transact instruction. This is a temporary measure until we properly
+/// account for proof size weights.
+///
+/// Calls that are allowed through this filter must:
+/// 1. Have a fixed weight;
+/// 2. Not lead to another call being made;
+/// 3. Have a defined proof size weight, e.g. no unbounded vecs in call parameters.
+pub struct SafeCallFilter;
+impl Contains<RuntimeCall> for SafeCallFilter {
+	fn contains(call: &RuntimeCall) -> bool {
+		#[cfg(feature = "runtime-benchmarks")]
+		{
+			if matches!(call, RuntimeCall::System(frame_system::Call::remark_with_event { .. })) {
+				return true
+			}
+		}
+
+		matches!(
+			call,
+			RuntimeCall::System(
+				frame_system::Call::set_heap_pages { .. } |
+					frame_system::Call::set_code { .. } |
+					frame_system::Call::set_code_without_checks { .. } |
+					frame_system::Call::kill_prefix { .. },
+			) | RuntimeCall::ParachainSystem(..) |
+				RuntimeCall::Timestamp(..) |
+				RuntimeCall::Balances(..) |
+				RuntimeCall::CollatorSelection(
+					pallet_collator_selection::Call::set_desired_candidates { .. } |
+						pallet_collator_selection::Call::set_candidacy_bond { .. } |
+						pallet_collator_selection::Call::register_as_candidate { .. } |
+						pallet_collator_selection::Call::leave_intent { .. } |
+						pallet_collator_selection::Call::set_invulnerables { .. } |
+						pallet_collator_selection::Call::add_invulnerable { .. } |
+						pallet_collator_selection::Call::remove_invulnerable { .. },
+				) | RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) |
+				RuntimeCall::PolkadotXcm(
+					pallet_xcm::Call::force_xcm_version { .. } |
+						pallet_xcm::Call::force_default_xcm_version { .. }
+				) | RuntimeCall::XcmpQueue(..) |
+				RuntimeCall::MessageQueue(..) |
+				RuntimeCall::Alliance(
+					// `init_members` accepts unbounded vecs as arguments,
+					// but the call can be initiated only by root origin.
+					pallet_alliance::Call::init_members { .. } |
+						pallet_alliance::Call::vote { .. } |
+						pallet_alliance::Call::disband { .. } |
+						pallet_alliance::Call::set_rule { .. } |
+						pallet_alliance::Call::announce { .. } |
+						pallet_alliance::Call::remove_announcement { .. } |
+						pallet_alliance::Call::join_alliance { .. } |
+						pallet_alliance::Call::nominate_ally { .. } |
+						pallet_alliance::Call::elevate_ally { .. } |
+						pallet_alliance::Call::give_retirement_notice { .. } |
+						pallet_alliance::Call::retire { .. } |
+						pallet_alliance::Call::kick_member { .. } |
+						pallet_alliance::Call::close { .. } |
+						pallet_alliance::Call::abdicate_fellow_status { .. },
+				) | RuntimeCall::AllianceMotion(
+					pallet_collective::Call::vote { .. } |
+						pallet_collective::Call::disapprove_proposal { .. } |
+						pallet_collective::Call::close { .. },
+				) | RuntimeCall::FellowshipCollective(
+					pallet_ranked_collective::Call::add_member { .. } |
+						pallet_ranked_collective::Call::promote_member { .. } |
+						pallet_ranked_collective::Call::demote_member { .. } |
+						pallet_ranked_collective::Call::remove_member { .. },
+				) | RuntimeCall::FellowshipCore(
+					pallet_core_fellowship::Call::bump { .. } |
+						pallet_core_fellowship::Call::set_params { .. } |
+						pallet_core_fellowship::Call::set_active { .. } |
+						pallet_core_fellowship::Call::approve { .. } |
+						pallet_core_fellowship::Call::induct { .. } |
+						pallet_core_fellowship::Call::promote { .. } |
+						pallet_core_fellowship::Call::offboard { .. } |
+						pallet_core_fellowship::Call::submit_evidence { .. } |
+						pallet_core_fellowship::Call::import { .. },
+				)
+		)
+	}
+}
+
+pub type Barrier = TrailingSetTopicAsId<
+	DenyThenTry<
+		DenyReserveTransferToRelayChain,
+		(
+			// Allow local users to buy weight credit.
+			TakeWeightCredit,
+			// Expected responses are OK.
+			AllowKnownQueryResponses<PolkadotXcm>,
+			// Allow XCMs with some computed origins to pass through.
+			WithComputedOrigin<
+				(
+					// If the message is one that immediately attempts to pay for execution, then
+					// allow it.
+					AllowTopLevelPaidExecutionFrom<Everything>,
+					// Parent and its pluralities (i.e. governance bodies) get free execution.
+					AllowExplicitUnpaidExecutionFrom<ParentOrParentsPlurality>,
+					// Subscriptions for version tracking are OK.
+					AllowSubscriptionsFrom<ParentOrSiblings>,
+				),
+				UniversalLocation,
+				ConstU32<8>,
+			>,
+		),
+	>,
+>;
+
+match_types! {
+	pub type SystemParachains: impl Contains<MultiLocation> = {
+		MultiLocation {
+			parents: 1,
+			interior: X1(Parachain(
+				system_parachain::ASSET_HUB_ID |
+				system_parachain::BRIDGE_HUB_ID |
+				system_parachain::COLLECTIVES_ID
+			)),
+		}
+	};
+}
+
+/// Locations that will not be charged fees in the executor,
+/// either execution or delivery.
+/// We only waive fees for system functions, which these locations represent.
+pub type WaivedLocations =
+	(RelayOrOtherSystemParachains<SystemParachains, Runtime>, Equals<RelayTreasuryLocation>);
+
+/// Cases where a remote origin is accepted as trusted Teleporter for a given asset:
+/// - WND with the parent Relay Chain and sibling parachains.
+pub type TrustedTeleporters = ConcreteAssetFromSystem<WndLocation>;
+
+pub struct XcmConfig;
+impl xcm_executor::Config for XcmConfig {
+	type RuntimeCall = RuntimeCall;
+	type XcmSender = XcmRouter;
+	type AssetTransactor = CurrencyTransactor;
+	type OriginConverter = XcmOriginToTransactDispatchOrigin;
+	// Collectives does not recognize a reserve location for any asset. Users must teleport WND
+	// where allowed (e.g. with the Relay Chain).
+	type IsReserve = ();
+	type IsTeleporter = TrustedTeleporters;
+	type UniversalLocation = UniversalLocation;
+	type Barrier = Barrier;
+	type Weigher = FixedWeightBounds<TempFixedXcmWeight, RuntimeCall, MaxInstructions>;
+	type Trader =
+		UsingComponents<WeightToFee, WndLocation, AccountId, Balances, ToStakingPot<Runtime>>;
+	type ResponseHandler = PolkadotXcm;
+	type AssetTrap = PolkadotXcm;
+	type AssetClaims = PolkadotXcm;
+	type SubscriptionService = PolkadotXcm;
+	type PalletInstancesInfo = AllPalletsWithSystem;
+	type MaxAssetsIntoHolding = MaxAssetsIntoHolding;
+	type AssetLocker = ();
+	type AssetExchanger = ();
+	type FeeManager = XcmFeeManagerFromComponents<
+		WaivedLocations,
+		XcmFeeToAccount<Self::AssetTransactor, AccountId, WestendTreasuryAccount>,
+	>;
+	type MessageExporter = ();
+	type UniversalAliases = Nothing;
+	type CallDispatcher = WithOriginFilter<SafeCallFilter>;
+	type SafeCallFilter = SafeCallFilter;
+	type Aliasers = Nothing;
+}
+
+/// Converts a local signed origin into an XCM multilocation.
+/// Forms the basis for local origins sending/executing XCMs.
+pub type LocalOriginToLocation = SignedToAccountId32<RuntimeOrigin, AccountId, RelayNetwork>;
+
+pub type PriceForParentDelivery =
+	ExponentialPrice<FeeAssetId, BaseDeliveryFee, TransactionByteFee, ParachainSystem>;
+
+/// The means for routing XCM messages which are not for local execution into the right message
+/// queues.
+pub type XcmRouter = WithUniqueTopic<(
+	// Two routers - use UMP to communicate with the relay chain:
+	cumulus_primitives_utility::ParentAsUmp<ParachainSystem, PolkadotXcm, PriceForParentDelivery>,
+	// ..and XCMP to communicate with the sibling chains.
+	XcmpQueue,
+)>;
+
+#[cfg(feature = "runtime-benchmarks")]
+parameter_types! {
+	pub ReachableDest: Option<MultiLocation> = Some(Parent.into());
+}
+
+/// Type to convert the Fellows origin to a Plurality `MultiLocation` value.
+pub type FellowsToPlurality = OriginToPluralityVoice<RuntimeOrigin, Fellows, FellowsBodyId>;
+
+impl pallet_xcm::Config for Runtime {
+	type RuntimeEvent = RuntimeEvent;
+	// We only allow the Fellows to send messages.
+	type SendXcmOrigin = EnsureXcmOrigin<RuntimeOrigin, FellowsToPlurality>;
+	type XcmRouter = XcmRouter;
+	// We support local origins dispatching XCM executions in principle...
+	type ExecuteXcmOrigin = EnsureXcmOrigin<RuntimeOrigin, LocalOriginToLocation>;
+	// ... but disallow generic XCM execution. As a result only teleports are allowed.
+	type XcmExecuteFilter = Nothing;
+	type XcmExecutor = XcmExecutor<XcmConfig>;
+	type XcmTeleportFilter = Everything;
+	type XcmReserveTransferFilter = Nothing; // This parachain is not meant as a reserve location.
+	type Weigher = FixedWeightBounds<BaseXcmWeight, RuntimeCall, MaxInstructions>;
+	type UniversalLocation = UniversalLocation;
+	type RuntimeOrigin = RuntimeOrigin;
+	type RuntimeCall = RuntimeCall;
+	const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100;
+	type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion;
+	type Currency = Balances;
+	type CurrencyMatcher = ();
+	type TrustedLockers = ();
+	type SovereignAccountOf = LocationToAccountId;
+	type MaxLockers = ConstU32<8>;
+	type WeightInfo = crate::weights::pallet_xcm::WeightInfo<Runtime>;
+	type AdminOrigin = EnsureRoot<AccountId>;
+	type MaxRemoteLockConsumers = ConstU32<0>;
+	type RemoteLockConsumerIdentifier = ();
+}
+
+impl cumulus_pallet_xcm::Config for Runtime {
+	type RuntimeEvent = RuntimeEvent;
+	type XcmExecutor = XcmExecutor<XcmConfig>;
+}
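To make the barrier and plurality wiring above concrete, here is a small, self-contained sketch (illustrative only, not part of the patch; the helper name is ours) that reproduces the `ParentOrParentsPlurality` pattern and checks it against the Relay Chain, a Fellows-style plurality location, and an ordinary sibling parachain. Only the `xcm` crate (package `staging-xcm`) is assumed as a dependency:

use xcm::latest::prelude::*;

/// Mirrors the `ParentOrParentsPlurality` matcher: the Relay Chain itself, or a
/// plurality (governance body) sitting on the Relay Chain.
fn is_parent_or_parents_plurality(loc: &MultiLocation) -> bool {
    matches!(
        loc,
        MultiLocation { parents: 1, interior: Here } |
            MultiLocation { parents: 1, interior: X1(Plurality { .. }) }
    )
}

fn main() {
    let relay = MultiLocation::parent();
    let fellows = MultiLocation {
        parents: 1,
        interior: X1(Plurality { id: BodyId::Technical, part: BodyPart::Voice }),
    };
    let sibling_para = MultiLocation { parents: 1, interior: X1(Parachain(1000)) };

    // The first two would be granted free execution by the barrier; the sibling
    // parachain would have to pay for execution instead.
    assert!(is_parent_or_parents_plurality(&relay));
    assert!(is_parent_or_parents_plurality(&fellows));
    assert!(!is_parent_or_parents_plurality(&sibling_para));
    println!("barrier pattern check passed");
}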
+ type Weigher = FixedWeightBounds; + type UniversalLocation = UniversalLocation; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100; + type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion; + type Currency = Balances; + type CurrencyMatcher = (); + type TrustedLockers = (); + type SovereignAccountOf = LocationToAccountId; + type MaxLockers = ConstU32<8>; + type WeightInfo = crate::weights::pallet_xcm::WeightInfo; + type AdminOrigin = EnsureRoot; + type MaxRemoteLockConsumers = ConstU32<0>; + type RemoteLockConsumerIdentifier = (); +} + +impl cumulus_pallet_xcm::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type XcmExecutor = XcmExecutor; +} diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml index 414c7ad9d9423ec880cabe582eb25ec35bd92f10..eded360436b1d2c02a749d263f8d2d2118368fb9 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml @@ -1,8 +1,10 @@ [package] name = "contracts-rococo-runtime" version = "0.2.0" +description = "Parachain testnet runtime for FRAME Contracts pallet." authors.workspace = true edition.workspace = true +license = "Apache-2.0" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -14,7 +16,7 @@ substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } hex-literal = { version = "0.4.1", optional = true } log = { version = "0.4.20", default-features = false } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } smallvec = "1.11.0" # Substrate @@ -53,16 +55,18 @@ pallet-contracts = { path = "../../../../../substrate/frame/contracts", default- pallet-contracts-primitives = { path = "../../../../../substrate/frame/contracts/primitives", default-features = false} # Polkadot -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false} -polkadot-core-primitives = { path = "../../../../../polkadot/core-primitives", default-features = false} -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false} -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false} -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} +pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } +polkadot-core-primitives = { path = "../../../../../polkadot/core-primitives", default-features = false } +polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false } +polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false } +rococo-runtime-constants = { path = "../../../../../polkadot/runtime/rococo/constants", default-features = false } +xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } 
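Aside (not part of this patch): the XCM configs above use `FixedWeightBounds` as their `Weigher`, whose type parameters (a per-instruction weight and an instruction cap) are elided in the excerpt. As a rough, hedged mental model, such a weigher charges a flat weight per XCM instruction and refuses messages with too many instructions; the simplified helper below is an illustration only, not the `xcm_builder` implementation.

```rust
// Rough model of a fixed-per-instruction weigher; the real `FixedWeightBounds`
// in `xcm_builder` walks `Xcm` messages recursively and returns a full `Weight`.
fn approximate_xcm_weight(
    unit_ref_time: u64,     // assumed per-instruction weight (ref_time only, for simplicity)
    instructions: u64,      // number of instructions in the message
    max_instructions: u64,  // cap above which the weigher refuses the message
) -> Option<u64> {
    if instructions > max_instructions {
        return None;
    }
    unit_ref_time.checked_mul(instructions)
}
```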
+xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } +pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue", default-features = false } cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } @@ -71,7 +75,7 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachain-info = { path = "../../../pallets/parachain-info", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } [features] @@ -101,6 +105,7 @@ std = [ "pallet-contracts-primitives/std", "pallet-contracts/std", "pallet-insecure-randomness-collective-flip/std", + "pallet-message-queue/std", "pallet-multisig/std", "pallet-session/std", "pallet-sudo/std", @@ -114,6 +119,7 @@ std = [ "polkadot-core-primitives/std", "polkadot-parachain-primitives/std", "polkadot-runtime-common/std", + "rococo-runtime-constants/std", "scale-info/std", "sp-api/std", "sp-block-builder/std", @@ -135,9 +141,11 @@ std = [ ] runtime-benchmarks = [ + "cumulus-pallet-dmp-queue/runtime-benchmarks", "cumulus-pallet-parachain-system/runtime-benchmarks", "cumulus-pallet-session-benchmarking/runtime-benchmarks", "cumulus-pallet-xcmp-queue/runtime-benchmarks", + "cumulus-primitives-core/runtime-benchmarks", "cumulus-primitives-utility/runtime-benchmarks", "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", @@ -147,6 +155,7 @@ runtime-benchmarks = [ "pallet-balances/runtime-benchmarks", "pallet-collator-selection/runtime-benchmarks", "pallet-contracts/runtime-benchmarks", + "pallet-message-queue/runtime-benchmarks", "pallet-multisig/runtime-benchmarks", "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", @@ -176,6 +185,7 @@ try-runtime = [ "pallet-collator-selection/try-runtime", "pallet-contracts/try-runtime", "pallet-insecure-randomness-collective-flip/try-runtime", + "pallet-message-queue/try-runtime", "pallet-multisig/try-runtime", "pallet-session/try-runtime", "pallet-sudo/try-runtime", @@ -189,3 +199,8 @@ try-runtime = [ ] experimental = [ "pallet-aura/experimental" ] + +# A feature that should be enabled when the runtime should be built for on-chain +# deployment. This will disable stuff that shouldn't be part of the on-chain wasm +# to make it smaller like logging for example. 
+on-chain-release-build = [ "sp-api/disable-logging" ] diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs index 1c99393d5e52fccf427b40f232d55a833d082c9e..6c100deaa9e44e87e259bc141b082c90bd0eb7ca 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs @@ -22,9 +22,7 @@ use frame_support::{ traits::{ConstBool, ConstU32, Nothing}, }; use pallet_contracts::{ - migration::{v12, v13, v14, v15}, - weights::SubstrateWeight, - Config, DebugInfo, DefaultAddressGenerator, Frame, Schedule, + weights::SubstrateWeight, Config, DebugInfo, DefaultAddressGenerator, Frame, Schedule, }; use sp_runtime::Perbill; @@ -70,13 +68,9 @@ impl Config for Runtime { type MaxDebugBufferLen = ConstU32<{ 2 * 1024 * 1024 }>; type MaxDelegateDependencies = ConstU32<32>; type CodeHashLockupDepositPercent = CodeHashLockupDepositPercent; - type Migrations = ( - v12::Migration, - v13::Migration, - v14::Migration, - v15::Migration, - ); + type Migrations = (); type RuntimeHoldReason = RuntimeHoldReason; type Debug = (); type Environment = (); + type Xcm = pallet_xcm::Pallet; } diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index 70392c5ecbcc2baf012fb9925ff405a7636fa1db..9d6a53c5ed34284f766ef63d8da5eaf1f4cde9b0 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -30,13 +30,14 @@ mod weights; mod xcm_config; use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; +use cumulus_primitives_core::AggregateMessageOrigin; use sp_api::impl_runtime_apis; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, traits::{AccountIdLookup, BlakeTwo256, Block as BlockT}, transaction_validity::{TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, + ApplyExtrinsicResult, Perbill, }; use sp_std::prelude::*; @@ -57,6 +58,7 @@ use frame_system::limits::{BlockLength, BlockWeights}; pub use parachains_common as common; use parachains_common::{ impls::DealWithFees, + message_queue::*, rococo::{consensus::*, currency::*, fee::WeightToFee}, AccountId, BlockNumber, Hash, Header, Nonce, Signature, AVERAGE_ON_INITIALIZE_RATIO, MAXIMUM_BLOCK_WEIGHT, MINUTES, NORMAL_DISPATCH_RATIO, SLOT_DURATION, @@ -97,10 +99,10 @@ pub type UncheckedExtrinsic = /// Migrations to apply on runtime upgrade. 
pub type Migrations = ( - cumulus_pallet_dmp_queue::migration::Migration, cumulus_pallet_parachain_system::migration::Migration, - cumulus_pallet_xcmp_queue::migration::Migration, + cumulus_pallet_xcmp_queue::migration::MigrationToV3, pallet_contracts::Migration, + // unreleased ); type EventRecord = frame_system::EventRecord< @@ -129,7 +131,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("contracts-rococo"), impl_name: create_runtime_str!("contracts-rococo"), authoring_version: 1, - spec_version: 10000, + spec_version: 10001, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 6, @@ -219,18 +221,23 @@ impl pallet_balances::Config for Runtime { type MaxReserves = ConstU32<50>; type ReserveIdentifier = [u8; 8]; type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); type MaxHolds = ConstU32<1>; type MaxFreezes = ConstU32<0>; } +parameter_types! { + pub const TransactionByteFee: Balance = MILLICENTS; +} + impl pallet_transaction_payment::Config for Runtime { type RuntimeEvent = RuntimeEvent; type OnChargeTransaction = pallet_transaction_payment::CurrencyAdapter>; type WeightToFee = WeightToFee; /// Relay Chain `TransactionByteFee` / 10 - type LengthToFee = ConstantMultiplier>; + type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; type OperationalFeeMultiplier = ConstU8<5>; } @@ -262,13 +269,15 @@ impl pallet_utility::Config for Runtime { parameter_types! { pub const ReservedDmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); pub const ReservedXcmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); + pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; } impl cumulus_pallet_parachain_system::Config for Runtime { + type WeightInfo = (); type RuntimeEvent = RuntimeEvent; type OnSystemEvent = (); type SelfParaId = parachain_info::Pallet; - type DmpMessageHandler = DmpQueue; + type DmpQueue = frame_support::traits::EnqueueWithOrigin; type ReservedDmpWeight = ReservedDmpWeight; type OutboundXcmpMessageSource = XcmpQueue; type XcmpMessageHandler = XcmpQueue; @@ -286,6 +295,32 @@ impl pallet_insecure_randomness_collective_flip::Config for Runtime {} impl parachain_info::Config for Runtime {} +parameter_types! { + pub MessageQueueServiceWeight: Weight = Perbill::from_percent(35) * RuntimeBlockWeights::get().max_block; +} + +impl pallet_message_queue::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = (); + #[cfg(feature = "runtime-benchmarks")] + type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor< + cumulus_primitives_core::AggregateMessageOrigin, + >; + #[cfg(not(feature = "runtime-benchmarks"))] + type MessageProcessor = xcm_builder::ProcessXcmMessage< + AggregateMessageOrigin, + xcm_executor::XcmExecutor, + RuntimeCall, + >; + type Size = u32; + // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: + type QueueChangeHandler = NarrowOriginToSibling; + type QueuePausedQuery = NarrowOriginToSibling; + type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type MaxStale = sp_core::ConstU32<8>; + type ServiceWeight = MessageQueueServiceWeight; +} + impl cumulus_pallet_aura_ext::Config for Runtime {} parameter_types! 
{ @@ -370,7 +405,7 @@ construct_runtime!( XcmpQueue: cumulus_pallet_xcmp_queue::{Pallet, Call, Storage, Event} = 30, PolkadotXcm: pallet_xcm::{Pallet, Call, Storage, Event, Origin, Config} = 31, CumulusXcm: cumulus_pallet_xcm::{Pallet, Event, Origin} = 32, - DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event} = 33, + MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 34, // Smart Contracts. Contracts: pallet_contracts::{Pallet, Call, Storage, Event, HoldReason} = 40, @@ -384,15 +419,12 @@ construct_runtime!( } ); -#[cfg(feature = "runtime-benchmarks")] -#[macro_use] -extern crate frame_benchmarking; - #[cfg(feature = "runtime-benchmarks")] mod benches { - define_benchmarks!( + frame_benchmarking::define_benchmarks!( [frame_system, SystemBench::] [pallet_balances, Balances] + [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] [pallet_session, SessionBench::] [pallet_utility, Utility] @@ -400,7 +432,7 @@ mod benches { [pallet_timestamp, Timestamp] [pallet_collator_selection, CollatorSelection] [pallet_contracts, Contracts] - [pallet_xcm, PolkadotXcm] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] ); } @@ -645,6 +677,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; let mut list = Vec::::new(); list_benchmarks!(list, extra); @@ -674,6 +707,30 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} + use xcm::latest::prelude::*; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported between Contracts-System-Para and Relay. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + Parent.into(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Reserve transfers are disabled on Contracts-System-Para. 
+ None + } + } + let whitelist: Vec = vec![ // Block Number hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs index 7433b8e94d6c1e9e343c7b90f0412826d667cc6d..2ac93aed3f8d24eb1c3c0a7b6fd028620553484f 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs @@ -15,16 +15,25 @@ use super::{ AccountId, AllPalletsWithSystem, Balances, ParachainInfo, ParachainSystem, PolkadotXcm, - Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, WeightToFee, XcmpQueue, + Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, TransactionByteFee, WeightToFee, XcmpQueue, }; +use crate::common::rococo::currency::CENTS; +use cumulus_primitives_core::AggregateMessageOrigin; use frame_support::{ match_types, parameter_types, - traits::{ConstU32, EitherOfDiverse, Everything, Nothing}, + traits::{ConstU32, EitherOfDiverse, Equals, Everything, Nothing}, weights::Weight, }; use frame_system::EnsureRoot; use pallet_xcm::{EnsureXcm, IsMajorityOfBody, XcmPassthrough}; +use parachains_common::{ + xcm_config::{ConcreteAssetFromSystem, RelayOrOtherSystemParachains}, + TREASURY_PALLET_ID, +}; use polkadot_parachain_primitives::primitives::Sibling; +use polkadot_runtime_common::xcm_sender::ExponentialPrice; +use rococo_runtime_constants::system_parachain; +use sp_runtime::traits::AccountIdConversion; use xcm::latest::prelude::*; use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, @@ -33,7 +42,7 @@ use xcm_builder::{ NativeAsset, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, - WithComputedOrigin, WithUniqueTopic, + WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, XcmFeeToAccount, }; use xcm_executor::XcmExecutor; @@ -43,7 +52,8 @@ parameter_types! { pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); pub UniversalLocation: InteriorMultiLocation = Parachain(ParachainInfo::parachain_id().into()).into(); pub const ExecutiveBody: BodyId = BodyId::Executive; - pub CheckingAccount: AccountId = PolkadotXcm::check_account(); + pub TreasuryAccount: AccountId = TREASURY_PALLET_ID.into_account_truncating(); + pub RelayTreasuryLocation: MultiLocation = (Parent, PalletInstance(rococo_runtime_constants::TREASURY_PALLET_ID)).into(); } /// We allow root and the Relay Chain council to execute privileged collator selection operations. @@ -129,11 +139,15 @@ pub type Barrier = TrailingSetTopicAsId< // Allow XCMs with some computed origins to pass through. WithComputedOrigin< ( - // If the message is one that immediately attemps to pay for execution, then + // If the message is one that immediately attempts to pay for execution, then // allow it. AllowTopLevelPaidExecutionFrom, - // Parent and its pluralities (i.e. governance bodies) get free execution. - AllowExplicitUnpaidExecutionFrom, + // Parent, its pluralities (i.e. governance bodies) and relay treasury pallet + // get free execution. + AllowExplicitUnpaidExecutionFrom<( + ParentOrParentsPlurality, + Equals, + )>, // Subscriptions for version tracking are OK. 
AllowSubscriptionsFrom, ), @@ -144,6 +158,28 @@ pub type Barrier = TrailingSetTopicAsId< >, >; +match_types! { + pub type SystemParachains: impl Contains = { + MultiLocation { + parents: 1, + interior: X1(Parachain( + system_parachain::ASSET_HUB_ID | + system_parachain::BRIDGE_HUB_ID | + system_parachain::CONTRACTS_ID | + system_parachain::ENCOINTER_ID + )), + } + }; +} + +/// Locations that will not be charged fees in the executor, +/// either execution or delivery. +/// We only waive fees for system functions, which these locations represent. +pub type WaivedLocations = + (RelayOrOtherSystemParachains, Equals); + +pub type TrustedTeleporter = ConcreteAssetFromSystem; + pub struct XcmConfig; impl xcm_executor::Config for XcmConfig { type RuntimeCall = RuntimeCall; @@ -151,7 +187,7 @@ impl xcm_executor::Config for XcmConfig { type AssetTransactor = CurrencyTransactor; type OriginConverter = XcmOriginToTransactDispatchOrigin; type IsReserve = NativeAsset; - type IsTeleporter = NativeAsset; + type IsTeleporter = TrustedTeleporter; type UniversalLocation = UniversalLocation; type Barrier = Barrier; type Weigher = FixedWeightBounds; @@ -164,7 +200,10 @@ impl xcm_executor::Config for XcmConfig { type MaxAssetsIntoHolding = ConstU32<8>; type AssetLocker = (); type AssetExchanger = (); - type FeeManager = (); + type FeeManager = XcmFeeManagerFromComponents< + WaivedLocations, + XcmFeeToAccount, + >; type MessageExporter = (); type UniversalAliases = Nothing; type CallDispatcher = RuntimeCall; @@ -176,20 +215,18 @@ impl xcm_executor::Config for XcmConfig { /// Forms the basis for local origins sending/executing XCMs. pub type LocalOriginToLocation = SignedToAccountId32; +pub type PriceForParentDelivery = + ExponentialPrice; + /// The means for routing XCM messages which are not for local execution into the right message /// queues. pub type XcmRouter = WithUniqueTopic<( // Two routers - use UMP to communicate with the relay chain: - cumulus_primitives_utility::ParentAsUmp, + cumulus_primitives_utility::ParentAsUmp, // ..and XCMP to communicate with the sibling chains. XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; // We want to disallow users sending (arbitrary) XCMs from this chain. @@ -216,8 +253,6 @@ impl pallet_xcm::Config for Runtime { type MaxLockers = ConstU32<8>; // FIXME: Replace with benchmarked weight info type WeightInfo = pallet_xcm::TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); @@ -228,23 +263,44 @@ impl cumulus_pallet_xcm::Config for Runtime { type XcmExecutor = XcmExecutor; } +parameter_types! { + /// The asset ID for the asset that we use to pay for message delivery fees. + pub FeeAssetId: AssetId = Concrete(RelayLocation::get()); + /// The base fee for the message delivery fees. 
+ pub const BaseDeliveryFee: u128 = CENTS.saturating_mul(3); +} + +pub type PriceForSiblingParachainDelivery = polkadot_runtime_common::xcm_sender::ExponentialPrice< + FeeAssetId, + BaseDeliveryFee, + TransactionByteFee, + XcmpQueue, +>; + impl cumulus_pallet_xcmp_queue::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type XcmExecutor = XcmExecutor; type ChannelInfo = ParachainSystem; type VersionWrapper = PolkadotXcm; - type ExecuteOverweightOrigin = EnsureRoot; + // Enqueue XCMP messages from siblings for later processing. + #[cfg(feature = "runtime-benchmarks")] + type XcmpQueue = (); + #[cfg(not(feature = "runtime-benchmarks"))] + type XcmpQueue = frame_support::traits::TransformOrigin< + crate::MessageQueue, + AggregateMessageOrigin, + cumulus_primitives_core::ParaId, + parachains_common::message_queue::ParaIdToSibling, + >; + type MaxInboundSuspended = sp_core::ConstU32<1_000>; type ControllerOrigin = EitherOfDiverse< EnsureRoot, EnsureXcm>, >; type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; type WeightInfo = cumulus_pallet_xcmp_queue::weights::SubstrateWeight; - type PriceForSiblingDelivery = (); + type PriceForSiblingDelivery = PriceForSiblingParachainDelivery; } -impl cumulus_pallet_dmp_queue::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type XcmExecutor = XcmExecutor; - type ExecuteOverweightOrigin = EnsureRoot; +parameter_types! { + pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; } diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/Cargo.toml b/cumulus/parachains/runtimes/glutton/glutton-kusama/Cargo.toml index ad13bf05a3eccb750b922d0b20398e09cba7efbb..7f5feb1c880f82cdb32086e6c149b7c75f7ff8ea 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-kusama/Cargo.toml +++ b/cumulus/parachains/runtimes/glutton/glutton-kusama/Cargo.toml @@ -1,12 +1,14 @@ [package] name = "glutton-runtime" version = "1.0.0" +description = "Glutton parachain runtime." 
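Aside (not part of this patch): both `PriceForParentDelivery` and `PriceForSiblingParachainDelivery` above are built from `ExponentialPrice` using `FeeAssetId`, `BaseDeliveryFee` and `TransactionByteFee`. As a hedged sketch, the delivery fee can be thought of as a base fee plus a per-byte fee, scaled by a congestion factor reported by the transport (`ParachainSystem` or `XcmpQueue`); the factor grows while the channel is congested, which is what makes the price "exponential". The helper below uses simplified types for illustration and is not the implementation from `polkadot_runtime_common::xcm_sender`.

```rust
// Illustrative delivery-fee shape: (base + len * per_byte) scaled by a congestion factor,
// expressed here as a percentage where 100 means "no congestion".
fn approximate_delivery_fee(
    base: u128,
    per_byte: u128,
    encoded_len: u128,
    congestion_factor_percent: u128,
) -> u128 {
    let linear = base.saturating_add(encoded_len.saturating_mul(per_byte));
    linear.saturating_mul(congestion_factor_percent) / 100
}
```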
authors.workspace = true edition.workspace = true +license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } # Substrate frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true} @@ -26,6 +28,7 @@ sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} +pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} sp-session = { path = "../../../../../substrate/primitives/session", default-features = false} @@ -46,7 +49,7 @@ cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = fals cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-timestamp = { path = "../../../../primitives/timestamp", default-features = false } -parachain-info = { path = "../../../pallets/parachain-info", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } [build-dependencies] @@ -56,12 +59,14 @@ substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder" default = [ "std" ] runtime-benchmarks = [ "cumulus-pallet-parachain-system/runtime-benchmarks", + "cumulus-primitives-core/runtime-benchmarks", "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system-benchmarking/runtime-benchmarks", "frame-system/runtime-benchmarks", "pallet-glutton/runtime-benchmarks", - "pallet-sudo?/runtime-benchmarks", + "pallet-message-queue/runtime-benchmarks", + "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "parachains-common/runtime-benchmarks", "sp-runtime/runtime-benchmarks", @@ -85,6 +90,7 @@ std = [ "frame-try-runtime?/std", "pallet-aura/std", "pallet-glutton/std", + "pallet-message-queue/std", "pallet-sudo/std", "pallet-timestamp/std", "parachain-info/std", @@ -117,6 +123,7 @@ try-runtime = [ "frame-try-runtime/try-runtime", "pallet-aura/try-runtime", "pallet-glutton/try-runtime", + "pallet-message-queue/try-runtime", "pallet-sudo/try-runtime", "pallet-timestamp/try-runtime", "parachain-info/try-runtime", @@ -124,3 +131,8 @@ try-runtime = [ ] experimental = [ "pallet-aura/experimental" ] + +# A feature that should be enabled when the runtime should be built for on-chain +# deployment. This will disable stuff that shouldn't be part of the on-chain wasm +# to make it smaller like logging for example. 
+on-chain-release-build = [ "sp-api/disable-logging" ] diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-kusama/src/lib.rs index f5d52239e54374a0274289a896a5c3c472c4e615..60a5d004e6c1c3d2e99d590423f58d357fe95289 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-kusama/src/lib.rs @@ -61,6 +61,7 @@ use sp_std::prelude::*; use sp_version::NativeVersion; use sp_version::RuntimeVersion; +use cumulus_primitives_core::AggregateMessageOrigin; pub use frame_support::{ construct_runtime, dispatch::DispatchClass, @@ -195,6 +196,7 @@ impl frame_system::Config for Runtime { parameter_types! { // We do anything the parent chain tells us in this runtime. pub const ReservedDmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(2); + pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< @@ -208,13 +210,40 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type RuntimeEvent = RuntimeEvent; type OnSystemEvent = (); type SelfParaId = parachain_info::Pallet; + type DmpQueue = frame_support::traits::EnqueueWithOrigin; type OutboundXcmpMessageSource = (); - type DmpMessageHandler = cumulus_pallet_xcm::UnlimitedDmpExecution; type ReservedDmpWeight = ReservedDmpWeight; type XcmpMessageHandler = (); type ReservedXcmpWeight = (); type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; + type WeightInfo = weights::cumulus_pallet_parachain_system::WeightInfo; +} + +parameter_types! { + pub MessageQueueServiceWeight: Weight = Perbill::from_percent(80) * + RuntimeBlockWeights::get().max_block; +} + +impl pallet_message_queue::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = weights::pallet_message_queue::WeightInfo; + #[cfg(feature = "runtime-benchmarks")] + type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor< + cumulus_primitives_core::AggregateMessageOrigin, + >; + #[cfg(not(feature = "runtime-benchmarks"))] + type MessageProcessor = xcm_builder::ProcessXcmMessage< + AggregateMessageOrigin, + xcm_executor::XcmExecutor, + RuntimeCall, + >; + type Size = u32; + type QueueChangeHandler = (); + type QueuePausedQuery = (); // No XCMP queue pallet deployed. + type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type MaxStale = sp_core::ConstU32<8>; + type ServiceWeight = MessageQueueServiceWeight; } impl parachain_info::Config for Runtime {} @@ -224,7 +253,10 @@ impl cumulus_pallet_aura_ext::Config for Runtime {} impl pallet_timestamp::Config for Runtime { type Moment = u64; type OnTimestampSet = Aura; + #[cfg(feature = "experimental")] type MinimumPeriod = ConstU64<0>; + #[cfg(not(feature = "experimental"))] + type MinimumPeriod = ConstU64<{ SLOT_DURATION / 2 }>; type WeightInfo = weights::pallet_timestamp::WeightInfo; } @@ -261,6 +293,7 @@ construct_runtime! { // DMP handler. CumulusXcm: cumulus_pallet_xcm::{Pallet, Call, Storage, Event, Origin} = 10, + MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 11, // The main stage. Glutton: pallet_glutton::{Pallet, Call, Storage, Event, Config} = 20, @@ -355,7 +388,7 @@ impl_runtime_apis! 
{ impl sp_consensus_aura::AuraApi for Runtime { fn slot_duration() -> sp_consensus_aura::SlotDuration { - sp_consensus_aura::SlotDuration::from_millis(SLOT_DURATION) + sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) } fn authorities() -> Vec { diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/cumulus_pallet_parachain_system.rs new file mode 100644 index 0000000000000000000000000000000000000000..f787aa3270118b87202bc78b58dcb8084d5f5a5b --- /dev/null +++ b/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/cumulus_pallet_parachain_system.rs @@ -0,0 +1,80 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `cumulus_pallet_parachain_system` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-03-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("statemint-dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --chain +// statemint-dev +// --pallet +// cumulus_pallet_parachain_system +// --extrinsic +// * +// --execution +// wasm +// --wasm-execution +// compiled +// --output +// parachains/runtimes/assets/statemint/src/weights +// --steps +// 50 +// --repeat +// 20 + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::Weight}; +use sp_std::marker::PhantomData; + +/// Weight functions for `cumulus_pallet_parachain_system`. 
+pub struct WeightInfo(PhantomData); +impl cumulus_pallet_parachain_system::WeightInfo for WeightInfo { + /// Storage: ParachainSystem LastDmqMqcHead (r:1 w:1) + /// Proof Skipped: ParachainSystem LastDmqMqcHead (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: ParachainSystem ReservedDmpWeightOverride (r:1 w:0) + /// Proof Skipped: ParachainSystem ReservedDmpWeightOverride (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: ParachainSystem ProcessedDownwardMessages (r:0 w:1) + /// Proof Skipped: ParachainSystem ProcessedDownwardMessages (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: MessageQueue Pages (r:0 w:16) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// The range of component `n` is `[0, 1000]`. + fn enqueue_inbound_downward_messages(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `12` + // Estimated: `8013` + // Minimum execution time: 1_660_000 picoseconds. + Weight::from_parts(1_720_000, 0) + .saturating_add(Weight::from_parts(0, 8013)) + // Standard Error: 28_418 + .saturating_add(Weight::from_parts(24_636_963, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } +} diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/frame_system.rs b/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/frame_system.rs index 36c4abc400634fd9ec857675031b11b3c33d3fd2..cf7ef948fd630f892122807250e6973a089955d2 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/frame_system.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/frame_system.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `frame_system` //! 
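Aside (not part of this patch): the generated `enqueue_inbound_downward_messages` weight above is linear in the number of downward messages `n`. The hedged sketch below shows how such a function composes its result; the constants are copied from the generated code, while the standalone helper and the explicit `db_read`/`db_write` parameters are illustrative (the real code takes them from `T::DbWeight`).

```rust
use frame_support::weights::Weight;

// Base execution time and proof-size estimate, plus a per-message component scaled by `n`,
// plus the benchmarked number of storage reads and writes.
fn enqueue_weight_example(n: u64, db_read: Weight, db_write: Weight) -> Weight {
    Weight::from_parts(1_720_000, 8013)
        .saturating_add(Weight::from_parts(24_636_963, 0).saturating_mul(n))
        .saturating_add(db_read.saturating_mul(4))
        .saturating_add(db_write.saturating_mul(4))
}
```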
diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/mod.rs b/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/mod.rs index 955010505d310cffe1660fcec0dace4c2d613181..47f9d1ee105e42fb61afcc5b4cb02a9dcda8e3ca 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/mod.rs @@ -1,18 +1,19 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +pub mod cumulus_pallet_parachain_system; pub mod pallet_glutton; +pub mod pallet_message_queue; pub mod pallet_timestamp; diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/pallet_glutton.rs b/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/pallet_glutton.rs index f278d246b33b1b4f0854b943fef8a1da22122e93..e1b0c5bf232e5ca8b2a4b534095f7b30917ca382 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/pallet_glutton.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/pallet_glutton.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. //! Autogenerated weights for `pallet_glutton` //! diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/pallet_message_queue.rs new file mode 100644 index 0000000000000000000000000000000000000000..a9f0cb07cfe1385d62df0f1e81cd1b03cc963f7e --- /dev/null +++ b/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/pallet_message_queue.rs @@ -0,0 +1,179 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_message_queue` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-03-24, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westmint-dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --chain +// westmint-dev +// --pallet +// pallet_message_queue +// --extrinsic +// * +// --execution +// wasm +// --wasm-execution +// compiled +// --output +// parachains/runtimes/assets/westmint/src/weights + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::Weight}; +use sp_std::marker::PhantomData; + +/// Weight functions for `pallet_message_queue`. +pub struct WeightInfo(PhantomData); +impl pallet_message_queue::WeightInfo for WeightInfo { + /// Storage: MessageQueue ServiceHead (r:1 w:0) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: MessageQueue BookStateFor (r:2 w:2) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn ready_ring_knit() -> Weight { + // Proof Size summary in bytes: + // Measured: `189` + // Estimated: `7534` + // Minimum execution time: 12_192_000 picoseconds. + Weight::from_parts(12_192_000, 0) + .saturating_add(Weight::from_parts(0, 7534)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: MessageQueue BookStateFor (r:2 w:2) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + fn ready_ring_unknit() -> Weight { + // Proof Size summary in bytes: + // Measured: `184` + // Estimated: `7534` + // Minimum execution time: 10_447_000 picoseconds. 
+ Weight::from_parts(10_447_000, 0) + .saturating_add(Weight::from_parts(0, 7534)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn service_queue_base() -> Weight { + // Proof Size summary in bytes: + // Measured: `6` + // Estimated: `3517` + // Minimum execution time: 4_851_000 picoseconds. + Weight::from_parts(4_851_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn service_page_base_completion() -> Weight { + // Proof Size summary in bytes: + // Measured: `72` + // Estimated: `69050` + // Minimum execution time: 6_342_000 picoseconds. + Weight::from_parts(6_342_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn service_page_base_no_completion() -> Weight { + // Proof Size summary in bytes: + // Measured: `72` + // Estimated: `69050` + // Minimum execution time: 6_199_000 picoseconds. + Weight::from_parts(6_199_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn service_page_item() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 58_612_000 picoseconds. + Weight::from_parts(58_612_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: MessageQueue BookStateFor (r:1 w:0) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn bump_service_head() -> Weight { + // Proof Size summary in bytes: + // Measured: `99` + // Estimated: `5007` + // Minimum execution time: 7_296_000 picoseconds. + Weight::from_parts(7_296_000, 0) + .saturating_add(Weight::from_parts(0, 5007)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn reap_page() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `72567` + // Minimum execution time: 48_345_000 picoseconds. 
+ Weight::from_parts(48_345_000, 0) + .saturating_add(Weight::from_parts(0, 72567)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn execute_overweight_page_removed() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `72567` + // Minimum execution time: 56_441_000 picoseconds. + Weight::from_parts(56_441_000, 0) + .saturating_add(Weight::from_parts(0, 72567)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn execute_overweight_page_updated() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `72567` + // Minimum execution time: 70_858_000 picoseconds. + Weight::from_parts(70_858_000, 0) + .saturating_add(Weight::from_parts(0, 72567)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml b/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..a30cdf35769460ea09fbeaf9432534f6e5908ac9 --- /dev/null +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml @@ -0,0 +1,138 @@ +[package] +name = "glutton-westend-runtime" +version = "1.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +description = "Glutton parachain runtime." 
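Aside (not part of this patch): the message-queue weights above are spent inside a per-block service budget. The runtimes in this patch set `ServiceWeight` to a fraction of the maximum block weight (35% for contracts-rococo, 80% for the Glutton runtimes); once that budget is exhausted, remaining messages stay queued for later blocks. The helper below is a hedged sketch of that budget calculation, not runtime code from this patch.

```rust
use frame_support::weights::Weight;
use sp_runtime::Perbill;

// E.g. `service_budget(max_block, Perbill::from_percent(35))` mirrors how
// `MessageQueueServiceWeight` is derived from the block weight limit.
fn service_budget(max_block: Weight, share: Perbill) -> Weight {
    share * max_block
}
```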
+ +[dependencies] +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } + +# Substrate +frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true} +frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} +frame-support = { path = "../../../../../substrate/frame/support", default-features = false} +frame-system = { path = "../../../../../substrate/frame/system", default-features = false} +frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false} +frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true} +frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true} +pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} +pallet-glutton = { path = "../../../../../substrate/frame/glutton", default-features = false, optional = true} +pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false, optional = true} +pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } +sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} +sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} +sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } +sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} +pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } +sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} +sp-session = { path = "../../../../../substrate/primitives/session", default-features = false} +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false} +sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false} +sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false} +sp-version = { path = "../../../../../substrate/primitives/version", default-features = false} + +# Polkadot +xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} +xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false} +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} + +# Cumulus +cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } +cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } 
+cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } +cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } +cumulus-primitives-timestamp = { path = "../../../../primitives/timestamp", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } +parachains-common = { path = "../../../common", default-features = false } + +[build-dependencies] +substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder" } + +[features] +default = [ "std" ] +runtime-benchmarks = [ + "cumulus-pallet-parachain-system/runtime-benchmarks", + "cumulus-primitives-core/runtime-benchmarks", + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system-benchmarking/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-glutton/runtime-benchmarks", + "pallet-message-queue/runtime-benchmarks", + "pallet-sudo/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks", + "parachains-common/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", + "xcm-builder/runtime-benchmarks", + "xcm-executor/runtime-benchmarks", +] +std = [ + "codec/std", + "cumulus-pallet-aura-ext/std", + "cumulus-pallet-parachain-system/std", + "cumulus-pallet-xcm/std", + "cumulus-primitives-aura/std", + "cumulus-primitives-core/std", + "cumulus-primitives-timestamp/std", + "frame-benchmarking?/std", + "frame-executive/std", + "frame-support/std", + "frame-system-benchmarking?/std", + "frame-system-rpc-runtime-api/std", + "frame-system/std", + "frame-try-runtime?/std", + "pallet-aura/std", + "pallet-glutton/std", + "pallet-message-queue/std", + "pallet-sudo/std", + "pallet-timestamp/std", + "parachain-info/std", + "parachains-common/std", + "scale-info/std", + "sp-api/std", + "sp-block-builder/std", + "sp-consensus-aura/std", + "sp-core/std", + "sp-genesis-builder/std", + "sp-inherents/std", + "sp-offchain/std", + "sp-runtime/std", + "sp-session/std", + "sp-std/std", + "sp-storage/std", + "sp-transaction-pool/std", + "sp-version/std", + "xcm-builder/std", + "xcm-executor/std", + "xcm/std", +] +try-runtime = [ + "cumulus-pallet-aura-ext/try-runtime", + "cumulus-pallet-parachain-system/try-runtime", + "cumulus-pallet-xcm/try-runtime", + "frame-executive/try-runtime", + "frame-support/try-runtime", + "frame-system/try-runtime", + "frame-try-runtime/try-runtime", + "pallet-aura/try-runtime", + "pallet-glutton/try-runtime", + "pallet-message-queue/try-runtime", + "pallet-sudo/try-runtime", + "pallet-timestamp/try-runtime", + "parachain-info/try-runtime", + "sp-runtime/try-runtime", +] + +experimental = [ "pallet-aura/experimental" ] + +# A feature that should be enabled when the runtime should be built for on-chain +# deployment. This will disable stuff that shouldn't be part of the on-chain wasm +# to make it smaller like logging for example. +on-chain-release-build = [ "sp-api/disable-logging" ] diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/build.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/build.rs new file mode 100644 index 0000000000000000000000000000000000000000..1580e6f07bec466c644ccab1f4591d384632135e --- /dev/null +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/build.rs @@ -0,0 +1,24 @@ +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use substrate_wasm_builder::WasmBuilder; + +fn main() { + WasmBuilder::new() + .with_current_project() + .export_heap_base() + .import_memory() + .build() +} diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..60107281c22f67da8bee659d9ac5f37b7ff7ce30 --- /dev/null +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs @@ -0,0 +1,532 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Glutton Westend Runtime +//! +//! The purpose of the Glutton parachain is to stress test the Kusama +//! network. This runtime targets the Westend runtime to allow development +//! separate from the Kusama runtime. +//! +//! There may be multiple instances of the Glutton parachain deployed and +//! connected to its parent relay chain. +//! +//! These parachains do not hold any real value. Their purpose is to stress +//! test the network. +//! +//! ### Governance +//! +//! Glutton defers its governance (namely, its `Root` origin) to its Relay +//! Chain parent, Kusama (or Westend for development purposes). +//! +//! ### XCM +//! +//! Since the main goal of Glutton is solely stress testing, the parachain will +//! only be able to receive XCM messages from the Relay Chain via DMP. This way the +//! Glutton parachains will be able to listen for upgrades that are coming from +//! the Relay Chain. + +#![cfg_attr(not(feature = "std"), no_std)] +#![recursion_limit = "256"] + +// Make the WASM binary available.
+#[cfg(feature = "std")] +include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); + +pub mod weights; +pub mod xcm_config; + +use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; +use sp_api::impl_runtime_apis; +pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; +use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; +use sp_runtime::{ + create_runtime_str, generic, impl_opaque_keys, + traits::{AccountIdLookup, BlakeTwo256, Block as BlockT}, + transaction_validity::{TransactionSource, TransactionValidity}, + ApplyExtrinsicResult, +}; +use sp_std::prelude::*; +#[cfg(feature = "std")] +use sp_version::NativeVersion; +use sp_version::RuntimeVersion; + +use cumulus_primitives_core::AggregateMessageOrigin; +pub use frame_support::{ + construct_runtime, + dispatch::DispatchClass, + genesis_builder_helper::{build_config, create_default_config}, + parameter_types, + traits::{ + ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, Everything, IsInVec, Randomness, + }, + weights::{ + constants::{ + BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_REF_TIME_PER_SECOND, + }, + IdentityFee, Weight, + }, + PalletId, StorageValue, +}; +use frame_system::{ + limits::{BlockLength, BlockWeights}, + EnsureRoot, +}; +use parachains_common::{AccountId, Signature}; +#[cfg(any(feature = "std", test))] +pub use sp_runtime::BuildStorage; +pub use sp_runtime::{Perbill, Permill}; + +impl_opaque_keys! { + pub struct SessionKeys { + pub aura: Aura, + } +} + +#[sp_version::runtime_version] +pub const VERSION: RuntimeVersion = RuntimeVersion { + spec_name: create_runtime_str!("glutton-westend"), + impl_name: create_runtime_str!("glutton-westend"), + authoring_version: 1, + spec_version: 10000, + impl_version: 0, + apis: RUNTIME_API_VERSIONS, + transaction_version: 1, + state_version: 1, +}; + +/// The version information used to identify this runtime when compiled natively. +#[cfg(feature = "std")] +pub fn native_version() -> NativeVersion { + NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } +} + +/// We assume that ~10% of the block weight is consumed by `on_initialize` handlers. +/// This is used to limit the maximal weight of a single extrinsic. +const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); +/// We allow `Normal` extrinsics to fill up the block up to 75%, the rest can be used +/// by Operational extrinsics. +const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); +/// We allow for .5 seconds of compute with a 12 second average block time. +const MAXIMUM_BLOCK_WEIGHT: Weight = Weight::from_parts( + WEIGHT_REF_TIME_PER_SECOND.saturating_mul(2), + cumulus_primitives_core::relay_chain::MAX_POV_SIZE as u64, +); + +/// Maximum number of blocks simultaneously accepted by the Runtime, not yet included +/// into the relay chain. +const UNINCLUDED_SEGMENT_CAPACITY: u32 = 3; +/// How many parachain blocks are processed by the relay chain per parent. Limits the +/// number of blocks authored per slot. +const BLOCK_PROCESSING_VELOCITY: u32 = 2; +/// Relay chain slot duration, in milliseconds. +const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; + +/// This determines the average expected block time that we are targeting. +/// Blocks will be produced at a minimum duration defined by `SLOT_DURATION`. +/// `SLOT_DURATION` is picked up by `pallet_timestamp` which is in turn picked +/// up by `pallet_aura` to implement `fn slot_duration()`. +/// +/// Change this to adjust the block time. 
+pub const MILLISECS_PER_BLOCK: u64 = 6000; +pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK; + +parameter_types! { + pub const BlockHashCount: BlockNumber = 4096; + pub const Version: RuntimeVersion = VERSION; + pub RuntimeBlockLength: BlockLength = + BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); + pub RuntimeBlockWeights: BlockWeights = BlockWeights::builder() + .base_block(BlockExecutionWeight::get()) + .for_class(DispatchClass::all(), |weights| { + weights.base_extrinsic = ExtrinsicBaseWeight::get(); + }) + .for_class(DispatchClass::Normal, |weights| { + weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); + }) + .for_class(DispatchClass::Operational, |weights| { + weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); + // Operational transactions have some extra reserved space, so that they + // are included even if block reached `MAXIMUM_BLOCK_WEIGHT`. + weights.reserved = Some( + MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT + ); + }) + .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) + .build_or_panic(); + pub const SS58Prefix: u8 = 42; +} + +impl frame_system::Config for Runtime { + type AccountId = AccountId; + type RuntimeCall = RuntimeCall; + type Lookup = AccountIdLookup; + type Nonce = Nonce; + type Hash = Hash; + type Hashing = BlakeTwo256; + type Block = Block; + type RuntimeEvent = RuntimeEvent; + type RuntimeOrigin = RuntimeOrigin; + type BlockHashCount = BlockHashCount; + type Version = Version; + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type DbWeight = (); + type BaseCallFilter = frame_support::traits::Everything; + type SystemWeightInfo = (); + type BlockWeights = RuntimeBlockWeights; + type BlockLength = RuntimeBlockLength; + type SS58Prefix = SS58Prefix; + type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +parameter_types! { + // We do anything the parent chain tells us in this runtime. + pub const ReservedDmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(2); + pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; +} + +type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< + Runtime, + RELAY_CHAIN_SLOT_DURATION_MILLIS, + BLOCK_PROCESSING_VELOCITY, + UNINCLUDED_SEGMENT_CAPACITY, +>; + +impl cumulus_pallet_parachain_system::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type OnSystemEvent = (); + type SelfParaId = parachain_info::Pallet; + type DmpQueue = frame_support::traits::EnqueueWithOrigin; + type OutboundXcmpMessageSource = (); + type ReservedDmpWeight = ReservedDmpWeight; + type XcmpMessageHandler = (); + type ReservedXcmpWeight = (); + type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; + type ConsensusHook = ConsensusHook; + type WeightInfo = weights::cumulus_pallet_parachain_system::WeightInfo; +} + +parameter_types! 
{ + pub MessageQueueServiceWeight: Weight = Perbill::from_percent(80) * + RuntimeBlockWeights::get().max_block; +} + +impl pallet_message_queue::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = weights::pallet_message_queue::WeightInfo; + #[cfg(feature = "runtime-benchmarks")] + type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor< + cumulus_primitives_core::AggregateMessageOrigin, + >; + #[cfg(not(feature = "runtime-benchmarks"))] + type MessageProcessor = xcm_builder::ProcessXcmMessage< + AggregateMessageOrigin, + xcm_executor::XcmExecutor, + RuntimeCall, + >; + type Size = u32; + type QueueChangeHandler = (); + type QueuePausedQuery = (); // No XCMP queue pallet deployed. + type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type MaxStale = sp_core::ConstU32<8>; + type ServiceWeight = MessageQueueServiceWeight; +} + +impl parachain_info::Config for Runtime {} + +impl cumulus_pallet_aura_ext::Config for Runtime {} + +impl pallet_timestamp::Config for Runtime { + type Moment = u64; + type OnTimestampSet = Aura; + #[cfg(feature = "experimental")] + type MinimumPeriod = ConstU64<0>; + #[cfg(not(feature = "experimental"))] + type MinimumPeriod = ConstU64<{ SLOT_DURATION / 2 }>; + type WeightInfo = weights::pallet_timestamp::WeightInfo; +} + +impl pallet_aura::Config for Runtime { + type AuthorityId = AuraId; + type DisabledValidators = (); + type MaxAuthorities = ConstU32<100_000>; + type AllowMultipleBlocksPerSlot = ConstBool; + #[cfg(feature = "experimental")] + type SlotDuration = ConstU64; +} + +impl pallet_glutton::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = weights::pallet_glutton::WeightInfo; + type AdminOrigin = EnsureRoot; +} + +impl pallet_sudo::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; + type WeightInfo = (); +} + +construct_runtime! { + pub enum Runtime + { + System: frame_system::{Pallet, Call, Storage, Config, Event} = 0, + ParachainSystem: cumulus_pallet_parachain_system::{ + Pallet, Call, Config, Storage, Inherent, Event, ValidateUnsigned, + } = 1, + ParachainInfo: parachain_info::{Pallet, Storage, Config} = 2, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent} = 3, + + // DMP handler. + CumulusXcm: cumulus_pallet_xcm::{Pallet, Call, Storage, Event, Origin} = 10, + MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 11, + + // The main stage. + Glutton: pallet_glutton::{Pallet, Call, Storage, Event, Config} = 20, + + // Collator support + Aura: pallet_aura::{Pallet, Storage, Config} = 30, + AuraExt: cumulus_pallet_aura_ext::{Pallet, Storage, Config} = 31, + + // Sudo. + Sudo: pallet_sudo::{Pallet, Call, Storage, Event, Config} = 255, + } +} + +/// Index of a transaction in the chain. +pub type Nonce = u32; +/// A hash of some data used by the chain. +pub type Hash = sp_core::H256; +/// An index to a block. +pub type BlockNumber = u32; +/// The address format for describing accounts. +pub type Address = sp_runtime::MultiAddress; +/// Block header type as expected by this runtime. +pub type Header = generic::Header; +/// Block type as expected by this runtime. +pub type Block = generic::Block; +/// A Block signed with a Justification +pub type SignedBlock = generic::SignedBlock; +/// BlockId type as expected by this runtime. +pub type BlockId = generic::BlockId; +/// The SignedExtension to the basic transaction logic. 
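A hedged aside before the definition that follows: `SignedExtra` is an ordered tuple of transaction checks that all have to pass before an extrinsic is dispatched. The sketch below is a toy stand-in, not part of the patch and not FRAME's real `SignedExtension` API; the `Tx`, `Check` and `validate` names are made up for illustration. It only shows the "run every check in order, first failure rejects" idea:

```rust
// Toy model of an ordered tuple of transaction checks. Illustration only.
struct Tx {
	nonce: u64,
	expected_nonce: u64,
	spec_version: u32,
}

type Check = fn(&Tx) -> Result<(), &'static str>;

fn check_spec_version(tx: &Tx) -> Result<(), &'static str> {
	// 10_000 matches the `spec_version` declared in this runtime's `VERSION`.
	if tx.spec_version == 10_000 { Ok(()) } else { Err("spec version mismatch") }
}

fn check_nonce(tx: &Tx) -> Result<(), &'static str> {
	if tx.nonce == tx.expected_nonce { Ok(()) } else { Err("stale or future nonce") }
}

fn validate(tx: &Tx, checks: &[Check]) -> Result<(), &'static str> {
	// Run every "extension" in order; the first failure rejects the transaction.
	checks.iter().try_for_each(|check| check(tx))
}

fn main() {
	let tx = Tx { nonce: 7, expected_nonce: 7, spec_version: 10_000 };
	assert!(validate(&tx, &[check_spec_version, check_nonce]).is_ok());
}
```

In the runtime itself the analogous checks are `CheckSpecVersion`, `CheckNonce` and friends, plus `pallet_sudo::CheckOnlySudoAccount`, which only admits signed extrinsics from the sudo account.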
+pub type SignedExtra = (
+	pallet_sudo::CheckOnlySudoAccount<Runtime>,
+	frame_system::CheckNonZeroSender<Runtime>,
+	frame_system::CheckSpecVersion<Runtime>,
+	frame_system::CheckTxVersion<Runtime>,
+	frame_system::CheckGenesis<Runtime>,
+	frame_system::CheckEra<Runtime>,
+	frame_system::CheckNonce<Runtime>,
+);
+/// Unchecked extrinsic type as expected by this runtime.
+pub type UncheckedExtrinsic =
+	generic::UncheckedExtrinsic<Address, RuntimeCall, Signature, SignedExtra>;
+/// Executive: handles dispatch to the various modules.
+pub type Executive = frame_executive::Executive<
+	Runtime,
+	Block,
+	frame_system::ChainContext<Runtime>,
+	Runtime,
+	AllPalletsWithSystem,
+>;
+
+#[cfg(feature = "runtime-benchmarks")]
+#[macro_use]
+extern crate frame_benchmarking;
+
+#[cfg(feature = "runtime-benchmarks")]
+mod benches {
+	define_benchmarks!(
+		[cumulus_pallet_parachain_system, ParachainSystem]
+		[frame_system, SystemBench::<Runtime>]
+		[pallet_glutton, Glutton]
+		[pallet_message_queue, MessageQueue]
+		[pallet_timestamp, Timestamp]
+	);
+}
+
+impl_runtime_apis! {
+	impl sp_api::Core<Block> for Runtime {
+		fn version() -> RuntimeVersion {
+			VERSION
+		}
+
+		fn execute_block(block: Block) {
+			Executive::execute_block(block)
+		}
+
+		fn initialize_block(header: &<Block as BlockT>::Header) {
+			Executive::initialize_block(header)
+		}
+	}
+
+	impl sp_api::Metadata<Block> for Runtime {
+		fn metadata() -> OpaqueMetadata {
+			OpaqueMetadata::new(Runtime::metadata().into())
+		}
+
+		fn metadata_at_version(version: u32) -> Option<OpaqueMetadata> {
+			Runtime::metadata_at_version(version)
+		}
+
+		fn metadata_versions() -> sp_std::vec::Vec<u32> {
+			Runtime::metadata_versions()
+		}
+	}
+
+	impl sp_consensus_aura::AuraApi<Block, AuraId> for Runtime {
+		fn slot_duration() -> sp_consensus_aura::SlotDuration {
+			sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration())
+		}
+
+		fn authorities() -> Vec<AuraId> {
+			Aura::authorities().into_inner()
+		}
+	}
+
+	impl cumulus_primitives_aura::AuraUnincludedSegmentApi<Block> for Runtime {
+		fn can_build_upon(
+			included_hash: <Block as BlockT>::Hash,
+			slot: cumulus_primitives_aura::Slot,
+		) -> bool {
+			ConsensusHook::can_build_upon(included_hash, slot)
+		}
+	}
+
+	impl sp_block_builder::BlockBuilder<Block> for Runtime {
+		fn apply_extrinsic(
+			extrinsic: <Block as BlockT>::Extrinsic,
+		) -> ApplyExtrinsicResult {
+			Executive::apply_extrinsic(extrinsic)
+		}
+
+		fn finalize_block() -> <Block as BlockT>::Header {
+			Executive::finalize_block()
+		}
+
+		fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<<Block as BlockT>::Extrinsic> {
+			data.create_extrinsics()
+		}
+
+		fn check_inherents(block: Block, data: sp_inherents::InherentData) -> sp_inherents::CheckInherentsResult {
+			data.check_extrinsics(&block)
+		}
+	}
+
+	impl sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block> for Runtime {
+		fn validate_transaction(
+			source: TransactionSource,
+			tx: <Block as BlockT>::Extrinsic,
+			block_hash: <Block as BlockT>::Hash,
+		) -> TransactionValidity {
+			Executive::validate_transaction(source, tx, block_hash)
+		}
+	}
+
+	impl sp_offchain::OffchainWorkerApi<Block> for Runtime {
+		fn offchain_worker(header: &<Block as BlockT>::Header) {
+			Executive::offchain_worker(header)
+		}
+	}
+
+	impl sp_session::SessionKeys<Block> for Runtime {
+		fn generate_session_keys(seed: Option<Vec<u8>>) -> Vec<u8> {
+			SessionKeys::generate(seed)
+		}
+
+		fn decode_session_keys(
+			encoded: Vec<u8>,
+		) -> Option<Vec<(Vec<u8>, KeyTypeId)>> {
+			SessionKeys::decode_into_raw_public_keys(&encoded)
+		}
+	}
+
+	impl cumulus_primitives_core::CollectCollationInfo<Block> for Runtime {
+		fn collect_collation_info(header: &<Block as BlockT>::Header) -> cumulus_primitives_core::CollationInfo {
+			ParachainSystem::collect_collation_info(header)
+		}
+	}
+
+	impl frame_system_rpc_runtime_api::AccountNonceApi<Block, AccountId, Nonce> for Runtime {
+		fn account_nonce(account: AccountId) -> Nonce {
+
System::account_nonce(account) + } + } + + #[cfg(feature = "runtime-benchmarks")] + impl frame_benchmarking::Benchmark for Runtime { + fn benchmark_metadata(extra: bool) -> ( + Vec, + Vec, + ) { + use frame_benchmarking::{Benchmarking, BenchmarkList}; + use frame_support::traits::StorageInfoTrait; + use frame_system_benchmarking::Pallet as SystemBench; + + let mut list = Vec::::new(); + list_benchmarks!(list, extra); + + let storage_info = AllPalletsWithSystem::storage_info(); + + (list, storage_info) + } + + fn dispatch_benchmark( + config: frame_benchmarking::BenchmarkConfig + ) -> Result, sp_runtime::RuntimeString> { + use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; + use sp_storage::TrackedStorageKey; + + use frame_system_benchmarking::Pallet as SystemBench; + impl frame_system_benchmarking::Config for Runtime { + fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { + ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); + Ok(()) + } + + fn verify_set_code() { + System::assert_last_event(cumulus_pallet_parachain_system::Event::::ValidationFunctionStored.into()); + } + } + + use frame_support::traits::WhitelistedStorageKeys; + let whitelist: Vec = AllPalletsWithSystem::whitelisted_storage_keys(); + + let mut batches = Vec::::new(); + let params = (&config, &whitelist); + add_benchmarks!(params, batches); + Ok(batches) + } + } + + impl sp_genesis_builder::GenesisBuilder for Runtime { + fn create_default_config() -> Vec { + create_default_config::() + } + + fn build_config(config: Vec) -> sp_genesis_builder::Result { + build_config::(config) + } + } +} + +cumulus_pallet_parachain_system::register_validate_block! { + Runtime = Runtime, + BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::, +} diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/cumulus_pallet_parachain_system.rs new file mode 100644 index 0000000000000000000000000000000000000000..bc8299ab1bd678e58a7909c1de4b0b55f4e1bedf --- /dev/null +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/cumulus_pallet_parachain_system.rs @@ -0,0 +1,75 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `cumulus_pallet_parachain_system` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("glutton-westend-dev-1300")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=cumulus_pallet_parachain_system +// --chain=glutton-westend-dev-1300 +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `cumulus_pallet_parachain_system`. +pub struct WeightInfo(PhantomData); +impl cumulus_pallet_parachain_system::WeightInfo for WeightInfo { + /// Storage: `ParachainSystem::LastDmqMqcHead` (r:1 w:1) + /// Proof: `ParachainSystem::LastDmqMqcHead` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `ParachainSystem::ProcessedDownwardMessages` (r:0 w:1) + /// Proof: `ParachainSystem::ProcessedDownwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::Pages` (r:0 w:1000) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 1000]`. + fn enqueue_inbound_downward_messages(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `12` + // Estimated: `3517` + // Minimum execution time: 1_745_000 picoseconds. + Weight::from_parts(1_859_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + // Standard Error: 53_384 + .saturating_add(Weight::from_parts(196_309_089, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + } +} diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/frame_system.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/frame_system.rs new file mode 100644 index 0000000000000000000000000000000000000000..6f8cf4f39dfdf894e4162ef6f629f88f27e97477 --- /dev/null +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/frame_system.rs @@ -0,0 +1,153 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `frame_system` +//! 
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("glutton-westend-dev-1300")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=frame_system +// --chain=glutton-westend-dev-1300 +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system`. +pub struct WeightInfo(PhantomData); +impl frame_system::WeightInfo for WeightInfo { + /// The range of component `b` is `[0, 3932160]`. + fn remark(b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_570_000 picoseconds. + Weight::from_parts(1_626_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 0 + .saturating_add(Weight::from_parts(387, 0).saturating_mul(b.into())) + } + /// The range of component `b` is `[0, 3932160]`. + fn remark_with_event(b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 4_200_000 picoseconds. + Weight::from_parts(4_262_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 4 + .saturating_add(Weight::from_parts(1_791, 0).saturating_mul(b.into())) + } + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) + fn set_heap_pages() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `1485` + // Minimum execution time: 2_680_000 picoseconds. 
+ Weight::from_parts(2_936_000, 0) + .saturating_add(Weight::from_parts(0, 1485)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) + /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpgradeRestrictionSignal` (r:1 w:0) + /// Proof: `ParachainSystem::UpgradeRestrictionSignal` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingValidationCode` (r:1 w:1) + /// Proof: `ParachainSystem::PendingValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::NewValidationCode` (r:0 w:1) + /// Proof: `ParachainSystem::NewValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::DidSetValidationCode` (r:0 w:1) + /// Proof: `ParachainSystem::DidSetValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn set_code() -> Weight { + // Proof Size summary in bytes: + // Measured: `127` + // Estimated: `1612` + // Minimum execution time: 119_097_302_000 picoseconds. + Weight::from_parts(120_914_576_000, 0) + .saturating_add(Weight::from_parts(0, 1612)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `i` is `[0, 1000]`. + fn set_storage(i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_606_000 picoseconds. + Weight::from_parts(1_704_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 2_090 + .saturating_add(Weight::from_parts(765_829, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) + } + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `i` is `[0, 1000]`. + fn kill_storage(i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_646_000 picoseconds. + Weight::from_parts(1_719_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 1_067 + .saturating_add(Weight::from_parts(578_598, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) + } + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `p` is `[0, 1000]`. + fn kill_prefix(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `58 + p * (69 ±0)` + // Estimated: `53 + p * (70 ±0)` + // Minimum execution time: 2_933_000 picoseconds. 
+ Weight::from_parts(3_069_000, 0) + .saturating_add(Weight::from_parts(0, 53)) + // Standard Error: 1_844 + .saturating_add(Weight::from_parts(1_214_377, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) + .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) + } +} diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..47f9d1ee105e42fb61afcc5b4cb02a9dcda8e3ca --- /dev/null +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/mod.rs @@ -0,0 +1,19 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod cumulus_pallet_parachain_system; +pub mod pallet_glutton; +pub mod pallet_message_queue; +pub mod pallet_timestamp; diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_glutton.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_glutton.rs new file mode 100644 index 0000000000000000000000000000000000000000..9345458a704af3dd86b77b0030ae6861b8b2ed23 --- /dev/null +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_glutton.rs @@ -0,0 +1,178 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_glutton` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("glutton-westend-dev-1300")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_glutton +// --chain=glutton-westend-dev-1300 +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_glutton`. +pub struct WeightInfo(PhantomData); +impl pallet_glutton::WeightInfo for WeightInfo { + /// Storage: `Glutton::TrashDataCount` (r:1 w:1) + /// Proof: `Glutton::TrashDataCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Glutton::TrashData` (r:0 w:1000) + /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 1000]`. + fn initialize_pallet_grow(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `87` + // Estimated: `1489` + // Minimum execution time: 6_453_000 picoseconds. + Weight::from_parts(6_629_000, 0) + .saturating_add(Weight::from_parts(0, 1489)) + // Standard Error: 3_416 + .saturating_add(Weight::from_parts(9_938_610, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + } + /// Storage: `Glutton::TrashDataCount` (r:1 w:1) + /// Proof: `Glutton::TrashDataCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Glutton::TrashData` (r:0 w:1000) + /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 1000]`. + fn initialize_pallet_shrink(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `120` + // Estimated: `1489` + // Minimum execution time: 6_456_000 picoseconds. + Weight::from_parts(6_564_000, 0) + .saturating_add(Weight::from_parts(0, 1489)) + // Standard Error: 1_336 + .saturating_add(Weight::from_parts(1_141_705, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + } + /// The range of component `i` is `[0, 100000]`. + fn waste_ref_time_iter(i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 679_000 picoseconds. + Weight::from_parts(3_310_101, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 10 + .saturating_add(Weight::from_parts(103_703, 0).saturating_mul(i.into())) + } + /// Storage: `Glutton::TrashData` (r:5000 w:0) + /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) + /// The range of component `i` is `[0, 5000]`. 
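The autogenerated weight functions in these files all share one shape: a constant base term plus a per-component slope, with storage costs layered on through `T::DbWeight`. As a standalone sketch (not part of the patch; the `*_PS` constant names and the helper function are illustrative, while the numbers are copied from `waste_ref_time_iter` above), the ref-time part of such a formula can be evaluated like this:

```rust
// Standalone sketch: evaluates the ref-time polynomial of `waste_ref_time_iter(i)`
// as benchmarked above (base + slope * i), ignoring the proof-size and DB weight parts
// that the real weight function adds on top.
const BASE_REF_TIME_PS: u64 = 3_310_101; // picoseconds
const PER_ITER_REF_TIME_PS: u64 = 103_703; // picoseconds per iteration

fn waste_ref_time_iter_ps(i: u64) -> u64 {
	BASE_REF_TIME_PS + PER_ITER_REF_TIME_PS * i
}

fn main() {
	// The benchmarked range for `i` is [0, 100000]; at the top of that range the call
	// costs roughly 10.4 ms of ref-time.
	let ps = waste_ref_time_iter_ps(100_000);
	println!("{} ps (~{:.1} ms)", ps, ps as f64 / 1e9);
}
```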
+ fn waste_proof_size_some(i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `119115 + i * (1022 ±0)` + // Estimated: `990 + i * (3016 ±0)` + // Minimum execution time: 765_000 picoseconds. + Weight::from_parts(1_004_000, 0) + .saturating_add(Weight::from_parts(0, 990)) + // Standard Error: 4_008 + .saturating_add(Weight::from_parts(6_130_770, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(i.into()))) + .saturating_add(Weight::from_parts(0, 3016).saturating_mul(i.into())) + } + /// Storage: `Glutton::Storage` (r:1 w:0) + /// Proof: `Glutton::Storage` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Glutton::Compute` (r:1 w:0) + /// Proof: `Glutton::Compute` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Glutton::TrashData` (r:1737 w:0) + /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) + fn on_idle_high_proof_waste() -> Weight { + // Proof Size summary in bytes: + // Measured: `1900498` + // Estimated: `5239782` + // Minimum execution time: 97_248_614_000 picoseconds. + Weight::from_parts(97_728_420_000, 0) + .saturating_add(Weight::from_parts(0, 5239782)) + .saturating_add(T::DbWeight::get().reads(1739)) + } + /// Storage: `Glutton::Storage` (r:1 w:0) + /// Proof: `Glutton::Storage` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Glutton::Compute` (r:1 w:0) + /// Proof: `Glutton::Compute` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Glutton::TrashData` (r:5 w:0) + /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) + fn on_idle_low_proof_waste() -> Weight { + // Proof Size summary in bytes: + // Measured: `9548` + // Estimated: `16070` + // Minimum execution time: 97_305_112_000 picoseconds. + Weight::from_parts(97_427_728_000, 0) + .saturating_add(Weight::from_parts(0, 16070)) + .saturating_add(T::DbWeight::get().reads(7)) + } + /// Storage: `Glutton::Storage` (r:1 w:0) + /// Proof: `Glutton::Storage` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Glutton::Compute` (r:1 w:0) + /// Proof: `Glutton::Compute` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + fn empty_on_idle() -> Weight { + // Proof Size summary in bytes: + // Measured: `87` + // Estimated: `1493` + // Minimum execution time: 4_125_000 picoseconds. + Weight::from_parts(4_339_000, 0) + .saturating_add(Weight::from_parts(0, 1493)) + .saturating_add(T::DbWeight::get().reads(2)) + } + /// Storage: `Glutton::Compute` (r:0 w:1) + /// Proof: `Glutton::Compute` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + fn set_compute() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_879_000 picoseconds. + Weight::from_parts(4_211_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Glutton::Storage` (r:0 w:1) + /// Proof: `Glutton::Storage` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + fn set_storage() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_920_000 picoseconds. 
+ Weight::from_parts(4_081_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_message_queue.rs new file mode 100644 index 0000000000000000000000000000000000000000..eab6c15a40d28019967dd3112b1b2bc037b93106 --- /dev/null +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_message_queue.rs @@ -0,0 +1,179 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_message_queue` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("glutton-westend-dev-1300")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_message_queue +// --chain=glutton-westend-dev-1300 +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_message_queue`. +pub struct WeightInfo(PhantomData); +impl pallet_message_queue::WeightInfo for WeightInfo { + /// Storage: `MessageQueue::ServiceHead` (r:1 w:0) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn ready_ring_knit() -> Weight { + // Proof Size summary in bytes: + // Measured: `223` + // Estimated: `6044` + // Minimum execution time: 10_833_000 picoseconds. 
+ Weight::from_parts(11_237_000, 0) + .saturating_add(Weight::from_parts(0, 6044)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + fn ready_ring_unknit() -> Weight { + // Proof Size summary in bytes: + // Measured: `218` + // Estimated: `6044` + // Minimum execution time: 9_399_000 picoseconds. + Weight::from_parts(9_773_000, 0) + .saturating_add(Weight::from_parts(0, 6044)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn service_queue_base() -> Weight { + // Proof Size summary in bytes: + // Measured: `6` + // Estimated: `3517` + // Minimum execution time: 3_277_000 picoseconds. + Weight::from_parts(3_358_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn service_page_base_completion() -> Weight { + // Proof Size summary in bytes: + // Measured: `72` + // Estimated: `69050` + // Minimum execution time: 5_429_000 picoseconds. + Weight::from_parts(5_667_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn service_page_base_no_completion() -> Weight { + // Proof Size summary in bytes: + // Measured: `72` + // Estimated: `69050` + // Minimum execution time: 5_538_000 picoseconds. + Weight::from_parts(5_803_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn service_page_item() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 89_888_000 picoseconds. + Weight::from_parts(90_929_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn bump_service_head() -> Weight { + // Proof Size summary in bytes: + // Measured: `171` + // Estimated: `3517` + // Minimum execution time: 6_129_000 picoseconds. 
+ Weight::from_parts(6_414_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn reap_page() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `69050` + // Minimum execution time: 52_366_000 picoseconds. + Weight::from_parts(53_500_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn execute_overweight_page_removed() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `69050` + // Minimum execution time: 67_848_000 picoseconds. + Weight::from_parts(68_910_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn execute_overweight_page_updated() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `69050` + // Minimum execution time: 107_564_000 picoseconds. + Weight::from_parts(109_377_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_timestamp.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_timestamp.rs new file mode 100644 index 0000000000000000000000000000000000000000..4218dcc73f4e608e8b40ae5c4fcdd927747a5084 --- /dev/null +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_timestamp.rs @@ -0,0 +1,73 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_timestamp` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! 
DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("glutton-westend-dev-1300")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_timestamp +// --chain=glutton-westend-dev-1300 +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_timestamp`. +pub struct WeightInfo(PhantomData); +impl pallet_timestamp::WeightInfo for WeightInfo { + /// Storage: `Timestamp::Now` (r:1 w:1) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Aura::CurrentSlot` (r:1 w:0) + /// Proof: `Aura::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + fn set() -> Weight { + // Proof Size summary in bytes: + // Measured: `86` + // Estimated: `1493` + // Minimum execution time: 6_306_000 picoseconds. + Weight::from_parts(6_592_000, 0) + .saturating_add(Weight::from_parts(0, 1493)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn on_finalize() -> Weight { + // Proof Size summary in bytes: + // Measured: `57` + // Estimated: `0` + // Minimum execution time: 2_900_000 picoseconds. + Weight::from_parts(3_030_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } +} diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/xcm_config.rs new file mode 100644 index 0000000000000000000000000000000000000000..5ebb0ade123175bc17303c19812b78377fab1153 --- /dev/null +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/xcm_config.rs @@ -0,0 +1,92 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::{ + AccountId, AllPalletsWithSystem, ParachainInfo, Runtime, RuntimeCall, RuntimeEvent, + RuntimeOrigin, +}; +use frame_support::{ + match_types, parameter_types, + traits::{Everything, Nothing}, + weights::Weight, +}; +use xcm::latest::prelude::*; +use xcm_builder::{ + AllowExplicitUnpaidExecutionFrom, FixedWeightBounds, ParentAsSuperuser, ParentIsPreset, + SovereignSignedViaLocation, +}; + +parameter_types! 
{ + pub const WestendLocation: MultiLocation = MultiLocation::parent(); + pub const WestendNetwork: Option = Some(NetworkId::Westend); + pub UniversalLocation: InteriorMultiLocation = X1(Parachain(ParachainInfo::parachain_id().into())); +} + +/// This is the type we use to convert an (incoming) XCM origin into a local `Origin` instance, +/// ready for dispatching a transaction with Xcm's `Transact`. There is an `OriginKind` which can +/// bias the kind of local `Origin` it will become. +pub type XcmOriginToTransactDispatchOrigin = ( + // Sovereign account converter; this attempts to derive an `AccountId` from the origin location + // using `LocationToAccountId` and then turn that into the usual `Signed` origin. Useful for + // foreign chains who want to have a local sovereign account on this chain which they control. + SovereignSignedViaLocation, RuntimeOrigin>, + // Superuser converter for the Relay-chain (Parent) location. This will allow it to issue a + // transaction from the Root origin. + ParentAsSuperuser, +); + +match_types! { + pub type JustTheParent: impl Contains = { MultiLocation { parents:1, interior: Here } }; +} + +parameter_types! { + // One XCM operation is 1_000_000_000 weight - almost certainly a conservative estimate. + pub UnitWeightCost: Weight = Weight::from_parts(1_000_000_000, 64 * 1024); + pub const MaxInstructions: u32 = 100; + pub const MaxAssetsIntoHolding: u32 = 64; +} + +pub struct XcmConfig; +impl xcm_executor::Config for XcmConfig { + type RuntimeCall = RuntimeCall; + type XcmSender = (); // sending XCM not supported + type AssetTransactor = (); // balances not supported + type OriginConverter = XcmOriginToTransactDispatchOrigin; + type IsReserve = (); // balances not supported + type IsTeleporter = (); // balances not supported + type UniversalLocation = UniversalLocation; + type Barrier = AllowExplicitUnpaidExecutionFrom; + type Weigher = FixedWeightBounds; // balances not supported + type Trader = (); // balances not supported + type ResponseHandler = (); // Don't handle responses for now. + type AssetTrap = (); // don't trap for now + type AssetClaims = (); // don't claim for now + type SubscriptionService = (); // don't handle subscriptions for now + type PalletInstancesInfo = AllPalletsWithSystem; + type MaxAssetsIntoHolding = MaxAssetsIntoHolding; + type AssetLocker = (); + type AssetExchanger = (); + type FeeManager = (); + type MessageExporter = (); + type UniversalAliases = Nothing; + type CallDispatcher = RuntimeCall; + type SafeCallFilter = Everything; + type Aliasers = Nothing; +} + +impl cumulus_pallet_xcm::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type XcmExecutor = xcm_executor::XcmExecutor; +} diff --git a/cumulus/parachains/runtimes/starters/seedling/Cargo.toml b/cumulus/parachains/runtimes/starters/seedling/Cargo.toml index d9711b57b3770355474b43245366cf6adb8c6e98..65ca58ac8b39ebc06ac6475b9818561fde2ae9ee 100644 --- a/cumulus/parachains/runtimes/starters/seedling/Cargo.toml +++ b/cumulus/parachains/runtimes/starters/seedling/Cargo.toml @@ -1,12 +1,14 @@ [package] name = "seedling-runtime" version = "0.1.0" +description = "Seedling parachain runtime. A starter runtime for solochain to parachain migration." 
authors.workspace = true edition.workspace = true +license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } # Substrate frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} @@ -35,7 +37,7 @@ cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system cumulus-pallet-solo-to-para = { path = "../../../../pallets/solo-to-para", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-timestamp = { path = "../../../../primitives/timestamp", default-features = false } -parachain-info = { path = "../../../pallets/parachain-info", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } [build-dependencies] diff --git a/cumulus/parachains/runtimes/starters/seedling/src/lib.rs b/cumulus/parachains/runtimes/starters/seedling/src/lib.rs index c2bcaf8a1266702a0d5c8d6fbbd6463a5d46dc34..43c8f1488a6c139d3adaafbf35ede5f7cca7d9e8 100644 --- a/cumulus/parachains/runtimes/starters/seedling/src/lib.rs +++ b/cumulus/parachains/runtimes/starters/seedling/src/lib.rs @@ -183,11 +183,13 @@ impl cumulus_pallet_solo_to_para::Config for Runtime { } impl cumulus_pallet_parachain_system::Config for Runtime { + type WeightInfo = (); type RuntimeEvent = RuntimeEvent; type OnSystemEvent = cumulus_pallet_solo_to_para::Pallet; type SelfParaId = parachain_info::Pallet; type OutboundXcmpMessageSource = (); - type DmpMessageHandler = (); + // Ignore all DMP messages by enqueueing them into `()`: + type DmpQueue = frame_support::traits::EnqueueWithOrigin<(), sp_core::ConstU8<0>>; type ReservedDmpWeight = (); type XcmpMessageHandler = (); type ReservedXcmpWeight = (); @@ -216,7 +218,10 @@ impl pallet_aura::Config for Runtime { impl pallet_timestamp::Config for Runtime { type Moment = u64; type OnTimestampSet = Aura; + #[cfg(feature = "experimental")] type MinimumPeriod = ConstU64<0>; + #[cfg(not(feature = "experimental"))] + type MinimumPeriod = ConstU64<{ parachains_common::SLOT_DURATION / 2 }>; type WeightInfo = (); } diff --git a/cumulus/parachains/runtimes/starters/shell/Cargo.toml b/cumulus/parachains/runtimes/starters/shell/Cargo.toml index 675abc07b77359a65f78b21a90ced4978eafede5..77449f977bb298a11a21f45fd5576422ef83bad7 100644 --- a/cumulus/parachains/runtimes/starters/shell/Cargo.toml +++ b/cumulus/parachains/runtimes/starters/shell/Cargo.toml @@ -1,12 +1,14 @@ [package] name = "shell-runtime" version = "0.1.0" +description = "A minimal runtime to test Relay Chain consensus." 
authors.workspace = true edition.workspace = true +license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } # Substrate frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} @@ -27,6 +29,7 @@ sp-session = { path = "../../../../../substrate/primitives/session", default-fea sp-std = { path = "../../../../../substrate/primitives/std", default-features = false} sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false} sp-version = { path = "../../../../../substrate/primitives/version", default-features = false} +pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } # Polkadot xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} @@ -38,7 +41,7 @@ cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-featu cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } -parachain-info = { path = "../../../pallets/parachain-info", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } [build-dependencies] @@ -57,6 +60,7 @@ std = [ "frame-system/std", "frame-try-runtime?/std", "pallet-aura/std", + "pallet-message-queue/std", "pallet-timestamp/std", "parachain-info/std", "parachains-common/std", @@ -87,6 +91,7 @@ try-runtime = [ "frame-system/try-runtime", "frame-try-runtime/try-runtime", "pallet-aura/try-runtime", + "pallet-message-queue/try-runtime", "pallet-timestamp/try-runtime", "parachain-info/try-runtime", "sp-runtime/try-runtime", diff --git a/cumulus/parachains/runtimes/starters/shell/src/lib.rs b/cumulus/parachains/runtimes/starters/shell/src/lib.rs index 4aad553e6a3baf78667cad1271965d223ca3ea2d..f67c0c19ec640bb8e131fea2536943315cc6686a 100644 --- a/cumulus/parachains/runtimes/starters/shell/src/lib.rs +++ b/cumulus/parachains/runtimes/starters/shell/src/lib.rs @@ -33,6 +33,7 @@ pub mod xcm_config; use codec::{Decode, Encode}; use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; +use cumulus_primitives_core::AggregateMessageOrigin; use frame_support::unsigned::TransactionValidityError; use scale_info::TypeInfo; use sp_api::impl_runtime_apis; @@ -181,16 +182,17 @@ impl frame_system::Config for Runtime { } parameter_types! { - // We do anything the parent chain tells us in this runtime. 
- pub const ReservedDmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(2); + pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; + pub const ReservedDmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); } impl cumulus_pallet_parachain_system::Config for Runtime { + type WeightInfo = (); type RuntimeEvent = RuntimeEvent; type OnSystemEvent = (); type SelfParaId = parachain_info::Pallet; type OutboundXcmpMessageSource = (); - type DmpMessageHandler = cumulus_pallet_xcm::UnlimitedDmpExecution; + type DmpQueue = frame_support::traits::EnqueueWithOrigin; type ReservedDmpWeight = ReservedDmpWeight; type XcmpMessageHandler = (); type ReservedXcmpWeight = (); @@ -205,6 +207,32 @@ impl cumulus_pallet_parachain_system::Config for Runtime { impl parachain_info::Config for Runtime {} +parameter_types! { + pub MessageQueueServiceWeight: Weight = Perbill::from_percent(35) * RuntimeBlockWeights::get().max_block; +} + +impl pallet_message_queue::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = (); + #[cfg(feature = "runtime-benchmarks")] + type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor< + cumulus_primitives_core::AggregateMessageOrigin, + >; + #[cfg(not(feature = "runtime-benchmarks"))] + type MessageProcessor = xcm_builder::ProcessXcmMessage< + AggregateMessageOrigin, + xcm_executor::XcmExecutor, + RuntimeCall, + >; + type Size = u32; + // These need to be configured to the XCMP pallet - if it is deployed. + type QueueChangeHandler = (); + type QueuePausedQuery = (); + type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type MaxStale = sp_core::ConstU32<8>; + type ServiceWeight = MessageQueueServiceWeight; +} + impl cumulus_pallet_aura_ext::Config for Runtime {} impl pallet_aura::Config for Runtime { @@ -219,7 +247,10 @@ impl pallet_aura::Config for Runtime { impl pallet_timestamp::Config for Runtime { type Moment = u64; type OnTimestampSet = Aura; + #[cfg(feature = "experimental")] type MinimumPeriod = ConstU64<0>; + #[cfg(not(feature = "experimental"))] + type MinimumPeriod = ConstU64<{ parachains_common::SLOT_DURATION / 2 }>; type WeightInfo = (); } @@ -234,8 +265,8 @@ construct_runtime! { }, ParachainInfo: parachain_info::{Pallet, Storage, Config}, - // DMP handler. 
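Editor's note: the shell-runtime hunk above replaces inline DMP execution (`DmpMessageHandler = UnlimitedDmpExecution`) with an enqueue-then-service model: downward messages are stored under the `Parent` aggregate origin via `EnqueueWithOrigin` and only processed later, within the `ServiceWeight` budget of `pallet-message-queue`. The following standalone sketch uses simplified, hypothetical types (not the pallet's real paged, weight-metered implementation) purely to illustrate that flow.

// Simplified model of the enqueue-then-service flow; illustrative only.
#[allow(dead_code)]
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum AggregateMessageOrigin {
    Parent,       // messages coming from the relay chain (DMP)
    Sibling(u32), // messages coming from sibling parachains (XCMP)
}

#[derive(Default)]
struct Queue {
    items: Vec<(AggregateMessageOrigin, Vec<u8>)>,
}

impl Queue {
    // Counterpart of `EnqueueWithOrigin<MessageQueue, RelayOrigin>`: every DMP
    // message is tagged with the constant `Parent` origin when it is stored.
    fn enqueue_dmp(&mut self, msg: Vec<u8>) {
        self.items.push((AggregateMessageOrigin::Parent, msg));
    }

    // Counterpart of servicing within `MessageQueueServiceWeight`: process
    // messages until the per-block budget runs out, keep the rest queued.
    fn service(&mut self, mut budget: u64, weight_per_msg: u64) -> usize {
        let mut processed = 0;
        while budget >= weight_per_msg && !self.items.is_empty() {
            let (_origin, _msg) = self.items.remove(0);
            budget -= weight_per_msg;
            processed += 1;
        }
        processed
    }
}

fn main() {
    let mut q = Queue::default();
    for i in 0u8..5 {
        q.enqueue_dmp(vec![i]);
    }
    // With a budget of 3 weight units and 1 unit per message, two messages stay queued.
    assert_eq!(q.service(3, 1), 3);
    assert_eq!(q.items.len(), 2);
}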
CumulusXcm: cumulus_pallet_xcm::{Pallet, Call, Storage, Event, Origin}, + MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event}, Aura: pallet_aura::{Pallet, Storage, Config}, AuraExt: cumulus_pallet_aura_ext::{Pallet, Storage, Config}, diff --git a/cumulus/parachains/runtimes/test-utils/Cargo.toml b/cumulus/parachains/runtimes/test-utils/Cargo.toml index 378d9ea4c42ce27d0541b2c5ef567801d2e32bd3..62bce02bd3581064259443769cea476d992fb686 100644 --- a/cumulus/parachains/runtimes/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/test-utils/Cargo.toml @@ -4,6 +4,7 @@ version = "1.0.0" authors.workspace = true edition.workspace = true description = "Utils for Runtimes testing" +license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } @@ -24,14 +25,13 @@ sp-core = { path = "../../../../substrate/primitives/core", default-features = f # Cumulus cumulus-pallet-parachain-system = { path = "../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } cumulus-pallet-xcmp-queue = { path = "../../../pallets/xcmp-queue", default-features = false } -cumulus-pallet-dmp-queue = { path = "../../../pallets/dmp-queue", default-features = false } pallet-collator-selection = { path = "../../../pallets/collator-selection", default-features = false } parachains-common = { path = "../../common", default-features = false } +parachain-info = {package = "staging-parachain-info", path = "../../pallets/parachain-info", default-features = false } assets-common = { path = "../assets/common", default-features = false } cumulus-primitives-core = { path = "../../../primitives/core", default-features = false } cumulus-primitives-parachain-inherent = { path = "../../../primitives/parachain-inherent", default-features = false } cumulus-test-relay-sproof-builder = { path = "../../../test/relay-sproof-builder", default-features = false } -parachain-info = { path = "../../pallets/parachain-info", default-features = false } # Polkadot xcm = { package = "staging-xcm", path = "../../../../polkadot/xcm", default-features = false} @@ -49,7 +49,7 @@ substrate-wasm-builder = { path = "../../../../substrate/utils/wasm-builder" } default = [ "std" ] std = [ "assets-common/std", - "cumulus-pallet-dmp-queue/std", + "codec/std", "cumulus-pallet-parachain-system/std", "cumulus-pallet-xcmp-queue/std", "cumulus-primitives-core/std", diff --git a/cumulus/parachains/runtimes/test-utils/src/lib.rs b/cumulus/parachains/runtimes/test-utils/src/lib.rs index 8289a80baa11a8542210b7b691765bed05a9a0ae..e2a6fb45aec33acb6e2e5d729674b9939ce682aa 100644 --- a/cumulus/parachains/runtimes/test-utils/src/lib.rs +++ b/cumulus/parachains/runtimes/test-utils/src/lib.rs @@ -24,11 +24,12 @@ use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder; use frame_support::{ dispatch::{DispatchResult, RawOrigin}, inherent::{InherentData, ProvideInherent}, + pallet_prelude::Get, traits::{OnFinalize, OnInitialize, OriginTrait, UnfilteredDispatchable}, weights::Weight, }; use frame_system::pallet_prelude::{BlockNumberFor, HeaderFor}; -use parachains_common::{AccountId, SLOT_DURATION}; +use parachains_common::SLOT_DURATION; use polkadot_parachain_primitives::primitives::{ HeadData, HrmpChannelId, RelayChainBlockNumber, XcmpMessageFormat, }; @@ -49,7 +50,7 @@ pub type AccountIdOf = ::AccountId; pub type ValidatorIdOf = ::ValidatorId; pub type SessionKeysOf = ::Keys; -pub struct CollatorSessionKeys< 
+pub struct CollatorSessionKey< Runtime: frame_system::Config + pallet_balances::Config + pallet_session::Config, > { collator: AccountIdOf, @@ -57,8 +58,14 @@ pub struct CollatorSessionKeys< key: SessionKeysOf, } +pub struct CollatorSessionKeys< + Runtime: frame_system::Config + pallet_balances::Config + pallet_session::Config, +> { + items: Vec>, +} + impl - CollatorSessionKeys + CollatorSessionKey { pub fn new( collator: AccountIdOf, @@ -67,14 +74,43 @@ impl Self { Self { collator, validator, key } } +} + +impl Default + for CollatorSessionKeys +{ + fn default() -> Self { + Self { items: vec![] } + } +} + +impl + CollatorSessionKeys +{ + pub fn new( + collator: AccountIdOf, + validator: ValidatorIdOf, + key: SessionKeysOf, + ) -> Self { + Self { items: vec![CollatorSessionKey::new(collator, validator, key)] } + } + + pub fn add(mut self, item: CollatorSessionKey) -> Self { + self.items.push(item); + self + } + pub fn collators(&self) -> Vec> { - vec![self.collator.clone()] + self.items.iter().map(|item| item.collator.clone()).collect::>() } pub fn session_keys( &self, ) -> Vec<(AccountIdOf, ValidatorIdOf, SessionKeysOf)> { - vec![(self.collator.clone(), self.validator.clone(), self.key.clone())] + self.items + .iter() + .map(|item| (item.collator.clone(), item.validator.clone(), item.key.clone())) + .collect::>() } } @@ -225,7 +261,7 @@ where AllPalletsWithoutSystem: OnInitialize> + OnFinalize>, { - pub fn run_to_block(n: u32, author: AccountId) -> HeaderFor { + pub fn run_to_block(n: u32, author: AccountIdOf) -> HeaderFor { let mut last_header = None; loop { let block_number = frame_system::Pallet::::block_number(); @@ -325,7 +361,7 @@ impl< } impl< - Runtime: cumulus_pallet_dmp_queue::Config + cumulus_pallet_parachain_system::Config, + Runtime: cumulus_pallet_parachain_system::Config + pallet_xcm::Config, AllPalletsWithoutSystem, > RuntimeHelper { @@ -342,7 +378,7 @@ impl< // execute xcm as parent origin let hash = xcm.using_encoded(sp_io::hashing::blake2_256); - <::XcmExecutor>::execute_xcm( + <::XcmExecutor>::execute_xcm( MultiLocation::parent(), xcm, hash, @@ -360,7 +396,6 @@ impl { pub fn xcm_max_weight(from: XcmReceivedFrom) -> Weight { - use frame_support::traits::Get; match from { XcmReceivedFrom::Parent => ParachainSystem::ReservedDmpWeight::get(), XcmReceivedFrom::Sibling => ParachainSystem::ReservedXcmpWeight::get(), @@ -375,16 +410,20 @@ impl) -> Option>>, assert_outcome: fn(Outcome), ) { - let outcome = >::events() + assert_outcome(Self::get_pallet_xcm_event_outcome(unwrap_pallet_xcm_event)); + } + + pub fn get_pallet_xcm_event_outcome( + unwrap_pallet_xcm_event: &Box) -> Option>>, + ) -> Outcome { + >::events() .into_iter() .filter_map(|e| unwrap_pallet_xcm_event(e.event.encode())) .find_map(|e| match e { pallet_xcm::Event::Attempted { outcome } => Some(outcome), _ => None, }) - .expect("No `pallet_xcm::Event::Attempted(outcome)` event found!"); - - assert_outcome(outcome); + .expect("No `pallet_xcm::Event::Attempted(outcome)` event found!") } } @@ -415,8 +454,8 @@ pub fn assert_metadata( expected_symbol: &str, expected_decimals: u8, ) where - Fungibles: frame_support::traits::tokens::fungibles::metadata::Inspect - + frame_support::traits::tokens::fungibles::Inspect, + Fungibles: frame_support::traits::fungibles::metadata::Inspect + + frame_support::traits::fungibles::Inspect, { assert_eq!(Fungibles::name(asset_id.into()), Vec::from(expected_name),); assert_eq!(Fungibles::symbol(asset_id.into()), Vec::from(expected_symbol),); @@ -428,8 +467,8 @@ pub fn assert_total( 
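Editor's note: since the test-utils hunk above splits `CollatorSessionKeys` into single `CollatorSessionKey` items plus a collection with an `add` builder, a short usage sketch may help. It is hypothetical test code: `Runtime`, `alice` and `bob` stand for a concrete runtime and pre-built (account id, validator id, session keys) tuples that an actual test would supply.

// Hypothetical helper showing the intended builder-style usage of the new API.
fn two_collator_session_keys<Runtime>(
    alice: (AccountIdOf<Runtime>, ValidatorIdOf<Runtime>, SessionKeysOf<Runtime>),
    bob: (AccountIdOf<Runtime>, ValidatorIdOf<Runtime>, SessionKeysOf<Runtime>),
) -> CollatorSessionKeys<Runtime>
where
    Runtime: frame_system::Config + pallet_balances::Config + pallet_session::Config,
{
    // Start from a single-collator set and append a second collator via `add`.
    // `collators()` then yields both accounts and `session_keys()` both key tuples.
    CollatorSessionKeys::new(alice.0, alice.1, alice.2)
        .add(CollatorSessionKey::new(bob.0, bob.1, bob.2))
}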
expected_total_issuance: impl Into, expected_active_issuance: impl Into, ) where - Fungibles: frame_support::traits::tokens::fungibles::metadata::Inspect - + frame_support::traits::tokens::fungibles::Inspect, + Fungibles: frame_support::traits::fungibles::metadata::Inspect + + frame_support::traits::fungibles::Inspect, { assert_eq!(Fungibles::total_issuance(asset_id.into()), expected_total_issuance.into()); assert_eq!(Fungibles::active_issuance(asset_id.into()), expected_active_issuance.into()); diff --git a/cumulus/parachains/runtimes/test-utils/src/test_cases.rs b/cumulus/parachains/runtimes/test-utils/src/test_cases.rs index 4ccb54496b3b1e303e3f4e4b80f4f572202e4dda..950d0498130e553f57b4a03173b8232ad0bfa061 100644 --- a/cumulus/parachains/runtimes/test-utils/src/test_cases.rs +++ b/cumulus/parachains/runtimes/test-utils/src/test_cases.rs @@ -37,7 +37,6 @@ pub fn change_storage_constant_by_governance_works: From>, StorageConstant: Get, diff --git a/cumulus/parachains/runtimes/testing/penpal/Cargo.toml b/cumulus/parachains/runtimes/testing/penpal/Cargo.toml index 6c4f8c3895a884010a6018623e98dd3f667c586d..fb66275b025af8c782bd450793de8c47ae4154ed 100644 --- a/cumulus/parachains/runtimes/testing/penpal/Cargo.toml +++ b/cumulus/parachains/runtimes/testing/penpal/Cargo.toml @@ -7,7 +7,6 @@ license = "Unlicense" homepage = "https://substrate.io" repository.workspace = true edition.workspace = true -publish = false [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -19,7 +18,7 @@ substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } hex-literal = { version = "0.4.1", optional = true } log = { version = "0.4.20", default-features = false } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } smallvec = "1.11.0" # Substrate @@ -65,6 +64,7 @@ xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkad # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } +pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue", default-features = false } cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false} @@ -73,7 +73,7 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachain-info = { path = "../../../pallets/parachain-info", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } [features] @@ -102,6 +102,7 @@ std = [ "pallet-authorship/std", "pallet-balances/std", "pallet-collator-selection/std", + "pallet-message-queue/std", 
"pallet-session/std", "pallet-sudo/std", "pallet-timestamp/std", @@ -134,9 +135,11 @@ std = [ ] runtime-benchmarks = [ + "cumulus-pallet-dmp-queue/runtime-benchmarks", "cumulus-pallet-parachain-system/runtime-benchmarks", "cumulus-pallet-session-benchmarking/runtime-benchmarks", "cumulus-pallet-xcmp-queue/runtime-benchmarks", + "cumulus-primitives-core/runtime-benchmarks", "cumulus-primitives-utility/runtime-benchmarks", "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", @@ -147,6 +150,7 @@ runtime-benchmarks = [ "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-collator-selection/runtime-benchmarks", + "pallet-message-queue/runtime-benchmarks", "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", @@ -175,6 +179,7 @@ try-runtime = [ "pallet-authorship/try-runtime", "pallet-balances/try-runtime", "pallet-collator-selection/try-runtime", + "pallet-message-queue/try-runtime", "pallet-session/try-runtime", "pallet-sudo/try-runtime", "pallet-timestamp/try-runtime", diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index fe0f19c306321e8c8f22d1225611ac6281f44340..1ddad31920a57955a8b1033dfc5665476cb77146 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -33,13 +33,16 @@ mod weights; pub mod xcm_config; use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; +use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; use frame_support::{ construct_runtime, dispatch::DispatchClass, genesis_builder_helper::{build_config, create_default_config}, pallet_prelude::Weight, parameter_types, - traits::{AsEnsureOriginWithArg, ConstBool, ConstU32, ConstU64, ConstU8, Everything}, + traits::{ + AsEnsureOriginWithArg, ConstBool, ConstU32, ConstU64, ConstU8, Everything, TransformOrigin, + }, weights::{ constants::WEIGHT_REF_TIME_PER_SECOND, ConstantMultiplier, FeePolynomial, WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial, @@ -50,6 +53,8 @@ use frame_system::{ limits::{BlockLength, BlockWeights}, EnsureRoot, EnsureSigned, }; +use parachains_common::message_queue::{NarrowOriginToSibling, ParaIdToSibling}; +use polkadot_runtime_common::xcm_sender::NoPriceForMessageDelivery; use smallvec::smallvec; use sp_api::impl_runtime_apis; pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; @@ -65,7 +70,7 @@ use sp_std::prelude::*; #[cfg(feature = "std")] use sp_version::NativeVersion; use sp_version::RuntimeVersion; -use xcm_config::{AssetsToBlockAuthor, XcmConfig, XcmOriginToTransactDispatchOrigin}; +use xcm_config::{AssetsToBlockAuthor, XcmOriginToTransactDispatchOrigin}; #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; @@ -78,7 +83,6 @@ use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; // XCM Imports use parachains_common::{AccountId, Signature}; use xcm::latest::prelude::BodyId; -use xcm_executor::XcmExecutor; /// Balance of an account. pub type Balance = u128; @@ -397,6 +401,7 @@ impl pallet_balances::Config for Runtime { type MaxReserves = ConstU32<50>; type ReserveIdentifier = [u8; 8]; type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); type MaxHolds = ConstU32<0>; type MaxFreezes = ConstU32<0>; @@ -429,7 +434,7 @@ parameter_types! 
{ // pub type AssetsForceOrigin = // EnsureOneOf, EnsureXcm>>; -impl pallet_assets::Config for Runtime { +impl pallet_assets::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Balance = Balance; type AssetId = AssetId; @@ -455,13 +460,15 @@ impl pallet_assets::Config for Runtime { parameter_types! { pub const ReservedXcmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); pub const ReservedDmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); + pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; } impl cumulus_pallet_parachain_system::Config for Runtime { + type WeightInfo = (); type RuntimeEvent = RuntimeEvent; type OnSystemEvent = (); type SelfParaId = parachain_info::Pallet; - type DmpMessageHandler = DmpQueue; + type DmpQueue = frame_support::traits::EnqueueWithOrigin; type ReservedDmpWeight = ReservedDmpWeight; type OutboundXcmpMessageSource = XcmpQueue; type XcmpMessageHandler = XcmpQueue; @@ -477,24 +484,40 @@ impl cumulus_pallet_parachain_system::Config for Runtime { impl parachain_info::Config for Runtime {} +parameter_types! { + pub MessageQueueServiceWeight: Weight = Perbill::from_percent(35) * RuntimeBlockWeights::get().max_block; +} + +impl pallet_message_queue::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = (); + type MessageProcessor = xcm_builder::ProcessXcmMessage< + AggregateMessageOrigin, + xcm_executor::XcmExecutor, + RuntimeCall, + >; + type Size = u32; + // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: + type QueueChangeHandler = NarrowOriginToSibling; + type QueuePausedQuery = NarrowOriginToSibling; + type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type MaxStale = sp_core::ConstU32<8>; + type ServiceWeight = MessageQueueServiceWeight; +} + impl cumulus_pallet_aura_ext::Config for Runtime {} impl cumulus_pallet_xcmp_queue::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type XcmExecutor = XcmExecutor; type ChannelInfo = ParachainSystem; type VersionWrapper = PolkadotXcm; - type ExecuteOverweightOrigin = EnsureRoot; + // Enqueue XCMP messages from siblings for later processing. + type XcmpQueue = TransformOrigin; + type MaxInboundSuspended = sp_core::ConstU32<1_000>; type ControllerOrigin = EnsureRoot; type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; type WeightInfo = (); - type PriceForSiblingDelivery = (); -} - -impl cumulus_pallet_dmp_queue::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type XcmExecutor = XcmExecutor; - type ExecuteOverweightOrigin = EnsureRoot; + type PriceForSiblingDelivery = NoPriceForMessageDelivery; } parameter_types! { @@ -554,7 +577,12 @@ impl pallet_asset_tx_payment::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Fungibles = Assets; type OnChargeAssetTransaction = pallet_asset_tx_payment::FungiblesAdapter< - pallet_assets::BalanceToAssetBalance, + pallet_assets::BalanceToAssetBalance< + Balances, + Runtime, + ConvertInto, + pallet_assets::Instance1, + >, AssetsToBlockAuthor, >; } @@ -593,28 +621,26 @@ construct_runtime!( XcmpQueue: cumulus_pallet_xcmp_queue::{Pallet, Call, Storage, Event} = 30, PolkadotXcm: pallet_xcm::{Pallet, Call, Event, Origin, Config} = 31, CumulusXcm: cumulus_pallet_xcm::{Pallet, Event, Origin} = 32, - DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event} = 33, + MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 34, // The main stage. 
- Assets: pallet_assets::{Pallet, Call, Storage, Event} = 50, + Assets: pallet_assets::::{Pallet, Call, Storage, Event} = 50, Sudo: pallet_sudo::{Pallet, Call, Storage, Event, Config} = 255, } ); -#[cfg(feature = "runtime-benchmarks")] -#[macro_use] -extern crate frame_benchmarking; - #[cfg(feature = "runtime-benchmarks")] mod benches { - define_benchmarks!( + frame_benchmarking::define_benchmarks!( [frame_system, SystemBench::] [pallet_balances, Balances] + [pallet_message_queue, MessageQueue] [pallet_session, SessionBench::] [pallet_sudo, Sudo] [pallet_timestamp, Timestamp] [pallet_collator_selection, CollatorSelection] + [cumulus_pallet_parachain_system, ParachainSystem] [cumulus_pallet_xcmp_queue, XcmpQueue] ); } diff --git a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs index f2ffc451b10ef2f2b0f37983bb3fc633a020193f..74d9a0b071d8551aadf20a3c2d9f8f58611950ef 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs @@ -38,6 +38,7 @@ use frame_support::{ }; use frame_system::EnsureRoot; use pallet_asset_tx_payment::HandleCredit; +use pallet_assets::Instance1; use pallet_xcm::XcmPassthrough; use polkadot_parachain_primitives::primitives::Sibling; use polkadot_runtime_common::impls::ToAuthor; @@ -48,9 +49,10 @@ use xcm_builder::{ AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, AsPrefixedGeneralIndex, ConvertedConcreteId, CurrencyAdapter, DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, FixedWeightBounds, FungiblesAdapter, IsConcrete, LocalMint, NativeAsset, - ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, - SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, - TrailingSetTopicAsId, UsingComponents, WithComputedOrigin, WithUniqueTopic, + ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, + SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, + SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, + WithComputedOrigin, WithUniqueTopic, }; use xcm_executor::{traits::JustTry, XcmExecutor}; @@ -126,6 +128,9 @@ pub type XcmOriginToTransactDispatchOrigin = ( // Native converter for sibling Parachains; will convert to a `SiblingPara` origin when // recognized. SiblingParachainAsNative, + // Superuser converter for the Relay-chain (Parent) location. This will allow it to issue a + // transaction from the Root origin. + ParentAsSuperuser, // Native signed account converter; this just converts an `AccountId32` origin into a normal // `RuntimeOrigin::Signed` origin of the same 32-byte value. SignedAccountId32AsNative, @@ -160,7 +165,7 @@ pub type Barrier = TrailingSetTopicAsId< // Allow XCMs with some computed origins to pass through. WithComputedOrigin< ( - // If the message is one that immediately attemps to pay for execution, then + // If the message is one that immediately attempts to pay for execution, then // allow it. AllowTopLevelPaidExecutionFrom, // System Assets parachain, parent and its exec plurality get free @@ -182,14 +187,25 @@ pub type Barrier = TrailingSetTopicAsId< /// Type alias to conveniently refer to `frame_system`'s `Config::AccountId`. pub type AccountIdOf = ::AccountId; -/// Asset filter that allows all assets from a certain location. +/// Asset filter that allows all assets from a certain location matching asset id. 
pub struct AssetsFrom(PhantomData); impl> ContainsPair for AssetsFrom { fn contains(asset: &MultiAsset, origin: &MultiLocation) -> bool { let loc = T::get(); &loc == origin && matches!(asset, MultiAsset { id: AssetId::Concrete(asset_loc), fun: Fungible(_a) } - if asset_loc.match_and_split(&loc).is_some()) + if asset_loc.starts_with(&loc)) + } +} + +/// Asset filter that allows native/relay asset if coming from a certain location. +pub struct NativeAssetFrom(PhantomData); +impl> ContainsPair for NativeAssetFrom { + fn contains(asset: &MultiAsset, origin: &MultiLocation) -> bool { + let loc = T::get(); + &loc == origin && + matches!(asset, MultiAsset { id: AssetId::Concrete(asset_loc), fun: Fungible(_a) } + if *asset_loc == MultiLocation::from(Parent)) } } @@ -208,56 +224,19 @@ where /// A `HandleCredit` implementation that naively transfers the fees to the block author. /// Will drop and burn the assets in case the transfer fails. pub struct AssetsToBlockAuthor(PhantomData); -impl HandleCredit, pallet_assets::Pallet> for AssetsToBlockAuthor +impl HandleCredit, pallet_assets::Pallet> for AssetsToBlockAuthor where - R: pallet_authorship::Config + pallet_assets::Config, + R: pallet_authorship::Config + pallet_assets::Config, AccountIdOf: From + Into, { - fn handle_credit(credit: Credit, pallet_assets::Pallet>) { + fn handle_credit(credit: Credit, pallet_assets::Pallet>) { if let Some(author) = pallet_authorship::Pallet::::author() { // In case of error: Will drop the result triggering the `OnDrop` of the imbalance. - let _ = pallet_assets::Pallet::::resolve(&author, credit); + let _ = pallet_assets::Pallet::::resolve(&author, credit); } } } -pub trait Reserve { - /// Returns assets reserve location. - fn reserve(&self) -> Option; -} - -// Takes the chain part of a MultiAsset -impl Reserve for MultiAsset { - fn reserve(&self) -> Option { - if let AssetId::Concrete(location) = self.id { - let first_interior = location.first_interior(); - let parents = location.parent_count(); - match (parents, first_interior) { - (0, Some(Parachain(id))) => Some(MultiLocation::new(0, X1(Parachain(*id)))), - (1, Some(Parachain(id))) => Some(MultiLocation::new(1, X1(Parachain(*id)))), - (1, _) => Some(MultiLocation::parent()), - _ => None, - } - } else { - None - } - } -} - -/// A `FilterAssetLocation` implementation. Filters multi native assets whose -/// reserve is same with `origin`. -pub struct MultiNativeAsset; -impl ContainsPair for MultiNativeAsset { - fn contains(asset: &MultiAsset, origin: &MultiLocation) -> bool { - if let Some(ref reserve) = asset.reserve() { - if reserve == origin { - return true - } - } - false - } -} - parameter_types! { /// The location that this chain recognizes as the Relay network's Asset Hub. pub SystemAssetHubLocation: MultiLocation = MultiLocation::new(1, X1(Parachain(1000))); @@ -268,7 +247,8 @@ parameter_types! { pub CheckingAccount: AccountId = PolkadotXcm::check_account(); } -pub type Reserves = (NativeAsset, AssetsFrom); +pub type Reserves = + (NativeAsset, AssetsFrom, NativeAssetFrom); pub struct XcmConfig; impl xcm_executor::Config for XcmConfig { @@ -277,7 +257,8 @@ impl xcm_executor::Config for XcmConfig { // How to withdraw and deposit an asset. 
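Editor's note: to make the two reserve filters above concrete: with `SystemAssetHubLocation = (1, Parachain(1000))`, `AssetsFrom` accepts any concrete fungible asset whose id is rooted under that location, while `NativeAssetFrom` only accepts the relay-chain asset (`Parent`) when it arrives from that origin. The sketch below is a rough, non-authoritative restatement as plain functions (the runtime uses `ContainsPair` impls); the asset ids are made up for illustration.

// Plain-function restatement of the filters, using the xcm crate's v3 types.
use xcm::latest::prelude::*;

fn assets_from(reserve: &MultiLocation, asset: &MultiAsset, origin: &MultiLocation) -> bool {
    // Asset must come from `reserve` and its id must be rooted under `reserve`.
    origin == reserve &&
        matches!(&asset.id, Concrete(id) if id.starts_with(reserve)) &&
        matches!(asset.fun, Fungible(_))
}

fn native_asset_from(reserve: &MultiLocation, asset: &MultiAsset, origin: &MultiLocation) -> bool {
    // Asset must come from `reserve` but must be the relay-chain (Parent) asset itself.
    origin == reserve &&
        matches!(&asset.id, Concrete(id) if *id == MultiLocation::parent()) &&
        matches!(asset.fun, Fungible(_))
}

fn main() {
    let asset_hub = MultiLocation::new(1, X1(Parachain(1000)));
    // Illustrative Asset Hub asset id; the exact pallet instance / index are placeholders.
    let hub_asset: MultiAsset =
        (MultiLocation::new(1, X3(Parachain(1000), PalletInstance(50), GeneralIndex(1984))), 100u128).into();
    let relay_native: MultiAsset = (MultiLocation::parent(), 100u128).into();

    // An Asset Hub asset arriving from Asset Hub passes `AssetsFrom`, not `NativeAssetFrom`.
    assert!(assets_from(&asset_hub, &hub_asset, &asset_hub));
    assert!(!native_asset_from(&asset_hub, &hub_asset, &asset_hub));

    // The relay-chain native asset arriving from Asset Hub passes `NativeAssetFrom`.
    assert!(native_asset_from(&asset_hub, &relay_native, &asset_hub));
}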
type AssetTransactor = AssetTransactors; type OriginConverter = XcmOriginToTransactDispatchOrigin; - type IsReserve = MultiNativeAsset; // TODO: maybe needed to be replaced by Reserves + type IsReserve = Reserves; + // no teleport trust established with other chains type IsTeleporter = NativeAsset; type UniversalLocation = UniversalLocation; type Barrier = Barrier; @@ -312,11 +293,6 @@ pub type XcmRouter = WithUniqueTopic<( XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = EnsureXcmOrigin; @@ -342,8 +318,6 @@ impl pallet_xcm::Config for Runtime { type SovereignAccountOf = LocationToAccountId; type MaxLockers = ConstU32<8>; type WeightInfo = pallet_xcm::TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml index 029d5d10f98684d20f8917af3d90a4df29f3410e..08fa8b691274b0dcaf4aebbacb1212f1749f92e5 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml @@ -4,11 +4,11 @@ version = "0.1.0" authors.workspace = true edition.workspace = true description = "Simple runtime used by the rococo parachain(s)" -publish = false +license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } # Substrate frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true} @@ -42,18 +42,21 @@ polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", de xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false} xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} +polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false } # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } +pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue", default-features = false } cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } cumulus-ping = { path = "../../../pallets/ping", default-features = false } +cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = 
"../../../../primitives/utility", default-features = false } parachains-common = { path = "../../../common", default-features = false } -parachain-info = { path = "../../../pallets/parachain-info", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } [build-dependencies] substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } @@ -68,6 +71,7 @@ std = [ "cumulus-pallet-xcm/std", "cumulus-pallet-xcmp-queue/std", "cumulus-ping/std", + "cumulus-primitives-aura/std", "cumulus-primitives-core/std", "cumulus-primitives-utility/std", "frame-benchmarking?/std", @@ -78,6 +82,7 @@ std = [ "pallet-assets/std", "pallet-aura/std", "pallet-balances/std", + "pallet-message-queue/std", "pallet-sudo/std", "pallet-timestamp/std", "pallet-transaction-payment-rpc-runtime-api/std", @@ -86,6 +91,7 @@ std = [ "parachain-info/std", "parachains-common/std", "polkadot-parachain-primitives/std", + "polkadot-runtime-common/std", "scale-info/std", "sp-api/std", "sp-block-builder/std", @@ -105,19 +111,23 @@ std = [ "xcm/std", ] runtime-benchmarks = [ + "cumulus-pallet-dmp-queue/runtime-benchmarks", "cumulus-pallet-parachain-system/runtime-benchmarks", "cumulus-pallet-xcmp-queue/runtime-benchmarks", + "cumulus-primitives-core/runtime-benchmarks", "cumulus-primitives-utility/runtime-benchmarks", "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", + "pallet-message-queue/runtime-benchmarks", "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", "parachains-common/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", + "polkadot-runtime-common/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs index 50c5a445c25fe0c2c0e09b4979539e9a5f0146c0..965fb0d6adc58fac17aa82c7f8cf7b8a26070913 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs @@ -22,12 +22,13 @@ #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); -use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; +use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; +use polkadot_runtime_common::xcm_sender::NoPriceForMessageDelivery; use sp_api::impl_runtime_apis; use sp_core::OpaqueMetadata; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, - traits::{AccountIdLookup, BlakeTwo256, Block as BlockT}, + traits::{AccountIdLookup, BlakeTwo256, Block as BlockT, Hash as HashT}, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, }; @@ -65,8 +66,11 @@ pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; pub use sp_runtime::BuildStorage; pub use sp_runtime::{Perbill, Permill}; +use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; +use frame_support::traits::TransformOrigin; use parachains_common::{ impls::{AssetsFrom, NonZeroIssuance}, + message_queue::{NarrowOriginToSibling, ParaIdToSibling}, AccountId, AssetIdForTrustBackedAssets, Signature, }; use xcm_builder::{ @@ -109,7 +113,7 @@ pub const VERSION: RuntimeVersion = 
RuntimeVersion { state_version: 0, }; -pub const MILLISECS_PER_BLOCK: u64 = 12000; +pub const MILLISECS_PER_BLOCK: u64 = 6000; pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK; @@ -139,18 +143,18 @@ const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); /// We allow `Normal` extrinsics to fill up the block up to 75%, the rest can be used /// by Operational extrinsics. const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); -/// We allow for .5 seconds of compute with a 12 second average block time. +/// We allow for 2 seconds of compute with a 6 second average block time. const MAXIMUM_BLOCK_WEIGHT: Weight = Weight::from_parts( - WEIGHT_REF_TIME_PER_SECOND.saturating_div(2), + WEIGHT_REF_TIME_PER_SECOND.saturating_mul(2), cumulus_primitives_core::relay_chain::MAX_POV_SIZE as u64, ); /// Maximum number of blocks simultaneously accepted by the Runtime, not yet included /// into the relay chain. -const UNINCLUDED_SEGMENT_CAPACITY: u32 = 1; +const UNINCLUDED_SEGMENT_CAPACITY: u32 = 3; /// How many parachain blocks are processed by the relay chain per parent. Limits the /// number of blocks authored per slot. -const BLOCK_PROCESSING_VELOCITY: u32 = 1; +const BLOCK_PROCESSING_VELOCITY: u32 = 2; /// Relay chain slot duration, in milliseconds. const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; @@ -246,6 +250,7 @@ impl pallet_balances::Config for Runtime { type MaxReserves = ConstU32<50>; type ReserveIdentifier = [u8; 8]; type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); type MaxHolds = ConstU32<0>; type MaxFreezes = ConstU32<0>; @@ -269,28 +274,53 @@ impl pallet_sudo::Config for Runtime { parameter_types! { pub const ReservedXcmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); pub const ReservedDmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); + pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; } +type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< + Runtime, + RELAY_CHAIN_SLOT_DURATION_MILLIS, + BLOCK_PROCESSING_VELOCITY, + UNINCLUDED_SEGMENT_CAPACITY, +>; + impl cumulus_pallet_parachain_system::Config for Runtime { + type WeightInfo = (); type RuntimeEvent = RuntimeEvent; type OnSystemEvent = (); type SelfParaId = parachain_info::Pallet; type OutboundXcmpMessageSource = XcmpQueue; - type DmpMessageHandler = DmpQueue; + type DmpQueue = frame_support::traits::EnqueueWithOrigin; type ReservedDmpWeight = ReservedDmpWeight; type XcmpMessageHandler = XcmpQueue; type ReservedXcmpWeight = ReservedXcmpWeight; - type CheckAssociatedRelayNumber = RelayNumberStrictlyIncreases; - type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< - Runtime, - RELAY_CHAIN_SLOT_DURATION_MILLIS, - BLOCK_PROCESSING_VELOCITY, - UNINCLUDED_SEGMENT_CAPACITY, - >; + type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; + type ConsensusHook = ConsensusHook; } impl parachain_info::Config for Runtime {} +parameter_types! 
{ + pub MessageQueueServiceWeight: Weight = Perbill::from_percent(35) * RuntimeBlockWeights::get().max_block; +} + +impl pallet_message_queue::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = (); + type MessageProcessor = xcm_builder::ProcessXcmMessage< + AggregateMessageOrigin, + xcm_executor::XcmExecutor, + RuntimeCall, + >; + type Size = u32; + // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: + type QueueChangeHandler = NarrowOriginToSibling; + type QueuePausedQuery = NarrowOriginToSibling; + type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type MaxStale = sp_core::ConstU32<8>; + type ServiceWeight = MessageQueueServiceWeight; +} + impl cumulus_pallet_aura_ext::Config for Runtime {} parameter_types! { @@ -464,11 +494,6 @@ pub type XcmRouter = WithUniqueTopic<( XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = EnsureXcmOrigin; @@ -490,8 +515,6 @@ impl pallet_xcm::Config for Runtime { type SovereignAccountOf = LocationToAccountId; type MaxLockers = ConstU32<8>; type WeightInfo = pallet_xcm::TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); @@ -504,20 +527,15 @@ impl cumulus_pallet_xcm::Config for Runtime { impl cumulus_pallet_xcmp_queue::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type XcmExecutor = XcmExecutor; type ChannelInfo = ParachainSystem; type VersionWrapper = (); - type ExecuteOverweightOrigin = EnsureRoot; + // Enqueue XCMP messages from siblings for later processing. + type XcmpQueue = TransformOrigin; + type MaxInboundSuspended = sp_core::ConstU32<1_000>; type ControllerOrigin = EnsureRoot; type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; type WeightInfo = cumulus_pallet_xcmp_queue::weights::SubstrateWeight; - type PriceForSiblingDelivery = (); -} - -impl cumulus_pallet_dmp_queue::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type XcmExecutor = XcmExecutor; - type ExecuteOverweightOrigin = frame_system::EnsureRoot; + type PriceForSiblingDelivery = NoPriceForMessageDelivery; } impl cumulus_ping::Config for Runtime { @@ -568,9 +586,9 @@ impl pallet_aura::Config for Runtime { type AuthorityId = AuraId; type DisabledValidators = (); type MaxAuthorities = ConstU32<100_000>; - type AllowMultipleBlocksPerSlot = ConstBool; + type AllowMultipleBlocksPerSlot = ConstBool; #[cfg(feature = "experimental")] - type SlotDuration = pallet_aura::MinimumPeriodTimesTwo; + type SlotDuration = ConstU64; } construct_runtime! { @@ -596,7 +614,8 @@ construct_runtime! { XcmpQueue: cumulus_pallet_xcmp_queue::{Pallet, Call, Storage, Event} = 50, PolkadotXcm: pallet_xcm::{Pallet, Call, Event, Origin, Config} = 51, CumulusXcm: cumulus_pallet_xcm::{Pallet, Call, Event, Origin} = 52, - DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event} = 53, + // RIP DmpQueue 53 + MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 54, Spambot: cumulus_ping::{Pallet, Call, Storage, Event} = 99, } @@ -607,7 +626,7 @@ pub type Balance = u128; /// Index of a transaction in the chain. pub type Nonce = u32; /// A hash of some data used by the chain. -pub type Hash = sp_core::H256; +pub type Hash = ::Output; /// An index to a block. 
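Editor's note: the timing and weight constants changed earlier in this rococo-parachain hunk halve the block time and quadruple the compute allowance. A back-of-the-envelope check is below; `WEIGHT_REF_TIME_PER_SECOND` mirrors the `frame_support` constant (10^12 ref-time units, i.e. picoseconds, per second), the other values restate the hunk.

// Sanity check of the new timing/weight constants (illustrative, standalone).
const WEIGHT_REF_TIME_PER_SECOND: u64 = 1_000_000_000_000;
const MILLISECS_PER_BLOCK: u64 = 6_000; // halved from 12_000
const MAX_BLOCK_REF_TIME: u64 = 2 * WEIGHT_REF_TIME_PER_SECOND; // was WEIGHT_REF_TIME_PER_SECOND / 2

fn main() {
    // Compute budget goes from 0.5 s per 12 s block (~4% of wall-clock time)
    // to 2 s per 6 s block (~33%), alongside BLOCK_PROCESSING_VELOCITY = 2 and
    // UNINCLUDED_SEGMENT_CAPACITY = 3 feeding the fixed-velocity consensus hook.
    assert_eq!(MAX_BLOCK_REF_TIME / WEIGHT_REF_TIME_PER_SECOND, 2);
    assert_eq!(12_000 / MILLISECS_PER_BLOCK, 2);
}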
pub type BlockNumber = u32; /// The address format for describing accounts. @@ -734,7 +753,7 @@ impl_runtime_apis! { impl sp_consensus_aura::AuraApi for Runtime { fn slot_duration() -> sp_consensus_aura::SlotDuration { - sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) + sp_consensus_aura::SlotDuration::from_millis(SLOT_DURATION) } fn authorities() -> Vec { @@ -807,6 +826,15 @@ impl_runtime_apis! { build_config::(config) } } + + impl cumulus_primitives_aura::AuraUnincludedSegmentApi for Runtime { + fn can_build_upon( + included_hash: ::Hash, + slot: cumulus_primitives_aura::Slot, + ) -> bool { + ConsensusHook::can_build_upon(included_hash, slot) + } + } } cumulus_pallet_parachain_system::register_validate_block! { diff --git a/cumulus/parachains/testnets-common/Cargo.toml b/cumulus/parachains/testnets-common/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..e39cf91d3abf97cf35d00acd59447475e559ea20 --- /dev/null +++ b/cumulus/parachains/testnets-common/Cargo.toml @@ -0,0 +1,44 @@ +[package] +name = "testnets-common" +version = "1.0.0" +authors.workspace = true +edition.workspace = true +description = "Logic and configuration specific to testnet parachain runtimes" +license = "Apache-2.0" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +smallvec = "1.11.0" + +# Substrate +frame-support = { path = "../../../substrate/frame/support", default-features = false } +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } + +# Polkadot +rococo-runtime-constants = { path = "../../../polkadot/runtime/rococo/constants", default-features = false} +westend-runtime-constants = { path = "../../../polkadot/runtime/westend/constants", default-features = false} +polkadot-core-primitives = { path = "../../../polkadot/core-primitives", default-features = false} + +# Cumulus + +[dev-dependencies] + +[build-dependencies] +substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder" } + +[features] +default = [ "std" ] +std = [ + "frame-support/std", + "polkadot-core-primitives/std", + "rococo-runtime-constants/std", + "sp-runtime/std", + "westend-runtime-constants/std", +] + +runtime-benchmarks = [ + "frame-support/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] diff --git a/cumulus/parachains/testnets-common/src/lib.rs b/cumulus/parachains/testnets-common/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..42d367bff27d64e05a9da4a2fa3eaf8e7f14cbb9 --- /dev/null +++ b/cumulus/parachains/testnets-common/src/lib.rs @@ -0,0 +1,30 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![cfg_attr(not(feature = "std"), no_std)] + +/// Since the parachains-common package is now published to crates.io, SP runtimes for testnets +/// will be adapted to use this package, and their config removed from the published common +/// package. 
Only the configs specific to rococo, westend and wococo will be moved here, and the +/// truly common logic will still be sourced from the parachains-common package. +/// +/// In practice this just means that instead of using e.g. `[parachains_common::westend::*]`, now +/// the westend configs will be in `[testnets_common::westend::*]`. +/// +/// TODO: edit all runtimes to remove the testnet configs as part of PR #1737 +/// +pub mod rococo; +pub mod westend; +pub mod wococo; diff --git a/cumulus/parachains/testnets-common/src/rococo.rs b/cumulus/parachains/testnets-common/src/rococo.rs new file mode 100644 index 0000000000000000000000000000000000000000..6e31def4b55b923f1596793e6cb114163551c017 --- /dev/null +++ b/cumulus/parachains/testnets-common/src/rococo.rs @@ -0,0 +1,119 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod currency { + use polkadot_core_primitives::Balance; + use rococo_runtime_constants as constants; + + /// The existential deposit. Set to 1/10 of its parent Relay Chain (v9010). + pub const EXISTENTIAL_DEPOSIT: Balance = constants::currency::EXISTENTIAL_DEPOSIT / 10; + + pub const UNITS: Balance = constants::currency::UNITS; + pub const CENTS: Balance = constants::currency::CENTS; + pub const MILLICENTS: Balance = constants::currency::MILLICENTS; + + pub const fn deposit(items: u32, bytes: u32) -> Balance { + // map to 1/100 of what the rococo relay chain charges + constants::currency::deposit(items, bytes) / 100 + } +} + +pub mod fee { + use frame_support::{ + pallet_prelude::Weight, + weights::{ + constants::ExtrinsicBaseWeight, FeePolynomial, WeightToFeeCoefficient, + WeightToFeeCoefficients, WeightToFeePolynomial, + }, + }; + use polkadot_core_primitives::Balance; + use smallvec::smallvec; + pub use sp_runtime::Perbill; + + /// The block saturation level. Fees will be updates based on this value. + pub const TARGET_BLOCK_FULLNESS: Perbill = Perbill::from_percent(25); + + /// Handles converting a weight scalar to a fee value, based on the scale and granularity of the + /// node's balance type. + /// + /// This should typically create a mapping between the following ranges: + /// - `[0, MAXIMUM_BLOCK_WEIGHT]` + /// - `[Balance::min, Balance::max]` + /// + /// Yet, it can be used for any other sort of change to weight-fee. Some examples being: + /// - Setting it to `0` will essentially disable the weight fee. + /// - Setting it to `1` will cause the literal `#[weight = x]` values to be charged. + pub struct WeightToFee; + impl frame_support::weights::WeightToFee for WeightToFee { + type Balance = Balance; + + fn weight_to_fee(weight: &Weight) -> Self::Balance { + let time_poly: FeePolynomial = RefTimeToFee::polynomial().into(); + let proof_poly: FeePolynomial = ProofSizeToFee::polynomial().into(); + + // Take the maximum instead of the sum to charge by the more scarce resource. 
+ time_poly.eval(weight.ref_time()).max(proof_poly.eval(weight.proof_size())) + } + } + + /// Maps the reference time component of `Weight` to a fee. + pub struct RefTimeToFee; + impl WeightToFeePolynomial for RefTimeToFee { + type Balance = Balance; + fn polynomial() -> WeightToFeeCoefficients { + // In Rococo, extrinsic base weight (smallest non-zero weight) is mapped to 1/10 CENT: + // The standard system parachain configuration is 1/10 of that, as in 1/100 CENT. + let p = super::currency::CENTS; + let q = 100 * Balance::from(ExtrinsicBaseWeight::get().ref_time()); + + smallvec![WeightToFeeCoefficient { + degree: 1, + negative: false, + coeff_frac: Perbill::from_rational(p % q, q), + coeff_integer: p / q, + }] + } + } + + /// Maps the proof size component of `Weight` to a fee. + pub struct ProofSizeToFee; + impl WeightToFeePolynomial for ProofSizeToFee { + type Balance = Balance; + fn polynomial() -> WeightToFeeCoefficients { + // Map 10kb proof to 1 CENT. + let p = super::currency::CENTS; + let q = 10_000; + + smallvec![WeightToFeeCoefficient { + degree: 1, + negative: false, + coeff_frac: Perbill::from_rational(p % q, q), + coeff_integer: p / q, + }] + } + } +} + +/// Consensus-related. +pub mod consensus { + /// Maximum number of blocks simultaneously accepted by the Runtime, not yet included + /// into the relay chain. + pub const UNINCLUDED_SEGMENT_CAPACITY: u32 = 1; + /// How many parachain blocks are processed by the relay chain per parent. Limits the + /// number of blocks authored per slot. + pub const BLOCK_PROCESSING_VELOCITY: u32 = 1; + /// Relay chain slot duration, in milliseconds. + pub const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; +} diff --git a/cumulus/parachains/testnets-common/src/westend.rs b/cumulus/parachains/testnets-common/src/westend.rs new file mode 100644 index 0000000000000000000000000000000000000000..0ae21e234549cce2e73a097836df85740993911f --- /dev/null +++ b/cumulus/parachains/testnets-common/src/westend.rs @@ -0,0 +1,140 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/// Universally recognized accounts. +pub mod account { + use frame_support::PalletId; + + /// Westend treasury pallet id, used to convert into AccountId - in Westend as a destination for + /// slashed funds. + pub const WESTEND_TREASURY_PALLET_ID: PalletId = PalletId(*b"py/trsry"); + /// Alliance pallet ID - used as a temporary place to deposit a slashed imbalance before the + /// teleport to the Treasury. + pub const ALLIANCE_PALLET_ID: PalletId = PalletId(*b"py/allia"); + /// Referenda pallet ID - used as a temporary place to deposit a slashed imbalance before the + /// teleport to the Treasury. + pub const REFERENDA_PALLET_ID: PalletId = PalletId(*b"py/refer"); + /// Ambassador Referenda pallet ID - used as a temporary place to deposit a slashed imbalance + /// before the teleport to the Treasury. 
+ pub const AMBASSADOR_REFERENDA_PALLET_ID: PalletId = PalletId(*b"py/amref"); +} + +pub mod currency { + use polkadot_core_primitives::Balance; + use westend_runtime_constants as constants; + + /// The existential deposit. Set to 1/10 of its parent Relay Chain. + pub const EXISTENTIAL_DEPOSIT: Balance = constants::currency::EXISTENTIAL_DEPOSIT / 10; + + pub const UNITS: Balance = constants::currency::UNITS; + pub const DOLLARS: Balance = UNITS; // 1_000_000_000_000 + pub const CENTS: Balance = constants::currency::CENTS; + pub const MILLICENTS: Balance = constants::currency::MILLICENTS; + pub const GRAND: Balance = constants::currency::GRAND; + + pub const fn deposit(items: u32, bytes: u32) -> Balance { + // 1/100 of Westend testnet + constants::currency::deposit(items, bytes) / 100 + } +} + +/// Fee-related. +pub mod fee { + use frame_support::{ + pallet_prelude::Weight, + weights::{ + constants::ExtrinsicBaseWeight, FeePolynomial, WeightToFeeCoefficient, + WeightToFeeCoefficients, WeightToFeePolynomial, + }, + }; + use polkadot_core_primitives::Balance; + use smallvec::smallvec; + pub use sp_runtime::Perbill; + + /// The block saturation level. Fees will be updated based on this value. + pub const TARGET_BLOCK_FULLNESS: Perbill = Perbill::from_percent(25); + + /// Handles converting a weight scalar to a fee value, based on the scale and granularity of the + /// node's balance type. + /// + /// This should typically create a mapping between the following ranges: + /// - [0, MAXIMUM_BLOCK_WEIGHT] + /// - [Balance::min, Balance::max] + /// + /// Yet, it can be used for any other sort of change to weight-fee. Some examples being: + /// - Setting it to `0` will essentially disable the weight fee. + /// - Setting it to `1` will cause the literal `#[weight = x]` values to be charged. + pub struct WeightToFee; + impl frame_support::weights::WeightToFee for WeightToFee { + type Balance = Balance; + + fn weight_to_fee(weight: &Weight) -> Self::Balance { + let time_poly: FeePolynomial = RefTimeToFee::polynomial().into(); + let proof_poly: FeePolynomial = ProofSizeToFee::polynomial().into(); + + // Take the maximum instead of the sum to charge by the more scarce resource. + time_poly.eval(weight.ref_time()).max(proof_poly.eval(weight.proof_size())) + } + } + + /// Maps the reference time component of `Weight` to a fee. + pub struct RefTimeToFee; + impl WeightToFeePolynomial for RefTimeToFee { + type Balance = Balance; + fn polynomial() -> WeightToFeeCoefficients { + // In Westend, extrinsic base weight (smallest non-zero weight) is mapped to 1/10 CENT: + // The standard system parachain configuration is 1/10 of that, as in 1/100 CENT. + let p = super::currency::CENTS; + let q = 100 * Balance::from(ExtrinsicBaseWeight::get().ref_time()); + + smallvec![WeightToFeeCoefficient { + degree: 1, + negative: false, + coeff_frac: Perbill::from_rational(p % q, q), + coeff_integer: p / q, + }] + } + } + + /// Maps the proof size component of `Weight` to a fee. + pub struct ProofSizeToFee; + impl WeightToFeePolynomial for ProofSizeToFee { + type Balance = Balance; + fn polynomial() -> WeightToFeeCoefficients { + // Map 10kb proof to 1 CENT. + let p = super::currency::CENTS; + let q = 10_000; + + smallvec![WeightToFeeCoefficient { + degree: 1, + negative: false, + coeff_frac: Perbill::from_rational(p % q, q), + coeff_integer: p / q, + }] + } + } +} + +/// Consensus-related. +pub mod consensus { + /// Maximum number of blocks simultaneously accepted by the Runtime, not yet included into the + /// relay chain. 
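Editor's note: both testnet fee modules above map the extrinsic base weight to 1/100 of a CENT, map 10 kB of proof to 1 CENT, and then charge by the scarcer of the two weight dimensions. A small worked example follows; the `cents` and `base_ref_time` values are illustrative stand-ins, not the benchmarked `ExtrinsicBaseWeight` or real currency constants.

// Worked example of the linear weight-to-fee mapping defined above.
fn main() {
    let cents: u128 = 1_000_000_000; // illustrative CENTS value only
    let base_ref_time: u128 = 125_000_000; // illustrative base-extrinsic ref-time

    // RefTimeToFee: degree-1 polynomial with coefficient p/q, p = CENTS and
    // q = 100 * base_ref_time, so the base extrinsic costs exactly CENTS / 100.
    let p = cents;
    let q = 100 * base_ref_time;
    let ref_time_fee = |ref_time: u128| ref_time * p / q;
    assert_eq!(ref_time_fee(base_ref_time), cents / 100);

    // ProofSizeToFee: 10_000 bytes of proof map to 1 CENT.
    let proof_fee = |proof_size: u128| proof_size * cents / 10_000;
    assert_eq!(proof_fee(10_000), cents);

    // WeightToFee takes the maximum of the two components, charging for the
    // more scarce resource rather than summing them.
    let weight_to_fee =
        |ref_time: u128, proof_size: u128| ref_time_fee(ref_time).max(proof_fee(proof_size));
    assert_eq!(weight_to_fee(base_ref_time, 10_000), cents);
}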
+ pub const UNINCLUDED_SEGMENT_CAPACITY: u32 = 1; + /// How many parachain blocks are processed by the relay chain per parent. Limits the number of + /// blocks authored per slot. + pub const BLOCK_PROCESSING_VELOCITY: u32 = 1; + /// Relay chain slot duration, in milliseconds. + pub const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; +} diff --git a/cumulus/parachains/testnets-common/src/wococo.rs b/cumulus/parachains/testnets-common/src/wococo.rs new file mode 100644 index 0000000000000000000000000000000000000000..5cd6121135a3321369fee2132b696aef8a2bae81 --- /dev/null +++ b/cumulus/parachains/testnets-common/src/wococo.rs @@ -0,0 +1,17 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// re-export rococo +pub use crate::rococo::{consensus, currency, fee}; diff --git a/cumulus/polkadot-parachain/Cargo.toml b/cumulus/polkadot-parachain/Cargo.toml index 778b056b89d15b79d4da045e94c660f7960ba102..d5deda9e7bfdfb9f842fee24f4b4a46641f0b9d9 100644 --- a/cumulus/polkadot-parachain/Cargo.toml +++ b/cumulus/polkadot-parachain/Cargo.toml @@ -5,6 +5,7 @@ authors.workspace = true build = "build.rs" edition.workspace = true description = "Runs a polkadot parachain node which could be a collator." 
+license = "Apache-2.0" [[bin]] name = "polkadot-parachain" @@ -18,21 +19,25 @@ futures = "0.3.28" hex-literal = "0.4.1" log = "0.4.20" serde = { version = "1.0.188", features = ["derive"] } -serde_json = "1.0.107" +serde_json = "1.0.108" # Local rococo-parachain-runtime = { path = "../parachains/runtimes/testing/rococo-parachain" } shell-runtime = { path = "../parachains/runtimes/starters/shell" } +glutton-westend-runtime = { path = "../parachains/runtimes/glutton/glutton-westend" } glutton-runtime = { path = "../parachains/runtimes/glutton/glutton-kusama" } seedling-runtime = { path = "../parachains/runtimes/starters/seedling" } asset-hub-polkadot-runtime = { path = "../parachains/runtimes/assets/asset-hub-polkadot" } asset-hub-kusama-runtime = { path = "../parachains/runtimes/assets/asset-hub-kusama" } +asset-hub-rococo-runtime = { path = "../parachains/runtimes/assets/asset-hub-rococo" } asset-hub-westend-runtime = { path = "../parachains/runtimes/assets/asset-hub-westend" } collectives-polkadot-runtime = { path = "../parachains/runtimes/collectives/collectives-polkadot" } +collectives-westend-runtime = { path = "../parachains/runtimes/collectives/collectives-westend" } contracts-rococo-runtime = { path = "../parachains/runtimes/contracts/contracts-rococo" } bridge-hub-rococo-runtime = { path = "../parachains/runtimes/bridge-hubs/bridge-hub-rococo" } bridge-hub-kusama-runtime = { path = "../parachains/runtimes/bridge-hubs/bridge-hub-kusama" } bridge-hub-polkadot-runtime = { path = "../parachains/runtimes/bridge-hubs/bridge-hub-polkadot" } +bridge-hub-westend-runtime = { path = "../parachains/runtimes/bridge-hubs/bridge-hub-westend" } penpal-runtime = { path = "../parachains/runtimes/testing/penpal" } jsonrpsee = { version = "0.16.2", features = ["server"] } parachains-common = { path = "../parachains/common" } @@ -45,6 +50,7 @@ sp-io = { path = "../../substrate/primitives/io" } sp-core = { path = "../../substrate/primitives/core" } sp-session = { path = "../../substrate/primitives/session" } sc-consensus = { path = "../../substrate/client/consensus/common" } +sp-tracing = { path = "../../substrate/primitives/tracing" } sc-cli = { path = "../../substrate/client/cli" } sc-client-api = { path = "../../substrate/client/api" } sc-executor = { path = "../../substrate/client/executor" } @@ -108,15 +114,20 @@ default = [] runtime-benchmarks = [ "asset-hub-kusama-runtime/runtime-benchmarks", "asset-hub-polkadot-runtime/runtime-benchmarks", + "asset-hub-rococo-runtime/runtime-benchmarks", "asset-hub-westend-runtime/runtime-benchmarks", "bridge-hub-kusama-runtime/runtime-benchmarks", "bridge-hub-polkadot-runtime/runtime-benchmarks", "bridge-hub-rococo-runtime/runtime-benchmarks", + "bridge-hub-westend-runtime/runtime-benchmarks", "collectives-polkadot-runtime/runtime-benchmarks", + "collectives-westend-runtime/runtime-benchmarks", "contracts-rococo-runtime/runtime-benchmarks", + "cumulus-primitives-core/runtime-benchmarks", "frame-benchmarking-cli/runtime-benchmarks", "frame-benchmarking/runtime-benchmarks", "glutton-runtime/runtime-benchmarks", + "glutton-westend-runtime/runtime-benchmarks", "parachains-common/runtime-benchmarks", "penpal-runtime/runtime-benchmarks", "polkadot-cli/runtime-benchmarks", @@ -129,13 +140,17 @@ runtime-benchmarks = [ try-runtime = [ "asset-hub-kusama-runtime/try-runtime", "asset-hub-polkadot-runtime/try-runtime", + "asset-hub-rococo-runtime/try-runtime", "asset-hub-westend-runtime/try-runtime", "bridge-hub-kusama-runtime/try-runtime", 
"bridge-hub-polkadot-runtime/try-runtime", "bridge-hub-rococo-runtime/try-runtime", + "bridge-hub-westend-runtime/try-runtime", "collectives-polkadot-runtime/try-runtime", + "collectives-westend-runtime/try-runtime", "contracts-rococo-runtime/try-runtime", "glutton-runtime/try-runtime", + "glutton-westend-runtime/try-runtime", "penpal-runtime/try-runtime", "polkadot-cli/try-runtime", "polkadot-service/try-runtime", diff --git a/cumulus/polkadot-parachain/chain-specs/asset-hub-rococo.json b/cumulus/polkadot-parachain/chain-specs/asset-hub-rococo.json new file mode 120000 index 0000000000000000000000000000000000000000..fca916ab5502a179e3210831109cca53856f382a --- /dev/null +++ b/cumulus/polkadot-parachain/chain-specs/asset-hub-rococo.json @@ -0,0 +1 @@ +../../parachains/chain-specs/asset-hub-rococo.json \ No newline at end of file diff --git a/cumulus/polkadot-parachain/chain-specs/asset-hub-wococo.json b/cumulus/polkadot-parachain/chain-specs/asset-hub-wococo.json new file mode 120000 index 0000000000000000000000000000000000000000..30a63a9fc786bd542e38c2dd2cf769dc4ebde3bf --- /dev/null +++ b/cumulus/polkadot-parachain/chain-specs/asset-hub-wococo.json @@ -0,0 +1 @@ +../../parachains/chain-specs/asset-hub-wococo.json \ No newline at end of file diff --git a/cumulus/polkadot-parachain/src/chain_spec/asset_hubs.rs b/cumulus/polkadot-parachain/src/chain_spec/asset_hubs.rs index c1edeb98cd0aa3b50e1c8efbe47085144f70900e..a8d3d2975adab567678802cbc7d3c070b8b132b1 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/asset_hubs.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/asset_hubs.rs @@ -30,6 +30,8 @@ pub type AssetHubKusamaChainSpec = sc_service::GenericChainSpec; pub type AssetHubWestendChainSpec = sc_service::GenericChainSpec; +pub type AssetHubRococoChainSpec = + sc_service::GenericChainSpec; const ASSET_HUB_POLKADOT_ED: AssetHubBalance = parachains_common::polkadot::currency::EXISTENTIAL_DEPOSIT; @@ -37,6 +39,8 @@ const ASSET_HUB_KUSAMA_ED: AssetHubBalance = parachains_common::kusama::currency::EXISTENTIAL_DEPOSIT; const ASSET_HUB_WESTEND_ED: AssetHubBalance = parachains_common::westend::currency::EXISTENTIAL_DEPOSIT; +const ASSET_HUB_ROCOCO_ED: AssetHubBalance = + parachains_common::westend::currency::EXISTENTIAL_DEPOSIT; /// Generate the session keys from individual elements. /// @@ -54,6 +58,13 @@ pub fn asset_hub_kusama_session_keys(keys: AuraId) -> asset_hub_kusama_runtime:: asset_hub_kusama_runtime::SessionKeys { aura: keys } } +/// Generate the session keys from individual elements. +/// +/// The input must be a tuple of individual keys (a single arg for now since we have just one key). +pub fn asset_hub_rococo_session_keys(keys: AuraId) -> asset_hub_rococo_runtime::SessionKeys { + asset_hub_rococo_runtime::SessionKeys { aura: keys } +} + /// Generate the session keys from individual elements. /// /// The input must be a tuple of individual keys (a single arg for now since we have just one key). @@ -67,35 +78,30 @@ pub fn asset_hub_polkadot_development_config() -> AssetHubPolkadotChainSpec { properties.insert("tokenSymbol".into(), "DOT".into()); properties.insert("tokenDecimals".into(), 10.into()); - AssetHubPolkadotChainSpec::from_genesis( - // Name - "Polkadot Asset Hub Development", - // ID - "asset-hub-polkadot-dev", - ChainType::Local, - move || { - asset_hub_polkadot_genesis( - // initial collators. 
- vec![( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed::("Alice"), - )], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - ], - 1000.into(), - ) - }, - Vec::new(), - None, - None, - None, - Some(properties), + AssetHubPolkadotChainSpec::builder( + asset_hub_polkadot_runtime::WASM_BINARY + .expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "polkadot-dev".into(), para_id: 1000 }, ) + .with_name("Polkadot Asset Hub Development") + .with_id("asset-hub-polkadot-dev") + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(asset_hub_polkadot_genesis( + // initial collators. + vec![( + get_account_id_from_seed::("Alice"), + get_collator_keys_from_seed::("Alice"), + )], + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + ], + 1000.into(), + )) + .with_properties(properties) + .build() } pub fn asset_hub_polkadot_local_config() -> AssetHubPolkadotChainSpec { @@ -104,49 +110,45 @@ pub fn asset_hub_polkadot_local_config() -> AssetHubPolkadotChainSpec { properties.insert("tokenSymbol".into(), "DOT".into()); properties.insert("tokenDecimals".into(), 10.into()); - AssetHubPolkadotChainSpec::from_genesis( - // Name - "Polkadot Asset Hub Local", - // ID - "asset-hub-polkadot-local", - ChainType::Local, - move || { - asset_hub_polkadot_genesis( - // initial collators. - vec![ - ( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed::("Alice"), - ), - ( - get_account_id_from_seed::("Bob"), - get_collator_keys_from_seed::("Bob"), - ), - ], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - ], - 1000.into(), - ) - }, - Vec::new(), - None, - None, - None, - Some(properties), + AssetHubPolkadotChainSpec::builder( + asset_hub_polkadot_runtime::WASM_BINARY + .expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "polkadot-local".into(), para_id: 1000 }, ) + .with_name("Polkadot Asset Hub Local") + .with_id("asset-hub-polkadot-local") + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(asset_hub_polkadot_genesis( + // initial collators. 
+ vec![ + ( + get_account_id_from_seed::("Alice"), + get_collator_keys_from_seed::("Alice"), + ), + ( + get_account_id_from_seed::("Bob"), + get_collator_keys_from_seed::("Bob"), + ), + ], + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Charlie"), + get_account_id_from_seed::("Dave"), + get_account_id_from_seed::("Eve"), + get_account_id_from_seed::("Ferdie"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + get_account_id_from_seed::("Charlie//stash"), + get_account_id_from_seed::("Dave//stash"), + get_account_id_from_seed::("Eve//stash"), + get_account_id_from_seed::("Ferdie//stash"), + ], + 1000.into(), + )) + .with_boot_nodes(Vec::new()) + .with_properties(properties) + .build() } // Not used for syncing, but just to determine the genesis values set for the upgrade from shell. @@ -156,89 +158,81 @@ pub fn asset_hub_polkadot_config() -> AssetHubPolkadotChainSpec { properties.insert("tokenSymbol".into(), "DOT".into()); properties.insert("tokenDecimals".into(), 10.into()); - AssetHubPolkadotChainSpec::from_genesis( - // Name - "Polkadot Asset Hub", - // ID - "asset-hub-polkadot", - ChainType::Live, - move || { - asset_hub_polkadot_genesis( - // initial collators. - vec![ - ( - hex!("4c3d674d2a01060f0ded218e5dcc6f90c1726f43df79885eb3e22d97a20d5421") - .into(), - hex!("4c3d674d2a01060f0ded218e5dcc6f90c1726f43df79885eb3e22d97a20d5421") - .unchecked_into(), - ), - ( - hex!("c7d7d38d16bc23c6321152c50306212dc22c0efc04a2e52b5cccfc31ab3d7811") - .into(), - hex!("c7d7d38d16bc23c6321152c50306212dc22c0efc04a2e52b5cccfc31ab3d7811") - .unchecked_into(), - ), - ( - hex!("c5c07ba203d7375675f5c1ebe70f0a5bb729ae57b48bcc877fcc2ab21309b762") - .into(), - hex!("c5c07ba203d7375675f5c1ebe70f0a5bb729ae57b48bcc877fcc2ab21309b762") - .unchecked_into(), - ), - ( - hex!("0b2d0013fb974794bd7aa452465b567d48ef70373fe231a637c1fb7c547e85b3") - .into(), - hex!("0b2d0013fb974794bd7aa452465b567d48ef70373fe231a637c1fb7c547e85b3") - .unchecked_into(), - ), - ], - vec![], - 1000u32.into(), - ) - }, - vec![ - "/ip4/34.65.251.121/tcp/30334/p2p/12D3KooWG3GrM6XKMM4gp3cvemdwUvu96ziYoJmqmetLZBXE8bSa".parse().unwrap(), - "/ip4/34.65.35.228/tcp/30334/p2p/12D3KooWMRyTLrCEPcAQD6c4EnudL3vVzg9zji3whvsMYPUYevpq".parse().unwrap(), - "/ip4/34.83.247.146/tcp/30334/p2p/12D3KooWE4jFh5FpJDkWVZhnWtFnbSqRhdjvC7Dp9b8b3FTuubQC".parse().unwrap(), - "/ip4/104.199.117.230/tcp/30334/p2p/12D3KooWG9R8pVXKumVo2rdkeVD4j5PVhRTqmYgLHY3a4yPYgLqM".parse().unwrap(), - ], - None, - None, - None, - Some(properties), + AssetHubPolkadotChainSpec::builder( + asset_hub_polkadot_runtime::WASM_BINARY + .expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "polkadot".into(), para_id: 1000 }, ) + .with_name("Polkadot Asset Hub") + .with_id("asset-hub-polkadot") + .with_chain_type(ChainType::Live) + .with_genesis_config_patch(asset_hub_polkadot_genesis( + // initial collators. 
+ vec![ + ( + hex!("4c3d674d2a01060f0ded218e5dcc6f90c1726f43df79885eb3e22d97a20d5421").into(), + hex!("4c3d674d2a01060f0ded218e5dcc6f90c1726f43df79885eb3e22d97a20d5421") + .unchecked_into(), + ), + ( + hex!("c7d7d38d16bc23c6321152c50306212dc22c0efc04a2e52b5cccfc31ab3d7811").into(), + hex!("c7d7d38d16bc23c6321152c50306212dc22c0efc04a2e52b5cccfc31ab3d7811") + .unchecked_into(), + ), + ( + hex!("c5c07ba203d7375675f5c1ebe70f0a5bb729ae57b48bcc877fcc2ab21309b762").into(), + hex!("c5c07ba203d7375675f5c1ebe70f0a5bb729ae57b48bcc877fcc2ab21309b762") + .unchecked_into(), + ), + ( + hex!("0b2d0013fb974794bd7aa452465b567d48ef70373fe231a637c1fb7c547e85b3").into(), + hex!("0b2d0013fb974794bd7aa452465b567d48ef70373fe231a637c1fb7c547e85b3") + .unchecked_into(), + ), + ], + vec![], + 1000u32.into(), + )) + .with_boot_nodes(vec![ + "/ip4/34.65.251.121/tcp/30334/p2p/12D3KooWG3GrM6XKMM4gp3cvemdwUvu96ziYoJmqmetLZBXE8bSa" + .parse() + .unwrap(), + "/ip4/34.65.35.228/tcp/30334/p2p/12D3KooWMRyTLrCEPcAQD6c4EnudL3vVzg9zji3whvsMYPUYevpq" + .parse() + .unwrap(), + "/ip4/34.83.247.146/tcp/30334/p2p/12D3KooWE4jFh5FpJDkWVZhnWtFnbSqRhdjvC7Dp9b8b3FTuubQC" + .parse() + .unwrap(), + "/ip4/104.199.117.230/tcp/30334/p2p/12D3KooWG9R8pVXKumVo2rdkeVD4j5PVhRTqmYgLHY3a4yPYgLqM" + .parse() + .unwrap(), + ]) + .with_properties(properties) + .build() } fn asset_hub_polkadot_genesis( invulnerables: Vec<(AccountId, AssetHubPolkadotAuraId)>, endowed_accounts: Vec, id: ParaId, -) -> asset_hub_polkadot_runtime::RuntimeGenesisConfig { - asset_hub_polkadot_runtime::RuntimeGenesisConfig { - system: asset_hub_polkadot_runtime::SystemConfig { - code: asset_hub_polkadot_runtime::WASM_BINARY - .expect("WASM binary was not build, please build it!") - .to_vec(), - ..Default::default() - }, - balances: asset_hub_polkadot_runtime::BalancesConfig { - balances: endowed_accounts +) -> serde_json::Value { + serde_json::json!( { + "balances": { + "balances": endowed_accounts .iter() .cloned() .map(|k| (k, ASSET_HUB_POLKADOT_ED * 4096)) - .collect(), + .collect::>(), }, - parachain_info: asset_hub_polkadot_runtime::ParachainInfoConfig { - parachain_id: id, - ..Default::default() + "parachainInfo": { + "parachainId": id, }, - collator_selection: asset_hub_polkadot_runtime::CollatorSelectionConfig { - invulnerables: invulnerables.iter().cloned().map(|(acc, _)| acc).collect(), - candidacy_bond: ASSET_HUB_POLKADOT_ED * 16, - ..Default::default() + "collatorSelection": { + "invulnerables": invulnerables.iter().cloned().map(|(acc, _)| acc).collect::>(), + "candidacyBond": ASSET_HUB_POLKADOT_ED * 16, }, - session: asset_hub_polkadot_runtime::SessionConfig { - keys: invulnerables + "session": { + "keys": invulnerables .into_iter() .map(|(acc, aura)| { ( @@ -247,18 +241,12 @@ fn asset_hub_polkadot_genesis( asset_hub_polkadot_session_keys(aura), // session keys ) }) - .collect(), + .collect::>(), }, - // no need to pass anything to aura, in fact it will panic if we do. Session will take care - // of this. 
- aura: Default::default(), - aura_ext: Default::default(), - parachain_system: Default::default(), - polkadot_xcm: asset_hub_polkadot_runtime::PolkadotXcmConfig { - safe_xcm_version: Some(SAFE_XCM_VERSION), - ..Default::default() - }, - } + "polkadotXcm": { + "safeXcmVersion": Some(SAFE_XCM_VERSION), + } + }) } pub fn asset_hub_kusama_development_config() -> AssetHubKusamaChainSpec { @@ -267,35 +255,29 @@ pub fn asset_hub_kusama_development_config() -> AssetHubKusamaChainSpec { properties.insert("tokenSymbol".into(), "KSM".into()); properties.insert("tokenDecimals".into(), 12.into()); - AssetHubKusamaChainSpec::from_genesis( - // Name - "Kusama Asset Hub Development", - // ID - "asset-hub-kusama-dev", - ChainType::Local, - move || { - asset_hub_kusama_genesis( - // initial collators. - vec![( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed::("Alice"), - )], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - ], - 1000.into(), - ) - }, - Vec::new(), - None, - None, - None, - Some(properties), + AssetHubKusamaChainSpec::builder( + asset_hub_kusama_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "kusama-dev".into(), para_id: 1000 }, ) + .with_name("Kusama Asset Hub Development") + .with_id("asset-hub-kusama-dev") + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(asset_hub_kusama_genesis( + // initial collators. + vec![( + get_account_id_from_seed::("Alice"), + get_collator_keys_from_seed::("Alice"), + )], + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + ], + 1000.into(), + )) + .with_properties(properties) + .build() } pub fn asset_hub_kusama_local_config() -> AssetHubKusamaChainSpec { @@ -304,49 +286,43 @@ pub fn asset_hub_kusama_local_config() -> AssetHubKusamaChainSpec { properties.insert("tokenSymbol".into(), "KSM".into()); properties.insert("tokenDecimals".into(), 12.into()); - AssetHubKusamaChainSpec::from_genesis( - // Name - "Kusama Asset Hub Local", - // ID - "asset-hub-kusama-local", - ChainType::Local, - move || { - asset_hub_kusama_genesis( - // initial collators. - vec![ - ( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed::("Alice"), - ), - ( - get_account_id_from_seed::("Bob"), - get_collator_keys_from_seed::("Bob"), - ), - ], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - ], - 1000.into(), - ) - }, - Vec::new(), - None, - None, - None, - Some(properties), + AssetHubKusamaChainSpec::builder( + asset_hub_kusama_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "kusama-local".into(), para_id: 1000 }, ) + .with_name("Kusama Asset Hub Local") + .with_id("asset-hub-kusama-local") + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(asset_hub_kusama_genesis( + // initial collators. 
+ vec![ + ( + get_account_id_from_seed::("Alice"), + get_collator_keys_from_seed::("Alice"), + ), + ( + get_account_id_from_seed::("Bob"), + get_collator_keys_from_seed::("Bob"), + ), + ], + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Charlie"), + get_account_id_from_seed::("Dave"), + get_account_id_from_seed::("Eve"), + get_account_id_from_seed::("Ferdie"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + get_account_id_from_seed::("Charlie//stash"), + get_account_id_from_seed::("Dave//stash"), + get_account_id_from_seed::("Eve//stash"), + get_account_id_from_seed::("Ferdie//stash"), + ], + 1000.into(), + )) + .with_properties(properties) + .build() } pub fn asset_hub_kusama_config() -> AssetHubKusamaChainSpec { @@ -355,84 +331,66 @@ pub fn asset_hub_kusama_config() -> AssetHubKusamaChainSpec { properties.insert("tokenSymbol".into(), "KSM".into()); properties.insert("tokenDecimals".into(), 12.into()); - AssetHubKusamaChainSpec::from_genesis( - // Name - "Kusama Asset Hub", - // ID - "asset-hub-kusama", - ChainType::Live, - move || { - asset_hub_kusama_genesis( - // initial collators. - vec![ - ( - hex!("50673d59020488a4ffc9d8c6de3062a65977046e6990915617f85fef6d349730") - .into(), - hex!("50673d59020488a4ffc9d8c6de3062a65977046e6990915617f85fef6d349730") - .unchecked_into(), - ), - ( - hex!("fe8102dbc244e7ea2babd9f53236d67403b046154370da5c3ea99def0bd0747a") - .into(), - hex!("fe8102dbc244e7ea2babd9f53236d67403b046154370da5c3ea99def0bd0747a") - .unchecked_into(), - ), - ( - hex!("38144b5398e5d0da5ec936a3af23f5a96e782f676ab19d45f29075ee92eca76a") - .into(), - hex!("38144b5398e5d0da5ec936a3af23f5a96e782f676ab19d45f29075ee92eca76a") - .unchecked_into(), - ), - ( - hex!("3253947640e309120ae70fa458dcacb915e2ddd78f930f52bd3679ec63fc4415") - .into(), - hex!("3253947640e309120ae70fa458dcacb915e2ddd78f930f52bd3679ec63fc4415") - .unchecked_into(), - ), - ], - Vec::new(), - 1000.into(), - ) - }, - Vec::new(), - None, - None, - None, - Some(properties), + AssetHubKusamaChainSpec::builder( + asset_hub_kusama_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "kusama".into(), para_id: 1000 }, ) + .with_name("Kusama Asset Hub") + .with_id("asset-hub-kusama") + .with_chain_type(ChainType::Live) + .with_genesis_config_patch(asset_hub_kusama_genesis( + // initial collators. 
+ vec![ + ( + hex!("50673d59020488a4ffc9d8c6de3062a65977046e6990915617f85fef6d349730").into(), + hex!("50673d59020488a4ffc9d8c6de3062a65977046e6990915617f85fef6d349730") + .unchecked_into(), + ), + ( + hex!("fe8102dbc244e7ea2babd9f53236d67403b046154370da5c3ea99def0bd0747a").into(), + hex!("fe8102dbc244e7ea2babd9f53236d67403b046154370da5c3ea99def0bd0747a") + .unchecked_into(), + ), + ( + hex!("38144b5398e5d0da5ec936a3af23f5a96e782f676ab19d45f29075ee92eca76a").into(), + hex!("38144b5398e5d0da5ec936a3af23f5a96e782f676ab19d45f29075ee92eca76a") + .unchecked_into(), + ), + ( + hex!("3253947640e309120ae70fa458dcacb915e2ddd78f930f52bd3679ec63fc4415").into(), + hex!("3253947640e309120ae70fa458dcacb915e2ddd78f930f52bd3679ec63fc4415") + .unchecked_into(), + ), + ], + Vec::new(), + 1000.into(), + )) + .with_properties(properties) + .build() } fn asset_hub_kusama_genesis( invulnerables: Vec<(AccountId, AuraId)>, endowed_accounts: Vec, id: ParaId, -) -> asset_hub_kusama_runtime::RuntimeGenesisConfig { - asset_hub_kusama_runtime::RuntimeGenesisConfig { - system: asset_hub_kusama_runtime::SystemConfig { - code: asset_hub_kusama_runtime::WASM_BINARY - .expect("WASM binary was not build, please build it!") - .to_vec(), - ..Default::default() - }, - balances: asset_hub_kusama_runtime::BalancesConfig { - balances: endowed_accounts +) -> serde_json::Value { + serde_json::json!( { + "balances": { + "balances": endowed_accounts .iter() .cloned() .map(|k| (k, ASSET_HUB_KUSAMA_ED * 524_288)) - .collect(), + .collect::>(), }, - parachain_info: asset_hub_kusama_runtime::ParachainInfoConfig { - parachain_id: id, - ..Default::default() + "parachainInfo": { + "parachainId": id, }, - collator_selection: asset_hub_kusama_runtime::CollatorSelectionConfig { - invulnerables: invulnerables.iter().cloned().map(|(acc, _)| acc).collect(), - candidacy_bond: ASSET_HUB_KUSAMA_ED * 16, - ..Default::default() + "collatorSelection": { + "invulnerables": invulnerables.iter().cloned().map(|(acc, _)| acc).collect::>(), + "candidacyBond": ASSET_HUB_KUSAMA_ED * 16, }, - session: asset_hub_kusama_runtime::SessionConfig { - keys: invulnerables + "session": { + "keys": invulnerables .into_iter() .map(|(acc, aura)| { ( @@ -441,16 +399,12 @@ fn asset_hub_kusama_genesis( asset_hub_kusama_session_keys(aura), // session keys ) }) - .collect(), + .collect::>(), }, - aura: Default::default(), - aura_ext: Default::default(), - parachain_system: Default::default(), - polkadot_xcm: asset_hub_kusama_runtime::PolkadotXcmConfig { - safe_xcm_version: Some(SAFE_XCM_VERSION), - ..Default::default() + "polkadotXcm": { + "safeXcmVersion": Some(SAFE_XCM_VERSION), }, - } + }) } pub fn asset_hub_westend_development_config() -> AssetHubWestendChainSpec { @@ -458,35 +412,31 @@ pub fn asset_hub_westend_development_config() -> AssetHubWestendChainSpec { properties.insert("tokenSymbol".into(), "WND".into()); properties.insert("tokenDecimals".into(), 12.into()); - AssetHubWestendChainSpec::from_genesis( - // Name - "Westend Asset Hub Development", - // ID - "asset-hub-westend-dev", - ChainType::Local, - move || { - asset_hub_westend_genesis( - // initial collators. 
- vec![( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed::("Alice"), - )], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - ], - 1000.into(), - ) - }, - Vec::new(), - None, - None, - None, - Some(properties), + AssetHubWestendChainSpec::builder( + asset_hub_westend_runtime::WASM_BINARY + .expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "westend".into(), para_id: 1000 }, ) + .with_name("Westend Asset Hub Development") + .with_id("asset-hub-westend-dev") + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(asset_hub_westend_genesis( + // initial collators. + vec![( + get_account_id_from_seed::("Alice"), + get_collator_keys_from_seed::("Alice"), + )], + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + ], + parachains_common::westend::currency::UNITS * 1_000_000, + 1000.into(), + )) + .with_properties(properties) + .build() } pub fn asset_hub_westend_local_config() -> AssetHubWestendChainSpec { @@ -494,49 +444,45 @@ pub fn asset_hub_westend_local_config() -> AssetHubWestendChainSpec { properties.insert("tokenSymbol".into(), "WND".into()); properties.insert("tokenDecimals".into(), 12.into()); - AssetHubWestendChainSpec::from_genesis( - // Name - "Westend Asset Hub Local", - // ID - "asset-hub-westend-local", - ChainType::Local, - move || { - asset_hub_westend_genesis( - // initial collators. - vec![ - ( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed::("Alice"), - ), - ( - get_account_id_from_seed::("Bob"), - get_collator_keys_from_seed::("Bob"), - ), - ], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - ], - 1000.into(), - ) - }, - Vec::new(), - None, - None, - None, - Some(properties), + AssetHubWestendChainSpec::builder( + asset_hub_westend_runtime::WASM_BINARY + .expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "westend-local".into(), para_id: 1000 }, ) + .with_name("Westend Asset Hub Local") + .with_id("asset-hub-westend-local") + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(asset_hub_westend_genesis( + // initial collators. 
+ vec![ + ( + get_account_id_from_seed::("Alice"), + get_collator_keys_from_seed::("Alice"), + ), + ( + get_account_id_from_seed::("Bob"), + get_collator_keys_from_seed::("Bob"), + ), + ], + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Charlie"), + get_account_id_from_seed::("Dave"), + get_account_id_from_seed::("Eve"), + get_account_id_from_seed::("Ferdie"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + get_account_id_from_seed::("Charlie//stash"), + get_account_id_from_seed::("Dave//stash"), + get_account_id_from_seed::("Eve//stash"), + get_account_id_from_seed::("Ferdie//stash"), + ], + parachains_common::westend::currency::UNITS * 1_000_000, + 1000.into(), + )) + .with_properties(properties) + .build() } pub fn asset_hub_westend_config() -> AssetHubWestendChainSpec { @@ -544,102 +490,275 @@ pub fn asset_hub_westend_config() -> AssetHubWestendChainSpec { properties.insert("tokenSymbol".into(), "WND".into()); properties.insert("tokenDecimals".into(), 12.into()); - AssetHubWestendChainSpec::from_genesis( - // Name - "Westend Asset Hub", - // ID - "asset-hub-westend", - ChainType::Live, - move || { - asset_hub_westend_genesis( - // initial collators. - vec![ - ( - hex!("9cfd429fa002114f33c1d3e211501d62830c9868228eb3b4b8ae15a83de04325") - .into(), - hex!("9cfd429fa002114f33c1d3e211501d62830c9868228eb3b4b8ae15a83de04325") - .unchecked_into(), - ), - ( - hex!("12a03fb4e7bda6c9a07ec0a11d03c24746943e054ff0bb04938970104c783876") - .into(), - hex!("12a03fb4e7bda6c9a07ec0a11d03c24746943e054ff0bb04938970104c783876") - .unchecked_into(), - ), - ( - hex!("1256436307dfde969324e95b8c62cb9101f520a39435e6af0f7ac07b34e1931f") - .into(), - hex!("1256436307dfde969324e95b8c62cb9101f520a39435e6af0f7ac07b34e1931f") - .unchecked_into(), - ), - ( - hex!("98102b7bca3f070f9aa19f58feed2c0a4e107d203396028ec17a47e1ed80e322") - .into(), - hex!("98102b7bca3f070f9aa19f58feed2c0a4e107d203396028ec17a47e1ed80e322") - .unchecked_into(), - ), - ], - Vec::new(), - 1000.into(), - ) - }, - Vec::new(), - None, - None, - None, - Some(properties), + AssetHubWestendChainSpec::builder( + asset_hub_westend_runtime::WASM_BINARY + .expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "westend".into(), para_id: 1000 }, ) + .with_name("Westend Asset Hub") + .with_id("asset-hub-westend") + .with_chain_type(ChainType::Live) + .with_genesis_config_patch(asset_hub_westend_genesis( + // initial collators. 
+ vec![ + ( + hex!("9cfd429fa002114f33c1d3e211501d62830c9868228eb3b4b8ae15a83de04325").into(), + hex!("9cfd429fa002114f33c1d3e211501d62830c9868228eb3b4b8ae15a83de04325") + .unchecked_into(), + ), + ( + hex!("12a03fb4e7bda6c9a07ec0a11d03c24746943e054ff0bb04938970104c783876").into(), + hex!("12a03fb4e7bda6c9a07ec0a11d03c24746943e054ff0bb04938970104c783876") + .unchecked_into(), + ), + ( + hex!("1256436307dfde969324e95b8c62cb9101f520a39435e6af0f7ac07b34e1931f").into(), + hex!("1256436307dfde969324e95b8c62cb9101f520a39435e6af0f7ac07b34e1931f") + .unchecked_into(), + ), + ( + hex!("98102b7bca3f070f9aa19f58feed2c0a4e107d203396028ec17a47e1ed80e322").into(), + hex!("98102b7bca3f070f9aa19f58feed2c0a4e107d203396028ec17a47e1ed80e322") + .unchecked_into(), + ), + ], + Vec::new(), + ASSET_HUB_WESTEND_ED * 4096, + 1000.into(), + )) + .with_properties(properties) + .build() } fn asset_hub_westend_genesis( invulnerables: Vec<(AccountId, AuraId)>, endowed_accounts: Vec, + endowment: AssetHubBalance, id: ParaId, -) -> asset_hub_westend_runtime::RuntimeGenesisConfig { - asset_hub_westend_runtime::RuntimeGenesisConfig { - system: asset_hub_westend_runtime::SystemConfig { - code: asset_hub_westend_runtime::WASM_BINARY - .expect("WASM binary was not build, please build it!") - .to_vec(), - ..Default::default() +) -> serde_json::Value { + serde_json::json!({ + "balances": { + "balances": endowed_accounts + .iter() + .cloned() + .map(|k| (k, endowment)) + .collect::>(), + }, + "parachainInfo": { + "parachainId": id, + }, + "collatorSelection": { + "invulnerables": invulnerables.iter().cloned().map(|(acc, _)| acc).collect::>(), + "candidacyBond": ASSET_HUB_WESTEND_ED * 16, }, - balances: asset_hub_westend_runtime::BalancesConfig { + "session": { + "keys": invulnerables + .into_iter() + .map(|(acc, aura)| { + ( + acc.clone(), // account id + acc, // validator id + asset_hub_westend_session_keys(aura), // session keys + ) + }) + .collect::>(), + }, + "polkadotXcm": { + "safeXcmVersion": Some(SAFE_XCM_VERSION), + }, + }) +} + +pub fn asset_hub_rococo_development_config() -> AssetHubRococoChainSpec { + let mut properties = sc_chain_spec::Properties::new(); + properties.insert("ss58Format".into(), 42.into()); + properties.insert("tokenSymbol".into(), "ROC".into()); + properties.insert("tokenDecimals".into(), 12.into()); + asset_hub_rococo_like_development_config( + properties, + "Rococo Asset Hub Development", + "asset-hub-rococo-dev", + 1000, + ) +} + +fn asset_hub_rococo_like_development_config( + properties: sc_chain_spec::Properties, + name: &str, + chain_id: &str, + para_id: u32, +) -> AssetHubRococoChainSpec { + AssetHubRococoChainSpec::builder( + asset_hub_rococo_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), + Extensions { relay_chain: "rococo-dev".into(), para_id }, + ) + .with_name(name) + .with_id(chain_id) + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(asset_hub_rococo_genesis( + // initial collators. 
+ vec![( + get_account_id_from_seed::("Alice"), + get_collator_keys_from_seed::("Alice"), + )], + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + ], + parachains_common::rococo::currency::UNITS * 1_000_000, + para_id.into(), + )) + .with_properties(properties) + .build() +} + +pub fn asset_hub_rococo_local_config() -> AssetHubRococoChainSpec { + let mut properties = sc_chain_spec::Properties::new(); + properties.insert("ss58Format".into(), 42.into()); + properties.insert("tokenSymbol".into(), "ROC".into()); + properties.insert("tokenDecimals".into(), 12.into()); + asset_hub_rococo_like_local_config( + properties, + "Rococo Asset Hub Local", + "asset-hub-rococo-local", + 1000, + ) +} + +fn asset_hub_rococo_like_local_config( + properties: sc_chain_spec::Properties, + name: &str, + chain_id: &str, + para_id: u32, +) -> AssetHubRococoChainSpec { + AssetHubRococoChainSpec::builder( + asset_hub_rococo_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), + Extensions { relay_chain: "rococo-local".into(), para_id }, + ) + .with_name(name) + .with_id(chain_id) + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(asset_hub_rococo_genesis( + // initial collators. + vec![ + ( + get_account_id_from_seed::("Alice"), + get_collator_keys_from_seed::("Alice"), + ), + ( + get_account_id_from_seed::("Bob"), + get_collator_keys_from_seed::("Bob"), + ), + ], + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Charlie"), + get_account_id_from_seed::("Dave"), + get_account_id_from_seed::("Eve"), + get_account_id_from_seed::("Ferdie"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + get_account_id_from_seed::("Charlie//stash"), + get_account_id_from_seed::("Dave//stash"), + get_account_id_from_seed::("Eve//stash"), + get_account_id_from_seed::("Ferdie//stash"), + ], + parachains_common::rococo::currency::UNITS * 1_000_000, + para_id.into(), + )) + .with_properties(properties) + .build() +} + +pub fn asset_hub_rococo_genesis_config() -> AssetHubRococoChainSpec { + let mut properties = sc_chain_spec::Properties::new(); + properties.insert("tokenSymbol".into(), "ROC".into()); + properties.insert("tokenDecimals".into(), 12.into()); + let para_id = 1000; + AssetHubRococoChainSpec::builder( + asset_hub_rococo_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), + Extensions { relay_chain: "rococo".into(), para_id }, + ) + .with_name("Rococo Asset Hub") + .with_id("asset-hub-rococo") + .with_chain_type(ChainType::Live) + .with_genesis_config_patch(asset_hub_rococo_genesis( + // initial collators. 
+ vec![ + // E8XC6rTJRsioKCp6KMy6zd24ykj4gWsusZ3AkSeyavpVBAG + ( + hex!("44cb62d1d6cdd2fff2a5ef3bb7ef827be5b3e117a394ecaa634d8dd9809d5608").into(), + hex!("44cb62d1d6cdd2fff2a5ef3bb7ef827be5b3e117a394ecaa634d8dd9809d5608") + .unchecked_into(), + ), + // G28iWEybndgGRbhfx83t7Q42YhMPByHpyqWDUgeyoGF94ri + ( + hex!("9864b85e23aa4506643db9879c3dbbeabaa94d269693a4447f537dd6b5893944").into(), + hex!("9864b85e23aa4506643db9879c3dbbeabaa94d269693a4447f537dd6b5893944") + .unchecked_into(), + ), + // G839e2eMiq7UXbConsY6DS1XDAYG2XnQxAmLuRLGGQ3Px9c + ( + hex!("9ce5741ee2f1ac3bdedbde9f3339048f4da2cb88ddf33a0977fa0b4cf86e2948").into(), + hex!("9ce5741ee2f1ac3bdedbde9f3339048f4da2cb88ddf33a0977fa0b4cf86e2948") + .unchecked_into(), + ), + // GLao4ukFUW6qhexuZowdFrKa2NLCfnEjZMftSXXfvGv1vvt + ( + hex!("a676ed15f5a325eab49ed8d5f8c00f3f814b19bb58cda14ad10894c078dd337f").into(), + hex!("a676ed15f5a325eab49ed8d5f8c00f3f814b19bb58cda14ad10894c078dd337f") + .unchecked_into(), + ), + ], + Vec::new(), + ASSET_HUB_ROCOCO_ED * 524_288, + para_id.into(), + )) + .with_properties(properties) + .build() +} + +fn asset_hub_rococo_genesis( + invulnerables: Vec<(AccountId, AuraId)>, + endowed_accounts: Vec, + endowment: AssetHubBalance, + id: ParaId, +) -> serde_json::Value { + serde_json::json!({ + "balances": asset_hub_rococo_runtime::BalancesConfig { balances: endowed_accounts .iter() .cloned() - .map(|k| (k, ASSET_HUB_WESTEND_ED * 4096)) + .map(|k| (k, endowment)) .collect(), }, - parachain_info: asset_hub_westend_runtime::ParachainInfoConfig { + "parachainInfo": asset_hub_rococo_runtime::ParachainInfoConfig { parachain_id: id, ..Default::default() }, - collator_selection: asset_hub_westend_runtime::CollatorSelectionConfig { + "collatorSelection": asset_hub_rococo_runtime::CollatorSelectionConfig { invulnerables: invulnerables.iter().cloned().map(|(acc, _)| acc).collect(), - candidacy_bond: ASSET_HUB_WESTEND_ED * 16, + candidacy_bond: ASSET_HUB_ROCOCO_ED * 16, ..Default::default() }, - session: asset_hub_westend_runtime::SessionConfig { + "session": asset_hub_rococo_runtime::SessionConfig { keys: invulnerables .into_iter() .map(|(acc, aura)| { ( - acc.clone(), // account id - acc, // validator id - asset_hub_westend_session_keys(aura), // session keys + acc.clone(), // account id + acc, // validator id + asset_hub_rococo_session_keys(aura), // session keys ) }) .collect(), }, - // no need to pass anything to aura, in fact it will panic if we do. Session will take care - // of this. 
- aura: Default::default(), - aura_ext: Default::default(), - parachain_system: Default::default(), - polkadot_xcm: asset_hub_westend_runtime::PolkadotXcmConfig { + "polkadotXcm": asset_hub_rococo_runtime::PolkadotXcmConfig { safe_xcm_version: Some(SAFE_XCM_VERSION), ..Default::default() - }, - } + } + }) } diff --git a/cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs b/cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs index 5de4a49f8275122c3c86fbf18409068a653ce054..94cef106001edd3377ed9716c27bb0f1300e20dd 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs @@ -29,9 +29,6 @@ pub enum BridgeHubRuntimeType { // used by benchmarks RococoDevelopment, - Wococo, - WococoLocal, - Kusama, KusamaLocal, // used by benchmarks @@ -42,8 +39,10 @@ pub enum BridgeHubRuntimeType { // used by benchmarks PolkadotDevelopment, - // used with kusama runtime Westend, + WestendLocal, + // used by benchmarks + WestendDevelopment, } impl FromStr for BridgeHubRuntimeType { @@ -59,11 +58,11 @@ impl FromStr for BridgeHubRuntimeType { kusama::BRIDGE_HUB_KUSAMA_LOCAL => Ok(BridgeHubRuntimeType::KusamaLocal), kusama::BRIDGE_HUB_KUSAMA_DEVELOPMENT => Ok(BridgeHubRuntimeType::KusamaDevelopment), westend::BRIDGE_HUB_WESTEND => Ok(BridgeHubRuntimeType::Westend), + westend::BRIDGE_HUB_WESTEND_LOCAL => Ok(BridgeHubRuntimeType::WestendLocal), + westend::BRIDGE_HUB_WESTEND_DEVELOPMENT => Ok(BridgeHubRuntimeType::WestendDevelopment), rococo::BRIDGE_HUB_ROCOCO => Ok(BridgeHubRuntimeType::Rococo), rococo::BRIDGE_HUB_ROCOCO_LOCAL => Ok(BridgeHubRuntimeType::RococoLocal), rococo::BRIDGE_HUB_ROCOCO_DEVELOPMENT => Ok(BridgeHubRuntimeType::RococoDevelopment), - wococo::BRIDGE_HUB_WOCOCO => Ok(BridgeHubRuntimeType::Wococo), - wococo::BRIDGE_HUB_WOCOCO_LOCAL => Ok(BridgeHubRuntimeType::WococoLocal), _ => Err(format!("Value '{}' is not configured yet", value)), } } @@ -82,14 +81,14 @@ impl BridgeHubRuntimeType { BridgeHubRuntimeType::KusamaLocal | BridgeHubRuntimeType::KusamaDevelopment => Ok(Box::new(kusama::BridgeHubChainSpec::from_json_file(path)?)), - BridgeHubRuntimeType::Westend => + BridgeHubRuntimeType::Westend | + BridgeHubRuntimeType::WestendLocal | + BridgeHubRuntimeType::WestendDevelopment => Ok(Box::new(westend::BridgeHubChainSpec::from_json_file(path)?)), BridgeHubRuntimeType::Rococo | BridgeHubRuntimeType::RococoLocal | BridgeHubRuntimeType::RococoDevelopment => Ok(Box::new(rococo::BridgeHubChainSpec::from_json_file(path)?)), - BridgeHubRuntimeType::Wococo | BridgeHubRuntimeType::WococoLocal => - Ok(Box::new(wococo::BridgeHubChainSpec::from_json_file(path)?)), } } @@ -131,6 +130,20 @@ impl BridgeHubRuntimeType { Ok(Box::new(westend::BridgeHubChainSpec::from_json_bytes( &include_bytes!("../../chain-specs/bridge-hub-westend.json")[..], )?)), + BridgeHubRuntimeType::WestendLocal => Ok(Box::new(westend::local_config( + westend::BRIDGE_HUB_WESTEND_LOCAL, + "Westend BridgeHub Local", + "westend-local", + ParaId::new(1002), + Some("Bob".to_string()), + ))), + BridgeHubRuntimeType::WestendDevelopment => Ok(Box::new(westend::local_config( + westend::BRIDGE_HUB_WESTEND_DEVELOPMENT, + "Westend BridgeHub Development", + "westend-dev", + ParaId::new(1002), + Some("Bob".to_string()), + ))), BridgeHubRuntimeType::Rococo => Ok(Box::new(rococo::BridgeHubChainSpec::from_json_bytes( &include_bytes!("../../chain-specs/bridge-hub-rococo.json")[..], @@ -151,17 +164,6 @@ impl BridgeHubRuntimeType { Some("Bob".to_string()), |_| (), ))), - 
BridgeHubRuntimeType::Wococo => - Ok(Box::new(wococo::BridgeHubChainSpec::from_json_bytes( - &include_bytes!("../../chain-specs/bridge-hub-wococo.json")[..], - )?)), - BridgeHubRuntimeType::WococoLocal => Ok(Box::new(wococo::local_config( - wococo::BRIDGE_HUB_WOCOCO_LOCAL, - "Wococo BridgeHub Local", - "wococo-local", - ParaId::new(1014), - Some("Bob".to_string()), - ))), } } } @@ -195,8 +197,7 @@ pub mod rococo { parachains_common::rococo::currency::EXISTENTIAL_DEPOSIT; /// Specialized `ChainSpec` for the normal parachain runtime. - pub type BridgeHubChainSpec = - sc_service::GenericChainSpec; + pub type BridgeHubChainSpec = sc_service::GenericChainSpec<(), Extensions>; pub type RuntimeApi = bridge_hub_rococo_runtime::RuntimeApi; @@ -215,52 +216,47 @@ pub mod rococo { properties.insert("tokenDecimals".into(), 12.into()); modify_props(&mut properties); - BridgeHubChainSpec::from_genesis( - // Name - chain_name, - // ID - super::ensure_id(id).expect("invalid id"), - ChainType::Local, - move || { - genesis( - // initial collators. - vec![ - ( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed::("Alice"), - ), - ( - get_account_id_from_seed::("Bob"), - get_collator_keys_from_seed::("Bob"), - ), - ], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - ], - para_id, - bridges_pallet_owner_seed - .as_ref() - .map(|seed| get_account_id_from_seed::(seed)), - ) - }, - Vec::new(), - None, - None, - None, - Some(properties), + BridgeHubChainSpec::builder( + bridge_hub_rococo_runtime::WASM_BINARY + .expect("WASM binary was not built, please build it!"), Extensions { relay_chain: relay_chain.to_string(), para_id: para_id.into() }, ) + .with_name(chain_name) + .with_id(super::ensure_id(id).expect("invalid id")) + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(genesis( + // initial collators. 
+ vec![ + ( + get_account_id_from_seed::("Alice"), + get_collator_keys_from_seed::("Alice"), + ), + ( + get_account_id_from_seed::("Bob"), + get_collator_keys_from_seed::("Bob"), + ), + ], + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Charlie"), + get_account_id_from_seed::("Dave"), + get_account_id_from_seed::("Eve"), + get_account_id_from_seed::("Ferdie"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + get_account_id_from_seed::("Charlie//stash"), + get_account_id_from_seed::("Dave//stash"), + get_account_id_from_seed::("Eve//stash"), + get_account_id_from_seed::("Ferdie//stash"), + ], + para_id, + bridges_pallet_owner_seed + .as_ref() + .map(|seed| get_account_id_from_seed::(seed)), + )) + .with_properties(properties) + .build() } fn genesis( @@ -268,28 +264,20 @@ pub mod rococo { endowed_accounts: Vec, id: ParaId, bridges_pallet_owner: Option, - ) -> bridge_hub_rococo_runtime::RuntimeGenesisConfig { - bridge_hub_rococo_runtime::RuntimeGenesisConfig { - system: bridge_hub_rococo_runtime::SystemConfig { - code: bridge_hub_rococo_runtime::WASM_BINARY - .expect("WASM binary was not build, please build it!") - .to_vec(), - ..Default::default() - }, - balances: bridge_hub_rococo_runtime::BalancesConfig { - balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 60)).collect(), + ) -> serde_json::Value { + serde_json::json!({ + "balances": { + "balances": endowed_accounts.iter().cloned().map(|k| (k, 1u64 << 60)).collect::>(), }, - parachain_info: bridge_hub_rococo_runtime::ParachainInfoConfig { - parachain_id: id, - ..Default::default() + "parachainInfo": { + "parachainId": id, }, - collator_selection: bridge_hub_rococo_runtime::CollatorSelectionConfig { - invulnerables: invulnerables.iter().cloned().map(|(acc, _)| acc).collect(), - candidacy_bond: BRIDGE_HUB_ROCOCO_ED * 16, - ..Default::default() + "collatorSelection": { + "invulnerables": invulnerables.iter().cloned().map(|(acc, _)| acc).collect::>(), + "candidacyBond": BRIDGE_HUB_ROCOCO_ED * 16, }, - session: bridge_hub_rococo_runtime::SessionConfig { - keys: invulnerables + "session": { + "keys": invulnerables .into_iter() .map(|(acc, aura)| { ( @@ -298,63 +286,18 @@ pub mod rococo { bridge_hub_rococo_runtime::SessionKeys { aura }, // session keys ) }) - .collect(), - }, - aura: Default::default(), - aura_ext: Default::default(), - parachain_system: Default::default(), - polkadot_xcm: bridge_hub_rococo_runtime::PolkadotXcmConfig { - safe_xcm_version: Some(SAFE_XCM_VERSION), - ..Default::default() + .collect::>(), }, - bridge_wococo_grandpa: bridge_hub_rococo_runtime::BridgeWococoGrandpaConfig { - owner: bridges_pallet_owner.clone(), - ..Default::default() + "polkadotXcm": { + "safeXcmVersion": Some(SAFE_XCM_VERSION), }, - bridge_rococo_grandpa: bridge_hub_rococo_runtime::BridgeRococoGrandpaConfig { - owner: bridges_pallet_owner.clone(), - ..Default::default() + "bridgeWestendGrandpa": { + "owner": bridges_pallet_owner.clone(), }, - bridge_rococo_messages: bridge_hub_rococo_runtime::BridgeRococoMessagesConfig { - owner: bridges_pallet_owner.clone(), - ..Default::default() + "bridgeWestendMessages": { + "owner": bridges_pallet_owner.clone(), }, - bridge_wococo_messages: bridge_hub_rococo_runtime::BridgeWococoMessagesConfig { - owner: bridges_pallet_owner, - ..Default::default() - }, - } - } -} - -/// Sub-module for Wococo setup (reuses stuff from Rococo) -pub mod wococo { - use super::ParaId; - use 
crate::chain_spec::bridge_hubs::rococo; - - pub(crate) const BRIDGE_HUB_WOCOCO: &str = "bridge-hub-wococo"; - pub(crate) const BRIDGE_HUB_WOCOCO_LOCAL: &str = "bridge-hub-wococo-local"; - - pub type BridgeHubChainSpec = rococo::BridgeHubChainSpec; - pub type RuntimeApi = rococo::RuntimeApi; - - pub fn local_config( - id: &str, - chain_name: &str, - relay_chain: &str, - para_id: ParaId, - bridges_pallet_owner_seed: Option, - ) -> BridgeHubChainSpec { - rococo::local_config( - id, - chain_name, - relay_chain, - para_id, - bridges_pallet_owner_seed, - |properties| { - properties.insert("tokenSymbol".into(), "WOOK".into()); - }, - ) + }) } } @@ -375,8 +318,7 @@ pub mod kusama { parachains_common::kusama::currency::EXISTENTIAL_DEPOSIT; /// Specialized `ChainSpec` for the normal parachain runtime. - pub type BridgeHubChainSpec = - sc_service::GenericChainSpec; + pub type BridgeHubChainSpec = sc_service::GenericChainSpec<(), Extensions>; pub type RuntimeApi = bridge_hub_kusama_runtime::RuntimeApi; pub fn local_config( @@ -390,81 +332,68 @@ pub mod kusama { properties.insert("tokenSymbol".into(), "KSM".into()); properties.insert("tokenDecimals".into(), 12.into()); - BridgeHubChainSpec::from_genesis( - // Name - chain_name, - // ID - super::ensure_id(id).expect("invalid id"), - ChainType::Local, - move || { - genesis( - // initial collators. - vec![ - ( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed::("Alice"), - ), - ( - get_account_id_from_seed::("Bob"), - get_collator_keys_from_seed::("Bob"), - ), - ], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - ], - para_id, - ) - }, - Vec::new(), - None, - None, - None, - Some(properties), + BridgeHubChainSpec::builder( + bridge_hub_kusama_runtime::WASM_BINARY + .expect("WASM binary was not built, please build it!"), Extensions { relay_chain: relay_chain.to_string(), para_id: para_id.into() }, ) + .with_name(chain_name) + .with_id(super::ensure_id(id).expect("invalid id")) + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(genesis( + // initial collators. 
+ vec![ + ( + get_account_id_from_seed::("Alice"), + get_collator_keys_from_seed::("Alice"), + ), + ( + get_account_id_from_seed::("Bob"), + get_collator_keys_from_seed::("Bob"), + ), + ], + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Charlie"), + get_account_id_from_seed::("Dave"), + get_account_id_from_seed::("Eve"), + get_account_id_from_seed::("Ferdie"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + get_account_id_from_seed::("Charlie//stash"), + get_account_id_from_seed::("Dave//stash"), + get_account_id_from_seed::("Eve//stash"), + get_account_id_from_seed::("Ferdie//stash"), + ], + para_id, + )) + .with_properties(properties) + .build() } fn genesis( invulnerables: Vec<(AccountId, AuraId)>, endowed_accounts: Vec, id: ParaId, - ) -> bridge_hub_kusama_runtime::RuntimeGenesisConfig { - bridge_hub_kusama_runtime::RuntimeGenesisConfig { - system: bridge_hub_kusama_runtime::SystemConfig { - code: bridge_hub_kusama_runtime::WASM_BINARY - .expect("WASM binary was not build, please build it!") - .to_vec(), - ..Default::default() - }, - balances: bridge_hub_kusama_runtime::BalancesConfig { - balances: endowed_accounts + ) -> serde_json::Value { + serde_json::json!({ + "balances": { + "balances": endowed_accounts .iter() .cloned() .map(|k| (k, BRIDGE_HUB_KUSAMA_ED * 524_288)) - .collect(), + .collect::>(), }, - parachain_info: bridge_hub_kusama_runtime::ParachainInfoConfig { - parachain_id: id, - ..Default::default() + "parachainInfo": { + "parachainId": id, }, - collator_selection: bridge_hub_kusama_runtime::CollatorSelectionConfig { - invulnerables: invulnerables.iter().cloned().map(|(acc, _)| acc).collect(), - candidacy_bond: BRIDGE_HUB_KUSAMA_ED * 16, - ..Default::default() + "collatorSelection": { + "invulnerables": invulnerables.iter().cloned().map(|(acc, _)| acc).collect::>(), + "candidacyBond": BRIDGE_HUB_KUSAMA_ED * 16, }, - session: bridge_hub_kusama_runtime::SessionConfig { - keys: invulnerables + "session": { + "keys": invulnerables .into_iter() .map(|(acc, aura)| { ( @@ -473,26 +402,129 @@ pub mod kusama { bridge_hub_kusama_runtime::SessionKeys { aura }, // session keys ) }) - .collect(), - }, - aura: Default::default(), - aura_ext: Default::default(), - parachain_system: Default::default(), - polkadot_xcm: bridge_hub_kusama_runtime::PolkadotXcmConfig { - safe_xcm_version: Some(SAFE_XCM_VERSION), - ..Default::default() + .collect::>(), }, - } + "polkadotXcm": { + "safeXcmVersion": Some(SAFE_XCM_VERSION), + } + }) } } -/// Sub-module for Westend setup (uses Kusama runtime) +/// Sub-module for Westend setup. pub mod westend { - use crate::chain_spec::bridge_hubs::kusama; + use super::{get_account_id_from_seed, get_collator_keys_from_seed, sr25519, ParaId}; + use crate::chain_spec::{Extensions, SAFE_XCM_VERSION}; + use parachains_common::{AccountId, AuraId}; + use sc_chain_spec::ChainType; + + use super::BridgeHubBalance; pub(crate) const BRIDGE_HUB_WESTEND: &str = "bridge-hub-westend"; - pub type BridgeHubChainSpec = kusama::BridgeHubChainSpec; - pub type RuntimeApi = bridge_hub_kusama_runtime::RuntimeApi; + pub(crate) const BRIDGE_HUB_WESTEND_LOCAL: &str = "bridge-hub-westend-local"; + pub(crate) const BRIDGE_HUB_WESTEND_DEVELOPMENT: &str = "bridge-hub-westend-dev"; + const BRIDGE_HUB_WESTEND_ED: BridgeHubBalance = + parachains_common::westend::currency::EXISTENTIAL_DEPOSIT; + + /// Specialized `ChainSpec` for the normal parachain runtime. 
+	pub type BridgeHubChainSpec =
+		sc_service::GenericChainSpec<bridge_hub_westend_runtime::RuntimeGenesisConfig, Extensions>;
+	pub type RuntimeApi = bridge_hub_westend_runtime::RuntimeApi;
+
+	pub fn local_config(
+		id: &str,
+		chain_name: &str,
+		relay_chain: &str,
+		para_id: ParaId,
+		bridges_pallet_owner_seed: Option<String>,
+	) -> BridgeHubChainSpec {
+		let mut properties = sc_chain_spec::Properties::new();
+		properties.insert("tokenSymbol".into(), "WND".into());
+		properties.insert("tokenDecimals".into(), 12.into());
+
+		BridgeHubChainSpec::builder(
+			bridge_hub_westend_runtime::WASM_BINARY
+				.expect("WASM binary was not built, please build it!"),
+			Extensions { relay_chain: relay_chain.to_string(), para_id: para_id.into() },
+		)
+		.with_name(chain_name)
+		.with_id(super::ensure_id(id).expect("invalid id"))
+		.with_chain_type(ChainType::Local)
+		.with_genesis_config_patch(genesis(
+			// initial collators.
+			vec![
+				(
+					get_account_id_from_seed::<sr25519::Public>("Alice"),
+					get_collator_keys_from_seed::<AuraId>("Alice"),
+				),
+				(
+					get_account_id_from_seed::<sr25519::Public>("Bob"),
+					get_collator_keys_from_seed::<AuraId>("Bob"),
+				),
+			],
+			vec![
+				get_account_id_from_seed::<sr25519::Public>("Alice"),
+				get_account_id_from_seed::<sr25519::Public>("Bob"),
+				get_account_id_from_seed::<sr25519::Public>("Charlie"),
+				get_account_id_from_seed::<sr25519::Public>("Dave"),
+				get_account_id_from_seed::<sr25519::Public>("Eve"),
+				get_account_id_from_seed::<sr25519::Public>("Ferdie"),
+				get_account_id_from_seed::<sr25519::Public>("Alice//stash"),
+				get_account_id_from_seed::<sr25519::Public>("Bob//stash"),
+				get_account_id_from_seed::<sr25519::Public>("Charlie//stash"),
+				get_account_id_from_seed::<sr25519::Public>("Dave//stash"),
+				get_account_id_from_seed::<sr25519::Public>("Eve//stash"),
+				get_account_id_from_seed::<sr25519::Public>("Ferdie//stash"),
+			],
+			para_id,
+			bridges_pallet_owner_seed
+				.as_ref()
+				.map(|seed| get_account_id_from_seed::<sr25519::Public>(seed)),
+		))
+		.with_properties(properties)
+		.build()
+	}
+
+	fn genesis(
+		invulnerables: Vec<(AccountId, AuraId)>,
+		endowed_accounts: Vec<AccountId>,
+		id: ParaId,
+		bridges_pallet_owner: Option<AccountId>,
+	) -> serde_json::Value {
+		serde_json::json!({
+			"balances": {
+				"balances": endowed_accounts.iter().cloned().map(|k| (k, 1u64 << 60)).collect::<Vec<_>>(),
+			},
+			"parachainInfo": {
+				"parachainId": id,
+			},
+			"collatorSelection": {
+				"invulnerables": invulnerables.iter().cloned().map(|(acc, _)| acc).collect::<Vec<_>>(),
+				"candidacyBond": BRIDGE_HUB_WESTEND_ED * 16,
+			},
+			"session": {
+				"keys": invulnerables
+					.into_iter()
+					.map(|(acc, aura)| {
+						(
+							acc.clone(),                                      // account id
+							acc,                                              // validator id
+							bridge_hub_westend_runtime::SessionKeys { aura }, // session keys
+						)
+					})
+					.collect::<Vec<_>>(),
+			},
+			"polkadotXcm": {
+				"safeXcmVersion": Some(SAFE_XCM_VERSION),
+			},
+			"bridgeRococoGrandpa": {
+				"owner": bridges_pallet_owner.clone(),
+			},
+			"bridgeRococoMessages": {
+				"owner": bridges_pallet_owner.clone(),
+			}
+		})
+	}
 }
 
 /// Sub-module for Polkadot setup
@@ -512,8 +544,7 @@ pub mod polkadot {
 		parachains_common::polkadot::currency::EXISTENTIAL_DEPOSIT;
 
 	/// Specialized `ChainSpec` for the normal parachain runtime.
-	pub type BridgeHubChainSpec =
-		sc_service::GenericChainSpec<bridge_hub_polkadot_runtime::RuntimeGenesisConfig, Extensions>;
+	pub type BridgeHubChainSpec = sc_service::GenericChainSpec<(), Extensions>;
 	pub type RuntimeApi = bridge_hub_polkadot_runtime::RuntimeApi;
 
 	pub fn local_config(
@@ -527,81 +558,68 @@ pub mod polkadot {
 		properties.insert("tokenSymbol".into(), "DOT".into());
 		properties.insert("tokenDecimals".into(), 10.into());
 
-		BridgeHubChainSpec::from_genesis(
-			// Name
-			chain_name,
-			// ID
-			super::ensure_id(id).expect("invalid id"),
-			ChainType::Local,
-			move || {
-				genesis(
-					// initial collators.
- vec![ - ( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed::("Alice"), - ), - ( - get_account_id_from_seed::("Bob"), - get_collator_keys_from_seed::("Bob"), - ), - ], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - ], - para_id, - ) - }, - Vec::new(), - None, - None, - None, - Some(properties), + BridgeHubChainSpec::builder( + bridge_hub_polkadot_runtime::WASM_BINARY + .expect("WASM binary was not built, please build it!"), Extensions { relay_chain: relay_chain.to_string(), para_id: para_id.into() }, ) + .with_name(chain_name) + .with_id(super::ensure_id(id).expect("invalid id")) + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(genesis( + // initial collators. + vec![ + ( + get_account_id_from_seed::("Alice"), + get_collator_keys_from_seed::("Alice"), + ), + ( + get_account_id_from_seed::("Bob"), + get_collator_keys_from_seed::("Bob"), + ), + ], + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Charlie"), + get_account_id_from_seed::("Dave"), + get_account_id_from_seed::("Eve"), + get_account_id_from_seed::("Ferdie"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + get_account_id_from_seed::("Charlie//stash"), + get_account_id_from_seed::("Dave//stash"), + get_account_id_from_seed::("Eve//stash"), + get_account_id_from_seed::("Ferdie//stash"), + ], + para_id, + )) + .with_properties(properties) + .build() } fn genesis( invulnerables: Vec<(AccountId, AuraId)>, endowed_accounts: Vec, id: ParaId, - ) -> bridge_hub_polkadot_runtime::RuntimeGenesisConfig { - bridge_hub_polkadot_runtime::RuntimeGenesisConfig { - system: bridge_hub_polkadot_runtime::SystemConfig { - code: bridge_hub_polkadot_runtime::WASM_BINARY - .expect("WASM binary was not build, please build it!") - .to_vec(), - ..Default::default() - }, - balances: bridge_hub_polkadot_runtime::BalancesConfig { - balances: endowed_accounts + ) -> serde_json::Value { + serde_json::json!({ + "balances": { + "balances": endowed_accounts .iter() .cloned() .map(|k| (k, BRIDGE_HUB_POLKADOT_ED * 4096)) - .collect(), + .collect::>(), }, - parachain_info: bridge_hub_polkadot_runtime::ParachainInfoConfig { - parachain_id: id, - ..Default::default() + "parachainInfo": { + "parachainId": id, }, - collator_selection: bridge_hub_polkadot_runtime::CollatorSelectionConfig { - invulnerables: invulnerables.iter().cloned().map(|(acc, _)| acc).collect(), - candidacy_bond: BRIDGE_HUB_POLKADOT_ED * 16, - ..Default::default() + "collatorSelection": { + "invulnerables": invulnerables.iter().cloned().map(|(acc, _)| acc).collect::>(), + "candidacyBond": BRIDGE_HUB_POLKADOT_ED * 16, }, - session: bridge_hub_polkadot_runtime::SessionConfig { - keys: invulnerables + "session": { + "keys": invulnerables .into_iter() .map(|(acc, aura)| { ( @@ -610,15 +628,11 @@ pub mod polkadot { bridge_hub_polkadot_runtime::SessionKeys { aura }, // session keys ) }) - .collect(), - }, - aura: Default::default(), - aura_ext: Default::default(), - parachain_system: Default::default(), - 
polkadot_xcm: bridge_hub_polkadot_runtime::PolkadotXcmConfig { - safe_xcm_version: Some(SAFE_XCM_VERSION), - ..Default::default() + .collect::>(), }, - } + "polkadotXcm": { + "safeXcmVersion": Some(SAFE_XCM_VERSION), + } + }) } } diff --git a/cumulus/polkadot-parachain/src/chain_spec/collectives.rs b/cumulus/polkadot-parachain/src/chain_spec/collectives.rs index fbf49b9535a42f632df29931e7515745414eb9c6..07bd742fa8e3279f7f947287bb8e618812ba3de4 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/collectives.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/collectives.rs @@ -22,11 +22,13 @@ use parachains_common::{AccountId, AuraId, Balance as CollectivesBalance}; use sc_service::ChainType; use sp_core::sr25519; -pub type CollectivesPolkadotChainSpec = - sc_service::GenericChainSpec; +pub type CollectivesPolkadotChainSpec = sc_service::GenericChainSpec<(), Extensions>; +pub type CollectivesWestendChainSpec = sc_service::GenericChainSpec<(), Extensions>; const COLLECTIVES_POLKADOT_ED: CollectivesBalance = parachains_common::polkadot::currency::EXISTENTIAL_DEPOSIT; +const COLLECTIVES_WESTEND_ED: CollectivesBalance = + parachains_common::westend::currency::EXISTENTIAL_DEPOSIT; /// Generate the session keys from individual elements. /// @@ -43,37 +45,33 @@ pub fn collectives_polkadot_development_config() -> CollectivesPolkadotChainSpec properties.insert("tokenSymbol".into(), "DOT".into()); properties.insert("tokenDecimals".into(), 10.into()); - CollectivesPolkadotChainSpec::from_genesis( - // Name - "Polkadot Collectives Development", - // ID - "collectives_polkadot_dev", - ChainType::Local, - move || { - collectives_polkadot_genesis( - // initial collators. - vec![( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed::("Alice"), - )], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - ], - // 1002 avoids a potential collision with Kusama-1001 (Encointer) should there ever - // be a collective para on Kusama. - 1002.into(), - ) - }, - Vec::new(), - None, - None, - None, - Some(properties), + CollectivesPolkadotChainSpec::builder( + collectives_polkadot_runtime::WASM_BINARY + .expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "polkadot-dev".into(), para_id: 1002 }, ) + .with_name("Polkadot Collectives Development") + .with_id("collectives_polkadot_dev") + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(collectives_polkadot_genesis( + // initial collators. + vec![( + get_account_id_from_seed::("Alice"), + get_collator_keys_from_seed::("Alice"), + )], + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + ], + // 1002 avoids a potential collision with Kusama-1001 (Encointer) should there ever + // be a collective para on Kusama. + 1002.into(), + )) + .with_boot_nodes(Vec::new()) + .with_properties(properties) + .build() } /// Collectives Polkadot Local Config. @@ -83,81 +81,69 @@ pub fn collectives_polkadot_local_config() -> CollectivesPolkadotChainSpec { properties.insert("tokenSymbol".into(), "DOT".into()); properties.insert("tokenDecimals".into(), 10.into()); - CollectivesPolkadotChainSpec::from_genesis( - // Name - "Polkadot Collectives Local", - // ID - "collectives_polkadot_local", - ChainType::Local, - move || { - collectives_polkadot_genesis( - // initial collators. 
- vec![ - ( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed::("Alice"), - ), - ( - get_account_id_from_seed::("Bob"), - get_collator_keys_from_seed::("Bob"), - ), - ], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - ], - 1002.into(), - ) - }, - Vec::new(), - None, - None, - None, - Some(properties), + CollectivesPolkadotChainSpec::builder( + collectives_polkadot_runtime::WASM_BINARY + .expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "polkadot-local".into(), para_id: 1002 }, ) + .with_name("Polkadot Collectives Local") + .with_id("collectives_polkadot_local") + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(collectives_polkadot_genesis( + // initial collators. + vec![ + ( + get_account_id_from_seed::("Alice"), + get_collator_keys_from_seed::("Alice"), + ), + ( + get_account_id_from_seed::("Bob"), + get_collator_keys_from_seed::("Bob"), + ), + ], + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Charlie"), + get_account_id_from_seed::("Dave"), + get_account_id_from_seed::("Eve"), + get_account_id_from_seed::("Ferdie"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + get_account_id_from_seed::("Charlie//stash"), + get_account_id_from_seed::("Dave//stash"), + get_account_id_from_seed::("Eve//stash"), + get_account_id_from_seed::("Ferdie//stash"), + ], + 1002.into(), + )) + .with_boot_nodes(Vec::new()) + .with_properties(properties) + .build() } fn collectives_polkadot_genesis( invulnerables: Vec<(AccountId, AuraId)>, endowed_accounts: Vec, id: ParaId, -) -> collectives_polkadot_runtime::RuntimeGenesisConfig { - collectives_polkadot_runtime::RuntimeGenesisConfig { - system: collectives_polkadot_runtime::SystemConfig { - code: collectives_polkadot_runtime::WASM_BINARY - .expect("WASM binary was not build, please build it!") - .to_vec(), - ..Default::default() - }, - balances: collectives_polkadot_runtime::BalancesConfig { - balances: endowed_accounts +) -> serde_json::Value { + serde_json::json!( { + "balances": { + "balances": endowed_accounts .iter() .cloned() .map(|k| (k, COLLECTIVES_POLKADOT_ED * 4096)) - .collect(), + .collect::>(), }, - parachain_info: collectives_polkadot_runtime::ParachainInfoConfig { - parachain_id: id, - ..Default::default() + "parachainInfo": { + "parachainId": id, }, - collator_selection: collectives_polkadot_runtime::CollatorSelectionConfig { - invulnerables: invulnerables.iter().cloned().map(|(acc, _)| acc).collect(), - candidacy_bond: COLLECTIVES_POLKADOT_ED * 16, - ..Default::default() + "collatorSelection": { + "invulnerables": invulnerables.iter().cloned().map(|(acc, _)| acc).collect::>(), + "candidacyBond": COLLECTIVES_POLKADOT_ED * 16, }, - session: collectives_polkadot_runtime::SessionConfig { - keys: invulnerables + "session": { + "keys": invulnerables .into_iter() .map(|(acc, aura)| { ( @@ -166,18 +152,142 @@ fn collectives_polkadot_genesis( collectives_polkadot_session_keys(aura), // session keys ) }) - .collect(), + 
.collect::>(), + }, + // no need to pass anything to aura, in fact it will panic if we do. Session will take care + // of this. + "polkadotXcm": { + "safeXcmVersion": Some(SAFE_XCM_VERSION), + }, + }) +} + +/// Generate the session keys from individual elements. +/// +/// The input must be a tuple of individual keys (a single arg for now since we have just one key). +pub fn collectives_westend_session_keys(keys: AuraId) -> collectives_westend_runtime::SessionKeys { + collectives_westend_runtime::SessionKeys { aura: keys } +} + +pub fn collectives_westend_development_config() -> CollectivesWestendChainSpec { + let mut properties = sc_chain_spec::Properties::new(); + properties.insert("ss58Format".into(), 42.into()); + properties.insert("tokenSymbol".into(), "WND".into()); + properties.insert("tokenDecimals".into(), 12.into()); + + CollectivesWestendChainSpec::builder( + collectives_westend_runtime::WASM_BINARY + .expect("WASM binary was not built, please build it!"), + Extensions { relay_chain: "westend-dev".into(), para_id: 1002 }, + ) + .with_name("Westend Collectives Development") + .with_id("collectives_westend_dev") + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(collectives_westend_genesis( + // initial collators. + vec![( + get_account_id_from_seed::("Alice"), + get_collator_keys_from_seed::("Alice"), + )], + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + ], + // 1002 avoids a potential collision with Kusama-1001 (Encointer) should there ever + // be a collective para on Kusama. + 1002.into(), + )) + .with_boot_nodes(Vec::new()) + .with_properties(properties) + .build() +} + +/// Collectives Westend Local Config. +pub fn collectives_westend_local_config() -> CollectivesWestendChainSpec { + let mut properties = sc_chain_spec::Properties::new(); + properties.insert("ss58Format".into(), 42.into()); + properties.insert("tokenSymbol".into(), "WND".into()); + properties.insert("tokenDecimals".into(), 12.into()); + + CollectivesWestendChainSpec::builder( + collectives_westend_runtime::WASM_BINARY + .expect("WASM binary was not built, please build it!"), + Extensions { relay_chain: "westend-local".into(), para_id: 1002 }, + ) + .with_name("Westend Collectives Local") + .with_id("collectives_westend_local") + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(collectives_westend_genesis( + // initial collators. 
+ vec![ + ( + get_account_id_from_seed::("Alice"), + get_collator_keys_from_seed::("Alice"), + ), + ( + get_account_id_from_seed::("Bob"), + get_collator_keys_from_seed::("Bob"), + ), + ], + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Charlie"), + get_account_id_from_seed::("Dave"), + get_account_id_from_seed::("Eve"), + get_account_id_from_seed::("Ferdie"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + get_account_id_from_seed::("Charlie//stash"), + get_account_id_from_seed::("Dave//stash"), + get_account_id_from_seed::("Eve//stash"), + get_account_id_from_seed::("Ferdie//stash"), + ], + 1002.into(), + )) + .with_boot_nodes(Vec::new()) + .with_properties(properties) + .build() +} + +fn collectives_westend_genesis( + invulnerables: Vec<(AccountId, AuraId)>, + endowed_accounts: Vec, + id: ParaId, +) -> serde_json::Value { + serde_json::json!( { + "balances": { + "balances": endowed_accounts + .iter() + .cloned() + .map(|k| (k, COLLECTIVES_WESTEND_ED * 4096)) + .collect::>(), + }, + "parachainInfo": { + "parachainId": id, + }, + "collatorSelection": { + "invulnerables": invulnerables.iter().cloned().map(|(acc, _)| acc).collect::>(), + "candidacyBond": COLLECTIVES_WESTEND_ED * 16, + }, + "session": { + "keys": invulnerables + .into_iter() + .map(|(acc, aura)| { + ( + acc.clone(), // account id + acc, // validator id + collectives_westend_session_keys(aura), // session keys + ) + }) + .collect::>(), }, // no need to pass anything to aura, in fact it will panic if we do. Session will take care // of this. - aura: Default::default(), - aura_ext: Default::default(), - parachain_system: Default::default(), - polkadot_xcm: collectives_polkadot_runtime::PolkadotXcmConfig { - safe_xcm_version: Some(SAFE_XCM_VERSION), - ..Default::default() + "polkadotXcm": { + "safeXcmVersion": Some(SAFE_XCM_VERSION), }, - alliance: Default::default(), - alliance_motion: Default::default(), - } + }) } diff --git a/cumulus/polkadot-parachain/src/chain_spec/contracts.rs b/cumulus/polkadot-parachain/src/chain_spec/contracts.rs index 0d5012858edc6f81a1554996fbafe4a1f6973072..7ca66354fbfde220b550d033dda39d2be6fa4836 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/contracts.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/contracts.rs @@ -23,8 +23,7 @@ use parachains_common::{AccountId, AuraId}; use sc_service::ChainType; use sp_core::{crypto::UncheckedInto, sr25519}; -pub type ContractsRococoChainSpec = - sc_service::GenericChainSpec; +pub type ContractsRococoChainSpec = sc_service::GenericChainSpec<(), Extensions>; /// No relay chain suffix because the id is the same over all relay chains. const CONTRACTS_PARACHAIN_ID: u32 = 1002; @@ -38,52 +37,46 @@ pub fn contracts_rococo_development_config() -> ContractsRococoChainSpec { properties.insert("tokenSymbol".into(), "ROC".into()); properties.insert("tokenDecimals".into(), 12.into()); - ContractsRococoChainSpec::from_genesis( - // Name - "Contracts on Rococo Development", - // ID - "contracts-rococo-dev", - ChainType::Development, - move || { - contracts_rococo_genesis( - // initial collators. 
- vec![ - ( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed::("Alice"), - ), - ( - get_account_id_from_seed::("Bob"), - get_collator_keys_from_seed::("Bob"), - ), - ], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - ], - CONTRACTS_PARACHAIN_ID.into(), - ) - }, - Vec::new(), - None, - None, - None, - None, + ContractsRococoChainSpec::builder( + contracts_rococo_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "rococo-local".into(), // You MUST set this to the correct network! para_id: CONTRACTS_PARACHAIN_ID, }, ) + .with_name("Contracts on Rococo Development") + .with_id("contracts-rococo-dev") + .with_chain_type(ChainType::Development) + .with_genesis_config_patch(contracts_rococo_genesis( + // initial collators. + vec![ + ( + get_account_id_from_seed::("Alice"), + get_collator_keys_from_seed::("Alice"), + ), + ( + get_account_id_from_seed::("Bob"), + get_collator_keys_from_seed::("Bob"), + ), + ], + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Charlie"), + get_account_id_from_seed::("Dave"), + get_account_id_from_seed::("Eve"), + get_account_id_from_seed::("Ferdie"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + get_account_id_from_seed::("Charlie//stash"), + get_account_id_from_seed::("Dave//stash"), + get_account_id_from_seed::("Eve//stash"), + get_account_id_from_seed::("Ferdie//stash"), + ], + CONTRACTS_PARACHAIN_ID.into(), + )) + .with_boot_nodes(Vec::new()) + .build() } pub fn contracts_rococo_local_config() -> ContractsRococoChainSpec { @@ -91,58 +84,46 @@ pub fn contracts_rococo_local_config() -> ContractsRococoChainSpec { properties.insert("tokenSymbol".into(), "ROC".into()); properties.insert("tokenDecimals".into(), 12.into()); - ContractsRococoChainSpec::from_genesis( - // Name - "Contracts on Rococo", - // ID - "contracts-rococo-local", - ChainType::Local, - move || { - contracts_rococo_genesis( - // initial collators. 
- vec![ - ( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed::("Alice"), - ), - ( - get_account_id_from_seed::("Bob"), - get_collator_keys_from_seed::("Bob"), - ), - ], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - ], - CONTRACTS_PARACHAIN_ID.into(), - ) - }, - // Bootnodes - Vec::new(), - // Telemetry - None, - // Protocol ID - None, - // Fork ID - None, - // Properties - Some(properties), - // Extensions + ContractsRococoChainSpec::builder( + contracts_rococo_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "rococo-local".into(), // You MUST set this to the correct network! para_id: CONTRACTS_PARACHAIN_ID, }, ) + .with_name("Contracts on Rococo") + .with_id("contracts-rococo-local") + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(contracts_rococo_genesis( + // initial collators. + vec![ + ( + get_account_id_from_seed::("Alice"), + get_collator_keys_from_seed::("Alice"), + ), + ( + get_account_id_from_seed::("Bob"), + get_collator_keys_from_seed::("Bob"), + ), + ], + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Charlie"), + get_account_id_from_seed::("Dave"), + get_account_id_from_seed::("Eve"), + get_account_id_from_seed::("Ferdie"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + get_account_id_from_seed::("Charlie//stash"), + get_account_id_from_seed::("Dave//stash"), + get_account_id_from_seed::("Eve//stash"), + get_account_id_from_seed::("Ferdie//stash"), + ], + CONTRACTS_PARACHAIN_ID.into(), + )) + .with_properties(properties) + .build() } pub fn contracts_rococo_config() -> ContractsRococoChainSpec { @@ -151,111 +132,92 @@ pub fn contracts_rococo_config() -> ContractsRococoChainSpec { properties.insert("tokenSymbol".into(), "ROC".into()); properties.insert("tokenDecimals".into(), 12.into()); - ContractsRococoChainSpec::from_genesis( - // Name - "Contracts on Rococo", - // ID - "contracts-rococo", - ChainType::Live, - move || { - contracts_rococo_genesis( - vec![ - // 5GKFbTTgrVS4Vz1UWWHPqMZQNFWZtqo7H2KpCDyYhEL3aS26 - ( - hex!["bc09354c12c054c8f6b3da208485eacec4ac648bad348895273b37bab5a0937c"] - .into(), - hex!["bc09354c12c054c8f6b3da208485eacec4ac648bad348895273b37bab5a0937c"] - .unchecked_into(), - ), - // 5EPRJHm2GpABVWcwnAujcrhnrjFZyDGd5TwKFzkBoGgdRyv2 - ( - hex!["66be63b7bcbfb91040e5248e2d1ceb822cf219c57848c5924ffa3a1f8e67ba72"] - .into(), - hex!["66be63b7bcbfb91040e5248e2d1ceb822cf219c57848c5924ffa3a1f8e67ba72"] - .unchecked_into(), - ), - // 5GH62vrJrVZxLREcHzm2PR5uTLAT5RQMJitoztCGyaP4o3uM - ( - hex!["ba62886472a0a9f66b5e39f1469ce1c5b3d8cad6be39078daf16f111e89d1e44"] - .into(), - hex!["ba62886472a0a9f66b5e39f1469ce1c5b3d8cad6be39078daf16f111e89d1e44"] - .unchecked_into(), - ), - // 5FHfoJDLdjRYX5KXLRqMDYBbWrwHLMtti21uK4QByUoUAbJF - ( - hex!["8e97f65cda001976311df9bed39e8d0c956089093e94a75ef76fe9347a0eda7b"] - .into(), - hex!["8e97f65cda001976311df9bed39e8d0c956089093e94a75ef76fe9347a0eda7b"] - .unchecked_into(), - ), 
- ], - // Warning: The configuration for a production chain should not contain - // any endowed accounts here, otherwise it'll be minting extra native tokens - // from the relay chain on the parachain. - vec![ - // NOTE: Remove endowed accounts if deployed on other relay chains. - // Endowed accounts - hex!["baa78c7154c7f82d6d377177e20bcab65d327eca0086513f9964f5a0f6bdad56"].into(), - // AccountId of an account which `ink-waterfall` uses for automated testing - hex!["0e47e2344d523c3cc5c34394b0d58b9a4200e813a038e6c5a6163cc07d70b069"].into(), - ], - CONTRACTS_PARACHAIN_ID.into(), - ) - }, - // Bootnodes - vec![ + ContractsRococoChainSpec::builder( + contracts_rococo_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), + Extensions { relay_chain: "rococo".into(), para_id: CONTRACTS_PARACHAIN_ID } + ) + .with_name("Contracts on Rococo") + .with_id("contracts-rococo") + .with_chain_type(ChainType::Live) + .with_genesis_config_patch(contracts_rococo_genesis( + vec![ + // 5GKFbTTgrVS4Vz1UWWHPqMZQNFWZtqo7H2KpCDyYhEL3aS26 + ( + hex!["bc09354c12c054c8f6b3da208485eacec4ac648bad348895273b37bab5a0937c"] + .into(), + hex!["bc09354c12c054c8f6b3da208485eacec4ac648bad348895273b37bab5a0937c"] + .unchecked_into(), + ), + // 5EPRJHm2GpABVWcwnAujcrhnrjFZyDGd5TwKFzkBoGgdRyv2 + ( + hex!["66be63b7bcbfb91040e5248e2d1ceb822cf219c57848c5924ffa3a1f8e67ba72"] + .into(), + hex!["66be63b7bcbfb91040e5248e2d1ceb822cf219c57848c5924ffa3a1f8e67ba72"] + .unchecked_into(), + ), + // 5GH62vrJrVZxLREcHzm2PR5uTLAT5RQMJitoztCGyaP4o3uM + ( + hex!["ba62886472a0a9f66b5e39f1469ce1c5b3d8cad6be39078daf16f111e89d1e44"] + .into(), + hex!["ba62886472a0a9f66b5e39f1469ce1c5b3d8cad6be39078daf16f111e89d1e44"] + .unchecked_into(), + ), + // 5FHfoJDLdjRYX5KXLRqMDYBbWrwHLMtti21uK4QByUoUAbJF + ( + hex!["8e97f65cda001976311df9bed39e8d0c956089093e94a75ef76fe9347a0eda7b"] + .into(), + hex!["8e97f65cda001976311df9bed39e8d0c956089093e94a75ef76fe9347a0eda7b"] + .unchecked_into(), + ), + ], + // Warning: The configuration for a production chain should not contain + // any endowed accounts here, otherwise it'll be minting extra native tokens + // from the relay chain on the parachain. + vec![ + // NOTE: Remove endowed accounts if deployed on other relay chains. 
+ // Endowed accounts + hex!["baa78c7154c7f82d6d377177e20bcab65d327eca0086513f9964f5a0f6bdad56"].into(), + // AccountId of an account which `ink-waterfall` uses for automated testing + hex!["0e47e2344d523c3cc5c34394b0d58b9a4200e813a038e6c5a6163cc07d70b069"].into(), + ], + CONTRACTS_PARACHAIN_ID.into(), + )) + .with_boot_nodes(vec![ "/dns/contracts-collator-0.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWKg3Rpxcr9oJ8n6khoxpGKWztCZydtUZk2cojHqnfLrpj" - .parse() - .expect("MultiaddrWithPeerId"), + .parse() + .expect("MultiaddrWithPeerId"), "/dns/contracts-collator-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWPEXYrz8tHU3nDtPoPw4V7ou5dzMEWSTuUj7vaWiYVAVh" - .parse() - .expect("MultiaddrWithPeerId"), + .parse() + .expect("MultiaddrWithPeerId"), "/dns/contracts-collator-2.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWEVU8AFNary4nP4qEnEcwJaRuy59Wefekzdu9pKbnVEhk" - .parse() - .expect("MultiaddrWithPeerId"), + .parse() + .expect("MultiaddrWithPeerId"), "/dns/contracts-collator-3.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWP6pV3ZmcXzGDjv8ZMgA6nZxfAKDxSz4VNiLx6vVCQgJX" - .parse() - .expect("MultiaddrWithPeerId"), - ], - // Telemetry - None, - // Protocol ID - None, - // Fork ID - None, - // Properties - Some(properties), - // Extensions - Extensions { relay_chain: "rococo".into(), para_id: CONTRACTS_PARACHAIN_ID }, - ) + .parse() + .expect("MultiaddrWithPeerId"), + ]) + .with_properties(properties) + .build() } fn contracts_rococo_genesis( invulnerables: Vec<(AccountId, AuraId)>, endowed_accounts: Vec, id: ParaId, -) -> contracts_rococo_runtime::RuntimeGenesisConfig { - contracts_rococo_runtime::RuntimeGenesisConfig { - system: contracts_rococo_runtime::SystemConfig { - code: contracts_rococo_runtime::WASM_BINARY - .expect("WASM binary was not build, please build it!") - .to_vec(), - ..Default::default() - }, - balances: contracts_rococo_runtime::BalancesConfig { - balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 60)).collect(), +) -> serde_json::Value { + serde_json::json!( { + "balances": { + "balances": endowed_accounts.iter().cloned().map(|k| (k, 1u64 << 60)).collect::>(), }, - parachain_info: contracts_rococo_runtime::ParachainInfoConfig { - parachain_id: id, - ..Default::default() + "parachainInfo": { + "parachainId": id, }, - collator_selection: contracts_rococo_runtime::CollatorSelectionConfig { - invulnerables: invulnerables.iter().cloned().map(|(acc, _)| acc).collect(), - candidacy_bond: CONTRACTS_ROCOCO_ED * 16, - ..Default::default() + "collatorSelection": { + "invulnerables": invulnerables.iter().cloned().map(|(acc, _)| acc).collect::>(), + "candidacyBond": CONTRACTS_ROCOCO_ED * 16, }, - session: contracts_rococo_runtime::SessionConfig { - keys: invulnerables + "session": { + "keys": invulnerables .into_iter() .map(|(acc, aura)| { ( @@ -264,21 +226,17 @@ fn contracts_rococo_genesis( contracts_rococo_runtime::SessionKeys { aura }, // session keys ) }) - .collect(), + .collect::>(), }, // no need to pass anything to aura, in fact it will panic if we do. Session will take care // of this. 
- aura: Default::default(), - aura_ext: Default::default(), - parachain_system: Default::default(), - polkadot_xcm: contracts_rococo_runtime::PolkadotXcmConfig { - safe_xcm_version: Some(SAFE_XCM_VERSION), - ..Default::default() + "polkadotXcm": { + "safeXcmVersion": Some(SAFE_XCM_VERSION), }, - sudo: contracts_rococo_runtime::SudoConfig { - key: Some( - hex!["2681a28014e7d3a5bfb32a003b3571f53c408acbc28d351d6bf58f5028c4ef14"].into(), - ), + "sudo": { + "key": Some(sp_runtime::AccountId32::from(hex![ + "2681a28014e7d3a5bfb32a003b3571f53c408acbc28d351d6bf58f5028c4ef14" + ])), }, - } + }) } diff --git a/cumulus/polkadot-parachain/src/chain_spec/glutton.rs b/cumulus/polkadot-parachain/src/chain_spec/glutton.rs index 881fae398827a6adc1fd6195f9d69997a3a6e6af..aff1358d1aecb5f2208bcdc380454148fe9728b4 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/glutton.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/glutton.rs @@ -23,103 +23,141 @@ use sp_core::sr25519; use super::get_collator_keys_from_seed; /// Specialized `ChainSpec` for the Glutton parachain runtime. -pub type GluttonChainSpec = - sc_service::GenericChainSpec; +pub type GluttonChainSpec = sc_service::GenericChainSpec<(), Extensions>; +pub type GluttonWestendChainSpec = sc_service::GenericChainSpec<(), Extensions>; pub fn glutton_development_config(para_id: ParaId) -> GluttonChainSpec { - GluttonChainSpec::from_genesis( - // Name - "Glutton Development", - // ID - "glutton_dev", - ChainType::Local, - move || glutton_genesis(para_id, vec![get_collator_keys_from_seed::("Alice")]), - Vec::new(), - None, - None, - None, - None, + GluttonChainSpec::builder( + glutton_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "kusama-dev".into(), para_id: para_id.into() }, ) + .with_name("Glutton Development") + .with_id("glutton_dev") + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(glutton_genesis( + para_id, + vec![get_collator_keys_from_seed::("Alice")], + )) + .build() } pub fn glutton_local_config(para_id: ParaId) -> GluttonChainSpec { - GluttonChainSpec::from_genesis( - // Name - "Glutton Local", - // ID - "glutton_local", - ChainType::Local, - move || { - glutton_genesis( - para_id, - vec![ - get_collator_keys_from_seed::("Alice"), - get_collator_keys_from_seed::("Bob"), - ], - ) - }, - Vec::new(), - None, - None, - None, - None, + GluttonChainSpec::builder( + glutton_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "kusama-local".into(), para_id: para_id.into() }, ) + .with_name("Glutton Local") + .with_id("glutton_local") + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(glutton_genesis( + para_id, + vec![ + get_collator_keys_from_seed::("Alice"), + get_collator_keys_from_seed::("Bob"), + ], + )) + .build() } pub fn glutton_config(para_id: ParaId) -> GluttonChainSpec { let mut properties = sc_chain_spec::Properties::new(); properties.insert("ss58Format".into(), 2.into()); - GluttonChainSpec::from_genesis( - // Name - format!("Glutton {}", para_id).as_str(), - // ID - format!("glutton-kusama-{}", para_id).as_str(), - ChainType::Live, - move || { - glutton_genesis( - para_id, - vec![ - get_collator_keys_from_seed::("Alice"), - get_collator_keys_from_seed::("Bob"), - ], - ) - }, - Vec::new(), - None, - // Protocol ID - Some(format!("glutton-kusama-{}", para_id).as_str()), - None, - Some(properties), + GluttonChainSpec::builder( + glutton_runtime::WASM_BINARY.expect("WASM binary was not built, please 
build it!"), Extensions { relay_chain: "kusama".into(), para_id: para_id.into() }, ) + .with_name(format!("Glutton {}", para_id).as_str()) + .with_id(format!("glutton-kusama-{}", para_id).as_str()) + .with_chain_type(ChainType::Live) + .with_genesis_config_patch(glutton_genesis( + para_id, + vec![ + get_collator_keys_from_seed::("Alice"), + get_collator_keys_from_seed::("Bob"), + ], + )) + .with_protocol_id(format!("glutton-kusama-{}", para_id).as_str()) + .with_properties(properties) + .build() } -fn glutton_genesis( - parachain_id: ParaId, - collators: Vec, -) -> glutton_runtime::RuntimeGenesisConfig { - glutton_runtime::RuntimeGenesisConfig { - system: glutton_runtime::SystemConfig { - code: glutton_runtime::WASM_BINARY - .expect("WASM binary was not build, please build it!") - .to_vec(), - ..Default::default() +fn glutton_genesis(parachain_id: ParaId, collators: Vec) -> serde_json::Value { + serde_json::json!( { + "parachainInfo": { + "parachainId": parachain_id }, - parachain_info: glutton_runtime::ParachainInfoConfig { parachain_id, ..Default::default() }, - parachain_system: Default::default(), - glutton: glutton_runtime::GluttonConfig { - compute: Default::default(), - storage: Default::default(), - trash_data_count: Default::default(), - ..Default::default() + "sudo": { + "key": Some(get_account_id_from_seed::("Alice")), + }, + "aura": { "authorities": collators }, + }) +} + +pub fn glutton_westend_development_config(para_id: ParaId) -> GluttonWestendChainSpec { + GluttonWestendChainSpec::builder( + glutton_westend_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), + Extensions { relay_chain: "westend-dev".into(), para_id: para_id.into() }, + ) + .with_name("Glutton Development") + .with_id("glutton_westend_dev") + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(glutton_genesis( + para_id, + vec![get_collator_keys_from_seed::("Alice")], + )) + .build() +} + +pub fn glutton_westend_local_config(para_id: ParaId) -> GluttonWestendChainSpec { + GluttonWestendChainSpec::builder( + glutton_westend_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), + Extensions { relay_chain: "westend-local".into(), para_id: para_id.into() }, + ) + .with_name("Glutton Local") + .with_id("glutton_westend_local") + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(glutton_genesis( + para_id, + vec![ + get_collator_keys_from_seed::("Alice"), + get_collator_keys_from_seed::("Bob"), + ], + )) + .build() +} + +pub fn glutton_westend_config(para_id: ParaId) -> GluttonWestendChainSpec { + let mut properties = sc_chain_spec::Properties::new(); + properties.insert("ss58Format".into(), 42.into()); + + GluttonChainSpec::builder( + glutton_westend_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), + Extensions { relay_chain: "westend".into(), para_id: para_id.into() }, + ) + .with_name(format!("Glutton {}", para_id).as_str()) + .with_id(format!("glutton-westend-{}", para_id).as_str()) + .with_chain_type(ChainType::Live) + .with_genesis_config_patch(glutton_westend_genesis( + para_id, + vec![ + get_collator_keys_from_seed::("Alice"), + get_collator_keys_from_seed::("Bob"), + ], + )) + .with_protocol_id(format!("glutton-westend-{}", para_id).as_str()) + .with_properties(properties) + .build() +} + +fn glutton_westend_genesis(parachain_id: ParaId, collators: Vec) -> serde_json::Value { + serde_json::json!( { + "parachainInfo": { + "parachainId": parachain_id }, - aura: glutton_runtime::AuraConfig { authorities: 
collators }, - aura_ext: Default::default(), - sudo: glutton_runtime::SudoConfig { - key: Some(get_account_id_from_seed::("Alice")), + "sudo": { + "key": Some(get_account_id_from_seed::("Alice")), }, - } + "aura": { "authorities": collators }, + }) } diff --git a/cumulus/polkadot-parachain/src/chain_spec/penpal.rs b/cumulus/polkadot-parachain/src/chain_spec/penpal.rs index 479d1701e379ea4b2ce9d3520205365c1b07201f..2e35ee231dfff086452162c68212fe76483705bb 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/penpal.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/penpal.rs @@ -22,8 +22,7 @@ use parachains_common::{AccountId, AuraId}; use sc_service::ChainType; use sp_core::sr25519; /// Specialized `ChainSpec` for the normal parachain runtime. -pub type PenpalChainSpec = - sc_service::GenericChainSpec; +pub type PenpalChainSpec = sc_service::GenericChainSpec<(), Extensions>; pub fn get_penpal_chain_spec(id: ParaId, relay_chain: &str) -> PenpalChainSpec { // Give your base currency a unit name and decimal places @@ -32,84 +31,69 @@ pub fn get_penpal_chain_spec(id: ParaId, relay_chain: &str) -> PenpalChainSpec { properties.insert("tokenDecimals".into(), 12u32.into()); properties.insert("ss58Format".into(), 42u32.into()); - PenpalChainSpec::from_genesis( - // Name - "Penpal Parachain", - // ID - &format!("penpal-{}", relay_chain.replace("-local", "")), - ChainType::Development, - move || { - penpal_testnet_genesis( - // initial collators. - vec![ - ( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed::("Alice"), - ), - ( - get_account_id_from_seed::("Bob"), - get_collator_keys_from_seed::("Bob"), - ), - ], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - ], - id, - ) - }, - Vec::new(), - None, - None, - None, - None, + PenpalChainSpec::builder( + penpal_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: relay_chain.into(), // You MUST set this to the correct network! para_id: id.into(), }, ) + .with_name("Penpal Parachain") + .with_id(&format!("penpal-{}", relay_chain.replace("-local", ""))) + .with_chain_type(ChainType::Development) + .with_genesis_config_patch(penpal_testnet_genesis( + // initial collators. 
+ vec![ + ( + get_account_id_from_seed::("Alice"), + get_collator_keys_from_seed::("Alice"), + ), + ( + get_account_id_from_seed::("Bob"), + get_collator_keys_from_seed::("Bob"), + ), + ], + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Charlie"), + get_account_id_from_seed::("Dave"), + get_account_id_from_seed::("Eve"), + get_account_id_from_seed::("Ferdie"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + get_account_id_from_seed::("Charlie//stash"), + get_account_id_from_seed::("Dave//stash"), + get_account_id_from_seed::("Eve//stash"), + get_account_id_from_seed::("Ferdie//stash"), + ], + id, + )) + .build() } fn penpal_testnet_genesis( invulnerables: Vec<(AccountId, AuraId)>, endowed_accounts: Vec, id: ParaId, -) -> penpal_runtime::RuntimeGenesisConfig { - penpal_runtime::RuntimeGenesisConfig { - system: penpal_runtime::SystemConfig { - code: penpal_runtime::WASM_BINARY - .expect("WASM binary was not build, please build it!") - .to_vec(), - ..Default::default() - }, - balances: penpal_runtime::BalancesConfig { - balances: endowed_accounts +) -> serde_json::Value { + serde_json::json!({ + "balances": { + "balances": endowed_accounts .iter() .cloned() .map(|k| (k, penpal_runtime::EXISTENTIAL_DEPOSIT * 4096)) - .collect(), + .collect::>(), }, - parachain_info: penpal_runtime::ParachainInfoConfig { - parachain_id: id, - ..Default::default() + "parachainInfo": { + "parachainId": id, }, - collator_selection: penpal_runtime::CollatorSelectionConfig { - invulnerables: invulnerables.iter().cloned().map(|(acc, _)| acc).collect(), - candidacy_bond: penpal_runtime::EXISTENTIAL_DEPOSIT * 16, - ..Default::default() + "collatorSelection": { + "invulnerables": invulnerables.iter().cloned().map(|(acc, _)| acc).collect::>(), + "candidacyBond": penpal_runtime::EXISTENTIAL_DEPOSIT * 16, }, - session: penpal_runtime::SessionConfig { - keys: invulnerables + "session": { + "keys": invulnerables .into_iter() .map(|(acc, aura)| { ( @@ -118,21 +102,15 @@ fn penpal_testnet_genesis( penpal_session_keys(aura), // session keys ) }) - .collect(), + .collect::>(), }, - // no need to pass anything to aura, in fact it will panic if we do. Session will take care - // of this. - aura: Default::default(), - aura_ext: Default::default(), - parachain_system: Default::default(), - polkadot_xcm: penpal_runtime::PolkadotXcmConfig { - safe_xcm_version: Some(SAFE_XCM_VERSION), - ..Default::default() + "polkadotXcm": { + "safeXcmVersion": Some(SAFE_XCM_VERSION), }, - sudo: penpal_runtime::SudoConfig { - key: Some(get_account_id_from_seed::("Alice")), + "sudo": { + "key": Some(get_account_id_from_seed::("Alice")), }, - } + }) } /// Generate the session keys from individual elements. 
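Note on the shape used by the genesis functions in this patch (e.g. `penpal_testnet_genesis` above): they no longer construct a typed `RuntimeGenesisConfig` but return a `serde_json::Value` patch, where each pallet is keyed by its camelCase genesis-config name and only non-default fields are listed. A minimal, self-contained sketch of that shape follows; it is illustrative only and not part of the patch, assumes just the `serde_json` crate, and uses placeholder values.

fn example_genesis_patch() -> serde_json::Value {
	// Pallets are keyed by their camelCase genesis-config name; anything not
	// listed here falls back to the runtime's default genesis values.
	serde_json::json!({
		"parachainInfo": { "parachainId": 1000 },        // placeholder para id
		"polkadotXcm": { "safeXcmVersion": Some(3u32) }, // placeholder for SAFE_XCM_VERSION
	})
}

fn main() {
	// Prints the JSON value that a chain spec builder's `with_genesis_config_patch` receives.
	println!("{}", example_genesis_patch());
}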
diff --git a/cumulus/polkadot-parachain/src/chain_spec/rococo_parachain.rs b/cumulus/polkadot-parachain/src/chain_spec/rococo_parachain.rs index fb66efbf9ae1c68eedcc42b8ef6436b979885272..c2ba443145683ac8002f0f60c4e205c66ddb21c0 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/rococo_parachain.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/rococo_parachain.rs @@ -25,73 +25,61 @@ use rococo_parachain_runtime::AuraId; use sc_chain_spec::ChainType; use sp_core::{crypto::UncheckedInto, sr25519}; -pub type RococoParachainChainSpec = - sc_service::GenericChainSpec; +pub type RococoParachainChainSpec = sc_service::GenericChainSpec<(), Extensions>; pub fn rococo_parachain_local_config() -> RococoParachainChainSpec { - RococoParachainChainSpec::from_genesis( - "Rococo Parachain Local", - "local_testnet", - ChainType::Local, - move || { - testnet_genesis( - get_account_id_from_seed::("Alice"), - vec![get_from_seed::("Alice"), get_from_seed::("Bob")], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - ], - 1000.into(), - ) - }, - Vec::new(), - None, - None, - None, - None, + RococoParachainChainSpec::builder( + rococo_parachain_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "rococo-local".into(), para_id: 1000 }, ) + .with_name("Rococo Parachain Local") + .with_id("local_testnet") + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(testnet_genesis( + get_account_id_from_seed::("Alice"), + vec![get_from_seed::("Alice"), get_from_seed::("Bob")], + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Charlie"), + get_account_id_from_seed::("Dave"), + get_account_id_from_seed::("Eve"), + get_account_id_from_seed::("Ferdie"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + get_account_id_from_seed::("Charlie//stash"), + get_account_id_from_seed::("Dave//stash"), + get_account_id_from_seed::("Eve//stash"), + get_account_id_from_seed::("Ferdie//stash"), + ], + 1000.into(), + )) + .build() } pub fn staging_rococo_parachain_local_config() -> RococoParachainChainSpec { - RococoParachainChainSpec::from_genesis( - "Staging Rococo Parachain Local", - "staging_testnet", - ChainType::Live, - move || { - testnet_genesis( - hex!["9ed7705e3c7da027ba0583a22a3212042f7e715d3c168ba14f1424e2bc111d00"].into(), - vec![ - // $secret//one - hex!["aad9fa2249f87a210a0f93400b7f90e47b810c6d65caa0ca3f5af982904c2a33"] - .unchecked_into(), - // $secret//two - hex!["d47753f0cca9dd8da00c70e82ec4fc5501a69c49a5952a643d18802837c88212"] - .unchecked_into(), - ], - vec![ - hex!["9ed7705e3c7da027ba0583a22a3212042f7e715d3c168ba14f1424e2bc111d00"].into() - ], - 1000.into(), - ) - }, - Vec::new(), - None, - None, - None, - None, + #[allow(deprecated)] + RococoParachainChainSpec::builder( + rococo_parachain_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "rococo-local".into(), para_id: 1000 }, ) + .with_name("Staging Rococo Parachain Local") + 
.with_id("staging_testnet") + .with_chain_type(ChainType::Live) + .with_genesis_config_patch(testnet_genesis( + hex!["9ed7705e3c7da027ba0583a22a3212042f7e715d3c168ba14f1424e2bc111d00"].into(), + vec![ + // $secret//one + hex!["aad9fa2249f87a210a0f93400b7f90e47b810c6d65caa0ca3f5af982904c2a33"] + .unchecked_into(), + // $secret//two + hex!["d47753f0cca9dd8da00c70e82ec4fc5501a69c49a5952a643d18802837c88212"] + .unchecked_into(), + ], + vec![hex!["9ed7705e3c7da027ba0583a22a3212042f7e715d3c168ba14f1424e2bc111d00"].into()], + 1000.into(), + )) + .build() } pub(crate) fn testnet_genesis( @@ -99,28 +87,18 @@ pub(crate) fn testnet_genesis( initial_authorities: Vec, endowed_accounts: Vec, id: ParaId, -) -> rococo_parachain_runtime::RuntimeGenesisConfig { - rococo_parachain_runtime::RuntimeGenesisConfig { - system: rococo_parachain_runtime::SystemConfig { - code: rococo_parachain_runtime::WASM_BINARY - .expect("WASM binary was not build, please build it!") - .to_vec(), - ..Default::default() - }, - balances: rococo_parachain_runtime::BalancesConfig { - balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 60)).collect(), +) -> serde_json::Value { + serde_json::json!({ + "balances": { + "balances": endowed_accounts.iter().cloned().map(|k| (k, 1u64 << 60)).collect::>(), }, - sudo: rococo_parachain_runtime::SudoConfig { key: Some(root_key) }, - parachain_info: rococo_parachain_runtime::ParachainInfoConfig { - parachain_id: id, - ..Default::default() + "sudo": { "key": Some(root_key) }, + "parachainInfo": { + "parachainId": id, }, - aura: rococo_parachain_runtime::AuraConfig { authorities: initial_authorities }, - aura_ext: Default::default(), - parachain_system: Default::default(), - polkadot_xcm: rococo_parachain_runtime::PolkadotXcmConfig { - safe_xcm_version: Some(SAFE_XCM_VERSION), - ..Default::default() + "aura": { "authorities": initial_authorities }, + "polkadotXcm": { + "safeXcmVersion": Some(SAFE_XCM_VERSION), }, - } + }) } diff --git a/cumulus/polkadot-parachain/src/chain_spec/seedling.rs b/cumulus/polkadot-parachain/src/chain_spec/seedling.rs index 6a5842320976c3794557a38deffe4105af4245c5..b034588e14c08b8e2b04025e3b4878acc773ea05 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/seedling.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/seedling.rs @@ -23,49 +23,35 @@ use sp_core::sr25519; use super::get_collator_keys_from_seed; /// Specialized `ChainSpec` for the seedling parachain runtime. 
-pub type SeedlingChainSpec =
-	sc_service::GenericChainSpec<seedling_runtime::RuntimeGenesisConfig, Extensions>;
+pub type SeedlingChainSpec = sc_service::GenericChainSpec<(), Extensions>;
 
 pub fn get_seedling_chain_spec() -> SeedlingChainSpec {
-	SeedlingChainSpec::from_genesis(
-		"Seedling Local Testnet",
-		"seedling_local_testnet",
-		ChainType::Local,
-		move || {
-			seedling_testnet_genesis(
-				get_account_id_from_seed::<sr25519::Public>("Alice"),
-				2000.into(),
-				vec![get_collator_keys_from_seed::<AuraId>("Alice")],
-			)
-		},
-		Vec::new(),
-		None,
-		None,
-		None,
-		None,
+	SeedlingChainSpec::builder(
+		seedling_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"),
 		Extensions { relay_chain: "westend".into(), para_id: 2000 },
 	)
+	.with_name("Seedling Local Testnet")
+	.with_id("seedling_local_testnet")
+	.with_chain_type(ChainType::Local)
+	.with_genesis_config_patch(seedling_testnet_genesis(
+		get_account_id_from_seed::<sr25519::Public>("Alice"),
+		2000.into(),
+		vec![get_collator_keys_from_seed::<AuraId>("Alice")],
+	))
+	.with_boot_nodes(Vec::new())
+	.build()
 }
 
 fn seedling_testnet_genesis(
 	root_key: AccountId,
 	parachain_id: ParaId,
 	collators: Vec<AuraId>,
-) -> seedling_runtime::RuntimeGenesisConfig {
-	seedling_runtime::RuntimeGenesisConfig {
-		system: seedling_runtime::SystemConfig {
-			code: seedling_runtime::WASM_BINARY
-				.expect("WASM binary was not build, please build it!")
-				.to_vec(),
-			..Default::default()
-		},
-		sudo: seedling_runtime::SudoConfig { key: Some(root_key) },
-		parachain_info: seedling_runtime::ParachainInfoConfig {
-			parachain_id,
-			..Default::default()
+) -> serde_json::Value {
+	serde_json::json!({
+		"sudo": { "key": Some(root_key) },
+		"parachainInfo": {
+			"parachainId": parachain_id,
 		},
-		parachain_system: Default::default(),
-		aura: seedling_runtime::AuraConfig { authorities: collators },
-		aura_ext: Default::default(),
-	}
+		"aura": { "authorities": collators },
+	})
 }
diff --git a/cumulus/polkadot-parachain/src/chain_spec/shell.rs b/cumulus/polkadot-parachain/src/chain_spec/shell.rs
index 0899c1af3b4d238262191436c0bacf80f557882b..02c65e809a6c9153fa4dccc6e9191dd31198061f 100644
--- a/cumulus/polkadot-parachain/src/chain_spec/shell.rs
+++ b/cumulus/polkadot-parachain/src/chain_spec/shell.rs
@@ -22,40 +22,27 @@ use sc_service::ChainType;
 
 use super::get_collator_keys_from_seed;
 
 /// Specialized `ChainSpec` for the shell parachain runtime.
-pub type ShellChainSpec = - sc_service::GenericChainSpec; +pub type ShellChainSpec = sc_service::GenericChainSpec<(), Extensions>; pub fn get_shell_chain_spec() -> ShellChainSpec { - ShellChainSpec::from_genesis( - "Shell Local Testnet", - "shell_local_testnet", - ChainType::Local, - move || { - shell_testnet_genesis(1000.into(), vec![get_collator_keys_from_seed::("Alice")]) - }, - Vec::new(), - None, - None, - None, - None, + ShellChainSpec::builder( + shell_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "westend".into(), para_id: 1000 }, ) + .with_name("Shell Local Testnet") + .with_id("shell_local_testnet") + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(shell_testnet_genesis( + 1000.into(), + vec![get_collator_keys_from_seed::("Alice")], + )) + .with_boot_nodes(Vec::new()) + .build() } -fn shell_testnet_genesis( - parachain_id: ParaId, - collators: Vec, -) -> shell_runtime::RuntimeGenesisConfig { - shell_runtime::RuntimeGenesisConfig { - system: shell_runtime::SystemConfig { - code: shell_runtime::WASM_BINARY - .expect("WASM binary was not build, please build it!") - .to_vec(), - ..Default::default() - }, - parachain_info: shell_runtime::ParachainInfoConfig { parachain_id, ..Default::default() }, - parachain_system: Default::default(), - aura: shell_runtime::AuraConfig { authorities: collators }, - aura_ext: Default::default(), - } +fn shell_testnet_genesis(parachain_id: ParaId, collators: Vec) -> serde_json::Value { + serde_json::json!({ + "parachainInfo": { "parachainId": parachain_id}, + "aura": { "authorities": collators }, + }) } diff --git a/cumulus/polkadot-parachain/src/command.rs b/cumulus/polkadot-parachain/src/command.rs index 0e948d24f82da44a2946f01ee033a1ed6e05c78b..7bede22fea78fff75711cbd8b452443f33fef31f 100644 --- a/cumulus/polkadot-parachain/src/command.rs +++ b/cumulus/polkadot-parachain/src/command.rs @@ -42,12 +42,14 @@ enum Runtime { Seedling, AssetHubPolkadot, AssetHubKusama, + AssetHubRococo, AssetHubWestend, Penpal(ParaId), ContractsRococo, CollectivesPolkadot, CollectivesWestend, Glutton, + GluttonWestend, BridgeHub(chain_spec::bridge_hubs::BridgeHubRuntimeType), } @@ -90,6 +92,8 @@ fn runtime(id: &str) -> Runtime { Runtime::AssetHubPolkadot } else if id.starts_with("asset-hub-kusama") | id.starts_with("statemine") { Runtime::AssetHubKusama + } else if id.starts_with("asset-hub-rococo") { + Runtime::AssetHubRococo } else if id.starts_with("asset-hub-westend") | id.starts_with("westmint") { Runtime::AssetHubWestend } else if id.starts_with("penpal") { @@ -105,6 +109,8 @@ fn runtime(id: &str) -> Runtime { id.parse::() .expect("Invalid value"), ) + } else if id.starts_with("glutton-westend") { + Runtime::GluttonWestend } else if id.starts_with("glutton") { Runtime::Glutton } else { @@ -164,6 +170,19 @@ fn load_spec(id: &str) -> std::result::Result, String> { &include_bytes!("../chain-specs/asset-hub-kusama.json")[..], )?), + // -- Asset Hub Rococo + "asset-hub-rococo-dev" => + Box::new(chain_spec::asset_hubs::asset_hub_rococo_development_config()), + "asset-hub-rococo-local" => + Box::new(chain_spec::asset_hubs::asset_hub_rococo_local_config()), + // the chain spec as used for generating the upgrade genesis values + "asset-hub-rococo-genesis" => + Box::new(chain_spec::asset_hubs::asset_hub_rococo_genesis_config()), + "asset-hub-rococo" => + Box::new(chain_spec::asset_hubs::AssetHubRococoChainSpec::from_json_bytes( + &include_bytes!("../chain-specs/asset-hub-rococo.json")[..], + )?), + // -- 
Asset Hub Westend "asset-hub-westend-dev" | "westmint-dev" => Box::new(chain_spec::asset_hubs::asset_hub_westend_development_config()), @@ -187,8 +206,12 @@ fn load_spec(id: &str) -> std::result::Result, String> { Box::new(chain_spec::collectives::CollectivesPolkadotChainSpec::from_json_bytes( &include_bytes!("../chain-specs/collectives-polkadot.json")[..], )?), + "collectives-westend-dev" => + Box::new(chain_spec::collectives::collectives_westend_development_config()), + "collectives-westend-local" => + Box::new(chain_spec::collectives::collectives_westend_local_config()), "collectives-westend" => - Box::new(chain_spec::collectives::CollectivesPolkadotChainSpec::from_json_bytes( + Box::new(chain_spec::collectives::CollectivesWestendChainSpec::from_json_bytes( &include_bytes!("../chain-specs/collectives-westend.json")[..], )?), @@ -222,6 +245,18 @@ fn load_spec(id: &str) -> std::result::Result, String> { "polkadot-local", )), + // -- Glutton Westend + "glutton-westend-dev" => Box::new(chain_spec::glutton::glutton_westend_development_config( + para_id.expect("Must specify parachain id"), + )), + "glutton-westend-local" => Box::new(chain_spec::glutton::glutton_westend_local_config( + para_id.expect("Must specify parachain id"), + )), + // the chain spec as used for generating the upgrade genesis values + "glutton-westend-genesis" => Box::new(chain_spec::glutton::glutton_westend_config( + para_id.expect("Must specify parachain id"), + )), + // -- Glutton "glutton-kusama-dev" => Box::new(chain_spec::glutton::glutton_development_config( para_id.expect("Must specify parachain id"), @@ -249,12 +284,17 @@ fn load_spec(id: &str) -> std::result::Result, String> { ), Runtime::AssetHubKusama => Box::new(chain_spec::asset_hubs::AssetHubKusamaChainSpec::from_json_file(path)?), + Runtime::AssetHubRococo => + Box::new(chain_spec::asset_hubs::AssetHubRococoChainSpec::from_json_file(path)?), Runtime::AssetHubWestend => Box::new( chain_spec::asset_hubs::AssetHubWestendChainSpec::from_json_file(path)?, ), - Runtime::CollectivesPolkadot | Runtime::CollectivesWestend => Box::new( + Runtime::CollectivesPolkadot => Box::new( chain_spec::collectives::CollectivesPolkadotChainSpec::from_json_file(path)?, ), + Runtime::CollectivesWestend => Box::new( + chain_spec::collectives::CollectivesWestendChainSpec::from_json_file(path)?, + ), Runtime::Shell => Box::new(chain_spec::shell::ShellChainSpec::from_json_file(path)?), Runtime::Seedling => @@ -265,6 +305,8 @@ fn load_spec(id: &str) -> std::result::Result, String> { bridge_hub_runtime_type.chain_spec_from_json_file(path)?, Runtime::Penpal(_para_id) => Box::new(chain_spec::penpal::PenpalChainSpec::from_json_file(path)?), + Runtime::GluttonWestend => + Box::new(chain_spec::glutton::GluttonChainSpec::from_json_file(path)?), Runtime::Glutton => Box::new(chain_spec::glutton::GluttonChainSpec::from_json_file(path)?), Runtime::Default => Box::new( @@ -286,6 +328,10 @@ fn extract_parachain_id(id: &str) -> (&str, &str, Option) { const GLUTTON_PARA_LOCAL_PREFIX: &str = "glutton-kusama-local-"; const GLUTTON_PARA_GENESIS_PREFIX: &str = "glutton-kusama-genesis-"; + const GLUTTON_WESTEND_PARA_DEV_PREFIX: &str = "glutton-westend-dev-"; + const GLUTTON_WESTEND_PARA_LOCAL_PREFIX: &str = "glutton-westend-local-"; + const GLUTTON_WESTEND_PARA_GENESIS_PREFIX: &str = "glutton-westend-genesis-"; + let (norm_id, orig_id, para) = if let Some(suffix) = id.strip_prefix(KUSAMA_TEST_PARA_PREFIX) { let para_id: u32 = suffix.parse().expect("Invalid parachain-id suffix"); 
(&id[..KUSAMA_TEST_PARA_PREFIX.len() - 1], id, Some(para_id)) @@ -301,6 +347,15 @@ fn extract_parachain_id(id: &str) -> (&str, &str, Option) { } else if let Some(suffix) = id.strip_prefix(GLUTTON_PARA_GENESIS_PREFIX) { let para_id: u32 = suffix.parse().expect("Invalid parachain-id suffix"); (&id[..GLUTTON_PARA_GENESIS_PREFIX.len() - 1], id, Some(para_id)) + } else if let Some(suffix) = id.strip_prefix(GLUTTON_WESTEND_PARA_DEV_PREFIX) { + let para_id: u32 = suffix.parse().expect("Invalid parachain-id suffix"); + (&id[..GLUTTON_WESTEND_PARA_DEV_PREFIX.len() - 1], id, Some(para_id)) + } else if let Some(suffix) = id.strip_prefix(GLUTTON_WESTEND_PARA_LOCAL_PREFIX) { + let para_id: u32 = suffix.parse().expect("Invalid parachain-id suffix"); + (&id[..GLUTTON_WESTEND_PARA_LOCAL_PREFIX.len() - 1], id, Some(para_id)) + } else if let Some(suffix) = id.strip_prefix(GLUTTON_WESTEND_PARA_GENESIS_PREFIX) { + let para_id: u32 = suffix.parse().expect("Invalid parachain-id suffix"); + (&id[..GLUTTON_WESTEND_PARA_GENESIS_PREFIX.len() - 1], id, Some(para_id)) } else { (id, id, None) }; @@ -391,6 +446,13 @@ macro_rules! construct_partials { )?; $code }, + Runtime::AssetHubRococo => { + let $partials = new_partial::( + &$config, + crate::service::aura_build_import_queue::<_, AuraId>, + )?; + $code + }, Runtime::AssetHubWestend => { let $partials = new_partial::( &$config, @@ -424,7 +486,9 @@ macro_rules! construct_partials { )?; $code }, - chain_spec::bridge_hubs::BridgeHubRuntimeType::Westend => { + chain_spec::bridge_hubs::BridgeHubRuntimeType::Westend | + chain_spec::bridge_hubs::BridgeHubRuntimeType::WestendLocal | + chain_spec::bridge_hubs::BridgeHubRuntimeType::WestendDevelopment => { let $partials = new_partial::( &$config, crate::service::aura_build_import_queue::<_, AuraId>, @@ -440,22 +504,21 @@ macro_rules! construct_partials { )?; $code }, - chain_spec::bridge_hubs::BridgeHubRuntimeType::Wococo | - chain_spec::bridge_hubs::BridgeHubRuntimeType::WococoLocal => { - let $partials = new_partial::( - &$config, - crate::service::aura_build_import_queue::<_, AuraId>, - )?; - $code - }, }, - Runtime::CollectivesPolkadot | Runtime::CollectivesWestend => { + Runtime::CollectivesPolkadot => { let $partials = new_partial::( &$config, crate::service::aura_build_import_queue::<_, AuraId>, )?; $code }, + Runtime::CollectivesWestend => { + let $partials = new_partial::( + &$config, + crate::service::aura_build_import_queue::<_, AuraId>, + )?; + $code + }, Runtime::Shell => { let $partials = new_partial::( &$config, @@ -484,6 +547,13 @@ macro_rules! construct_partials { )?; $code }, + Runtime::GluttonWestend => { + let $partials = new_partial::( + &$config, + crate::service::shell_build_import_queue, + )?; + $code + }, Runtime::Glutton => { let $partials = new_partial::( &$config, @@ -509,6 +579,16 @@ macro_rules! construct_async_run { { $( $code )* }.map(|v| (v, task_manager)) }) }, + Runtime::AssetHubRococo => { + runner.async_run(|$config| { + let $components = new_partial::( + &$config, + crate::service::aura_build_import_queue::<_, AuraId>, + )?; + let task_manager = $components.task_manager; + { $( $code )* }.map(|v| (v, task_manager)) + }) + }, Runtime::AssetHubKusama => { runner.async_run(|$config| { let $components = new_partial::( @@ -529,7 +609,7 @@ macro_rules! 
construct_async_run { { $( $code )* }.map(|v| (v, task_manager)) }) }, - Runtime::CollectivesPolkadot | Runtime::CollectivesWestend => { + Runtime::CollectivesPolkadot => { runner.async_run(|$config| { let $components = new_partial::( &$config, @@ -539,6 +619,16 @@ macro_rules! construct_async_run { { $( $code )* }.map(|v| (v, task_manager)) }) }, + Runtime::CollectivesWestend => { + runner.async_run(|$config| { + let $components = new_partial::( + &$config, + crate::service::aura_build_import_queue::<_, AuraId>, + )?; + let task_manager = $components.task_manager; + { $( $code )* }.map(|v| (v, task_manager)) + }) + }, Runtime::Shell => { runner.async_run(|$config| { let $components = new_partial::( @@ -597,7 +687,9 @@ macro_rules! construct_async_run { { $( $code )* }.map(|v| (v, task_manager)) }) }, - chain_spec::bridge_hubs::BridgeHubRuntimeType::Westend => { + chain_spec::bridge_hubs::BridgeHubRuntimeType::Westend | + chain_spec::bridge_hubs::BridgeHubRuntimeType::WestendLocal | + chain_spec::bridge_hubs::BridgeHubRuntimeType::WestendDevelopment => { runner.async_run(|$config| { let $components = new_partial::( &$config, @@ -621,18 +713,6 @@ macro_rules! construct_async_run { { $( $code )* }.map(|v| (v, task_manager)) }) }, - chain_spec::bridge_hubs::BridgeHubRuntimeType::Wococo | - chain_spec::bridge_hubs::BridgeHubRuntimeType::WococoLocal => { - runner.async_run(|$config| { - let $components = new_partial::( - &$config, - crate::service::aura_build_import_queue::<_, AuraId>, - )?; - - let task_manager = $components.task_manager; - { $( $code )* }.map(|v| (v, task_manager)) - }) - } } }, Runtime::Penpal(_) | Runtime::Default => { @@ -648,6 +728,16 @@ macro_rules! construct_async_run { { $( $code )* }.map(|v| (v, task_manager)) }) }, + Runtime::GluttonWestend => { + runner.async_run(|$config| { + let $components = new_partial::( + &$config, + crate::service::shell_build_import_queue, + )?; + let task_manager = $components.task_manager; + { $( $code )* }.map(|v| (v, task_manager)) + }) + }, Runtime::Glutton => { runner.async_run(|$config| { let $components = new_partial::( @@ -779,7 +869,7 @@ pub fn run() -> Result<()> { // that both file paths exist, the node will exit, as the user must decide (by // deleting one path) the information that they want to use as their DB. 
let old_name = match config.chain_spec.id() { - "asset-hub-polkadot" => Some("statemint"), + "asset-hub-polkadot" => Some("statemint"), "asset-hub-kusama" => Some("statemine"), "asset-hub-westend" => Some("westmint"), "asset-hub-rococo" => Some("rockmine"), @@ -836,28 +926,35 @@ pub fn run() -> Result<()> { info!("Is collating: {}", if config.role.is_authority() { "yes" } else { "no" }); match config.chain_spec.runtime() { - Runtime::AssetHubPolkadot => crate::service::start_generic_aura_node::< + Runtime::AssetHubPolkadot => crate::service::start_asset_hub_node::< asset_hub_polkadot_runtime::RuntimeApi, AssetHubPolkadotAuraId, >(config, polkadot_config, collator_options, id, hwbench) .await .map(|r| r.0) .map_err(Into::into), - Runtime::AssetHubKusama => crate::service::start_generic_aura_node::< + Runtime::AssetHubKusama => crate::service::start_asset_hub_node::< asset_hub_kusama_runtime::RuntimeApi, AuraId, >(config, polkadot_config, collator_options, id, hwbench) .await .map(|r| r.0) .map_err(Into::into), - Runtime::AssetHubWestend => crate::service::start_generic_aura_node::< + Runtime::AssetHubRococo => crate::service::start_asset_hub_node::< + asset_hub_rococo_runtime::RuntimeApi, + AuraId, + >(config, polkadot_config, collator_options, id, hwbench) + .await + .map(|r| r.0) + .map_err(Into::into), + Runtime::AssetHubWestend => crate::service::start_asset_hub_node::< asset_hub_westend_runtime::RuntimeApi, AuraId, >(config, polkadot_config, collator_options, id, hwbench) .await .map(|r| r.0) .map_err(Into::into), - Runtime::CollectivesPolkadot | Runtime::CollectivesWestend => + Runtime::CollectivesPolkadot => crate::service::start_generic_aura_node::< collectives_polkadot_runtime::RuntimeApi, AuraId, @@ -865,6 +962,14 @@ pub fn run() -> Result<()> { .await .map(|r| r.0) .map_err(Into::into), + Runtime::CollectivesWestend => + crate::service::start_generic_aura_node::< + collectives_westend_runtime::RuntimeApi, + AuraId, + >(config, polkadot_config, collator_options, id, hwbench) + .await + .map(|r| r.0) + .map_err(Into::into), Runtime::Shell => crate::service::start_shell_node::( config, @@ -898,7 +1003,7 @@ pub fn run() -> Result<()> { .map(|r| r.0) .map_err(Into::into), Runtime::BridgeHub(bridge_hub_runtime_type) => match bridge_hub_runtime_type { - chain_spec::bridge_hubs::BridgeHubRuntimeType::Polkadot | +chain_spec::bridge_hubs::BridgeHubRuntimeType::Polkadot | chain_spec::bridge_hubs::BridgeHubRuntimeType::PolkadotLocal | chain_spec::bridge_hubs::BridgeHubRuntimeType::PolkadotDevelopment => crate::service::start_generic_aura_node::< @@ -916,7 +1021,9 @@ pub fn run() -> Result<()> { >(config, polkadot_config, collator_options, id, hwbench) .await .map(|r| r.0), - chain_spec::bridge_hubs::BridgeHubRuntimeType::Westend => + chain_spec::bridge_hubs::BridgeHubRuntimeType::Westend | + chain_spec::bridge_hubs::BridgeHubRuntimeType::WestendLocal | + chain_spec::bridge_hubs::BridgeHubRuntimeType::WestendDevelopment => crate::service::start_generic_aura_node::< chain_spec::bridge_hubs::westend::RuntimeApi, AuraId, @@ -932,14 +1039,6 @@ pub fn run() -> Result<()> { >(config, polkadot_config, collator_options, id, hwbench) .await .map(|r| r.0), - chain_spec::bridge_hubs::BridgeHubRuntimeType::Wococo | - chain_spec::bridge_hubs::BridgeHubRuntimeType::WococoLocal => - crate::service::start_generic_aura_node::< - chain_spec::bridge_hubs::wococo::RuntimeApi, - AuraId, - >(config, polkadot_config, collator_options, id, hwbench) - .await - .map(|r| r.0), } .map_err(Into::into), Runtime::Penpal(_) 
| Runtime::Default => @@ -953,6 +1052,14 @@ pub fn run() -> Result<()> { .await .map(|r| r.0) .map_err(Into::into), + Runtime::GluttonWestend => + crate::service::start_basic_lookahead_node::< + glutton_westend_runtime::RuntimeApi, + AuraId, + >(config, polkadot_config, collator_options, id, hwbench) + .await + .map(|r| r.0) + .map_err(Into::into), Runtime::Glutton => crate::service::start_basic_lookahead_node::< glutton_runtime::RuntimeApi, @@ -1134,35 +1241,30 @@ mod tests { cfg_file_path } - pub type DummyChainSpec = - sc_service::GenericChainSpec; + pub type DummyChainSpec = sc_service::GenericChainSpec<(), E>; pub fn create_default_with_extensions( id: &str, extension: E, ) -> DummyChainSpec { - DummyChainSpec::from_genesis( - "Dummy local testnet", - id, - ChainType::Local, - move || { - crate::chain_spec::rococo_parachain::testnet_genesis( - get_account_id_from_seed::("Alice"), - vec![ - get_from_seed::("Alice"), - get_from_seed::("Bob"), - ], - vec![get_account_id_from_seed::("Alice")], - 1000.into(), - ) - }, - Vec::new(), - None, - None, - None, - None, + DummyChainSpec::builder( + rococo_parachain_runtime::WASM_BINARY + .expect("WASM binary was not built, please build it!"), extension, ) + .with_name("Dummy local testnet") + .with_id(id) + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(crate::chain_spec::rococo_parachain::testnet_genesis( + get_account_id_from_seed::("Alice"), + vec![ + get_from_seed::("Alice"), + get_from_seed::("Bob"), + ], + vec![get_account_id_from_seed::("Alice")], + 1000.into(), + )) + .build() } #[test] diff --git a/cumulus/polkadot-parachain/src/service.rs b/cumulus/polkadot-parachain/src/service.rs index 2f86f54f12a1c9385b95295dbfb990fef1681bec..f580835db55aa9981fed4849b07baeb983237a5a 100644 --- a/cumulus/polkadot-parachain/src/service.rs +++ b/cumulus/polkadot-parachain/src/service.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . -use codec::Codec; +use codec::{Codec, Decode}; use cumulus_client_cli::CollatorOptions; use cumulus_client_collator::service::CollatorService; use cumulus_client_consensus_aura::collators::{ @@ -44,7 +44,7 @@ use crate::rpc; pub use parachains_common::{AccountId, Balance, Block, BlockNumber, Hash, Header, Nonce}; use cumulus_client_consensus_relay_chain::Verifier as RelayChainVerifier; -use futures::lock::Mutex; +use futures::{lock::Mutex, prelude::*}; use sc_consensus::{ import_queue::{BasicQueue, Verifier as VerifierT}, BlockImportParams, ImportQueue, @@ -54,10 +54,14 @@ use sc_network::{config::FullNetworkConfiguration, NetworkBlock}; use sc_network_sync::SyncingService; use sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient, TaskManager}; use sc_telemetry::{Telemetry, TelemetryHandle, TelemetryWorker, TelemetryWorkerHandle}; -use sp_api::{ApiExt, ConstructRuntimeApi}; +use sp_api::{ApiExt, ConstructRuntimeApi, ProvideRuntimeApi}; use sp_consensus_aura::AuraApi; +use sp_core::traits::SpawnEssentialNamed; use sp_keystore::KeystorePtr; -use sp_runtime::{app_crypto::AppCrypto, traits::Header as HeaderT}; +use sp_runtime::{ + app_crypto::AppCrypto, + traits::{Block as BlockT, Header as HeaderT}, +}; use std::{marker::PhantomData, sync::Arc, time::Duration}; use substrate_prometheus_endpoint::Registry; @@ -152,6 +156,21 @@ impl sc_executor::NativeExecutionDispatch for CollectivesPolkadotRuntimeExecutor } } +/// Native Westend Collectives executor instance. 
+pub struct CollectivesWestendRuntimeExecutor; + +impl sc_executor::NativeExecutionDispatch for CollectivesWestendRuntimeExecutor { + type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; + + fn dispatch(method: &str, data: &[u8]) -> Option> { + collectives_westend_runtime::api::dispatch(method, data) + } + + fn native_version() -> sc_executor::NativeVersion { + collectives_westend_runtime::native_version() + } +} + /// Native BridgeHubPolkadot executor instance. pub struct BridgeHubPolkadotRuntimeExecutor; @@ -212,6 +231,21 @@ impl sc_executor::NativeExecutionDispatch for ContractsRococoRuntimeExecutor { } } +/// Native Westend Glutton executor instance. +pub struct GluttonWestendRuntimeExecutor; + +impl sc_executor::NativeExecutionDispatch for GluttonWestendRuntimeExecutor { + type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; + + fn dispatch(method: &str, data: &[u8]) -> Option> { + glutton_westend_runtime::api::dispatch(method, data) + } + + fn native_version() -> sc_executor::NativeVersion { + glutton_westend_runtime::native_version() + } +} + /// Native Glutton executor instance. pub struct GluttonRuntimeExecutor; @@ -556,6 +590,7 @@ where CollatorPair, OverseerHandle, Arc>) + Send + Sync>, + Arc, ) -> Result<(), sc_service::Error>, { let parachain_config = prepare_node_config(parachain_config); @@ -689,6 +724,7 @@ where collator_key.expect("Command line arguments do not allow this. qed"), overseer_handle, announce_block, + backend.clone(), )?; } @@ -949,7 +985,8 @@ pub async fn start_rococo_parachain_node( para_id, collator_key, overseer_handle, - announce_block| { + announce_block, + backend| { let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?; let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( @@ -968,11 +1005,15 @@ pub async fn start_rococo_parachain_node( client.clone(), ); - let params = BasicAuraParams { + let params = AuraParams { create_inherent_data_providers: move |_, ()| async move { Ok(()) }, block_import, - para_client: client, + para_client: client.clone(), + para_backend: backend.clone(), relay_client: relay_chain_interface, + code_hash_provider: move |block_hash| { + client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash()) + }, sync_oracle, keystore, collator_key, @@ -982,11 +1023,10 @@ pub async fn start_rococo_parachain_node( relay_chain_slot_duration, proposer, collator_service, - // Very limited proposal time. - authoring_duration: Duration::from_millis(500), + authoring_duration: Duration::from_millis(1500), }; - let fut = basic_aura::run::< + let fut = aura::run::< Block, sp_consensus_aura::sr25519::AuthorityPair, _, @@ -996,6 +1036,8 @@ pub async fn start_rococo_parachain_node( _, _, _, + _, + _, >(params); task_manager.spawn_essential_handle().spawn("aura", None, fut); @@ -1341,7 +1383,8 @@ where para_id, collator_key, overseer_handle, - announce_block| { + announce_block, + _backend| { let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?; let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( @@ -1376,6 +1419,7 @@ where collator_service, // Very limited proposal time. authoring_duration: Duration::from_millis(500), + collation_request_receiver: None, }; let fut = @@ -1389,6 +1433,151 @@ where .await } +/// Start a shell node which should later transition into an Aura powered parachain node. 
Asset Hub +/// uses this because at genesis, Asset Hub was on the `shell` runtime which didn't have Aura and +/// needs to sync and upgrade before it can run `AuraApi` functions. +pub async fn start_asset_hub_node( + parachain_config: Configuration, + polkadot_config: Configuration, + collator_options: CollatorOptions, + para_id: ParaId, + hwbench: Option, +) -> sc_service::error::Result<(TaskManager, Arc>)> +where + RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, + RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue + + sp_api::Metadata + + sp_session::SessionKeys + + sp_api::ApiExt + + sp_offchain::OffchainWorkerApi + + sp_block_builder::BlockBuilder + + cumulus_primitives_core::CollectCollationInfo + + sp_consensus_aura::AuraApi::Pair as Pair>::Public> + + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi + + frame_rpc_system::AccountNonceApi, + <::Pair as Pair>::Signature: + TryFrom> + std::hash::Hash + sp_runtime::traits::Member + Codec, +{ + start_node_impl::( + parachain_config, + polkadot_config, + collator_options, + CollatorSybilResistance::Resistant, // Aura + para_id, + |_| Ok(RpcModule::new(())), + aura_build_import_queue::<_, AuraId>, + |client, + block_import, + prometheus_registry, + telemetry, + task_manager, + relay_chain_interface, + transaction_pool, + sync_oracle, + keystore, + relay_chain_slot_duration, + para_id, + collator_key, + overseer_handle, + announce_block, + _backend| { + let relay_chain_interface2 = relay_chain_interface.clone(); + + let collator_service = CollatorService::new( + client.clone(), + Arc::new(task_manager.spawn_handle()), + announce_block, + client.clone(), + ); + + let spawner = task_manager.spawn_handle(); + + let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( + spawner, + client.clone(), + transaction_pool, + prometheus_registry, + telemetry.clone(), + ); + + let collation_future = Box::pin(async move { + // Start collating with the `shell` runtime while waiting for an upgrade to an Aura + // compatible runtime. + let mut request_stream = cumulus_client_collator::relay_chain_driven::init( + collator_key.clone(), + para_id, + overseer_handle.clone(), + ) + .await; + while let Some(request) = request_stream.next().await { + let pvd = request.persisted_validation_data().clone(); + let last_head_hash = + match ::Header::decode(&mut &pvd.parent_head.0[..]) { + Ok(header) => header.hash(), + Err(e) => { + log::error!("Could not decode the head data: {e}"); + request.complete(None); + continue + }, + }; + + // Check if we have upgraded to an Aura compatible runtime and transition if + // necessary. + if client + .runtime_api() + .has_api::>(last_head_hash) + .unwrap_or(false) + { + // Respond to this request before transitioning to Aura. + request.complete(None); + break + } + } + + // Move to Aura consensus. + let slot_duration = match cumulus_client_consensus_aura::slot_duration(&*client) { + Ok(d) => d, + Err(e) => { + log::error!("Could not get Aura slot duration: {e}"); + return + }, + }; + + let proposer = Proposer::new(proposer_factory); + + let params = BasicAuraParams { + create_inherent_data_providers: move |_, ()| async move { Ok(()) }, + block_import, + para_client: client, + relay_client: relay_chain_interface2, + sync_oracle, + keystore, + collator_key, + para_id, + overseer_handle, + slot_duration, + relay_chain_slot_duration, + proposer, + collator_service, + // Very limited proposal time. 
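+				// (500ms, matching the other non-lookahead collators in this file.)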
+ authoring_duration: Duration::from_millis(500), + collation_request_receiver: Some(request_stream), + }; + + basic_aura::run::::Pair, _, _, _, _, _, _, _>(params) + .await + }); + + let spawner = task_manager.spawn_essential_handle(); + spawner.spawn_essential("cumulus-asset-hub-collator", None, collation_future); + + Ok(()) + }, + hwbench, + ) + .await +} + /// Start an aura powered parachain node which uses the lookahead collator to support async backing. /// This node is basic in the sense that its runtime api doesn't include common contents such as /// transaction payment. Used for aura glutton. @@ -1462,7 +1651,7 @@ where para_backend: backend.clone(), relay_client: relay_chain_interface, code_hash_provider: move |block_hash| { - client.code_at(block_hash).ok().map(ValidationCode).map(|c| c.hash()) + client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash()) }, sync_oracle, keystore, @@ -1533,6 +1722,7 @@ where CollatorPair, OverseerHandle, Arc>) + Send + Sync>, + Arc, ) -> Result<(), sc_service::Error>, { let parachain_config = prepare_node_config(parachain_config); @@ -1665,6 +1855,7 @@ where collator_key.expect("Command line arguments do not allow this. qed"), overseer_handle, announce_block, + backend.clone(), )?; } @@ -1743,7 +1934,8 @@ pub async fn start_contracts_rococo_node( para_id, collator_key, overseer_handle, - announce_block| { + announce_block, + _backend| { let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?; let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( @@ -1778,6 +1970,7 @@ pub async fn start_contracts_rococo_node( collator_service, // Very limited proposal time. authoring_duration: Duration::from_millis(500), + collation_request_receiver: None, }; let fut = basic_aura::run::< @@ -1804,10 +1997,11 @@ pub async fn start_contracts_rococo_node( fn warn_if_slow_hardware(hwbench: &sc_sysinfo::HwBench) { // Polkadot para-chains should generally use these requirements to ensure that the relay-chain // will not take longer than expected to import its blocks. 
- if !frame_benchmarking_cli::SUBSTRATE_REFERENCE_HARDWARE.check_hardware(hwbench) { + if let Err(err) = frame_benchmarking_cli::SUBSTRATE_REFERENCE_HARDWARE.check_hardware(hwbench) { log::warn!( - "⚠️ The hardware does not meet the minimal requirements for role 'Authority' find out more at:\n\ - https://wiki.polkadot.network/docs/maintain-guides-how-to-validate-polkadot#reference-hardware" + "⚠️ The hardware does not meet the minimal requirements {} for role 'Authority' find out more at:\n\ + https://wiki.polkadot.network/docs/maintain-guides-how-to-validate-polkadot#reference-hardware", + err ); } } diff --git a/cumulus/primitives/aura/Cargo.toml b/cumulus/primitives/aura/Cargo.toml index 791ec17378a803b4ef5945082a2333a3d7df7952..19607eb7c18c2335adf8cc461cc7dc1bb6f82331 100644 --- a/cumulus/primitives/aura/Cargo.toml +++ b/cumulus/primitives/aura/Cargo.toml @@ -3,6 +3,8 @@ name = "cumulus-primitives-aura" version = "0.1.0" authors.workspace = true edition.workspace = true +license = "Apache-2.0" +description = "Core primitives for Aura in Cumulus" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive" ] } diff --git a/cumulus/primitives/core/Cargo.toml b/cumulus/primitives/core/Cargo.toml index fc7573be138357bab6757b59119e0891c37953fb..23839a10e46b3f28723f49396c442579ff159543 100644 --- a/cumulus/primitives/core/Cargo.toml +++ b/cumulus/primitives/core/Cargo.toml @@ -3,10 +3,12 @@ name = "cumulus-primitives-core" version = "0.1.0" authors.workspace = true edition.workspace = true +license = "Apache-2.0" +description = "Cumulus related core primitive types and traits" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive" ] } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } # Substrate sp-api = { path = "../../../substrate/primitives/api", default-features = false} @@ -34,3 +36,8 @@ std = [ "sp-trie/std", "xcm/std", ] +runtime-benchmarks = [ + "polkadot-parachain-primitives/runtime-benchmarks", + "polkadot-primitives/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] diff --git a/cumulus/primitives/core/src/lib.rs b/cumulus/primitives/core/src/lib.rs index faaef09b26e174c151144e8a52bb76d8a495c285..835c9de649eada5eb1517797d6d330f9ecf012e8 100644 --- a/cumulus/primitives/core/src/lib.rs +++ b/cumulus/primitives/core/src/lib.rs @@ -18,7 +18,7 @@ #![cfg_attr(not(feature = "std"), no_std)] -use codec::{Decode, Encode}; +use codec::{Decode, Encode, MaxEncodedLen}; use polkadot_parachain_primitives::primitives::HeadData; use scale_info::TypeInfo; use sp_runtime::RuntimeDebug; @@ -78,6 +78,43 @@ impl From for &'static str { } } +/// The origin of an inbound message. +#[derive(Encode, Decode, MaxEncodedLen, Clone, Eq, PartialEq, TypeInfo, Debug)] +pub enum AggregateMessageOrigin { + /// The message came from the para-chain itself. + Here, + /// The message came from the relay-chain. + /// + /// This is used by the DMP queue. + Parent, + /// The message came from a sibling para-chain. + /// + /// This is used by the HRMP queue. 
+ Sibling(ParaId), +} + +impl From for xcm::v3::MultiLocation { + fn from(origin: AggregateMessageOrigin) -> Self { + match origin { + AggregateMessageOrigin::Here => MultiLocation::here(), + AggregateMessageOrigin::Parent => MultiLocation::parent(), + AggregateMessageOrigin::Sibling(id) => + MultiLocation::new(1, Junction::Parachain(id.into())), + } + } +} + +#[cfg(feature = "runtime-benchmarks")] +impl From for AggregateMessageOrigin { + fn from(x: u32) -> Self { + match x { + 0 => Self::Here, + 1 => Self::Parent, + p => Self::Sibling(ParaId::from(p)), + } + } +} + /// Information about an XCMP channel. pub struct ChannelInfo { /// The maximum number of messages that can be pending in the channel at once. @@ -96,7 +133,7 @@ pub struct ChannelInfo { pub trait GetChannelInfo { fn get_channel_status(id: ParaId) -> ChannelStatus; - fn get_channel_max(id: ParaId) -> Option; + fn get_channel_info(id: ParaId) -> Option; } /// Something that should be called when sending an upward message. diff --git a/cumulus/primitives/parachain-inherent/Cargo.toml b/cumulus/primitives/parachain-inherent/Cargo.toml index 39b70f20a97ff99587a63a6de816de8ce8b15964..46b5da57f3837a0f840b516db7460f2112ec8494 100644 --- a/cumulus/primitives/parachain-inherent/Cargo.toml +++ b/cumulus/primitives/parachain-inherent/Cargo.toml @@ -3,11 +3,13 @@ name = "cumulus-primitives-parachain-inherent" version = "0.1.0" authors.workspace = true edition.workspace = true +description = "Inherent that needs to be present in every parachain block. Contains messages and a relay chain storage-proof." +license = "Apache-2.0" [dependencies] async-trait = { version = "0.1.73", optional = true } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive" ] } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } tracing = { version = "0.1.37", optional = true } # Substrate diff --git a/cumulus/primitives/timestamp/Cargo.toml b/cumulus/primitives/timestamp/Cargo.toml index 6b0d3d4a4dc6b3b755f0a822ecb0fa1062f1fc9a..a0fea51f8db1ff43682e1cb9093d5a1b2e1fd703 100644 --- a/cumulus/primitives/timestamp/Cargo.toml +++ b/cumulus/primitives/timestamp/Cargo.toml @@ -4,6 +4,7 @@ version = "0.1.0" authors.workspace = true edition.workspace = true description = "Provides timestamp related functionality for parachains." 
+license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive" ] } @@ -20,6 +21,7 @@ cumulus-primitives-core = { path = "../core", default-features = false } [features] default = [ "std" ] std = [ + "codec/std", "cumulus-primitives-core/std", "sp-inherents/std", "sp-std/std", diff --git a/cumulus/primitives/utility/Cargo.toml b/cumulus/primitives/utility/Cargo.toml index 9ed1e664ac217e7504d2ca6915c588b5c9c82c10..45ce6701988682ed963d13e19f56b5f7c9b08e09 100644 --- a/cumulus/primitives/utility/Cargo.toml +++ b/cumulus/primitives/utility/Cargo.toml @@ -3,6 +3,8 @@ name = "cumulus-primitives-utility" version = "0.1.0" authors.workspace = true edition.workspace = true +license = "Apache-2.0" +description = "Helper datatypes for Cumulus" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive" ] } @@ -15,11 +17,12 @@ sp-runtime = { path = "../../../substrate/primitives/runtime", default-features sp-std = { path = "../../../substrate/primitives/std", default-features = false} # Polkadot -polkadot-runtime-common = { path = "../../../polkadot/runtime/common", default-features = false} -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false} +polkadot-runtime-common = { path = "../../../polkadot/runtime/common", default-features = false } +polkadot-runtime-parachains = { path = "../../../polkadot/runtime/parachains", default-features = false } +xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../../../polkadot/xcm/xcm-executor", default-features = false} xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder", default-features = false} - +pallet-xcm-benchmarks = { path = "../../../polkadot/xcm/pallet-xcm-benchmarks", default-features = false } # Cumulus cumulus-primitives-core = { path = "../core", default-features = false } @@ -30,7 +33,10 @@ std = [ "codec/std", "cumulus-primitives-core/std", "frame-support/std", + "log/std", + "pallet-xcm-benchmarks/std", "polkadot-runtime-common/std", + "polkadot-runtime-parachains/std", "sp-io/std", "sp-runtime/std", "sp-std/std", @@ -40,8 +46,11 @@ std = [ ] runtime-benchmarks = [ + "cumulus-primitives-core/runtime-benchmarks", "frame-support/runtime-benchmarks", + "pallet-xcm-benchmarks/runtime-benchmarks", "polkadot-runtime-common/runtime-benchmarks", + "polkadot-runtime-parachains/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", diff --git a/cumulus/primitives/utility/src/lib.rs b/cumulus/primitives/utility/src/lib.rs index 7d07bc329ed4cc0fdfcb08ec5281e1c7f3f378d2..03f827d7ee2f64ba1afbebe90d080590f4b29886 100644 --- a/cumulus/primitives/utility/src/lib.rs +++ b/cumulus/primitives/utility/src/lib.rs @@ -28,29 +28,13 @@ use frame_support::{ }, weights::Weight, }; -use polkadot_runtime_common::xcm_sender::ConstantPrice; +use polkadot_runtime_common::xcm_sender::PriceForMessageDelivery; use sp_runtime::{traits::Saturating, SaturatedConversion}; use sp_std::{marker::PhantomData, prelude::*}; use xcm::{latest::prelude::*, WrapVersion}; use xcm_builder::TakeRevenue; use xcm_executor::traits::{MatchesFungibles, TransactAsset, WeightTrader}; -pub trait PriceForParentDelivery { - fn price_for_parent_delivery(message: &Xcm<()>) -> MultiAssets; -} - -impl PriceForParentDelivery 
 for () { - fn price_for_parent_delivery(_: &Xcm<()>) -> MultiAssets { - MultiAssets::new() - } -} - -impl> PriceForParentDelivery for ConstantPrice { - fn price_for_parent_delivery(_: &Xcm<()>) -> MultiAssets { - T::get() - } -} - /// Xcm router which recognises the `Parent` destination and handles it by sending the message into /// the given UMP `UpwardMessageSender` implementation. Thus this essentially adapts an /// `UpwardMessageSender` trait impl into a `SendXcm` trait impl. @@ -63,7 +47,7 @@ impl SendXcm for ParentAsUmp where T: UpwardMessageSender, W: WrapVersion, - P: PriceForParentDelivery, + P: PriceForMessageDelivery, { type Ticket = Vec; @@ -76,7 +60,7 @@ where if d.contains_parents_only(1) { // An upward message for the relay chain. let xcm = msg.take().ok_or(SendError::MissingArgument)?; - let price = P::price_for_parent_delivery(&xcm); + let price = P::price_for_delivery((), &xcm); let versioned_xcm = W::wrap_version(&d, xcm).map_err(|()| SendError::DestinationUnsupported)?; let data = versioned_xcm.encode(); @@ -118,7 +102,7 @@ struct AssetTraderRefunder { /// Important: Errors if the Trader is being called twice by 2 BuyExecution instructions /// Alternatively we could just return payment in the aforementioned case pub struct TakeFirstAssetTrader< - AccountId, + AccountId: Eq, FeeCharger: ChargeWeightInFungibles, Matcher: MatchesFungibles, ConcreteAssets: fungibles::Mutate + fungibles::Balanced, @@ -128,7 +112,7 @@ pub struct TakeFirstAssetTrader< PhantomData<(AccountId, FeeCharger, Matcher, ConcreteAssets, HandleRefund)>, ); impl< - AccountId, + AccountId: Eq, FeeCharger: ChargeWeightInFungibles, Matcher: MatchesFungibles, ConcreteAssets: fungibles::Mutate + fungibles::Balanced, @@ -257,7 +241,7 @@ impl< } impl< - AccountId, + AccountId: Eq, FeeCharger: ChargeWeightInFungibles, Matcher: MatchesFungibles, ConcreteAssets: fungibles::Mutate + fungibles::Balanced, @@ -280,7 +264,7 @@ pub struct XcmFeesTo32ByteAccount, - ReceiverAccount: frame_support::traits::Get>, + ReceiverAccount: Get>, > TakeRevenue for XcmFeesTo32ByteAccount { fn take_revenue(revenue: MultiAsset) { @@ -288,9 +272,7 @@ impl< let ok = FungiblesMutateAdapter::deposit_asset( &revenue, &(X1(AccountId32 { network: None, id: receiver.into() }).into()), - // We aren't able to track the XCM that initiated the fee deposit, so we create a - // fake message hash here - &XcmContext::with_message_id([0; 32]), + None, ) .is_ok(); @@ -542,3 +524,58 @@ mod tests { assert_eq!(trader.buy_weight(weight_to_buy, payment, &ctx), Err(XcmError::NotWithdrawable)); } } + +/// Implementation of `pallet_xcm_benchmarks::EnsureDelivery` which helps to ensure delivery to the +/// parent relay chain. Deposits existential deposit for origin (if needed). +/// Deposits estimated fee to the origin account (if needed). +/// Allows triggering additional logic for a specific `ParaId` (e.g. opening an HRMP channel), if needed.
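+/// Typically wired into a runtime's XCM benchmarking configuration (for example as a
+/// `DeliveryHelper` type in `pallet_xcm_benchmarks`-based setups).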
+#[cfg(feature = "runtime-benchmarks")] +pub struct ToParentDeliveryHelper( + sp_std::marker::PhantomData<(XcmConfig, ExistentialDeposit, PriceForDelivery)>, +); + +#[cfg(feature = "runtime-benchmarks")] +impl< + XcmConfig: xcm_executor::Config, + ExistentialDeposit: Get>, + PriceForDelivery: PriceForMessageDelivery, + > pallet_xcm_benchmarks::EnsureDelivery + for ToParentDeliveryHelper +{ + fn ensure_successful_delivery( + origin_ref: &MultiLocation, + _dest: &MultiLocation, + fee_reason: xcm_executor::traits::FeeReason, + ) -> (Option, Option) { + use xcm::latest::{MAX_INSTRUCTIONS_TO_DECODE, MAX_ITEMS_IN_MULTIASSETS}; + use xcm_executor::{traits::FeeManager, FeesMode}; + + let mut fees_mode = None; + if !XcmConfig::FeeManager::is_waived(Some(origin_ref), fee_reason) { + // if not waived, we need to set up accounts for paying and receiving fees + + // mint ED to origin if needed + if let Some(ed) = ExistentialDeposit::get() { + XcmConfig::AssetTransactor::deposit_asset(&ed, &origin_ref, None).unwrap(); + } + + // overestimate delivery fee + let mut max_assets: Vec = Vec::new(); + for i in 0..MAX_ITEMS_IN_MULTIASSETS { + max_assets.push((GeneralIndex(i as u128), 100u128).into()); + } + let overestimated_xcm = + vec![WithdrawAsset(max_assets.into()); MAX_INSTRUCTIONS_TO_DECODE as usize].into(); + let overestimated_fees = PriceForDelivery::price_for_delivery((), &overestimated_xcm); + + // mint overestimated fee to origin + for fee in overestimated_fees.inner() { + XcmConfig::AssetTransactor::deposit_asset(&fee, &origin_ref, None).unwrap(); + } + + // expected worst case - direct withdraw + fees_mode = Some(FeesMode { jit_withdraw: true }); + } + (fees_mode, None) + } +} diff --git a/cumulus/scripts/benchmarks.sh b/cumulus/scripts/benchmarks.sh index 29d0690592583512e7923f92b9d81d46913d04e9..7da18d9440e5fbe1febb5dd48a7b39855ee04b78 100755 --- a/cumulus/scripts/benchmarks.sh +++ b/cumulus/scripts/benchmarks.sh @@ -7,6 +7,7 @@ repeat=${3:-20} __dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" ${__dir}/benchmarks-ci.sh collectives collectives-polkadot target/$target $steps $repeat +${__dir}/benchmarks-ci.sh collectives collectives-westend target/$target $steps $repeat ${__dir}/benchmarks-ci.sh assets asset-hub-kusama target/$target $steps $repeat ${__dir}/benchmarks-ci.sh assets asset-hub-polkadot target/$target $steps $repeat @@ -17,3 +18,4 @@ ${__dir}/benchmarks-ci.sh bridge-hubs bridge-hub-kusama target/$target $steps $r ${__dir}/benchmarks-ci.sh bridge-hubs bridge-hub-rococo target/$target $steps $repeat ${__dir}/benchmarks-ci.sh glutton glutton-kusama target/$target $steps $repeat +${__dir}/benchmarks-ci.sh glutton glutton-westend target/$target $steps $repeat diff --git a/cumulus/scripts/bridges_common.sh b/cumulus/scripts/bridges_common.sh new file mode 100755 index 0000000000000000000000000000000000000000..8d64c5ede52a25eaa4f0332addf1b39969ac478d --- /dev/null +++ b/cumulus/scripts/bridges_common.sh @@ -0,0 +1,305 @@ +#!/bin/bash + +function ensure_binaries() { + if [[ ! -f ~/local_bridge_testing/bin/polkadot ]]; then + echo " Required polkadot binary '~/local_bridge_testing/bin/polkadot' does not exist!" + echo " You need to build it and copy to this location!" + echo " Please, check ./parachains/runtimes/bridge-hubs/README.md (Prepare/Build/Deploy)" + exit 1 + fi + if [[ ! -f ~/local_bridge_testing/bin/polkadot-parachain ]]; then + echo " Required polkadot-parachain binary '~/local_bridge_testing/bin/polkadot-parachain' does not exist!" 
+ echo " You need to build it and copy to this location!" + echo " Please, check ./parachains/runtimes/bridge-hubs/README.md (Prepare/Build/Deploy)" + exit 1 + fi +} + +function ensure_relayer() { + if [[ ! -f ~/local_bridge_testing/bin/substrate-relay ]]; then + echo " Required substrate-relay binary '~/local_bridge_testing/bin/substrate-relay' does not exist!" + echo " You need to build it and copy to this location!" + echo " Please, check ./parachains/runtimes/bridge-hubs/README.md (Prepare/Build/Deploy)" + exit 1 + fi +} + +function ensure_polkadot_js_api() { + if ! which polkadot-js-api &> /dev/null; then + echo '' + echo 'Required command `polkadot-js-api` not in PATH, please, install, e.g.:' + echo "npm install -g @polkadot/api-cli@beta" + echo " or" + echo "yarn global add @polkadot/api-cli" + echo '' + exit 1 + fi + if ! which jq &> /dev/null; then + echo '' + echo 'Required command `jq` not in PATH, please, install, e.g.:' + echo "apt install -y jq" + echo '' + exit 1 + fi + generate_hex_encoded_call_data "check" "--" + local retVal=$? + if [ $retVal -ne 0 ]; then + echo "" + echo "" + echo "-------------------" + echo "Installing (nodejs) sub module: $(dirname "$0")/generate_hex_encoded_call" + pushd $(dirname "$0")/generate_hex_encoded_call + npm install + popd + fi +} + +function generate_hex_encoded_call_data() { + local type=$1 + local endpoint=$2 + local output=$3 + shift + shift + shift + echo "Input params: $@" + + node $(dirname "$0")/generate_hex_encoded_call "$type" "$endpoint" "$output" "$@" + local retVal=$? + + if [ $type != "check" ]; then + local hex_encoded_data=$(cat $output) + echo "Generated hex-encoded bytes to file '$output': $hex_encoded_data" + fi + + return $retVal +} + +function transfer_balance() { + local runtime_para_endpoint=$1 + local seed=$2 + local target_account=$3 + local amount=$4 + echo " calling transfer_balance:" + echo " runtime_para_endpoint: ${runtime_para_endpoint}" + echo " seed: ${seed}" + echo " target_account: ${target_account}" + echo " amount: ${amount}" + echo "--------------------------------------------------" + + polkadot-js-api \ + --ws "${runtime_para_endpoint}" \ + --seed "${seed?}" \ + tx.balances.transferAllowDeath \ + "${target_account}" \ + "${amount}" +} + +function send_governance_transact() { + local relay_url=$1 + local relay_chain_seed=$2 + local para_id=$3 + local hex_encoded_data=$4 + local require_weight_at_most_ref_time=$5 + local require_weight_at_most_proof_size=$6 + echo " calling send_governance_transact:" + echo " relay_url: ${relay_url}" + echo " relay_chain_seed: ${relay_chain_seed}" + echo " para_id: ${para_id}" + echo " hex_encoded_data: ${hex_encoded_data}" + echo " require_weight_at_most_ref_time: ${require_weight_at_most_ref_time}" + echo " require_weight_at_most_proof_size: ${require_weight_at_most_proof_size}" + echo " params:" + + local dest=$(jq --null-input \ + --arg para_id "$para_id" \ + '{ "V3": { "parents": 0, "interior": { "X1": { "Parachain": $para_id } } } }') + + local message=$(jq --null-input \ + --argjson hex_encoded_data $hex_encoded_data \ + --arg require_weight_at_most_ref_time "$require_weight_at_most_ref_time" \ + --arg require_weight_at_most_proof_size "$require_weight_at_most_proof_size" \ + ' + { + "V3": [ + { + "UnpaidExecution": { + "weight_limit": "Unlimited" + } + }, + { + "Transact": { + "origin_kind": "Superuser", + "require_weight_at_most": { + "ref_time": $require_weight_at_most_ref_time, + "proof_size": $require_weight_at_most_proof_size, + }, + "call": { + "encoded": 
$hex_encoded_data + } + } + } + ] + } + ') + + echo "" + echo " dest:" + echo "${dest}" + echo "" + echo " message:" + echo "${message}" + echo "" + echo "--------------------------------------------------" + + polkadot-js-api \ + --ws "${relay_url?}" \ + --seed "${relay_chain_seed?}" \ + --sudo \ + tx.xcmPallet.send \ + "${dest}" \ + "${message}" +} + +function open_hrmp_channels() { + local relay_url=$1 + local relay_chain_seed=$2 + local sender_para_id=$3 + local recipient_para_id=$4 + local max_capacity=$5 + local max_message_size=$6 + echo " calling open_hrmp_channels:" + echo " relay_url: ${relay_url}" + echo " relay_chain_seed: ${relay_chain_seed}" + echo " sender_para_id: ${sender_para_id}" + echo " recipient_para_id: ${recipient_para_id}" + echo " max_capacity: ${max_capacity}" + echo " max_message_size: ${max_message_size}" + echo " params:" + echo "--------------------------------------------------" + polkadot-js-api \ + --ws "${relay_url?}" \ + --seed "${relay_chain_seed?}" \ + --sudo \ + tx.hrmp.forceOpenHrmpChannel \ + ${sender_para_id} \ + ${recipient_para_id} \ + ${max_capacity} \ + ${max_message_size} +} + +function set_storage() { + local relay_url=$1 + local relay_chain_seed=$2 + local runtime_para_id=$3 + local runtime_para_endpoint=$4 + local items=$5 + echo " calling set_storage:" + echo " relay_url: ${relay_url}" + echo " relay_chain_seed: ${relay_chain_seed}" + echo " runtime_para_id: ${runtime_para_id}" + echo " runtime_para_endpoint: ${runtime_para_endpoint}" + echo " items: ${items}" + echo " params:" + + # 1. generate data for Transact (System::set_storage) + local tmp_output_file=$(mktemp) + generate_hex_encoded_call_data "set-storage" "${runtime_para_endpoint}" "${tmp_output_file}" "$items" + local hex_encoded_data=$(cat $tmp_output_file) + + # 2. trigger governance call + send_governance_transact "${relay_url}" "${relay_chain_seed}" "${runtime_para_id}" "${hex_encoded_data}" 200000000 12000 +} + +function force_create_foreign_asset() { + local relay_url=$1 + local relay_chain_seed=$2 + local runtime_para_id=$3 + local runtime_para_endpoint=$4 + local asset_multilocation=$5 + local asset_owner_account_id=$6 + local min_balance=$7 + local is_sufficient=$8 + echo " calling force_create_foreign_asset:" + echo " relay_url: ${relay_url}" + echo " relay_chain_seed: ${relay_chain_seed}" + echo " runtime_para_id: ${runtime_para_id}" + echo " runtime_para_endpoint: ${runtime_para_endpoint}" + echo " asset_multilocation: ${asset_multilocation}" + echo " asset_owner_account_id: ${asset_owner_account_id}" + echo " min_balance: ${min_balance}" + echo " is_sufficient: ${is_sufficient}" + echo " params:" + + # 1. generate data for Transact (ForeignAssets::force_create) + local tmp_output_file=$(mktemp) + generate_hex_encoded_call_data "force-create-asset" "${runtime_para_endpoint}" "${tmp_output_file}" "$asset_multilocation" "$asset_owner_account_id" $is_sufficient $min_balance + local hex_encoded_data=$(cat $tmp_output_file) + + # 2. 
trigger governance call + send_governance_transact "${relay_url}" "${relay_chain_seed}" "${runtime_para_id}" "${hex_encoded_data}" 200000000 12000 +} + +function limited_reserve_transfer_assets() { + local url=$1 + local seed=$2 + local destination=$3 + local beneficiary=$4 + local assets=$5 + local fee_asset_item=$6 + local weight_limit=$7 + echo " calling limited_reserve_transfer_assets:" + echo " url: ${url}" + echo " seed: ${seed}" + echo " destination: ${destination}" + echo " beneficiary: ${beneficiary}" + echo " assets: ${assets}" + echo " fee_asset_item: ${fee_asset_item}" + echo " weight_limit: ${weight_limit}" + echo "" + echo "--------------------------------------------------" + + polkadot-js-api \ + --ws "${url?}" \ + --seed "${seed?}" \ + tx.polkadotXcm.limitedReserveTransferAssets \ + "${destination}" \ + "${beneficiary}" \ + "${assets}" \ + "${fee_asset_item}" \ + "${weight_limit}" +} + +function claim_rewards() { + local runtime_para_endpoint=$1 + local seed=$2 + local lane_id=$3 + local bridged_chain_id=$4 + local owner=$5 + echo " calling claim_rewards:" + echo " runtime_para_endpoint: ${runtime_para_endpoint}" + echo " seed: ${seed}" + echo " lane_id: ${lane_id}" + echo " bridged_chain_id: ${bridged_chain_id}" + echo " owner: ${owner}" + echo "" + + local rewards_account_params=$(jq --null-input \ + --arg lane_id "$lane_id" \ + --arg bridged_chain_id "$bridged_chain_id" \ + --arg owner "$owner" \ + '{ + "laneId": $lane_id, + "bridgedChainId": $bridged_chain_id, + "owner": $owner + }') + + echo " rewards_account_params:" + echo "${rewards_account_params}" + echo "--------------------------------------------------" + + polkadot-js-api \ + --ws "${runtime_para_endpoint}" \ + --seed "${seed?}" \ + tx.bridgeRelayers.claimRewards \ + "${rewards_account_params}" +} \ No newline at end of file diff --git a/cumulus/scripts/bridges_rococo_westend.sh b/cumulus/scripts/bridges_rococo_westend.sh new file mode 100755 index 0000000000000000000000000000000000000000..9b3bd350276ff5fbaf388cc137bd6debb0a074b3 --- /dev/null +++ b/cumulus/scripts/bridges_rococo_westend.sh @@ -0,0 +1,390 @@ +#!/bin/bash + +# import common functions +source "$(dirname "$0")"/bridges_common.sh + +# Expected sovereign accounts. +# +# Generated by: +# +# #[test] +# fn generate_sovereign_accounts() { +# use sp_core::crypto::Ss58Codec; +# use polkadot_parachain_primitives::primitives::Sibling; +# +# parameter_types! 
{ +# pub UniversalLocationAHR: InteriorMultiLocation = X2(GlobalConsensus(Rococo), Parachain(1000)); +# pub UniversalLocationAHW: InteriorMultiLocation = X2(GlobalConsensus(Westend), Parachain(1000)); +# } +# +# // SS58=42 +# println!("GLOBAL_CONSENSUS_ROCOCO_SOVEREIGN_ACCOUNT=\"{}\"", +# frame_support::sp_runtime::AccountId32::new( +# GlobalConsensusConvertsFor::::convert_location( +# &MultiLocation { parents: 2, interior: X1(GlobalConsensus(Rococo)) }).unwrap() +# ).to_ss58check_with_version(42_u16.into()) +# ); +# println!("GLOBAL_CONSENSUS_ROCOCO_ASSET_HUB_ROCOCO_1000_SOVEREIGN_ACCOUNT=\"{}\"", +# frame_support::sp_runtime::AccountId32::new( +# GlobalConsensusParachainConvertsFor::::convert_location( +# &MultiLocation { parents: 2, interior: X2(GlobalConsensus(Rococo), Parachain(1000)) }).unwrap() +# ).to_ss58check_with_version(42_u16.into()) +# ); +# println!("ASSET_HUB_WESTEND_SOVEREIGN_ACCOUNT_AT_BRIDGE_HUB_WESTEND=\"{}\"", +# frame_support::sp_runtime::AccountId32::new( +# SiblingParachainConvertsVia::::convert_location( +# &MultiLocation { parents: 1, interior: X1(Parachain(1000)) }).unwrap() +# ).to_ss58check_with_version(42_u16.into()) +# ); +# +# // SS58=42 +# println!("GLOBAL_CONSENSUS_WESTEND_SOVEREIGN_ACCOUNT=\"{}\"", +# frame_support::sp_runtime::AccountId32::new( +# GlobalConsensusConvertsFor::::convert_location( +# &MultiLocation { parents: 2, interior: X1(GlobalConsensus(Westend)) }).unwrap() +# ).to_ss58check_with_version(42_u16.into()) +# ); +# println!("GLOBAL_CONSENSUS_WESTEND_ASSET_HUB_WESTEND_1000_SOVEREIGN_ACCOUNT=\"{}\"", +# frame_support::sp_runtime::AccountId32::new( +# GlobalConsensusParachainConvertsFor::::convert_location( +# &MultiLocation { parents: 2, interior: X2(GlobalConsensus(Westend), Parachain(1000)) }).unwrap() +# ).to_ss58check_with_version(42_u16.into()) +# ); +# println!("ASSET_HUB_ROCOCO_SOVEREIGN_ACCOUNT_AT_BRIDGE_HUB_ROCOCO=\"{}\"", +# frame_support::sp_runtime::AccountId32::new( +# SiblingParachainConvertsVia::::convert_location( +# &MultiLocation { parents: 1, interior: X1(Parachain(1000)) }).unwrap() +# ).to_ss58check_with_version(42_u16.into()) +# ); +# } +GLOBAL_CONSENSUS_ROCOCO_SOVEREIGN_ACCOUNT="5GxRGwT8bU1JeBPTUXc7LEjZMxNrK8MyL2NJnkWFQJTQ4sii" +GLOBAL_CONSENSUS_ROCOCO_ASSET_HUB_ROCOCO_1000_SOVEREIGN_ACCOUNT="5CfNu7eH3SJvqqPt3aJh38T8dcFvhGzEohp9tsd41ANhXDnQ" +ASSET_HUB_WESTEND_SOVEREIGN_ACCOUNT_AT_BRIDGE_HUB_WESTEND="5Eg2fntNprdN3FgH4sfEaaZhYtddZQSQUqvYJ1f2mLtinVhV" +GLOBAL_CONSENSUS_WESTEND_SOVEREIGN_ACCOUNT="5He2Qdztyxxa4GoagY6q1jaiLMmKy1gXS7PdZkhfj8ZG9hk5" +GLOBAL_CONSENSUS_WESTEND_ASSET_HUB_WESTEND_1000_SOVEREIGN_ACCOUNT="5GUD9X494SnhfBTNReHwhV1599McpyVrAqFY6WnTfVQVYNUM" +ASSET_HUB_ROCOCO_SOVEREIGN_ACCOUNT_AT_BRIDGE_HUB_ROCOCO="5Eg2fntNprdN3FgH4sfEaaZhYtddZQSQUqvYJ1f2mLtinVhV" + +# Expected sovereign accounts for rewards on BridgeHubs. 
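+# (Derived from lane `00000002` and the `bhwd`/`bhro` bridged-chain ids used below.)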
+# +# Generated by: +# #[test] +# fn generate_sovereign_accounts_for_rewards() { +# use bp_messages::LaneId; +# use bp_relayers::{PayRewardFromAccount, RewardsAccountOwner, RewardsAccountParams}; +# use sp_core::crypto::Ss58Codec; +# +# // SS58=42 +# println!( +# "ON_BRIDGE_HUB_ROCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhwd_ThisChain=\"{}\"", +# frame_support::sp_runtime::AccountId32::new( +# PayRewardFromAccount::<[u8; 32], [u8; 32]>::rewards_account(RewardsAccountParams::new( +# LaneId([0, 0, 0, 2]), +# *b"bhwd", +# RewardsAccountOwner::ThisChain +# )) +# ) +# .to_ss58check_with_version(42_u16.into()) +# ); +# // SS58=42 +# println!( +# "ON_BRIDGE_HUB_ROCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhwd_BridgedChain=\"{}\"", +# frame_support::sp_runtime::AccountId32::new( +# PayRewardFromAccount::<[u8; 32], [u8; 32]>::rewards_account(RewardsAccountParams::new( +# LaneId([0, 0, 0, 2]), +# *b"bhwd", +# RewardsAccountOwner::BridgedChain +# )) +# ) +# .to_ss58check_with_version(42_u16.into()) +# ); +# +# // SS58=42 +# println!( +# "ON_BRIDGE_HUB_WESTEND_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhro_ThisChain=\"{}\"", +# frame_support::sp_runtime::AccountId32::new( +# PayRewardFromAccount::<[u8; 32], [u8; 32]>::rewards_account(RewardsAccountParams::new( +# LaneId([0, 0, 0, 2]), +# *b"bhro", +# RewardsAccountOwner::ThisChain +# )) +# ) +# .to_ss58check_with_version(42_u16.into()) +# ); +# // SS58=42 +# println!( +# "ON_BRIDGE_HUB_WESTEND_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhro_BridgedChain=\"{}\"", +# frame_support::sp_runtime::AccountId32::new( +# PayRewardFromAccount::<[u8; 32], [u8; 32]>::rewards_account(RewardsAccountParams::new( +# LaneId([0, 0, 0, 2]), +# *b"bhro", +# RewardsAccountOwner::BridgedChain +# )) +# ) +# .to_ss58check_with_version(42_u16.into()) +# ); +# } +ON_BRIDGE_HUB_ROCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhwd_ThisChain="5EHnXaT5BhiSGP5hbdsoVGtzi2sQVgpDNToTxLYeQvKoMPEm" +ON_BRIDGE_HUB_ROCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhwd_BridgedChain="5EHnXaT5BhiSGP5hbdt5EJSapXYbxEv678jyWHEUskCXcjqo" +ON_BRIDGE_HUB_WESTEND_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhro_ThisChain="5EHnXaT5BhiSGP5h9Rg8sgUJqoLym3iEaWUiboT8S9AT5xFh" +ON_BRIDGE_HUB_WESTEND_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhro_BridgedChain="5EHnXaT5BhiSGP5h9RgQci1txJ2BDbp7KBRE9k8xty3BMUSi" + +LANE_ID="00000002" + +function init_ro_wnd() { + ensure_relayer + + RUST_LOG=runtime=trace,rpc=trace,bridge=trace \ + ~/local_bridge_testing/bin/substrate-relay init-bridge rococo-to-bridge-hub-westend \ + --source-host localhost \ + --source-port 9942 \ + --source-version-mode Auto \ + --target-host localhost \ + --target-port 8945 \ + --target-version-mode Auto \ + --target-signer //Bob +} + +function init_wnd_ro() { + ensure_relayer + + RUST_LOG=runtime=trace,rpc=trace,bridge=trace \ + ~/local_bridge_testing/bin/substrate-relay init-bridge westend-to-bridge-hub-rococo \ + --source-host localhost \ + --source-port 9945 \ + --source-version-mode Auto \ + --target-host localhost \ + --target-port 8943 \ + --target-version-mode Auto \ + --target-signer //Bob +} + +function run_relay() { + ensure_relayer + + RUST_LOG=runtime=trace,rpc=trace,bridge=trace \ + ~/local_bridge_testing/bin/substrate-relay relay-headers-and-messages bridge-hub-rococo-bridge-hub-westend \ + --rococo-host localhost \ + --rococo-port 9942 \ + --rococo-version-mode Auto \ + --bridge-hub-rococo-host localhost \ + --bridge-hub-rococo-port 8943 \ + --bridge-hub-rococo-version-mode Auto \ + --bridge-hub-rococo-signer //Charlie \ + 
--bridge-hub-rococo-transactions-mortality 4 \ + --westend-host localhost \ + --westend-port 9945 \ + --westend-version-mode Auto \ + --bridge-hub-westend-host localhost \ + --bridge-hub-westend-port 8945 \ + --bridge-hub-westend-version-mode Auto \ + --bridge-hub-westend-signer //Charlie \ + --bridge-hub-westend-transactions-mortality 4 \ + --lane "${LANE_ID}" +} + +case "$1" in + run-relay) + init_ro_wnd + init_wnd_ro + run_relay + ;; + init-asset-hub-rococo-local) + ensure_polkadot_js_api + # create foreign assets for native Westend token (governance call on Rococo) + force_create_foreign_asset \ + "ws://127.0.0.1:9942" \ + "//Alice" \ + 1000 \ + "ws://127.0.0.1:9910" \ + "$(jq --null-input '{ "parents": 2, "interior": { "X1": { "GlobalConsensus": "Westend" } } }')" \ + "$GLOBAL_CONSENSUS_WESTEND_SOVEREIGN_ACCOUNT" \ + 10000000000 \ + true + # drip SA which holds reserves + transfer_balance \ + "ws://127.0.0.1:9910" \ + "//Alice" \ + "$GLOBAL_CONSENSUS_WESTEND_ASSET_HUB_WESTEND_1000_SOVEREIGN_ACCOUNT" \ + $((1000000000000 + 50000000000 * 20)) + # HRMP + open_hrmp_channels \ + "ws://127.0.0.1:9942" \ + "//Alice" \ + 1000 1013 4 524288 + open_hrmp_channels \ + "ws://127.0.0.1:9942" \ + "//Alice" \ + 1013 1000 4 524288 + ;; + init-bridge-hub-rococo-local) + ensure_polkadot_js_api + # SA of sibling asset hub pays for the execution + transfer_balance \ + "ws://127.0.0.1:8943" \ + "//Alice" \ + "$ASSET_HUB_ROCOCO_SOVEREIGN_ACCOUNT_AT_BRIDGE_HUB_ROCOCO" \ + $((1000000000000 + 50000000000 * 20)) + # drip SA of lane dedicated to asset hub for paying rewards for delivery + transfer_balance \ + "ws://127.0.0.1:8943" \ + "//Alice" \ + "$ON_BRIDGE_HUB_ROCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhwd_ThisChain" \ + $((1000000000000 + 2000000000000)) + # drip SA of lane dedicated to asset hub for paying rewards for delivery confirmation + transfer_balance \ + "ws://127.0.0.1:8943" \ + "//Alice" \ + "$ON_BRIDGE_HUB_ROCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhwd_BridgedChain" \ + $((1000000000000 + 2000000000000)) + ;; + init-asset-hub-westend-local) + ensure_polkadot_js_api + # create foreign assets for native Rococo token (governance call on Westend) + force_create_foreign_asset \ + "ws://127.0.0.1:9945" \ + "//Alice" \ + 1000 \ + "ws://127.0.0.1:9010" \ + "$(jq --null-input '{ "parents": 2, "interior": { "X1": { "GlobalConsensus": "Rococo" } } }')" \ + "$GLOBAL_CONSENSUS_ROCOCO_SOVEREIGN_ACCOUNT" \ + 10000000000 \ + true + # drip SA which holds reserves + transfer_balance \ + "ws://127.0.0.1:9010" \ + "//Alice" \ + "$GLOBAL_CONSENSUS_ROCOCO_ASSET_HUB_ROCOCO_1000_SOVEREIGN_ACCOUNT" \ + $((1000000000000000 + 50000000000 * 20)) + # HRMP + open_hrmp_channels \ + "ws://127.0.0.1:9945" \ + "//Alice" \ + 1000 1002 4 524288 + open_hrmp_channels \ + "ws://127.0.0.1:9945" \ + "//Alice" \ + 1002 1000 4 524288 + ;; + init-bridge-hub-westend-local) + # SA of sibling asset hub pays for the execution + transfer_balance \ + "ws://127.0.0.1:8945" \ + "//Alice" \ + "$ASSET_HUB_WESTEND_SOVEREIGN_ACCOUNT_AT_BRIDGE_HUB_WESTEND" \ + $((1000000000000000 + 50000000000 * 20)) + # drip SA of lane dedicated to asset hub for paying rewards for delivery + transfer_balance \ + "ws://127.0.0.1:8945" \ + "//Alice" \ + "$ON_BRIDGE_HUB_WESTEND_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhro_ThisChain" \ + $((1000000000000000 + 2000000000000)) + # drip SA of lane dedicated to asset hub for paying rewards for delivery confirmation + transfer_balance \ + "ws://127.0.0.1:8945" \ + "//Alice" \ + 
"$ON_BRIDGE_HUB_WESTEND_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhro_BridgedChain" \ + $((1000000000000000 + 2000000000000)) + ;; + reserve-transfer-assets-from-asset-hub-rococo-local) + ensure_polkadot_js_api + # send ROCs to Alice account on AHW + limited_reserve_transfer_assets \ + "ws://127.0.0.1:9910" \ + "//Alice" \ + "$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Westend" }, { "Parachain": 1000 } ] } } }')" \ + "$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \ + "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 1, "interior": "Here" } }, "fun": { "Fungible": 200000000000 } } ] }')" \ + 0 \ + "Unlimited" + ;; + withdraw-reserve-assets-from-asset-hub-rococo-local) + ensure_polkadot_js_api + # send back only 100000000000 wrappedWNDs to Alice account on AHW + limited_reserve_transfer_assets \ + "ws://127.0.0.1:9910" \ + "//Alice" \ + "$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Westend" }, { "Parachain": 1000 } ] } } }')" \ + "$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \ + "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 2, "interior": { "X1": { "GlobalConsensus": "Westend" } } } }, "fun": { "Fungible": 40000000000 } } ] }')" \ + 0 \ + "Unlimited" + ;; + reserve-transfer-assets-from-asset-hub-westend-local) + ensure_polkadot_js_api + # send WNDs to Alice account on AHR + limited_reserve_transfer_assets \ + "ws://127.0.0.1:9010" \ + "//Alice" \ + "$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Rococo" }, { "Parachain": 1000 } ] } } }')" \ + "$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \ + "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 1, "interior": "Here" } }, "fun": { "Fungible": 150000000000 } } ] }')" \ + 0 \ + "Unlimited" + ;; + withdraw-reserve-assets-from-asset-hub-westend-local) + ensure_polkadot_js_api + # send back only 100000000000 wrappedROCs to Alice account on AHR + limited_reserve_transfer_assets \ + "ws://127.0.0.1:9010" \ + "//Alice" \ + "$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Rococo" }, { "Parachain": 1000 } ] } } }')" \ + "$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \ + "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 2, "interior": { "X1": { "GlobalConsensus": "Rococo" } } } }, "fun": { "Fungible": 100000000000 } } ] }')" \ + 0 \ + "Unlimited" + ;; + claim-rewards-bridge-hub-rococo-local) + ensure_polkadot_js_api + # bhwd -> [62, 68, 77, 64] -> 0x62687764 + claim_rewards \ + "ws://127.0.0.1:8943" \ + "//Charlie" \ + "0x${LANE_ID}" \ + "0x62687764" \ + "ThisChain" + claim_rewards \ + "ws://127.0.0.1:8943" \ + "//Charlie" \ + 
"0x${LANE_ID}" \ + "0x62687764" \ + "BridgedChain" + ;; + claim-rewards-bridge-hub-westend-local) + # bhro -> [62, 68, 72, 6f] -> 0x6268726f + claim_rewards \ + "ws://127.0.0.1:8945" \ + "//Charlie" \ + "0x${LANE_ID}" \ + "0x6268726f" \ + "ThisChain" + claim_rewards \ + "ws://127.0.0.1:8945" \ + "//Charlie" \ + "0x${LANE_ID}" \ + "0x6268726f" \ + "BridgedChain" + ;; + stop) + pkill -f polkadot + pkill -f parachain + ;; + import) + # to avoid trigger anything here + ;; + *) + echo "A command is require. Supported commands for: + Local (zombienet) run: + - run-relay + - init-asset-hub-rococo-local + - init-bridge-hub-rococo-local + - init-asset-hub-westend-local + - init-bridge-hub-westend-local + - reserve-transfer-assets-from-asset-hub-rococo-local + - withdraw-reserve-assets-from-asset-hub-rococo-local + - reserve-transfer-assets-from-asset-hub-westend-local + - withdraw-reserve-assets-from-asset-hub-westend-local + - claim-rewards-bridge-hub-rococo-local + - claim-rewards-bridge-hub-westend-local"; + exit 1 + ;; +esac diff --git a/cumulus/scripts/bridges_rococo_wococo.sh b/cumulus/scripts/bridges_rococo_wococo.sh deleted file mode 100755 index 1117ed6810928b39ffcbafee27b26a59447412ee..0000000000000000000000000000000000000000 --- a/cumulus/scripts/bridges_rococo_wococo.sh +++ /dev/null @@ -1,695 +0,0 @@ -#!/bin/bash - -# Address: 5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY -# AccountId: [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] -ASSET_HUB_KUSAMA_ACCOUNT_SEED_FOR_LOCAL="//Alice" -# Address: 5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY -# AccountId: [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] -ASSET_HUB_WOCOCO_ACCOUNT_SEED_FOR_LOCAL="5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY" - -# SovereignAccount for `MultiLocation { parents: 2, interior: X2(GlobalConsensus(Rococo), Parachain(1000)) }` => 5CfNu7eH3SJvqqPt3aJh38T8dcFvhGzEohp9tsd41ANhXDnQ -# -# use sp_core::crypto::Ss58Codec; -# println!("{}", -# frame_support::sp_runtime::AccountId32::new( -# GlobalConsensusParachainConvertsFor::::convert_ref( -# MultiLocation { parents: 2, interior: X2(GlobalConsensus(Kusama), Parachain(1000)) }).unwrap() -# ).to_ss58check_with_version(42_u16.into()) -# ); -ASSET_HUB_ROCOCO_1000_SOVEREIGN_ACCOUNT="5CfNu7eH3SJvqqPt3aJh38T8dcFvhGzEohp9tsd41ANhXDnQ" - -# Address: GegTpZJMyzkntLN7NJhRfHDk4GWukLbGSsag6PHrLSrCK4h -ASSET_HUB2_ROCOCO_1000_SOVEREIGN_ACCOUNT="scatter feed race company oxygen trip extra elbow slot bundle auto canoe" - -# Adress: 5Ge7YcbctWCP1CccugzxWDn9hFnTxvTh3bL6PNy4ubNJmp7Y / H9jCvwVWsDJkrS4gPp1QB99qr4hmbGsVyAqn3F2PPaoWyU3 -# AccountId: [202, 107, 198, 135, 15, 25, 193, 165, 172, 73, 137, 218, 115, 177, 204, 0, 5, 155, 215, 86, 208, 51, 50, 130, 190, 110, 184, 143, 124, 50, 160, 20] -ASSET_HUB_WOCOCO_ACCOUNT_ADDRESS_FOR_ROCOCO="5Ge7YcbctWCP1CccugzxWDn9hFnTxvTh3bL6PNy4ubNJmp7Y" -ASSET_HUB_WOCOCO_ACCOUNT_SEED_FOR_WOCOCO="tone spirit magnet sunset cannon poverty forget lock river east blouse random" - -function address_to_account_id_bytes() { - local address=$1 - local output=$2 - echo "address_to_account_id_bytes - address: $address, output: $output" - if [ $address == "$ASSET_HUB_WOCOCO_ACCOUNT_SEED_FOR_LOCAL" ]; then - jq --null-input '[212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 
162, 125]' > $output - elif [ $address == "$ASSET_HUB_WOCOCO_ACCOUNT_ADDRESS_FOR_ROCOCO" ]; then - jq --null-input '[202, 107, 198, 135, 15, 25, 193, 165, 172, 73, 137, 218, 115, 177, 204, 0, 5, 155, 215, 86, 208, 51, 50, 130, 190, 110, 184, 143, 124, 50, 160, 20]' > $output - else - echo -n "Sorry, unknown address: $address - please, add bytes here or function for that!" - exit 1 - fi -} - -function ensure_binaries() { - if [[ ! -f ~/local_bridge_testing/bin/polkadot ]]; then - echo " Required polkadot binary '~/local_bridge_testing/bin/polkadot' does not exist!" - echo " You need to build it and copy to this location!" - echo " Please, check ./parachains/runtimes/bridge-hubs/README.md (Prepare/Build/Deploy)" - exit 1 - fi - if [[ ! -f ~/local_bridge_testing/bin/polkadot-parachain ]]; then - echo " Required polkadot-parachain binary '~/local_bridge_testing/bin/polkadot-parachain' does not exist!" - echo " You need to build it and copy to this location!" - echo " Please, check ./parachains/runtimes/bridge-hubs/README.md (Prepare/Build/Deploy)" - exit 1 - fi -} - -function ensure_relayer() { - if [[ ! -f ~/local_bridge_testing/bin/substrate-relay ]]; then - echo " Required substrate-relay binary '~/local_bridge_testing/bin/substrate-relay' does not exist!" - echo " You need to build it and copy to this location!" - echo " Please, check ./parachains/runtimes/bridge-hubs/README.md (Prepare/Build/Deploy)" - exit 1 - fi -} - -function ensure_polkadot_js_api() { - if ! which polkadot-js-api &> /dev/null; then - echo '' - echo 'Required command `polkadot-js-api` not in PATH, please, install, e.g.:' - echo "npm install -g @polkadot/api-cli@beta" - echo " or" - echo "yarn global add @polkadot/api-cli" - echo '' - exit 1 - fi - if ! which jq &> /dev/null; then - echo '' - echo 'Required command `jq` not in PATH, please, install, e.g.:' - echo "apt install -y jq" - echo '' - exit 1 - fi - generate_hex_encoded_call_data "check" "--" - local retVal=$? - if [ $retVal -ne 0 ]; then - echo "" - echo "" - echo "-------------------" - echo "Installing (nodejs) sub module: ./scripts/generate_hex_encoded_call" - pushd ./scripts/generate_hex_encoded_call - npm install - popd - fi -} - -function generate_hex_encoded_call_data() { - local type=$1 - local endpoint=$2 - local output=$3 - shift - shift - shift - echo "Input params: $@" - - node ./scripts/generate_hex_encoded_call "$type" "$endpoint" "$output" "$@" - local retVal=$? 
- - if [ $type != "check" ]; then - local hex_encoded_data=$(cat $output) - echo "Generated hex-encoded bytes to file '$output': $hex_encoded_data" - fi - - return $retVal -} - -function transfer_balance() { - local runtime_para_endpoint=$1 - local seed=$2 - local target_account=$3 - local amount=$4 - echo " calling transfer_balance:" - echo " runtime_para_endpoint: ${runtime_para_endpoint}" - echo " seed: ${seed}" - echo " target_account: ${target_account}" - echo " amount: ${amount}" - echo "--------------------------------------------------" - - polkadot-js-api \ - --ws "${runtime_para_endpoint}" \ - --seed "${seed?}" \ - tx.balances.transfer \ - "${target_account}" \ - "${amount}" -} - -function send_governance_transact() { - local relay_url=$1 - local relay_chain_seed=$2 - local para_id=$3 - local hex_encoded_data=$4 - local require_weight_at_most_ref_time=$5 - local require_weight_at_most_proof_size=$6 - echo " calling send_governance_transact:" - echo " relay_url: ${relay_url}" - echo " relay_chain_seed: ${relay_chain_seed}" - echo " para_id: ${para_id}" - echo " hex_encoded_data: ${hex_encoded_data}" - echo " require_weight_at_most_ref_time: ${require_weight_at_most_ref_time}" - echo " require_weight_at_most_proof_size: ${require_weight_at_most_proof_size}" - echo " params:" - - local dest=$(jq --null-input \ - --arg para_id "$para_id" \ - '{ "V3": { "parents": 0, "interior": { "X1": { "Parachain": $para_id } } } }') - - local message=$(jq --null-input \ - --argjson hex_encoded_data $hex_encoded_data \ - --arg require_weight_at_most_ref_time "$require_weight_at_most_ref_time" \ - --arg require_weight_at_most_proof_size "$require_weight_at_most_proof_size" \ - ' - { - "V3": [ - { - "UnpaidExecution": { - "weight_limit": "Unlimited" - } - }, - { - "Transact": { - "origin_kind": "Superuser", - "require_weight_at_most": { - "ref_time": $require_weight_at_most_ref_time, - "proof_size": $require_weight_at_most_proof_size, - }, - "call": { - "encoded": $hex_encoded_data - } - } - } - ] - } - ') - - echo "" - echo " dest:" - echo "${dest}" - echo "" - echo " message:" - echo "${message}" - echo "" - echo "--------------------------------------------------" - - polkadot-js-api \ - --ws "${relay_url?}" \ - --seed "${relay_chain_seed?}" \ - --sudo \ - tx.xcmPallet.send \ - "${dest}" \ - "${message}" -} - -function allow_assets_transfer_send() { - local relay_url=$1 - local relay_chain_seed=$2 - local runtime_para_id=$3 - local runtime_para_endpoint=$4 - local bridge_hub_para_id=$5 - local bridged_para_network=$6 - local bridged_para_para_id=$7 - echo " calling allow_assets_transfer_send:" - echo " relay_url: ${relay_url}" - echo " relay_chain_seed: ${relay_chain_seed}" - echo " runtime_para_id: ${runtime_para_id}" - echo " runtime_para_endpoint: ${runtime_para_endpoint}" - echo " bridge_hub_para_id: ${bridge_hub_para_id}" - echo " bridged_para_network: ${bridged_para_network}" - echo " bridged_para_para_id: ${bridged_para_para_id}" - echo " params:" - - # 1. 
generate data for Transact (add_exporter_config) - local bridge_config=$(jq --null-input \ - --arg bridge_hub_para_id "$bridge_hub_para_id" \ - --arg bridged_para_network "$bridged_para_network" \ - --arg bridged_para_para_id "$bridged_para_para_id" \ - ' - { - "bridgeLocation": { - "parents": 1, - "interior": { - "X1": { "Parachain": $bridge_hub_para_id } - } - }, - "allowedTargetLocation": { - "parents": 2, - "interior": { - "X2": [ - { - "GlobalConsensus": $bridged_para_network, - }, - { - "Parachain": $bridged_para_para_id - } - ] - } - }, - "maxTargetLocationFee": { - "id": { - "Concrete": { - "parents": 1, - "interior": "Here" - } - }, - "fun": { - "Fungible": 50000000000 - } - } - } - ' - ) - local tmp_output_file=$(mktemp) - generate_hex_encoded_call_data "add-exporter-config" "${runtime_para_endpoint}" "${tmp_output_file}" $bridged_para_network "$bridge_config" - local hex_encoded_data=$(cat $tmp_output_file) - - send_governance_transact "${relay_url}" "${relay_chain_seed}" "${runtime_para_id}" "${hex_encoded_data}" 200000000 12000 -} - -function force_create_foreign_asset() { - local relay_url=$1 - local relay_chain_seed=$2 - local runtime_para_id=$3 - local runtime_para_endpoint=$4 - local global_consensus=$5 - local asset_owner_account_id=$6 - echo " calling force_create_foreign_asset:" - echo " relay_url: ${relay_url}" - echo " relay_chain_seed: ${relay_chain_seed}" - echo " runtime_para_id: ${runtime_para_id}" - echo " runtime_para_endpoint: ${runtime_para_endpoint}" - echo " global_consensus: ${global_consensus}" - echo " asset_owner_account_id: ${asset_owner_account_id}" - echo " params:" - - # 1. generate data for Transact (ForeignAssets::force_create) - local asset_id=$(jq --null-input \ - --arg global_consensus "$global_consensus" \ - ' - { - "parents": 2, - "interior": { - "X1": { - "GlobalConsensus": $global_consensus, - } - } - } - ' - ) - local tmp_output_file=$(mktemp) - generate_hex_encoded_call_data "force-create-asset" "${runtime_para_endpoint}" "${tmp_output_file}" "$asset_id" "$asset_owner_account_id" false "1000" - local hex_encoded_data=$(cat $tmp_output_file) - - send_governance_transact "${relay_url}" "${relay_chain_seed}" "${runtime_para_id}" "${hex_encoded_data}" 200000000 12000 -} - -function allow_assets_transfer_receive() { - local relay_url=$1 - local relay_chain_seed=$2 - local runtime_para_id=$3 - local runtime_para_endpoint=$4 - local bridge_hub_para_id=$5 - local bridged_network=$6 - local bridged_para_id=$7 - echo " calling allow_assets_transfer_receive:" - echo " relay_url: ${relay_url}" - echo " relay_chain_seed: ${relay_chain_seed}" - echo " runtime_para_id: ${runtime_para_id}" - echo " runtime_para_endpoint: ${runtime_para_endpoint}" - echo " bridge_hub_para_id: ${bridge_hub_para_id}" - echo " bridged_network: ${bridged_network}" - echo " bridged_para_id: ${bridged_para_id}" - echo " params:" - - # 1. 
generate data for Transact (add_universal_alias) - local location=$(jq --null-input \ - --arg bridge_hub_para_id "$bridge_hub_para_id" \ - '{ "V3": { "parents": 1, "interior": { "X1": { "Parachain": $bridge_hub_para_id } } } }') - - local junction=$(jq --null-input \ - --arg bridged_network "$bridged_network" \ - '{ "GlobalConsensus": $bridged_network } ') - - local tmp_output_file=$(mktemp) - generate_hex_encoded_call_data "add-universal-alias" "${runtime_para_endpoint}" "${tmp_output_file}" "$location" "$junction" - local hex_encoded_data=$(cat $tmp_output_file) - - send_governance_transact "${relay_url}" "${relay_chain_seed}" "${runtime_para_id}" "${hex_encoded_data}" 200000000 12000 - - # 2. generate data for Transact (add_reserve_location) - local reserve_location=$(jq --null-input \ - --arg bridged_network "$bridged_network" \ - --arg bridged_para_id "$bridged_para_id" \ - '{ "V3": { - "parents": 2, - "interior": { - "X2": [ - { - "GlobalConsensus": $bridged_network, - }, - { - "Parachain": $bridged_para_id - } - ] - } - } }') - - local tmp_output_file=$(mktemp) - generate_hex_encoded_call_data "add-reserve-location" "${runtime_para_endpoint}" "${tmp_output_file}" "$reserve_location" - local hex_encoded_data=$(cat $tmp_output_file) - - send_governance_transact "${relay_url}" "${relay_chain_seed}" "${runtime_para_id}" "${hex_encoded_data}" 200000000 12000 -} - -function remove_assets_transfer_send() { - local relay_url=$1 - local relay_chain_seed=$2 - local runtime_para_id=$3 - local runtime_para_endpoint=$4 - local bridged_network=$5 - echo " calling remove_assets_transfer_send:" - echo " relay_url: ${relay_url}" - echo " relay_chain_seed: ${relay_chain_seed}" - echo " runtime_para_id: ${runtime_para_id}" - echo " runtime_para_endpoint: ${runtime_para_endpoint}" - echo " bridged_network: ${bridged_network}" - echo " params:" - - local tmp_output_file=$(mktemp) - generate_hex_encoded_call_data "remove-exporter-config" "${runtime_para_endpoint}" "${tmp_output_file}" $bridged_network - local hex_encoded_data=$(cat $tmp_output_file) - - send_governance_transact "${relay_url}" "${relay_chain_seed}" "${runtime_para_id}" "${hex_encoded_data}" 200000000 12000 -} - -# TODO: we need to fill sovereign account for bridge-hub, because, small ammouts does not match ExistentialDeposit, so no reserve pass -# SA for BH: MultiLocation { parents: 1, interior: X1(Parachain(1013)) } - 5Eg2fntRRwLinojmk3sh5xscp7F3S6Zzm5oDVtoLTALKiypR on Kusama Asset Hub - -function transfer_asset_via_bridge() { - local url=$1 - local seed=$2 - local target_account=$3 - local target_global_consensus=$4 - echo " calling transfer_asset_via_bridge:" - echo " url: ${url}" - echo " seed: ${seed}" - echo " target_account: ${target_account}" - echo " target_global_consensus: ${target_global_consensus}" - echo " params:" - - local assets=$(jq --null-input \ - ' - { - "V3": [ - { - "id": { - "Concrete": { - "parents": 1, - "interior": "Here" - } - }, - "fun": { - "Fungible": 100000000 - } - } - ] - } - ' - ) - - local tmp_output_file=$(mktemp) - address_to_account_id_bytes "$target_account" "${tmp_output_file}" - local hex_encoded_data=$(cat $tmp_output_file) - - local destination=$(jq --null-input \ - --arg target_global_consensus "$target_global_consensus" \ - --argjson hex_encoded_data "$hex_encoded_data" \ - ' - { - "V3": { - "parents": 2, - "interior": { - "X3": [ - { - "GlobalConsensus": $target_global_consensus - }, - { - "Parachain": 1000 - }, - { - "AccountId32": { - "id": $hex_encoded_data - } - } - ] - } - } - } - ' - ) 
- - echo "" - echo " assets:" - echo "${assets}" - echo "" - echo " destination:" - echo "${destination}" - echo "" - echo "--------------------------------------------------" - - polkadot-js-api \ - --ws "${url?}" \ - --seed "${seed?}" \ - tx.bridgeTransfer.transferAssetViaBridge \ - "${assets}" \ - "${destination}" -} - -function ping_via_bridge() { - local url=$1 - local seed=$2 - local target_account=$3 - local target_global_consensus=$4 - echo " calling ping_via_bridge:" - echo " url: ${url}" - echo " seed: ${seed}" - echo " target_account: ${target_account}" - echo " target_global_consensus: ${target_global_consensus}" - echo " params:" - - local tmp_output_file=$(mktemp) - address_to_account_id_bytes "$target_account" "${tmp_output_file}" - local hex_encoded_data=$(cat $tmp_output_file) - - local destination=$(jq --null-input \ - --arg target_global_consensus "$target_global_consensus" \ - --argjson hex_encoded_data "$hex_encoded_data" \ - ' - { - "V3": { - "parents": 2, - "interior": { - "X3": [ - { - "GlobalConsensus": $target_global_consensus - }, - { - "Parachain": 1000 - }, - { - "AccountId32": { - "id": $hex_encoded_data - } - } - ] - } - } - } - ' - ) - - echo "" - echo " destination:" - echo "${destination}" - echo "" - echo "--------------------------------------------------" - - polkadot-js-api \ - --ws "${url?}" \ - --seed "${seed?}" \ - tx.bridgeTransfer.pingViaBridge \ - "${destination}" -} - -function init_ro_wo() { - ensure_relayer - - RUST_LOG=runtime=trace,rpc=trace,bridge=trace \ - ~/local_bridge_testing/bin/substrate-relay init-bridge rococo-to-bridge-hub-wococo \ - --source-host localhost \ - --source-port 9942 \ - --source-version-mode Auto \ - --target-host localhost \ - --target-port 8945 \ - --target-version-mode Auto \ - --target-signer //Bob -} - -function init_wo_ro() { - ensure_relayer - - RUST_LOG=runtime=trace,rpc=trace,bridge=trace \ - ~/local_bridge_testing/bin/substrate-relay init-bridge wococo-to-bridge-hub-rococo \ - --source-host localhost \ - --source-port 9945 \ - --source-version-mode Auto \ - --target-host localhost \ - --target-port 8943 \ - --target-version-mode Auto \ - --target-signer //Bob -} - -function run_relay() { - ensure_relayer - - RUST_LOG=runtime=trace,rpc=trace,bridge=trace \ - ~/local_bridge_testing/bin/substrate-relay relay-headers-and-messages bridge-hub-rococo-bridge-hub-wococo \ - --rococo-host localhost \ - --rococo-port 9942 \ - --rococo-version-mode Auto \ - --bridge-hub-rococo-host localhost \ - --bridge-hub-rococo-port 8943 \ - --bridge-hub-rococo-version-mode Auto \ - --bridge-hub-rococo-signer //Charlie \ - --wococo-headers-to-bridge-hub-rococo-signer //Bob \ - --wococo-parachains-to-bridge-hub-rococo-signer //Bob \ - --bridge-hub-rococo-transactions-mortality 4 \ - --wococo-host localhost \ - --wococo-port 9945 \ - --wococo-version-mode Auto \ - --bridge-hub-wococo-host localhost \ - --bridge-hub-wococo-port 8945 \ - --bridge-hub-wococo-version-mode Auto \ - --bridge-hub-wococo-signer //Charlie \ - --rococo-headers-to-bridge-hub-wococo-signer //Bob \ - --rococo-parachains-to-bridge-hub-wococo-signer //Bob \ - --bridge-hub-wococo-transactions-mortality 4 \ - --lane 00000001 -} - -case "$1" in - run-relay) - init_ro_wo - init_wo_ro - run_relay - ;; - allow-transfers-local) - # this allows send transfers on asset hub kusama local (by governance-like) - ./$0 "allow-transfer-on-asset-hub-kusama-local" - # this allows receive transfers on asset hub westend local (by governance-like) - ./$0 
"allow-transfer-on-asset-hub-westend-local" - ;; - allow-transfer-on-asset-hub-kusama-local) - ensure_polkadot_js_api - allow_assets_transfer_send \ - "ws://127.0.0.1:9942" \ - "//Alice" \ - 1000 \ - "ws://127.0.0.1:9910" \ - 1013 \ - "Wococo" 1000 - ;; - allow-transfer-on-asset-hub-westend-local) - ensure_polkadot_js_api - allow_assets_transfer_receive \ - "ws://127.0.0.1:9945" \ - "//Alice" \ - 1000 \ - "ws://127.0.0.1:9010" \ - 1014 \ - "Rococo" \ - 1000 - transfer_balance \ - "ws://127.0.0.1:9010" \ - "//Alice" \ - "$ASSET_HUB_ROCOCO_1000_SOVEREIGN_ACCOUNT" \ - $((1000000000 + 50000000000 * 20)) # ExistentialDeposit + maxTargetLocationFee * 20 - # create foreign assets for native Kusama token (yes, Kusama, because we are using Kusama Asset Hub runtime on rococo) - force_create_foreign_asset \ - "ws://127.0.0.1:9945" \ - "//Alice" \ - 1000 \ - "ws://127.0.0.1:9010" \ - "Kusama" \ - "$ASSET_HUB_ROCOCO_1000_SOVEREIGN_ACCOUNT" - ;; - remove-assets-transfer-from-asset-hub-kusama-local) - ensure_polkadot_js_api - remove_assets_transfer_send \ - "ws://127.0.0.1:9942" \ - "//Alice" \ - 1000 \ - "ws://127.0.0.1:9910" \ - "Wococo" - ;; - transfer-asset-from-asset-hub-kusama-local) - ensure_polkadot_js_api - transfer_asset_via_bridge \ - "ws://127.0.0.1:9910" \ - "$ASSET_HUB_KUSAMA_ACCOUNT_SEED_FOR_LOCAL" \ - "$ASSET_HUB_WOCOCO_ACCOUNT_SEED_FOR_LOCAL" \ - "Wococo" - ;; - ping-via-bridge-from-asset-hub-kusama-local) - ensure_polkadot_js_api - ping_via_bridge \ - "ws://127.0.0.1:9910" \ - "$ASSET_HUB_KUSAMA_ACCOUNT_SEED_FOR_LOCAL" \ - "$ASSET_HUB_WOCOCO_ACCOUNT_SEED_FOR_LOCAL" \ - "Wococo" - ;; - transfer-asset-from-asset-hub-rococo) - ensure_polkadot_js_api - transfer_asset_via_bridge \ - "wss://ws-rococo-rockmine2-collator-node-0.parity-testnet.parity.io" \ - "$ASSET_HUB2_ROCOCO_1000_SOVEREIGN_ACCOUNT" \ - "$ASSET_HUB_WOCOCO_ACCOUNT_ADDRESS_FOR_ROCOCO" \ - "Wococo" - ;; - ping-via-bridge-from-asset-hub-rococo) - ensure_polkadot_js_api - ping_via_bridge \ - "wss://ws-rococo-rockmine2-collator-node-0.parity-testnet.parity.io" \ - "${ASSET_HUB2_ROCOCO_1000_SOVEREIGN_ACCOUNT}" \ - "$ASSET_HUB_WOCOCO_ACCOUNT_ADDRESS_FOR_ROCOCO" \ - "Wococo" - ;; - drip) - transfer_balance \ - "ws://127.0.0.1:9010" \ - "//Alice" \ - "$ASSET_HUB_ROCOCO_1000_SOVEREIGN_ACCOUNT" \ - $((1000000000 + 50000000000 * 20)) - ;; - stop) - pkill -f polkadot - pkill -f parachain - ;; - import) - # to avoid trigger anything here - ;; - *) - echo "A command is require. 
Supported commands for: - Local (zombienet) run: - - run-relay - - allow-transfers-local - - allow-transfer-on-asset-hub-kusama-local - - allow-transfer-on-asset-hub-westend-local - - remove-assets-transfer-from-asset-hub-kusama-local - - transfer-asset-from-asset-hub-kusama-local - - ping-via-bridge-from-asset-hub-kusama-local - Live Rococo/Wococo run: - - transfer-asset-from-asset-hub-rococo - - ping-via-bridge-from-asset-hub-rococo"; - exit 1 - ;; -esac diff --git a/cumulus/scripts/generate_hex_encoded_call/index.js b/cumulus/scripts/generate_hex_encoded_call/index.js index 25e094df9053a8e81ad8c9a4147038f42f2de2ba..09f0e6aaf619a44642bfa4f065f3bf69f0bb7a33 100644 --- a/cumulus/scripts/generate_hex_encoded_call/index.js +++ b/cumulus/scripts/generate_hex_encoded_call/index.js @@ -106,6 +106,20 @@ function forceCreateAsset(endpoint, outputFile, assetId, assetOwnerAccountId, is }); } +function setStorage(endpoint, outputFile, items) { + console.log(`Generating setStorage from RPC endpoint: ${endpoint} to outputFile: ${outputFile}, items: ${items}`); + connect(endpoint) + .then((api) => { + const call = api.tx.system.setStorage(JSON.parse(items)); + writeHexEncodedBytesToOutput(call.method, outputFile); + exit(0); + }) + .catch((e) => { + console.error(e); + exit(1); + }); +} + if (!process.argv[2] || !process.argv[3]) { console.log("usage: node ./script/generate_hex_encoded_call "); exit(1); @@ -140,6 +154,9 @@ switch (type) { case 'force-create-asset': forceCreateAsset(rpcEnpoint, output, inputArgs[0], inputArgs[1], inputArgs[2], inputArgs[3]); break; + case 'set-storage': + setStorage(rpcEnpoint, output, inputArgs[0]); + break; case 'check': console.log(`Checking nodejs installation, if you see this everything is ready!`); break; diff --git a/cumulus/test/client/Cargo.toml b/cumulus/test/client/Cargo.toml index 290cfd7e4d899a84dc3d4392f4c2c1e6ae671b18..b760b796ec9a8cdd0ffe9c7f6436e0df6effd53c 100644 --- a/cumulus/test/client/Cargo.toml +++ b/cumulus/test/client/Cargo.toml @@ -37,3 +37,15 @@ cumulus-test-service = { path = "../service" } cumulus-test-relay-sproof-builder = { path = "../relay-sproof-builder" } cumulus-primitives-core = { path = "../../primitives/core" } cumulus-primitives-parachain-inherent = { path = "../../primitives/parachain-inherent" } + +[features] +runtime-benchmarks = [ + "cumulus-primitives-core/runtime-benchmarks", + "cumulus-test-service/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "polkadot-parachain-primitives/runtime-benchmarks", + "polkadot-primitives/runtime-benchmarks", + "sc-service/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] diff --git a/cumulus/test/client/src/block_builder.rs b/cumulus/test/client/src/block_builder.rs index 2d930d1be59774daf6ab1cd97ef62be88bf1bfe2..929f9f5f48c4d8d97526f97d8ac59cea08797aa8 100644 --- a/cumulus/test/client/src/block_builder.rs +++ b/cumulus/test/client/src/block_builder.rs @@ -14,13 +14,13 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . 
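// [Editorial note, not part of the original patch] The hunks below, together with the matching
// changes in `cumulus/test/service/benches/*` and `bench_utils.rs` later in this diff, migrate
// the test client from the removed `BlockBuilderProvider::new_block_at` API to
// `sc_block_builder::BlockBuilderBuilder`. A minimal, hedged sketch of the new pattern as it is
// used throughout this diff, assuming `client` is a test client reference and `parent_hash` an
// existing block hash (names here are illustrative only):
//
//     let mut block_builder = BlockBuilderBuilder::new(&*client)
//         .on_parent_block(parent_hash)
//         // either fetch the parent number from the client, or pass it directly via
//         // `.with_parent_block_number(number)` when it is already known
//         .fetch_parent_block_number(&*client)
//         .unwrap()
//         // only where proof recording is wanted, as in the `enable_proof_recording` call sites
//         .enable_proof_recording()
//         .build()
//         .unwrap();
//     block_builder.push(extrinsic).unwrap();
//     let built_block = block_builder.build().unwrap();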
-use crate::{Backend, Client}; +use crate::Client; use cumulus_primitives_core::{ParachainBlockData, PersistedValidationData}; use cumulus_primitives_parachain_inherent::{ParachainInherentData, INHERENT_IDENTIFIER}; use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder; use cumulus_test_runtime::{Block, GetLastTimestamp, Hash, Header}; use polkadot_primitives::{BlockNumber as PBlockNumber, Hash as PHash}; -use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; +use sc_block_builder::{BlockBuilder, BlockBuilderBuilder}; use sp_api::ProvideRuntimeApi; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; @@ -37,7 +37,7 @@ pub trait InitBlockBuilder { &self, validation_data: Option>, relay_sproof_builder: RelayStateSproofBuilder, - ) -> sc_block_builder::BlockBuilder; + ) -> sc_block_builder::BlockBuilder; /// Init a specific block builder at a specific block that works for the test runtime. /// @@ -48,7 +48,7 @@ pub trait InitBlockBuilder { at: Hash, validation_data: Option>, relay_sproof_builder: RelayStateSproofBuilder, - ) -> sc_block_builder::BlockBuilder; + ) -> sc_block_builder::BlockBuilder; /// Init a specific block builder that works for the test runtime. /// @@ -61,7 +61,7 @@ pub trait InitBlockBuilder { validation_data: Option>, relay_sproof_builder: RelayStateSproofBuilder, timestamp: u64, - ) -> sc_block_builder::BlockBuilder; + ) -> sc_block_builder::BlockBuilder; } fn init_block_builder( @@ -70,9 +70,13 @@ fn init_block_builder( validation_data: Option>, relay_sproof_builder: RelayStateSproofBuilder, timestamp: u64, -) -> BlockBuilder<'_, Block, Client, Backend> { - let mut block_builder = client - .new_block_at(at, Default::default(), true) +) -> BlockBuilder<'_, Block, Client> { + let mut block_builder = BlockBuilderBuilder::new(client) + .on_parent_block(at) + .fetch_parent_block_number(client) + .unwrap() + .enable_proof_recording() + .build() .expect("Creates new block builder for test runtime"); let mut inherent_data = sp_inherents::InherentData::new(); @@ -118,7 +122,7 @@ impl InitBlockBuilder for Client { &self, validation_data: Option>, relay_sproof_builder: RelayStateSproofBuilder, - ) -> BlockBuilder { + ) -> BlockBuilder { let chain_info = self.chain_info(); self.init_block_builder_at(chain_info.best_hash, validation_data, relay_sproof_builder) } @@ -128,7 +132,7 @@ impl InitBlockBuilder for Client { at: Hash, validation_data: Option>, relay_sproof_builder: RelayStateSproofBuilder, - ) -> BlockBuilder { + ) -> BlockBuilder { let last_timestamp = self.runtime_api().get_last_timestamp(at).expect("Get last timestamp"); let timestamp = last_timestamp + cumulus_test_runtime::MinimumPeriod::get(); @@ -142,7 +146,7 @@ impl InitBlockBuilder for Client { validation_data: Option>, relay_sproof_builder: RelayStateSproofBuilder, timestamp: u64, - ) -> sc_block_builder::BlockBuilder { + ) -> sc_block_builder::BlockBuilder { init_block_builder(self, at, validation_data, relay_sproof_builder, timestamp) } } @@ -155,7 +159,7 @@ pub trait BuildParachainBlockData { fn build_parachain_block(self, parent_state_root: Hash) -> ParachainBlockData; } -impl<'a> BuildParachainBlockData for sc_block_builder::BlockBuilder<'a, Block, Client, Backend> { +impl<'a> BuildParachainBlockData for sc_block_builder::BlockBuilder<'a, Block, Client> { fn build_parachain_block(self, parent_state_root: Hash) -> ParachainBlockData { let built_block = self.build().expect("Builds the block"); diff --git a/cumulus/test/client/src/lib.rs b/cumulus/test/client/src/lib.rs index 
61249bdb066ad06a0dfbecfa09ac046aca2b0889..a3c79158f492b5671109e6f6d960c840c634a2c6 100644 --- a/cumulus/test/client/src/lib.rs +++ b/cumulus/test/client/src/lib.rs @@ -19,13 +19,13 @@ mod block_builder; use codec::{Decode, Encode}; use runtime::{ - Balance, Block, BlockHashCount, Runtime, RuntimeCall, RuntimeGenesisConfig, Signature, - SignedExtra, SignedPayload, UncheckedExtrinsic, VERSION, + Balance, Block, BlockHashCount, Runtime, RuntimeCall, Signature, SignedExtra, SignedPayload, + UncheckedExtrinsic, VERSION, }; use sc_executor::HeapAllocStrategy; use sc_executor_common::runtime_blob::RuntimeBlob; use sp_blockchain::HeaderBackend; -use sp_core::{sr25519, Pair}; +use sp_core::Pair; use sp_io::TestExternalities; use sp_runtime::{generic::Era, BuildStorage, SaturatedConversion}; @@ -84,16 +84,12 @@ pub struct GenesisParameters { impl substrate_test_client::GenesisInit for GenesisParameters { fn genesis_storage(&self) -> Storage { - if self.endowed_accounts.is_empty() { - genesis_config().build_storage().unwrap() - } else { - cumulus_test_service::testnet_genesis( - cumulus_test_service::get_account_id_from_seed::("Alice"), - self.endowed_accounts.clone(), - ) - .build_storage() - .unwrap() - } + cumulus_test_service::chain_spec::get_chain_spec_with_extra_endowed( + None, + self.endowed_accounts.clone(), + ) + .build_storage() + .expect("Builds test runtime genesis storage") } } @@ -126,10 +122,6 @@ impl DefaultTestClientBuilderExt for TestClientBuilder { } } -fn genesis_config() -> RuntimeGenesisConfig { - cumulus_test_service::testnet_genesis_with_default_endowed(Default::default()) -} - /// Create an unsigned extrinsic from a runtime call. pub fn generate_unsigned(function: impl Into) -> UncheckedExtrinsic { UncheckedExtrinsic::new_unsigned(function.into()) diff --git a/cumulus/test/relay-sproof-builder/Cargo.toml b/cumulus/test/relay-sproof-builder/Cargo.toml index e044b92f7c4af4d297c8996c404a479e2d73ec8b..b24ac30849562409ead63a59e7514ec1436d55ac 100644 --- a/cumulus/test/relay-sproof-builder/Cargo.toml +++ b/cumulus/test/relay-sproof-builder/Cargo.toml @@ -3,6 +3,8 @@ name = "cumulus-test-relay-sproof-builder" version = "0.1.0" authors.workspace = true edition.workspace = true +license = "Apache-2.0" +description = "Mocked relay state proof builder for testing Cumulus." 
[dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive" ] } diff --git a/cumulus/test/runtime/Cargo.toml b/cumulus/test/runtime/Cargo.toml index dbfe9f46b1bd2e1dd9c57a79836b5a3065b03d25..3ea51c1973f4366f666eb4032ca9ca0d2555ea91 100644 --- a/cumulus/test/runtime/Cargo.toml +++ b/cumulus/test/runtime/Cargo.toml @@ -7,7 +7,7 @@ publish = false [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } # Substrate frame-executive = { path = "../../../substrate/frame/executive", default-features = false} @@ -15,6 +15,7 @@ frame-support = { path = "../../../substrate/frame/support", default-features = frame-system = { path = "../../../substrate/frame/system", default-features = false} frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api", default-features = false} pallet-balances = { path = "../../../substrate/frame/balances", default-features = false} +pallet-message-queue = { path = "../../../substrate/frame/message-queue", default-features = false } pallet-sudo = { path = "../../../substrate/frame/sudo", default-features = false} pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-features = false} pallet-glutton = { path = "../../../substrate/frame/glutton", default-features = false} @@ -22,6 +23,7 @@ pallet-transaction-payment = { path = "../../../substrate/frame/transaction-paym sp-api = { path = "../../../substrate/primitives/api", default-features = false} sp-block-builder = { path = "../../../substrate/primitives/block-builder", default-features = false} sp-core = { path = "../../../substrate/primitives/core", default-features = false} +sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features = false} sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false} sp-io = { path = "../../../substrate/primitives/io", default-features = false} sp-offchain = { path = "../../../substrate/primitives/offchain", default-features = false} @@ -50,6 +52,7 @@ std = [ "frame-system/std", "pallet-balances/std", "pallet-glutton/std", + "pallet-message-queue/std", "pallet-sudo/std", "pallet-timestamp/std", "pallet-transaction-payment/std", @@ -57,6 +60,7 @@ std = [ "sp-api/std", "sp-block-builder/std", "sp-core/std", + "sp-genesis-builder/std", "sp-inherents/std", "sp-io/std", "sp-offchain/std", diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index ccf624c0ffa4ef85307fcd3c8a0ff24621b5bf5c..19fd6d5f02dbf0ddda7c1fa9f452297d81402287 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -47,6 +47,7 @@ use sp_version::RuntimeVersion; pub use frame_support::{ construct_runtime, dispatch::DispatchClass, + genesis_builder_helper::{build_config, create_default_config}, parameter_types, traits::{ConstU8, Randomness}, weights::{ @@ -76,10 +77,6 @@ impl_opaque_keys! { pub struct SessionKeys {} } -/// Some key that we set in genesis and only read in [`TestOnRuntimeUpgrade`] to ensure that -/// [`OnRuntimeUpgrade`] works as expected. -pub const TEST_RUNTIME_UPGRADE_KEY: &[u8] = b"+test_runtime_upgrade_key+"; - /// The para-id used in this runtime. 
pub const PARACHAIN_ID: u32 = 100; @@ -250,6 +247,7 @@ impl pallet_balances::Config for Runtime { type MaxReserves = MaxReserves; type ReserveIdentifier = [u8; 8]; type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); type MaxHolds = ConstU32<0>; type MaxFreezes = ConstU32<0>; @@ -277,11 +275,13 @@ impl pallet_glutton::Config for Runtime { } impl cumulus_pallet_parachain_system::Config for Runtime { + type WeightInfo = (); type SelfParaId = ParachainId; type RuntimeEvent = RuntimeEvent; type OnSystemEvent = (); type OutboundXcmpMessageSource = (); - type DmpMessageHandler = (); + // Ignore all DMP messages by enqueueing them into `()`: + type DmpQueue = frame_support::traits::EnqueueWithOrigin<(), sp_core::ConstU8<0>>; type ReservedDmpWeight = (); type XcmpMessageHandler = (); type ReservedXcmpWeight = (); @@ -290,6 +290,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { } parameter_types! { + // will be set by test_pallet during genesis init pub storage ParachainId: cumulus_primitives_core::ParaId = PARACHAIN_ID.into(); } @@ -364,7 +365,10 @@ pub struct TestOnRuntimeUpgrade; impl OnRuntimeUpgrade for TestOnRuntimeUpgrade { fn on_runtime_upgrade() -> frame_support::weights::Weight { - assert_eq!(sp_io::storage::get(TEST_RUNTIME_UPGRADE_KEY), Some(vec![1, 2, 3, 4].into())); + assert_eq!( + sp_io::storage::get(test_pallet::TEST_RUNTIME_UPGRADE_KEY), + Some(vec![1, 2, 3, 4].into()) + ); Weight::from_parts(1, 0) } } @@ -470,6 +474,16 @@ impl_runtime_apis! { ParachainSystem::collect_collation_info(header) } } + + impl sp_genesis_builder::GenesisBuilder for Runtime { + fn create_default_config() -> Vec { + create_default_config::() + } + + fn build_config(config: Vec) -> sp_genesis_builder::Result { + build_config::(config) + } + } } cumulus_pallet_parachain_system::register_validate_block! { diff --git a/cumulus/test/runtime/src/test_pallet.rs b/cumulus/test/runtime/src/test_pallet.rs index 0af23797dad6dd13561584f9d1d608f23f1a6bc2..5d11b7f490c68351ea0612e809872a0f217681d3 100644 --- a/cumulus/test/runtime/src/test_pallet.rs +++ b/cumulus/test/runtime/src/test_pallet.rs @@ -17,8 +17,13 @@ /// A special pallet that exposes dispatchables that are only useful for testing. pub use pallet::*; +/// Some key that we set in genesis and only read in [`TestOnRuntimeUpgrade`] to ensure that +/// [`OnRuntimeUpgrade`] works as expected. +pub const TEST_RUNTIME_UPGRADE_KEY: &[u8] = b"+test_runtime_upgrade_key+"; + #[frame_support::pallet(dev_mode)] pub mod pallet { + use crate::test_pallet::TEST_RUNTIME_UPGRADE_KEY; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; @@ -44,5 +49,47 @@ pub mod pallet { ); Ok(()) } + + /// A dispatchable that first reads two values from two different child tries, asserts they + /// are the expected values (if the values exist in the state) and then writes two different + /// values to these child tries. 
+ #[pallet::weight(0)] + pub fn read_and_write_child_tries(_: OriginFor) -> DispatchResult { + let key = &b"hello"[..]; + let first_trie = &b"first"[..]; + let second_trie = &b"second"[..]; + let first_value = "world1".encode(); + let second_value = "world2".encode(); + + if let Some(res) = sp_io::default_child_storage::get(first_trie, key) { + assert_eq!(first_value, res); + } + if let Some(res) = sp_io::default_child_storage::get(second_trie, key) { + assert_eq!(second_value, res); + } + + sp_io::default_child_storage::set(first_trie, key, &first_value); + sp_io::default_child_storage::set(second_trie, key, &second_value); + + Ok(()) + } + } + + #[derive(frame_support::DefaultNoBound)] + #[pallet::genesis_config] + pub struct GenesisConfig { + pub self_para_id: Option, + #[serde(skip)] + pub _config: sp_std::marker::PhantomData, + } + + #[pallet::genesis_build] + impl BuildGenesisConfig for GenesisConfig { + fn build(&self) { + sp_io::storage::set(TEST_RUNTIME_UPGRADE_KEY, &[1, 2, 3, 4]); + self.self_para_id.map(|para_id| { + crate::ParachainId::set(¶_id); + }); + } } } diff --git a/cumulus/test/service/Cargo.toml b/cumulus/test/service/Cargo.toml index c996a01a12ed13003939f38b106a0a2f56c57716..ed8c8748cc803ea5407a9128040146ce23667431 100644 --- a/cumulus/test/service/Cargo.toml +++ b/cumulus/test/service/Cargo.toml @@ -17,6 +17,7 @@ criterion = { version = "0.5.1", features = [ "async_tokio" ] } jsonrpsee = { version = "0.16.2", features = ["server"] } rand = "0.8.5" serde = { version = "1.0.188", features = ["derive"] } +serde_json = "1.0.108" tokio = { version = "1.32.0", features = ["macros"] } tracing = "0.1.37" url = "2.4.0" @@ -98,6 +99,8 @@ substrate-test-utils = { path = "../../../substrate/test-utils" } [features] runtime-benchmarks = [ "cumulus-pallet-parachain-system/runtime-benchmarks", + "cumulus-primitives-core/runtime-benchmarks", + "cumulus-test-client/runtime-benchmarks", "frame-system/runtime-benchmarks", "pallet-im-online/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", diff --git a/cumulus/test/service/benches/block_import.rs b/cumulus/test/service/benches/block_import.rs index b79598b1530246670de717b2b9479d26bc691361..254e03b9263a8f30286c50930d2faaa2cb27d4a3 100644 --- a/cumulus/test/service/benches/block_import.rs +++ b/cumulus/test/service/benches/block_import.rs @@ -17,12 +17,12 @@ use criterion::{criterion_group, criterion_main, BatchSize, Criterion, Throughput}; +use sc_block_builder::BlockBuilderBuilder; use sc_client_api::UsageProvider; use core::time::Duration; use cumulus_primitives_core::ParaId; -use sc_block_builder::{BlockBuilderProvider, RecordProof}; use sp_api::{Core, ProvideRuntimeApi}; use sp_keyring::Sr25519Keyring::Alice; @@ -52,8 +52,12 @@ fn benchmark_block_import(c: &mut Criterion) { utils::create_benchmarking_transfer_extrinsics(&client, &src_accounts, &dst_accounts); let parent_hash = client.usage_info().chain.best_hash; - let mut block_builder = - client.new_block_at(parent_hash, Default::default(), RecordProof::No).unwrap(); + let mut block_builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(parent_hash) + .fetch_parent_block_number(&*client) + .unwrap() + .build() + .unwrap(); for extrinsic in extrinsics { block_builder.push(extrinsic).unwrap(); } diff --git a/cumulus/test/service/benches/block_import_glutton.rs b/cumulus/test/service/benches/block_import_glutton.rs index b49db9f449e971946c9b13086c7d9e89b544e64a..aeaf0722e724eaea8f7b5829809a79e2d8f2e71a 100644 --- 
a/cumulus/test/service/benches/block_import_glutton.rs +++ b/cumulus/test/service/benches/block_import_glutton.rs @@ -17,7 +17,6 @@ use criterion::{criterion_group, criterion_main, BatchSize, Criterion}; -use sc_client_api::UsageProvider; use sp_api::{Core, ProvideRuntimeApi}; use sp_arithmetic::{ traits::{One, Zero}, @@ -27,7 +26,7 @@ use sp_arithmetic::{ use core::time::Duration; use cumulus_primitives_core::ParaId; -use sc_block_builder::{BlockBuilderProvider, RecordProof}; +use sc_block_builder::BlockBuilderBuilder; use sp_keyring::Sr25519Keyring::Alice; use cumulus_test_service::bench_utils as utils; @@ -61,10 +60,14 @@ fn benchmark_block_import(c: &mut Criterion) { runtime.block_on(utils::import_block(&client, &block, false)); // Build the block we will use for benchmarking - let parent_hash = client.usage_info().chain.best_hash; + let parent_hash = client.chain_info().best_hash; let parent_header = client.header(parent_hash).expect("Just fetched this hash.").unwrap(); - let mut block_builder = - client.new_block_at(parent_hash, Default::default(), RecordProof::No).unwrap(); + let mut block_builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(parent_hash) + .fetch_parent_block_number(&*client) + .unwrap() + .build() + .unwrap(); block_builder .push(utils::extrinsic_set_validation_data(parent_header.clone()).clone()) .unwrap(); diff --git a/cumulus/test/service/benches/block_production.rs b/cumulus/test/service/benches/block_production.rs index 1b868d73630280297b87701d871c02ad08acab7b..3b0db578041f04a582b7acae81b553d52a24df9e 100644 --- a/cumulus/test/service/benches/block_production.rs +++ b/cumulus/test/service/benches/block_production.rs @@ -21,7 +21,7 @@ use sc_client_api::UsageProvider; use core::time::Duration; use cumulus_primitives_core::ParaId; -use sc_block_builder::{BlockBuilderProvider, RecordProof}; +use sc_block_builder::BlockBuilderBuilder; use sp_keyring::Sr25519Keyring::Alice; @@ -50,7 +50,11 @@ fn benchmark_block_production(c: &mut Criterion) { let parent_header = client.header(parent_hash).expect("Just fetched this hash.").unwrap(); let set_validation_data_extrinsic = utils::extrinsic_set_validation_data(parent_header); - let mut block_builder = client.new_block(Default::default()).unwrap(); + let mut block_builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().best_hash) + .with_parent_block_number(client.chain_info().best_number) + .build() + .unwrap(); block_builder.push(utils::extrinsic_set_time(&client)).unwrap(); block_builder.push(set_validation_data_extrinsic).unwrap(); let built_block = block_builder.build().unwrap(); @@ -66,7 +70,7 @@ fn benchmark_block_production(c: &mut Criterion) { group.measurement_time(Duration::from_secs(120)); group.throughput(Throughput::Elements(max_transfer_count as u64)); - let best_hash = client.chain_info().best_hash; + let chain = client.chain_info(); group.bench_function( format!("(proof = true, transfers = {}) block production", max_transfer_count), @@ -74,9 +78,13 @@ fn benchmark_block_production(c: &mut Criterion) { b.iter_batched( || extrinsics.clone(), |extrinsics| { - let mut block_builder = client - .new_block_at(best_hash, Default::default(), RecordProof::Yes) + let mut block_builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(chain.best_hash) + .with_parent_block_number(chain.best_number) + .enable_proof_recording() + .build() .unwrap(); + for extrinsic in extrinsics { block_builder.push(extrinsic).unwrap(); } @@ -93,9 +101,12 @@ fn benchmark_block_production(c: 
&mut Criterion) { b.iter_batched( || extrinsics.clone(), |extrinsics| { - let mut block_builder = client - .new_block_at(best_hash, Default::default(), RecordProof::No) + let mut block_builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(chain.best_hash) + .with_parent_block_number(chain.best_number) + .build() .unwrap(); + for extrinsic in extrinsics { block_builder.push(extrinsic).unwrap(); } diff --git a/cumulus/test/service/benches/block_production_glutton.rs b/cumulus/test/service/benches/block_production_glutton.rs index 92a368c88c8d81189c48ce8a1360b54110555548..6ab2c0e56bd188f9b7fa9456db8efc2c5a88adab 100644 --- a/cumulus/test/service/benches/block_production_glutton.rs +++ b/cumulus/test/service/benches/block_production_glutton.rs @@ -17,7 +17,6 @@ use criterion::{criterion_group, criterion_main, BatchSize, Criterion}; -use sc_client_api::UsageProvider; use sp_arithmetic::{ traits::{One, Zero}, FixedPointNumber, @@ -26,7 +25,7 @@ use sp_arithmetic::{ use core::time::Duration; use cumulus_primitives_core::ParaId; -use sc_block_builder::{BlockBuilderProvider, RecordProof}; +use sc_block_builder::BlockBuilderBuilder; use sp_keyring::Sr25519Keyring::Alice; @@ -60,11 +59,11 @@ fn benchmark_block_production_compute(c: &mut Criterion) { runtime.block_on(utils::import_block(&client, &block, false)); initialize_glutton_pallet = false; - let parent_hash = client.usage_info().chain.best_hash; - let parent_header = client.header(parent_hash).expect("Just fetched this hash.").unwrap(); + let best_hash = client.chain_info().best_hash; + let best_number = client.chain_info().best_number; + let parent_header = client.header(best_hash).expect("Just fetched this hash.").unwrap(); let set_validation_data_extrinsic = utils::extrinsic_set_validation_data(parent_header); let set_time_extrinsic = utils::extrinsic_set_time(&client); - let best_hash = client.chain_info().best_hash; group.bench_function( format!( @@ -76,8 +75,11 @@ fn benchmark_block_production_compute(c: &mut Criterion) { b.iter_batched( || (set_validation_data_extrinsic.clone(), set_time_extrinsic.clone()), |(validation_data, time)| { - let mut block_builder = client - .new_block_at(best_hash, Default::default(), RecordProof::Yes) + let mut block_builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(best_hash) + .with_parent_block_number(best_number) + .enable_proof_recording() + .build() .unwrap(); block_builder.push(validation_data).unwrap(); block_builder.push(time).unwrap(); @@ -98,8 +100,10 @@ fn benchmark_block_production_compute(c: &mut Criterion) { b.iter_batched( || (set_validation_data_extrinsic.clone(), set_time_extrinsic.clone()), |(validation_data, time)| { - let mut block_builder = client - .new_block_at(best_hash, Default::default(), RecordProof::No) + let mut block_builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(best_hash) + .with_parent_block_number(best_number) + .build() .unwrap(); block_builder.push(validation_data).unwrap(); block_builder.push(time).unwrap(); diff --git a/cumulus/test/service/benches/validate_block.rs b/cumulus/test/service/benches/validate_block.rs index f3b4d0b12144286ff24fc94ee9ba8169c33fddd0..11a7c4376d4c81156c067566dd20e2f8172b6529 100644 --- a/cumulus/test/service/benches/validate_block.rs +++ b/cumulus/test/service/benches/validate_block.rs @@ -27,7 +27,7 @@ use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder; use cumulus_test_runtime::{BalancesCall, Block, Header, UncheckedExtrinsic}; use cumulus_test_service::bench_utils as utils; use 
polkadot_primitives::HeadData; -use sc_block_builder::BlockBuilderProvider; +use sc_block_builder::BlockBuilderBuilder; use sc_client_api::UsageProvider; use sc_executor_common::wasm_runtime::WasmModule; @@ -46,7 +46,11 @@ fn create_extrinsics( dst_accounts: &[sr25519::Pair], ) -> (usize, Vec) { // Add as many tranfer extrinsics as possible into a single block. - let mut block_builder = client.new_block(Default::default()).unwrap(); + let mut block_builder = BlockBuilderBuilder::new(client) + .on_parent_block(client.chain_info().best_hash) + .with_parent_block_number(client.chain_info().best_number) + .build() + .unwrap(); let mut max_transfer_count = 0; let mut extrinsics = Vec::new(); diff --git a/cumulus/test/service/src/bench_utils.rs b/cumulus/test/service/src/bench_utils.rs index 172c9e504196dd5567e225df7dd32a5055f08c08..82142f21695ff21e33966813eccc20fa79cd6988 100644 --- a/cumulus/test/service/src/bench_utils.rs +++ b/cumulus/test/service/src/bench_utils.rs @@ -16,6 +16,7 @@ // limitations under the License. use codec::Encode; +use sc_block_builder::BlockBuilderBuilder; use crate::{construct_extrinsic, Client as TestClient}; use cumulus_primitives_core::{relay_chain::AccountId, PersistedValidationData}; @@ -26,7 +27,6 @@ use cumulus_test_runtime::{ }; use frame_system_rpc_runtime_api::AccountNonceApi; use polkadot_primitives::HeadData; -use sc_block_builder::BlockBuilderProvider; use sc_client_api::UsageProvider; use sc_consensus::{ block_import::{BlockImportParams, ForkChoiceStrategy}, @@ -126,8 +126,13 @@ pub fn create_benchmarking_transfer_extrinsics( src_accounts: &[sr25519::Pair], dst_accounts: &[sr25519::Pair], ) -> (usize, Vec) { + let chain = client.usage_info().chain; // Add as many transfer extrinsics as possible into a single block. - let mut block_builder = client.new_block(Default::default()).unwrap(); + let mut block_builder = BlockBuilderBuilder::new(client) + .on_parent_block(chain.best_hash) + .with_parent_block_number(chain.best_number) + .build() + .expect("Creates block builder"); let mut max_transfer_count = 0; let mut extrinsics = Vec::new(); // Every block needs one timestamp extrinsic. @@ -248,8 +253,13 @@ pub fn set_glutton_parameters( Some(last_nonce), ); extrinsics.push(set_storage); + let chain = client.usage_info().chain; - let mut block_builder = client.new_block(Default::default()).unwrap(); + let mut block_builder = BlockBuilderBuilder::new(client) + .on_parent_block(chain.best_hash) + .with_parent_block_number(chain.best_number) + .build() + .unwrap(); block_builder.push(extrinsic_set_time(client)).unwrap(); block_builder.push(extrinsic_set_validation_data(parent_header)).unwrap(); for extrinsic in extrinsics { diff --git a/cumulus/test/service/src/chain_spec.rs b/cumulus/test/service/src/chain_spec.rs index 7c8c984c2b207bb1ae86bad5042ee469bc622d4f..61bbf755d890aab92c0c714663a7e24c1c286696 100644 --- a/cumulus/test/service/src/chain_spec.rs +++ b/cumulus/test/service/src/chain_spec.rs @@ -17,7 +17,7 @@ #![allow(missing_docs)] use cumulus_primitives_core::ParaId; -use cumulus_test_runtime::{AccountId, Signature}; +use cumulus_test_runtime::{AccountId, RuntimeGenesisConfig, Signature}; use sc_chain_spec::{ChainSpecExtension, ChainSpecGroup}; use sc_service::ChainType; use serde::{Deserialize, Serialize}; @@ -25,27 +25,7 @@ use sp_core::{sr25519, Pair, Public}; use sp_runtime::traits::{IdentifyAccount, Verify}; /// Specialized `ChainSpec` for the normal parachain runtime. 
-pub type ChainSpec = sc_service::GenericChainSpec; - -/// Extension for the genesis config to add custom keys easily. -#[derive(serde::Serialize, serde::Deserialize)] -pub struct GenesisExt { - /// The runtime genesis config. - runtime_genesis_config: cumulus_test_runtime::RuntimeGenesisConfig, - /// The parachain id. - para_id: ParaId, -} - -impl sp_runtime::BuildStorage for GenesisExt { - fn assimilate_storage(&self, storage: &mut sp_core::storage::Storage) -> Result<(), String> { - sp_state_machine::BasicExternalities::execute_with_storage(storage, || { - sp_io::storage::set(cumulus_test_runtime::TEST_RUNTIME_UPGRADE_KEY, &[1, 2, 3, 4]); - cumulus_test_runtime::ParachainId::set(&self.para_id); - }); - - self.runtime_genesis_config.assimilate_storage(storage) - } -} +pub type ChainSpec = sc_service::GenericChainSpec; /// Helper function to generate a crypto pair from seed pub fn get_from_seed(seed: &str) -> ::Public { @@ -83,37 +63,33 @@ where /// The given accounts are initialized with funds in addition /// to the default known accounts. pub fn get_chain_spec_with_extra_endowed( - id: ParaId, + id: Option, extra_endowed_accounts: Vec, ) -> ChainSpec { - ChainSpec::from_genesis( - "Local Testnet", - "local_testnet", - ChainType::Local, - move || GenesisExt { - runtime_genesis_config: testnet_genesis_with_default_endowed( - extra_endowed_accounts.clone(), - ), - para_id: id, - }, - Vec::new(), - None, - None, - None, - None, - Extensions { para_id: id.into() }, + ChainSpec::builder( + cumulus_test_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), + Extensions { para_id: id.unwrap_or(cumulus_test_runtime::PARACHAIN_ID.into()).into() }, ) + .with_name("Local Testnet") + .with_id("local_testnet") + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(testnet_genesis_with_default_endowed( + extra_endowed_accounts.clone(), + id, + )) + .build() } /// Get the chain spec for a specific parachain ID. -pub fn get_chain_spec(id: ParaId) -> ChainSpec { +pub fn get_chain_spec(id: Option) -> ChainSpec { get_chain_spec_with_extra_endowed(id, Default::default()) } /// Local testnet genesis for testing. pub fn testnet_genesis_with_default_endowed( mut extra_endowed_accounts: Vec, -) -> cumulus_test_runtime::RuntimeGenesisConfig { + self_para_id: Option, +) -> serde_json::Value { let mut endowed = vec![ get_account_id_from_seed::("Alice"), get_account_id_from_seed::("Bob"), @@ -130,27 +106,20 @@ pub fn testnet_genesis_with_default_endowed( ]; endowed.append(&mut extra_endowed_accounts); - testnet_genesis(get_account_id_from_seed::("Alice"), endowed) + testnet_genesis(get_account_id_from_seed::("Alice"), endowed, self_para_id) } /// Creates a local testnet genesis with endowed accounts. 
pub fn testnet_genesis( root_key: AccountId, endowed_accounts: Vec, -) -> cumulus_test_runtime::RuntimeGenesisConfig { - cumulus_test_runtime::RuntimeGenesisConfig { - system: cumulus_test_runtime::SystemConfig { - code: cumulus_test_runtime::WASM_BINARY - .expect("WASM binary was not build, please build it!") - .to_vec(), - ..Default::default() - }, - glutton: Default::default(), - parachain_system: Default::default(), - balances: cumulus_test_runtime::BalancesConfig { + self_para_id: Option, +) -> serde_json::Value { + serde_json::json!({ + "balances": cumulus_test_runtime::BalancesConfig { balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 60)).collect(), }, - sudo: cumulus_test_runtime::SudoConfig { key: Some(root_key) }, - transaction_payment: Default::default(), - } + "sudo": cumulus_test_runtime::SudoConfig { key: Some(root_key) }, + "testPallet": cumulus_test_runtime::TestPalletConfig { self_para_id, ..Default::default() } + }) } diff --git a/cumulus/test/service/src/cli.rs b/cumulus/test/service/src/cli.rs index 9235a0c45c58638f3966c28b031c39382d157396..ef1159a3c1f8534a0dbfa5ae09859c8e1dadc626 100644 --- a/cumulus/test/service/src/cli.rs +++ b/cumulus/test/service/src/cli.rs @@ -287,8 +287,9 @@ impl SubstrateCli for TestCollatorCli { fn load_spec(&self, id: &str) -> std::result::Result, String> { Ok(match id { - "" => Box::new(cumulus_test_service::get_chain_spec(ParaId::from(self.parachain_id))) - as Box<_>, + "" => Box::new(cumulus_test_service::get_chain_spec(Some(ParaId::from( + self.parachain_id, + )))) as Box<_>, path => { let chain_spec = cumulus_test_service::chain_spec::ChainSpec::from_json_file(path.into())?; diff --git a/cumulus/test/service/src/genesis.rs b/cumulus/test/service/src/genesis.rs index 9dba57ba9fbc1ff00059284ed1c92e1c7669d208..d4a9a22562646d09cc793fa367396f5ab33cf118 100644 --- a/cumulus/test/service/src/genesis.rs +++ b/cumulus/test/service/src/genesis.rs @@ -23,7 +23,7 @@ use sp_runtime::traits::Block as BlockT; /// Returns the initial head data for a parachain ID. 
pub fn initial_head_data(para_id: ParaId) -> HeadData { - let spec = crate::chain_spec::get_chain_spec(para_id); + let spec = crate::chain_spec::get_chain_spec(Some(para_id)); let block: Block = generate_genesis_block(&spec, sp_runtime::StateVersion::V1).unwrap(); let genesis_state = block.header().encode(); genesis_state.into() diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index a721645546af7417200a7768242573abbe252397..4d4c60d1bda6652eeb908d1ab195e73c42084b3b 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -211,7 +211,8 @@ pub fn new_partial( sc_service::new_full_parts::(config, None, executor)?; let client = Arc::new(client); - let block_import = ParachainBlockImport::new(client.clone(), backend.clone()); + let block_import = + ParachainBlockImport::new_with_delayed_best_block(client.clone(), backend.clone()); let registry = config.prometheus_registry(); @@ -719,7 +720,7 @@ pub fn node_config( let role = if is_collator { Role::Authority } else { Role::Full }; let key_seed = key.to_seed(); let mut spec = - Box::new(chain_spec::get_chain_spec_with_extra_endowed(para_id, endowed_accounts)); + Box::new(chain_spec::get_chain_spec_with_extra_endowed(Some(para_id), endowed_accounts)); let mut storage = spec.as_storage_builder().build_storage().expect("could not build storage"); diff --git a/cumulus/test/service/src/main.rs b/cumulus/test/service/src/main.rs index 5946e9cc3506e220672cb58561d52b93bcad2d8a..16b68796bd391bdfa8a2085aac95f3e7201d9ce0 100644 --- a/cumulus/test/service/src/main.rs +++ b/cumulus/test/service/src/main.rs @@ -73,7 +73,7 @@ fn main() -> Result<(), sc_cli::Error> { let runner = cli.create_runner(cmd)?; runner.sync_run(|_config| { let parachain_id = ParaId::from(cmd.parachain_id); - let spec = cumulus_test_service::get_chain_spec(parachain_id); + let spec = cumulus_test_service::get_chain_spec(Some(parachain_id)); cmd.base.run(&spec) }) }, diff --git a/cumulus/xcm/xcm-emulator/Cargo.toml b/cumulus/xcm/xcm-emulator/Cargo.toml index 72007ba98f7fa1b8962ecbbd6c631449a7c18576..2f851f1bcde06cb57107019b1f477ad747fc6ef6 100644 --- a/cumulus/xcm/xcm-emulator/Cargo.toml +++ b/cumulus/xcm/xcm-emulator/Cargo.toml @@ -4,7 +4,7 @@ description = "Test kit to emulate XCM program execution." 
version = "0.1.0" authors.workspace = true edition.workspace = true -publish = false +license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0" } @@ -27,6 +27,7 @@ pallet-message-queue = { path = "../../../substrate/frame/message-queue" } # Cumulus cumulus-primitives-core = { path = "../../primitives/core" } +cumulus-pallet-xcmp-queue = { path = "../../pallets/xcmp-queue" } cumulus-pallet-parachain-system = { path = "../../pallets/parachain-system" } cumulus-primitives-parachain-inherent = { path = "../../primitives/parachain-inherent" } cumulus-test-relay-sproof-builder = { path = "../../test/relay-sproof-builder" } diff --git a/cumulus/xcm/xcm-emulator/src/lib.rs b/cumulus/xcm/xcm-emulator/src/lib.rs index caf73ae1e41ca52b5e6645060c9ab72e26b3d38b..f2e4ff397c45cb228fcb80d39972c7f8ceac4a99 100644 --- a/cumulus/xcm/xcm-emulator/src/lib.rs +++ b/cumulus/xcm/xcm-emulator/src/lib.rs @@ -24,19 +24,23 @@ pub use std::{ }; // Substrate +pub use cumulus_primitives_core::AggregateMessageOrigin as CumulusAggregateMessageOrigin; pub use frame_support::{ assert_ok, sp_runtime::{traits::Header as HeaderT, DispatchResult}, traits::{ - EnqueueMessage, Get, Hooks, OriginTrait, ProcessMessage, ProcessMessageError, ServiceQueues, + EnqueueMessage, ExecuteOverweightError, Get, Hooks, OnInitialize, OriginTrait, + ProcessMessage, ProcessMessageError, ServiceQueues, }, weights::{Weight, WeightMeter}, }; pub use frame_system::{Config as SystemConfig, Pallet as SystemPallet}; pub use pallet_balances::AccountData; +pub use pallet_message_queue; pub use sp_arithmetic::traits::Bounded; pub use sp_core::{blake2_256, parameter_types, sr25519, storage::Storage, Pair}; pub use sp_io::TestExternalities; +pub use sp_runtime::BoundedSlice; pub use sp_std::{cell::RefCell, collections::vec_deque::VecDeque, fmt::Debug}; pub use sp_tracing; @@ -166,30 +170,21 @@ pub trait Network { relay_parent_number: u32, parent_head_data: HeadData, ) -> ParachainInherentData; -} - -pub trait NetworkComponent { - type Network: Network; - fn send_horizontal_messages)>>( to_para_id: u32, iter: I, ) { HORIZONTAL_MESSAGES.with(|b| { b.borrow_mut() - .get_mut(Self::Network::name()) + .get_mut(Self::name()) .unwrap() .push_back((to_para_id, iter.collect())) }); } fn send_upward_message(from_para_id: u32, msg: Vec) { - UPWARD_MESSAGES.with(|b| { - b.borrow_mut() - .get_mut(Self::Network::name()) - .unwrap() - .push_back((from_para_id, msg)) - }); + UPWARD_MESSAGES + .with(|b| b.borrow_mut().get_mut(Self::name()).unwrap().push_back((from_para_id, msg))); } fn send_downward_messages( @@ -198,19 +193,19 @@ pub trait NetworkComponent { ) { DOWNWARD_MESSAGES.with(|b| { b.borrow_mut() - .get_mut(Self::Network::name()) + .get_mut(Self::name()) .unwrap() .push_back((to_para_id, iter.collect())) }); } fn send_bridged_messages(msg: BridgeMessage) { - BRIDGED_MESSAGES - .with(|b| b.borrow_mut().get_mut(Self::Network::name()).unwrap().push_back(msg)); + BRIDGED_MESSAGES.with(|b| b.borrow_mut().get_mut(Self::name()).unwrap().push_back(msg)); } } -pub trait Chain: TestExt + NetworkComponent { +pub trait Chain: TestExt { + type Network: Network; type Runtime: SystemConfig; type RuntimeCall; type RuntimeOrigin; @@ -227,8 +222,10 @@ pub trait Chain: TestExt + NetworkComponent { } pub trait RelayChain: Chain { - type MessageProcessor: ProcessMessage; type SovereignAccountOf: ConvertLocation>; + type MessageProcessor: ProcessMessage + ServiceQueues; + + fn init(); fn child_location_of(id: ParaId) -> MultiLocation { 
(Ancestor(0), ParachainJunction(id.into())).into() @@ -245,10 +242,10 @@ pub trait RelayChain: Chain { pub trait Parachain: Chain { type XcmpMessageHandler: XcmpMessageHandler; - type DmpMessageHandler: DmpMessageHandler; type LocationToAccountId: ConvertLocation>; type ParachainInfo: Get; type ParachainSystem; + type MessageProcessor: ProcessMessage + ServiceQueues; fn init(); @@ -344,9 +341,7 @@ macro_rules! decl_test_relay_chains { on_init = $on_init:expr, runtime = $runtime:ident, core = { - MessageProcessor: $mp:path, SovereignAccountOf: $sovereign_acc_of:path, - }, pallets = { $($pallet_name:ident: $pallet_path:path,)* @@ -358,9 +353,10 @@ macro_rules! decl_test_relay_chains { ) => { $( #[derive(Clone)] - pub struct $name; + pub struct $name($crate::PhantomData); - impl $crate::Chain for $name { + impl $crate::Chain for $name { + type Network = N; type Runtime = $runtime::Runtime; type RuntimeCall = $runtime::RuntimeCall; type RuntimeOrigin = $runtime::RuntimeOrigin; @@ -379,27 +375,35 @@ macro_rules! decl_test_relay_chains { } } - impl $crate::RelayChain for $name { + impl $crate::RelayChain for $name { type SovereignAccountOf = $sovereign_acc_of; - type MessageProcessor = $mp; + type MessageProcessor = $crate::DefaultRelayMessageProcessor<$name>; + + fn init() { + use $crate::TestExt; + // Initialize the thread local variable + $crate::paste::paste! { + [].with(|v| *v.borrow_mut() = Self::build_new_ext($genesis)); + } + } } $crate::paste::paste! { - pub trait [<$name Pallet>] { + pub trait [<$name RelayPallet>] { $( type $pallet_name; )? } - impl [<$name Pallet>] for $name { + impl [<$name RelayPallet>] for $name { $( type $pallet_name = $pallet_path; )? } } - $crate::__impl_test_ext_for_relay_chain!($name, $genesis, $on_init, $api_version); - $crate::__impl_check_assertion!($name); + $crate::__impl_test_ext_for_relay_chain!($name, N, $genesis, $on_init, $api_version); + $crate::__impl_check_assertion!($name, N); )+ }; } @@ -407,10 +411,11 @@ macro_rules! decl_test_relay_chains { #[macro_export] macro_rules! __impl_test_ext_for_relay_chain { // entry point: generate ext name - ($name:ident, $genesis:expr, $on_init:expr, $api_version:tt) => { + ($name:ident, $network:ident, $genesis:expr, $on_init:expr, $api_version:tt) => { $crate::paste::paste! { $crate::__impl_test_ext_for_relay_chain!( @impl $name, + $network, $genesis, $on_init, [], @@ -420,10 +425,10 @@ macro_rules! __impl_test_ext_for_relay_chain { } }; // impl - (@impl $name:ident, $genesis:expr, $on_init:expr, $api_version:ident, $local_ext:ident, $global_ext:ident) => { + (@impl $name:ident, $network:ident, $genesis:expr, $on_init:expr, $api_version:ident, $local_ext:ident, $global_ext:ident) => { thread_local! { pub static $local_ext: $crate::RefCell<$crate::TestExternalities> - = $crate::RefCell::new(<$name as $crate::TestExt>::build_new_ext($genesis)); + = $crate::RefCell::new($crate::TestExternalities::new($genesis)); } $crate::lazy_static! { @@ -431,9 +436,9 @@ macro_rules! __impl_test_ext_for_relay_chain { = $crate::Mutex::new($crate::RefCell::new($crate::HashMap::new())); } - impl $crate::TestExt for $name { + impl<$network: $crate::Network> $crate::TestExt for $name<$network> { fn build_new_ext(storage: $crate::Storage) -> $crate::TestExternalities { - use $crate::{sp_tracing, NetworkComponent, Network, Chain, TestExternalities}; + use $crate::{sp_tracing, Network, Chain, TestExternalities}; let mut ext = TestExternalities::new(storage); @@ -450,7 +455,7 @@ macro_rules! 
__impl_test_ext_for_relay_chain { } fn new_ext() -> $crate::TestExternalities { - <$name>::build_new_ext($genesis) + Self::build_new_ext($genesis) } fn move_ext_out(id: &'static str) { @@ -501,13 +506,13 @@ macro_rules! __impl_test_ext_for_relay_chain { } fn reset_ext() { - $local_ext.with(|v| *v.borrow_mut() = <$name>::build_new_ext($genesis)); + $local_ext.with(|v| *v.borrow_mut() = Self::build_new_ext($genesis)); } fn execute_with(execute: impl FnOnce() -> R) -> R { - use $crate::{Chain, NetworkComponent, Network}; + use $crate::{Chain, Network}; // Make sure the Network is initialized - <$name as NetworkComponent>::Network::init(); + <$network>::init(); // Execute let r = $local_ext.with(|v| v.borrow_mut().execute_with(execute)); @@ -518,7 +523,7 @@ macro_rules! __impl_test_ext_for_relay_chain { use $crate::polkadot_primitives::runtime_api::runtime_decl_for_parachain_host::$api_version; //TODO: mark sent count & filter out sent msg - for para_id in<$name as NetworkComponent>::Network::para_ids() { + for para_id in <$network>::para_ids() { // downward messages let downward_messages = ::Runtime::dmq_contents(para_id.into()) .into_iter() @@ -526,7 +531,7 @@ macro_rules! __impl_test_ext_for_relay_chain { if downward_messages.len() == 0 { continue; } - <$name>::send_downward_messages(para_id, downward_messages.into_iter()); + <$network>::send_downward_messages(para_id, downward_messages.into_iter()); // Note: no need to handle horizontal messages, as the // simulator directly sends them to dest (not relayed). @@ -542,7 +547,7 @@ macro_rules! __impl_test_ext_for_relay_chain { }) }); - <$name as NetworkComponent>::Network::process_messages(); + <$network>::process_messages(); r } @@ -569,9 +574,9 @@ macro_rules! decl_test_parachains { runtime = $runtime:ident, core = { XcmpMessageHandler: $xcmp_message_handler:path, - DmpMessageHandler: $dmp_message_handler:path, LocationToAccountId: $location_to_account:path, ParachainInfo: $parachain_info:path, + // MessageProcessor: $message_processor:path, }, pallets = { $($pallet_name:ident: $pallet_path:path,)* @@ -583,14 +588,15 @@ macro_rules! decl_test_parachains { ) => { $( #[derive(Clone)] - pub struct $name; + pub struct $name($crate::PhantomData); - impl $crate::Chain for $name { + impl $crate::Chain for $name { type Runtime = $runtime::Runtime; type RuntimeCall = $runtime::RuntimeCall; type RuntimeOrigin = $runtime::RuntimeOrigin; type RuntimeEvent = $runtime::RuntimeEvent; type System = $crate::SystemPallet::; + type Network = N; fn account_data_of(account: $crate::AccountIdOf) -> $crate::AccountData<$crate::Balance> { ::ext_wrapper(|| $crate::SystemPallet::::account(account).data.into()) @@ -604,17 +610,21 @@ macro_rules! decl_test_parachains { } } - impl $crate::Parachain for $name { + impl $crate::Parachain for $name { type XcmpMessageHandler = $xcmp_message_handler; - type DmpMessageHandler = $dmp_message_handler; type LocationToAccountId = $location_to_account; type ParachainSystem = $crate::ParachainSystemPallet<::Runtime>; type ParachainInfo = $parachain_info; + type MessageProcessor = $crate::DefaultParaMessageProcessor<$name>; // We run an empty block during initialisation to open HRMP channels // and have them ready for the next block fn init() { - use $crate::{Chain, HeadData, Network, NetworkComponent, Hooks, Encode, Parachain, TestExt}; + use $crate::{Chain, HeadData, Network, Hooks, Encode, Parachain, TestExt}; + // Initialize the thread local variable + $crate::paste::paste! 
{ + [].with(|v| *v.borrow_mut() = Self::build_new_ext($genesis)); + } // Set the last block head for later use in the next block Self::set_last_head(); // Initialize a new block @@ -624,21 +634,21 @@ macro_rules! decl_test_parachains { } fn new_block() { - use $crate::{Chain, HeadData, Network, NetworkComponent, Hooks, Encode, Parachain, TestExt}; + use $crate::{Chain, HeadData, Network, Hooks, Encode, Parachain, TestExt}; let para_id = Self::para_id().into(); Self::ext_wrapper(|| { // Increase Relay Chain block number - let mut relay_block_number = <$name as NetworkComponent>::Network::relay_block_number(); + let mut relay_block_number = N::relay_block_number(); relay_block_number += 1; - <$name as NetworkComponent>::Network::set_relay_block_number(relay_block_number); + N::set_relay_block_number(relay_block_number); // Initialize a new Parachain block let mut block_number = ::System::block_number(); block_number += 1; let parent_head_data = $crate::LAST_HEAD.with(|b| b.borrow_mut() - .get_mut(::Network::name()) + .get_mut(N::name()) .expect("network not initialized?") .get(¶_id) .expect("network not initialized?") @@ -649,13 +659,13 @@ macro_rules! decl_test_parachains { let _ = ::ParachainSystem::set_validation_data( ::RuntimeOrigin::none(), - <$name as NetworkComponent>::Network::hrmp_channel_parachain_inherent_data(para_id, relay_block_number, parent_head_data), + N::hrmp_channel_parachain_inherent_data(para_id, relay_block_number, parent_head_data), ); }); } fn finalize_block() { - use $crate::{Chain, Encode, Hooks, Network, NetworkComponent, Parachain, TestExt}; + use $crate::{Chain, Encode, Hooks, Network, Parachain, TestExt}; Self::ext_wrapper(|| { let block_number = ::System::block_number(); @@ -667,7 +677,7 @@ macro_rules! decl_test_parachains { fn set_last_head() { - use $crate::{Chain, Encode, HeadData, Network, NetworkComponent, Parachain, TestExt}; + use $crate::{Chain, Encode, HeadData, Network, Parachain, TestExt}; let para_id = Self::para_id().into(); @@ -675,7 +685,7 @@ macro_rules! decl_test_parachains { // Store parent head data for use later. let created_header = ::System::finalize(); $crate::LAST_HEAD.with(|b| b.borrow_mut() - .get_mut(::Network::name()) + .get_mut(N::name()) .expect("network not initialized?") .insert(para_id, HeadData(created_header.encode())) ); @@ -684,21 +694,21 @@ macro_rules! decl_test_parachains { } $crate::paste::paste! { - pub trait [<$name Pallet>] { + pub trait [<$name ParaPallet>] { $( type $pallet_name; )* } - impl [<$name Pallet>] for $name { + impl [<$name ParaPallet>] for $name { $( type $pallet_name = $pallet_path; )* } } - $crate::__impl_test_ext_for_parachain!($name, $genesis, $on_init); - $crate::__impl_check_assertion!($name); + $crate::__impl_test_ext_for_parachain!($name, N, $genesis, $on_init); + $crate::__impl_check_assertion!($name, N); )+ }; } @@ -706,16 +716,16 @@ macro_rules! decl_test_parachains { #[macro_export] macro_rules! __impl_test_ext_for_parachain { // entry point: generate ext name - ($name:ident, $genesis:expr, $on_init:expr) => { + ($name:ident, $network:ident, $genesis:expr, $on_init:expr) => { $crate::paste::paste! 
{ - $crate::__impl_test_ext_for_parachain!(@impl $name, $genesis, $on_init, [], []); + $crate::__impl_test_ext_for_parachain!(@impl $name, $network, $genesis, $on_init, [], []); } }; // impl - (@impl $name:ident, $genesis:expr, $on_init:expr, $local_ext:ident, $global_ext:ident) => { + (@impl $name:ident, $network:ident, $genesis:expr, $on_init:expr, $local_ext:ident, $global_ext:ident) => { thread_local! { pub static $local_ext: $crate::RefCell<$crate::TestExternalities> - = $crate::RefCell::new(<$name as $crate::TestExt>::build_new_ext($genesis)); + = $crate::RefCell::new($crate::TestExternalities::new($genesis)); } $crate::lazy_static! { @@ -723,7 +733,7 @@ macro_rules! __impl_test_ext_for_parachain { = $crate::Mutex::new($crate::RefCell::new($crate::HashMap::new())); } - impl $crate::TestExt for $name { + impl<$network: $crate::Network> $crate::TestExt for $name<$network> { fn build_new_ext(storage: $crate::Storage) -> $crate::TestExternalities { let mut ext = $crate::TestExternalities::new(storage); @@ -740,7 +750,7 @@ macro_rules! __impl_test_ext_for_parachain { } fn new_ext() -> $crate::TestExternalities { - <$name>::build_new_ext($genesis) + Self::build_new_ext($genesis) } fn move_ext_out(id: &'static str) { @@ -791,14 +801,14 @@ macro_rules! __impl_test_ext_for_parachain { } fn reset_ext() { - $local_ext.with(|v| *v.borrow_mut() = <$name>::build_new_ext($genesis)); + $local_ext.with(|v| *v.borrow_mut() = Self::build_new_ext($genesis)); } fn execute_with(execute: impl FnOnce() -> R) -> R { - use $crate::{Chain, Get, Hooks, NetworkComponent, Network, Parachain, Encode}; + use $crate::{Chain, Get, Hooks, Network, Parachain, Encode}; // Make sure the Network is initialized - <$name as NetworkComponent>::Network::init(); + <$network>::init(); // Initialize a new block Self::new_block(); @@ -809,7 +819,7 @@ macro_rules! __impl_test_ext_for_parachain { // Finalize the block Self::finalize_block(); - let para_id = <$name>::para_id().into(); + let para_id = Self::para_id().into(); // Send messages if needed $local_ext.with(|v| { @@ -825,27 +835,27 @@ macro_rules! __impl_test_ext_for_parachain { let collation_info = ::ParachainSystem::collect_collation_info(&mock_header); // send upward messages - let relay_block_number = <$name as NetworkComponent>::Network::relay_block_number(); + let relay_block_number = <$network>::relay_block_number(); for msg in collation_info.upward_messages.clone() { - <$name>::send_upward_message(para_id, msg); + <$network>::send_upward_message(para_id, msg); } // send horizontal messages for msg in collation_info.horizontal_messages { - <$name>::send_horizontal_messages( + <$network>::send_horizontal_messages( msg.recipient.into(), vec![(para_id.into(), relay_block_number, msg.data)].into_iter(), ); } // get bridge messages - type NetworkBridge = <<$name as NetworkComponent>::Network as $crate::Network>::Bridge; + type NetworkBridge<$network> = <$network as $crate::Network>::Bridge; - let bridge_messages = <::Handler as $crate::BridgeMessageHandler>::get_source_outbound_messages(); + let bridge_messages = < as $crate::Bridge>::Handler as $crate::BridgeMessageHandler>::get_source_outbound_messages(); // send bridged messages for msg in bridge_messages { - <$name>::send_bridged_messages(msg); + <$network>::send_bridged_messages(msg); } // log events @@ -861,7 +871,7 @@ macro_rules! __impl_test_ext_for_parachain { // provide inbound DMP/HRMP messages through a side-channel. // normally this would come through the `set_validation_data`, // but we go around that. 
- <$name as NetworkComponent>::Network::process_messages(); + <$network>::process_messages(); r } @@ -883,8 +893,8 @@ macro_rules! decl_test_networks { ( $( pub struct $name:ident { - relay_chain = $relay_chain:ty, - parachains = vec![ $( $parachain:ty, )* ], + relay_chain = $relay_chain:ident, + parachains = vec![ $( $parachain:ident, )* ], bridge = $bridge:ty } ), @@ -892,10 +902,11 @@ macro_rules! decl_test_networks { $(,)? ) => { $( + #[derive(Clone)] pub struct $name; impl $crate::Network for $name { - type Relay = $relay_chain; + type Relay = $relay_chain; type Bridge = $bridge; fn name() -> &'static str { @@ -913,8 +924,8 @@ macro_rules! decl_test_networks { $crate::BRIDGED_MESSAGES.with(|b| b.borrow_mut().remove(Self::name())); $crate::LAST_HEAD.with(|b| b.borrow_mut().remove(Self::name())); - <$relay_chain>::reset_ext(); - $( <$parachain>::reset_ext(); )* + <$relay_chain>::reset_ext(); + $( <$parachain>::reset_ext(); )* } fn init() { @@ -929,13 +940,14 @@ macro_rules! decl_test_networks { $crate::PARA_IDS.with(|b| b.borrow_mut().insert(Self::name().to_string(), Self::para_ids())); $crate::LAST_HEAD.with(|b| b.borrow_mut().insert(Self::name().to_string(), $crate::HashMap::new())); - $( <$parachain as $crate::Parachain>::init(); )* + <$relay_chain as $crate::RelayChain>::init(); + $( <$parachain as $crate::Parachain>::init(); )* } } fn para_ids() -> Vec { vec![$( - <$parachain as $crate::Parachain>::para_id().into(), + <$parachain as $crate::Parachain>::para_id().into(), )*] } @@ -968,12 +980,12 @@ macro_rules! decl_test_networks { } fn process_downward_messages() { - use $crate::{DmpMessageHandler, Bounded, Parachain, RelayChainBlockNumber, TestExt}; + use $crate::{DmpMessageHandler, Bounded, Parachain, RelayChainBlockNumber, TestExt, Encode}; while let Some((to_para_id, messages)) = $crate::DOWNWARD_MESSAGES.with(|b| b.borrow_mut().get_mut(Self::name()).unwrap().pop_front()) { $( - let para_id: u32 = <$parachain>::para_id().into(); + let para_id: u32 = <$parachain>::para_id().into(); if $crate::PARA_IDS.with(|b| b.borrow_mut().get_mut(Self::name()).unwrap().contains(&to_para_id)) && para_id == to_para_id { let mut msg_dedup: Vec<(RelayChainBlockNumber, Vec)> = Vec::new(); @@ -983,16 +995,25 @@ macro_rules! 
decl_test_networks { msg_dedup.dedup(); let msgs = msg_dedup.clone().into_iter().filter(|m| { - !$crate::DMP_DONE.with(|b| b.borrow_mut().get_mut(Self::name()).unwrap_or(&mut $crate::VecDeque::new()).contains(&(to_para_id, m.0, m.1.clone()))) + !$crate::DMP_DONE.with(|b| b.borrow().get(Self::name()) + .unwrap_or(&mut $crate::VecDeque::new()) + .contains(&(to_para_id, m.0, m.1.clone())) + ) }).collect::)>>(); - if msgs.len() != 0 { - <$parachain>::ext_wrapper(|| { - <$parachain as Parachain>::DmpMessageHandler::handle_dmp_messages(msgs.clone().into_iter(), $crate::Weight::max_value()); + + use $crate::{ProcessMessage, CumulusAggregateMessageOrigin, BoundedSlice, WeightMeter}; + for (block, msg) in msgs.clone().into_iter() { + let mut weight_meter = WeightMeter::new(); + <$parachain>::ext_wrapper(|| { + let _ = <$parachain as Parachain>::MessageProcessor::process_message( + &msg[..], + $crate::CumulusAggregateMessageOrigin::Parent, + &mut weight_meter, + &mut msg.using_encoded($crate::blake2_256), + ); }); $crate::log::debug!(target: concat!("dmp::", stringify!($name)) , "DMP messages processed {:?} to para_id {:?}", msgs.clone(), &to_para_id); - for m in msgs { - $crate::DMP_DONE.with(|b| b.borrow_mut().get_mut(Self::name()).unwrap().push_back((to_para_id, m.0, m.1))); - } + $crate::DMP_DONE.with(|b| b.borrow_mut().get_mut(Self::name()).unwrap().push_back((to_para_id, block, msg))); } } )* @@ -1000,17 +1021,19 @@ macro_rules! decl_test_networks { } fn process_horizontal_messages() { - use $crate::{XcmpMessageHandler, Bounded, Parachain, TestExt}; + use $crate::{XcmpMessageHandler, ServiceQueues, Bounded, Parachain, TestExt}; while let Some((to_para_id, messages)) = $crate::HORIZONTAL_MESSAGES.with(|b| b.borrow_mut().get_mut(Self::name()).unwrap().pop_front()) { let iter = messages.iter().map(|(p, b, m)| (*p, *b, &m[..])).collect::>().into_iter(); $( - let para_id: u32 = <$parachain>::para_id().into(); + let para_id: u32 = <$parachain>::para_id().into(); if $crate::PARA_IDS.with(|b| b.borrow_mut().get_mut(Self::name()).unwrap().contains(&to_para_id)) && para_id == to_para_id { - <$parachain>::ext_wrapper(|| { - <$parachain as Parachain>::XcmpMessageHandler::handle_xcmp_messages(iter.clone(), $crate::Weight::max_value()); + <$parachain>::ext_wrapper(|| { + <$parachain as Parachain>::XcmpMessageHandler::handle_xcmp_messages(iter.clone(), $crate::Weight::MAX); + // Nudge the MQ pallet to process immediately instead of in the next block. + let _ = <$parachain as Parachain>::MessageProcessor::service_queues($crate::Weight::MAX); }); $crate::log::debug!(target: concat!("hrmp::", stringify!($name)) , "HRMP messages processed {:?} to para_id {:?}", &messages, &to_para_id); } @@ -1019,12 +1042,12 @@ macro_rules! decl_test_networks { } fn process_upward_messages() { - use $crate::{Encode, ProcessMessage, TestExt}; + use $crate::{Encode, ProcessMessage, TestExt, WeightMeter}; while let Some((from_para_id, msg)) = $crate::UPWARD_MESSAGES.with(|b| b.borrow_mut().get_mut(Self::name()).unwrap().pop_front()) { - let mut weight_meter = $crate::WeightMeter::new(); - <$relay_chain>::ext_wrapper(|| { - let _ = <$relay_chain as $crate::RelayChain>::MessageProcessor::process_message( + let mut weight_meter = WeightMeter::new(); + <$relay_chain>::ext_wrapper(|| { + let _ = <$relay_chain as $crate::RelayChain>::MessageProcessor::process_message( &msg[..], from_para_id.into(), &mut weight_meter, @@ -1107,13 +1130,13 @@ macro_rules! 
decl_test_networks { } } - impl $crate::NetworkComponent for $relay_chain { - type Network = $name; + $crate::paste::paste! { + pub type [<$relay_chain Relay>] = $relay_chain<$name>; } $( - impl $crate::NetworkComponent for $parachain { - type Network = $name; + $crate::paste::paste! { + pub type [<$parachain Para>] = $parachain<$name>; } )* )+ @@ -1125,9 +1148,9 @@ macro_rules! decl_test_bridges { ( $( pub struct $name:ident { - source = $source:ty, - target = $target:ty, - handler = $handler:ty + source = $source:ident, + target = $target:ident, + handler = $handler:ident } ), + @@ -1143,10 +1166,10 @@ macro_rules! decl_test_bridges { type Handler = $handler; fn init() { - use $crate::{NetworkComponent, Network}; + use $crate::{Network, Parachain}; // Make sure source and target `Network` have been initialized - <$source as NetworkComponent>::Network::init(); - <$target as NetworkComponent>::Network::init(); + <$source as Chain>::Network::init(); + <$target as Chain>::Network::init(); } } )+ @@ -1155,10 +1178,11 @@ macro_rules! decl_test_bridges { #[macro_export] macro_rules! __impl_check_assertion { - ($chain:ident) => { - impl - $crate::CheckAssertion for $chain + ($chain:ident, $network:ident) => { + impl<$network, Origin, Destination, Hops, Args> + $crate::CheckAssertion for $chain<$network> where + $network: $crate::Network, Origin: $crate::Chain + Clone, Destination: $crate::Chain + Clone, Origin::RuntimeOrigin: @@ -1171,9 +1195,9 @@ macro_rules! __impl_check_assertion { fn check_assertion(test: $crate::Test) { use $crate::TestExt; - let chain_name = std::any::type_name::<$chain>(); + let chain_name = std::any::type_name::<$chain<$network>>(); - <$chain>::execute_with(|| { + <$chain<$network>>::execute_with(|| { if let Some(dispatchable) = test.hops_dispatchable.get(chain_name) { $crate::assert_ok!(dispatchable(test.clone())); } @@ -1244,7 +1268,14 @@ macro_rules! assert_expected_events { ) ); } else if !event_received { - message.push(format!("\n\n{}::\x1b[31m{}\x1b[0m was never received", stringify!($chain), stringify!($event_pat))); + message.push( + format!( + "\n\n{}::\x1b[31m{}\x1b[0m was never received. All events:\n{:#?}", + stringify!($chain), + stringify!($event_pat), + <$chain as $crate::Chain>::events(), + ) + ); } else { // If we find a perfect match we remove the event to avoid being potentially assessed multiple times events.remove(index_match); @@ -1282,10 +1313,60 @@ macro_rules! 
decl_test_sender_receiver_accounts_parameter_types { }; } -pub struct DefaultMessageProcessor(PhantomData); -impl ProcessMessage for DefaultMessageProcessor +pub struct DefaultParaMessageProcessor(PhantomData); +// Process HRMP messages from sibling paraids +impl ProcessMessage for DefaultParaMessageProcessor where - T: Chain + RelayChain, + T: Parachain, + T::Runtime: MessageQueueConfig, + <::MessageProcessor as ProcessMessage>::Origin: + PartialEq, + MessageQueuePallet: EnqueueMessage + ServiceQueues, +{ + type Origin = CumulusAggregateMessageOrigin; + + fn process_message( + msg: &[u8], + orig: Self::Origin, + _meter: &mut WeightMeter, + _id: &mut XcmHash, + ) -> Result { + MessageQueuePallet::::enqueue_message( + msg.try_into().expect("Message too long"), + orig.clone(), + ); + MessageQueuePallet::::service_queues(Weight::MAX); + + Ok(true) + } +} +impl ServiceQueues for DefaultParaMessageProcessor +where + T: Parachain, + T::Runtime: MessageQueueConfig, + <::MessageProcessor as ProcessMessage>::Origin: + PartialEq, + MessageQueuePallet: EnqueueMessage + ServiceQueues, +{ + type OverweightMessageAddress = (); + + fn service_queues(weight_limit: Weight) -> Weight { + MessageQueuePallet::::service_queues(weight_limit) + } + + fn execute_overweight( + _weight_limit: Weight, + _address: Self::OverweightMessageAddress, + ) -> Result { + unimplemented!() + } +} + +pub struct DefaultRelayMessageProcessor(PhantomData); +// Process UMP messages on the relay +impl ProcessMessage for DefaultRelayMessageProcessor +where + T: RelayChain, T::Runtime: MessageQueueConfig, <::MessageProcessor as ProcessMessage>::Origin: PartialEq, @@ -1309,6 +1390,28 @@ where } } +impl ServiceQueues for DefaultRelayMessageProcessor +where + T: RelayChain, + T::Runtime: MessageQueueConfig, + <::MessageProcessor as ProcessMessage>::Origin: + PartialEq, + MessageQueuePallet: EnqueueMessage + ServiceQueues, +{ + type OverweightMessageAddress = (); + + fn service_queues(weight_limit: Weight) -> Weight { + MessageQueuePallet::::service_queues(weight_limit) + } + + fn execute_overweight( + _weight_limit: Weight, + _address: Self::OverweightMessageAddress, + ) -> Result { + unimplemented!() + } +} + /// Struct that keeps account's id and balance #[derive(Clone)] pub struct TestAccount { @@ -1335,14 +1438,14 @@ pub struct TestContext { pub args: T, } -/// Struct that help with tests where either dispatchables or assertions need +/// Struct that helps with tests where either dispatchables or assertions need /// to be reused. The struct keeps the test's arguments of your choice in the generic `Args`. -/// These arguments can be easily reused and shared between the assertions functions -/// and dispatchables functions, which are also stored in `Test`. +/// These arguments can be easily reused and shared between the assertion functions +/// and dispatchable functions, which are also stored in `Test`. /// `Origin` corresponds to the chain where the XCM interaction starts with an initial execution. -/// `Destination` corresponds to the last chain where an effect of the intial execution is expected -/// happen. `Hops` refer all the ordered intermediary chains an initial XCM execution can provoke -/// some effect. +/// `Destination` corresponds to the last chain where an effect of the initial execution is expected +/// to happen. `Hops` refer to all the ordered intermediary chains an initial XCM execution can +/// provoke some effect on. 
#[derive(Clone)] pub struct Test where @@ -1396,7 +1499,7 @@ where let chain_name = std::any::type_name::(); self.hops_assertion.insert(chain_name.to_string(), assertion); } - /// Stores an assertion in a particular Chain + /// Stores a dispatchable in a particular Chain pub fn set_dispatchable(&mut self, dispatchable: fn(Self) -> DispatchResult) { let chain_name = std::any::type_name::(); self.hops_dispatchable.insert(chain_name.to_string(), dispatchable); diff --git a/cumulus/zombienet/bridge-hubs/bridge_hub_rococo_local_network.toml b/cumulus/zombienet/bridge-hubs/bridge_hub_rococo_local_network.toml index 80b398ac7240058fe12f460e069d35455a106be9..99a7d0035b511c57ccf5c10fa94165933c495ba9 100644 --- a/cumulus/zombienet/bridge-hubs/bridge_hub_rococo_local_network.toml +++ b/cumulus/zombienet/bridge-hubs/bridge_hub_rococo_local_network.toml @@ -7,25 +7,25 @@ default_args = [ "-lparachain=debug,xcm=trace" ] chain = "rococo-local" [[relaychain.nodes]] - name = "alice-validator" + name = "alice-rococo-validator" validator = true rpc_port = 9932 ws_port = 9942 - extra_args = ["--no-mdns --bootnodes {{'bob-validator'|zombie('multiAddress')}}"] + balance = 2000000000000 [[relaychain.nodes]] - name = "bob-validator" + name = "bob-rococo-validator" validator = true rpc_port = 9933 ws_port = 9943 - extra_args = ["--no-mdns --bootnodes {{'alice-validator'|zombie('multiAddress')}}"] + balance = 2000000000000 [[relaychain.nodes]] - name = "charlie-validator" + name = "charlie-rococo-validator" validator = true rpc_port = 9934 ws_port = 9944 - extra_args = ["--no-mdns --bootnodes {{'alice-validator'|zombie('multiAddress')}}"] + balance = 2000000000000 [[parachains]] id = 1013 @@ -34,71 +34,57 @@ cumulus_based = true # run alice as parachain collator [[parachains.collators]] - name = "alice-collator" + name = "bridge-hub-rococo-collator1" validator = true command = "{{POLKADOT_PARACHAIN_BINARY_PATH}}" rpc_port = 8933 ws_port = 8943 args = [ "-lparachain=debug,runtime::bridge-hub=trace,runtime::bridge=trace,runtime::bridge-dispatch=trace,bridge=trace,runtime::bridge-messages=trace,xcm=trace", - ] - extra_args = [ - "--force-authoring", "--no-mdns", "--bootnodes {{'bob-collator'|zombie('multiAddress')}}", - "-- --port 41333 --rpc-port 48933 --ws-port 48943 --no-mdns", "--bootnodes {{'alice-validator'|zombie('multiAddress')}}" + "--force-authoring" ] # run bob as parachain collator [[parachains.collators]] - name = "bob-collator" + name = "bridge-hub-rococo-collator2" validator = true command = "{{POLKADOT_PARACHAIN_BINARY_PATH}}" rpc_port = 8934 ws_port = 8944 args = [ "-lparachain=trace,runtime::bridge-hub=trace,runtime::bridge=trace,runtime::bridge-dispatch=trace,bridge=trace,runtime::bridge-messages=trace,xcm=trace", - ] - extra_args = [ - "--force-authoring", "--no-mdns", "--bootnodes {{'alice-collator'|zombie('multiAddress')}}", - "-- --port 41334 --rpc-port 48934 --ws-port 48944 --no-mdns", "--bootnodes {{'bob-validator'|zombie('multiAddress')}}" + "--force-authoring" ] [[parachains]] id = 1000 -chain = "asset-hub-kusama-local" +chain = "asset-hub-rococo-local" cumulus_based = true [[parachains.collators]] - name = "rockmine-collator1" + name = "asset-hub-rococo-collator1" rpc_port = 9911 ws_port = 9910 command = "{{POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_ROCOCO}}" args = [ - "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace", - ] - extra_args = [ - "--no-mdns", "--bootnodes {{'rockmine-collator2'|zombie('multiAddress')}}", - "-- --port 51333 --rpc-port 58933 --ws-port 58943 
--no-mdns", "--bootnodes {{'alice-validator'|zombie('multiAddress')}}" + "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace" ] [[parachains.collators]] - name = "rockmine-collator2" + name = "asset-hub-rococo-collator2" command = "{{POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_ROCOCO}}" args = [ - "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace", + "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace" ] - extra_args = [ - "--no-mdns", "--bootnodes {{'rockmine-collator1'|zombie('multiAddress')}}", - "-- --port 51433 --rpc-port 58833 --ws-port 58843 --no-mdns", "--bootnodes {{'alice-validator'|zombie('multiAddress')}}" - ] - -[[hrmp_channels]] -sender = 1000 -recipient = 1013 -max_capacity = 4 -max_message_size = 524288 -[[hrmp_channels]] -sender = 1013 -recipient = 1000 -max_capacity = 4 -max_message_size = 524288 +#[[hrmp_channels]] +#sender = 1000 +#recipient = 1013 +#max_capacity = 4 +#max_message_size = 524288 +# +#[[hrmp_channels]] +#sender = 1013 +#recipient = 1000 +#max_capacity = 4 +#max_message_size = 524288 diff --git a/cumulus/zombienet/bridge-hubs/bridge_hub_westend_local_network.toml b/cumulus/zombienet/bridge-hubs/bridge_hub_westend_local_network.toml new file mode 100644 index 0000000000000000000000000000000000000000..1919d1c63f25f154e4676599afb8a2969598c10b --- /dev/null +++ b/cumulus/zombienet/bridge-hubs/bridge_hub_westend_local_network.toml @@ -0,0 +1,90 @@ +[settings] +node_spawn_timeout = 240 + +[relaychain] +default_command = "{{POLKADOT_BINARY_PATH}}" +default_args = [ "-lparachain=debug,xcm=trace" ] +chain = "westend-local" + + [[relaychain.nodes]] + name = "alice-westend-validator" + validator = true + rpc_port = 9935 + ws_port = 9945 + balance = 2000000000000 + + [[relaychain.nodes]] + name = "bob-westend-validator" + validator = true + rpc_port = 9936 + ws_port = 9946 + balance = 2000000000000 + + [[relaychain.nodes]] + name = "charlie-westend-validator" + validator = true + rpc_port = 9937 + ws_port = 9947 + balance = 2000000000000 + +[[parachains]] +id = 1002 +chain = "bridge-hub-westend-local" +cumulus_based = true + + # run alice as parachain collator + [[parachains.collators]] + name = "bridge-hub-westend-collator1" + validator = true + command = "{{POLKADOT_PARACHAIN_BINARY_PATH}}" + rpc_port = 8935 + ws_port = 8945 + args = [ + "-lparachain=debug,runtime::mmr=info,substrate=info,runtime=info,runtime::bridge-hub=trace,runtime::bridge=trace,runtime::bridge-dispatch=trace,bridge=trace,runtime::bridge-messages=trace,xcm=trace", + "--force-authoring" + ] + + # run bob as parachain collator + [[parachains.collators]] + name = "bridge-hub-westend-collator2" + validator = true + command = "{{POLKADOT_PARACHAIN_BINARY_PATH}}" + rpc_port = 8936 + ws_port = 8946 + args = [ + "-lparachain=trace,runtime::mmr=info,substrate=info,runtime=info,runtime::bridge-hub=trace,runtime::bridge=trace,runtime::bridge-dispatch=trace,bridge=trace,runtime::bridge-messages=trace,xcm=trace", + "--force-authoring" + ] + +[[parachains]] +id = 1000 +chain = "asset-hub-westend-local" +cumulus_based = true + + [[parachains.collators]] + name = "asset-hub-westend-collator1" + rpc_port = 9011 + ws_port = 9010 + command = "{{POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_WESTEND}}" + args = [ + "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace" + ] + + [[parachains.collators]] + name = "asset-hub-westend-collator2" + command = "{{POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_WESTEND}}" + args = [ + "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace" + ] 
+ +#[[hrmp_channels]] +#sender = 1000 +#recipient = 1002 +#max_capacity = 4 +#max_message_size = 524288 +# +#[[hrmp_channels]] +#sender = 1002 +#recipient = 1000 +#max_capacity = 4 +#max_message_size = 524288 diff --git a/cumulus/zombienet/bridge-hubs/bridge_hub_wococo_local_network.toml b/cumulus/zombienet/bridge-hubs/bridge_hub_wococo_local_network.toml deleted file mode 100644 index 9727b4a087fda438e15b3fd8e4dbfc4fdb0c6ddb..0000000000000000000000000000000000000000 --- a/cumulus/zombienet/bridge-hubs/bridge_hub_wococo_local_network.toml +++ /dev/null @@ -1,104 +0,0 @@ -[settings] -node_spawn_timeout = 240 - -[relaychain] -default_command = "{{POLKADOT_BINARY_PATH}}" -default_args = [ "-lparachain=debug,xcm=trace" ] -chain = "wococo-local" - - [[relaychain.nodes]] - name = "alice-validator-wo" - validator = true - rpc_port = 9935 - ws_port = 9945 - extra_args = ["--no-mdns --bootnodes {{'bob-validator-wo'|zombie('multiAddress')}}"] - - [[relaychain.nodes]] - name = "bob-validator-wo" - validator = true - rpc_port = 9936 - ws_port = 9946 - extra_args = ["--no-mdns --bootnodes {{'alice-validator-wo'|zombie('multiAddress')}}"] - - [[relaychain.nodes]] - name = "charlie-validator-wo" - validator = true - rpc_port = 9937 - ws_port = 9947 - extra_args = ["--no-mdns --bootnodes {{'alice-validator-wo'|zombie('multiAddress')}}"] - -[[parachains]] -id = 1014 -chain = "bridge-hub-wococo-local" -cumulus_based = true - - # run alice as parachain collator - [[parachains.collators]] - name = "alice-collator-wo" - validator = true - command = "{{POLKADOT_PARACHAIN_BINARY_PATH}}" - rpc_port = 8935 - ws_port = 8945 - args = [ - "-lparachain=debug,runtime::mmr=info,substrate=info,runtime=info,runtime::bridge-hub=trace,runtime::bridge=trace,runtime::bridge-dispatch=trace,bridge=trace,runtime::bridge-messages=trace,xcm=trace", - ] - extra_args = [ - "--force-authoring", "--no-mdns", "--bootnodes {{'bob-collator-wo'|zombie('multiAddress')}}", - "-- --port 41335 --rpc-port 48935 --ws-port 48945 --no-mdns", "--bootnodes {{'alice-validator-wo'|zombie('multiAddress')}}" - ] - - # run bob as parachain collator - [[parachains.collators]] - name = "bob-collator-wo" - validator = true - command = "{{POLKADOT_PARACHAIN_BINARY_PATH}}" - rpc_port = 8936 - ws_port = 8946 - args = [ - "-lparachain=trace,runtime::mmr=info,substrate=info,runtime=info,runtime::bridge-hub=trace,runtime::bridge=trace,runtime::bridge-dispatch=trace,bridge=trace,runtime::bridge-messages=trace,xcm=trace", - ] - extra_args = [ - "--force-authoring", "--no-mdns", "--bootnodes {{'alice-collator-wo'|zombie('multiAddress')}}", - "-- --port 41336 --rpc-port 48936 --ws-port 48946 --no-mdns", "--bootnodes {{'bob-validator-wo'|zombie('multiAddress')}}" - ] - -[[parachains]] -id = 1000 -chain = "asset-hub-westend-local" -cumulus_based = true - - [[parachains.collators]] - name = "wockmint-collator1" - rpc_port = 9011 - ws_port = 9010 - command = "{{POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_WOCOCO}}" - args = [ - "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace", - ] - extra_args = [ - "--no-mdns", "--bootnodes {{'wockmint-collator2'|zombie('multiAddress')}}", - "-- --port 31333 --rpc-port 38933 --ws-port 38943 --no-mdns", "--bootnodes {{'alice-validator-wo'|zombie('multiAddress')}}" - ] - - [[parachains.collators]] - name = "wockmint-collator2" - command = "{{POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_WOCOCO}}" - args = [ - "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace", - ] - extra_args = [ - "--no-mdns", "--bootnodes 
{{'wockmint-collator1'|zombie('multiAddress')}}", - "-- --port 31433 --rpc-port 38833 --ws-port 38843 --no-mdns", "--bootnodes {{'alice-validator-wo'|zombie('multiAddress')}}" - ] - -[[hrmp_channels]] -sender = 1000 -recipient = 1014 -max_capacity = 4 -max_message_size = 524288 - -[[hrmp_channels]] -sender = 1014 -recipient = 1000 -max_capacity = 4 -max_message_size = 524288 diff --git a/cumulus/zombienet/tests/0002-pov_recovery.toml b/cumulus/zombienet/tests/0002-pov_recovery.toml index 7c74a74a75027dd29e67b256698dcfa0892492d5..34cacbc2a9ba8e3907fd90b97c13eaca63af6900 100644 --- a/cumulus/zombienet/tests/0002-pov_recovery.toml +++ b/cumulus/zombienet/tests/0002-pov_recovery.toml @@ -4,7 +4,7 @@ default_command = "polkadot" chain = "rococo-local" -[relaychain.genesis.runtime.runtime_genesis_config.configuration.config] +[relaychain.genesis.runtimeGenesis.patch.configuration.config] # set parameters such that collators only connect to 1 validator as a backing group max_validators_per_core = 1 group_rotation_frequency = 100 # 10 mins diff --git a/cumulus/zombienet/tests/0005-migrate_solo_to_para.toml b/cumulus/zombienet/tests/0005-migrate_solo_to_para.toml index f98c0e6f2592914fe690803a28f05abd1501229c..180ed8bf7b99bc673e76e6b26c19a86c7b103879 100644 --- a/cumulus/zombienet/tests/0005-migrate_solo_to_para.toml +++ b/cumulus/zombienet/tests/0005-migrate_solo_to_para.toml @@ -32,7 +32,7 @@ cumulus_based = true add_to_genesis = false register_para = false # Set some random value in the genesis state to create a different genesis hash. -[parachains.genesis.runtime.runtime_genesis_config.sudo] +[parachains.genesis.runtimeGenesis.patch.sudo] key = "5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty" # run the parachain that will be used to return the header of the solo chain. diff --git a/docs/DEPRECATION_CHECKLIST.md b/docs/DEPRECATION_CHECKLIST.md new file mode 100644 index 0000000000000000000000000000000000000000..fccf93d227379bdc16d831eac4ffb1f561adbe94 --- /dev/null +++ b/docs/DEPRECATION_CHECKLIST.md @@ -0,0 +1,79 @@ +# Deprecation Checklist + +This deprecation checklist makes sense while we don’t use [SemVer](https://semver.org/). +After that, this document will most likely change. +As deprecation and removal of existing code can happen on any release, we need to be mindful that external builders +could be impacted by the changes we make. +The deprecation checklist tries to mitigate this impact, while still keeping the developer experience, the DevEx, as +smooth as possible. + +To start a deprecation process, a new issue with the label `T15-deprecation` needs to be created for correct tracking. +Then these are the actions to take: + +## Hard deprecate by adding a warning message + +The warning message shall include a removal month and year, which is suggested to be 6 months after the deprecation +notice is released. +This means that the code will be removed in a release within that month (or after, but never before). Please use this +template, doing so makes it easy to search through the code base: + +```rust +#[deprecated(note = "[DEPRECATED] will be removed after [DATE]. [ALTERNATIVE]")] +``` +`[ALTERNATIVE]` won't always be possible but offer it if it is. + +E.g. +```rust +#[deprecated(note = "`GenesisConfig` will be removed after December 2023. 
Use `RuntimeGenesisConfig` instead.")] + ``` + +Some pieces of code cannot be labeled as deprecated, like [reexports](https://github.com/rust-lang/rust/issues/30827) +or [dispatchables](https://github.com/paritytech/polkadot-sdk/issues/182#issuecomment-1691684159), for instance. +In cases like that we can only add a sufficiently visible comment, and make sure that we [announce the deprecation properly](#announce-the-deprecation-and-removal). + +## Remove usage of the deprecated code in the code base + +Just make sure that we are not using the deprecated code ourselves. +If you added the deprecation warning from the previous step, this can be done by making sure that warning is not shown +when building the code. + +## Update examples and tutorials + +Make sure that the Rust docs are updated. +We also need [https://docs.substrate.io/](https://docs.substrate.io/) to be updated accordingly. The repo behind it is +[https://github.com/substrate-developer-hub/substrate-docs](https://github.com/substrate-developer-hub/substrate-docs). + +## Announce the deprecation and removal + +**At a minimum, they should be noted in the release log.** Please see how to document a PR [here](https://github.com/paritytech/polkadot-sdk/blob/master/docs/CONTRIBUTING.md#documentation). +There you can give instructions based on the audience and tell them what they need to do to upgrade the code. + +Some breaking changes have a bigger impact than others. When the impact is big the release note is not enough, though +it should still be the primary place for the notice. You can link back to the changelog files in other channels if you +want to announce it somewhere else. +Make sure you are as loud as you need to be for the magnitude of the breaking change. + +## Removal version is planned + +Depending on the removal date indicated in the deprecation warning in the [first step](#hard-deprecate-by-adding-a-warning-message), +and on the nature and importance of the change, it might make sense to coordinate the release with other developers and +with the Release team. + +## Deprecated code is removed + +The deprecated code finally gets removed. +Don’t forget to [announce this accordingly](#announce-the-deprecation-and-removal). + +✅ In order to not forget any of these steps, consider using this template in your deprecation issue: + +```markdown +### Tasks + +- [ ] Deprecate code by adding a warning message +- [ ] Remove usage of the deprecated code in the code base +- [ ] Update examples and tutorials +- [ ] Announce code deprecation +- [ ] Plan removal version +- [ ] Announce code removal +- [ ] Remove deprecated code +``` diff --git a/docs/DOCUMENTATION_GUIDELINES.md b/docs/DOCUMENTATION_GUIDELINES.md index 29029b894c761d9ce407f893a00f23b0a62f1651..5d1164e8ca89f8eefea71ad96318b9347ca29c89 100644 --- a/docs/DOCUMENTATION_GUIDELINES.md +++ b/docs/DOCUMENTATION_GUIDELINES.md @@ -210,7 +210,7 @@ The guidelines so far have been general in nature, and are applicable to crates pallets. The following is relevant to how to document parts of a crate that is a pallet. See -[`pallet-fast-unstake`](../frame/fast-unstake/src/lib.rs) as one example of adhering these guidelines. +[`pallet-fast-unstake`](../substrate/frame/fast-unstake/src/lib.rs) as one example of adhering to these guidelines.
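As a quick illustration of the hard-deprecation pattern described in the new `docs/DEPRECATION_CHECKLIST.md` above, the sketch below combines the `#[deprecated]` note template with a crate-level `deny(deprecated)` lint to cover the first two checklist steps. The `old_double`/`double` names and the June 2024 removal date are hypothetical and not part of this changeset; only the `#[deprecated]` attribute and the `deprecated` lint are standard Rust.

```rust
// Illustrative only: `old_double` and `double` are hypothetical items, not part of this PR.
// Turning the `deprecated` lint into an error ensures the code base itself no longer calls
// the old API ("Remove usage of the deprecated code in the code base" above).
#![deny(deprecated)]

/// Old entry point, kept until the removal date announced in the note below.
#[deprecated(note = "`old_double` will be removed after June 2024. Use `double` instead.")]
#[allow(dead_code)] // Unused here only because this example deliberately never calls it.
pub fn old_double(x: u32) -> u32 {
    double(x)
}

/// Replacement API that callers should migrate to.
pub fn double(x: u32) -> u32 {
    x.saturating_mul(2)
}

fn main() {
    // `old_double(21)` would fail to compile under `deny(deprecated)`; `double` is the migration path.
    assert_eq!(double(21), 42);
}
```

Gating the lint at the crate root is one way to verify the "remove usage" step mechanically; a workspace can get a similar effect by building with `RUSTFLAGS="-D deprecated"`.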
--- diff --git a/docs/STYLE_GUIDE.md b/docs/STYLE_GUIDE.md index a9ac4a5910b3547c44612313bbc9745e02febf1d..1ae9bc5003f60502dae885c49e4af861561a793c 100644 --- a/docs/STYLE_GUIDE.md +++ b/docs/STYLE_GUIDE.md @@ -152,7 +152,7 @@ let mut target_path = > **TLDR** > You can use the CLI tool [Zepter](https://crates.io/crates/zepter) to -> format the files: `zepter format features` +> format the files: `zepter format features --fix` (or `zepter f f -f`). Rust `Cargo.toml` files need to respect certain formatting rules. All entries need to be alphabetically sorted. This makes it easier to read them and insert diff --git a/docs/mermaid/substrate_client_runtime.mmd b/docs/mermaid/substrate_client_runtime.mmd new file mode 100644 index 0000000000000000000000000000000000000000..23c3f849224affcb791fc3324df44f4bd4fa398b --- /dev/null +++ b/docs/mermaid/substrate_client_runtime.mmd @@ -0,0 +1,10 @@ +graph TB +subgraph Substrate + direction LR + subgraph Client + end + subgraph Runtime + end + Client --runtime-api--> Runtime + Runtime --host-functions--> Client +end diff --git a/docs/mermaid/substrate_simple.mmd b/docs/mermaid/substrate_simple.mmd new file mode 100644 index 0000000000000000000000000000000000000000..475d8be5ef818ca6e2798a71cf3adee5c36a6dbf --- /dev/null +++ b/docs/mermaid/substrate_simple.mmd @@ -0,0 +1,8 @@ +graph TB +subgraph Substrate + direction LR + subgraph Client + end + subgraph Runtime + end +end diff --git a/docs/mermaid/substrate_with_frame.mmd b/docs/mermaid/substrate_with_frame.mmd new file mode 100644 index 0000000000000000000000000000000000000000..12d072a3360c2a43d248271348227891f89f6f46 --- /dev/null +++ b/docs/mermaid/substrate_with_frame.mmd @@ -0,0 +1,20 @@ +graph TB +subgraph Substrate + direction LR + subgraph Client + Database + Networking + Consensus + end + subgraph Runtime + subgraph FRAME + direction LR + Governance + Currency + Staking + Identity + end + end + Client --runtime-api--> Runtime + Runtime --host-functions--> Client +end diff --git a/polkadot/Cargo.toml b/polkadot/Cargo.toml index 6e82cb69f6ecd6c4db535c9797a4ea7fde9d67c1..fb161848fb6cf8ee0fd4f0b6d8aba6da7f3c382c 100644 --- a/polkadot/Cargo.toml +++ b/polkadot/Cargo.toml @@ -23,7 +23,7 @@ default-run = "polkadot" [dependencies] color-eyre = { version = "0.6.1", default-features = false } -tikv-jemallocator = { version = "0.5.0", optional = true } +tikv-jemallocator = { version = "0.5.0", optional = true, features = [ "unprefixed_malloc_on_supported_platforms" ] } # Crates in our workspace, defined as dependencies so we can pass them feature flags. 
polkadot-cli = { path = "cli", features = [ "westend-native", "rococo-native" ] } @@ -36,7 +36,7 @@ polkadot-node-core-pvf-common = { path = "node/core/pvf/common" } polkadot-node-core-pvf-execute-worker = { path = "node/core/pvf/execute-worker" } [target.'cfg(target_os = "linux")'.dependencies] -tikv-jemallocator = "0.5.0" +tikv-jemallocator = { version = "0.5.0", features = [ "unprefixed_malloc_on_supported_platforms" ] } [dev-dependencies] assert_cmd = "2.0.4" @@ -64,6 +64,8 @@ jemalloc-allocator = [ "polkadot-node-core-pvf/jemalloc-allocator", "polkadot-overseer/jemalloc-allocator", ] +network-protocol-staging = [ "polkadot-cli/network-protocol-staging" ] + # Enables timeout-based tests supposed to be run only in CI environment as they may be flaky # when run locally depending on system load diff --git a/polkadot/cli/Cargo.toml b/polkadot/cli/Cargo.toml index 8057342aaea0ce7410584bf09dcca0fa6074d106..0c2925c76e82f155a2d0fffd012f80b1ed4ee3cc 100644 --- a/polkadot/cli/Cargo.toml +++ b/polkadot/cli/Cargo.toml @@ -33,6 +33,7 @@ try-runtime-cli = { path = "../../substrate/utils/frame/try-runtime/cli", option sc-cli = { path = "../../substrate/client/cli", optional = true } sc-service = { path = "../../substrate/client/service", optional = true } polkadot-node-metrics = { path = "../node/metrics" } +polkadot-node-primitives = { path = "../node/primitives" } sc-tracing = { path = "../../substrate/client/tracing", optional = true } sc-sysinfo = { path = "../../substrate/client/sysinfo" } sc-executor = { path = "../../substrate/client/executor" } @@ -73,3 +74,5 @@ runtime-metrics = [ "polkadot-node-metrics/runtime-metrics", "service/runtime-metrics", ] + +network-protocol-staging = [ "service/network-protocol-staging" ] diff --git a/polkadot/cli/src/cli.rs b/polkadot/cli/src/cli.rs index faca2b25e23a31202c6ba91de7502c78cee1f348..e20e35c9103813e7884c2a56a9d9d82d061b8768 100644 --- a/polkadot/cli/src/cli.rs +++ b/polkadot/cli/src/cli.rs @@ -16,19 +16,11 @@ //! Polkadot CLI library. +pub use polkadot_node_primitives::NODE_VERSION; + use clap::Parser; use std::path::PathBuf; -/// The version of the node. -/// -/// This is the version that is used for versioning this node binary. -/// By default the `minor` version is bumped in every release. `Major` or `patch` releases are only -/// expected in very rare cases. -/// -/// The worker binaries associated to the node binary should ensure that they are using the same -/// version as the main node that started them. -pub const NODE_VERSION: &'static str = "1.1.0"; - #[allow(missing_docs)] #[derive(Debug, Parser)] pub enum Subcommand { @@ -92,29 +84,29 @@ pub struct RunCmd { /// Setup a GRANDPA scheduled voting pause. /// - /// This parameter takes two values, namely a block number and a delay (in - /// blocks). After the given block number is finalized the GRANDPA voter - /// will temporarily stop voting for new blocks until the given delay has - /// elapsed (i.e. until a block at height `pause_block + delay` is imported). + /// This parameter takes two values, namely a block number and a delay (in blocks). + /// + /// After the given block number is finalized the GRANDPA voter will temporarily + /// stop voting for new blocks until the given delay has elapsed (i.e. until a + /// block at height `pause_block + delay` is imported). #[arg(long = "grandpa-pause", num_args = 2)] pub grandpa_pause: Vec, - /// Disable the BEEFY gadget - /// (currently enabled by default on Rococo, Wococo and Versi). + /// Disable the BEEFY gadget. 
+ /// + /// Currently enabled by default on 'Rococo', 'Wococo' and 'Versi'. #[arg(long)] pub no_beefy: bool, - /// Add the destination address to the jaeger agent. + /// Add the destination address to the 'Jaeger' agent. /// - /// Must be valid socket address, of format `IP:Port` - /// commonly `127.0.0.1:6831`. + /// Must be a valid socket address, of format `IP:Port` (commonly `127.0.0.1:6831`). #[arg(long)] pub jaeger_agent: Option, /// Add the destination address to the `pyroscope` agent. /// - /// Must be valid socket address, of format `IP:Port` - /// commonly `127.0.0.1:4040`. + /// Must be a valid socket address, of format `IP:Port` (commonly `127.0.0.1:4040`). #[arg(long)] pub pyroscope_server: Option, @@ -134,10 +126,13 @@ pub struct RunCmd { #[arg(long)] pub overseer_channel_capacity_override: Option, - /// Path to the directory where auxiliary worker binaries reside. If not specified, the main - /// binary's directory is searched first, then `/usr/lib/polkadot` is searched. TESTING ONLY: - /// if the path points to an executable rather then directory, that executable is used both as - /// preparation and execution worker. + /// Path to the directory where auxiliary worker binaries reside. + /// + /// If not specified, the main binary's directory is searched first, then + /// `/usr/lib/polkadot` is searched. + /// + /// TESTING ONLY: if the path points to an executable rather than a directory, + /// that executable is used both as preparation and execution worker. #[arg(long, value_name = "PATH")] pub workers_path: Option, diff --git a/polkadot/cli/src/command.rs b/polkadot/cli/src/command.rs index 11c9014d1d6e3dbd4afd2ea44416bbc3fa436b47..2dcf5e0e8d7bf616d5c1eef3a775ddfa25d44721 100644 --- a/polkadot/cli/src/command.rs +++ b/polkadot/cli/src/command.rs @@ -50,7 +50,7 @@ impl SubstrateCli for Cli { fn impl_version() -> String { let commit_hash = env!("SUBSTRATE_CLI_COMMIT_HASH"); - format!("{NODE_VERSION}-{commit_hash}") + format!("{}-{commit_hash}", NODE_VERSION) } fn description() -> String { @@ -195,9 +195,8 @@ where .map_err(Error::from)?; let chain_spec = &runner.config().chain_spec; - // By default, enable BEEFY on all networks except Polkadot (for now), unless - // explicitly disabled through CLI. - let mut enable_beefy = !chain_spec.is_polkadot() && !cli.run.no_beefy; + // By default, enable BEEFY on all networks, unless explicitly disabled through CLI. + let mut enable_beefy = !cli.run.no_beefy; // BEEFY doesn't (yet) support warp sync: // Until we implement https://github.com/paritytech/substrate/issues/14756 // - disallow warp sync for validators, diff --git a/polkadot/core-primitives/Cargo.toml b/polkadot/core-primitives/Cargo.toml index 29c6be44454b9429cefa649a72678d8b5d9ad7f2..f843ec17943cbaddc886f73b33fc84bf53bb644e 100644 --- a/polkadot/core-primitives/Cargo.toml +++ b/polkadot/core-primitives/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "polkadot-core-primitives" version = "1.0.0" +description = "Core Polkadot types used by Relay Chains and parachains."
authors.workspace = true edition.workspace = true license.workspace = true @@ -9,7 +10,7 @@ license.workspace = true sp-core = { path = "../../substrate/primitives/core", default-features = false } sp-std = { path = "../../substrate/primitives/std", default-features = false } sp-runtime = { path = "../../substrate/primitives/runtime", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } parity-scale-codec = { version = "3.6.1", default-features = false, features = [ "derive" ] } [features] diff --git a/polkadot/erasure-coding/Cargo.toml b/polkadot/erasure-coding/Cargo.toml index d07b77ec4ddf5574bf29cef0113c1dc1ebf8b32d..ccfe7f14eb4647e114eebf5f4272b7a7cae4e664 100644 --- a/polkadot/erasure-coding/Cargo.toml +++ b/polkadot/erasure-coding/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "polkadot-erasure-coding" version = "1.0.0" +description = "Erasure coding used for Polkadot's availability system" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/polkadot/node/collation-generation/Cargo.toml b/polkadot/node/collation-generation/Cargo.toml index b110540140f958b0624738c38fde3aad417bc85e..c1848f47fc69f0dba739fafed79279adbf31cd4b 100644 --- a/polkadot/node/collation-generation/Cargo.toml +++ b/polkadot/node/collation-generation/Cargo.toml @@ -4,6 +4,7 @@ version = "1.0.0" authors.workspace = true edition.workspace = true license.workspace = true +description = "Collator-side subsystem that handles incoming candidate submissions from the parachain." [dependencies] futures = "0.3.21" diff --git a/polkadot/node/collation-generation/src/lib.rs b/polkadot/node/collation-generation/src/lib.rs index 4e13755deedfb98f020cdfc969e80db42df7d602..b8c9c1a36e46b9f4b2e7fcc7d40e2ea68ce8cc7e 100644 --- a/polkadot/node/collation-generation/src/lib.rs +++ b/polkadot/node/collation-generation/src/lib.rs @@ -193,7 +193,7 @@ async fn handle_new_activations( metrics: Metrics, ) -> crate::error::Result<()> { // follow the procedure from the guide: - // https://paritytech.github.io/polkadot/book/node/collators/collation-generation.html + // https://paritytech.github.io/polkadot-sdk/book/node/collators/collation-generation.html if config.collator.is_none() { return Ok(()) diff --git a/polkadot/node/core/approval-voting/Cargo.toml b/polkadot/node/core/approval-voting/Cargo.toml index acad0d1fa4e4f5c6286fde17181d582298ed2922..59a6708f17e4701276a034c923a79933c8f32092 100644 --- a/polkadot/node/core/approval-voting/Cargo.toml +++ b/polkadot/node/core/approval-voting/Cargo.toml @@ -4,6 +4,7 @@ version = "1.0.0" authors.workspace = true edition.workspace = true license.workspace = true +description = "Approval Voting Subsystem of the Polkadot node" [dependencies] futures = "0.3.21" @@ -17,6 +18,7 @@ schnorrkel = "0.9.1" kvdb = "0.13.0" derive_more = "0.99.17" thiserror = "1.0.48" +itertools = "0.10.5" polkadot-node-subsystem = { path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } @@ -30,6 +32,9 @@ sp-consensus = { path = "../../../../substrate/primitives/consensus/common", def sp-consensus-slots = { path = "../../../../substrate/primitives/consensus/slots", default-features = false } sp-application-crypto = { path = "../../../../substrate/primitives/application-crypto", default-features = false, features = ["full_crypto"] } sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = 
false } +rand_core = "0.5.1" +rand_chacha = { version = "0.3.1" } +rand = "0.8.5" [dev-dependencies] async-trait = "0.1.57" @@ -43,3 +48,5 @@ polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } assert_matches = "1.4.0" kvdb-memorydb = "0.13.0" test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../../primitives/test-helpers" } +log = "0.4.17" +env_logger = "0.9.0" diff --git a/polkadot/node/core/approval-voting/src/approval_checking.rs b/polkadot/node/core/approval-voting/src/approval_checking.rs index f345b57029b5a63f1f171578d6b2cb45d703a263..5d24ff164193de287893b5dc6bcb156f6bcda578 100644 --- a/polkadot/node/core/approval-voting/src/approval_checking.rs +++ b/polkadot/node/core/approval-voting/src/approval_checking.rs @@ -17,7 +17,7 @@ //! Utilities for checking whether a candidate has been approved under a given block. use bitvec::{order::Lsb0 as BitOrderLsb0, slice::BitSlice}; -use polkadot_node_primitives::approval::DelayTranche; +use polkadot_node_primitives::approval::v1::DelayTranche; use polkadot_primitives::ValidatorIndex; use crate::{ @@ -472,9 +472,9 @@ mod tests { } .into(); - let approval_entry = approval_db::v1::ApprovalEntry { + let approval_entry = approval_db::v2::ApprovalEntry { tranches: Vec::new(), - assignments: BitVec::default(), + assigned_validators: BitVec::default(), our_assignment: None, our_approval_sig: None, backing_group: GroupIndex(0), @@ -509,22 +509,22 @@ mod tests { candidate.mark_approval(ValidatorIndex(i)); } - let approval_entry = approval_db::v1::ApprovalEntry { + let approval_entry = approval_db::v2::ApprovalEntry { tranches: vec![ - approval_db::v1::TrancheEntry { + approval_db::v2::TrancheEntry { tranche: 0, assignments: (0..2).map(|i| (ValidatorIndex(i), 0.into())).collect(), }, - approval_db::v1::TrancheEntry { + approval_db::v2::TrancheEntry { tranche: 1, assignments: (2..5).map(|i| (ValidatorIndex(i), 1.into())).collect(), }, - approval_db::v1::TrancheEntry { + approval_db::v2::TrancheEntry { tranche: 2, assignments: (5..10).map(|i| (ValidatorIndex(i), 0.into())).collect(), }, ], - assignments: bitvec![u8, BitOrderLsb0; 1; 10], + assigned_validators: bitvec![u8, BitOrderLsb0; 1; 10], our_assignment: None, our_approval_sig: None, backing_group: GroupIndex(0), @@ -581,22 +581,22 @@ mod tests { candidate.mark_approval(ValidatorIndex(i)); } - let approval_entry = approval_db::v1::ApprovalEntry { + let approval_entry = approval_db::v2::ApprovalEntry { tranches: vec![ - approval_db::v1::TrancheEntry { + approval_db::v2::TrancheEntry { tranche: 0, assignments: (0..4).map(|i| (ValidatorIndex(i), 0.into())).collect(), }, - approval_db::v1::TrancheEntry { + approval_db::v2::TrancheEntry { tranche: 1, assignments: (4..6).map(|i| (ValidatorIndex(i), 1.into())).collect(), }, - approval_db::v1::TrancheEntry { + approval_db::v2::TrancheEntry { tranche: 2, assignments: (6..10).map(|i| (ValidatorIndex(i), 0.into())).collect(), }, ], - assignments: bitvec![u8, BitOrderLsb0; 1; 10], + assigned_validators: bitvec![u8, BitOrderLsb0; 1; 10], our_assignment: None, our_approval_sig: None, backing_group: GroupIndex(0), @@ -647,9 +647,9 @@ mod tests { let no_show_duration = 10; let needed_approvals = 4; - let mut approval_entry: ApprovalEntry = approval_db::v1::ApprovalEntry { + let mut approval_entry: ApprovalEntry = approval_db::v2::ApprovalEntry { tranches: Vec::new(), - assignments: bitvec![u8, BitOrderLsb0; 0; 5], + assigned_validators: bitvec![u8, BitOrderLsb0; 0; 5], our_assignment: None, our_approval_sig: None, 
backing_group: GroupIndex(0), @@ -691,9 +691,9 @@ mod tests { let no_show_duration = 10; let needed_approvals = 4; - let mut approval_entry: ApprovalEntry = approval_db::v1::ApprovalEntry { + let mut approval_entry: ApprovalEntry = approval_db::v2::ApprovalEntry { tranches: Vec::new(), - assignments: bitvec![u8, BitOrderLsb0; 0; 10], + assigned_validators: bitvec![u8, BitOrderLsb0; 0; 10], our_assignment: None, our_approval_sig: None, backing_group: GroupIndex(0), @@ -731,9 +731,9 @@ mod tests { let no_show_duration = 10; let needed_approvals = 4; - let mut approval_entry: ApprovalEntry = approval_db::v1::ApprovalEntry { + let mut approval_entry: ApprovalEntry = approval_db::v2::ApprovalEntry { tranches: Vec::new(), - assignments: bitvec![u8, BitOrderLsb0; 0; 10], + assigned_validators: bitvec![u8, BitOrderLsb0; 0; 10], our_assignment: None, our_approval_sig: None, backing_group: GroupIndex(0), @@ -776,9 +776,9 @@ mod tests { let needed_approvals = 4; let n_validators = 8; - let mut approval_entry: ApprovalEntry = approval_db::v1::ApprovalEntry { + let mut approval_entry: ApprovalEntry = approval_db::v2::ApprovalEntry { tranches: Vec::new(), - assignments: bitvec![u8, BitOrderLsb0; 0; n_validators], + assigned_validators: bitvec![u8, BitOrderLsb0; 0; n_validators], our_assignment: None, our_approval_sig: None, backing_group: GroupIndex(0), @@ -843,9 +843,9 @@ mod tests { let needed_approvals = 4; let n_validators = 8; - let mut approval_entry: ApprovalEntry = approval_db::v1::ApprovalEntry { + let mut approval_entry: ApprovalEntry = approval_db::v2::ApprovalEntry { tranches: Vec::new(), - assignments: bitvec![u8, BitOrderLsb0; 0; n_validators], + assigned_validators: bitvec![u8, BitOrderLsb0; 0; n_validators], our_assignment: None, our_approval_sig: None, backing_group: GroupIndex(0), @@ -934,9 +934,9 @@ mod tests { let needed_approvals = 4; let n_validators = 8; - let mut approval_entry: ApprovalEntry = approval_db::v1::ApprovalEntry { + let mut approval_entry: ApprovalEntry = approval_db::v2::ApprovalEntry { tranches: Vec::new(), - assignments: bitvec![u8, BitOrderLsb0; 0; n_validators], + assigned_validators: bitvec![u8, BitOrderLsb0; 0; n_validators], our_assignment: None, our_approval_sig: None, backing_group: GroupIndex(0), @@ -1041,15 +1041,15 @@ mod tests { candidate.mark_approval(ValidatorIndex(i)); } - let approval_entry = approval_db::v1::ApprovalEntry { + let approval_entry = approval_db::v2::ApprovalEntry { tranches: vec![ // Assignments with invalid validator indexes. 
- approval_db::v1::TrancheEntry { + approval_db::v2::TrancheEntry { tranche: 1, assignments: (2..5).map(|i| (ValidatorIndex(i), 1.into())).collect(), }, ], - assignments: bitvec![u8, BitOrderLsb0; 1; 3], + assigned_validators: bitvec![u8, BitOrderLsb0; 1; 3], our_assignment: None, our_approval_sig: None, backing_group: GroupIndex(0), @@ -1094,12 +1094,12 @@ mod tests { ]; for test_tranche in test_tranches { - let mut approval_entry: ApprovalEntry = approval_db::v1::ApprovalEntry { + let mut approval_entry: ApprovalEntry = approval_db::v2::ApprovalEntry { tranches: Vec::new(), backing_group: GroupIndex(0), our_assignment: None, our_approval_sig: None, - assignments: bitvec![u8, BitOrderLsb0; 0; 3], + assigned_validators: bitvec![u8, BitOrderLsb0; 0; 3], approved: false, } .into(); diff --git a/polkadot/node/core/approval-voting/src/approval_db/mod.rs b/polkadot/node/core/approval-voting/src/approval_db/mod.rs index f0ace95613da8d39165813e728804605e27cd373..20fb6aa82d8d902885e974d59990b29bf683f55d 100644 --- a/polkadot/node/core/approval-voting/src/approval_db/mod.rs +++ b/polkadot/node/core/approval-voting/src/approval_db/mod.rs @@ -31,3 +31,4 @@ //! time being we share the same DB with the rest of Substrate. pub mod v1; +pub mod v2; diff --git a/polkadot/node/core/approval-voting/src/approval_db/v1/mod.rs b/polkadot/node/core/approval-voting/src/approval_db/v1/mod.rs index c31389269d2ef2093ffb355738ec0f998505b347..011d0a559c022d2294acf0cdcc22364bc301a8dd 100644 --- a/polkadot/node/core/approval-voting/src/approval_db/v1/mod.rs +++ b/polkadot/node/core/approval-voting/src/approval_db/v1/mod.rs @@ -23,144 +23,15 @@ //! require a db migration (check `node/service/src/parachains_db/upgrade.rs`). use parity_scale_codec::{Decode, Encode}; -use polkadot_node_primitives::approval::{AssignmentCert, DelayTranche}; -use polkadot_node_subsystem::{SubsystemError, SubsystemResult}; -use polkadot_node_subsystem_util::database::{DBTransaction, Database}; +use polkadot_node_primitives::approval::v1::{AssignmentCert, DelayTranche}; use polkadot_primitives::{ BlockNumber, CandidateHash, CandidateReceipt, CoreIndex, GroupIndex, Hash, SessionIndex, ValidatorIndex, ValidatorSignature, }; use sp_consensus_slots::Slot; +use std::collections::BTreeMap; -use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; -use std::{collections::BTreeMap, sync::Arc}; - -use crate::{ - backend::{Backend, BackendWriteOp}, - persisted_entries, -}; - -const STORED_BLOCKS_KEY: &[u8] = b"Approvals_StoredBlocks"; - -#[cfg(test)] -pub mod tests; - -/// `DbBackend` is a concrete implementation of the higher-level Backend trait -pub struct DbBackend { - inner: Arc, - config: Config, -} - -impl DbBackend { - /// Create a new [`DbBackend`] with the supplied key-value store and - /// config. 
- pub fn new(db: Arc, config: Config) -> Self { - DbBackend { inner: db, config } - } -} - -impl Backend for DbBackend { - fn load_block_entry( - &self, - block_hash: &Hash, - ) -> SubsystemResult> { - load_block_entry(&*self.inner, &self.config, block_hash).map(|e| e.map(Into::into)) - } - - fn load_candidate_entry( - &self, - candidate_hash: &CandidateHash, - ) -> SubsystemResult> { - load_candidate_entry(&*self.inner, &self.config, candidate_hash).map(|e| e.map(Into::into)) - } - - fn load_blocks_at_height(&self, block_height: &BlockNumber) -> SubsystemResult> { - load_blocks_at_height(&*self.inner, &self.config, block_height) - } - - fn load_all_blocks(&self) -> SubsystemResult> { - load_all_blocks(&*self.inner, &self.config) - } - - fn load_stored_blocks(&self) -> SubsystemResult> { - load_stored_blocks(&*self.inner, &self.config) - } - - /// Atomically write the list of operations, with later operations taking precedence over prior. - fn write(&mut self, ops: I) -> SubsystemResult<()> - where - I: IntoIterator, - { - let mut tx = DBTransaction::new(); - for op in ops { - match op { - BackendWriteOp::WriteStoredBlockRange(stored_block_range) => { - tx.put_vec( - self.config.col_approval_data, - &STORED_BLOCKS_KEY, - stored_block_range.encode(), - ); - }, - BackendWriteOp::DeleteStoredBlockRange => { - tx.delete(self.config.col_approval_data, &STORED_BLOCKS_KEY); - }, - BackendWriteOp::WriteBlocksAtHeight(h, blocks) => { - tx.put_vec( - self.config.col_approval_data, - &blocks_at_height_key(h), - blocks.encode(), - ); - }, - BackendWriteOp::DeleteBlocksAtHeight(h) => { - tx.delete(self.config.col_approval_data, &blocks_at_height_key(h)); - }, - BackendWriteOp::WriteBlockEntry(block_entry) => { - let block_entry: BlockEntry = block_entry.into(); - tx.put_vec( - self.config.col_approval_data, - &block_entry_key(&block_entry.block_hash), - block_entry.encode(), - ); - }, - BackendWriteOp::DeleteBlockEntry(hash) => { - tx.delete(self.config.col_approval_data, &block_entry_key(&hash)); - }, - BackendWriteOp::WriteCandidateEntry(candidate_entry) => { - let candidate_entry: CandidateEntry = candidate_entry.into(); - tx.put_vec( - self.config.col_approval_data, - &candidate_entry_key(&candidate_entry.candidate.hash()), - candidate_entry.encode(), - ); - }, - BackendWriteOp::DeleteCandidateEntry(candidate_hash) => { - tx.delete(self.config.col_approval_data, &candidate_entry_key(&candidate_hash)); - }, - } - } - - self.inner.write(tx).map_err(|e| e.into()) - } -} - -/// A range from earliest..last block number stored within the DB. -#[derive(Encode, Decode, Debug, Clone, PartialEq)] -pub struct StoredBlockRange(pub BlockNumber, pub BlockNumber); - -// slot_duration * 2 + DelayTranche gives the number of delay tranches since the -// unix epoch. -#[derive(Encode, Decode, Clone, Copy, Debug, PartialEq)] -pub struct Tick(u64); - -/// Convenience type definition -pub type Bitfield = BitVec; - -/// The database config. -#[derive(Debug, Clone, Copy)] -pub struct Config { - /// The column family in the database where data is stored. - pub col_approval_data: u32, -} +use super::v2::Bitfield; /// Details pertaining to our assignment on a block. #[derive(Encode, Decode, Debug, Clone, PartialEq)] @@ -171,15 +42,7 @@ pub struct OurAssignment { // Whether the assignment has been triggered already. pub triggered: bool, } - -/// Metadata regarding a specific tranche of assignments for a specific candidate. 
-#[derive(Encode, Decode, Debug, Clone, PartialEq)] -pub struct TrancheEntry { - pub tranche: DelayTranche, - // Assigned validators, and the instant we received their assignment, rounded - // to the nearest tick. - pub assignments: Vec<(ValidatorIndex, Tick)>, -} +use super::v2::TrancheEntry; /// Metadata regarding approval of a particular candidate within the context of some /// particular block. @@ -226,126 +89,3 @@ pub struct BlockEntry { pub approved_bitfield: Bitfield, pub children: Vec, } - -impl From for Tick { - fn from(tick: crate::Tick) -> Tick { - Tick(tick) - } -} - -impl From for crate::Tick { - fn from(tick: Tick) -> crate::Tick { - tick.0 - } -} - -/// Errors while accessing things from the DB. -#[derive(Debug, derive_more::From, derive_more::Display)] -pub enum Error { - Io(std::io::Error), - InvalidDecoding(parity_scale_codec::Error), -} - -impl std::error::Error for Error {} - -/// Result alias for DB errors. -pub type Result = std::result::Result; - -pub(crate) fn load_decode( - store: &dyn Database, - col_approval_data: u32, - key: &[u8], -) -> Result> { - match store.get(col_approval_data, key)? { - None => Ok(None), - Some(raw) => D::decode(&mut &raw[..]).map(Some).map_err(Into::into), - } -} - -/// The key a given block entry is stored under. -pub(crate) fn block_entry_key(block_hash: &Hash) -> [u8; 46] { - const BLOCK_ENTRY_PREFIX: [u8; 14] = *b"Approvals_blck"; - - let mut key = [0u8; 14 + 32]; - key[0..14].copy_from_slice(&BLOCK_ENTRY_PREFIX); - key[14..][..32].copy_from_slice(block_hash.as_ref()); - - key -} - -/// The key a given candidate entry is stored under. -pub(crate) fn candidate_entry_key(candidate_hash: &CandidateHash) -> [u8; 46] { - const CANDIDATE_ENTRY_PREFIX: [u8; 14] = *b"Approvals_cand"; - - let mut key = [0u8; 14 + 32]; - key[0..14].copy_from_slice(&CANDIDATE_ENTRY_PREFIX); - key[14..][..32].copy_from_slice(candidate_hash.0.as_ref()); - - key -} - -/// The key a set of block hashes corresponding to a block number is stored under. -pub(crate) fn blocks_at_height_key(block_number: BlockNumber) -> [u8; 16] { - const BLOCKS_AT_HEIGHT_PREFIX: [u8; 12] = *b"Approvals_at"; - - let mut key = [0u8; 12 + 4]; - key[0..12].copy_from_slice(&BLOCKS_AT_HEIGHT_PREFIX); - block_number.using_encoded(|s| key[12..16].copy_from_slice(s)); - - key -} - -/// Return all blocks which have entries in the DB, ascending, by height. -pub fn load_all_blocks(store: &dyn Database, config: &Config) -> SubsystemResult> { - let mut hashes = Vec::new(); - if let Some(stored_blocks) = load_stored_blocks(store, config)? { - for height in stored_blocks.0..stored_blocks.1 { - let blocks = load_blocks_at_height(store, config, &height)?; - hashes.extend(blocks); - } - } - - Ok(hashes) -} - -/// Load the stored-blocks key from the state. -pub fn load_stored_blocks( - store: &dyn Database, - config: &Config, -) -> SubsystemResult> { - load_decode(store, config.col_approval_data, STORED_BLOCKS_KEY) - .map_err(|e| SubsystemError::with_origin("approval-voting", e)) -} - -/// Load a blocks-at-height entry for a given block number. -pub fn load_blocks_at_height( - store: &dyn Database, - config: &Config, - block_number: &BlockNumber, -) -> SubsystemResult> { - load_decode(store, config.col_approval_data, &blocks_at_height_key(*block_number)) - .map(|x| x.unwrap_or_default()) - .map_err(|e| SubsystemError::with_origin("approval-voting", e)) -} - -/// Load a block entry from the aux store. 
-pub fn load_block_entry( - store: &dyn Database, - config: &Config, - block_hash: &Hash, -) -> SubsystemResult> { - load_decode(store, config.col_approval_data, &block_entry_key(block_hash)) - .map(|u: Option| u.map(|v| v.into())) - .map_err(|e| SubsystemError::with_origin("approval-voting", e)) -} - -/// Load a candidate entry from the aux store. -pub fn load_candidate_entry( - store: &dyn Database, - config: &Config, - candidate_hash: &CandidateHash, -) -> SubsystemResult> { - load_decode(store, config.col_approval_data, &candidate_entry_key(candidate_hash)) - .map(|u: Option| u.map(|v| v.into())) - .map_err(|e| SubsystemError::with_origin("approval-voting", e)) -} diff --git a/polkadot/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs b/polkadot/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs new file mode 100644 index 0000000000000000000000000000000000000000..74e997c7af8422d6075a4b67747aa66013dd1ed4 --- /dev/null +++ b/polkadot/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs @@ -0,0 +1,244 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Approval DB migration helpers. +use super::*; +use crate::backend::Backend; +use polkadot_node_primitives::approval::v1::{ + AssignmentCert, AssignmentCertKind, VrfOutput, VrfProof, VrfSignature, RELAY_VRF_MODULO_CONTEXT, +}; +use polkadot_node_subsystem_util::database::Database; +use sp_application_crypto::sp_core::H256; +use std::{collections::HashSet, sync::Arc}; + +fn dummy_assignment_cert(kind: AssignmentCertKind) -> AssignmentCert { + let ctx = schnorrkel::signing_context(RELAY_VRF_MODULO_CONTEXT); + let msg = b"test-garbage"; + let mut prng = rand_core::OsRng; + let keypair = schnorrkel::Keypair::generate_with(&mut prng); + let (inout, proof, _) = keypair.vrf_sign(ctx.bytes(msg)); + let out = inout.to_output(); + + AssignmentCert { kind, vrf: VrfSignature { output: VrfOutput(out), proof: VrfProof(proof) } } +} + +fn make_block_entry_v1( + block_hash: Hash, + parent_hash: Hash, + block_number: BlockNumber, + candidates: Vec<(CoreIndex, CandidateHash)>, +) -> crate::approval_db::v1::BlockEntry { + crate::approval_db::v1::BlockEntry { + block_hash, + parent_hash, + block_number, + session: 1, + slot: Slot::from(1), + relay_vrf_story: [0u8; 32], + approved_bitfield: make_bitvec(candidates.len()), + candidates, + children: Vec::new(), + } +} + +fn make_bitvec(len: usize) -> BitVec { + bitvec::bitvec![u8, BitOrderLsb0; 0; len] +} + +/// Migrates `OurAssignment`, `CandidateEntry` and `ApprovalEntry` to version 2. +/// Returns on any error. +/// Must only be used in parachains DB migration code - `polkadot-service` crate. +pub fn v1_to_v2(db: Arc, config: Config) -> Result<()> { + let mut backend = crate::DbBackend::new(db, config); + let all_blocks = backend + .load_all_blocks() + .map_err(|e| Error::InternalError(e))? 
+ .iter() + .filter_map(|block_hash| { + backend + .load_block_entry_v1(block_hash) + .map_err(|e| Error::InternalError(e)) + .ok()? + }) + .collect::>(); + + gum::info!( + target: crate::LOG_TARGET, + "Migrating candidate entries on top of {} blocks", + all_blocks.len() + ); + + let mut overlay = crate::OverlayedBackend::new(&backend); + let mut counter = 0; + // Get all candidate entries, approval entries and convert each of them. + for block in all_blocks { + for (_core_index, candidate_hash) in block.candidates() { + // Loading the candidate will also perform the conversion to the updated format and + // return that representation. + if let Some(candidate_entry) = backend + .load_candidate_entry_v1(&candidate_hash) + .map_err(|e| Error::InternalError(e))? + { + // Write the updated representation. + overlay.write_candidate_entry(candidate_entry); + counter += 1; + } + } + overlay.write_block_entry(block); + } + + gum::info!(target: crate::LOG_TARGET, "Migrated {} entries", counter); + + // Commit all changes to DB. + let write_ops = overlay.into_write_ops(); + backend.write(write_ops).unwrap(); + + Ok(()) +} + +// Checks that the migration does not leave the DB in an inconsistent state. +// This function is to be used in tests. +pub fn v1_to_v2_sanity_check( + db: Arc, + config: Config, + expected_candidates: HashSet, +) -> Result<()> { + let backend = crate::DbBackend::new(db, config); + + let all_blocks = backend + .load_all_blocks() + .unwrap() + .iter() + .map(|block_hash| backend.load_block_entry(block_hash).unwrap().unwrap()) + .collect::>(); + + let mut candidates = HashSet::new(); + + // Iterate all blocks and approval entries. + for block in all_blocks { + for (_core_index, candidate_hash) in block.candidates() { + // Loading the candidate will also perform the conversion to the updated format and + // return that representation. + if let Some(candidate_entry) = backend.load_candidate_entry(&candidate_hash).unwrap() { + candidates.insert(candidate_entry.candidate.hash()); + } + } + } + + assert_eq!(candidates, expected_candidates); + + Ok(()) +} + +// Fills the db with dummy data in the v1 scheme.
+pub fn v1_to_v2_fill_test_data( + db: Arc, + config: Config, + dummy_candidate_create: F, +) -> Result> +where + F: Fn(H256) -> CandidateReceipt, +{ + let mut backend = crate::DbBackend::new(db.clone(), config); + let mut overlay_db = crate::OverlayedBackend::new(&backend); + let mut expected_candidates = HashSet::new(); + + const RELAY_BLOCK_COUNT: u32 = 10; + + let range = StoredBlockRange(1, 11); + overlay_db.write_stored_block_range(range.clone()); + + for relay_number in 1..=RELAY_BLOCK_COUNT { + let relay_hash = Hash::repeat_byte(relay_number as u8); + let assignment_core_index = CoreIndex(relay_number); + let candidate = dummy_candidate_create(relay_hash); + let candidate_hash = candidate.hash(); + + let at_height = vec![relay_hash]; + + let block_entry = make_block_entry_v1( + relay_hash, + Default::default(), + relay_number, + vec![(assignment_core_index, candidate_hash)], + ); + + let dummy_assignment = crate::approval_db::v1::OurAssignment { + cert: dummy_assignment_cert(AssignmentCertKind::RelayVRFModulo { sample: 0 }).into(), + tranche: 0, + validator_index: ValidatorIndex(0), + triggered: false, + }; + + let candidate_entry = crate::approval_db::v1::CandidateEntry { + candidate, + session: 123, + block_assignments: vec![( + relay_hash, + crate::approval_db::v1::ApprovalEntry { + tranches: Vec::new(), + backing_group: GroupIndex(1), + our_assignment: Some(dummy_assignment), + our_approval_sig: None, + assignments: Default::default(), + approved: false, + }, + )] + .into_iter() + .collect(), + approvals: Default::default(), + }; + + overlay_db.write_blocks_at_height(relay_number, at_height.clone()); + expected_candidates.insert(candidate_entry.candidate.hash()); + + db.write(write_candidate_entry_v1(candidate_entry, config)).unwrap(); + db.write(write_block_entry_v1(block_entry, config)).unwrap(); + } + + let write_ops = overlay_db.into_write_ops(); + backend.write(write_ops).unwrap(); + + Ok(expected_candidates) +} + +// Low level DB helper to write a candidate entry in v1 scheme. +fn write_candidate_entry_v1( + candidate_entry: crate::approval_db::v1::CandidateEntry, + config: Config, +) -> DBTransaction { + let mut tx = DBTransaction::new(); + tx.put_vec( + config.col_approval_data, + &candidate_entry_key(&candidate_entry.candidate.hash()), + candidate_entry.encode(), + ); + tx +} + +// Low level DB helper to write a block entry in v1 scheme. +fn write_block_entry_v1( + block_entry: crate::approval_db::v1::BlockEntry, + config: Config, +) -> DBTransaction { + let mut tx = DBTransaction::new(); + tx.put_vec( + config.col_approval_data, + &block_entry_key(&block_entry.block_hash), + block_entry.encode(), + ); + tx +} diff --git a/polkadot/node/core/approval-voting/src/approval_db/v2/mod.rs b/polkadot/node/core/approval-voting/src/approval_db/v2/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..66df6ee8f653a992f09428a9c0b2a418aef2209d --- /dev/null +++ b/polkadot/node/core/approval-voting/src/approval_db/v2/mod.rs @@ -0,0 +1,394 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Version 2 of the DB schema. + +use parity_scale_codec::{Decode, Encode}; +use polkadot_node_primitives::approval::{v1::DelayTranche, v2::AssignmentCertV2}; +use polkadot_node_subsystem::{SubsystemError, SubsystemResult}; +use polkadot_node_subsystem_util::database::{DBTransaction, Database}; +use polkadot_primitives::{ + BlockNumber, CandidateHash, CandidateReceipt, CoreIndex, GroupIndex, Hash, SessionIndex, + ValidatorIndex, ValidatorSignature, +}; + +use sp_consensus_slots::Slot; + +use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; +use std::{collections::BTreeMap, sync::Arc}; + +use crate::{ + backend::{Backend, BackendWriteOp, V1ReadBackend}, + persisted_entries, +}; + +const STORED_BLOCKS_KEY: &[u8] = b"Approvals_StoredBlocks"; + +pub mod migration_helpers; +#[cfg(test)] +pub mod tests; + +/// `DbBackend` is a concrete implementation of the higher-level Backend trait +pub struct DbBackend { + inner: Arc, + config: Config, +} + +impl DbBackend { + /// Create a new [`DbBackend`] with the supplied key-value store and + /// config. + pub fn new(db: Arc, config: Config) -> Self { + DbBackend { inner: db, config } + } +} + +impl V1ReadBackend for DbBackend { + fn load_candidate_entry_v1( + &self, + candidate_hash: &CandidateHash, + ) -> SubsystemResult> { + load_candidate_entry_v1(&*self.inner, &self.config, candidate_hash) + .map(|e| e.map(Into::into)) + } + + fn load_block_entry_v1( + &self, + block_hash: &Hash, + ) -> SubsystemResult> { + load_block_entry_v1(&*self.inner, &self.config, block_hash).map(|e| e.map(Into::into)) + } +} + +impl Backend for DbBackend { + fn load_block_entry( + &self, + block_hash: &Hash, + ) -> SubsystemResult> { + load_block_entry(&*self.inner, &self.config, block_hash).map(|e| e.map(Into::into)) + } + + fn load_candidate_entry( + &self, + candidate_hash: &CandidateHash, + ) -> SubsystemResult> { + load_candidate_entry(&*self.inner, &self.config, candidate_hash).map(|e| e.map(Into::into)) + } + + fn load_blocks_at_height(&self, block_height: &BlockNumber) -> SubsystemResult> { + load_blocks_at_height(&*self.inner, &self.config, block_height) + } + + fn load_all_blocks(&self) -> SubsystemResult> { + load_all_blocks(&*self.inner, &self.config) + } + + fn load_stored_blocks(&self) -> SubsystemResult> { + load_stored_blocks(&*self.inner, &self.config) + } + + /// Atomically write the list of operations, with later operations taking precedence over prior. 
+ fn write(&mut self, ops: I) -> SubsystemResult<()> + where + I: IntoIterator, + { + let mut tx = DBTransaction::new(); + for op in ops { + match op { + BackendWriteOp::WriteStoredBlockRange(stored_block_range) => { + tx.put_vec( + self.config.col_approval_data, + &STORED_BLOCKS_KEY, + stored_block_range.encode(), + ); + }, + BackendWriteOp::DeleteStoredBlockRange => { + tx.delete(self.config.col_approval_data, &STORED_BLOCKS_KEY); + }, + BackendWriteOp::WriteBlocksAtHeight(h, blocks) => { + tx.put_vec( + self.config.col_approval_data, + &blocks_at_height_key(h), + blocks.encode(), + ); + }, + BackendWriteOp::DeleteBlocksAtHeight(h) => { + tx.delete(self.config.col_approval_data, &blocks_at_height_key(h)); + }, + BackendWriteOp::WriteBlockEntry(block_entry) => { + let block_entry: BlockEntry = block_entry.into(); + tx.put_vec( + self.config.col_approval_data, + &block_entry_key(&block_entry.block_hash), + block_entry.encode(), + ); + }, + BackendWriteOp::DeleteBlockEntry(hash) => { + tx.delete(self.config.col_approval_data, &block_entry_key(&hash)); + }, + BackendWriteOp::WriteCandidateEntry(candidate_entry) => { + let candidate_entry: CandidateEntry = candidate_entry.into(); + tx.put_vec( + self.config.col_approval_data, + &candidate_entry_key(&candidate_entry.candidate.hash()), + candidate_entry.encode(), + ); + }, + BackendWriteOp::DeleteCandidateEntry(candidate_hash) => { + tx.delete(self.config.col_approval_data, &candidate_entry_key(&candidate_hash)); + }, + } + } + + self.inner.write(tx).map_err(|e| e.into()) + } +} + +/// A range from earliest..last block number stored within the DB. +#[derive(Encode, Decode, Debug, Clone, PartialEq)] +pub struct StoredBlockRange(pub BlockNumber, pub BlockNumber); + +// slot_duration * 2 + DelayTranche gives the number of delay tranches since the +// unix epoch. +#[derive(Encode, Decode, Clone, Copy, Debug, PartialEq)] +pub struct Tick(u64); + +/// Convenience type definition +pub type Bitfield = BitVec; + +/// The database config. +#[derive(Debug, Clone, Copy)] +pub struct Config { + /// The column family in the database where data is stored. + pub col_approval_data: u32, +} + +/// Details pertaining to our assignment on a block. +#[derive(Encode, Decode, Debug, Clone, PartialEq)] +pub struct OurAssignment { + /// Our assignment certificate. + pub cert: AssignmentCertV2, + /// The tranche for which the assignment refers to. + pub tranche: DelayTranche, + /// Our validator index for the session in which the candidates were included. + pub validator_index: ValidatorIndex, + /// Whether the assignment has been triggered already. + pub triggered: bool, +} + +/// Metadata regarding a specific tranche of assignments for a specific candidate. +#[derive(Encode, Decode, Debug, Clone, PartialEq)] +pub struct TrancheEntry { + pub tranche: DelayTranche, + // Assigned validators, and the instant we received their assignment, rounded + // to the nearest tick. + pub assignments: Vec<(ValidatorIndex, Tick)>, +} + +/// Metadata regarding approval of a particular candidate within the context of some +/// particular block. +#[derive(Encode, Decode, Debug, Clone, PartialEq)] +pub struct ApprovalEntry { + pub tranches: Vec, + pub backing_group: GroupIndex, + pub our_assignment: Option, + pub our_approval_sig: Option, + // `n_validators` bits. + pub assigned_validators: Bitfield, + pub approved: bool, +} + +/// Metadata regarding approval of a particular candidate. 
+#[derive(Encode, Decode, Debug, Clone, PartialEq)] +pub struct CandidateEntry { + pub candidate: CandidateReceipt, + pub session: SessionIndex, + // Assignments are based on blocks, so we need to track assignments separately + // based on the block we are looking at. + pub block_assignments: BTreeMap, + pub approvals: Bitfield, +} + +/// Metadata regarding approval of a particular block, by way of approval of the +/// candidates contained within it. +#[derive(Encode, Decode, Debug, Clone, PartialEq)] +pub struct BlockEntry { + pub block_hash: Hash, + pub block_number: BlockNumber, + pub parent_hash: Hash, + pub session: SessionIndex, + pub slot: Slot, + /// Random bytes derived from the VRF submitted within the block by the block + /// author as a credential and used as input to approval assignment criteria. + pub relay_vrf_story: [u8; 32], + // The candidates included as-of this block and the index of the core they are + // leaving. Sorted ascending by core index. + pub candidates: Vec<(CoreIndex, CandidateHash)>, + // A bitfield where the i'th bit corresponds to the i'th candidate in `candidates`. + // The i'th bit is `true` iff the candidate has been approved in the context of this + // block. The block can be considered approved if the bitfield has all bits set to `true`. + pub approved_bitfield: Bitfield, + pub children: Vec, + // Assignments we already distributed. A 1 bit means the candidate index for which + // we already have sent out an assignment. We need this to avoid distributing + // multiple core assignments more than once. + pub distributed_assignments: Bitfield, +} + +impl From for Tick { + fn from(tick: crate::Tick) -> Tick { + Tick(tick) + } +} + +impl From for crate::Tick { + fn from(tick: Tick) -> crate::Tick { + tick.0 + } +} + +/// Errors while accessing things from the DB. +#[derive(Debug, derive_more::From, derive_more::Display)] +pub enum Error { + Io(std::io::Error), + InvalidDecoding(parity_scale_codec::Error), + InternalError(SubsystemError), +} + +impl std::error::Error for Error {} + +/// Result alias for DB errors. +pub type Result = std::result::Result; + +pub(crate) fn load_decode( + store: &dyn Database, + col_approval_data: u32, + key: &[u8], +) -> Result> { + match store.get(col_approval_data, key)? { + None => Ok(None), + Some(raw) => D::decode(&mut &raw[..]).map(Some).map_err(Into::into), + } +} + +/// The key a given block entry is stored under. +pub(crate) fn block_entry_key(block_hash: &Hash) -> [u8; 46] { + const BLOCK_ENTRY_PREFIX: [u8; 14] = *b"Approvals_blck"; + + let mut key = [0u8; 14 + 32]; + key[0..14].copy_from_slice(&BLOCK_ENTRY_PREFIX); + key[14..][..32].copy_from_slice(block_hash.as_ref()); + + key +} + +/// The key a given candidate entry is stored under. +pub(crate) fn candidate_entry_key(candidate_hash: &CandidateHash) -> [u8; 46] { + const CANDIDATE_ENTRY_PREFIX: [u8; 14] = *b"Approvals_cand"; + + let mut key = [0u8; 14 + 32]; + key[0..14].copy_from_slice(&CANDIDATE_ENTRY_PREFIX); + key[14..][..32].copy_from_slice(candidate_hash.0.as_ref()); + + key +} + +/// The key a set of block hashes corresponding to a block number is stored under. +pub(crate) fn blocks_at_height_key(block_number: BlockNumber) -> [u8; 16] { + const BLOCKS_AT_HEIGHT_PREFIX: [u8; 12] = *b"Approvals_at"; + + let mut key = [0u8; 12 + 4]; + key[0..12].copy_from_slice(&BLOCKS_AT_HEIGHT_PREFIX); + block_number.using_encoded(|s| key[12..16].copy_from_slice(s)); + + key +} + +/// Return all blocks which have entries in the DB, ascending, by height. 
+pub fn load_all_blocks(store: &dyn Database, config: &Config) -> SubsystemResult> { + let mut hashes = Vec::new(); + if let Some(stored_blocks) = load_stored_blocks(store, config)? { + for height in stored_blocks.0..stored_blocks.1 { + let blocks = load_blocks_at_height(store, config, &height)?; + hashes.extend(blocks); + } + } + + Ok(hashes) +} + +/// Load the stored-blocks key from the state. +pub fn load_stored_blocks( + store: &dyn Database, + config: &Config, +) -> SubsystemResult> { + load_decode(store, config.col_approval_data, STORED_BLOCKS_KEY) + .map_err(|e| SubsystemError::with_origin("approval-voting", e)) +} + +/// Load a blocks-at-height entry for a given block number. +pub fn load_blocks_at_height( + store: &dyn Database, + config: &Config, + block_number: &BlockNumber, +) -> SubsystemResult> { + load_decode(store, config.col_approval_data, &blocks_at_height_key(*block_number)) + .map(|x| x.unwrap_or_default()) + .map_err(|e| SubsystemError::with_origin("approval-voting", e)) +} + +/// Load a block entry from the aux store. +pub fn load_block_entry( + store: &dyn Database, + config: &Config, + block_hash: &Hash, +) -> SubsystemResult> { + load_decode(store, config.col_approval_data, &block_entry_key(block_hash)) + .map(|u: Option| u.map(|v| v.into())) + .map_err(|e| SubsystemError::with_origin("approval-voting", e)) +} + +/// Load a candidate entry from the aux store in current version format. +pub fn load_candidate_entry( + store: &dyn Database, + config: &Config, + candidate_hash: &CandidateHash, +) -> SubsystemResult> { + load_decode(store, config.col_approval_data, &candidate_entry_key(candidate_hash)) + .map(|u: Option| u.map(|v| v.into())) + .map_err(|e| SubsystemError::with_origin("approval-voting", e)) +} + +/// Load a candidate entry from the aux store in v1 format. +pub fn load_candidate_entry_v1( + store: &dyn Database, + config: &Config, + candidate_hash: &CandidateHash, +) -> SubsystemResult> { + load_decode(store, config.col_approval_data, &candidate_entry_key(candidate_hash)) + .map(|u: Option| u.map(|v| v.into())) + .map_err(|e| SubsystemError::with_origin("approval-voting", e)) +} + +/// Load a block entry from the aux store in v1 format. +pub fn load_block_entry_v1( + store: &dyn Database, + config: &Config, + block_hash: &Hash, +) -> SubsystemResult> { + load_decode(store, config.col_approval_data, &block_entry_key(block_hash)) + .map(|u: Option| u.map(|v| v.into())) + .map_err(|e| SubsystemError::with_origin("approval-voting", e)) +} diff --git a/polkadot/node/core/approval-voting/src/approval_db/v2/tests.rs b/polkadot/node/core/approval-voting/src/approval_db/v2/tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..50a5a924ca8dba696e1f3e2d0465a91e837194ec --- /dev/null +++ b/polkadot/node/core/approval-voting/src/approval_db/v2/tests.rs @@ -0,0 +1,570 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. 
If not, see . + +//! Tests for the aux-schema of approval voting. + +use super::{DbBackend, StoredBlockRange, *}; +use crate::{ + backend::{Backend, OverlayedBackend}, + ops::{add_block_entry, canonicalize, force_approve, NewCandidateInfo}, +}; +use polkadot_node_subsystem_util::database::Database; +use polkadot_primitives::Id as ParaId; +use std::{collections::HashMap, sync::Arc}; + +use ::test_helpers::{dummy_candidate_receipt, dummy_candidate_receipt_bad_sig, dummy_hash}; + +const DATA_COL: u32 = 0; + +const NUM_COLUMNS: u32 = 1; + +const TEST_CONFIG: Config = Config { col_approval_data: DATA_COL }; + +fn make_db() -> (DbBackend, Arc) { + let db = kvdb_memorydb::create(NUM_COLUMNS); + let db = polkadot_node_subsystem_util::database::kvdb_impl::DbAdapter::new(db, &[]); + let db_writer: Arc = Arc::new(db); + (DbBackend::new(db_writer.clone(), TEST_CONFIG), db_writer) +} + +fn make_block_entry( + block_hash: Hash, + parent_hash: Hash, + block_number: BlockNumber, + candidates: Vec<(CoreIndex, CandidateHash)>, +) -> BlockEntry { + BlockEntry { + block_hash, + parent_hash, + block_number, + session: 1, + slot: Slot::from(1), + relay_vrf_story: [0u8; 32], + approved_bitfield: make_bitvec(candidates.len()), + candidates, + children: Vec::new(), + distributed_assignments: Default::default(), + } +} + +fn make_bitvec(len: usize) -> BitVec { + bitvec::bitvec![u8, BitOrderLsb0; 0; len] +} + +fn make_candidate(para_id: ParaId, relay_parent: Hash) -> CandidateReceipt { + let mut c = dummy_candidate_receipt(dummy_hash()); + + c.descriptor.para_id = para_id; + c.descriptor.relay_parent = relay_parent; + + c +} + +#[test] +fn read_write() { + let (mut db, store) = make_db(); + + let hash_a = Hash::repeat_byte(1); + let hash_b = Hash::repeat_byte(2); + let candidate_hash = dummy_candidate_receipt_bad_sig(dummy_hash(), None).hash(); + + let range = StoredBlockRange(10, 20); + let at_height = vec![hash_a, hash_b]; + + let block_entry = + make_block_entry(hash_a, Default::default(), 1, vec![(CoreIndex(0), candidate_hash)]); + + let candidate_entry = CandidateEntry { + candidate: dummy_candidate_receipt_bad_sig(dummy_hash(), None), + session: 5, + block_assignments: vec![( + hash_a, + ApprovalEntry { + tranches: Vec::new(), + backing_group: GroupIndex(1), + our_assignment: None, + our_approval_sig: None, + assigned_validators: Default::default(), + approved: false, + }, + )] + .into_iter() + .collect(), + approvals: Default::default(), + }; + + let mut overlay_db = OverlayedBackend::new(&db); + overlay_db.write_stored_block_range(range.clone()); + overlay_db.write_blocks_at_height(1, at_height.clone()); + overlay_db.write_block_entry(block_entry.clone().into()); + overlay_db.write_candidate_entry(candidate_entry.clone().into()); + + let write_ops = overlay_db.into_write_ops(); + db.write(write_ops).unwrap(); + + assert_eq!(load_stored_blocks(store.as_ref(), &TEST_CONFIG).unwrap(), Some(range)); + assert_eq!(load_blocks_at_height(store.as_ref(), &TEST_CONFIG, &1).unwrap(), at_height); + assert_eq!( + load_block_entry(store.as_ref(), &TEST_CONFIG, &hash_a).unwrap(), + Some(block_entry.into()) + ); + assert_eq!( + load_candidate_entry(store.as_ref(), &TEST_CONFIG, &candidate_hash).unwrap(), + Some(candidate_entry.into()), + ); + + let mut overlay_db = OverlayedBackend::new(&db); + overlay_db.delete_blocks_at_height(1); + overlay_db.delete_block_entry(&hash_a); + overlay_db.delete_candidate_entry(&candidate_hash); + let write_ops = overlay_db.into_write_ops(); + db.write(write_ops).unwrap(); + + 
assert!(load_blocks_at_height(store.as_ref(), &TEST_CONFIG, &1).unwrap().is_empty()); + assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &hash_a).unwrap().is_none()); + assert!(load_candidate_entry(store.as_ref(), &TEST_CONFIG, &candidate_hash) + .unwrap() + .is_none()); +} + +#[test] +fn add_block_entry_works() { + let (mut db, store) = make_db(); + + let parent_hash = Hash::repeat_byte(1); + let block_hash_a = Hash::repeat_byte(2); + let block_hash_b = Hash::repeat_byte(69); + + let candidate_receipt_a = make_candidate(ParaId::from(1_u32), parent_hash); + let candidate_receipt_b = make_candidate(ParaId::from(2_u32), parent_hash); + + let candidate_hash_a = candidate_receipt_a.hash(); + let candidate_hash_b = candidate_receipt_b.hash(); + + let block_number = 10; + + let block_entry_a = make_block_entry( + block_hash_a, + parent_hash, + block_number, + vec![(CoreIndex(0), candidate_hash_a)], + ); + + let block_entry_b = make_block_entry( + block_hash_b, + parent_hash, + block_number, + vec![(CoreIndex(0), candidate_hash_a), (CoreIndex(1), candidate_hash_b)], + ); + + let n_validators = 10; + + let mut new_candidate_info = HashMap::new(); + new_candidate_info + .insert(candidate_hash_a, NewCandidateInfo::new(candidate_receipt_a, GroupIndex(0), None)); + + let mut overlay_db = OverlayedBackend::new(&db); + add_block_entry(&mut overlay_db, block_entry_a.clone().into(), n_validators, |h| { + new_candidate_info.get(h).map(|x| x.clone()) + }) + .unwrap(); + let write_ops = overlay_db.into_write_ops(); + db.write(write_ops).unwrap(); + + new_candidate_info + .insert(candidate_hash_b, NewCandidateInfo::new(candidate_receipt_b, GroupIndex(1), None)); + + let mut overlay_db = OverlayedBackend::new(&db); + add_block_entry(&mut overlay_db, block_entry_b.clone().into(), n_validators, |h| { + new_candidate_info.get(h).map(|x| x.clone()) + }) + .unwrap(); + let write_ops = overlay_db.into_write_ops(); + db.write(write_ops).unwrap(); + + assert_eq!( + load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_a).unwrap(), + Some(block_entry_a.into()) + ); + assert_eq!( + load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_b).unwrap(), + Some(block_entry_b.into()) + ); + + let candidate_entry_a = load_candidate_entry(store.as_ref(), &TEST_CONFIG, &candidate_hash_a) + .unwrap() + .unwrap(); + assert_eq!( + candidate_entry_a.block_assignments.keys().collect::>(), + vec![&block_hash_a, &block_hash_b] + ); + + let candidate_entry_b = load_candidate_entry(store.as_ref(), &TEST_CONFIG, &candidate_hash_b) + .unwrap() + .unwrap(); + assert_eq!(candidate_entry_b.block_assignments.keys().collect::>(), vec![&block_hash_b]); +} + +#[test] +fn add_block_entry_adds_child() { + let (mut db, store) = make_db(); + + let parent_hash = Hash::repeat_byte(1); + let block_hash_a = Hash::repeat_byte(2); + let block_hash_b = Hash::repeat_byte(69); + + let mut block_entry_a = make_block_entry(block_hash_a, parent_hash, 1, Vec::new()); + + let block_entry_b = make_block_entry(block_hash_b, block_hash_a, 2, Vec::new()); + + let n_validators = 10; + + let mut overlay_db = OverlayedBackend::new(&db); + add_block_entry(&mut overlay_db, block_entry_a.clone().into(), n_validators, |_| None).unwrap(); + + add_block_entry(&mut overlay_db, block_entry_b.clone().into(), n_validators, |_| None).unwrap(); + + let write_ops = overlay_db.into_write_ops(); + db.write(write_ops).unwrap(); + + block_entry_a.children.push(block_hash_b); + + assert_eq!( + load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_a).unwrap(), + 
Some(block_entry_a.into()) + ); + assert_eq!( + load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_b).unwrap(), + Some(block_entry_b.into()) + ); +} + +#[test] +fn canonicalize_works() { + let (mut db, store) = make_db(); + + // -> B1 -> C1 -> D1 + // A -> B2 -> C2 -> D2 + // + // We'll canonicalize C1. Everything except D1 should disappear. + // + // Candidates: + // Cand1 in B2 + // Cand2 in C2 + // Cand3 in C2 and D1 + // Cand4 in D1 + // Cand5 in D2 + // Only Cand3 and Cand4 should remain after canonicalize. + + let n_validators = 10; + + let mut overlay_db = OverlayedBackend::new(&db); + overlay_db.write_stored_block_range(StoredBlockRange(1, 5)); + let write_ops = overlay_db.into_write_ops(); + db.write(write_ops).unwrap(); + + let genesis = Hash::repeat_byte(0); + + let block_hash_a = Hash::repeat_byte(1); + let block_hash_b1 = Hash::repeat_byte(2); + let block_hash_b2 = Hash::repeat_byte(3); + let block_hash_c1 = Hash::repeat_byte(4); + let block_hash_c2 = Hash::repeat_byte(5); + let block_hash_d1 = Hash::repeat_byte(6); + let block_hash_d2 = Hash::repeat_byte(7); + + let candidate_receipt_genesis = make_candidate(ParaId::from(1_u32), genesis); + let candidate_receipt_a = make_candidate(ParaId::from(2_u32), block_hash_a); + let candidate_receipt_b = make_candidate(ParaId::from(3_u32), block_hash_a); + let candidate_receipt_b1 = make_candidate(ParaId::from(4_u32), block_hash_b1); + let candidate_receipt_c1 = make_candidate(ParaId::from(5_u32), block_hash_c1); + + let cand_hash_1 = candidate_receipt_genesis.hash(); + let cand_hash_2 = candidate_receipt_a.hash(); + let cand_hash_3 = candidate_receipt_b.hash(); + let cand_hash_4 = candidate_receipt_b1.hash(); + let cand_hash_5 = candidate_receipt_c1.hash(); + + let block_entry_a = make_block_entry(block_hash_a, genesis, 1, Vec::new()); + let block_entry_b1 = make_block_entry(block_hash_b1, block_hash_a, 2, Vec::new()); + let block_entry_b2 = + make_block_entry(block_hash_b2, block_hash_a, 2, vec![(CoreIndex(0), cand_hash_1)]); + let block_entry_c1 = make_block_entry(block_hash_c1, block_hash_b1, 3, Vec::new()); + let block_entry_c2 = make_block_entry( + block_hash_c2, + block_hash_b2, + 3, + vec![(CoreIndex(0), cand_hash_2), (CoreIndex(1), cand_hash_3)], + ); + let block_entry_d1 = make_block_entry( + block_hash_d1, + block_hash_c1, + 4, + vec![(CoreIndex(0), cand_hash_3), (CoreIndex(1), cand_hash_4)], + ); + let block_entry_d2 = + make_block_entry(block_hash_d2, block_hash_c2, 4, vec![(CoreIndex(0), cand_hash_5)]); + + let candidate_info = { + let mut candidate_info = HashMap::new(); + candidate_info.insert( + cand_hash_1, + NewCandidateInfo::new(candidate_receipt_genesis, GroupIndex(1), None), + ); + + candidate_info + .insert(cand_hash_2, NewCandidateInfo::new(candidate_receipt_a, GroupIndex(2), None)); + + candidate_info + .insert(cand_hash_3, NewCandidateInfo::new(candidate_receipt_b, GroupIndex(3), None)); + + candidate_info + .insert(cand_hash_4, NewCandidateInfo::new(candidate_receipt_b1, GroupIndex(4), None)); + + candidate_info + .insert(cand_hash_5, NewCandidateInfo::new(candidate_receipt_c1, GroupIndex(5), None)); + + candidate_info + }; + + // now insert all the blocks.
+ let blocks = vec![ + block_entry_a.clone(), + block_entry_b1.clone(), + block_entry_b2.clone(), + block_entry_c1.clone(), + block_entry_c2.clone(), + block_entry_d1.clone(), + block_entry_d2.clone(), + ]; + + let mut overlay_db = OverlayedBackend::new(&db); + for block_entry in blocks { + add_block_entry(&mut overlay_db, block_entry.into(), n_validators, |h| { + candidate_info.get(h).map(|x| x.clone()) + }) + .unwrap(); + } + let write_ops = overlay_db.into_write_ops(); + db.write(write_ops).unwrap(); + + let check_candidates_in_store = |expected: Vec<(CandidateHash, Option>)>| { + for (c_hash, in_blocks) in expected { + let (entry, in_blocks) = match in_blocks { + None => { + assert!(load_candidate_entry(store.as_ref(), &TEST_CONFIG, &c_hash) + .unwrap() + .is_none()); + continue + }, + Some(i) => ( + load_candidate_entry(store.as_ref(), &TEST_CONFIG, &c_hash).unwrap().unwrap(), + i, + ), + }; + + assert_eq!(entry.block_assignments.len(), in_blocks.len()); + + for x in in_blocks { + assert!(entry.block_assignments.contains_key(&x)); + } + } + }; + + let check_blocks_in_store = |expected: Vec<(Hash, Option>)>| { + for (hash, with_candidates) in expected { + let (entry, with_candidates) = match with_candidates { + None => { + assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &hash) + .unwrap() + .is_none()); + continue + }, + Some(i) => + (load_block_entry(store.as_ref(), &TEST_CONFIG, &hash).unwrap().unwrap(), i), + }; + + assert_eq!(entry.candidates.len(), with_candidates.len()); + + for x in with_candidates { + assert!(entry.candidates.iter().any(|(_, c)| c == &x)); + } + } + }; + + check_candidates_in_store(vec![ + (cand_hash_1, Some(vec![block_hash_b2])), + (cand_hash_2, Some(vec![block_hash_c2])), + (cand_hash_3, Some(vec![block_hash_c2, block_hash_d1])), + (cand_hash_4, Some(vec![block_hash_d1])), + (cand_hash_5, Some(vec![block_hash_d2])), + ]); + + check_blocks_in_store(vec![ + (block_hash_a, Some(vec![])), + (block_hash_b1, Some(vec![])), + (block_hash_b2, Some(vec![cand_hash_1])), + (block_hash_c1, Some(vec![])), + (block_hash_c2, Some(vec![cand_hash_2, cand_hash_3])), + (block_hash_d1, Some(vec![cand_hash_3, cand_hash_4])), + (block_hash_d2, Some(vec![cand_hash_5])), + ]); + + let mut overlay_db = OverlayedBackend::new(&db); + canonicalize(&mut overlay_db, 3, block_hash_c1).unwrap(); + let write_ops = overlay_db.into_write_ops(); + db.write(write_ops).unwrap(); + + assert_eq!( + load_stored_blocks(store.as_ref(), &TEST_CONFIG).unwrap().unwrap(), + StoredBlockRange(4, 5) + ); + + check_candidates_in_store(vec![ + (cand_hash_1, None), + (cand_hash_2, None), + (cand_hash_3, Some(vec![block_hash_d1])), + (cand_hash_4, Some(vec![block_hash_d1])), + (cand_hash_5, None), + ]); + + check_blocks_in_store(vec![ + (block_hash_a, None), + (block_hash_b1, None), + (block_hash_b2, None), + (block_hash_c1, None), + (block_hash_c2, None), + (block_hash_d1, Some(vec![cand_hash_3, cand_hash_4])), + (block_hash_d2, None), + ]); +} + +#[test] +fn force_approve_works() { + let (mut db, store) = make_db(); + let n_validators = 10; + + let mut overlay_db = OverlayedBackend::new(&db); + overlay_db.write_stored_block_range(StoredBlockRange(1, 4)); + let write_ops = overlay_db.into_write_ops(); + db.write(write_ops).unwrap(); + + let candidate_hash = CandidateHash(Hash::repeat_byte(42)); + let single_candidate_vec = vec![(CoreIndex(0), candidate_hash)]; + let candidate_info = { + let mut candidate_info = HashMap::new(); + candidate_info.insert( + candidate_hash, + NewCandidateInfo::new( + 
make_candidate(ParaId::from(1_u32), Default::default()), + GroupIndex(1), + None, + ), + ); + + candidate_info + }; + + let block_hash_a = Hash::repeat_byte(1); // 1 + let block_hash_b = Hash::repeat_byte(2); + let block_hash_c = Hash::repeat_byte(3); + let block_hash_d = Hash::repeat_byte(4); // 4 + + let block_entry_a = + make_block_entry(block_hash_a, Default::default(), 1, single_candidate_vec.clone()); + let block_entry_b = + make_block_entry(block_hash_b, block_hash_a, 2, single_candidate_vec.clone()); + let block_entry_c = + make_block_entry(block_hash_c, block_hash_b, 3, single_candidate_vec.clone()); + let block_entry_d = + make_block_entry(block_hash_d, block_hash_c, 4, single_candidate_vec.clone()); + + let blocks = vec![ + block_entry_a.clone(), + block_entry_b.clone(), + block_entry_c.clone(), + block_entry_d.clone(), + ]; + + let mut overlay_db = OverlayedBackend::new(&db); + for block_entry in blocks { + add_block_entry(&mut overlay_db, block_entry.into(), n_validators, |h| { + candidate_info.get(h).map(|x| x.clone()) + }) + .unwrap(); + } + let approved_hashes = force_approve(&mut overlay_db, block_hash_d, 2).unwrap(); + let write_ops = overlay_db.into_write_ops(); + db.write(write_ops).unwrap(); + + assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_a,) + .unwrap() + .unwrap() + .approved_bitfield + .all()); + assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_b,) + .unwrap() + .unwrap() + .approved_bitfield + .all()); + assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_c,) + .unwrap() + .unwrap() + .approved_bitfield + .not_any()); + assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_d,) + .unwrap() + .unwrap() + .approved_bitfield + .not_any()); + assert_eq!(approved_hashes, vec![block_hash_b, block_hash_a]); +} + +#[test] +fn load_all_blocks_works() { + let (mut db, store) = make_db(); + + let parent_hash = Hash::repeat_byte(1); + let block_hash_a = Hash::repeat_byte(2); + let block_hash_b = Hash::repeat_byte(69); + let block_hash_c = Hash::repeat_byte(42); + + let block_number = 10; + + let block_entry_a = make_block_entry(block_hash_a, parent_hash, block_number, vec![]); + + let block_entry_b = make_block_entry(block_hash_b, parent_hash, block_number, vec![]); + + let block_entry_c = make_block_entry(block_hash_c, block_hash_a, block_number + 1, vec![]); + + let n_validators = 10; + + let mut overlay_db = OverlayedBackend::new(&db); + add_block_entry(&mut overlay_db, block_entry_a.clone().into(), n_validators, |_| None).unwrap(); + + // add C before B to test sorting. 
+ add_block_entry(&mut overlay_db, block_entry_c.clone().into(), n_validators, |_| None).unwrap(); + + add_block_entry(&mut overlay_db, block_entry_b.clone().into(), n_validators, |_| None).unwrap(); + + let write_ops = overlay_db.into_write_ops(); + db.write(write_ops).unwrap(); + + assert_eq!( + load_all_blocks(store.as_ref(), &TEST_CONFIG).unwrap(), + vec![block_hash_a, block_hash_b, block_hash_c], + ) +} diff --git a/polkadot/node/core/approval-voting/src/backend.rs b/polkadot/node/core/approval-voting/src/backend.rs index 87d67c52c467a2079044a3c86179ce162dd0fc43..d98f3c5fd202eaf94371210f6366c61aa98017c5 100644 --- a/polkadot/node/core/approval-voting/src/backend.rs +++ b/polkadot/node/core/approval-voting/src/backend.rs @@ -27,7 +27,7 @@ use polkadot_primitives::{BlockNumber, CandidateHash, Hash}; use std::collections::HashMap; use super::{ - approval_db::v1::StoredBlockRange, + approval_db::v2::StoredBlockRange, persisted_entries::{BlockEntry, CandidateEntry}, }; @@ -44,6 +44,7 @@ pub enum BackendWriteOp { } /// An abstraction over backend storage for the logic of this subsystem. +/// Implementation must always target latest storage version. pub trait Backend { /// Load a block entry from the DB. fn load_block_entry(&self, hash: &Hash) -> SubsystemResult>; @@ -52,6 +53,7 @@ pub trait Backend { &self, candidate_hash: &CandidateHash, ) -> SubsystemResult>; + /// Load all blocks at a specific height. fn load_blocks_at_height(&self, height: &BlockNumber) -> SubsystemResult>; /// Load all block from the DB. @@ -64,6 +66,18 @@ pub trait Backend { I: IntoIterator; } +/// A read only backend to enable db migration from version 1 of DB. +pub trait V1ReadBackend: Backend { + /// Load a candidate entry from the DB with scheme version 1. + fn load_candidate_entry_v1( + &self, + candidate_hash: &CandidateHash, + ) -> SubsystemResult>; + + /// Load a block entry from the DB with scheme version 1. + fn load_block_entry_v1(&self, block_hash: &Hash) -> SubsystemResult>; +} + // Status of block range in the `OverlayedBackend`. #[derive(PartialEq)] enum BlockRangeStatus { diff --git a/polkadot/node/core/approval-voting/src/criteria.rs b/polkadot/node/core/approval-voting/src/criteria.rs index 0e1d18198c210fa14629a7343087c508a9ccc2ca..2bb5a151fe23bfc22478623b59d30af9f372dea4 100644 --- a/polkadot/node/core/approval-voting/src/criteria.rs +++ b/polkadot/node/core/approval-voting/src/criteria.rs @@ -16,28 +16,36 @@ //! Assignment criteria VRF generation and checking. +use itertools::Itertools; use parity_scale_codec::{Decode, Encode}; use polkadot_node_primitives::approval::{ - self as approval_types, AssignmentCert, AssignmentCertKind, DelayTranche, RelayVRFStory, + self as approval_types, + v1::{AssignmentCert, AssignmentCertKind, DelayTranche, RelayVRFStory}, + v2::{AssignmentCertKindV2, AssignmentCertV2, CoreBitfield, VrfOutput, VrfProof, VrfSignature}, }; use polkadot_primitives::{ AssignmentId, AssignmentPair, CandidateHash, CoreIndex, GroupIndex, IndexedVec, SessionInfo, ValidatorIndex, }; +use rand::{seq::SliceRandom, SeedableRng}; +use rand_chacha::ChaCha20Rng; use sc_keystore::LocalKeystore; use sp_application_crypto::ByteArray; use merlin::Transcript; use schnorrkel::vrf::VRFInOut; -use std::collections::{hash_map::Entry, HashMap}; +use std::{ + cmp::min, + collections::{hash_map::Entry, HashMap}, +}; use super::LOG_TARGET; /// Details pertaining to our assignment on a block. 
#[derive(Debug, Clone, Encode, Decode, PartialEq)] pub struct OurAssignment { - cert: AssignmentCert, + cert: AssignmentCertV2, tranche: DelayTranche, validator_index: ValidatorIndex, // Whether the assignment has been triggered already. @@ -45,7 +53,7 @@ pub struct OurAssignment { } impl OurAssignment { - pub(crate) fn cert(&self) -> &AssignmentCert { + pub(crate) fn cert(&self) -> &AssignmentCertV2 { &self.cert } @@ -66,8 +74,8 @@ impl OurAssignment { } } -impl From for OurAssignment { - fn from(entry: crate::approval_db::v1::OurAssignment) -> Self { +impl From for OurAssignment { + fn from(entry: crate::approval_db::v2::OurAssignment) -> Self { OurAssignment { cert: entry.cert, tranche: entry.tranche, @@ -77,7 +85,7 @@ impl From for OurAssignment { } } -impl From for crate::approval_db::v1::OurAssignment { +impl From for crate::approval_db::v2::OurAssignment { fn from(entry: OurAssignment) -> Self { Self { cert: entry.cert, @@ -88,17 +96,97 @@ impl From for crate::approval_db::v1::OurAssignment { } } -fn relay_vrf_modulo_transcript(relay_vrf_story: RelayVRFStory, sample: u32) -> Transcript { - // combine the relay VRF story with a sample number. - let mut t = Transcript::new(approval_types::RELAY_VRF_MODULO_CONTEXT); - t.append_message(b"RC-VRF", &relay_vrf_story.0); - sample.using_encoded(|s| t.append_message(b"sample", s)); +// Combines the relay VRF story with a sample number if any. +fn relay_vrf_modulo_transcript_inner( + mut transcript: Transcript, + relay_vrf_story: RelayVRFStory, + sample: Option, +) -> Transcript { + transcript.append_message(b"RC-VRF", &relay_vrf_story.0); - t + if let Some(sample) = sample { + sample.using_encoded(|s| transcript.append_message(b"sample", s)); + } + + transcript +} + +fn relay_vrf_modulo_transcript_v1(relay_vrf_story: RelayVRFStory, sample: u32) -> Transcript { + relay_vrf_modulo_transcript_inner( + Transcript::new(approval_types::v1::RELAY_VRF_MODULO_CONTEXT), + relay_vrf_story, + Some(sample), + ) +} + +fn relay_vrf_modulo_transcript_v2(relay_vrf_story: RelayVRFStory) -> Transcript { + relay_vrf_modulo_transcript_inner( + Transcript::new(approval_types::v2::RELAY_VRF_MODULO_CONTEXT), + relay_vrf_story, + None, + ) +} + +/// A hard upper bound on num_cores * target_checkers / num_validators +const MAX_MODULO_SAMPLES: usize = 40; + +/// Takes the VRF output as input and returns a Vec of cores the validator is assigned +/// to as a tranche 0 checker. +fn relay_vrf_modulo_cores( + vrf_in_out: &VRFInOut, + // Configuration - `relay_vrf_modulo_samples`. + num_samples: u32, + // Configuration - `n_cores`. + max_cores: u32, +) -> Vec { + let rand_chacha = + ChaCha20Rng::from_seed(vrf_in_out.make_bytes::<::Seed>( + approval_types::v2::CORE_RANDOMNESS_CONTEXT, + )); + generate_samples(rand_chacha, num_samples as usize, max_cores as usize) +} + +/// Generates `num_samples` random values from the (0..max_cores) range. +/// +/// Note! The algorithm can't change because validators on the other +/// side won't be able to check the assignments until they update. +/// This invariant is tested with `generate_samples_invariant`, so the +/// tests will catch any subtle changes in the implementation of this function +/// and its dependencies.
+fn generate_samples( + mut rand_chacha: ChaCha20Rng, + num_samples: usize, + max_cores: usize, +) -> Vec { + if num_samples as usize > MAX_MODULO_SAMPLES { + gum::warn!( + target: LOG_TARGET, + n_cores = max_cores, + num_samples, + max_modulo_samples = MAX_MODULO_SAMPLES, + "`num_samples` is greater than `MAX_MODULO_SAMPLES`", + ); + } + + if 2 * num_samples > max_cores { + gum::debug!( + target: LOG_TARGET, + n_cores = max_cores, + num_samples, + max_modulo_samples = MAX_MODULO_SAMPLES, + "Suboptimal configuration `num_samples` should be less than `n_cores` / 2", + ); + } + + let num_samples = min(MAX_MODULO_SAMPLES, min(num_samples, max_cores)); + + let mut random_cores = (0..max_cores as u32).map(|val| val.into()).collect::>(); + let (samples, _) = random_cores.partial_shuffle(&mut rand_chacha, num_samples as usize); + samples.into_iter().map(|val| *val).collect_vec() } fn relay_vrf_modulo_core(vrf_in_out: &VRFInOut, n_cores: u32) -> CoreIndex { - let bytes: [u8; 4] = vrf_in_out.make_bytes(approval_types::CORE_RANDOMNESS_CONTEXT); + let bytes: [u8; 4] = vrf_in_out.make_bytes(approval_types::v1::CORE_RANDOMNESS_CONTEXT); // interpret as little-endian u32. let random_core = u32::from_le_bytes(bytes) % n_cores; @@ -106,7 +194,7 @@ fn relay_vrf_modulo_core(vrf_in_out: &VRFInOut, n_cores: u32) -> CoreIndex { } fn relay_vrf_delay_transcript(relay_vrf_story: RelayVRFStory, core_index: CoreIndex) -> Transcript { - let mut t = Transcript::new(approval_types::RELAY_VRF_DELAY_CONTEXT); + let mut t = Transcript::new(approval_types::v1::RELAY_VRF_DELAY_CONTEXT); t.append_message(b"RC-VRF", &relay_vrf_story.0); core_index.0.using_encoded(|s| t.append_message(b"core", s)); t @@ -117,7 +205,7 @@ fn relay_vrf_delay_tranche( num_delay_tranches: u32, zeroth_delay_tranche_width: u32, ) -> DelayTranche { - let bytes: [u8; 4] = vrf_in_out.make_bytes(approval_types::TRANCHE_RANDOMNESS_CONTEXT); + let bytes: [u8; 4] = vrf_in_out.make_bytes(approval_types::v1::TRANCHE_RANDOMNESS_CONTEXT); // interpret as little-endian u32 and reduce by the number of tranches. let wide_tranche = @@ -128,13 +216,13 @@ fn relay_vrf_delay_tranche( } fn assigned_core_transcript(core_index: CoreIndex) -> Transcript { - let mut t = Transcript::new(approval_types::ASSIGNED_CORE_CONTEXT); + let mut t = Transcript::new(approval_types::v1::ASSIGNED_CORE_CONTEXT); core_index.0.using_encoded(|s| t.append_message(b"core", s)); t } /// Information about the world assignments are being produced in. -#[derive(Clone)] +#[derive(Clone, Debug)] pub(crate) struct Config { /// The assignment public keys for validators. assignment_keys: Vec, @@ -175,12 +263,13 @@ pub(crate) trait AssignmentCriteria { fn check_assignment_cert( &self, - claimed_core_index: CoreIndex, + claimed_core_bitfield: CoreBitfield, validator_index: ValidatorIndex, config: &Config, relay_vrf_story: RelayVRFStory, - assignment: &AssignmentCert, - backing_group: GroupIndex, + assignment: &AssignmentCertV2, + // Backing groups for each "leaving core". 
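+		// Note: this is expected to hold exactly one group per claimed core; the checker
+		// pairs the groups with the claimed cores in ascending core-index order.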
+ backing_groups: Vec, ) -> Result; } @@ -194,25 +283,25 @@ impl AssignmentCriteria for RealAssignmentCriteria { config: &Config, leaving_cores: Vec<(CandidateHash, CoreIndex, GroupIndex)>, ) -> HashMap { - compute_assignments(keystore, relay_vrf_story, config, leaving_cores) + compute_assignments(keystore, relay_vrf_story, config, leaving_cores, false) } fn check_assignment_cert( &self, - claimed_core_index: CoreIndex, + claimed_core_bitfield: CoreBitfield, validator_index: ValidatorIndex, config: &Config, relay_vrf_story: RelayVRFStory, - assignment: &AssignmentCert, - backing_group: GroupIndex, + assignment: &AssignmentCertV2, + backing_groups: Vec, ) -> Result { check_assignment_cert( - claimed_core_index, + claimed_core_bitfield, validator_index, config, relay_vrf_story, assignment, - backing_group, + backing_groups, ) } } @@ -233,6 +322,7 @@ pub(crate) fn compute_assignments( relay_vrf_story: RelayVRFStory, config: &Config, leaving_cores: impl IntoIterator + Clone, + enable_v2_assignments: bool, ) -> HashMap { if config.n_cores == 0 || config.assignment_keys.is_empty() || @@ -291,14 +381,25 @@ pub(crate) fn compute_assignments( let mut assignments = HashMap::new(); // First run `RelayVRFModulo` for each sample. - compute_relay_vrf_modulo_assignments( - &assignments_key, - index, - config, - relay_vrf_story.clone(), - leaving_cores.iter().cloned(), - &mut assignments, - ); + if enable_v2_assignments { + compute_relay_vrf_modulo_assignments_v2( + &assignments_key, + index, + config, + relay_vrf_story.clone(), + leaving_cores.clone(), + &mut assignments, + ); + } else { + compute_relay_vrf_modulo_assignments_v1( + &assignments_key, + index, + config, + relay_vrf_story.clone(), + leaving_cores.clone(), + &mut assignments, + ); + } // Then run `RelayVRFDelay` once for the whole block. compute_relay_vrf_delay_assignments( @@ -313,7 +414,7 @@ pub(crate) fn compute_assignments( assignments } -fn compute_relay_vrf_modulo_assignments( +fn compute_relay_vrf_modulo_assignments_v1( assignments_key: &schnorrkel::Keypair, validator_index: ValidatorIndex, config: &Config, @@ -329,7 +430,7 @@ fn compute_relay_vrf_modulo_assignments( // into closure. let core = &mut core; assignments_key.vrf_sign_extra_after_check( - relay_vrf_modulo_transcript(relay_vrf_story.clone(), rvm_sample), + relay_vrf_modulo_transcript_v1(relay_vrf_story.clone(), rvm_sample), |vrf_in_out| { *core = relay_vrf_modulo_core(&vrf_in_out, config.n_cores); if let Some((candidate_hash, _)) = @@ -357,15 +458,15 @@ fn compute_relay_vrf_modulo_assignments( // has been executed. let cert = AssignmentCert { kind: AssignmentCertKind::RelayVRFModulo { sample: rvm_sample }, - vrf: approval_types::VrfSignature { - output: approval_types::VrfOutput(vrf_in_out.to_output()), - proof: approval_types::VrfProof(vrf_proof), + vrf: VrfSignature { + output: VrfOutput(vrf_in_out.to_output()), + proof: VrfProof(vrf_proof), }, }; // All assignments of type RelayVRFModulo have tranche 0. 
assignments.entry(core).or_insert(OurAssignment { - cert, + cert: cert.into(), tranche: 0, validator_index, triggered: false, @@ -374,6 +475,84 @@ fn compute_relay_vrf_modulo_assignments( } } +fn assigned_cores_transcript(core_bitfield: &CoreBitfield) -> Transcript { + let mut t = Transcript::new(approval_types::v2::ASSIGNED_CORE_CONTEXT); + core_bitfield.using_encoded(|s| t.append_message(b"cores", s)); + t +} + +fn compute_relay_vrf_modulo_assignments_v2( + assignments_key: &schnorrkel::Keypair, + validator_index: ValidatorIndex, + config: &Config, + relay_vrf_story: RelayVRFStory, + leaving_cores: Vec<(CandidateHash, CoreIndex)>, + assignments: &mut HashMap, +) { + let mut assigned_cores = Vec::new(); + let leaving_cores = leaving_cores.iter().map(|(_, core)| core).collect::>(); + + let maybe_assignment = { + let assigned_cores = &mut assigned_cores; + assignments_key.vrf_sign_extra_after_check( + relay_vrf_modulo_transcript_v2(relay_vrf_story.clone()), + |vrf_in_out| { + *assigned_cores = relay_vrf_modulo_cores( + &vrf_in_out, + config.relay_vrf_modulo_samples, + config.n_cores, + ) + .into_iter() + .filter(|core| leaving_cores.contains(&core)) + .collect::>(); + + if !assigned_cores.is_empty() { + gum::trace!( + target: LOG_TARGET, + ?assigned_cores, + ?validator_index, + tranche = 0, + "RelayVRFModuloCompact Assignment." + ); + + let assignment_bitfield: CoreBitfield = assigned_cores + .clone() + .try_into() + .expect("Just checked `!assigned_cores.is_empty()`; qed"); + + Some(assigned_cores_transcript(&assignment_bitfield)) + } else { + None + } + }, + ) + }; + + if let Some(assignment) = maybe_assignment.map(|(vrf_in_out, vrf_proof, _)| { + let assignment_bitfield: CoreBitfield = assigned_cores + .clone() + .try_into() + .expect("Just checked `!assigned_cores.is_empty()`; qed"); + + let cert = AssignmentCertV2 { + kind: AssignmentCertKindV2::RelayVRFModuloCompact { + core_bitfield: assignment_bitfield.clone(), + }, + vrf: VrfSignature { + output: VrfOutput(vrf_in_out.to_output()), + proof: VrfProof(vrf_proof), + }, + }; + + // All assignments of type RelayVRFModulo have tranche 0. + OurAssignment { cert, tranche: 0, validator_index, triggered: false } + }) { + for core_index in assigned_cores { + assignments.insert(core_index, assignment.clone()); + } + } +} + fn compute_relay_vrf_delay_assignments( assignments_key: &schnorrkel::Keypair, validator_index: ValidatorIndex, @@ -392,11 +571,11 @@ fn compute_relay_vrf_delay_assignments( config.zeroth_delay_tranche_width, ); - let cert = AssignmentCert { - kind: AssignmentCertKind::RelayVRFDelay { core_index: core }, - vrf: approval_types::VrfSignature { - output: approval_types::VrfOutput(vrf_in_out.to_output()), - proof: approval_types::VrfProof(vrf_proof), + let cert = AssignmentCertV2 { + kind: AssignmentCertKindV2::RelayVRFDelay { core_index: core }, + vrf: VrfSignature { + output: VrfOutput(vrf_in_out.to_output()), + proof: VrfProof(vrf_proof), }, }; @@ -453,12 +632,15 @@ pub(crate) enum InvalidAssignmentReason { VRFModuloOutputMismatch, VRFDelayCoreIndexMismatch, VRFDelayOutputMismatch, + InvalidArguments, + /// Assignment vrf check resulted in 0 assigned cores. + NullAssignment, } /// Checks the crypto of an assignment cert. 
Failure conditions: /// * Validator index out of bounds /// * VRF signature check fails -/// * VRF output doesn't match assigned core +/// * VRF output doesn't match assigned cores /// * Core is not covered by extra data in signature /// * Core index out of bounds /// * Sample is out of bounds @@ -467,12 +649,12 @@ pub(crate) enum InvalidAssignmentReason { /// This function does not check whether the core is actually a valid assignment or not. That should /// be done outside the scope of this function. pub(crate) fn check_assignment_cert( - claimed_core_index: CoreIndex, + claimed_core_indices: CoreBitfield, validator_index: ValidatorIndex, config: &Config, relay_vrf_story: RelayVRFStory, - assignment: &AssignmentCert, - backing_group: GroupIndex, + assignment: &AssignmentCertV2, + backing_groups: Vec, ) -> Result { use InvalidAssignmentReason as Reason; @@ -484,52 +666,133 @@ pub(crate) fn check_assignment_cert( let public = schnorrkel::PublicKey::from_bytes(validator_public.as_slice()) .map_err(|_| InvalidAssignment(Reason::InvalidAssignmentKey))?; - if claimed_core_index.0 >= config.n_cores { - return Err(InvalidAssignment(Reason::CoreIndexOutOfBounds)) + // Check that we have all backing groups for claimed cores. + if claimed_core_indices.count_ones() == 0 || + claimed_core_indices.count_ones() != backing_groups.len() + { + return Err(InvalidAssignment(Reason::InvalidArguments)) } // Check that the validator was not part of the backing group // and not already assigned. - let is_in_backing = - is_in_backing_group(&config.validator_groups, validator_index, backing_group); + for (claimed_core, backing_group) in claimed_core_indices.iter_ones().zip(backing_groups.iter()) + { + if claimed_core >= config.n_cores as usize { + return Err(InvalidAssignment(Reason::CoreIndexOutOfBounds)) + } + + let is_in_backing = + is_in_backing_group(&config.validator_groups, validator_index, *backing_group); - if is_in_backing { - return Err(InvalidAssignment(Reason::IsInBackingGroup)) + if is_in_backing { + return Err(InvalidAssignment(Reason::IsInBackingGroup)) + } } - let vrf_signature = &assignment.vrf; - match assignment.kind { - AssignmentCertKind::RelayVRFModulo { sample } => { - if sample >= config.relay_vrf_modulo_samples { + let vrf_output = &assignment.vrf.output; + let vrf_proof = &assignment.vrf.proof; + let first_claimed_core_index = + claimed_core_indices.first_one().expect("Checked above; qed") as u32; + + match &assignment.kind { + AssignmentCertKindV2::RelayVRFModuloCompact { core_bitfield } => { + // Check that claimed core bitfield match the one from certificate. + if &claimed_core_indices != core_bitfield { + return Err(InvalidAssignment(Reason::VRFModuloCoreIndexMismatch)) + } + + let (vrf_in_out, _) = public + .vrf_verify_extra( + relay_vrf_modulo_transcript_v2(relay_vrf_story), + &vrf_output.0, + &vrf_proof.0, + assigned_cores_transcript(core_bitfield), + ) + .map_err(|_| InvalidAssignment(Reason::VRFModuloOutputMismatch))?; + + let resulting_cores = relay_vrf_modulo_cores( + &vrf_in_out, + config.relay_vrf_modulo_samples, + config.n_cores, + ); + + // Currently validators can opt out of checking specific cores. + // This is the same issue to how validator can opt out and not send their assignments in + // the first place. Ensure that the `vrf_in_out` actually includes all of the claimed + // cores. 
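+			// Illustrative example (hypothetical values): if the VRF yields cores {1, 4, 7}
+			// and the certificate claims {1, 7}, the claim is accepted; a claim containing
+			// core 9 would be rejected below.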
+ for claimed_core_index in claimed_core_indices.iter_ones() { + if !resulting_cores.contains(&CoreIndex(claimed_core_index as u32)) { + gum::debug!( + target: LOG_TARGET, + ?resulting_cores, + ?claimed_core_indices, + vrf_modulo_cores = ?resulting_cores, + "Assignment claimed cores mismatch", + ); + return Err(InvalidAssignment(Reason::VRFModuloCoreIndexMismatch)) + } + } + + Ok(0) + }, + AssignmentCertKindV2::RelayVRFModulo { sample } => { + if *sample >= config.relay_vrf_modulo_samples { return Err(InvalidAssignment(Reason::SampleOutOfBounds)) } + // Enforce claimed candidates is 1. + if claimed_core_indices.count_ones() != 1 { + gum::warn!( + target: LOG_TARGET, + ?claimed_core_indices, + "`RelayVRFModulo` assignment must always claim 1 core", + ); + return Err(InvalidAssignment(Reason::InvalidArguments)) + } + let (vrf_in_out, _) = public .vrf_verify_extra( - relay_vrf_modulo_transcript(relay_vrf_story, sample), - &vrf_signature.output.0, - &vrf_signature.proof.0, - assigned_core_transcript(claimed_core_index), + relay_vrf_modulo_transcript_v1(relay_vrf_story, *sample), + &vrf_output.0, + &vrf_proof.0, + assigned_core_transcript(CoreIndex(first_claimed_core_index)), ) .map_err(|_| InvalidAssignment(Reason::VRFModuloOutputMismatch))?; + let core = relay_vrf_modulo_core(&vrf_in_out, config.n_cores); // ensure that the `vrf_in_out` actually gives us the claimed core. - if relay_vrf_modulo_core(&vrf_in_out, config.n_cores) == claimed_core_index { + if core.0 == first_claimed_core_index { Ok(0) } else { + gum::debug!( + target: LOG_TARGET, + ?core, + ?claimed_core_indices, + "Assignment claimed cores mismatch", + ); Err(InvalidAssignment(Reason::VRFModuloCoreIndexMismatch)) } }, - AssignmentCertKind::RelayVRFDelay { core_index } => { - if core_index != claimed_core_index { + AssignmentCertKindV2::RelayVRFDelay { core_index } => { + // Enforce claimed candidates is 1. + if claimed_core_indices.count_ones() != 1 { + gum::debug!( + target: LOG_TARGET, + ?claimed_core_indices, + "`RelayVRFDelay` assignment must always claim 1 core", + ); + return Err(InvalidAssignment(Reason::InvalidArguments)) + } + + if core_index.0 != first_claimed_core_index { return Err(InvalidAssignment(Reason::VRFDelayCoreIndexMismatch)) } let (vrf_in_out, _) = public .vrf_verify( - relay_vrf_delay_transcript(relay_vrf_story, core_index), - &vrf_signature.output.0, - &vrf_signature.proof.0, + relay_vrf_delay_transcript(relay_vrf_story, *core_index), + &vrf_output.0, + &vrf_proof.0, ) .map_err(|_| InvalidAssignment(Reason::VRFDelayOutputMismatch))?; @@ -550,6 +813,19 @@ fn is_in_backing_group( validator_groups.get(group).map_or(false, |g| g.contains(&validator)) } +/// Migration helpers. +impl From for OurAssignment { + fn from(value: crate::approval_db::v1::OurAssignment) -> Self { + Self { + cert: value.cert.into(), + tranche: value.tranche, + validator_index: value.validator_index, + // Whether the assignment has been triggered already. + triggered: value.triggered, + } + } +} + #[cfg(test)] mod tests { use super::*; @@ -630,10 +906,11 @@ mod tests { ]), n_cores: 2, zeroth_delay_tranche_width: 10, - relay_vrf_modulo_samples: 3, + relay_vrf_modulo_samples: 10, n_delay_tranches: 40, }, vec![(c_a, CoreIndex(0), GroupIndex(1)), (c_b, CoreIndex(1), GroupIndex(0))], + false, ); // Note that alice is in group 0, which was the backing group for core 1. 
@@ -665,10 +942,11 @@ mod tests { ]), n_cores: 2, zeroth_delay_tranche_width: 10, - relay_vrf_modulo_samples: 3, + relay_vrf_modulo_samples: 10, n_delay_tranches: 40, }, vec![(c_a, CoreIndex(0), GroupIndex(0)), (c_b, CoreIndex(1), GroupIndex(1))], + false, ); assert_eq!(assignments.len(), 1); @@ -692,19 +970,21 @@ mod tests { validator_groups: Default::default(), n_cores: 0, zeroth_delay_tranche_width: 10, - relay_vrf_modulo_samples: 3, + relay_vrf_modulo_samples: 10, n_delay_tranches: 40, }, vec![], + false, ); assert!(assignments.is_empty()); } + #[derive(Debug)] struct MutatedAssignment { - core: CoreIndex, - cert: AssignmentCert, - group: GroupIndex, + cores: CoreBitfield, + cert: AssignmentCertV2, + groups: Vec, own_group: GroupIndex, val_index: ValidatorIndex, config: Config, @@ -729,12 +1009,12 @@ mod tests { validator_groups: basic_groups(n_validators, n_cores), n_cores: n_cores as u32, zeroth_delay_tranche_width: 10, - relay_vrf_modulo_samples: 3, + relay_vrf_modulo_samples: 15, n_delay_tranches: 40, }; let relay_vrf_story = RelayVRFStory([42u8; 32]); - let assignments = compute_assignments( + let mut assignments = compute_assignments( &keystore, relay_vrf_story.clone(), &config, @@ -747,19 +1027,42 @@ mod tests { ) }) .collect::>(), + false, ); + // Extend with v2 assignments as well + assignments.extend(compute_assignments( + &keystore, + relay_vrf_story.clone(), + &config, + (0..n_cores) + .map(|i| { + ( + CandidateHash(Hash::repeat_byte(i as u8)), + CoreIndex(i as u32), + group_for_core(i), + ) + }) + .collect::>(), + true, + )); + let mut counted = 0; for (core, assignment) in assignments { + let cores = match assignment.cert.kind.clone() { + AssignmentCertKindV2::RelayVRFModuloCompact { core_bitfield } => core_bitfield, + AssignmentCertKindV2::RelayVRFModulo { sample: _ } => core.into(), + AssignmentCertKindV2::RelayVRFDelay { core_index } => core_index.into(), + }; + let mut mutated = MutatedAssignment { - core, - group: group_for_core(core.0 as _), + cores: cores.clone(), + groups: cores.iter_ones().map(|core| group_for_core(core)).collect(), cert: assignment.cert, own_group: GroupIndex(0), val_index: ValidatorIndex(0), config: config.clone(), }; - let expected = match f(&mut mutated) { None => continue, Some(e) => e, @@ -768,16 +1071,16 @@ mod tests { counted += 1; let is_good = check_assignment_cert( - mutated.core, + mutated.cores, mutated.val_index, &mutated.config, relay_vrf_story.clone(), &mutated.cert, - mutated.group, + mutated.groups, ) .is_ok(); - assert_eq!(expected, is_good) + assert_eq!(expected, is_good); } assert!(counted > 0); @@ -791,7 +1094,7 @@ mod tests { #[test] fn check_rejects_claimed_core_out_of_bounds() { check_mutated_assignments(200, 100, 25, |m| { - m.core.0 += 100; + m.cores = CoreIndex(100).into(); Some(false) }); } @@ -799,7 +1102,7 @@ mod tests { #[test] fn check_rejects_in_backing_group() { check_mutated_assignments(200, 100, 25, |m| { - m.group = m.own_group; + m.groups[0] = m.own_group; Some(false) }); } @@ -814,10 +1117,11 @@ mod tests { #[test] fn check_rejects_delay_bad_vrf() { - check_mutated_assignments(40, 10, 8, |m| { + check_mutated_assignments(40, 100, 8, |m| { + let vrf_signature = garbage_vrf_signature(); match m.cert.kind.clone() { - AssignmentCertKind::RelayVRFDelay { .. } => { - m.cert.vrf = garbage_vrf_signature(); + AssignmentCertKindV2::RelayVRFDelay { .. } => { + m.cert.vrf = vrf_signature; Some(false) }, _ => None, // skip everything else. 
@@ -828,9 +1132,14 @@ mod tests { #[test] fn check_rejects_modulo_bad_vrf() { check_mutated_assignments(200, 100, 25, |m| { + let vrf_signature = garbage_vrf_signature(); match m.cert.kind.clone() { - AssignmentCertKind::RelayVRFModulo { .. } => { - m.cert.vrf = garbage_vrf_signature(); + AssignmentCertKindV2::RelayVRFModulo { .. } => { + m.cert.vrf = vrf_signature; + Some(false) + }, + AssignmentCertKindV2::RelayVRFModuloCompact { .. } => { + m.cert.vrf = vrf_signature; Some(false) }, _ => None, // skip everything else. @@ -842,10 +1151,11 @@ mod tests { fn check_rejects_modulo_sample_out_of_bounds() { check_mutated_assignments(200, 100, 25, |m| { match m.cert.kind.clone() { - AssignmentCertKind::RelayVRFModulo { sample } => { + AssignmentCertKindV2::RelayVRFModulo { sample } => { m.config.relay_vrf_modulo_samples = sample; Some(false) }, + AssignmentCertKindV2::RelayVRFModuloCompact { core_bitfield: _ } => Some(true), _ => None, // skip everything else. } }); @@ -855,8 +1165,11 @@ mod tests { fn check_rejects_delay_claimed_core_wrong() { check_mutated_assignments(200, 100, 25, |m| { match m.cert.kind.clone() { - AssignmentCertKind::RelayVRFDelay { .. } => { - m.core = CoreIndex((m.core.0 + 1) % 100); + AssignmentCertKindV2::RelayVRFDelay { .. } => { + // for core in &mut m.cores { + // core.0 = (core.0 + 1) % 100; + // } + m.cores = CoreIndex((m.cores.first_one().unwrap() + 1) as u32 % 100).into(); Some(false) }, _ => None, // skip everything else. @@ -868,12 +1181,53 @@ mod tests { fn check_rejects_modulo_core_wrong() { check_mutated_assignments(200, 100, 25, |m| { match m.cert.kind.clone() { - AssignmentCertKind::RelayVRFModulo { .. } => { - m.core = CoreIndex((m.core.0 + 1) % 100); + AssignmentCertKindV2::RelayVRFModulo { .. } | + AssignmentCertKindV2::RelayVRFModuloCompact { .. } => { + m.cores = CoreIndex((m.cores.first_one().unwrap() + 1) as u32 % 100).into(); + Some(false) }, _ => None, // skip everything else. 
} }); } + + #[test] + fn generate_samples_invariant() { + let seed = [ + 1, 0, 52, 0, 0, 0, 0, 0, 1, 0, 10, 0, 22, 32, 0, 0, 2, 0, 55, 49, 0, 11, 0, 0, 3, 0, 0, + 0, 0, 0, 2, 92, + ]; + let rand_chacha = ChaCha20Rng::from_seed(seed); + + let samples = generate_samples(rand_chacha.clone(), 6, 100); + let expected = vec![19, 79, 17, 75, 66, 30].into_iter().map(Into::into).collect_vec(); + assert_eq!(samples, expected); + + let samples = generate_samples(rand_chacha.clone(), 6, 7); + let expected = vec![0, 3, 6, 5, 4, 2].into_iter().map(Into::into).collect_vec(); + assert_eq!(samples, expected); + + let samples = generate_samples(rand_chacha.clone(), 6, 12); + let expected = vec![2, 4, 7, 5, 11, 3].into_iter().map(Into::into).collect_vec(); + assert_eq!(samples, expected); + + let samples = generate_samples(rand_chacha.clone(), 1, 100); + let expected = vec![30].into_iter().map(Into::into).collect_vec(); + assert_eq!(samples, expected); + + let samples = generate_samples(rand_chacha.clone(), 0, 100); + let expected = vec![]; + assert_eq!(samples, expected); + + let samples = generate_samples(rand_chacha, MAX_MODULO_SAMPLES + 1, 100); + let expected = vec![ + 42, 54, 55, 93, 64, 27, 49, 15, 83, 71, 62, 1, 43, 77, 97, 41, 7, 69, 0, 88, 59, 14, + 23, 87, 47, 4, 51, 12, 74, 56, 50, 44, 9, 82, 19, 79, 17, 75, 66, 30, + ] + .into_iter() + .map(Into::into) + .collect_vec(); + assert_eq!(samples, expected); + } } diff --git a/polkadot/node/core/approval-voting/src/import.rs b/polkadot/node/core/approval-voting/src/import.rs index df712fc45a53d4cd21266e348247ec7f99ec09b8..d7667e8e405a4cd8c5f437ff3374b4da0ebc4ec2 100644 --- a/polkadot/node/core/approval-voting/src/import.rs +++ b/polkadot/node/core/approval-voting/src/import.rs @@ -30,7 +30,10 @@ use polkadot_node_jaeger as jaeger; use polkadot_node_primitives::{ - approval::{self as approval_types, BlockApprovalMeta, RelayVRFStory}, + approval::{ + self as approval_types, + v1::{BlockApprovalMeta, RelayVRFStory}, + }, MAX_FINALITY_LAG, }; use polkadot_node_subsystem::{ @@ -53,7 +56,7 @@ use futures::{channel::oneshot, prelude::*}; use std::collections::HashMap; -use super::approval_db::v1; +use super::approval_db::v2; use crate::{ backend::{Backend, OverlayedBackend}, criteria::{AssignmentCriteria, OurAssignment}, @@ -92,7 +95,7 @@ enum ImportedBlockInfoError { FutureCancelled(&'static str, futures::channel::oneshot::Canceled), #[error(transparent)] - ApprovalError(approval_types::ApprovalError), + ApprovalError(approval_types::v1::ApprovalError), #[error("block is already finalized")] BlockAlreadyFinalized, @@ -216,7 +219,7 @@ async fn imported_block_info( .ok_or(ImportedBlockInfoError::SessionInfoUnavailable)?; let (assignments, slot, relay_vrf_story) = { - let unsafe_vrf = approval_types::babe_unsafe_vrf_info(&block_header); + let unsafe_vrf = approval_types::v1::babe_unsafe_vrf_info(&block_header); match unsafe_vrf { Some(unsafe_vrf) => { @@ -497,7 +500,7 @@ pub(crate) async fn handle_new_head( ctx.send_message(ChainSelectionMessage::Approved(block_hash)).await; } - let block_entry = v1::BlockEntry { + let block_entry = v2::BlockEntry { block_hash, parent_hash: block_header.parent_hash, block_number: block_header.number, @@ -510,6 +513,7 @@ pub(crate) async fn handle_new_head( .collect(), approved_bitfield, children: Vec::new(), + distributed_assignments: Default::default(), }; gum::trace!( @@ -588,11 +592,11 @@ pub(crate) async fn handle_new_head( #[cfg(test)] pub(crate) mod tests { use super::*; - use crate::{approval_db::v1::DbBackend, 
RuntimeInfo, RuntimeInfoConfig}; + use crate::{approval_db::v2::DbBackend, RuntimeInfo, RuntimeInfoConfig}; use ::test_helpers::{dummy_candidate_receipt, dummy_hash}; use assert_matches::assert_matches; use polkadot_node_primitives::{ - approval::{VrfSignature, VrfTranscript}, + approval::v1::{VrfSignature, VrfTranscript}, DISPUTE_WINDOW, }; use polkadot_node_subsystem::messages::{AllMessages, ApprovalVotingMessage}; @@ -610,7 +614,7 @@ pub(crate) mod tests { pub(crate) use sp_runtime::{Digest, DigestItem}; use std::{pin::Pin, sync::Arc}; - use crate::{approval_db::v1::Config as DatabaseConfig, criteria, BlockEntry}; + use crate::{approval_db::v2::Config as DatabaseConfig, criteria, BlockEntry}; const DATA_COL: u32 = 0; @@ -656,7 +660,7 @@ pub(crate) mod tests { fn compute_assignments( &self, _keystore: &LocalKeystore, - _relay_vrf_story: polkadot_node_primitives::approval::RelayVRFStory, + _relay_vrf_story: polkadot_node_primitives::approval::v1::RelayVRFStory, _config: &criteria::Config, _leaving_cores: Vec<( CandidateHash, @@ -669,13 +673,14 @@ pub(crate) mod tests { fn check_assignment_cert( &self, - _claimed_core_index: polkadot_primitives::CoreIndex, + _claimed_core_bitfield: polkadot_node_primitives::approval::v2::CoreBitfield, _validator_index: polkadot_primitives::ValidatorIndex, _config: &criteria::Config, - _relay_vrf_story: polkadot_node_primitives::approval::RelayVRFStory, - _assignment: &polkadot_node_primitives::approval::AssignmentCert, - _backing_group: polkadot_primitives::GroupIndex, - ) -> Result { + _relay_vrf_story: polkadot_node_primitives::approval::v1::RelayVRFStory, + _assignment: &polkadot_node_primitives::approval::v2::AssignmentCertV2, + _backing_groups: Vec, + ) -> Result + { Ok(0) } } @@ -1296,7 +1301,7 @@ pub(crate) mod tests { let (state, mut session_info_provider) = single_session_state(); overlay_db.write_block_entry( - v1::BlockEntry { + v2::BlockEntry { block_hash: parent_hash, parent_hash: Default::default(), block_number: 4, @@ -1306,6 +1311,7 @@ pub(crate) mod tests { candidates: Vec::new(), approved_bitfield: Default::default(), children: Vec::new(), + distributed_assignments: Default::default(), } .into(), ); @@ -1338,7 +1344,7 @@ pub(crate) mod tests { // the first candidate should be insta-approved // the second should not let entry: BlockEntry = - v1::load_block_entry(db_writer.as_ref(), &TEST_CONFIG, &hash) + v2::load_block_entry(db_writer.as_ref(), &TEST_CONFIG, &hash) .unwrap() .unwrap() .into(); diff --git a/polkadot/node/core/approval-voting/src/lib.rs b/polkadot/node/core/approval-voting/src/lib.rs index ddef736feab7c9cdc5b79e3508efef47e7bf76af..ef01727b7eb681a180608144cee26d91d0bbda44 100644 --- a/polkadot/node/core/approval-voting/src/lib.rs +++ b/polkadot/node/core/approval-voting/src/lib.rs @@ -25,7 +25,11 @@ use jaeger::{hash_to_trace_identifier, PerLeafSpan}; use polkadot_node_jaeger as jaeger; use polkadot_node_primitives::{ approval::{ - BlockApprovalMeta, DelayTranche, IndirectAssignmentCert, IndirectSignedApprovalVote, + v1::{BlockApprovalMeta, DelayTranche, IndirectSignedApprovalVote}, + v2::{ + AssignmentCertKindV2, BitfieldError, CandidateBitfield, CoreBitfield, + IndirectAssignmentCertV2, + }, }, ValidationResult, DISPUTE_WINDOW, }; @@ -50,7 +54,7 @@ use polkadot_node_subsystem_util::{ }; use polkadot_primitives::{ ApprovalVote, BlockNumber, CandidateHash, CandidateIndex, CandidateReceipt, DisputeStatement, - ExecutorParams, GroupIndex, Hash, PvfExecTimeoutKind, SessionIndex, SessionInfo, + ExecutorParams, GroupIndex, Hash, 
PvfExecKind, SessionIndex, SessionInfo, ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorPair, ValidatorSignature, }; use sc_keystore::LocalKeystore; @@ -76,12 +80,13 @@ use std::{ use schnellru::{ByLength, LruMap}; use approval_checking::RequiredTranches; +use bitvec::{order::Lsb0, vec::BitVec}; use criteria::{AssignmentCriteria, RealAssignmentCriteria}; use persisted_entries::{ApprovalEntry, BlockEntry, CandidateEntry}; use time::{slot_number_to_tick, Clock, ClockExt, SystemClock, Tick}; mod approval_checking; -mod approval_db; +pub mod approval_db; mod backend; mod criteria; mod import; @@ -90,8 +95,9 @@ mod persisted_entries; mod time; use crate::{ - approval_db::v1::{Config as DatabaseConfig, DbBackend}, + approval_db::v2::{Config as DatabaseConfig, DbBackend}, backend::{Backend, OverlayedBackend}, + criteria::InvalidAssignmentReason, }; #[cfg(test)] @@ -107,7 +113,7 @@ const APPROVAL_CACHE_SIZE: u32 = 1024; const TICK_TOO_FAR_IN_FUTURE: Tick = 20; // 10 seconds. const APPROVAL_DELAY: Tick = 2; -const LOG_TARGET: &str = "parachain::approval-voting"; +pub(crate) const LOG_TARGET: &str = "parachain::approval-voting"; /// Configuration for the approval voting subsystem #[derive(Debug, Clone)] @@ -377,8 +383,8 @@ impl ApprovalVotingSubsystem { /// The operation is not allowed for blocks older than the last finalized one. pub fn revert_to(&self, hash: Hash) -> Result<(), SubsystemError> { let config = - approval_db::v1::Config { col_approval_data: self.db_config.col_approval_data }; - let mut backend = approval_db::v1::DbBackend::new(self.db.clone(), config); + approval_db::v2::Config { col_approval_data: self.db_config.col_approval_data }; + let mut backend = approval_db::v2::DbBackend::new(self.db.clone(), config); let mut overlay = OverlayedBackend::new(&backend); ops::revert_to(&mut overlay, hash)?; @@ -754,15 +760,16 @@ enum Action { tick: Tick, }, LaunchApproval { + claimed_candidate_indices: CandidateBitfield, candidate_hash: CandidateHash, - indirect_cert: IndirectAssignmentCert, + indirect_cert: IndirectAssignmentCertV2, assignment_tranche: DelayTranche, relay_block_hash: Hash, - candidate_index: CandidateIndex, session: SessionIndex, executor_params: ExecutorParams, candidate: CandidateReceipt, backing_group: GroupIndex, + distribute_assignment: bool, }, NoteApprovedInChainSelection(Hash), IssueApproval(CandidateHash, ApprovalVoteRequest), @@ -977,15 +984,16 @@ async fn handle_actions( actions_iter = next_actions.into_iter(); }, Action::LaunchApproval { + claimed_candidate_indices, candidate_hash, indirect_cert, assignment_tranche, relay_block_hash, - candidate_index, session, executor_params, candidate, backing_group, + distribute_assignment, } => { // Don't launch approval work if the node is syncing. 
if let Mode::Syncing(_) = *mode { @@ -1006,10 +1014,12 @@ async fn handle_actions( launch_approval_span.add_string_tag("block-hash", format!("{:?}", block_hash)); let validator_index = indirect_cert.validator; - ctx.send_unbounded_message(ApprovalDistributionMessage::DistributeAssignment( - indirect_cert, - candidate_index, - )); + if distribute_assignment { + ctx.send_unbounded_message(ApprovalDistributionMessage::DistributeAssignment( + indirect_cert, + claimed_candidate_indices, + )); + } match approvals_cache.get(&candidate_hash) { Some(ApprovalOutcome::Approved) => { @@ -1078,6 +1088,49 @@ async fn handle_actions( Ok(conclude) } +fn cores_to_candidate_indices( + core_indices: &CoreBitfield, + block_entry: &BlockEntry, +) -> Result { + let mut candidate_indices = Vec::new(); + + // Map from core index to candidate index. + for claimed_core_index in core_indices.iter_ones() { + if let Some(candidate_index) = block_entry + .candidates() + .iter() + .position(|(core_index, _)| core_index.0 == claimed_core_index as u32) + { + candidate_indices.push(candidate_index as CandidateIndex); + } + } + + CandidateBitfield::try_from(candidate_indices) +} + +// Returns the claimed core bitfield from the assignment cert, the candidate hash and a +// `BlockEntry`. Can fail only for VRF Delay assignments for which we cannot find the candidate hash +// in the block entry which indicates a bug or corrupted storage. +fn get_assignment_core_indices( + assignment: &AssignmentCertKindV2, + candidate_hash: &CandidateHash, + block_entry: &BlockEntry, +) -> Option { + match &assignment { + AssignmentCertKindV2::RelayVRFModuloCompact { core_bitfield } => + Some(core_bitfield.clone()), + AssignmentCertKindV2::RelayVRFModulo { sample: _ } => block_entry + .candidates() + .iter() + .find(|(_core_index, h)| candidate_hash == h) + .map(|(core_index, _candidate_hash)| { + CoreBitfield::try_from(vec![*core_index]).expect("Not an empty vec; qed") + }), + AssignmentCertKindV2::RelayVRFDelay { core_index } => + Some(CoreBitfield::try_from(vec![*core_index]).expect("Not an empty vec; qed")), + } +} + fn distribution_messages_for_activation( db: &OverlayedBackend<'_, impl Backend>, state: &State, @@ -1142,33 +1195,95 @@ fn distribution_messages_for_activation( match approval_entry.local_statements() { (None, None) | (None, Some(_)) => {}, // second is impossible case. (Some(assignment), None) => { - messages.push(ApprovalDistributionMessage::DistributeAssignment( - IndirectAssignmentCert { - block_hash, - validator: assignment.validator_index(), - cert: assignment.cert().clone(), - }, - i as _, - )); + if let Some(claimed_core_indices) = get_assignment_core_indices( + &assignment.cert().kind, + &candidate_hash, + &block_entry, + ) { + match cores_to_candidate_indices( + &claimed_core_indices, + &block_entry, + ) { + Ok(bitfield) => messages.push( + ApprovalDistributionMessage::DistributeAssignment( + IndirectAssignmentCertV2 { + block_hash, + validator: assignment.validator_index(), + cert: assignment.cert().clone(), + }, + bitfield, + ), + ), + Err(err) => { + // Should never happen. If we fail here it means the + // assignment is null (no cores claimed). 
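+							// (`cores_to_candidate_indices` can only fail when none of the
+							// claimed cores maps to a candidate included in this block, i.e.
+							// the resulting candidate bitfield would be empty.)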
+ gum::warn!( + target: LOG_TARGET, + ?block_hash, + ?candidate_hash, + ?err, + "Failed to create assignment bitfield", + ); + }, + } + } else { + gum::warn!( + target: LOG_TARGET, + ?block_hash, + ?candidate_hash, + "Cannot get assignment claimed core indices", + ); + } }, (Some(assignment), Some(approval_sig)) => { - messages.push(ApprovalDistributionMessage::DistributeAssignment( - IndirectAssignmentCert { - block_hash, - validator: assignment.validator_index(), - cert: assignment.cert().clone(), - }, - i as _, - )); + if let Some(claimed_core_indices) = get_assignment_core_indices( + &assignment.cert().kind, + &candidate_hash, + &block_entry, + ) { + match cores_to_candidate_indices( + &claimed_core_indices, + &block_entry, + ) { + Ok(bitfield) => messages.push( + ApprovalDistributionMessage::DistributeAssignment( + IndirectAssignmentCertV2 { + block_hash, + validator: assignment.validator_index(), + cert: assignment.cert().clone(), + }, + bitfield, + ), + ), + Err(err) => { + gum::warn!( + target: LOG_TARGET, + ?block_hash, + ?candidate_hash, + ?err, + "Failed to create assignment bitfield", + ); + // If we didn't send assignment, we don't send approval. + continue + }, + } - messages.push(ApprovalDistributionMessage::DistributeApproval( - IndirectSignedApprovalVote { - block_hash, - candidate_index: i as _, - validator: assignment.validator_index(), - signature: approval_sig, - }, - )) + messages.push(ApprovalDistributionMessage::DistributeApproval( + IndirectSignedApprovalVote { + block_hash, + candidate_index: i as _, + validator: assignment.validator_index(), + signature: approval_sig, + }, + )); + } else { + gum::warn!( + target: LOG_TARGET, + ?block_hash, + ?candidate_hash, + "Cannot get assignment claimed core indices", + ); + } }, } }, @@ -1288,14 +1403,14 @@ async fn handle_from_overseer( vec![Action::Conclude] }, FromOrchestra::Communication { msg } => match msg { - ApprovalVotingMessage::CheckAndImportAssignment(a, claimed_core, res) => { + ApprovalVotingMessage::CheckAndImportAssignment(a, claimed_cores, res) => { let (check_outcome, actions) = check_and_import_assignment( ctx.sender(), state, db, session_info_provider, a, - claimed_core, + claimed_cores, ) .await?; let _ = res.send(check_outcome); @@ -1465,7 +1580,6 @@ async fn handle_approved_ancestor( let mut span = span .child("handle-approved-ancestor") .with_stage(jaeger::Stage::ApprovalChecking); - use bitvec::{order::Lsb0, vec::BitVec}; let mut all_approved_max = None; @@ -1804,8 +1918,8 @@ async fn check_and_import_assignment( state: &State, db: &mut OverlayedBackend<'_, impl Backend>, session_info_provider: &mut RuntimeInfo, - assignment: IndirectAssignmentCert, - candidate_index: CandidateIndex, + assignment: IndirectAssignmentCertV2, + candidate_indices: CandidateBitfield, ) -> SubsystemResult<(AssignmentCheckResult, Vec)> where Sender: SubsystemSender, @@ -1818,9 +1932,12 @@ where .map(|span| span.child("check-and-import-assignment")) .unwrap_or_else(|| jaeger::Span::new(assignment.block_hash, "check-and-import-assignment")) .with_relay_parent(assignment.block_hash) - .with_uint_tag("candidate-index", candidate_index as u64) .with_stage(jaeger::Stage::ApprovalChecking); + for candidate_index in candidate_indices.iter_ones() { + check_and_import_assignment_span.add_uint_tag("candidate-index", candidate_index as u64); + } + let block_entry = match db.load_block_entry(&assignment.block_hash)? 
{ Some(b) => b, None => @@ -1850,39 +1967,64 @@ where )), }; - let (claimed_core_index, assigned_candidate_hash) = - match block_entry.candidate(candidate_index as usize) { - Some((c, h)) => (*c, *h), + let n_cores = session_info.n_cores as usize; + + // Early check the candidate bitfield and core bitfields lengths < `n_cores`. + // Core bitfield length is checked later in `check_assignment_cert`. + if candidate_indices.len() > n_cores { + gum::debug!( + target: LOG_TARGET, + validator = assignment.validator.0, + n_cores, + candidate_bitfield_len = ?candidate_indices.len(), + "Oversized bitfield", + ); + + return Ok(( + AssignmentCheckResult::Bad(AssignmentCheckError::InvalidBitfield( + candidate_indices.len(), + )), + Vec::new(), + )) + } + + // The Compact VRF modulo assignment cert has multiple core assignments. + let mut backing_groups = Vec::new(); + let mut claimed_core_indices = Vec::new(); + let mut assigned_candidate_hashes = Vec::new(); + + for candidate_index in candidate_indices.iter_ones() { + let (claimed_core_index, assigned_candidate_hash) = + match block_entry.candidate(candidate_index) { + Some((c, h)) => (*c, *h), + None => + return Ok(( + AssignmentCheckResult::Bad(AssignmentCheckError::InvalidCandidateIndex( + candidate_index as _, + )), + Vec::new(), + )), // no candidate at core. + }; + + let mut candidate_entry = match db.load_candidate_entry(&assigned_candidate_hash)? { + Some(c) => c, None => return Ok(( - AssignmentCheckResult::Bad(AssignmentCheckError::InvalidCandidateIndex( - candidate_index, + AssignmentCheckResult::Bad(AssignmentCheckError::InvalidCandidate( + candidate_index as _, + assigned_candidate_hash, )), Vec::new(), )), // no candidate at core. }; - check_and_import_assignment_span - .add_string_tag("candidate-hash", format!("{:?}", assigned_candidate_hash)); - check_and_import_assignment_span.add_string_tag( - "traceID", - format!("{:?}", jaeger::hash_to_trace_identifier(assigned_candidate_hash.0)), - ); - - let mut candidate_entry = match db.load_candidate_entry(&assigned_candidate_hash)? { - Some(c) => c, - None => - return Ok(( - AssignmentCheckResult::Bad(AssignmentCheckError::InvalidCandidate( - candidate_index, - assigned_candidate_hash, - )), - Vec::new(), - )), - }; + check_and_import_assignment_span + .add_string_tag("candidate-hash", format!("{:?}", assigned_candidate_hash)); + check_and_import_assignment_span.add_string_tag( + "traceID", + format!("{:?}", jaeger::hash_to_trace_identifier(assigned_candidate_hash.0)), + ); - let res = { - // import the assignment. let approval_entry = match candidate_entry.approval_entry_mut(&assignment.block_hash) { Some(a) => a, None => @@ -1895,79 +2037,144 @@ where )), }; - let res = state.assignment_criteria.check_assignment_cert( - claimed_core_index, - assignment.validator, - &criteria::Config::from(session_info), - block_entry.relay_vrf_story(), - &assignment.cert, - approval_entry.backing_group(), - ); + backing_groups.push(approval_entry.backing_group()); + claimed_core_indices.push(claimed_core_index); + assigned_candidate_hashes.push(assigned_candidate_hash); + } - let tranche = match res { - Err(crate::criteria::InvalidAssignment(reason)) => - return Ok(( - AssignmentCheckResult::Bad(AssignmentCheckError::InvalidCert( - assignment.validator, - format!("{:?}", reason), - )), - Vec::new(), + // Error on null assignments. 
+ if claimed_core_indices.is_empty() { + return Ok(( + AssignmentCheckResult::Bad(AssignmentCheckError::InvalidCert( + assignment.validator, + format!("{:?}", InvalidAssignmentReason::NullAssignment), + )), + Vec::new(), + )) + } + + // Check the assignment certificate. + let res = state.assignment_criteria.check_assignment_cert( + claimed_core_indices + .clone() + .try_into() + .expect("Checked for null assignment above; qed"), + assignment.validator, + &criteria::Config::from(session_info), + block_entry.relay_vrf_story(), + &assignment.cert, + backing_groups, + ); + + let tranche = match res { + Err(crate::criteria::InvalidAssignment(reason)) => + return Ok(( + AssignmentCheckResult::Bad(AssignmentCheckError::InvalidCert( + assignment.validator, + format!("{:?}", reason), )), - Ok(tranche) => { - let current_tranche = - state.clock.tranche_now(state.slot_duration_millis, block_entry.slot()); + Vec::new(), + )), + Ok(tranche) => { + let current_tranche = + state.clock.tranche_now(state.slot_duration_millis, block_entry.slot()); - let too_far_in_future = current_tranche + TICK_TOO_FAR_IN_FUTURE as DelayTranche; + let too_far_in_future = current_tranche + TICK_TOO_FAR_IN_FUTURE as DelayTranche; - if tranche >= too_far_in_future { - return Ok((AssignmentCheckResult::TooFarInFuture, Vec::new())) - } + if tranche >= too_far_in_future { + return Ok((AssignmentCheckResult::TooFarInFuture, Vec::new())) + } - tranche - }, - }; + tranche + }, + }; - check_and_import_assignment_span.add_uint_tag("tranche", tranche as u64); + let mut actions = Vec::new(); + let res = { + let mut is_duplicate = true; + // Import the assignments for all cores in the cert. + for (assigned_candidate_hash, candidate_index) in + assigned_candidate_hashes.iter().zip(candidate_indices.iter_ones()) + { + let mut candidate_entry = match db.load_candidate_entry(&assigned_candidate_hash)? { + Some(c) => c, + None => + return Ok(( + AssignmentCheckResult::Bad(AssignmentCheckError::InvalidCandidate( + candidate_index as _, + *assigned_candidate_hash, + )), + Vec::new(), + )), + }; - let is_duplicate = approval_entry.is_assigned(assignment.validator); - approval_entry.import_assignment(tranche, assignment.validator, tick_now); + let approval_entry = match candidate_entry.approval_entry_mut(&assignment.block_hash) { + Some(a) => a, + None => + return Ok(( + AssignmentCheckResult::Bad(AssignmentCheckError::Internal( + assignment.block_hash, + *assigned_candidate_hash, + )), + Vec::new(), + )), + }; + is_duplicate &= approval_entry.is_assigned(assignment.validator); + approval_entry.import_assignment(tranche, assignment.validator, tick_now); + check_and_import_assignment_span.add_uint_tag("tranche", tranche as u64); + + // We've imported a new assignment, so we need to schedule a wake-up for when that might + // no-show. + if let Some((approval_entry, status)) = state + .approval_status(sender, session_info_provider, &block_entry, &candidate_entry) + .await + { + actions.extend(schedule_wakeup_action( + approval_entry, + block_entry.block_hash(), + block_entry.block_number(), + *assigned_candidate_hash, + status.block_tick, + tick_now, + status.required_tranches, + )); + } + + // We also write the candidate entry as it now contains the new candidate. + db.write_candidate_entry(candidate_entry.into()); + } + // Since we don't account for tranche in distribution message fingerprinting, some + // validators can be assigned to the same core (VRF modulo vs VRF delay). These can be + // safely ignored. 
However, if an assignment is for multiple cores (these are only + // tranche0), we cannot ignore it, because it would mean ignoring other non duplicate + // assignments. if is_duplicate { AssignmentCheckResult::AcceptedDuplicate + } else if candidate_indices.count_ones() > 1 { + gum::trace!( + target: LOG_TARGET, + validator = assignment.validator.0, + candidate_hashes = ?assigned_candidate_hashes, + assigned_cores = ?claimed_core_indices, + ?tranche, + "Imported assignments for multiple cores.", + ); + + AssignmentCheckResult::Accepted } else { gum::trace!( target: LOG_TARGET, validator = assignment.validator.0, - candidate_hash = ?assigned_candidate_hash, - para_id = ?candidate_entry.candidate_receipt().descriptor.para_id, - "Imported assignment.", + candidate_hashes = ?assigned_candidate_hashes, + assigned_cores = ?claimed_core_indices, + "Imported assignment for a single core.", ); AssignmentCheckResult::Accepted } }; - let mut actions = Vec::new(); - - // We've imported a new approval, so we need to schedule a wake-up for when that might no-show. - if let Some((approval_entry, status)) = state - .approval_status(sender, session_info_provider, &block_entry, &candidate_entry) - .await - { - actions.extend(schedule_wakeup_action( - approval_entry, - block_entry.block_hash(), - block_entry.block_number(), - assigned_candidate_hash, - status.block_tick, - tick_now, - status.required_tranches, - )); - } - - // We also write the candidate entry as it now contains the new candidate. - db.write_candidate_entry(candidate_entry.into()); - Ok((res, actions)) } @@ -2341,7 +2548,7 @@ async fn process_wakeup( let candidate_entry = db.load_candidate_entry(&candidate_hash)?; // If either is not present, we have nothing to wakeup. Might have lost a race with finality - let (block_entry, mut candidate_entry) = match (block_entry, candidate_entry) { + let (mut block_entry, mut candidate_entry) = match (block_entry, candidate_entry) { (Some(b), Some(c)) => (b, c), _ => return Ok(Vec::new()), }; @@ -2422,32 +2629,59 @@ async fn process_wakeup( if let Some((cert, val_index, tranche)) = maybe_cert { let indirect_cert = - IndirectAssignmentCert { block_hash: relay_block, validator: val_index, cert }; + IndirectAssignmentCertV2 { block_hash: relay_block, validator: val_index, cert }; - let index_in_candidate = - block_entry.candidates().iter().position(|(_, h)| &candidate_hash == h); + gum::trace!( + target: LOG_TARGET, + ?candidate_hash, + para_id = ?candidate_receipt.descriptor.para_id, + block_hash = ?relay_block, + "Launching approval work.", + ); - if let Some(i) = index_in_candidate { - gum::trace!( + if let Some(claimed_core_indices) = + get_assignment_core_indices(&indirect_cert.cert.kind, &candidate_hash, &block_entry) + { + match cores_to_candidate_indices(&claimed_core_indices, &block_entry) { + Ok(claimed_candidate_indices) => { + // Ensure we distribute multiple core assignments just once. 
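+				// Wakeups are scheduled per candidate, so without this bookkeeping a
+				// multi-core certificate would be re-sent once for every candidate it covers.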
+ let distribute_assignment = if claimed_candidate_indices.count_ones() > 1 { + !block_entry.mark_assignment_distributed(claimed_candidate_indices.clone()) + } else { + true + }; + db.write_block_entry(block_entry.clone()); + + actions.push(Action::LaunchApproval { + claimed_candidate_indices, + candidate_hash, + indirect_cert, + assignment_tranche: tranche, + relay_block_hash: relay_block, + session: block_entry.session(), + executor_params: executor_params.clone(), + candidate: candidate_receipt, + backing_group, + distribute_assignment, + }); + }, + Err(err) => { + // Never happens, it should only happen if no cores are claimed, which is a bug. + gum::warn!( + target: LOG_TARGET, + block_hash = ?relay_block, + ?err, + "Failed to create assignment bitfield" + ); + }, + }; + } else { + gum::warn!( target: LOG_TARGET, - ?candidate_hash, - para_id = ?candidate_receipt.descriptor.para_id, block_hash = ?relay_block, - "Launching approval work.", + ?candidate_hash, + "Cannot get assignment claimed core indices", ); - - // sanity: should always be present. - actions.push(Action::LaunchApproval { - candidate_hash, - indirect_cert, - assignment_tranche: tranche, - relay_block_hash: relay_block, - candidate_index: i as _, - session: block_entry.session(), - executor_params: executor_params.clone(), - candidate: candidate_receipt, - backing_group, - }); } } // Although we checked approval earlier in this function, @@ -2627,15 +2861,15 @@ async fn launch_approval( let (val_tx, val_rx) = oneshot::channel(); sender - .send_message(CandidateValidationMessage::ValidateFromExhaustive( - available_data.validation_data, + .send_message(CandidateValidationMessage::ValidateFromExhaustive { + validation_data: available_data.validation_data, validation_code, - candidate.clone(), - available_data.pov, + candidate_receipt: candidate.clone(), + pov: available_data.pov, executor_params, - PvfExecTimeoutKind::Approval, - val_tx, - )) + exec_kind: PvfExecKind::Approval, + response_sender: val_tx, + }) .await; match val_rx.await { diff --git a/polkadot/node/core/approval-voting/src/ops.rs b/polkadot/node/core/approval-voting/src/ops.rs index 6f57b2f80e8a47facdc664a874d9ee0879889189..a6f0ecf9d1f027ee4f4ef1d0d5480fe26f175607 100644 --- a/polkadot/node/core/approval-voting/src/ops.rs +++ b/polkadot/node/core/approval-voting/src/ops.rs @@ -25,7 +25,7 @@ use polkadot_primitives::{BlockNumber, CandidateHash, CandidateReceipt, GroupInd use std::collections::{hash_map::Entry, BTreeMap, HashMap}; use super::{ - approval_db::v1::{OurAssignment, StoredBlockRange}, + approval_db::v2::{OurAssignment, StoredBlockRange}, backend::{Backend, OverlayedBackend}, persisted_entries::{ApprovalEntry, BlockEntry, CandidateEntry}, LOG_TARGET, diff --git a/polkadot/node/core/approval-voting/src/persisted_entries.rs b/polkadot/node/core/approval-voting/src/persisted_entries.rs index 9b6592220275dedcde80e902c65a8d19d1b47ce2..9cfe1c4cf8da9bb62ec16822ef1bb419f71eb96f 100644 --- a/polkadot/node/core/approval-voting/src/persisted_entries.rs +++ b/polkadot/node/core/approval-voting/src/persisted_entries.rs @@ -20,16 +20,21 @@ //! Within that context, things are plain-old-data. Within this module, //! data and logic are intertwined. 
-use polkadot_node_primitives::approval::{AssignmentCert, DelayTranche, RelayVRFStory}; +use polkadot_node_primitives::approval::{ + v1::{DelayTranche, RelayVRFStory}, + v2::{AssignmentCertV2, CandidateBitfield}, +}; use polkadot_primitives::{ BlockNumber, CandidateHash, CandidateReceipt, CoreIndex, GroupIndex, Hash, SessionIndex, ValidatorIndex, ValidatorSignature, }; use sp_consensus_slots::Slot; -use bitvec::{order::Lsb0 as BitOrderLsb0, slice::BitSlice, vec::BitVec}; +use bitvec::{order::Lsb0 as BitOrderLsb0, slice::BitSlice}; use std::collections::BTreeMap; +use crate::approval_db::v2::Bitfield; + use super::{criteria::OurAssignment, time::Tick}; /// Metadata regarding a specific tranche of assignments for a specific candidate. @@ -53,8 +58,8 @@ impl TrancheEntry { } } -impl From for TrancheEntry { - fn from(entry: crate::approval_db::v1::TrancheEntry) -> Self { +impl From for TrancheEntry { + fn from(entry: crate::approval_db::v2::TrancheEntry) -> Self { TrancheEntry { tranche: entry.tranche, assignments: entry.assignments.into_iter().map(|(v, t)| (v, t.into())).collect(), @@ -62,7 +67,7 @@ impl From for TrancheEntry { } } -impl From for crate::approval_db::v1::TrancheEntry { +impl From for crate::approval_db::v2::TrancheEntry { fn from(entry: TrancheEntry) -> Self { Self { tranche: entry.tranche, @@ -80,7 +85,7 @@ pub struct ApprovalEntry { our_assignment: Option, our_approval_sig: Option, // `n_validators` bits. - assignments: BitVec, + assigned_validators: Bitfield, approved: bool, } @@ -92,10 +97,17 @@ impl ApprovalEntry { our_assignment: Option, our_approval_sig: Option, // `n_validators` bits. - assignments: BitVec, + assigned_validators: Bitfield, approved: bool, ) -> Self { - Self { tranches, backing_group, our_assignment, our_approval_sig, assignments, approved } + Self { + tranches, + backing_group, + our_assignment, + our_approval_sig, + assigned_validators, + approved, + } } // Access our assignment for this approval entry. @@ -107,7 +119,7 @@ impl ApprovalEntry { pub fn trigger_our_assignment( &mut self, tick_now: Tick, - ) -> Option<(AssignmentCert, ValidatorIndex, DelayTranche)> { + ) -> Option<(AssignmentCertV2, ValidatorIndex, DelayTranche)> { let our = self.our_assignment.as_mut().and_then(|a| { if a.triggered() { return None @@ -131,7 +143,10 @@ impl ApprovalEntry { /// Whether a validator is already assigned. pub fn is_assigned(&self, validator_index: ValidatorIndex) -> bool { - self.assignments.get(validator_index.0 as usize).map(|b| *b).unwrap_or(false) + self.assigned_validators + .get(validator_index.0 as usize) + .map(|b| *b) + .unwrap_or(false) } /// Import an assignment. No-op if already assigned on the same tranche. @@ -158,14 +173,14 @@ impl ApprovalEntry { }; self.tranches[idx].assignments.push((validator_index, tick_now)); - self.assignments.set(validator_index.0 as _, true); + self.assigned_validators.set(validator_index.0 as _, true); } // Produce a bitvec indicating the assignments of all validators up to and // including `tranche`. - pub fn assignments_up_to(&self, tranche: DelayTranche) -> BitVec { + pub fn assignments_up_to(&self, tranche: DelayTranche) -> Bitfield { self.tranches.iter().take_while(|e| e.tranche <= tranche).fold( - bitvec::bitvec![u8, BitOrderLsb0; 0; self.assignments.len()], + bitvec::bitvec![u8, BitOrderLsb0; 0; self.assigned_validators.len()], |mut a, e| { for &(v, _) in &e.assignments { a.set(v.0 as _, true); @@ -193,12 +208,12 @@ impl ApprovalEntry { /// Get the number of validators in this approval entry. 
pub fn n_validators(&self) -> usize { - self.assignments.len() + self.assigned_validators.len() } /// Get the number of assignments by validators, including the local validator. pub fn n_assignments(&self) -> usize { - self.assignments.count_ones() + self.assigned_validators.count_ones() } /// Get the backing group index of the approval entry. @@ -219,27 +234,27 @@ impl ApprovalEntry { } } -impl From for ApprovalEntry { - fn from(entry: crate::approval_db::v1::ApprovalEntry) -> Self { +impl From for ApprovalEntry { + fn from(entry: crate::approval_db::v2::ApprovalEntry) -> Self { ApprovalEntry { tranches: entry.tranches.into_iter().map(Into::into).collect(), backing_group: entry.backing_group, our_assignment: entry.our_assignment.map(Into::into), our_approval_sig: entry.our_approval_sig.map(Into::into), - assignments: entry.assignments, + assigned_validators: entry.assigned_validators, approved: entry.approved, } } } -impl From for crate::approval_db::v1::ApprovalEntry { +impl From for crate::approval_db::v2::ApprovalEntry { fn from(entry: ApprovalEntry) -> Self { Self { tranches: entry.tranches.into_iter().map(Into::into).collect(), backing_group: entry.backing_group, our_assignment: entry.our_assignment.map(Into::into), our_approval_sig: entry.our_approval_sig.map(Into::into), - assignments: entry.assignments, + assigned_validators: entry.assigned_validators, approved: entry.approved, } } @@ -253,7 +268,7 @@ pub struct CandidateEntry { // Assignments are based on blocks, so we need to track assignments separately // based on the block we are looking at. pub block_assignments: BTreeMap, - pub approvals: BitVec, + pub approvals: Bitfield, } impl CandidateEntry { @@ -290,8 +305,8 @@ impl CandidateEntry { } } -impl From for CandidateEntry { - fn from(entry: crate::approval_db::v1::CandidateEntry) -> Self { +impl From for CandidateEntry { + fn from(entry: crate::approval_db::v2::CandidateEntry) -> Self { CandidateEntry { candidate: entry.candidate, session: entry.session, @@ -305,7 +320,7 @@ impl From for CandidateEntry { } } -impl From for crate::approval_db::v1::CandidateEntry { +impl From for crate::approval_db::v2::CandidateEntry { fn from(entry: CandidateEntry) -> Self { Self { candidate: entry.candidate, @@ -336,8 +351,12 @@ pub struct BlockEntry { // A bitfield where the i'th bit corresponds to the i'th candidate in `candidates`. // The i'th bit is `true` iff the candidate has been approved in the context of this // block. The block can be considered approved if the bitfield has all bits set to `true`. - pub approved_bitfield: BitVec, + pub approved_bitfield: Bitfield, pub children: Vec, + // A list of assignments for which we already distributed the assignment. + // We use this to ensure we don't distribute multiple core assignments twice as we track + // individual wakeups for each core. + distributed_assignments: Bitfield, } impl BlockEntry { @@ -412,6 +431,39 @@ impl BlockEntry { pub fn parent_hash(&self) -> Hash { self.parent_hash } + + /// Mark distributed assignment for many candidate indices. + /// Returns `true` if an assignment was already distributed for the `candidates`. 
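+	///
+	/// A rough sketch of the intended bookkeeping (hypothetical bit patterns and a
+	/// hypothetical `block_entry`, not taken from real call sites):
+	///
+	/// ```ignore
+	/// // `block_entry` is some freshly imported `BlockEntry` with nothing distributed yet,
+	/// // so marking candidates {0, 1} returns `false`.
+	/// assert!(!block_entry.mark_assignment_distributed(vec![0u32, 1u32].try_into().unwrap()));
+	/// // Candidate 1 is already recorded, so marking it again changes nothing: `true`.
+	/// assert!(block_entry.mark_assignment_distributed(vec![1u32].try_into().unwrap()));
+	/// ```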
+ pub fn mark_assignment_distributed(&mut self, candidates: CandidateBitfield) -> bool { + let bitfield = candidates.into_inner(); + let total_one_bits = self.distributed_assignments.count_ones(); + + let new_len = std::cmp::max(self.distributed_assignments.len(), bitfield.len()); + self.distributed_assignments.resize(new_len, false); + self.distributed_assignments |= bitfield; + + // If the operation did not change our current bitfield, the assignment was already distributed and we return true. + let distributed = total_one_bits == self.distributed_assignments.count_ones(); + + distributed + } +} + +impl From for BlockEntry { + fn from(entry: crate::approval_db::v2::BlockEntry) -> Self { + BlockEntry { + block_hash: entry.block_hash, + parent_hash: entry.parent_hash, + block_number: entry.block_number, + session: entry.session, + slot: entry.slot, + relay_vrf_story: RelayVRFStory(entry.relay_vrf_story), + candidates: entry.candidates, + approved_bitfield: entry.approved_bitfield, + children: entry.children, + distributed_assignments: entry.distributed_assignments, + } + } } impl From for BlockEntry { @@ -426,11 +478,12 @@ impl From for BlockEntry { candidates: entry.candidates, approved_bitfield: entry.approved_bitfield, children: entry.children, + distributed_assignments: Default::default(), } } } -impl From for crate::approval_db::v1::BlockEntry { +impl From for crate::approval_db::v2::BlockEntry { fn from(entry: BlockEntry) -> Self { Self { block_hash: entry.block_hash, @@ -442,6 +495,36 @@ impl From for crate::approval_db::v1::BlockEntry { candidates: entry.candidates, approved_bitfield: entry.approved_bitfield, children: entry.children, + distributed_assignments: entry.distributed_assignments, + } + } +} + +/// Migration helpers. +impl From for CandidateEntry { + fn from(value: crate::approval_db::v1::CandidateEntry) -> Self { + Self { + approvals: value.approvals, + block_assignments: value + .block_assignments + .into_iter() + .map(|(h, ae)| (h, ae.into())) + .collect(), + candidate: value.candidate, + session: value.session, + } + } +} + +impl From for ApprovalEntry { + fn from(value: crate::approval_db::v1::ApprovalEntry) -> Self { + ApprovalEntry { + tranches: value.tranches.into_iter().map(|tranche| tranche.into()).collect(), + backing_group: value.backing_group, + our_assignment: value.our_assignment.map(|assignment| assignment.into()), + our_approval_sig: value.our_approval_sig, + assigned_validators: value.assignments, + approved: value.approved, } } } diff --git a/polkadot/node/core/approval-voting/src/tests.rs b/polkadot/node/core/approval-voting/src/tests.rs index 0b98f28fbbf0439cd0e9eee4aae537a6956a7cab..11bcba9c388277fa47a305106f30aac253913045 100644 --- a/polkadot/node/core/approval-voting/src/tests.rs +++ b/polkadot/node/core/approval-voting/src/tests.rs @@ -16,10 +16,14 @@ use self::test_helpers::mock::new_leaf; use super::*; +use crate::backend::V1ReadBackend; use polkadot_node_primitives::{ approval::{ - AssignmentCert, AssignmentCertKind, DelayTranche, VrfOutput, VrfProof, VrfSignature, - RELAY_VRF_MODULO_CONTEXT, + v1::{ + AssignmentCert, AssignmentCertKind, DelayTranche, VrfOutput, VrfProof, VrfSignature, + RELAY_VRF_MODULO_CONTEXT, + }, + v2::{AssignmentCertKindV2, AssignmentCertV2}, }, AvailableData, BlockData, PoV, }; @@ -52,7 +56,7 @@ use std::{ }; use super::{ - approval_db::v1::StoredBlockRange, + approval_db::v2::StoredBlockRange, backend::BackendWriteOp, import::tests::{ garbage_vrf_signature, AllowedSlots, BabeEpoch, BabeEpochConfiguration, @@ -112,7 +116,7 @@ fn make_sync_oracle(val: bool) -> 
(Box, TestSyncOracleHan #[cfg(test)] pub mod test_constants { - use crate::approval_db::v1::Config as DatabaseConfig; + use crate::approval_db::v2::Config as DatabaseConfig; const DATA_COL: u32 = 0; pub(crate) const NUM_COLUMNS: u32 = 1; @@ -162,7 +166,7 @@ impl Clock for MockClock { // This mock clock allows us to manipulate the time and // be notified when wakeups have been triggered. -#[derive(Default)] +#[derive(Default, Debug)] struct MockClockInner { tick: Tick, wakeups: Vec<(Tick, oneshot::Sender<()>)>, @@ -232,7 +236,7 @@ where fn compute_assignments( &self, _keystore: &LocalKeystore, - _relay_vrf_story: polkadot_node_primitives::approval::RelayVRFStory, + _relay_vrf_story: polkadot_node_primitives::approval::v1::RelayVRFStory, _config: &criteria::Config, _leaving_cores: Vec<( CandidateHash, @@ -245,13 +249,13 @@ where fn check_assignment_cert( &self, - _claimed_core_index: polkadot_primitives::CoreIndex, + _claimed_core_bitfield: polkadot_node_primitives::approval::v2::CoreBitfield, validator_index: ValidatorIndex, _config: &criteria::Config, - _relay_vrf_story: polkadot_node_primitives::approval::RelayVRFStory, - _assignment: &polkadot_node_primitives::approval::AssignmentCert, - _backing_group: polkadot_primitives::GroupIndex, - ) -> Result { + _relay_vrf_story: polkadot_node_primitives::approval::v1::RelayVRFStory, + _assignment: &polkadot_node_primitives::approval::v2::AssignmentCertV2, + _backing_groups: Vec, + ) -> Result { self.1(validator_index) } } @@ -272,6 +276,18 @@ struct TestStoreInner { candidate_entries: HashMap, } +impl V1ReadBackend for TestStoreInner { + fn load_candidate_entry_v1( + &self, + candidate_hash: &CandidateHash, + ) -> SubsystemResult> { + self.load_candidate_entry(candidate_hash) + } + fn load_block_entry_v1(&self, block_hash: &Hash) -> SubsystemResult> { + self.load_block_entry(block_hash) + } +} + impl Backend for TestStoreInner { fn load_block_entry(&self, block_hash: &Hash) -> SubsystemResult> { Ok(self.block_entries.get(block_hash).cloned()) @@ -343,6 +359,18 @@ pub struct TestStore { store: Arc>, } +impl V1ReadBackend for TestStore { + fn load_candidate_entry_v1( + &self, + candidate_hash: &CandidateHash, + ) -> SubsystemResult> { + self.load_candidate_entry(candidate_hash) + } + fn load_block_entry_v1(&self, block_hash: &Hash) -> SubsystemResult> { + self.load_block_entry(block_hash) + } +} + impl Backend for TestStore { fn load_block_entry(&self, block_hash: &Hash) -> SubsystemResult> { let store = self.store.lock(); @@ -392,6 +420,17 @@ fn garbage_assignment_cert(kind: AssignmentCertKind) -> AssignmentCert { AssignmentCert { kind, vrf: VrfSignature { output: VrfOutput(out), proof: VrfProof(proof) } } } +fn garbage_assignment_cert_v2(kind: AssignmentCertKindV2) -> AssignmentCertV2 { + let ctx = schnorrkel::signing_context(RELAY_VRF_MODULO_CONTEXT); + let msg = b"test-garbage"; + let mut prng = rand_core::OsRng; + let keypair = schnorrkel::Keypair::generate_with(&mut prng); + let (inout, proof, _) = keypair.vrf_sign(ctx.bytes(msg)); + let out = inout.to_output(); + + AssignmentCertV2 { kind, vrf: VrfSignature { output: VrfOutput(out), proof: VrfProof(proof) } } +} + fn sign_approval( key: Sr25519Keyring, candidate_hash: CandidateHash, @@ -468,6 +507,12 @@ fn test_harness>( config: HarnessConfig, test: impl FnOnce(TestHarness) -> T, ) { + let _ = env_logger::builder() + .is_test(true) + .filter(Some("polkadot_node_core_approval_voting"), log::LevelFilter::Trace) + .filter(Some(LOG_TARGET), log::LevelFilter::Trace) + .try_init(); + let 
HarnessConfig { sync_oracle, sync_oracle_handle, clock, backend, assignment_criteria } = config; @@ -617,12 +662,13 @@ async fn check_and_import_assignment( overseer, FromOrchestra::Communication { msg: ApprovalVotingMessage::CheckAndImportAssignment( - IndirectAssignmentCert { + IndirectAssignmentCertV2 { block_hash, validator, - cert: garbage_assignment_cert(AssignmentCertKind::RelayVRFModulo { sample: 0 }), + cert: garbage_assignment_cert(AssignmentCertKind::RelayVRFModulo { sample: 0 }) + .into(), }, - candidate_index, + candidate_index.into(), tx, ), }, @@ -631,6 +677,38 @@ async fn check_and_import_assignment( rx } +async fn check_and_import_assignment_v2( + overseer: &mut VirtualOverseer, + block_hash: Hash, + core_indices: Vec, + validator: ValidatorIndex, +) -> oneshot::Receiver { + let (tx, rx) = oneshot::channel(); + overseer_send( + overseer, + FromOrchestra::Communication { + msg: ApprovalVotingMessage::CheckAndImportAssignment( + IndirectAssignmentCertV2 { + block_hash, + validator, + cert: garbage_assignment_cert_v2(AssignmentCertKindV2::RelayVRFModuloCompact { + core_bitfield: core_indices + .clone() + .into_iter() + .map(|c| CoreIndex(c)) + .collect::>() + .try_into() + .unwrap(), + }), + }, + core_indices.try_into().unwrap(), + tx, + ), + }, + ) + .await; + rx +} struct BlockConfig { slot: Slot, candidates: Option>, @@ -743,7 +821,7 @@ fn session_info(keys: &[Sr25519Keyring]) -> SessionInfo { vec![ValidatorIndex(0)], vec![ValidatorIndex(1)], ]), - n_cores: keys.len() as _, + n_cores: 10, needed_approvals: 2, zeroth_delay_tranche_width: 5, relay_vrf_modulo_samples: 3, @@ -1068,14 +1146,15 @@ fn blank_subsystem_act_on_bad_block() { &mut virtual_overseer, FromOrchestra::Communication { msg: ApprovalVotingMessage::CheckAndImportAssignment( - IndirectAssignmentCert { + IndirectAssignmentCertV2 { block_hash: bad_block_hash, validator: 0u32.into(), cert: garbage_assignment_cert(AssignmentCertKind::RelayVRFModulo { sample: 0, - }), + }) + .into(), }, - 0u32, + 0u32.into(), tx, ), }, @@ -1331,9 +1410,22 @@ fn subsystem_accepts_duplicate_assignment() { } ); - let block_hash = Hash::repeat_byte(0x01); - let candidate_index = 0; let validator = ValidatorIndex(0); + let candidate_index = 0; + let block_hash = Hash::repeat_byte(0x01); + + let candidate_receipt1 = { + let mut receipt = dummy_candidate_receipt(block_hash); + receipt.descriptor.para_id = ParaId::from(1_u32); + receipt + }; + let candidate_receipt2 = { + let mut receipt = dummy_candidate_receipt(block_hash); + receipt.descriptor.para_id = ParaId::from(2_u32); + receipt + }; + let candidate_index1 = 0; + let candidate_index2 = 1; // Add block hash 00. ChainBuilder::new() @@ -1341,21 +1433,30 @@ fn subsystem_accepts_duplicate_assignment() { block_hash, ChainBuilder::GENESIS_HASH, 1, - BlockConfig { slot: Slot::from(1), candidates: None, session_info: None }, + BlockConfig { + slot: Slot::from(1), + candidates: Some(vec![ + (candidate_receipt1, CoreIndex(0), GroupIndex(1)), + (candidate_receipt2, CoreIndex(1), GroupIndex(1)), + ]), + session_info: None, + }, ) .build(&mut virtual_overseer) .await; - let rx = check_and_import_assignment( + // Initial assignment. + let rx = check_and_import_assignment_v2( &mut virtual_overseer, block_hash, - candidate_index, + vec![candidate_index1, candidate_index2], validator, ) .await; assert_eq!(rx.await, Ok(AssignmentCheckResult::Accepted)); + // Test with single assigned core. 
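+ // The v1 assignment below targets a candidate already covered by the compact assignment above, so it is reported as a duplicate.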
let rx = check_and_import_assignment( &mut virtual_overseer, block_hash, @@ -1366,6 +1467,18 @@ fn subsystem_accepts_duplicate_assignment() { assert_eq!(rx.await, Ok(AssignmentCheckResult::AcceptedDuplicate)); + // Test with multiple assigned cores. This cannot happen in practice, as tranche0 + // assignments are sent first, but we should still ensure correct behavior. + let rx = check_and_import_assignment_v2( + &mut virtual_overseer, + block_hash, + vec![candidate_index1, candidate_index2], + validator, + ) + .await; + + assert_eq!(rx.await, Ok(AssignmentCheckResult::AcceptedDuplicate)); + virtual_overseer }); } @@ -1416,6 +1529,63 @@ fn subsystem_rejects_assignment_with_unknown_candidate() { }); } +#[test] +fn subsystem_rejects_oversized_bitfields() { + test_harness(HarnessConfig::default(), |test_harness| async move { + let TestHarness { mut virtual_overseer, sync_oracle_handle: _sync_oracle_handle, .. } = + test_harness; + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber(rx)) => { + rx.send(Ok(0)).unwrap(); + } + ); + + let block_hash = Hash::repeat_byte(0x01); + let candidate_index = 10; + let validator = ValidatorIndex(0); + + ChainBuilder::new() + .add_block( + block_hash, + ChainBuilder::GENESIS_HASH, + 1, + BlockConfig { slot: Slot::from(1), candidates: None, session_info: None }, + ) + .build(&mut virtual_overseer) + .await; + + let rx = check_and_import_assignment( + &mut virtual_overseer, + block_hash, + candidate_index, + validator, + ) + .await; + + assert_eq!( + rx.await, + Ok(AssignmentCheckResult::Bad(AssignmentCheckError::InvalidBitfield( + candidate_index as usize + 1 + ))), + ); + + let rx = check_and_import_assignment_v2( + &mut virtual_overseer, + block_hash, + vec![1, 2, 10, 50], + validator, + ) + .await; + + assert_eq!( + rx.await, + Ok(AssignmentCheckResult::Bad(AssignmentCheckError::InvalidBitfield(51))), + ); + virtual_overseer + }); +} + #[test] fn subsystem_accepts_and_imports_approval_after_assignment() { test_harness(HarnessConfig::default(), |test_harness| async move { @@ -1736,14 +1906,15 @@ fn linear_import_act_on_leaf() { &mut virtual_overseer, FromOrchestra::Communication { msg: ApprovalVotingMessage::CheckAndImportAssignment( - IndirectAssignmentCert { + IndirectAssignmentCertV2 { block_hash: head, validator: 0u32.into(), cert: garbage_assignment_cert(AssignmentCertKind::RelayVRFModulo { sample: 0, - }), + }) + .into(), }, - 0u32, + 0u32.into(), tx, ), }, @@ -1806,14 +1977,15 @@ fn forkful_import_at_same_height_act_on_leaf() { &mut virtual_overseer, FromOrchestra::Communication { msg: ApprovalVotingMessage::CheckAndImportAssignment( - IndirectAssignmentCert { + IndirectAssignmentCertV2 { block_hash: head, validator: 0u32.into(), cert: garbage_assignment_cert(AssignmentCertKind::RelayVRFModulo { sample: 0, - }), + }) + .into(), }, - 0u32, + 0u32.into(), tx, ), }, @@ -2257,8 +2429,24 @@ fn subsystem_validate_approvals_cache() { let mut assignments = HashMap::new(); let _ = assignments.insert( CoreIndex(0), - approval_db::v1::OurAssignment { - cert: garbage_assignment_cert(AssignmentCertKind::RelayVRFModulo { sample: 0 }), + approval_db::v2::OurAssignment { + cert: garbage_assignment_cert(AssignmentCertKind::RelayVRFModulo { sample: 0 }) + .into(), + tranche: 0, + validator_index: ValidatorIndex(0), + triggered: false, + } + .into(), + ); + + let _ = assignments.insert( + CoreIndex(0), + approval_db::v2::OurAssignment { + cert: 
garbage_assignment_cert_v2(AssignmentCertKindV2::RelayVRFModuloCompact { + core_bitfield: vec![CoreIndex(0), CoreIndex(1), CoreIndex(2)] + .try_into() + .unwrap(), + }), tranche: 0, validator_index: ValidatorIndex(0), triggered: false, @@ -2355,6 +2543,137 @@ fn subsystem_validate_approvals_cache() { }); } +#[test] +fn subsystem_doesnt_distribute_duplicate_compact_assignments() { + let assignment_criteria = Box::new(MockAssignmentCriteria( + || { + let mut assignments = HashMap::new(); + let cert = garbage_assignment_cert_v2(AssignmentCertKindV2::RelayVRFModuloCompact { + core_bitfield: vec![CoreIndex(0), CoreIndex(1)].try_into().unwrap(), + }); + + let _ = assignments.insert( + CoreIndex(0), + approval_db::v2::OurAssignment { + cert: cert.clone(), + tranche: 0, + validator_index: ValidatorIndex(0), + triggered: false, + } + .into(), + ); + + let _ = assignments.insert( + CoreIndex(1), + approval_db::v2::OurAssignment { + cert, + tranche: 0, + validator_index: ValidatorIndex(0), + triggered: false, + } + .into(), + ); + assignments + }, + |_| Ok(0), + )); + + let config = HarnessConfigBuilder::default().assignment_criteria(assignment_criteria).build(); + let store = config.backend(); + + test_harness(config, |test_harness| async move { + let TestHarness { + mut virtual_overseer, + sync_oracle_handle: _sync_oracle_handle, + clock, + .. + } = test_harness; + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber(rx)) => { + rx.send(Ok(0)).unwrap(); + } + ); + + let block_hash = Hash::repeat_byte(0x01); + + let candidate_receipt1 = { + let mut receipt = dummy_candidate_receipt(block_hash); + receipt.descriptor.para_id = ParaId::from(1_u32); + receipt + }; + let candidate_receipt2 = { + let mut receipt = dummy_candidate_receipt(block_hash); + receipt.descriptor.para_id = ParaId::from(2_u32); + receipt + }; + let candidate_index1 = 0; + let candidate_index2 = 1; + + // Add block hash 00. + ChainBuilder::new() + .add_block( + block_hash, + ChainBuilder::GENESIS_HASH, + 1, + BlockConfig { + slot: Slot::from(0), + candidates: Some(vec![ + (candidate_receipt1.clone(), CoreIndex(0), GroupIndex(1)), + (candidate_receipt2.clone(), CoreIndex(1), GroupIndex(1)), + ]), + session_info: None, + }, + ) + .build(&mut virtual_overseer) + .await; + + // Activate the wakeup present above, and sleep to allow process_wakeups to execute.. + assert_eq!(Some(2), clock.inner.lock().next_wakeup()); + gum::trace!("clock \n{:?}\n", clock.inner.lock()); + + clock.inner.lock().wakeup_all(100); + + assert_eq!(clock.inner.lock().wakeups.len(), 0); + + futures_timer::Delay::new(Duration::from_millis(100)).await; + + // Assignment is distributed only once from `approval-voting` + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + c_indices, + )) => { + assert_eq!(c_indices, vec![candidate_index1, candidate_index2].try_into().unwrap()); + } + ); + + // Candidate 1 + recover_available_data(&mut virtual_overseer).await; + fetch_validation_code(&mut virtual_overseer).await; + + // Candidate 2 + recover_available_data(&mut virtual_overseer).await; + fetch_validation_code(&mut virtual_overseer).await; + + // Check if assignment was triggered for candidate 1. 
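+ // A single compact certificate covers both cores, but each candidate entry keeps its own `OurAssignment`, so both should be marked as triggered.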
+ let candidate_entry = + store.load_candidate_entry(&candidate_receipt1.hash()).unwrap().unwrap(); + let our_assignment = + candidate_entry.approval_entry(&block_hash).unwrap().our_assignment().unwrap(); + assert!(our_assignment.triggered()); + + // Check if assignment was triggered for candidate 2. + let candidate_entry = + store.load_candidate_entry(&candidate_receipt2.hash()).unwrap().unwrap(); + let our_assignment = + candidate_entry.approval_entry(&block_hash).unwrap().our_assignment().unwrap(); + assert!(our_assignment.triggered()); + virtual_overseer + }); +} + /// Ensure that when two assignments are imported, only one triggers the Approval Checking work async fn handle_double_assignment_import( virtual_overseer: &mut VirtualOverseer, @@ -2364,9 +2683,9 @@ async fn handle_double_assignment_import( overseer_recv(virtual_overseer).await, AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( _, - c_index, + c_indices, )) => { - assert_eq!(candidate_index, c_index); + assert_eq!(Into::::into(candidate_index), c_indices); } ); @@ -2379,14 +2698,18 @@ async fn handle_double_assignment_import( _, c_index )) => { - assert_eq!(candidate_index, c_index); + assert_eq!(Into::::into(candidate_index), c_index); } ); assert_matches!( overseer_recv(virtual_overseer).await, - AllMessages::CandidateValidation(CandidateValidationMessage::ValidateFromExhaustive(_, _, _, _, _, timeout, tx)) if timeout == PvfExecTimeoutKind::Approval => { - tx.send(Ok(ValidationResult::Valid(Default::default(), Default::default()))) + AllMessages::CandidateValidation(CandidateValidationMessage::ValidateFromExhaustive { + exec_kind, + response_sender, + .. + }) if exec_kind == PvfExecKind::Approval => { + response_sender.send(Ok(ValidationResult::Valid(Default::default(), Default::default()))) .unwrap(); } ); @@ -2400,7 +2723,6 @@ async fn handle_double_assignment_import( overseer_recv(virtual_overseer).await, AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeApproval(_)) ); - // Assert that there are no more messages being sent by the subsystem assert!(overseer_recv(virtual_overseer).timeout(TIMEOUT / 2).await.is_none()); } @@ -2475,8 +2797,9 @@ where let mut assignments = HashMap::new(); let _ = assignments.insert( CoreIndex(0), - approval_db::v1::OurAssignment { - cert: garbage_assignment_cert(AssignmentCertKind::RelayVRFModulo { sample: 0 }), + approval_db::v2::OurAssignment { + cert: garbage_assignment_cert(AssignmentCertKind::RelayVRFModulo { sample: 0 }) + .into(), tranche: our_assigned_tranche, validator_index: ValidatorIndex(0), triggered: false, @@ -2610,14 +2933,12 @@ async fn step_until_done(clock: &MockClock) { futures_timer::Delay::new(Duration::from_millis(200)).await; let mut clock = clock.inner.lock(); if let Some(tick) = clock.next_wakeup() { - println!("TICK: {:?}", tick); relevant_ticks.push(tick); clock.set_tick(tick); } else { break } } - println!("relevant_ticks: {:?}", relevant_ticks); } #[test] diff --git a/polkadot/node/core/approval-voting/src/time.rs b/polkadot/node/core/approval-voting/src/time.rs index 34132dc22b2338c168dd18a21b31fd812d49229e..a45866402c827e7e91ad6fd44bc7561be35e5d30 100644 --- a/polkadot/node/core/approval-voting/src/time.rs +++ b/polkadot/node/core/approval-voting/src/time.rs @@ -17,7 +17,7 @@ //! Time utilities for approval voting. 
use futures::prelude::*; -use polkadot_node_primitives::approval::DelayTranche; +use polkadot_node_primitives::approval::v1::DelayTranche; use sp_consensus_slots::Slot; use std::{ pin::Pin, diff --git a/polkadot/node/core/av-store/Cargo.toml b/polkadot/node/core/av-store/Cargo.toml index 955fe37d7c39e0cc4a840fe7bdd8e2a98be364db..3fa81d064a883608b556343b5c07f4e55af1b8c9 100644 --- a/polkadot/node/core/av-store/Cargo.toml +++ b/polkadot/node/core/av-store/Cargo.toml @@ -1,5 +1,6 @@ [package] name = "polkadot-node-core-av-store" +description = "The Availability Store subsystem. Wrapper over the DB that stores availability data and chunks." version = "1.0.0" authors.workspace = true edition.workspace = true diff --git a/polkadot/node/core/backing/Cargo.toml b/polkadot/node/core/backing/Cargo.toml index e7e6358e8a46ea546fe1f97d4e01a094db7fce63..7a6ce5de8cb18777c8832c561327aa15d127b9cc 100644 --- a/polkadot/node/core/backing/Cargo.toml +++ b/polkadot/node/core/backing/Cargo.toml @@ -4,6 +4,7 @@ version = "1.0.0" authors.workspace = true edition.workspace = true license.workspace = true +description = "The Candidate Backing Subsystem. Tracks parachain candidates that can be backed, as well as the issuance of statements about candidates." [dependencies] futures = "0.3.21" diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index 97efb3ba8089fc9ece8a53b5e8c705dd66c5dd1e..434051f1b00f490504f1384cda6ecb03f4fc7703 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -106,7 +106,7 @@ use polkadot_node_subsystem_util::{ use polkadot_primitives::{ BackedCandidate, CandidateCommitments, CandidateHash, CandidateReceipt, CommittedCandidateReceipt, CoreIndex, CoreState, ExecutorParams, Hash, Id as ParaId, - PersistedValidationData, PvfExecTimeoutKind, SigningContext, ValidationCode, ValidatorId, + PersistedValidationData, PvfExecKind, SigningContext, ValidationCode, ValidatorId, ValidatorIndex, ValidatorSignature, ValidityAttestation, }; use sp_keystore::KeystorePtr; @@ -551,8 +551,8 @@ async fn request_pov( async fn request_candidate_validation( sender: &mut impl overseer::CandidateBackingSenderTrait, - pvd: PersistedValidationData, - code: ValidationCode, + validation_data: PersistedValidationData, + validation_code: ValidationCode, candidate_receipt: CandidateReceipt, pov: Arc, executor_params: ExecutorParams, @@ -560,15 +560,15 @@ async fn request_candidate_validation( let (tx, rx) = oneshot::channel(); sender - .send_message(CandidateValidationMessage::ValidateFromExhaustive( - pvd, - code, + .send_message(CandidateValidationMessage::ValidateFromExhaustive { + validation_data, + validation_code, candidate_receipt, pov, executor_params, - PvfExecTimeoutKind::Backing, - tx, - )) + exec_kind: PvfExecKind::Backing, + response_sender: tx, + }) .await; match rx.await { @@ -1574,7 +1574,7 @@ async fn post_import_statement_actions( ctx: &mut Context, rp_state: &mut PerRelayParentState, summary: Option<&TableSummary>, -) -> Result<(), Error> { +) { if let Some(attested) = summary.as_ref().and_then(|s| { rp_state.table.attested_candidate( &s.candidate, @@ -1630,8 +1630,6 @@ async fn post_import_statement_actions( } issue_new_misbehaviors(ctx, rp_state.parent, &mut rp_state.table); - - Ok(()) } /// Check if there have happened any new misbehaviors and issue necessary messages. 
@@ -1674,7 +1672,7 @@ async fn sign_import_and_distribute_statement( let smsg = StatementDistributionMessage::Share(rp_state.parent, signed_statement.clone()); ctx.send_unbounded_message(smsg); - post_import_statement_actions(ctx, rp_state, summary.as_ref()).await?; + post_import_statement_actions(ctx, rp_state, summary.as_ref()).await; Ok(Some(signed_statement)) } else { @@ -1800,7 +1798,7 @@ async fn maybe_validate_and_import( } let summary = res?; - post_import_statement_actions(ctx, rp_state, summary.as_ref()).await?; + post_import_statement_actions(ctx, rp_state, summary.as_ref()).await; if let Some(summary) = summary { // import_statement already takes care of communicating with the diff --git a/polkadot/node/core/backing/src/tests/mod.rs b/polkadot/node/core/backing/src/tests/mod.rs index bdc8b3fa1af84ddba22afd719976b6e0dfb9f831..c12be72556e36dfe62af71056bea379aa934de35 100644 --- a/polkadot/node/core/backing/src/tests/mod.rs +++ b/polkadot/node/core/backing/src/tests/mod.rs @@ -33,7 +33,7 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_primitives::{ - CandidateDescriptor, GroupRotationInfo, HeadData, PersistedValidationData, PvfExecTimeoutKind, + CandidateDescriptor, GroupRotationInfo, HeadData, PersistedValidationData, PvfExecKind, ScheduledCore, SessionIndex, LEGACY_MIN_BACKING_VOTES, }; use sp_application_crypto::AppCrypto; @@ -329,32 +329,32 @@ async fn assert_validation_requests( async fn assert_validate_from_exhaustive( virtual_overseer: &mut VirtualOverseer, - pvd: &PersistedValidationData, - pov: &PoV, - validation_code: &ValidationCode, - candidate: &CommittedCandidateReceipt, + assert_pvd: &PersistedValidationData, + assert_pov: &PoV, + assert_validation_code: &ValidationCode, + assert_candidate: &CommittedCandidateReceipt, expected_head_data: &HeadData, result_validation_data: PersistedValidationData, ) { assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive( - _pvd, - _validation_code, + CandidateValidationMessage::ValidateFromExhaustive { + pov, + validation_data, + validation_code, candidate_receipt, - _pov, - _, - timeout, - tx, - ), - ) if _pvd == *pvd && - _validation_code == *validation_code && - *_pov == *pov && &candidate_receipt.descriptor == candidate.descriptor() && - timeout == PvfExecTimeoutKind::Backing && - candidate.commitments.hash() == candidate_receipt.commitments_hash => + exec_kind, + response_sender, + .. 
+ }, + ) if validation_data == *assert_pvd && + validation_code == *assert_validation_code && + *pov == *assert_pov && &candidate_receipt.descriptor == assert_candidate.descriptor() && + exec_kind == PvfExecKind::Backing && + candidate_receipt.commitments_hash == assert_candidate.commitments.hash() => { - tx.send(Ok(ValidationResult::Valid( + response_sender.send(Ok(ValidationResult::Valid( CandidateCommitments { head_data: expected_head_data.clone(), horizontal_messages: Default::default(), @@ -461,11 +461,11 @@ fn backing_works() { test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { test_startup(&mut virtual_overseer, &test_state).await; - let pov = PoV { block_data: BlockData(vec![1, 2, 3]) }; - let pvd = dummy_pvd(); - let validation_code = ValidationCode(vec![1, 2, 3]); + let pov_ab = PoV { block_data: BlockData(vec![1, 2, 3]) }; + let pvd_ab = dummy_pvd(); + let validation_code_ab = ValidationCode(vec![1, 2, 3]); - let pov_hash = pov.hash(); + let pov_hash = pov_ab.hash(); let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap(); @@ -474,8 +474,8 @@ fn backing_works() { relay_parent: test_state.relay_parent, pov_hash, head_data: expected_head_data.clone(), - erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), - validation_code: validation_code.0.clone(), + erasure_root: make_erasure_root(&test_state, pov_ab.clone(), pvd_ab.clone()), + validation_code: validation_code_ab.0.clone(), ..Default::default() } .build(); @@ -498,7 +498,7 @@ fn backing_works() { let signed_a = SignedFullStatementWithPVD::sign( &test_state.keystore, - StatementWithPVD::Seconded(candidate_a.clone(), pvd.clone()), + StatementWithPVD::Seconded(candidate_a.clone(), pvd_ab.clone()), &test_state.signing_context, ValidatorIndex(2), &public2.into(), @@ -523,7 +523,7 @@ fn backing_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_validation_requests(&mut virtual_overseer, validation_code.clone()).await; + assert_validation_requests(&mut virtual_overseer, validation_code_ab.clone()).await; // Sending a `Statement::Seconded` for our assignment will start // validation process. The first thing requested is the PoV. @@ -536,7 +536,7 @@ fn backing_works() { .. } ) if relay_parent == test_state.relay_parent => { - tx.send(pov.clone()).unwrap(); + tx.send(pov_ab.clone()).unwrap(); } ); @@ -545,22 +545,22 @@ fn backing_works() { assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive( - _pvd, - _validation_code, + CandidateValidationMessage::ValidateFromExhaustive { + validation_data, + validation_code, candidate_receipt, - _pov, - _, - timeout, - tx, - ), - ) if _pvd == pvd && - _validation_code == validation_code && - *_pov == pov && &candidate_receipt.descriptor == candidate_a.descriptor() && - timeout == PvfExecTimeoutKind::Backing && - candidate_a_commitments_hash == candidate_receipt.commitments_hash => + pov, + exec_kind, + response_sender, + .. 
+ }, + ) if validation_data == pvd_ab && + validation_code == validation_code_ab && + *pov == pov_ab && &candidate_receipt.descriptor == candidate_a.descriptor() && + exec_kind == PvfExecKind::Backing && + candidate_receipt.commitments_hash == candidate_a_commitments_hash => { - tx.send(Ok( + response_sender.send(Ok( ValidationResult::Valid(CandidateCommitments { head_data: expected_head_data.clone(), upward_messages: Default::default(), @@ -623,11 +623,11 @@ fn backing_works_while_validation_ongoing() { test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { test_startup(&mut virtual_overseer, &test_state).await; - let pov = PoV { block_data: BlockData(vec![1, 2, 3]) }; - let pvd = dummy_pvd(); - let validation_code = ValidationCode(vec![1, 2, 3]); + let pov_abc = PoV { block_data: BlockData(vec![1, 2, 3]) }; + let pvd_abc = dummy_pvd(); + let validation_code_abc = ValidationCode(vec![1, 2, 3]); - let pov_hash = pov.hash(); + let pov_hash = pov_abc.hash(); let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap(); @@ -636,8 +636,8 @@ fn backing_works_while_validation_ongoing() { relay_parent: test_state.relay_parent, pov_hash, head_data: expected_head_data.clone(), - erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), - validation_code: validation_code.0.clone(), + erasure_root: make_erasure_root(&test_state, pov_abc.clone(), pvd_abc.clone()), + validation_code: validation_code_abc.0.clone(), ..Default::default() } .build(); @@ -666,7 +666,7 @@ fn backing_works_while_validation_ongoing() { let signed_a = SignedFullStatementWithPVD::sign( &test_state.keystore, - StatementWithPVD::Seconded(candidate_a.clone(), pvd.clone()), + StatementWithPVD::Seconded(candidate_a.clone(), pvd_abc.clone()), &test_state.signing_context, ValidatorIndex(2), &public2.into(), @@ -701,7 +701,7 @@ fn backing_works_while_validation_ongoing() { CandidateBackingMessage::Statement(test_state.relay_parent, signed_a.clone()); virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_validation_requests(&mut virtual_overseer, validation_code.clone()).await; + assert_validation_requests(&mut virtual_overseer, validation_code_abc.clone()).await; // Sending a `Statement::Seconded` for our assignment will start // validation process. The first thing requested is PoV from the @@ -715,7 +715,7 @@ fn backing_works_while_validation_ongoing() { .. } ) if relay_parent == test_state.relay_parent => { - tx.send(pov.clone()).unwrap(); + tx.send(pov_abc.clone()).unwrap(); } ); @@ -724,24 +724,24 @@ fn backing_works_while_validation_ongoing() { assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive( - _pvd, - _validation_code, + CandidateValidationMessage::ValidateFromExhaustive { + validation_data, + validation_code, candidate_receipt, - _pov, - _, - timeout, - tx, - ), - ) if _pvd == pvd && - _validation_code == validation_code && - *_pov == pov && &candidate_receipt.descriptor == candidate_a.descriptor() && - timeout == PvfExecTimeoutKind::Backing && + pov, + exec_kind, + response_sender, + .. + }, + ) if validation_data == pvd_abc && + validation_code == validation_code_abc && + *pov == pov_abc && &candidate_receipt.descriptor == candidate_a.descriptor() && + exec_kind == PvfExecKind::Backing && candidate_a_commitments_hash == candidate_receipt.commitments_hash => { // we never validate the candidate. our local node // shouldn't issue any statements. 
- std::mem::forget(tx); + std::mem::forget(response_sender); } ); @@ -812,11 +812,11 @@ fn backing_misbehavior_works() { test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { test_startup(&mut virtual_overseer, &test_state).await; - let pov = PoV { block_data: BlockData(vec![1, 2, 3]) }; + let pov_a = PoV { block_data: BlockData(vec![1, 2, 3]) }; - let pov_hash = pov.hash(); - let pvd = dummy_pvd(); - let validation_code = ValidationCode(vec![1, 2, 3]); + let pov_hash = pov_a.hash(); + let pvd_a = dummy_pvd(); + let validation_code_a = ValidationCode(vec![1, 2, 3]); let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap(); @@ -824,9 +824,9 @@ fn backing_misbehavior_works() { para_id: test_state.chain_ids[0], relay_parent: test_state.relay_parent, pov_hash, - erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + erasure_root: make_erasure_root(&test_state, pov_a.clone(), pvd_a.clone()), head_data: expected_head_data.clone(), - validation_code: validation_code.0.clone(), + validation_code: validation_code_a.0.clone(), ..Default::default() } .build(); @@ -842,7 +842,7 @@ fn backing_misbehavior_works() { .expect("Insert key into keystore"); let seconded_2 = SignedFullStatementWithPVD::sign( &test_state.keystore, - StatementWithPVD::Seconded(candidate_a.clone(), pvd.clone()), + StatementWithPVD::Seconded(candidate_a.clone(), pvd_a.clone()), &test_state.signing_context, ValidatorIndex(2), &public2.into(), @@ -867,7 +867,7 @@ fn backing_misbehavior_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_validation_requests(&mut virtual_overseer, validation_code.clone()).await; + assert_validation_requests(&mut virtual_overseer, validation_code_a.clone()).await; assert_matches!( virtual_overseer.recv().await, @@ -878,29 +878,29 @@ fn backing_misbehavior_works() { .. } ) if relay_parent == test_state.relay_parent => { - tx.send(pov.clone()).unwrap(); + tx.send(pov_a.clone()).unwrap(); } ); assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive( - _pvd, - _validation_code, + CandidateValidationMessage::ValidateFromExhaustive { + validation_data, + validation_code, candidate_receipt, - _pov, - _, - timeout, - tx, - ), - ) if _pvd == pvd && - _validation_code == validation_code && - *_pov == pov && &candidate_receipt.descriptor == candidate_a.descriptor() && - timeout == PvfExecTimeoutKind::Backing && + pov, + exec_kind, + response_sender, + .. 
+ }, + ) if validation_data == pvd_a && + validation_code == validation_code_a && + *pov == pov_a && &candidate_receipt.descriptor == candidate_a.descriptor() && + exec_kind == PvfExecKind::Backing && candidate_a_commitments_hash == candidate_receipt.commitments_hash => { - tx.send(Ok( + response_sender.send(Ok( ValidationResult::Valid(CandidateCommitments { head_data: expected_head_data.clone(), upward_messages: Default::default(), @@ -1052,22 +1052,22 @@ fn backing_dont_second_invalid() { assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive( - _pvd, - _validation_code, + CandidateValidationMessage::ValidateFromExhaustive { + validation_data, + validation_code, candidate_receipt, - _pov, - _, - timeout, - tx, - ), - ) if _pvd == pvd_a && - _validation_code == validation_code_a && - *_pov == pov_block_a && &candidate_receipt.descriptor == candidate_a.descriptor() && - timeout == PvfExecTimeoutKind::Backing && + pov, + exec_kind, + response_sender, + .. + }, + ) if validation_data == pvd_a && + validation_code == validation_code_a && + *pov == pov_block_a && &candidate_receipt.descriptor == candidate_a.descriptor() && + exec_kind == PvfExecKind::Backing && candidate_a.commitments.hash() == candidate_receipt.commitments_hash => { - tx.send(Ok(ValidationResult::Invalid(InvalidCandidate::BadReturn))).unwrap(); + response_sender.send(Ok(ValidationResult::Invalid(InvalidCandidate::BadReturn))).unwrap(); } ); @@ -1092,22 +1092,22 @@ fn backing_dont_second_invalid() { assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive( - pvd, - _validation_code, + CandidateValidationMessage::ValidateFromExhaustive { + validation_data, + validation_code, candidate_receipt, - _pov, - _, - timeout, - tx, - ), - ) if pvd == pvd_b && - _validation_code == validation_code_b && - *_pov == pov_block_b && &candidate_receipt.descriptor == candidate_b.descriptor() && - timeout == PvfExecTimeoutKind::Backing && + pov, + exec_kind, + response_sender, + .. 
+ }, + ) if validation_data == pvd_b && + validation_code == validation_code_b && + *pov == pov_block_b && &candidate_receipt.descriptor == candidate_b.descriptor() && + exec_kind == PvfExecKind::Backing && candidate_b.commitments.hash() == candidate_receipt.commitments_hash => { - tx.send(Ok( + response_sender.send(Ok( ValidationResult::Valid(CandidateCommitments { head_data: expected_head_data.clone(), upward_messages: Default::default(), @@ -1158,19 +1158,19 @@ fn backing_second_after_first_fails_works() { test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { test_startup(&mut virtual_overseer, &test_state).await; - let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; - let pvd = dummy_pvd(); - let validation_code = ValidationCode(vec![1, 2, 3]); + let pov_a = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd_a = dummy_pvd(); + let validation_code_a = ValidationCode(vec![1, 2, 3]); - let pov_hash = pov.hash(); + let pov_hash = pov_a.hash(); let candidate = TestCandidateBuilder { para_id: test_state.chain_ids[0], relay_parent: test_state.relay_parent, pov_hash, - erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), - persisted_validation_data_hash: pvd.hash(), - validation_code: validation_code.0.clone(), + erasure_root: make_erasure_root(&test_state, pov_a.clone(), pvd_a.clone()), + persisted_validation_data_hash: pvd_a.hash(), + validation_code: validation_code_a.0.clone(), ..Default::default() } .build(); @@ -1184,7 +1184,7 @@ fn backing_second_after_first_fails_works() { let signed_a = SignedFullStatementWithPVD::sign( &test_state.keystore, - StatementWithPVD::Seconded(candidate.clone(), pvd.clone()), + StatementWithPVD::Seconded(candidate.clone(), pvd_a.clone()), &test_state.signing_context, ValidatorIndex(2), &validator2.into(), @@ -1199,7 +1199,7 @@ fn backing_second_after_first_fails_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_validation_requests(&mut virtual_overseer, validation_code.clone()).await; + assert_validation_requests(&mut virtual_overseer, validation_code_a.clone()).await; // Subsystem requests PoV and requests validation. assert_matches!( @@ -1211,7 +1211,7 @@ fn backing_second_after_first_fails_works() { .. } ) if relay_parent == test_state.relay_parent => { - tx.send(pov.clone()).unwrap(); + tx.send(pov_a.clone()).unwrap(); } ); @@ -1219,22 +1219,22 @@ fn backing_second_after_first_fails_works() { assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive( - _pvd, - _validation_code, + CandidateValidationMessage::ValidateFromExhaustive { + validation_data, + validation_code, candidate_receipt, - _pov, - _, - timeout, - tx, - ), - ) if _pvd == pvd && - _validation_code == validation_code && - *_pov == pov && &candidate_receipt.descriptor == candidate.descriptor() && - timeout == PvfExecTimeoutKind::Backing && + pov, + exec_kind, + response_sender, + .. 
+ }, + ) if validation_data == pvd_a && + validation_code == validation_code_a && + *pov == pov_a && &candidate_receipt.descriptor == candidate.descriptor() && + exec_kind == PvfExecKind::Backing && candidate.commitments.hash() == candidate_receipt.commitments_hash => { - tx.send(Ok(ValidationResult::Invalid(InvalidCandidate::BadReturn))).unwrap(); + response_sender.send(Ok(ValidationResult::Invalid(InvalidCandidate::BadReturn))).unwrap(); } ); @@ -1243,8 +1243,8 @@ fn backing_second_after_first_fails_works() { let second = CandidateBackingMessage::Second( test_state.relay_parent, candidate.to_plain(), - pvd.clone(), - pov.clone(), + pvd_a.clone(), + pov_a.clone(), ); virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; @@ -1287,7 +1287,7 @@ fn backing_second_after_first_fails_works() { assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive(_, _, _, pov, ..), + CandidateValidationMessage::ValidateFromExhaustive { pov, .. }, ) => { assert_eq!(&*pov, &pov_to_second); } @@ -1304,18 +1304,18 @@ fn backing_works_after_failed_validation() { test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { test_startup(&mut virtual_overseer, &test_state).await; - let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; - let pvd = dummy_pvd(); - let validation_code = ValidationCode(vec![1, 2, 3]); + let pov_a = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd_a = dummy_pvd(); + let validation_code_a = ValidationCode(vec![1, 2, 3]); - let pov_hash = pov.hash(); + let pov_hash = pov_a.hash(); let candidate = TestCandidateBuilder { para_id: test_state.chain_ids[0], relay_parent: test_state.relay_parent, pov_hash, - erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), - validation_code: validation_code.0.clone(), + erasure_root: make_erasure_root(&test_state, pov_a.clone(), pvd_a.clone()), + validation_code: validation_code_a.0.clone(), ..Default::default() } .build(); @@ -1328,7 +1328,7 @@ fn backing_works_after_failed_validation() { .expect("Insert key into keystore"); let signed_a = SignedFullStatementWithPVD::sign( &test_state.keystore, - StatementWithPVD::Seconded(candidate.clone(), pvd.clone()), + StatementWithPVD::Seconded(candidate.clone(), pvd_a.clone()), &test_state.signing_context, ValidatorIndex(2), &public2.into(), @@ -1343,7 +1343,7 @@ fn backing_works_after_failed_validation() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_validation_requests(&mut virtual_overseer, validation_code.clone()).await; + assert_validation_requests(&mut virtual_overseer, validation_code_a.clone()).await; // Subsystem requests PoV and requests validation. assert_matches!( @@ -1355,7 +1355,7 @@ fn backing_works_after_failed_validation() { .. 
} ) if relay_parent == test_state.relay_parent => { - tx.send(pov.clone()).unwrap(); + tx.send(pov_a.clone()).unwrap(); } ); @@ -1363,22 +1363,22 @@ fn backing_works_after_failed_validation() { assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive( - _pvd, - _validation_code, + CandidateValidationMessage::ValidateFromExhaustive { + validation_data, + validation_code, candidate_receipt, - _pov, - _, - timeout, - tx, - ), - ) if _pvd == pvd && - _validation_code == validation_code && - *_pov == pov && &candidate_receipt.descriptor == candidate.descriptor() && - timeout == PvfExecTimeoutKind::Backing && + pov, + exec_kind, + response_sender, + .. + }, + ) if validation_data == pvd_a && + validation_code == validation_code_a && + *pov == pov_a && &candidate_receipt.descriptor == candidate.descriptor() && + exec_kind == PvfExecKind::Backing && candidate.commitments.hash() == candidate_receipt.commitments_hash => { - tx.send(Err(ValidationFailed("Internal test error".into()))).unwrap(); + response_sender.send(Err(ValidationFailed("Internal test error".into()))).unwrap(); } ); @@ -1475,19 +1475,19 @@ fn retry_works() { test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { test_startup(&mut virtual_overseer, &test_state).await; - let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; - let pvd = dummy_pvd(); - let validation_code = ValidationCode(vec![1, 2, 3]); + let pov_a = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd_a = dummy_pvd(); + let validation_code_a = ValidationCode(vec![1, 2, 3]); - let pov_hash = pov.hash(); + let pov_hash = pov_a.hash(); let candidate = TestCandidateBuilder { para_id: test_state.chain_ids[0], relay_parent: test_state.relay_parent, pov_hash, - erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), - persisted_validation_data_hash: pvd.hash(), - validation_code: validation_code.0.clone(), + erasure_root: make_erasure_root(&test_state, pov_a.clone(), pvd_a.clone()), + persisted_validation_data_hash: pvd_a.hash(), + validation_code: validation_code_a.0.clone(), ..Default::default() } .build(); @@ -1512,7 +1512,7 @@ fn retry_works() { .expect("Insert key into keystore"); let signed_a = SignedFullStatementWithPVD::sign( &test_state.keystore, - StatementWithPVD::Seconded(candidate.clone(), pvd.clone()), + StatementWithPVD::Seconded(candidate.clone(), pvd_a.clone()), &test_state.signing_context, ValidatorIndex(2), &public2.into(), @@ -1546,7 +1546,7 @@ fn retry_works() { CandidateBackingMessage::Statement(test_state.relay_parent, signed_a.clone()); virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_validation_requests(&mut virtual_overseer, validation_code.clone()).await; + assert_validation_requests(&mut virtual_overseer, validation_code_a.clone()).await; // Subsystem requests PoV and requests validation. // We cancel - should mean retry on next backing statement. 
@@ -1584,8 +1584,8 @@ fn retry_works() { AllMessages::RuntimeApi(RuntimeApiMessage::Request( _, RuntimeApiRequest::ValidationCodeByHash(hash, tx), - )) if hash == validation_code.hash() => { - tx.send(Ok(Some(validation_code.clone()))).unwrap(); + )) if hash == validation_code_a.hash() => { + tx.send(Ok(Some(validation_code_a.clone()))).unwrap(); }, AllMessages::RuntimeApi(RuntimeApiMessage::Request( _, @@ -1595,8 +1595,8 @@ fn retry_works() { }, AllMessages::RuntimeApi(RuntimeApiMessage::Request( _, - RuntimeApiRequest::SessionExecutorParams(sess_idx, tx), - )) if sess_idx == 1 => { + RuntimeApiRequest::SessionExecutorParams(1, tx), + )) => { tx.send(Ok(Some(ExecutorParams::default()))).unwrap(); }, msg => { @@ -1609,7 +1609,7 @@ fn retry_works() { CandidateBackingMessage::Statement(test_state.relay_parent, signed_c.clone()); virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_validation_requests(&mut virtual_overseer, validation_code.clone()).await; + assert_validation_requests(&mut virtual_overseer, validation_code_a.clone()).await; assert_matches!( virtual_overseer.recv().await, @@ -1622,26 +1622,25 @@ fn retry_works() { // Subsystem requests PoV and requests validation. // Now we pass. ) if relay_parent == test_state.relay_parent => { - tx.send(pov.clone()).unwrap(); + tx.send(pov_a.clone()).unwrap(); } ); assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive( - _pvd, - _validation_code, + CandidateValidationMessage::ValidateFromExhaustive { + validation_data, + validation_code, candidate_receipt, - _pov, - _, - timeout, + pov, + exec_kind, .. - ), - ) if _pvd == pvd && - _validation_code == validation_code && - *_pov == pov && &candidate_receipt.descriptor == candidate.descriptor() && - timeout == PvfExecTimeoutKind::Backing && + }, + ) if validation_data == pvd_a && + validation_code == validation_code_a && + *pov == pov_a && &candidate_receipt.descriptor == candidate.descriptor() && + exec_kind == PvfExecKind::Backing && candidate.commitments.hash() == candidate_receipt.commitments_hash ); virtual_overseer @@ -1863,9 +1862,9 @@ fn cannot_second_multiple_candidates_per_parent() { assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive(.., tx), + CandidateValidationMessage::ValidateFromExhaustive { response_sender, .. 
}, ) => { - tx.send(Ok(ValidationResult::Valid( + response_sender.send(Ok(ValidationResult::Valid( CandidateCommitments { head_data: expected_head_data.clone(), horizontal_messages: Default::default(), diff --git a/polkadot/node/core/backing/src/tests/prospective_parachains.rs b/polkadot/node/core/backing/src/tests/prospective_parachains.rs index b79515ed37a6ef590680fb834402f6ed3bef73be..e7c29e11bb4702a1af46d414af0620af39910e1f 100644 --- a/polkadot/node/core/backing/src/tests/prospective_parachains.rs +++ b/polkadot/node/core/backing/src/tests/prospective_parachains.rs @@ -202,13 +202,13 @@ async fn assert_validate_seconded_candidate( virtual_overseer: &mut VirtualOverseer, relay_parent: Hash, candidate: &CommittedCandidateReceipt, - pov: &PoV, - pvd: &PersistedValidationData, - validation_code: &ValidationCode, + assert_pov: &PoV, + assert_pvd: &PersistedValidationData, + assert_validation_code: &ValidationCode, expected_head_data: &HeadData, fetch_pov: bool, ) { - assert_validation_requests(virtual_overseer, validation_code.clone()).await; + assert_validation_requests(virtual_overseer, assert_validation_code.clone()).await; if fetch_pov { assert_matches!( @@ -220,29 +220,29 @@ async fn assert_validate_seconded_candidate( .. } ) if hash == relay_parent => { - tx.send(pov.clone()).unwrap(); + tx.send(assert_pov.clone()).unwrap(); } ); } assert_matches!( virtual_overseer.recv().await, - AllMessages::CandidateValidation(CandidateValidationMessage::ValidateFromExhaustive( - _pvd, - _validation_code, + AllMessages::CandidateValidation(CandidateValidationMessage::ValidateFromExhaustive { + validation_data, + validation_code, candidate_receipt, - _pov, - _, - timeout, - tx, - )) if &_pvd == pvd && - &_validation_code == validation_code && - &*_pov == pov && + pov, + exec_kind, + response_sender, + .. + }) if &validation_data == assert_pvd && + &validation_code == assert_validation_code && + &*pov == assert_pov && &candidate_receipt.descriptor == candidate.descriptor() && - timeout == PvfExecTimeoutKind::Backing && + exec_kind == PvfExecKind::Backing && candidate.commitments.hash() == candidate_receipt.commitments_hash => { - tx.send(Ok(ValidationResult::Valid( + response_sender.send(Ok(ValidationResult::Valid( CandidateCommitments { head_data: expected_head_data.clone(), horizontal_messages: Default::default(), @@ -251,7 +251,7 @@ async fn assert_validate_seconded_candidate( processed_downward_messages: 0, hrmp_watermark: 0, }, - pvd.clone(), + assert_pvd.clone(), ))) .unwrap(); } @@ -1293,9 +1293,13 @@ fn concurrent_dependent_candidates() { tx.send(pov.clone()).unwrap(); }, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive(.., candidate, _, _, _, tx), + CandidateValidationMessage::ValidateFromExhaustive { + candidate_receipt, + response_sender, + .. 
+ }, ) => { - let candidate_hash = candidate.hash(); + let candidate_hash = candidate_receipt.hash(); let (head_data, pvd) = if candidate_hash == candidate_a_hash { (&head_data[1], &pvd_a) } else if candidate_hash == candidate_b_hash { @@ -1303,18 +1307,19 @@ fn concurrent_dependent_candidates() { } else { panic!("unknown candidate hash") }; - tx.send(Ok(ValidationResult::Valid( - CandidateCommitments { - head_data: head_data.clone(), - horizontal_messages: Default::default(), - upward_messages: Default::default(), - new_validation_code: None, - processed_downward_messages: 0, - hrmp_watermark: 0, - }, - pvd.clone(), - ))) - .unwrap(); + response_sender + .send(Ok(ValidationResult::Valid( + CandidateCommitments { + head_data: head_data.clone(), + horizontal_messages: Default::default(), + upward_messages: Default::default(), + new_validation_code: None, + processed_downward_messages: 0, + hrmp_watermark: 0, + }, + pvd.clone(), + ))) + .unwrap(); }, AllMessages::AvailabilityStore(AvailabilityStoreMessage::StoreAvailableData { tx, diff --git a/polkadot/node/core/bitfield-signing/Cargo.toml b/polkadot/node/core/bitfield-signing/Cargo.toml index de38d18d970623dd6dc14fdcecb5dd4447232481..712a01b46b1cf9df13220accaad7ca1dde6cbee2 100644 --- a/polkadot/node/core/bitfield-signing/Cargo.toml +++ b/polkadot/node/core/bitfield-signing/Cargo.toml @@ -4,6 +4,7 @@ version = "1.0.0" authors.workspace = true edition.workspace = true license.workspace = true +description = "Bitfield signing subsystem for the Polkadot node" [dependencies] futures = "0.3.21" diff --git a/polkadot/node/core/bitfield-signing/src/lib.rs b/polkadot/node/core/bitfield-signing/src/lib.rs index f29e827e109025de2541c932df55192a21e3e03a..0fc0bb3d27887d1257b3473d4ff2b80aff8db42d 100644 --- a/polkadot/node/core/bitfield-signing/src/lib.rs +++ b/polkadot/node/core/bitfield-signing/src/lib.rs @@ -32,8 +32,8 @@ use polkadot_node_subsystem::{ messages::{ AvailabilityStoreMessage, BitfieldDistributionMessage, RuntimeApiMessage, RuntimeApiRequest, }, - overseer, ActivatedLeaf, FromOrchestra, LeafStatus, OverseerSignal, PerLeafSpan, - SpawnedSubsystem, SubsystemError, SubsystemResult, SubsystemSender, + overseer, ActivatedLeaf, FromOrchestra, OverseerSignal, PerLeafSpan, SpawnedSubsystem, + SubsystemError, SubsystemResult, SubsystemSender, }; use polkadot_node_subsystem_util::{self as util, Validator}; use polkadot_primitives::{AvailabilityBitfield, CoreState, Hash, ValidatorIndex}; @@ -257,16 +257,6 @@ async fn handle_active_leaves_update( where Sender: overseer::BitfieldSigningSenderTrait, { - if let LeafStatus::Stale = leaf.status { - gum::debug!( - target: LOG_TARGET, - relay_parent = ?leaf.hash, - block_number = ?leaf.number, - "Skip bitfield signing for stale leaf" - ); - return Ok(()) - } - let span = PerLeafSpan::new(leaf.span, "bitfield-signing"); let span_delay = span.child("delay"); let wait_until = Instant::now() + SPAWNED_TASK_DELAY; diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs index 3281fcc59a158cc49c07009d43e9de29d42b9d02..9f7b17f6129968dca8d85f45ae9ca2f30e261455 100644 --- a/polkadot/node/core/candidate-validation/src/lib.rs +++ b/polkadot/node/core/candidate-validation/src/lib.rs @@ -24,8 +24,8 @@ #![warn(missing_docs)] use polkadot_node_core_pvf::{ - InternalValidationError, InvalidCandidate as WasmInvalidCandidate, PrepareError, - PrepareJobKind, PrepareStats, PvfPrepData, ValidationError, ValidationHost, + InternalValidationError, InvalidCandidate as 
WasmInvalidCandidate, PossiblyInvalidError, + PrepareError, PrepareJobKind, PvfPrepData, ValidationError, ValidationHost, }; use polkadot_node_primitives::{ BlockData, InvalidCandidate, PoV, ValidationResult, POV_BOMB_LIMIT, VALIDATION_CODE_BOMB_LIMIT, @@ -44,9 +44,13 @@ use polkadot_parachain_primitives::primitives::{ ValidationParams, ValidationResult as WasmValidationResult, }; use polkadot_primitives::{ + executor_params::{ + DEFAULT_APPROVAL_EXECUTION_TIMEOUT, DEFAULT_BACKING_EXECUTION_TIMEOUT, + DEFAULT_LENIENT_PREPARATION_TIMEOUT, DEFAULT_PRECHECK_PREPARATION_TIMEOUT, + }, CandidateCommitments, CandidateDescriptor, CandidateReceipt, ExecutorParams, Hash, - OccupiedCoreAssumption, PersistedValidationData, PvfExecTimeoutKind, PvfPrepTimeoutKind, - ValidationCode, ValidationCodeHash, + OccupiedCoreAssumption, PersistedValidationData, PvfExecKind, PvfPrepKind, ValidationCode, + ValidationCodeHash, }; use parity_scale_codec::Encode; @@ -69,12 +73,6 @@ mod tests; const LOG_TARGET: &'static str = "parachain::candidate-validation"; -/// The amount of time to wait before retrying after a retry-able backing validation error. We use a -/// lower value for the backing case, to fit within the lower backing timeout. -#[cfg(not(test))] -const PVF_BACKING_EXECUTION_RETRY_DELAY: Duration = Duration::from_millis(500); -#[cfg(test)] -const PVF_BACKING_EXECUTION_RETRY_DELAY: Duration = Duration::from_millis(200); /// The amount of time to wait before retrying after a retry-able approval validation error. We use /// a higher value for the approval case since we have more time, and if we wait longer it is more /// likely that transient conditions will resolve. @@ -83,13 +81,6 @@ const PVF_APPROVAL_EXECUTION_RETRY_DELAY: Duration = Duration::from_secs(3); #[cfg(test)] const PVF_APPROVAL_EXECUTION_RETRY_DELAY: Duration = Duration::from_millis(200); -// Default PVF timeouts. Must never be changed! Use executor environment parameters in -// `session_info` pallet to adjust them. See also `PvfTimeoutKind` docs. -const DEFAULT_PRECHECK_PREPARATION_TIMEOUT: Duration = Duration::from_secs(60); -const DEFAULT_LENIENT_PREPARATION_TIMEOUT: Duration = Duration::from_secs(360); -const DEFAULT_BACKING_EXECUTION_TIMEOUT: Duration = Duration::from_secs(2); -const DEFAULT_APPROVAL_EXECUTION_TIMEOUT: Duration = Duration::from_secs(12); - /// Configuration for the candidate validation subsystem #[derive(Clone)] pub struct Config { @@ -152,7 +143,8 @@ async fn run( exec_worker_path, ), pvf_metrics, - ); + ) + .await?; ctx.spawn_blocking("pvf-validation-host", task.boxed())?; loop { @@ -161,13 +153,14 @@ async fn run( FromOrchestra::Signal(OverseerSignal::BlockFinalized(..)) => {}, FromOrchestra::Signal(OverseerSignal::Conclude) => return Ok(()), FromOrchestra::Communication { msg } => match msg { - CandidateValidationMessage::ValidateFromChainState( + CandidateValidationMessage::ValidateFromChainState { candidate_receipt, pov, executor_params, - timeout, + exec_kind, response_sender, - ) => { + .. 
+ } => { let bg = { let mut sender = ctx.sender().clone(); let metrics = metrics.clone(); @@ -181,7 +174,7 @@ async fn run( candidate_receipt, pov, executor_params, - timeout, + exec_kind, &metrics, ) .await; @@ -193,15 +186,16 @@ async fn run( ctx.spawn("validate-from-chain-state", bg.boxed())?; }, - CandidateValidationMessage::ValidateFromExhaustive( - persisted_validation_data, + CandidateValidationMessage::ValidateFromExhaustive { + validation_data, validation_code, candidate_receipt, pov, executor_params, - timeout, + exec_kind, response_sender, - ) => { + .. + } => { let bg = { let metrics = metrics.clone(); let validation_host = validation_host.clone(); @@ -210,12 +204,12 @@ async fn run( let _timer = metrics.time_validate_from_exhaustive(); let res = validate_candidate_exhaustive( validation_host, - persisted_validation_data, + validation_data, validation_code, candidate_receipt, pov, executor_params, - timeout, + exec_kind, &metrics, ) .await; @@ -227,11 +221,12 @@ async fn run( ctx.spawn("validate-from-exhaustive", bg.boxed())?; }, - CandidateValidationMessage::PreCheck( + CandidateValidationMessage::PreCheck { relay_parent, validation_code_hash, response_sender, - ) => { + .. + } => { let bg = { let mut sender = ctx.sender().clone(); let validation_host = validation_host.clone(); @@ -356,7 +351,7 @@ where return PreCheckOutcome::Invalid }; - let timeout = pvf_prep_timeout(&executor_params, PvfPrepTimeoutKind::Precheck); + let timeout = pvf_prep_timeout(&executor_params, PvfPrepKind::Precheck); let pvf = match sp_maybe_compressed_blob::decompress( &validation_code.0, @@ -500,7 +495,7 @@ async fn validate_from_chain_state( candidate_receipt: CandidateReceipt, pov: Arc, executor_params: ExecutorParams, - exec_timeout_kind: PvfExecTimeoutKind, + exec_kind: PvfExecKind, metrics: &Metrics, ) -> Result where @@ -520,7 +515,7 @@ where candidate_receipt.clone(), pov, executor_params, - exec_timeout_kind, + exec_kind, metrics, ) .await; @@ -556,7 +551,7 @@ async fn validate_candidate_exhaustive( candidate_receipt: CandidateReceipt, pov: Arc, executor_params: ExecutorParams, - exec_timeout_kind: PvfExecTimeoutKind, + exec_kind: PvfExecKind, metrics: &Metrics, ) -> Result { let _timer = metrics.time_validate_candidate_exhaustive(); @@ -615,22 +610,39 @@ async fn validate_candidate_exhaustive( relay_parent_storage_root: persisted_validation_data.relay_parent_storage_root, }; - let result = validation_backend - .validate_candidate_with_retry( - raw_validation_code.to_vec(), - pvf_exec_timeout(&executor_params, exec_timeout_kind), - exec_timeout_kind, - params, - executor_params, - ) - .await; + let result = match exec_kind { + // Retry is disabled to reduce the chance of nondeterministic blocks getting backed and + // honest backers getting slashed. 
+ PvfExecKind::Backing => { + let prep_timeout = pvf_prep_timeout(&executor_params, PvfPrepKind::Prepare); + let exec_timeout = pvf_exec_timeout(&executor_params, exec_kind); + let pvf = PvfPrepData::from_code( + raw_validation_code.to_vec(), + executor_params, + prep_timeout, + PrepareJobKind::Compilation, + ); + + validation_backend.validate_candidate(pvf, exec_timeout, params.encode()).await + }, + PvfExecKind::Approval => + validation_backend + .validate_candidate_with_retry( + raw_validation_code.to_vec(), + pvf_exec_timeout(&executor_params, exec_kind), + params, + executor_params, + PVF_APPROVAL_EXECUTION_RETRY_DELAY, + ) + .await, + }; if let Err(ref error) = result { gum::info!(target: LOG_TARGET, ?para_id, ?error, "Failed to validate candidate"); } match result { - Err(ValidationError::InternalError(e)) => { + Err(ValidationError::Internal(e)) => { gum::warn!( target: LOG_TARGET, ?para_id, @@ -639,29 +651,29 @@ async fn validate_candidate_exhaustive( ); Err(ValidationFailed(e.to_string())) }, - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::HardTimeout)) => + Err(ValidationError::Invalid(WasmInvalidCandidate::HardTimeout)) => Ok(ValidationResult::Invalid(InvalidCandidate::Timeout)), - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::WorkerReportedError(e))) => + Err(ValidationError::Invalid(WasmInvalidCandidate::WorkerReportedInvalid(e))) => Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError(e))), - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousWorkerDeath)) => + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)) => Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError( "ambiguous worker death".to_string(), ))), - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::Panic(err))) => + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::JobError(err))) => Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError(err))), - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::PrepareError(e))) => { - // In principle if preparation of the `WASM` fails, the current candidate can not be the - // reason for that. So we can't say whether it is invalid or not. In addition, with - // pre-checking enabled only valid runtimes should ever get enacted, so we can be - // reasonably sure that this is some local problem on the current node. However, as this - // particular error *seems* to indicate a deterministic error, we raise a warning. + + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousJobDeath(err))) => + Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError(format!( + "ambiguous job death: {err}" + )))), + Err(ValidationError::Preparation(e)) => { gum::warn!( target: LOG_TARGET, ?para_id, ?e, "Deterministic error occurred during preparation (should have been ruled out by pre-checking phase)", ); - Err(ValidationFailed(e)) + Err(ValidationFailed(e.to_string())) }, Ok(res) => if res.head_data.hash() != candidate_receipt.descriptor.para_head { @@ -703,8 +715,8 @@ trait ValidationBackend { encoded_params: Vec, ) -> Result; - /// Tries executing a PVF. Will retry once if an error is encountered that may have been - /// transient. + /// Tries executing a PVF for the approval subsystem. Will retry once if an error is encountered + /// that may have been transient. /// /// NOTE: Should retry only on errors that are a result of execution itself, and not of /// preparation. 
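The `exec_kind` dispatch above is the behavioural core of this change: backing now gets a single execution attempt, while approval and dispute checking keep the retry-after-delay behaviour (with `PVF_APPROVAL_EXECUTION_RETRY_DELAY` of 3 seconds). Below is a minimal, synchronous sketch of that policy; the types are stand-ins, and the real subsystem additionally budgets total execution time and counts retries per error kind.

```rust
use std::time::Duration;

// Stand-in error type: only some failures are plausibly transient.
#[derive(Debug)]
enum ValidationError {
    Invalid(String),         // deterministic: never retry
    PossiblyInvalid(String), // could be transient: retry on approval
    Internal(String),        // local fault: retry on approval
}

#[derive(Clone, Copy)]
enum PvfExecKind {
    Backing,
    Approval,
}

fn retry_delay(kind: PvfExecKind) -> Option<Duration> {
    match kind {
        // Backing: fail fast, so a nondeterministically failing PVF is not
        // "rescued" by a retry and then backed.
        PvfExecKind::Backing => None,
        // Approval: more time is available and a spurious failure must not
        // turn into an unjustified dispute, so retry after a pause.
        PvfExecKind::Approval => Some(Duration::from_secs(3)),
    }
}

fn validate<F>(kind: PvfExecKind, mut attempt: F) -> Result<(), ValidationError>
where
    F: FnMut() -> Result<(), ValidationError>,
{
    let first = attempt();
    let transient = matches!(
        &first,
        Err(ValidationError::PossiblyInvalid(_) | ValidationError::Internal(_))
    );
    match (transient, retry_delay(kind)) {
        (true, Some(delay)) => {
            std::thread::sleep(delay);
            attempt()
        },
        _ => first,
    }
}

fn main() {
    let mut calls = 0;
    let result = validate(PvfExecKind::Approval, || {
        calls += 1;
        if calls == 1 {
            Err(ValidationError::Internal("transient hiccup".into()))
        } else {
            Ok(())
        }
    });
    assert!(result.is_ok());
    assert_eq!(calls, 2);
}
```

With this shape a backing check surfaces the first error unchanged, while an approval check gets one more chance when the failure might have been caused by local conditions rather than by the candidate itself.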
@@ -712,11 +724,11 @@ trait ValidationBackend { &mut self, raw_validation_code: Vec, exec_timeout: Duration, - exec_timeout_kind: PvfExecTimeoutKind, params: ValidationParams, executor_params: ExecutorParams, + retry_delay: Duration, ) -> Result { - let prep_timeout = pvf_prep_timeout(&executor_params, PvfPrepTimeoutKind::Lenient); + let prep_timeout = pvf_prep_timeout(&executor_params, PvfPrepKind::Prepare); // Construct the PVF a single time, since it is an expensive operation. Cloning it is cheap. let pvf = PvfPrepData::from_code( raw_validation_code, @@ -734,15 +746,10 @@ trait ValidationBackend { return validation_result } - let retry_delay = match exec_timeout_kind { - PvfExecTimeoutKind::Backing => PVF_BACKING_EXECUTION_RETRY_DELAY, - PvfExecTimeoutKind::Approval => PVF_APPROVAL_EXECUTION_RETRY_DELAY, - }; - // Allow limited retries for each kind of error. + let mut num_death_retries_left = 1; + let mut num_job_error_retries_left = 1; let mut num_internal_retries_left = 1; - let mut num_awd_retries_left = 1; - let mut num_panic_retries_left = 1; loop { // Stop retrying if we exceeded the timeout. if total_time_start.elapsed() + retry_delay > exec_timeout { @@ -750,15 +757,31 @@ trait ValidationBackend { } match validation_result { - Err(ValidationError::InvalidCandidate( - WasmInvalidCandidate::AmbiguousWorkerDeath, - )) if num_awd_retries_left > 0 => num_awd_retries_left -= 1, - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::Panic(_))) - if num_panic_retries_left > 0 => - num_panic_retries_left -= 1, - Err(ValidationError::InternalError(_)) if num_internal_retries_left > 0 => - num_internal_retries_left -= 1, - _ => break, + Err(ValidationError::PossiblyInvalid( + PossiblyInvalidError::AmbiguousWorkerDeath | + PossiblyInvalidError::AmbiguousJobDeath(_), + )) => + if num_death_retries_left > 0 { + num_death_retries_left -= 1; + } else { + break; + }, + + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::JobError(_))) => + if num_job_error_retries_left > 0 { + num_job_error_retries_left -= 1; + } else { + break; + }, + + Err(ValidationError::Internal(_)) => + if num_internal_retries_left > 0 { + num_internal_retries_left -= 1; + } else { + break; + }, + + Ok(_) | Err(ValidationError::Invalid(_) | ValidationError::Preparation(_)) => break, } // If we got a possibly transient error, retry once after a brief delay, on the @@ -787,7 +810,7 @@ trait ValidationBackend { validation_result } - async fn precheck_pvf(&mut self, pvf: PvfPrepData) -> Result; + async fn precheck_pvf(&mut self, pvf: PvfPrepData) -> Result<(), PrepareError>; } #[async_trait] @@ -817,7 +840,7 @@ impl ValidationBackend for ValidationHost { })? } - async fn precheck_pvf(&mut self, pvf: PvfPrepData) -> Result { + async fn precheck_pvf(&mut self, pvf: PvfPrepData) -> Result<(), PrepareError> { let (tx, rx) = oneshot::channel(); if let Err(err) = self.precheck_pvf(pvf, tx).await { // Return an IO error if there was an error communicating with the host. @@ -860,22 +883,41 @@ fn perform_basic_checks( Ok(()) } -fn pvf_prep_timeout(executor_params: &ExecutorParams, kind: PvfPrepTimeoutKind) -> Duration { +/// To determine the amount of timeout time for the pvf execution. +/// +/// Precheck +/// The time period after which the preparation worker is considered +/// unresponsive and will be killed. +/// +/// Prepare +///The time period after which the preparation worker is considered +/// unresponsive and will be killed. 
+fn pvf_prep_timeout(executor_params: &ExecutorParams, kind: PvfPrepKind) -> Duration { if let Some(timeout) = executor_params.pvf_prep_timeout(kind) { return timeout } match kind { - PvfPrepTimeoutKind::Precheck => DEFAULT_PRECHECK_PREPARATION_TIMEOUT, - PvfPrepTimeoutKind::Lenient => DEFAULT_LENIENT_PREPARATION_TIMEOUT, + PvfPrepKind::Precheck => DEFAULT_PRECHECK_PREPARATION_TIMEOUT, + PvfPrepKind::Prepare => DEFAULT_LENIENT_PREPARATION_TIMEOUT, } } -fn pvf_exec_timeout(executor_params: &ExecutorParams, kind: PvfExecTimeoutKind) -> Duration { +/// To determine the amount of timeout time for the pvf execution. +/// +/// Backing subsystem +/// The amount of time to spend on execution during backing. +/// +/// Approval subsystem +/// The amount of time to spend on execution during approval or disputes. +/// This should be much longer than the backing execution timeout to ensure that in the +/// absence of extremely large disparities between hardware, blocks that pass backing are +/// considered executable by approval checkers or dispute participants. +fn pvf_exec_timeout(executor_params: &ExecutorParams, kind: PvfExecKind) -> Duration { if let Some(timeout) = executor_params.pvf_exec_timeout(kind) { return timeout } match kind { - PvfExecTimeoutKind::Backing => DEFAULT_BACKING_EXECUTION_TIMEOUT, - PvfExecTimeoutKind::Approval => DEFAULT_APPROVAL_EXECUTION_TIMEOUT, + PvfExecKind::Backing => DEFAULT_BACKING_EXECUTION_TIMEOUT, + PvfExecKind::Approval => DEFAULT_APPROVAL_EXECUTION_TIMEOUT, } } diff --git a/polkadot/node/core/candidate-validation/src/tests.rs b/polkadot/node/core/candidate-validation/src/tests.rs index af530a20c4e0ca61fcf44f4ce0e3121ffeab1363..11078580465263a35588fc860b6f4f7a53161cd3 100644 --- a/polkadot/node/core/candidate-validation/src/tests.rs +++ b/polkadot/node/core/candidate-validation/src/tests.rs @@ -377,7 +377,7 @@ impl ValidationBackend for MockValidateCandidateBackend { result } - async fn precheck_pvf(&mut self, _pvf: PvfPrepData) -> Result { + async fn precheck_pvf(&mut self, _pvf: PvfPrepData) -> Result<(), PrepareError> { unreachable!() } } @@ -436,7 +436,7 @@ fn candidate_validation_ok_is_ok() { candidate_receipt, Arc::new(pov), ExecutorParams::default(), - PvfExecTimeoutKind::Backing, + PvfExecKind::Backing, &Default::default(), )) .unwrap(); @@ -480,15 +480,15 @@ fn candidate_validation_bad_return_is_invalid() { let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() }; let v = executor::block_on(validate_candidate_exhaustive( - MockValidateCandidateBackend::with_hardcoded_result(Err( - ValidationError::InvalidCandidate(WasmInvalidCandidate::HardTimeout), - )), + MockValidateCandidateBackend::with_hardcoded_result(Err(ValidationError::Invalid( + WasmInvalidCandidate::HardTimeout, + ))), validation_data, validation_code, candidate_receipt, Arc::new(pov), ExecutorParams::default(), - PvfExecTimeoutKind::Backing, + PvfExecKind::Backing, &Default::default(), )) .unwrap(); @@ -496,23 +496,20 @@ fn candidate_validation_bad_return_is_invalid() { assert_matches!(v, ValidationResult::Invalid(InvalidCandidate::Timeout)); } -// Test that we vote valid if we get `AmbiguousWorkerDeath`, retry, and then succeed. 
-#[test] -fn candidate_validation_one_ambiguous_error_is_valid() { - let validation_data = PersistedValidationData { max_pov_size: 1024, ..Default::default() }; - - let pov = PoV { block_data: BlockData(vec![1; 32]) }; - let head_data = HeadData(vec![1, 1, 1]); - let validation_code = ValidationCode(vec![2; 16]); - +fn perform_basic_checks_on_valid_candidate( + pov: &PoV, + validation_code: &ValidationCode, + validation_data: &PersistedValidationData, + head_data_hash: Hash, +) -> CandidateDescriptor { let descriptor = make_valid_candidate_descriptor( ParaId::from(1_u32), dummy_hash(), validation_data.hash(), pov.hash(), validation_code.hash(), - head_data.hash(), - dummy_hash(), + head_data_hash, + head_data_hash, Sr25519Keyring::Alice, ); @@ -523,6 +520,24 @@ fn candidate_validation_one_ambiguous_error_is_valid() { &validation_code.hash(), ); assert!(check.is_ok()); + descriptor +} + +// Test that we vote valid if we get `AmbiguousWorkerDeath`, retry, and then succeed. +#[test] +fn candidate_validation_one_ambiguous_error_is_valid() { + let validation_data = PersistedValidationData { max_pov_size: 1024, ..Default::default() }; + + let pov = PoV { block_data: BlockData(vec![1; 32]) }; + let head_data = HeadData(vec![1, 1, 1]); + let validation_code = ValidationCode(vec![2; 16]); + + let descriptor = perform_basic_checks_on_valid_candidate( + &pov, + &validation_code, + &validation_data, + head_data.hash(), + ); let validation_result = WasmValidationResult { head_data, @@ -546,7 +561,7 @@ fn candidate_validation_one_ambiguous_error_is_valid() { let v = executor::block_on(validate_candidate_exhaustive( MockValidateCandidateBackend::with_hardcoded_result_list(vec![ - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousWorkerDeath)), + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)), Ok(validation_result), ]), validation_data.clone(), @@ -554,7 +569,7 @@ fn candidate_validation_one_ambiguous_error_is_valid() { candidate_receipt, Arc::new(pov), ExecutorParams::default(), - PvfExecTimeoutKind::Backing, + PvfExecKind::Approval, &Default::default(), )) .unwrap(); @@ -576,38 +591,26 @@ fn candidate_validation_multiple_ambiguous_errors_is_invalid() { let pov = PoV { block_data: BlockData(vec![1; 32]) }; let validation_code = ValidationCode(vec![2; 16]); - let descriptor = make_valid_candidate_descriptor( - ParaId::from(1_u32), - dummy_hash(), - validation_data.hash(), - pov.hash(), - validation_code.hash(), - dummy_hash(), - dummy_hash(), - Sr25519Keyring::Alice, - ); - - let check = perform_basic_checks( - &descriptor, - validation_data.max_pov_size, + let descriptor = perform_basic_checks_on_valid_candidate( &pov, - &validation_code.hash(), + &validation_code, + &validation_data, + dummy_hash(), ); - assert!(check.is_ok()); let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() }; let v = executor::block_on(validate_candidate_exhaustive( MockValidateCandidateBackend::with_hardcoded_result_list(vec![ - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousWorkerDeath)), - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousWorkerDeath)), + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)), + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)), ]), validation_data, validation_code, candidate_receipt, Arc::new(pov), ExecutorParams::default(), - PvfExecTimeoutKind::Backing, + PvfExecKind::Approval, &Default::default(), )) 
.unwrap(); @@ -615,58 +618,79 @@ fn candidate_validation_multiple_ambiguous_errors_is_invalid() { assert_matches!(v, ValidationResult::Invalid(InvalidCandidate::ExecutionError(_))); } -// Test that we retry on internal errors. +// Test that we retry for approval on internal errors. #[test] fn candidate_validation_retry_internal_errors() { - let validation_data = PersistedValidationData { max_pov_size: 1024, ..Default::default() }; - - let pov = PoV { block_data: BlockData(vec![1; 32]) }; - let validation_code = ValidationCode(vec![2; 16]); - - let descriptor = make_valid_candidate_descriptor( - ParaId::from(1_u32), - dummy_hash(), - validation_data.hash(), - pov.hash(), - validation_code.hash(), - dummy_hash(), - dummy_hash(), - Sr25519Keyring::Alice, - ); - - let check = perform_basic_checks( - &descriptor, - validation_data.max_pov_size, - &pov, - &validation_code.hash(), + let v = candidate_validation_retry_on_error_helper( + PvfExecKind::Approval, + vec![ + Err(InternalValidationError::HostCommunication("foo".into()).into()), + // Throw an AJD error, we should still retry again. + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousJobDeath( + "baz".into(), + ))), + // Throw another internal error. + Err(InternalValidationError::HostCommunication("bar".into()).into()), + ], ); - assert!(check.is_ok()); - - let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() }; + assert_matches!(v, Err(ValidationFailed(s)) if s.contains("bar")); +} - let v = executor::block_on(validate_candidate_exhaustive( - MockValidateCandidateBackend::with_hardcoded_result_list(vec![ +// Test that we don't retry for backing on internal errors. +#[test] +fn candidate_validation_dont_retry_internal_errors() { + let v = candidate_validation_retry_on_error_helper( + PvfExecKind::Backing, + vec![ Err(InternalValidationError::HostCommunication("foo".into()).into()), // Throw an AWD error, we should still retry again. - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousWorkerDeath)), + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)), // Throw another internal error. Err(InternalValidationError::HostCommunication("bar".into()).into()), - ]), - validation_data, - validation_code, - candidate_receipt, - Arc::new(pov), - ExecutorParams::default(), - PvfExecTimeoutKind::Backing, - &Default::default(), - )); + ], + ); - assert_matches!(v, Err(ValidationFailed(s)) if s.contains("bar")); + assert_matches!(v, Err(ValidationFailed(s)) if s.contains("foo")); } -// Test that we retry on panic errors. +// Test that we retry for approval on panic errors. #[test] fn candidate_validation_retry_panic_errors() { + let v = candidate_validation_retry_on_error_helper( + PvfExecKind::Approval, + vec![ + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::JobError("foo".into()))), + // Throw an AWD error, we should still retry again. + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)), + // Throw another panic error. + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::JobError("bar".into()))), + ], + ); + + assert_matches!(v, Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError(s))) if s == "bar".to_string()); +} + +// Test that we don't retry for backing on panic errors. 
+#[test] +fn candidate_validation_dont_retry_panic_errors() { + let v = candidate_validation_retry_on_error_helper( + PvfExecKind::Backing, + vec![ + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::JobError("foo".into()))), + // Throw an AWD error, we should still retry again. + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)), + // Throw another panic error. + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::JobError("bar".into()))), + ], + ); + + assert_matches!(v, Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError(s))) if s == "foo".to_string()); +} + +fn candidate_validation_retry_on_error_helper( + exec_kind: PvfExecKind, + mock_errors: Vec>, +) -> Result { let validation_data = PersistedValidationData { max_pov_size: 1024, ..Default::default() }; let pov = PoV { block_data: BlockData(vec![1; 32]) }; @@ -693,24 +717,16 @@ fn candidate_validation_retry_panic_errors() { let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() }; - let v = executor::block_on(validate_candidate_exhaustive( - MockValidateCandidateBackend::with_hardcoded_result_list(vec![ - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::Panic("foo".into()))), - // Throw an AWD error, we should still retry again. - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousWorkerDeath)), - // Throw another panic error. - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::Panic("bar".into()))), - ]), + return executor::block_on(validate_candidate_exhaustive( + MockValidateCandidateBackend::with_hardcoded_result_list(mock_errors), validation_data, validation_code, candidate_receipt, Arc::new(pov), ExecutorParams::default(), - PvfExecTimeoutKind::Backing, + exec_kind, &Default::default(), )); - - assert_matches!(v, Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError(s))) if s == "bar".to_string()); } #[test] @@ -742,15 +758,15 @@ fn candidate_validation_timeout_is_internal_error() { let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() }; let v = executor::block_on(validate_candidate_exhaustive( - MockValidateCandidateBackend::with_hardcoded_result(Err( - ValidationError::InvalidCandidate(WasmInvalidCandidate::HardTimeout), - )), + MockValidateCandidateBackend::with_hardcoded_result(Err(ValidationError::Invalid( + WasmInvalidCandidate::HardTimeout, + ))), validation_data, validation_code, candidate_receipt, Arc::new(pov), ExecutorParams::default(), - PvfExecTimeoutKind::Backing, + PvfExecKind::Backing, &Default::default(), )); @@ -795,7 +811,7 @@ fn candidate_validation_commitment_hash_mismatch_is_invalid() { candidate_receipt, Arc::new(pov), ExecutorParams::default(), - PvfExecTimeoutKind::Backing, + PvfExecKind::Backing, &Default::default(), )) .unwrap(); @@ -836,15 +852,15 @@ fn candidate_validation_code_mismatch_is_invalid() { let (_ctx, _ctx_handle) = test_helpers::make_subsystem_context::(pool.clone()); let v = executor::block_on(validate_candidate_exhaustive( - MockValidateCandidateBackend::with_hardcoded_result(Err( - ValidationError::InvalidCandidate(WasmInvalidCandidate::HardTimeout), - )), + MockValidateCandidateBackend::with_hardcoded_result(Err(ValidationError::Invalid( + WasmInvalidCandidate::HardTimeout, + ))), validation_data, validation_code, candidate_receipt, Arc::new(pov), ExecutorParams::default(), - PvfExecTimeoutKind::Backing, + PvfExecKind::Backing, &Default::default(), )) .unwrap(); @@ -901,7 +917,7 @@ fn compressed_code_works() { 
candidate_receipt, Arc::new(pov), ExecutorParams::default(), - PvfExecTimeoutKind::Backing, + PvfExecKind::Backing, &Default::default(), )); @@ -952,7 +968,7 @@ fn code_decompression_failure_is_error() { candidate_receipt, Arc::new(pov), ExecutorParams::default(), - PvfExecTimeoutKind::Backing, + PvfExecKind::Backing, &Default::default(), )); @@ -1004,7 +1020,7 @@ fn pov_decompression_failure_is_invalid() { candidate_receipt, Arc::new(pov), ExecutorParams::default(), - PvfExecTimeoutKind::Backing, + PvfExecKind::Backing, &Default::default(), )); @@ -1012,11 +1028,11 @@ fn pov_decompression_failure_is_invalid() { } struct MockPreCheckBackend { - result: Result, + result: Result<(), PrepareError>, } impl MockPreCheckBackend { - fn with_hardcoded_result(result: Result) -> Self { + fn with_hardcoded_result(result: Result<(), PrepareError>) -> Self { Self { result } } } @@ -1032,7 +1048,7 @@ impl ValidationBackend for MockPreCheckBackend { unreachable!() } - async fn precheck_pvf(&mut self, _pvf: PvfPrepData) -> Result { + async fn precheck_pvf(&mut self, _pvf: PvfPrepData) -> Result<(), PrepareError> { self.result.clone() } } @@ -1049,7 +1065,7 @@ fn precheck_works() { let (check_fut, check_result) = precheck_pvf( ctx.sender(), - MockPreCheckBackend::with_hardcoded_result(Ok(PrepareStats::default())), + MockPreCheckBackend::with_hardcoded_result(Ok(())), relay_parent, validation_code_hash, ) @@ -1111,7 +1127,7 @@ fn precheck_invalid_pvf_blob_compression() { let (check_fut, check_result) = precheck_pvf( ctx.sender(), - MockPreCheckBackend::with_hardcoded_result(Ok(PrepareStats::default())), + MockPreCheckBackend::with_hardcoded_result(Ok(())), relay_parent, validation_code_hash, ) @@ -1216,7 +1232,7 @@ fn precheck_properly_classifies_outcomes() { inner(Err(PrepareError::Prevalidation("foo".to_owned())), PreCheckOutcome::Invalid); inner(Err(PrepareError::Preparation("bar".to_owned())), PreCheckOutcome::Invalid); - inner(Err(PrepareError::Panic("baz".to_owned())), PreCheckOutcome::Invalid); + inner(Err(PrepareError::JobError("baz".to_owned())), PreCheckOutcome::Invalid); inner(Err(PrepareError::TimedOut), PreCheckOutcome::Failed); inner(Err(PrepareError::IoErr("fizz".to_owned())), PreCheckOutcome::Failed); diff --git a/polkadot/node/core/chain-api/Cargo.toml b/polkadot/node/core/chain-api/Cargo.toml index 2178be028bd408297613b75730f35fde7277603a..154fa20e75d0b1016a82758663bba27ff98dcbb9 100644 --- a/polkadot/node/core/chain-api/Cargo.toml +++ b/polkadot/node/core/chain-api/Cargo.toml @@ -4,6 +4,7 @@ version = "1.0.0" authors.workspace = true edition.workspace = true license.workspace = true +description = "The Chain API subsystem provides access to chain related utility functions like block number to hash conversions." 
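The `precheck_properly_classifies_outcomes` update above keeps the same policy under the renamed variant: deterministic preparation failures produce an `Invalid` pre-check outcome, while failures that may be local (timeout, I/O) only yield `Failed`. The following is a self-contained sketch of that classification with a trimmed variant set and stand-in types, not the crate's actual code.

```rust
// Stand-ins for the crate's types; the variant names mirror the diff but the set is trimmed.
#[derive(Debug)]
enum PrepareError {
    Prevalidation(String),
    Preparation(String),
    JobError(String),
    TimedOut,
    IoErr(String),
}

#[derive(Debug, PartialEq)]
enum PreCheckOutcome {
    Valid,
    Invalid,
    Failed,
}

// Deterministic preparation errors count against the PVF itself; anything that could be a
// local or transient fault is only reported as Failed.
fn classify(result: Result<(), PrepareError>) -> PreCheckOutcome {
    use PrepareError::*;
    match result {
        Ok(()) => PreCheckOutcome::Valid,
        Err(Prevalidation(_) | Preparation(_) | JobError(_)) => PreCheckOutcome::Invalid,
        Err(TimedOut | IoErr(_)) => PreCheckOutcome::Failed,
    }
}

fn main() {
    assert_eq!(classify(Ok(())), PreCheckOutcome::Valid);
    assert_eq!(
        classify(Err(PrepareError::JobError("panic".into()))),
        PreCheckOutcome::Invalid
    );
    assert_eq!(classify(Err(PrepareError::TimedOut)), PreCheckOutcome::Failed);
}
```

The split matters because, roughly, an `Invalid` outcome feeds a negative pre-check judgement while `Failed` merely abstains from voting on the PVF.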
[dependencies] futures = "0.3.21" diff --git a/polkadot/node/core/dispute-coordinator/Cargo.toml b/polkadot/node/core/dispute-coordinator/Cargo.toml index 6061c52b7e810e75cd810dd76287ea1337166ed2..e2086db708f9a616ee348738fcf706aef86ccd92 100644 --- a/polkadot/node/core/dispute-coordinator/Cargo.toml +++ b/polkadot/node/core/dispute-coordinator/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "polkadot-node-core-dispute-coordinator" version = "1.0.0" +description = "The node-side components that participate in disputes" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/polkadot/node/core/dispute-coordinator/src/import.rs b/polkadot/node/core/dispute-coordinator/src/import.rs index cf51ae4d3655e935859878fbc67564dfafbf1a9e..837ad7856e735c47f38b90d85a98dbf3a39f5122 100644 --- a/polkadot/node/core/dispute-coordinator/src/import.rs +++ b/polkadot/node/core/dispute-coordinator/src/import.rs @@ -117,7 +117,7 @@ pub enum OwnVoteState { } impl OwnVoteState { - fn new(votes: &CandidateVotes, env: &CandidateEnvironment<'_>) -> Self { + fn new(votes: &CandidateVotes, env: &CandidateEnvironment) -> Self { let controlled_indices = env.controlled_indices(); if controlled_indices.is_empty() { return Self::CannotVote diff --git a/polkadot/node/core/dispute-coordinator/src/lib.rs b/polkadot/node/core/dispute-coordinator/src/lib.rs index 3f0fd4013141925fb57e462934b0312698c89291..e96fee8124099bf000b09863b1a46da98256c5e6 100644 --- a/polkadot/node/core/dispute-coordinator/src/lib.rs +++ b/polkadot/node/core/dispute-coordinator/src/lib.rs @@ -27,6 +27,7 @@ use std::sync::Arc; +use error::FatalError; use futures::FutureExt; use gum::CandidateHash; @@ -431,7 +432,7 @@ impl DisputeCoordinatorSubsystem { #[overseer::contextbounds(DisputeCoordinator, prefix = self::overseer)] async fn wait_for_first_leaf(ctx: &mut Context) -> Result> { loop { - match ctx.recv().await? { + match ctx.recv().await.map_err(FatalError::SubsystemReceive)? { FromOrchestra::Signal(OverseerSignal::Conclude) => return Ok(None), FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update)) => { if let Some(activated) = update.activated { diff --git a/polkadot/node/core/dispute-coordinator/src/participation/mod.rs b/polkadot/node/core/dispute-coordinator/src/participation/mod.rs index 35d07b411e8f644ed5491a2e6344fd8f99a71c60..05ea7323af1419d770cb7b68e15ee7887ceeaab8 100644 --- a/polkadot/node/core/dispute-coordinator/src/participation/mod.rs +++ b/polkadot/node/core/dispute-coordinator/src/participation/mod.rs @@ -32,7 +32,7 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_util::runtime::get_validation_code_by_hash; use polkadot_primitives::{ - BlockNumber, CandidateHash, CandidateReceipt, Hash, PvfExecTimeoutKind, SessionIndex, + BlockNumber, CandidateHash, CandidateReceipt, Hash, PvfExecKind, SessionIndex, }; use crate::LOG_TARGET; @@ -380,15 +380,15 @@ async fn participate( // same level of leeway. 
let (validation_tx, validation_rx) = oneshot::channel(); sender - .send_message(CandidateValidationMessage::ValidateFromExhaustive( - available_data.validation_data, + .send_message(CandidateValidationMessage::ValidateFromExhaustive { + validation_data: available_data.validation_data, validation_code, - req.candidate_receipt().clone(), - available_data.pov, - req.executor_params(), - PvfExecTimeoutKind::Approval, - validation_tx, - )) + candidate_receipt: req.candidate_receipt().clone(), + pov: available_data.pov, + executor_params: req.executor_params(), + exec_kind: PvfExecKind::Approval, + response_sender: validation_tx, + }) .await; // we cast votes (either positive or negative) depending on the outcome of diff --git a/polkadot/node/core/dispute-coordinator/src/participation/tests.rs b/polkadot/node/core/dispute-coordinator/src/participation/tests.rs index ee6b950720e5f672d2e33a1f22e367b08d829d6f..012df51d0cd3eac291e17354a45ae18e347b121d 100644 --- a/polkadot/node/core/dispute-coordinator/src/participation/tests.rs +++ b/polkadot/node/core/dispute-coordinator/src/participation/tests.rs @@ -115,12 +115,12 @@ pub async fn participation_full_happy_path( assert_matches!( ctx_handle.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive(_, _, candidate_receipt, _, _, timeout, tx) - ) if timeout == PvfExecTimeoutKind::Approval => { + CandidateValidationMessage::ValidateFromExhaustive { candidate_receipt, exec_kind, response_sender, .. } + ) if exec_kind == PvfExecKind::Approval => { if expected_commitments_hash != candidate_receipt.commitments_hash { - tx.send(Ok(ValidationResult::Invalid(InvalidCandidate::CommitmentsHashMismatch))).unwrap(); + response_sender.send(Ok(ValidationResult::Invalid(InvalidCandidate::CommitmentsHashMismatch))).unwrap(); } else { - tx.send(Ok(ValidationResult::Valid(dummy_candidate_commitments(None), PersistedValidationData::default()))).unwrap(); + response_sender.send(Ok(ValidationResult::Valid(dummy_candidate_commitments(None), PersistedValidationData::default()))).unwrap(); } }, "overseer did not receive candidate validation message", @@ -450,9 +450,9 @@ fn cast_invalid_vote_if_validation_fails_or_is_invalid() { assert_matches!( ctx_handle.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive(_, _, _, _, _, timeout, tx) - ) if timeout == PvfExecTimeoutKind::Approval => { - tx.send(Ok(ValidationResult::Invalid(InvalidCandidate::Timeout))).unwrap(); + CandidateValidationMessage::ValidateFromExhaustive { exec_kind, response_sender, .. } + ) if exec_kind == PvfExecKind::Approval => { + response_sender.send(Ok(ValidationResult::Invalid(InvalidCandidate::Timeout))).unwrap(); }, "overseer did not receive candidate validation message", ); @@ -487,9 +487,9 @@ fn cast_invalid_vote_if_commitments_dont_match() { assert_matches!( ctx_handle.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive(_, _, _, _, _, timeout, tx) - ) if timeout == PvfExecTimeoutKind::Approval => { - tx.send(Ok(ValidationResult::Invalid(InvalidCandidate::CommitmentsHashMismatch))).unwrap(); + CandidateValidationMessage::ValidateFromExhaustive { exec_kind, response_sender, .. 
} + ) if exec_kind == PvfExecKind::Approval => { + response_sender.send(Ok(ValidationResult::Invalid(InvalidCandidate::CommitmentsHashMismatch))).unwrap(); }, "overseer did not receive candidate validation message", ); @@ -524,9 +524,9 @@ fn cast_valid_vote_if_validation_passes() { assert_matches!( ctx_handle.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive(_, _, _, _, _, timeout, tx) - ) if timeout == PvfExecTimeoutKind::Approval => { - tx.send(Ok(ValidationResult::Valid(dummy_candidate_commitments(None), PersistedValidationData::default()))).unwrap(); + CandidateValidationMessage::ValidateFromExhaustive { exec_kind, response_sender, .. } + ) if exec_kind == PvfExecKind::Approval => { + response_sender.send(Ok(ValidationResult::Valid(dummy_candidate_commitments(None), PersistedValidationData::default()))).unwrap(); }, "overseer did not receive candidate validation message", ); diff --git a/polkadot/node/core/parachains-inherent/Cargo.toml b/polkadot/node/core/parachains-inherent/Cargo.toml index 18d91dcfb565abcc0011777d18b881966013701a..c783f21e24df335838259952d6cc0831193e6184 100644 --- a/polkadot/node/core/parachains-inherent/Cargo.toml +++ b/polkadot/node/core/parachains-inherent/Cargo.toml @@ -4,6 +4,7 @@ version = "1.0.0" authors.workspace = true edition.workspace = true license.workspace = true +description = "Parachains inherent data provider for Polkadot node" [dependencies] futures = "0.3.21" diff --git a/polkadot/node/core/prospective-parachains/Cargo.toml b/polkadot/node/core/prospective-parachains/Cargo.toml index 77a59d87f3fa77a42347550e7538d21b8ceda5b0..9db1259e61d0105d1156f68fe1702f2505bf30c6 100644 --- a/polkadot/node/core/prospective-parachains/Cargo.toml +++ b/polkadot/node/core/prospective-parachains/Cargo.toml @@ -4,6 +4,7 @@ version = "1.0.0" authors.workspace = true edition.workspace = true license.workspace = true +description = "The Prospective Parachains subsystem. Tracks and handles prospective parachain fragments." 
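The participation tests above now destructure the message as a struct variant, naming only `exec_kind` and `response_sender` and letting `..` absorb the remaining fields, which keeps the assertions stable if more fields are added later. A toy, self-contained illustration of the pattern (not the real overseer message types) is shown below.

```rust
use std::sync::mpsc;

#[derive(Debug, Clone, Copy, PartialEq)]
enum PvfExecKind {
    Backing,
    Approval,
}

// Toy stand-in for an overseer message with named fields.
enum CandidateValidationMessage {
    ValidateFromExhaustive {
        exec_kind: PvfExecKind,
        response_sender: mpsc::Sender<Result<(), String>>,
        // More fields could be added here without breaking the match below.
    },
}

fn main() {
    let (tx, rx) = mpsc::channel();
    let msg = CandidateValidationMessage::ValidateFromExhaustive {
        exec_kind: PvfExecKind::Approval,
        response_sender: tx,
    };

    // The harness destructures only what it asserts on; `..` absorbs everything else.
    match msg {
        CandidateValidationMessage::ValidateFromExhaustive { exec_kind, response_sender, .. }
            if exec_kind == PvfExecKind::Approval =>
        {
            response_sender.send(Ok(())).unwrap();
        },
        _ => panic!("unexpected message"),
    }

    assert_eq!(rx.recv().unwrap(), Ok(()));
}
```

Compared with the old positional tuple pattern (`_, _, _, _, _, timeout, tx`), the named form documents at the match site exactly which fields the test depends on.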
[dependencies] futures = "0.3.19" diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index fcca0dd0b536ba69d27805ae1cf37b54d834fd02..dabcfb80e02ee7943885888aef183a327579e3bd 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -290,6 +290,14 @@ async fn handle_active_leaves_update( ) .expect("ancestors are provided in reverse order and correctly; qed"); + gum::debug!( + target: LOG_TARGET, + relay_parent = ?hash, + min_relay_parent = scope.earliest_relay_parent().number, + para_id = ?para, + "Creating fragment tree" + ); + let tree = FragmentTree::populate(scope, &*candidate_storage); fragment_trees.insert(para, tree); diff --git a/polkadot/node/core/provisioner/Cargo.toml b/polkadot/node/core/provisioner/Cargo.toml index 05ea92caa976d99c9274313821cfdce212d74b4b..13ecb356f2c467fc8dad85ade54f71cf981d8e38 100644 --- a/polkadot/node/core/provisioner/Cargo.toml +++ b/polkadot/node/core/provisioner/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "polkadot-node-core-provisioner" version = "1.0.0" +description="Responsible for assembling a relay chain block from a set of available parachain candidates" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/polkadot/node/core/provisioner/src/lib.rs b/polkadot/node/core/provisioner/src/lib.rs index f81e5550b15da98ccab276c55a7dc0877eed27e6..8893bdc6549d28c74220950217698d120cf3370e 100644 --- a/polkadot/node/core/provisioner/src/lib.rs +++ b/polkadot/node/core/provisioner/src/lib.rs @@ -31,8 +31,8 @@ use polkadot_node_subsystem::{ CandidateBackingMessage, ChainApiMessage, ProspectiveParachainsMessage, ProvisionableData, ProvisionerInherentData, ProvisionerMessage, RuntimeApiMessage, RuntimeApiRequest, }, - overseer, ActivatedLeaf, ActiveLeavesUpdate, FromOrchestra, LeafStatus, OverseerSignal, - PerLeafSpan, RuntimeApiError, SpawnedSubsystem, SubsystemError, + overseer, ActivatedLeaf, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, PerLeafSpan, + RuntimeApiError, SpawnedSubsystem, SubsystemError, }; use polkadot_node_subsystem_util::{ request_availability_cores, request_persisted_validation_data, @@ -421,13 +421,7 @@ async fn send_inherent_data( "Selected disputes" ); - // Only include bitfields on fresh leaves. On chain reversions, we want to make sure that - // there will be at least one block, which cannot get disputed, so the chain can make progress. 
- let bitfields = match leaf.status { - LeafStatus::Fresh => - select_availability_bitfields(&availability_cores, bitfields, &leaf.hash), - LeafStatus::Stale => Vec::new(), - }; + let bitfields = select_availability_bitfields(&availability_cores, bitfields, &leaf.hash); gum::trace!( target: LOG_TARGET, diff --git a/polkadot/node/core/pvf-checker/src/lib.rs b/polkadot/node/core/pvf-checker/src/lib.rs index 2946f3f78861cccdbac8f94a97a4693fce37a09f..043da00eba15bded8009eb0df47d493381c922c7 100644 --- a/polkadot/node/core/pvf-checker/src/lib.rs +++ b/polkadot/node/core/pvf-checker/src/lib.rs @@ -535,7 +535,11 @@ async fn initiate_precheck( let (tx, rx) = oneshot::channel(); sender - .send_message(CandidateValidationMessage::PreCheck(relay_parent, validation_code_hash, tx)) + .send_message(CandidateValidationMessage::PreCheck { + relay_parent, + validation_code_hash, + response_sender: tx, + }) .await; let timer = metrics.time_pre_check_judgement(); diff --git a/polkadot/node/core/pvf-checker/src/tests.rs b/polkadot/node/core/pvf-checker/src/tests.rs index 4ac6e5eba31d7a98b24b4acc69d645147f107bc8..b0401ecdc3bd2368e34a263842ea566b712884a0 100644 --- a/polkadot/node/core/pvf-checker/src/tests.rs +++ b/polkadot/node/core/pvf-checker/src/tests.rs @@ -267,11 +267,12 @@ impl TestState { handle: &mut VirtualOverseer, ) -> ExpectCandidatePrecheck { match self.recv_timeout(handle).await.expect("timeout waiting for a message") { - AllMessages::CandidateValidation(CandidateValidationMessage::PreCheck( + AllMessages::CandidateValidation(CandidateValidationMessage::PreCheck { relay_parent, validation_code_hash, - tx, - )) => ExpectCandidatePrecheck { relay_parent, validation_code_hash, tx }, + response_sender, + .. + }) => ExpectCandidatePrecheck { relay_parent, validation_code_hash, tx: response_sender }, msg => panic!("Unexpected message was received: {:#?}", msg), } } diff --git a/polkadot/node/core/pvf/Cargo.toml b/polkadot/node/core/pvf/Cargo.toml index 27f4df117e578ebb8f86dd5eb6570af0b247492b..05509a5f8536e7250827c2f149f733c83a409ab5 100644 --- a/polkadot/node/core/pvf/Cargo.toml +++ b/polkadot/node/core/pvf/Cargo.toml @@ -8,10 +8,12 @@ license.workspace = true [dependencies] always-assert = "0.1" +blake3 = "1.5" cfg-if = "1.0" futures = "0.3.21" futures-timer = "3.0.2" gum = { package = "tracing-gum", path = "../../gum" } +is_executable = "1.0.1" libc = "0.2.139" pin-project = "1.0.9" rand = "0.8.5" @@ -26,6 +28,7 @@ polkadot-core-primitives = { path = "../../../core-primitives" } polkadot-node-core-pvf-common = { path = "common" } polkadot-node-metrics = { path = "../../metrics" } polkadot-node-primitives = { path = "../../primitives" } +polkadot-node-subsystem = { path = "../../subsystem" } polkadot-primitives = { path = "../../../primitives" } sp-core = { path = "../../../../substrate/primitives/core" } @@ -34,19 +37,29 @@ sp-maybe-compressed-blob = { path = "../../../../substrate/primitives/maybe-comp polkadot-node-core-pvf-prepare-worker = { path = "prepare-worker", optional = true } polkadot-node-core-pvf-execute-worker = { path = "execute-worker", optional = true } -[build-dependencies] -substrate-build-script-utils = { path = "../../../../substrate/utils/build-script-utils" } - [dev-dependencies] assert_matches = "1.4.0" +criterion = { version = "0.4.0", default-features = false, features = ["cargo_bench_support", "async_tokio"] } hex-literal = "0.4.1" + polkadot-node-core-pvf-common = { path = "common", features = ["test-utils"] } -# For the puppet worker, depend on ourselves with the 
test-utils feature. +# For benches and integration tests, depend on ourselves with the test-utils +# feature. polkadot-node-core-pvf = { path = ".", features = ["test-utils"] } +rococo-runtime = { path = "../../../runtime/rococo" } adder = { package = "test-parachain-adder", path = "../../../parachain/test-parachains/adder" } halt = { package = "test-parachain-halt", path = "../../../parachain/test-parachains/halt" } +[target.'cfg(target_os = "linux")'.dev-dependencies] +procfs = "0.16.0" +rusty-fork = "0.3.0" +sc-sysinfo = { path = "../../../../substrate/client/sysinfo" } + +[[bench]] +name = "host_prepare_rococo_runtime" +harness = false + [features] ci-only-tests = [] jemalloc-allocator = [ "polkadot-node-core-pvf-common/jemalloc-allocator" ] diff --git a/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs b/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs new file mode 100644 index 0000000000000000000000000000000000000000..f2551b701c2ca7bed839a05f2a3b8828770cd7fc --- /dev/null +++ b/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs @@ -0,0 +1,129 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Benchmarks for preparation through the host. We use a real PVF to get realistic results. 
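The new `[[bench]]` target above sets `harness = false`, which hands the benchmark binary's entry point to `criterion_main!` instead of libtest's default harness. As a rough, generic sketch (assuming criterion with the `async_tokio` feature and a tokio dev-dependency; the names and workload are placeholders, not this PR's actual benchmark), such an async Criterion bench typically looks like:

```rust
use criterion::{criterion_group, criterion_main, BatchSize, Criterion};
use std::time::Duration;

fn bench_prepare_like_workload(c: &mut Criterion) {
    // Tokio runtime that Criterion drives for the async routine.
    let rt = tokio::runtime::Runtime::new().unwrap();

    let mut group = c.benchmark_group("prepare");
    group.sample_size(20);
    group.measurement_time(Duration::from_secs(30));
    group.bench_function("placeholder workload", |b| {
        b.to_async(&rt).iter_batched(
            // Setup builds the input; it is excluded from the measured time.
            || vec![0u8; 1 << 20],
            // Only this future's execution is recorded.
            |code| async move { criterion::black_box(code.iter().map(|x| *x as u64).sum::<u64>()) },
            BatchSize::SmallInput,
        )
    });
    group.finish();
}

criterion_group!(benches, bench_prepare_like_workload);
criterion_main!(benches);
```

`iter_batched` keeps the setup closure out of the measured region, which matters when per-iteration setup (such as spawning a fresh validation host) is expensive relative to the call being measured.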
+ +use criterion::{criterion_group, criterion_main, BatchSize, Criterion, SamplingMode}; +use polkadot_node_core_pvf::{ + start, testing, Config, Metrics, PrepareError, PrepareJobKind, PvfPrepData, ValidationHost, +}; +use polkadot_primitives::ExecutorParams; +use rococo_runtime::WASM_BINARY; +use std::time::Duration; +use tokio::{runtime::Handle, sync::Mutex}; + +const TEST_PREPARATION_TIMEOUT: Duration = Duration::from_secs(30); + +struct TestHost { + host: Mutex, +} + +impl TestHost { + async fn new_with_config(handle: &Handle, f: F) -> Self + where + F: FnOnce(&mut Config), + { + let (prepare_worker_path, execute_worker_path) = testing::build_workers_and_get_paths(true); + + let cache_dir = tempfile::tempdir().unwrap(); + let mut config = Config::new( + cache_dir.path().to_owned(), + None, + prepare_worker_path, + execute_worker_path, + ); + f(&mut config); + let (host, task) = start(config, Metrics::default()).await.unwrap(); + let _ = handle.spawn(task); + Self { host: Mutex::new(host) } + } + + async fn precheck_pvf( + &self, + code: &[u8], + executor_params: ExecutorParams, + ) -> Result<(), PrepareError> { + let (result_tx, result_rx) = futures::channel::oneshot::channel(); + + let code = sp_maybe_compressed_blob::decompress(code, 16 * 1024 * 1024) + .expect("Compression works"); + + self.host + .lock() + .await + .precheck_pvf( + PvfPrepData::from_code( + code.into(), + executor_params, + TEST_PREPARATION_TIMEOUT, + PrepareJobKind::Prechecking, + ), + result_tx, + ) + .await + .unwrap(); + result_rx.await.unwrap() + } +} + +fn host_prepare_rococo_runtime(c: &mut Criterion) { + polkadot_node_core_pvf_common::sp_tracing::try_init_simple(); + + let rt = tokio::runtime::Runtime::new().unwrap(); + + let blob = WASM_BINARY.expect("You need to build the WASM binaries to run the tests!"); + let pvf = match sp_maybe_compressed_blob::decompress(&blob, 64 * 1024 * 1024) { + Ok(code) => PvfPrepData::from_code( + code.into_owned(), + ExecutorParams::default(), + Duration::from_secs(360), + PrepareJobKind::Compilation, + ), + Err(e) => { + panic!("Cannot decompress blob: {:?}", e); + }, + }; + + let mut group = c.benchmark_group("prepare rococo"); + group.sampling_mode(SamplingMode::Flat); + group.sample_size(20); + group.measurement_time(Duration::from_secs(240)); + group.bench_function("host: prepare Rococo runtime", |b| { + b.to_async(&rt).iter_batched( + || async { + ( + TestHost::new_with_config(rt.handle(), |cfg| { + cfg.prepare_workers_hard_max_num = 1; + }) + .await, + pvf.clone().code(), + ) + }, + |result| async move { + let (host, pvf_code) = result.await; + + // `PvfPrepData` is designed to be cheap to clone, so cloning shouldn't affect the + // benchmark accuracy. 
+ let _stats = host.precheck_pvf(&pvf_code, Default::default()).await.unwrap(); + }, + BatchSize::SmallInput, + ) + }); + group.finish(); +} + +criterion_group!(prepare, host_prepare_rococo_runtime); +criterion_main!(prepare); diff --git a/polkadot/node/core/pvf/common/Cargo.toml b/polkadot/node/core/pvf/common/Cargo.toml index 0f7308396d8098293ad1f3e56d3199f4064d0f9d..bfe1be9156fc2980a35b74f1c15a11a011605774 100644 --- a/polkadot/node/core/pvf/common/Cargo.toml +++ b/polkadot/node/core/pvf/common/Cargo.toml @@ -12,7 +12,7 @@ cpu-time = "1.0.0" futures = "0.3.21" gum = { package = "tracing-gum", path = "../../../gum" } libc = "0.2.139" -tokio = { version = "1.24.2", features = ["fs", "process", "io-util"] } +thiserror = "1.0.31" parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } @@ -29,14 +29,17 @@ sp-io = { path = "../../../../../substrate/primitives/io" } sp-tracing = { path = "../../../../../substrate/primitives/tracing" } [target.'cfg(target_os = "linux")'.dependencies] -landlock = "0.2.0" +landlock = "0.3.0" +seccompiler = "0.4.0" [dev-dependencies] assert_matches = "1.4.0" tempfile = "3.3.0" +[build-dependencies] +substrate-build-script-utils = { path = "../../../../../substrate/utils/build-script-utils" } + [features] # This feature is used to export test code to other crates without putting it in the production build. -# Also used for building the puppet worker. test-utils = [] jemalloc-allocator = [] diff --git a/polkadot/node/core/pvf/bin/puppet_worker.rs b/polkadot/node/core/pvf/common/build.rs similarity index 90% rename from polkadot/node/core/pvf/bin/puppet_worker.rs rename to polkadot/node/core/pvf/common/build.rs index 7f93519d845400684a8e3a044ea5ecac50566ac5..5531ad411da80ebb51cec8e84f675495edf22bdd 100644 --- a/polkadot/node/core/pvf/bin/puppet_worker.rs +++ b/polkadot/node/core/pvf/common/build.rs @@ -14,4 +14,6 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -polkadot_node_core_pvf::decl_puppet_worker_main!(); +fn main() { + substrate_build_script_utils::generate_wasmtime_version(); +} diff --git a/polkadot/node/core/pvf/common/src/error.rs b/polkadot/node/core/pvf/common/src/error.rs index 6fdd06057c8b021cc89b29eb0265c78e316eed02..7db7f9a5945179e16733c6e50b157d36369b61e1 100644 --- a/polkadot/node/core/pvf/common/src/error.rs +++ b/polkadot/node/core/pvf/common/src/error.rs @@ -14,47 +14,74 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use crate::prepare::PrepareStats; +use crate::prepare::{PrepareSuccess, PrepareWorkerSuccess}; use parity_scale_codec::{Decode, Encode}; use std::fmt; -/// Result of PVF preparation performed by the validation host. Contains stats about the preparation -/// if successful -pub type PrepareResult = Result; +/// Result of PVF preparation from a worker, with checksum of the compiled PVF and stats of the +/// preparation if successful. +pub type PrepareWorkerResult = Result; + +/// Result of PVF preparation propagated all the way back to the host, with path to the concluded +/// artifact and stats of the preparation if successful. +pub type PrepareResult = Result; + +/// Result of prechecking PVF performed by the validation host. Contains stats about the preparation +/// if successful. +pub type PrecheckResult = Result<(), PrepareError>; /// An error that occurred during the prepare part of the PVF pipeline. 
+// Codec indexes are intended to stabilize pre-encoded payloads (see `OOM_PAYLOAD`) #[derive(Debug, Clone, Encode, Decode)] pub enum PrepareError { /// During the prevalidation stage of preparation an issue was found with the PVF. + #[codec(index = 0)] Prevalidation(String), /// Compilation failed for the given PVF. + #[codec(index = 1)] Preparation(String), /// Instantiation of the WASM module instance failed. + #[codec(index = 2)] RuntimeConstruction(String), - /// An unexpected panic has occurred in the preparation worker. - Panic(String), + /// An unexpected error has occurred in the preparation job. + #[codec(index = 3)] + JobError(String), /// Failed to prepare the PVF due to the time limit. + #[codec(index = 4)] TimedOut, /// An IO error occurred. This state is reported by either the validation host or by the /// worker. + #[codec(index = 5)] IoErr(String), /// The temporary file for the artifact could not be created at the given cache path. This /// state is reported by the validation host (not by the worker). - CreateTmpFileErr(String), + #[codec(index = 6)] + CreateTmpFile(String), /// The response from the worker is received, but the file cannot be renamed (moved) to the /// final destination location. This state is reported by the validation host (not by the /// worker). - RenameTmpFileErr { + #[codec(index = 7)] + RenameTmpFile { err: String, // Unfortunately `PathBuf` doesn't implement `Encode`/`Decode`, so we do a fallible // conversion to `Option`. src: Option, dest: Option, }, + /// Memory limit reached + #[codec(index = 8)] + OutOfMemory, /// The response from the worker is received, but the worker cache could not be cleared. The /// worker has to be killed to avoid jobs having access to data from other jobs. This state is /// reported by the validation host (not by the worker). + #[codec(index = 9)] ClearWorkerDir(String), + /// The preparation job process died, due to OOM, a seccomp violation, or some other factor. + JobDied { err: String, job_pid: i32 }, + #[codec(index = 10)] + /// Some error occurred when interfacing with the kernel. + #[codec(index = 11)] + Kernel(String), } impl PrepareError { @@ -67,12 +94,15 @@ impl PrepareError { pub fn is_deterministic(&self) -> bool { use PrepareError::*; match self { - Prevalidation(_) | Preparation(_) | Panic(_) => true, - TimedOut | + Prevalidation(_) | Preparation(_) | JobError(_) | OutOfMemory => true, IoErr(_) | - CreateTmpFileErr(_) | - RenameTmpFileErr { .. } | - ClearWorkerDir(_) => false, + JobDied { .. } | + CreateTmpFile(_) | + RenameTmpFile { .. } | + ClearWorkerDir(_) | + Kernel(_) => false, + // Can occur due to issues with the PVF, but also due to factors like local load. + TimedOut => false, // Can occur due to issues with the PVF, but also due to local errors. 
RuntimeConstruction(_) => false, } @@ -86,13 +116,17 @@ impl fmt::Display for PrepareError { Prevalidation(err) => write!(f, "prevalidation: {}", err), Preparation(err) => write!(f, "preparation: {}", err), RuntimeConstruction(err) => write!(f, "runtime construction: {}", err), - Panic(err) => write!(f, "panic: {}", err), + JobError(err) => write!(f, "panic: {}", err), TimedOut => write!(f, "prepare: timeout"), IoErr(err) => write!(f, "prepare: io error while receiving response: {}", err), - CreateTmpFileErr(err) => write!(f, "prepare: error creating tmp file: {}", err), - RenameTmpFileErr { err, src, dest } => + JobDied { err, job_pid } => + write!(f, "prepare: prepare job with pid {job_pid} died: {err}"), + CreateTmpFile(err) => write!(f, "prepare: error creating tmp file: {}", err), + RenameTmpFile { err, src, dest } => write!(f, "prepare: error renaming tmp file ({:?} -> {:?}): {}", src, dest, err), + OutOfMemory => write!(f, "prepare: out of memory"), ClearWorkerDir(err) => write!(f, "prepare: error clearing worker cache: {}", err), + Kernel(err) => write!(f, "prepare: error interfacing with the kernel: {}", err), } } } @@ -116,9 +150,9 @@ pub enum InternalValidationError { // conversion to `Option`. path: Option, }, - /// An error occurred in the CPU time monitor thread. Should be totally unrelated to - /// validation. - CpuTimeMonitorThread(String), + /// Some error occurred when interfacing with the kernel. + Kernel(String), + /// Some non-deterministic preparation error occurred. NonDeterministicPrepareError(PrepareError), } @@ -141,8 +175,7 @@ impl fmt::Display for InternalValidationError { "validation: host could not clear the worker cache ({:?}) after a job: {}", path, err ), - CpuTimeMonitorThread(err) => - write!(f, "validation: an error occurred in the CPU time monitor thread: {}", err), + Kernel(err) => write!(f, "validation: error interfacing with the kernel: {}", err), NonDeterministicPrepareError(err) => write!(f, "validation: prepare: {}", err), } } diff --git a/polkadot/node/core/pvf/common/src/execute.rs b/polkadot/node/core/pvf/common/src/execute.rs index b89ab089af1c02eba401517e8248c292dd7040f8..aa1c1c5396823c5f313f007724522adb67ae44cb 100644 --- a/polkadot/node/core/pvf/common/src/execute.rs +++ b/polkadot/node/core/pvf/common/src/execute.rs @@ -28,9 +28,9 @@ pub struct Handshake { pub executor_params: ExecutorParams, } -/// The response from an execution job on the worker. +/// The response from the execution worker. #[derive(Debug, Encode, Decode)] -pub enum Response { +pub enum WorkerResponse { /// The job completed successfully. Ok { /// The result of parachain validation. @@ -41,14 +41,38 @@ pub enum Response { /// The candidate is invalid. InvalidCandidate(String), /// The job timed out. - TimedOut, - /// An unexpected panic has occurred in the execution worker. - Panic(String), + JobTimedOut, + /// The job process has died. We must kill the worker just in case. + /// + /// We cannot treat this as an internal error because malicious code may have killed the job. + /// We still retry it, because in the non-malicious case it is likely spurious. + JobDied { err: String, job_pid: i32 }, + /// An unexpected error occurred in the job process, e.g. failing to spawn a thread, panic, + /// etc. + /// + /// Because malicious code can cause a job error, we must not treat it as an internal error. We + /// still retry it, because in the non-malicious case it is likely spurious. + JobError(String), + /// Some internal error occurred. 
InternalError(InternalValidationError), } -impl Response { +/// The result of a job on the execution worker. +pub type JobResult = Result; + +/// The successful response from a job on the execution worker. +#[derive(Debug, Encode, Decode)] +pub enum JobResponse { + Ok { + /// The result of parachain validation. + result_descriptor: ValidationResult, + }, + /// The candidate is invalid. + InvalidCandidate(String), +} + +impl JobResponse { /// Creates an invalid response from a context `ctx` and a message `msg` (which can be empty). pub fn format_invalid(ctx: &'static str, msg: &str) -> Self { if msg.is_empty() { @@ -58,3 +82,18 @@ impl Response { } } } + +/// An unexpected error occurred in the execution job process. Because this comes from the job, +/// which executes untrusted code, this error must likewise be treated as untrusted. That is, we +/// cannot raise an internal error based on this. +#[derive(thiserror::Error, Debug, Encode, Decode)] +pub enum JobError { + #[error("The job timed out")] + TimedOut, + #[error("An unexpected panic has occurred in the execution job: {0}")] + Panic(String), + #[error("Could not spawn the requested thread: {0}")] + CouldNotSpawnThread(String), + #[error("An error occurred in the CPU time monitor thread: {0}")] + CpuTimeMonitorThread(String), +} diff --git a/polkadot/node/core/pvf/common/src/executor_intf.rs b/polkadot/node/core/pvf/common/src/executor_intf.rs index 79839149ebdf1c09b80b346d31ba0ebf9dc6e299..4bafc3f4291a60a1fd30750176035493c0c4eb0c 100644 --- a/polkadot/node/core/pvf/common/src/executor_intf.rs +++ b/polkadot/node/core/pvf/common/src/executor_intf.rs @@ -16,7 +16,10 @@ //! Interface to the Substrate Executor -use polkadot_primitives::{ExecutorParam, ExecutorParams}; +use polkadot_primitives::{ + executor_params::{DEFAULT_LOGICAL_STACK_MAX, DEFAULT_NATIVE_STACK_MAX}, + ExecutorParam, ExecutorParams, +}; use sc_executor_common::{ error::WasmError, runtime_blob::RuntimeBlob, @@ -42,9 +45,6 @@ use std::any::{Any, TypeId}; const DEFAULT_HEAP_PAGES_ESTIMATE: u32 = 32; const EXTRA_HEAP_PAGES: u32 = 2048; -/// The number of bytes devoted for the stack during wasm execution of a PVF. -pub const NATIVE_STACK_MAX: u32 = 256 * 1024 * 1024; - // VALUES OF THE DEFAULT CONFIGURATION SHOULD NEVER BE CHANGED // They are used as base values for the execution environment parametrization. // To overwrite them, add new ones to `EXECUTOR_PARAMS` in the `session_info` pallet and perform @@ -73,8 +73,8 @@ pub const DEFAULT_CONFIG: Config = Config { // also increase the native 256x. This hopefully should preclude wasm code from reaching // the stack limit set by the wasmtime. deterministic_stack_limit: Some(DeterministicStackLimit { - logical_max: 65536, - native_stack_max: NATIVE_STACK_MAX, + logical_max: DEFAULT_LOGICAL_STACK_MAX, + native_stack_max: DEFAULT_NATIVE_STACK_MAX, }), canonicalize_nans: true, // Rationale for turning the multi-threaded compilation off is to make the preparation time @@ -95,95 +95,102 @@ pub const DEFAULT_CONFIG: Config = Config { }, }; -pub fn params_to_wasmtime_semantics(par: &ExecutorParams) -> Result { +/// Executes the given PVF in the form of a compiled artifact and returns the result of +/// execution upon success. +/// +/// # Safety +/// +/// The caller must ensure that the compiled artifact passed here was: +/// 1) produced by `prepare`, +/// 2) was not modified, +/// +/// Failure to adhere to these requirements might lead to crashes and arbitrary code execution. 
+pub unsafe fn execute_artifact( + compiled_artifact_blob: &[u8], + executor_params: &ExecutorParams, + params: &[u8], +) -> Result, String> { + let mut extensions = sp_externalities::Extensions::new(); + + extensions.register(sp_core::traits::ReadRuntimeVersionExt::new(ReadRuntimeVersion)); + + let mut ext = ValidationExternalities(extensions); + + match sc_executor::with_externalities_safe(&mut ext, || { + let runtime = create_runtime_from_artifact_bytes(compiled_artifact_blob, executor_params)?; + runtime.new_instance()?.call(InvokeMethod::Export("validate_block"), params) + }) { + Ok(Ok(ok)) => Ok(ok), + Ok(Err(err)) | Err(err) => Err(err), + } + .map_err(|err| format!("execute error: {:?}", err)) +} + +/// Constructs the runtime for the given PVF, given the artifact bytes. +/// +/// # Safety +/// +/// The caller must ensure that the compiled artifact passed here was: +/// 1) produced by `prepare`, +/// 2) was not modified, +/// +/// Failure to adhere to these requirements might lead to crashes and arbitrary code execution. +pub unsafe fn create_runtime_from_artifact_bytes( + compiled_artifact_blob: &[u8], + executor_params: &ExecutorParams, +) -> Result { + let mut config = DEFAULT_CONFIG.clone(); + config.semantics = params_to_wasmtime_semantics(executor_params); + + sc_executor_wasmtime::create_runtime_from_artifact_bytes::( + compiled_artifact_blob, + config, + ) +} + +pub fn params_to_wasmtime_semantics(par: &ExecutorParams) -> Semantics { let mut sem = DEFAULT_CONFIG.semantics.clone(); - let mut stack_limit = if let Some(stack_limit) = sem.deterministic_stack_limit.clone() { - stack_limit - } else { - return Err("No default stack limit set".to_owned()) - }; + let mut stack_limit = sem + .deterministic_stack_limit + .expect("There is a comment to not change the default stack limit; it should always be available; qed") + .clone(); for p in par.iter() { match p { ExecutorParam::MaxMemoryPages(max_pages) => - sem.heap_alloc_strategy = - HeapAllocStrategy::Dynamic { maximum_pages: Some(*max_pages) }, + sem.heap_alloc_strategy = HeapAllocStrategy::Dynamic { + maximum_pages: Some((*max_pages).saturating_add(DEFAULT_HEAP_PAGES_ESTIMATE)), + }, ExecutorParam::StackLogicalMax(slm) => stack_limit.logical_max = *slm, ExecutorParam::StackNativeMax(snm) => stack_limit.native_stack_max = *snm, ExecutorParam::WasmExtBulkMemory => sem.wasm_bulk_memory = true, - // TODO: Not implemented yet; . - ExecutorParam::PrecheckingMaxMemory(_) => (), - ExecutorParam::PvfPrepTimeout(_, _) | ExecutorParam::PvfExecTimeout(_, _) => (), /* Not used here */ + ExecutorParam::PrecheckingMaxMemory(_) | + ExecutorParam::PvfPrepTimeout(_, _) | + ExecutorParam::PvfExecTimeout(_, _) => (), /* Not used here */ } } sem.deterministic_stack_limit = Some(stack_limit); - Ok(sem) + sem } -/// A WASM executor with a given configuration. It is instantiated once per execute worker and is -/// specific to that worker. -#[derive(Clone)] -pub struct Executor { - config: Config, +/// Runs the prevalidation on the given code. Returns a [`RuntimeBlob`] if it succeeds. +pub fn prevalidate(code: &[u8]) -> Result { + let blob = RuntimeBlob::new(code)?; + // It's assumed this function will take care of any prevalidation logic + // that needs to be done. + // + // Do nothing for now. 
+ Ok(blob) } -impl Executor { - pub fn new(params: ExecutorParams) -> Result { - let mut config = DEFAULT_CONFIG.clone(); - config.semantics = params_to_wasmtime_semantics(¶ms)?; - - Ok(Self { config }) - } - - /// Executes the given PVF in the form of a compiled artifact and returns the result of - /// execution upon success. - /// - /// # Safety - /// - /// The caller must ensure that the compiled artifact passed here was: - /// 1) produced by `prepare`, - /// 2) was not modified, - /// - /// Failure to adhere to these requirements might lead to crashes and arbitrary code execution. - pub unsafe fn execute( - &self, - compiled_artifact_blob: &[u8], - params: &[u8], - ) -> Result, String> { - let mut extensions = sp_externalities::Extensions::new(); - - extensions.register(sp_core::traits::ReadRuntimeVersionExt::new(ReadRuntimeVersion)); - - let mut ext = ValidationExternalities(extensions); - - match sc_executor::with_externalities_safe(&mut ext, || { - let runtime = self.create_runtime_from_bytes(compiled_artifact_blob)?; - runtime.new_instance()?.call(InvokeMethod::Export("validate_block"), params) - }) { - Ok(Ok(ok)) => Ok(ok), - Ok(Err(err)) | Err(err) => Err(err), - } - .map_err(|err| format!("execute error: {:?}", err)) - } - - /// Constructs the runtime for the given PVF, given the artifact bytes. - /// - /// # Safety - /// - /// The caller must ensure that the compiled artifact passed here was: - /// 1) produced by `prepare`, - /// 2) was not modified, - /// - /// Failure to adhere to these requirements might lead to crashes and arbitrary code execution. - pub unsafe fn create_runtime_from_bytes( - &self, - compiled_artifact_blob: &[u8], - ) -> Result { - sc_executor_wasmtime::create_runtime_from_artifact_bytes::( - compiled_artifact_blob, - self.config.clone(), - ) - } +/// Runs preparation on the given runtime blob. If successful, it returns a serialized compiled +/// artifact which can then be used to pass into `Executor::execute` after writing it to the disk. +pub fn prepare( + blob: RuntimeBlob, + executor_params: &ExecutorParams, +) -> Result, sc_executor_common::error::WasmError> { + let semantics = params_to_wasmtime_semantics(executor_params); + sc_executor_wasmtime::prepare_runtime_artifact(blob, &semantics) } /// Available host functions. We leave out: diff --git a/polkadot/node/core/pvf/common/src/lib.rs b/polkadot/node/core/pvf/common/src/lib.rs index 53c287ea970907cb8884ec47c74bd4e03e38d7e7..c041c60aaf9c766cc5cfbc5c84d88091e0dda5a4 100644 --- a/polkadot/node/core/pvf/common/src/lib.rs +++ b/polkadot/node/core/pvf/common/src/lib.rs @@ -31,11 +31,12 @@ pub use sp_tracing; const LOG_TARGET: &str = "parachain::pvf-common"; +pub const RUNTIME_VERSION: &str = env!("SUBSTRATE_WASMTIME_VERSION"); + use std::{ - io::{Read, Write}, + io::{self, Read, Write}, mem, }; -use tokio::io; #[cfg(feature = "test-utils")] pub mod tests { @@ -46,11 +47,13 @@ pub mod tests { } /// Status of security features on the current system. -#[derive(Debug, Clone, Default)] +#[derive(Debug, Clone, Default, PartialEq, Eq)] pub struct SecurityStatus { /// Whether the landlock features we use are fully available on this system. pub can_enable_landlock: bool, - // Whether we are able to unshare the user namespace and change the filesystem root. + /// Whether the seccomp features we use are fully available on this system. + pub can_enable_seccomp: bool, + /// Whether we are able to unshare the user namespace and change the filesystem root. 
pub can_unshare_user_namespace_and_change_root: bool, } diff --git a/polkadot/node/core/pvf/common/src/prepare.rs b/polkadot/node/core/pvf/common/src/prepare.rs index c205eddfb8b1fbeaf5842d8b468ca94f4806f23a..28ab682ec136d962364019f2623f80d8e7ad6194 100644 --- a/polkadot/node/core/pvf/common/src/prepare.rs +++ b/polkadot/node/core/pvf/common/src/prepare.rs @@ -15,6 +15,25 @@ // along with Polkadot. If not, see . use parity_scale_codec::{Decode, Encode}; +use std::path::PathBuf; + +/// Result from prepare worker if successful. +#[derive(Debug, Clone, Default, Encode, Decode)] +pub struct PrepareWorkerSuccess { + /// Checksum of the compiled PVF. + pub checksum: String, + /// Stats of the current preparation run. + pub stats: PrepareStats, +} + +/// Result of PVF preparation if successful. +#[derive(Debug, Clone, Default)] +pub struct PrepareSuccess { + /// Canonical path to the compiled artifact. + pub path: PathBuf, + /// Stats of the current preparation run. + pub stats: PrepareStats, +} /// Preparation statistics, including the CPU time and memory taken. #[derive(Debug, Clone, Default, Encode, Decode)] @@ -29,12 +48,14 @@ pub struct PrepareStats { /// supported by the OS, `ru_maxrss`. #[derive(Clone, Debug, Default, Encode, Decode)] pub struct MemoryStats { - /// Memory stats from `tikv_jemalloc_ctl`. + /// Memory stats from `tikv_jemalloc_ctl`, polling-based and not very precise. #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] pub memory_tracker_stats: Option, /// `ru_maxrss` from `getrusage`. `None` if an error occurred. #[cfg(target_os = "linux")] pub max_rss: Option, + /// Peak allocation in bytes measured by tracking allocator + pub peak_tracked_alloc: u64, } /// Statistics of collected memory metrics. diff --git a/polkadot/node/core/pvf/common/src/pvf.rs b/polkadot/node/core/pvf/common/src/pvf.rs index 0cc86434c19526396b67565bee30c3a38dae9ac6..2d8f6430187b2505e82517a0b93daf30b4e3a504 100644 --- a/polkadot/node/core/pvf/common/src/pvf.rs +++ b/polkadot/node/core/pvf/common/src/pvf.rs @@ -115,7 +115,7 @@ impl fmt::Debug for PvfPrepData { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, - "Pvf {{ code, code_hash: {:?}, executor_params: {:?}, prep_timeout: {:?} }}", + "Pvf {{ code: [...], code_hash: {:?}, executor_params: {:?}, prep_timeout: {:?} }}", self.code_hash, self.executor_params, self.prep_timeout ) } diff --git a/polkadot/node/core/pvf/common/src/worker/mod.rs b/polkadot/node/core/pvf/common/src/worker/mod.rs index d0bd5b6bd7c56495efa8fe64730e9fadba9c066b..d4f9bbc27ea62e930dd98ef3a277f9eac6ada7d8 100644 --- a/polkadot/node/core/pvf/common/src/worker/mod.rs +++ b/polkadot/node/core/pvf/common/src/worker/mod.rs @@ -23,21 +23,25 @@ use cpu_time::ProcessTime; use futures::never::Never; use std::{ any::Any, - fmt, + fmt, io, os::unix::net::UnixStream, path::PathBuf, sync::mpsc::{Receiver, RecvTimeoutError}, time::Duration, }; -use tokio::{io, runtime::Runtime}; /// Use this macro to declare a `fn main() {}` that will create an executable that can be used for /// spawning the desired worker. #[macro_export] macro_rules! 
decl_worker_main { - ($expected_command:expr, $entrypoint:expr, $worker_version:expr $(,)*) => { + ($expected_command:expr, $entrypoint:expr, $worker_version:expr, $worker_version_hash:expr $(,)*) => { + fn get_full_version() -> String { + format!("{}-{}", $worker_version, $worker_version_hash) + } + fn print_help(expected_command: &str) { println!("{} {}", expected_command, $worker_version); + println!("commit: {}", $worker_version_hash); println!(); println!("PVF worker that is called by polkadot."); } @@ -67,6 +71,11 @@ macro_rules! decl_worker_main { println!("{}", $worker_version); return }, + // Useful for debugging. --version is used for version checks. + "--full-version" => { + println!("{}", get_full_version()); + return + }, "--check-can-enable-landlock" => { #[cfg(target_os = "linux")] @@ -75,14 +84,21 @@ macro_rules! decl_worker_main { let status = -1; std::process::exit(status) }, + "--check-can-enable-seccomp" => { + #[cfg(all(target_os = "linux", target_arch = "x86_64"))] + let status = if security::seccomp::check_is_fully_enabled() { 0 } else { -1 }; + #[cfg(not(all(target_os = "linux", target_arch = "x86_64")))] + let status = -1; + std::process::exit(status) + }, "--check-can-unshare-user-namespace-and-change-root" => { + #[cfg(target_os = "linux")] + let cache_path_tempdir = std::path::Path::new(&args[2]); #[cfg(target_os = "linux")] let status = if let Err(err) = security::unshare_user_namespace_and_change_root( $crate::worker::WorkerKind::CheckPivotRoot, worker_pid, - // We're not accessing any files, so we can try to pivot_root in the temp - // dir without conflicts with other processes. - &std::env::temp_dir(), + &cache_path_tempdir, ) { // Write the error to stderr, log it on the host-side. eprintln!("{}", err); @@ -119,6 +135,7 @@ macro_rules! decl_worker_main { let mut worker_dir_path = None; let mut node_version = None; let mut can_enable_landlock = false; + let mut can_enable_seccomp = false; let mut can_unshare_user_namespace_and_change_root = false; let mut i = 2; @@ -137,6 +154,7 @@ macro_rules! decl_worker_main { i += 1 }, "--can-enable-landlock" => can_enable_landlock = true, + "--can-enable-seccomp" => can_enable_seccomp = true, "--can-unshare-user-namespace-and-change-root" => can_unshare_user_namespace_and_change_root = true, arg => panic!("Unexpected argument found: {}", arg), @@ -151,6 +169,7 @@ macro_rules! decl_worker_main { let worker_dir_path = std::path::Path::new(worker_dir_path).to_owned(); let security_status = $crate::SecurityStatus { can_enable_landlock, + can_enable_seccomp, can_unshare_user_namespace_and_change_root, }; @@ -186,19 +205,24 @@ impl fmt::Display for WorkerKind { } } -// The worker version must be passed in so that we accurately get the version of the worker, and not -// the version that this crate was compiled with. -pub fn worker_event_loop( +// NOTE: The worker version must be passed in so that we accurately get the version of the worker, +// and not the version that this crate was compiled with. +// +// NOTE: This must not spawn any threads due to safety requirements in `event_loop` and to avoid +// errors in [`security::unshare_user_namespace_and_change_root`]. +// +/// Initializes the worker process, then runs the given event loop, which spawns a new job process +/// to securely handle each incoming request. 
+pub fn run_worker( worker_kind: WorkerKind, socket_path: PathBuf, #[cfg_attr(not(target_os = "linux"), allow(unused_mut))] mut worker_dir_path: PathBuf, node_version: Option<&str>, worker_version: Option<&str>, - #[cfg_attr(not(target_os = "linux"), allow(unused_variables))] security_status: &SecurityStatus, + security_status: &SecurityStatus, mut event_loop: F, ) where - F: FnMut(UnixStream, PathBuf) -> Fut, - Fut: futures::Future>, + F: FnMut(UnixStream, PathBuf) -> io::Result, { let worker_pid = std::process::id(); gum::debug!( @@ -252,7 +276,7 @@ pub fn worker_event_loop( } // Connect to the socket. - let stream = || -> std::io::Result { + let stream = || -> io::Result { let stream = UnixStream::connect(&socket_path)?; let _ = std::fs::remove_file(&socket_path); Ok(stream) @@ -307,6 +331,24 @@ pub fn worker_event_loop( let landlock_status = security::landlock::enable_for_worker(worker_kind, worker_pid, &worker_dir_path); if !matches!(landlock_status, Ok(landlock::RulesetStatus::FullyEnforced)) { + // We previously were able to enable, so this should never happen. + gum::error!( + target: LOG_TARGET, + %worker_kind, + %worker_pid, + "could not fully enable landlock: {:?}. This should not happen, please report an issue", + landlock_status + ); + } + } + + // TODO: We can enable the seccomp networking blacklist on aarch64 as well, but we need a CI + // job to catch regressions. See . + #[cfg(all(target_os = "linux", target_arch = "x86_64"))] + if security_status.can_enable_seccomp { + let seccomp_status = + security::seccomp::enable_for_worker(worker_kind, worker_pid, &worker_dir_path); + if !matches!(seccomp_status, Ok(())) { // We previously were able to enable, so this should never happen. // // TODO: Make this a real error in secure-mode. See: @@ -315,8 +357,8 @@ pub fn worker_event_loop( target: LOG_TARGET, %worker_kind, %worker_pid, - "could not fully enable landlock: {:?}. This should not happen, please report to the Polkadot devs", - landlock_status + "could not fully enable seccomp: {:?}. This should not happen, please report an issue", + seccomp_status ); } } @@ -336,18 +378,11 @@ pub fn worker_event_loop( } // Run the main worker loop. - let rt = Runtime::new().expect("Creates tokio runtime. If this panics the worker will die and the host will detect that and deal with it."); - let err = rt - .block_on(event_loop(stream, worker_dir_path)) + let err = event_loop(stream, worker_dir_path) // It's never `Ok` because it's `Ok(Never)`. .unwrap_err(); worker_shutdown_message(worker_kind, worker_pid, &err.to_string()); - - // We don't want tokio to wait for the tasks to finish. We want to bring down the worker as fast - // as possible and not wait for stalled validation to finish. This isn't strictly necessary now, - // but may be in the future. - rt.shutdown_background(); } /// Provide a consistent message on worker shutdown. @@ -428,7 +463,7 @@ fn kill_parent_node_in_emergency() { /// The motivation for this module is to coordinate worker threads without using async Rust. 
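The `thread` module below coordinates a job thread and a CPU-time monitor thread through a shared condition variable rather than async Rust: whichever thread finishes first records its outcome and wakes the waiting caller. A minimal sketch of that pattern using only std primitives (this is not the module's actual `Cond`/`WaitOutcome` API, just the underlying idea):

    use std::sync::{Arc, Condvar, Mutex};
    use std::{thread, time::Duration};

    #[derive(Clone, Copy, PartialEq, Eq, Debug)]
    enum Outcome { Pending, JobFinished, TimedOut }

    fn main() {
        let cond = Arc::new((Mutex::new(Outcome::Pending), Condvar::new()));

        // Job thread: do the work, then claim the outcome if it is still pending.
        let c = Arc::clone(&cond);
        thread::spawn(move || {
            thread::sleep(Duration::from_millis(50)); // stand-in for the actual job
            let (lock, cvar) = &*c;
            let mut outcome = lock.lock().unwrap();
            if *outcome == Outcome::Pending {
                *outcome = Outcome::JobFinished;
                cvar.notify_all();
            }
        });

        // Monitor thread: flag a timeout if the job takes too long.
        let c = Arc::clone(&cond);
        thread::spawn(move || {
            thread::sleep(Duration::from_millis(200)); // stand-in for the CPU-time check
            let (lock, cvar) = &*c;
            let mut outcome = lock.lock().unwrap();
            if *outcome == Outcome::Pending {
                *outcome = Outcome::TimedOut;
                cvar.notify_all();
            }
        });

        // Caller: block until one of the threads flips the outcome.
        let (lock, cvar) = &*cond;
        let mut outcome = lock.lock().unwrap();
        while *outcome == Outcome::Pending {
            outcome = cvar.wait(outcome).unwrap();
        }
        println!("outcome: {:?}", *outcome);
    }

The first thread to set the outcome wins; the loser's later attempt is a no-op, which gives the "first outcome sticks" behaviour the worker relies on.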
pub mod thread { use std::{ - panic, + io, panic, sync::{Arc, Condvar, Mutex}, thread, time::Duration, @@ -469,7 +504,7 @@ pub mod thread { f: F, cond: Cond, outcome: WaitOutcome, - ) -> std::io::Result> + ) -> io::Result> where F: FnOnce() -> R, F: Send + 'static + panic::UnwindSafe, @@ -487,7 +522,7 @@ pub mod thread { cond: Cond, outcome: WaitOutcome, stack_size: usize, - ) -> std::io::Result> + ) -> io::Result> where F: FnOnce() -> R, F: Send + 'static + panic::UnwindSafe, diff --git a/polkadot/node/core/pvf/common/src/worker/security.rs b/polkadot/node/core/pvf/common/src/worker/security.rs deleted file mode 100644 index b7abf028f94108c8d36c257f140936cabc0e237f..0000000000000000000000000000000000000000 --- a/polkadot/node/core/pvf/common/src/worker/security.rs +++ /dev/null @@ -1,470 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Functionality for securing workers. -//! -//! This is needed because workers are used to compile and execute untrusted code (PVFs). -//! -//! We currently employ the following security measures: -//! -//! - Restrict filesystem -//! - Use Landlock to remove all unnecessary FS access rights. -//! - Unshare the user and mount namespaces. -//! - Change the root directory to a worker-specific temporary directory. -//! - Remove env vars - -use crate::{worker::WorkerKind, LOG_TARGET}; - -/// Unshare the user namespace and change root to be the artifact directory. -/// -/// NOTE: This should not be called in a multi-threaded context. `unshare(2)`: -/// "CLONE_NEWUSER requires that the calling process is not threaded." -#[cfg(target_os = "linux")] -pub fn unshare_user_namespace_and_change_root( - worker_kind: WorkerKind, - worker_pid: u32, - worker_dir_path: &std::path::Path, -) -> Result<(), String> { - use std::{env, ffi::CString, os::unix::ffi::OsStrExt, path::Path, ptr}; - - // The following was copied from the `cstr_core` crate. - // - // TODO: Remove this once this is stable: https://github.com/rust-lang/rust/issues/105723 - #[inline] - #[doc(hidden)] - const fn cstr_is_valid(bytes: &[u8]) -> bool { - if bytes.is_empty() || bytes[bytes.len() - 1] != 0 { - return false - } - - let mut index = 0; - while index < bytes.len() - 1 { - if bytes[index] == 0 { - return false - } - index += 1; - } - true - } - - macro_rules! 
cstr { - ($e:expr) => {{ - const STR: &[u8] = concat!($e, "\0").as_bytes(); - const STR_VALID: bool = cstr_is_valid(STR); - let _ = [(); 0 - (!(STR_VALID) as usize)]; - #[allow(unused_unsafe)] - unsafe { - core::ffi::CStr::from_bytes_with_nul_unchecked(STR) - } - }} - } - - gum::debug!( - target: LOG_TARGET, - %worker_kind, - %worker_pid, - ?worker_dir_path, - "unsharing the user namespace and calling pivot_root", - ); - - let worker_dir_path_c = CString::new(worker_dir_path.as_os_str().as_bytes()) - .expect("on unix; the path will never contain 0 bytes; qed"); - - // Wrapper around all the work to prevent repetitive error handling. - // - // # Errors - // - // It's the caller's responsibility to call `Error::last_os_error`. Note that that alone does - // not give the context of which call failed, so we return a &str error. - || -> Result<(), &'static str> { - // SAFETY: We pass null-terminated C strings and use the APIs as documented. In fact, steps - // (2) and (3) are adapted from the example in pivot_root(2), with the additional - // change described in the `pivot_root(".", ".")` section. - unsafe { - // 1. `unshare` the user and the mount namespaces. - if libc::unshare(libc::CLONE_NEWUSER | libc::CLONE_NEWNS) < 0 { - return Err("unshare user and mount namespaces") - } - - // 2. Setup mounts. - // - // Ensure that new root and its parent mount don't have shared propagation (which would - // cause pivot_root() to return an error), and prevent propagation of mount events to - // the initial mount namespace. - if libc::mount( - ptr::null(), - cstr!("/").as_ptr(), - ptr::null(), - libc::MS_REC | libc::MS_PRIVATE, - ptr::null(), - ) < 0 - { - return Err("mount MS_PRIVATE") - } - // Ensure that the new root is a mount point. - let additional_flags = - if let WorkerKind::Execute | WorkerKind::CheckPivotRoot = worker_kind { - libc::MS_RDONLY - } else { - 0 - }; - if libc::mount( - worker_dir_path_c.as_ptr(), - worker_dir_path_c.as_ptr(), - ptr::null(), // ignored when MS_BIND is used - libc::MS_BIND | - libc::MS_REC | libc::MS_NOEXEC | - libc::MS_NODEV | libc::MS_NOSUID | - libc::MS_NOATIME | additional_flags, - ptr::null(), // ignored when MS_BIND is used - ) < 0 - { - return Err("mount MS_BIND") - } - - // 3. `pivot_root` to the artifact directory. - if libc::chdir(worker_dir_path_c.as_ptr()) < 0 { - return Err("chdir to worker dir path") - } - if libc::syscall(libc::SYS_pivot_root, cstr!(".").as_ptr(), cstr!(".").as_ptr()) < 0 { - return Err("pivot_root") - } - if libc::umount2(cstr!(".").as_ptr(), libc::MNT_DETACH) < 0 { - return Err("umount the old root mount point") - } - } - - Ok(()) - }() - .map_err(|err_ctx| { - let err = std::io::Error::last_os_error(); - format!("{}: {}", err_ctx, err) - })?; - - // Do some assertions. - if env::current_dir().map_err(|err| err.to_string())? != Path::new("/") { - return Err("expected current dir after pivot_root to be `/`".into()) - } - env::set_current_dir("..").map_err(|err| err.to_string())?; - if env::current_dir().map_err(|err| err.to_string())? != Path::new("/") { - return Err("expected not to be able to break out of new root by doing `..`".into()) - } - - Ok(()) -} - -/// Require env vars to have been removed when spawning the process, to prevent malicious code from -/// accessing them. 
-pub fn check_env_vars_were_cleared(worker_kind: WorkerKind, worker_pid: u32) -> bool { - let mut ok = true; - - for (key, value) in std::env::vars_os() { - // TODO: *theoretically* the value (or mere presence) of `RUST_LOG` can be a source of - // randomness for malicious code. In the future we can remove it also and log in the host; - // see . - if key == "RUST_LOG" { - continue - } - // An exception for MacOS. This is not a secure platform anyway, so we let it slide. - #[cfg(target_os = "macos")] - if key == "__CF_USER_TEXT_ENCODING" { - continue - } - - gum::error!( - target: LOG_TARGET, - %worker_kind, - %worker_pid, - ?key, - ?value, - "env var was present that should have been removed", - ); - - ok = false; - } - - ok -} - -/// The [landlock] docs say it best: -/// -/// > "Landlock is a security feature available since Linux 5.13. The goal is to enable to restrict -/// ambient rights (e.g., global filesystem access) for a set of processes by creating safe security -/// sandboxes as new security layers in addition to the existing system-wide access-controls. This -/// kind of sandbox is expected to help mitigate the security impact of bugs, unexpected or -/// malicious behaviors in applications. Landlock empowers any process, including unprivileged ones, -/// to securely restrict themselves." -/// -/// [landlock]: https://docs.rs/landlock/latest/landlock/index.html -#[cfg(target_os = "linux")] -pub mod landlock { - pub use landlock::RulesetStatus; - - use crate::{worker::WorkerKind, LOG_TARGET}; - use landlock::*; - use std::{ - fmt, - path::{Path, PathBuf}, - }; - - /// Landlock ABI version. We use ABI V1 because: - /// - /// 1. It is supported by our reference kernel version. - /// 2. Later versions do not (yet) provide additional security. - /// - /// # Versions (as of June 2023) - /// - /// - Polkadot reference kernel version: 5.16+ - /// - ABI V1: 5.13 - introduces landlock, including full restrictions on file reads - /// - ABI V2: 5.19 - adds ability to configure file renaming (not used by us) - /// - /// # Determinism - /// - /// You may wonder whether we could always use the latest ABI instead of only the ABI supported - /// by the reference kernel version. It seems plausible, since landlock provides a best-effort - /// approach to enabling sandboxing. For example, if the reference version only supported V1 and - /// we were on V2, then landlock would use V2 if it was supported on the current machine, and - /// just fall back to V1 if not. - /// - /// The issue with this is indeterminacy. If half of validators were on V2 and half were on V1, - /// they may have different semantics on some PVFs. So a malicious PVF now has a new attack - /// vector: they can exploit this indeterminism between landlock ABIs! - /// - /// On the other hand we do want validators to be as secure as possible and protect their keys - /// from attackers. And, the risk with indeterminacy is low and there are other indeterminacy - /// vectors anyway. So we will only upgrade to a new ABI if either the reference kernel version - /// supports it or if it introduces some new feature that is beneficial to security. 
- pub const LANDLOCK_ABI: ABI = ABI::V1; - - #[derive(Debug)] - pub enum TryRestrictError { - InvalidExceptionPath(PathBuf), - RulesetError(RulesetError), - } - - impl From for TryRestrictError { - fn from(err: RulesetError) -> Self { - Self::RulesetError(err) - } - } - - impl fmt::Display for TryRestrictError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Self::InvalidExceptionPath(path) => write!(f, "invalid exception path: {:?}", path), - Self::RulesetError(err) => write!(f, "ruleset error: {}", err.to_string()), - } - } - } - - impl std::error::Error for TryRestrictError {} - - /// Try to enable landlock for the given kind of worker. - pub fn enable_for_worker( - worker_kind: WorkerKind, - worker_pid: u32, - worker_dir_path: &Path, - ) -> Result> { - let exceptions: Vec<(PathBuf, BitFlags)> = match worker_kind { - WorkerKind::Prepare => { - vec![(worker_dir_path.to_owned(), AccessFs::WriteFile.into())] - }, - WorkerKind::Execute => { - vec![(worker_dir_path.to_owned(), AccessFs::ReadFile.into())] - }, - WorkerKind::CheckPivotRoot => - panic!("this should only be passed for checking pivot_root; qed"), - }; - - gum::debug!( - target: LOG_TARGET, - %worker_kind, - %worker_pid, - ?worker_dir_path, - "enabling landlock with exceptions: {:?}", - exceptions, - ); - - Ok(try_restrict(exceptions)?) - } - - // TODO: - /// Runs a check for landlock and returns a single bool indicating whether the given landlock - /// ABI is fully enabled on the current Linux environment. - pub fn check_is_fully_enabled() -> bool { - let status_from_thread: Result> = - match std::thread::spawn(|| try_restrict(std::iter::empty::<(PathBuf, AccessFs)>())) - .join() - { - Ok(Ok(status)) => Ok(status), - Ok(Err(ruleset_err)) => Err(ruleset_err.into()), - Err(_err) => Err("a panic occurred in try_restrict".into()), - }; - - matches!(status_from_thread, Ok(RulesetStatus::FullyEnforced)) - } - - /// Tries to restrict the current thread (should only be called in a process' main thread) with - /// the following landlock access controls: - /// - /// 1. all global filesystem access restricted, with optional exceptions - /// 2. ... more sandbox types (e.g. networking) may be supported in the future. - /// - /// If landlock is not supported in the current environment this is simply a noop. - /// - /// # Returns - /// - /// The status of the restriction (whether it was fully, partially, or not-at-all enforced). - fn try_restrict(fs_exceptions: I) -> Result - where - I: IntoIterator, - P: AsRef, - A: Into>, - { - let mut ruleset = - Ruleset::new().handle_access(AccessFs::from_all(LANDLOCK_ABI))?.create()?; - for (fs_path, access_bits) in fs_exceptions { - let paths = &[fs_path.as_ref().to_owned()]; - let mut rules = path_beneath_rules(paths, access_bits).peekable(); - if rules.peek().is_none() { - // `path_beneath_rules` silently ignores missing paths, so check for it manually. - return Err(TryRestrictError::InvalidExceptionPath(fs_path.as_ref().to_owned())) - } - ruleset = ruleset.add_rules(rules)?; - } - let status = ruleset.restrict_self()?; - Ok(status.ruleset) - } - - #[cfg(test)] - mod tests { - use super::*; - use std::{fs, io::ErrorKind, thread}; - - #[test] - fn restricted_thread_cannot_read_file() { - // TODO: This would be nice: . - if !check_is_fully_enabled() { - return - } - - // Restricted thread cannot read from FS. - let handle = - thread::spawn(|| { - // Create, write, and read two tmp files. This should succeed before any - // landlock restrictions are applied. 
- const TEXT: &str = "foo"; - let tmpfile1 = tempfile::NamedTempFile::new().unwrap(); - let path1 = tmpfile1.path(); - let tmpfile2 = tempfile::NamedTempFile::new().unwrap(); - let path2 = tmpfile2.path(); - - fs::write(path1, TEXT).unwrap(); - let s = fs::read_to_string(path1).unwrap(); - assert_eq!(s, TEXT); - fs::write(path2, TEXT).unwrap(); - let s = fs::read_to_string(path2).unwrap(); - assert_eq!(s, TEXT); - - // Apply Landlock with a read exception for only one of the files. - let status = try_restrict(vec![(path1, AccessFs::ReadFile)]); - if !matches!(status, Ok(RulesetStatus::FullyEnforced)) { - panic!("Ruleset should be enforced since we checked if landlock is enabled: {:?}", status); - } - - // Try to read from both files, only tmpfile1 should succeed. - let result = fs::read_to_string(path1); - assert!(matches!( - result, - Ok(s) if s == TEXT - )); - let result = fs::read_to_string(path2); - assert!(matches!( - result, - Err(err) if matches!(err.kind(), ErrorKind::PermissionDenied) - )); - - // Apply Landlock for all files. - let status = try_restrict(std::iter::empty::<(PathBuf, AccessFs)>()); - if !matches!(status, Ok(RulesetStatus::FullyEnforced)) { - panic!("Ruleset should be enforced since we checked if landlock is enabled: {:?}", status); - } - - // Try to read from tmpfile1 after landlock, it should fail. - let result = fs::read_to_string(path1); - assert!(matches!( - result, - Err(err) if matches!(err.kind(), ErrorKind::PermissionDenied) - )); - }); - - assert!(handle.join().is_ok()); - } - - #[test] - fn restricted_thread_cannot_write_file() { - // TODO: This would be nice: . - if !check_is_fully_enabled() { - return - } - - // Restricted thread cannot write to FS. - let handle = - thread::spawn(|| { - // Create and write two tmp files. This should succeed before any landlock - // restrictions are applied. - const TEXT: &str = "foo"; - let tmpfile1 = tempfile::NamedTempFile::new().unwrap(); - let path1 = tmpfile1.path(); - let tmpfile2 = tempfile::NamedTempFile::new().unwrap(); - let path2 = tmpfile2.path(); - - fs::write(path1, TEXT).unwrap(); - fs::write(path2, TEXT).unwrap(); - - // Apply Landlock with a write exception for only one of the files. - let status = try_restrict(vec![(path1, AccessFs::WriteFile)]); - if !matches!(status, Ok(RulesetStatus::FullyEnforced)) { - panic!("Ruleset should be enforced since we checked if landlock is enabled: {:?}", status); - } - - // Try to write to both files, only tmpfile1 should succeed. - let result = fs::write(path1, TEXT); - assert!(matches!(result, Ok(_))); - let result = fs::write(path2, TEXT); - assert!(matches!( - result, - Err(err) if matches!(err.kind(), ErrorKind::PermissionDenied) - )); - - // Apply Landlock for all files. - let status = try_restrict(std::iter::empty::<(PathBuf, AccessFs)>()); - if !matches!(status, Ok(RulesetStatus::FullyEnforced)) { - panic!("Ruleset should be enforced since we checked if landlock is enabled: {:?}", status); - } - - // Try to write to tmpfile1 after landlock, it should fail. 
- let result = fs::write(path1, TEXT); - assert!(matches!( - result, - Err(err) if matches!(err.kind(), ErrorKind::PermissionDenied) - )); - }); - - assert!(handle.join().is_ok()); - } - } -} diff --git a/polkadot/node/core/pvf/common/src/worker/security/landlock.rs b/polkadot/node/core/pvf/common/src/worker/security/landlock.rs new file mode 100644 index 0000000000000000000000000000000000000000..51500c733b8cea52805f0f6acdfdeb99ef4d7b68 --- /dev/null +++ b/polkadot/node/core/pvf/common/src/worker/security/landlock.rs @@ -0,0 +1,325 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! The [landlock] docs say it best: +//! +//! > "Landlock is a security feature available since Linux 5.13. The goal is to enable to restrict +//! ambient rights (e.g., global filesystem access) for a set of processes by creating safe security +//! sandboxes as new security layers in addition to the existing system-wide access-controls. This +//! kind of sandbox is expected to help mitigate the security impact of bugs, unexpected or +//! malicious behaviors in applications. Landlock empowers any process, including unprivileged ones, +//! to securely restrict themselves." +//! +//! [landlock]: https://docs.rs/landlock/latest/landlock/index.html + +pub use landlock::RulesetStatus; + +use crate::{ + worker::{stringify_panic_payload, WorkerKind}, + LOG_TARGET, +}; +use landlock::*; +use std::path::{Path, PathBuf}; + +/// Landlock ABI version. We use ABI V1 because: +/// +/// 1. It is supported by our reference kernel version. +/// 2. Later versions do not (yet) provide additional security that would benefit us. +/// +/// # Versions (as of October 2023) +/// +/// - Polkadot reference kernel version: 5.16+ +/// +/// - ABI V1: kernel 5.13 - Introduces landlock, including full restrictions on file reads. +/// +/// - ABI V2: kernel 5.19 - Adds ability to prevent file renaming. Does not help us. During +/// execution an attacker can only affect the name of a symlinked artifact and not the original +/// one. +/// +/// - ABI V3: kernel 6.2 - Adds ability to prevent file truncation. During execution, can +/// prevent attackers from affecting a symlinked artifact. We don't strictly need this as we +/// plan to check for file integrity anyway; see +/// . +/// +/// # Determinism +/// +/// You may wonder whether we could always use the latest ABI instead of only the ABI supported +/// by the reference kernel version. It seems plausible, since landlock provides a best-effort +/// approach to enabling sandboxing. For example, if the reference version only supported V1 and +/// we were on V2, then landlock would use V2 if it was supported on the current machine, and +/// just fall back to V1 if not. +/// +/// The issue with this is indeterminacy. If half of validators were on V2 and half were on V1, +/// they may have different semantics on some PVFs. 
So a malicious PVF now has a new attack +/// vector: they can exploit this indeterminism between landlock ABIs! +/// +/// On the other hand we do want validators to be as secure as possible and protect their keys +/// from attackers. And, the risk with indeterminacy is low and there are other indeterminacy +/// vectors anyway. So we will only upgrade to a new ABI if either the reference kernel version +/// supports it or if it introduces some new feature that is beneficial to security. +pub const LANDLOCK_ABI: ABI = ABI::V1; + +#[derive(thiserror::Error, Debug)] +pub enum Error { + #[error("Invalid exception path: {0:?}")] + InvalidExceptionPath(PathBuf), + #[error(transparent)] + RulesetError(#[from] RulesetError), + #[error("A panic occurred in try_restrict: {0}")] + Panic(String), +} + +pub type Result = std::result::Result; + +/// Try to enable landlock for the given kind of worker. +pub fn enable_for_worker( + worker_kind: WorkerKind, + worker_pid: u32, + worker_dir_path: &Path, +) -> Result { + let exceptions: Vec<(PathBuf, BitFlags)> = match worker_kind { + WorkerKind::Prepare => { + vec![(worker_dir_path.to_owned(), AccessFs::WriteFile.into())] + }, + WorkerKind::Execute => { + vec![(worker_dir_path.to_owned(), AccessFs::ReadFile.into())] + }, + WorkerKind::CheckPivotRoot => + panic!("this should only be passed for checking pivot_root; qed"), + }; + + gum::trace!( + target: LOG_TARGET, + %worker_kind, + %worker_pid, + ?worker_dir_path, + "enabling landlock with exceptions: {:?}", + exceptions, + ); + + try_restrict(exceptions) +} + +// TODO: +/// Runs a check for landlock and returns a single bool indicating whether the given landlock +/// ABI is fully enabled on the current Linux environment. +pub fn check_is_fully_enabled() -> bool { + let status_from_thread: Result = + match std::thread::spawn(|| try_restrict(std::iter::empty::<(PathBuf, AccessFs)>())).join() + { + Ok(Ok(status)) => Ok(status), + Ok(Err(ruleset_err)) => Err(ruleset_err.into()), + Err(err) => Err(Error::Panic(stringify_panic_payload(err))), + }; + + matches!(status_from_thread, Ok(RulesetStatus::FullyEnforced)) +} + +/// Tries to restrict the current thread (should only be called in a process' main thread) with +/// the following landlock access controls: +/// +/// 1. all global filesystem access restricted, with optional exceptions +/// 2. ... more sandbox types (e.g. networking) may be supported in the future. +/// +/// If landlock is not supported in the current environment this is simply a noop. +/// +/// # Returns +/// +/// The status of the restriction (whether it was fully, partially, or not-at-all enforced). +fn try_restrict(fs_exceptions: I) -> Result +where + I: IntoIterator, + P: AsRef, + A: Into>, +{ + let mut ruleset = + Ruleset::default().handle_access(AccessFs::from_all(LANDLOCK_ABI))?.create()?; + for (fs_path, access_bits) in fs_exceptions { + let paths = &[fs_path.as_ref().to_owned()]; + let mut rules = path_beneath_rules(paths, access_bits).peekable(); + if rules.peek().is_none() { + // `path_beneath_rules` silently ignores missing paths, so check for it manually. + return Err(Error::InvalidExceptionPath(fs_path.as_ref().to_owned())) + } + ruleset = ruleset.add_rules(rules)?; + } + let status = ruleset.restrict_self()?; + Ok(status.ruleset) +} + +#[cfg(test)] +mod tests { + use super::*; + use std::{fs, io::ErrorKind, thread}; + + #[test] + fn restricted_thread_cannot_read_file() { + // TODO: This would be nice: . 
+ if !check_is_fully_enabled() { + return + } + + // Restricted thread cannot read from FS. + let handle = thread::spawn(|| { + // Create, write, and read two tmp files. This should succeed before any + // landlock restrictions are applied. + const TEXT: &str = "foo"; + let tmpfile1 = tempfile::NamedTempFile::new().unwrap(); + let path1 = tmpfile1.path(); + let tmpfile2 = tempfile::NamedTempFile::new().unwrap(); + let path2 = tmpfile2.path(); + + fs::write(path1, TEXT).unwrap(); + let s = fs::read_to_string(path1).unwrap(); + assert_eq!(s, TEXT); + fs::write(path2, TEXT).unwrap(); + let s = fs::read_to_string(path2).unwrap(); + assert_eq!(s, TEXT); + + // Apply Landlock with a read exception for only one of the files. + let status = try_restrict(vec![(path1, AccessFs::ReadFile)]); + if !matches!(status, Ok(RulesetStatus::FullyEnforced)) { + panic!( + "Ruleset should be enforced since we checked if landlock is enabled: {:?}", + status + ); + } + + // Try to read from both files, only tmpfile1 should succeed. + let result = fs::read_to_string(path1); + assert!(matches!( + result, + Ok(s) if s == TEXT + )); + let result = fs::read_to_string(path2); + assert!(matches!( + result, + Err(err) if matches!(err.kind(), ErrorKind::PermissionDenied) + )); + + // Apply Landlock for all files. + let status = try_restrict(std::iter::empty::<(PathBuf, AccessFs)>()); + if !matches!(status, Ok(RulesetStatus::FullyEnforced)) { + panic!( + "Ruleset should be enforced since we checked if landlock is enabled: {:?}", + status + ); + } + + // Try to read from tmpfile1 after landlock, it should fail. + let result = fs::read_to_string(path1); + assert!(matches!( + result, + Err(err) if matches!(err.kind(), ErrorKind::PermissionDenied) + )); + }); + + assert!(handle.join().is_ok()); + } + + #[test] + fn restricted_thread_cannot_write_file() { + // TODO: This would be nice: . + if !check_is_fully_enabled() { + return + } + + // Restricted thread cannot write to FS. + let handle = thread::spawn(|| { + // Create and write two tmp files. This should succeed before any landlock + // restrictions are applied. + const TEXT: &str = "foo"; + let tmpfile1 = tempfile::NamedTempFile::new().unwrap(); + let path1 = tmpfile1.path(); + let tmpfile2 = tempfile::NamedTempFile::new().unwrap(); + let path2 = tmpfile2.path(); + + fs::write(path1, TEXT).unwrap(); + fs::write(path2, TEXT).unwrap(); + + // Apply Landlock with a write exception for only one of the files. + let status = try_restrict(vec![(path1, AccessFs::WriteFile)]); + if !matches!(status, Ok(RulesetStatus::FullyEnforced)) { + panic!( + "Ruleset should be enforced since we checked if landlock is enabled: {:?}", + status + ); + } + + // Try to write to both files, only tmpfile1 should succeed. + let result = fs::write(path1, TEXT); + assert!(matches!(result, Ok(_))); + let result = fs::write(path2, TEXT); + assert!(matches!( + result, + Err(err) if matches!(err.kind(), ErrorKind::PermissionDenied) + )); + + // Apply Landlock for all files. + let status = try_restrict(std::iter::empty::<(PathBuf, AccessFs)>()); + if !matches!(status, Ok(RulesetStatus::FullyEnforced)) { + panic!( + "Ruleset should be enforced since we checked if landlock is enabled: {:?}", + status + ); + } + + // Try to write to tmpfile1 after landlock, it should fail. 
+ let result = fs::write(path1, TEXT); + assert!(matches!( + result, + Err(err) if matches!(err.kind(), ErrorKind::PermissionDenied) + )); + }); + + assert!(handle.join().is_ok()); + } + + // Test that checks whether landlock under our ABI version is able to truncate files. + #[test] + fn restricted_thread_can_truncate_file() { + // TODO: This would be nice: . + if !check_is_fully_enabled() { + return + } + + // Restricted thread can truncate file. + let handle = thread::spawn(|| { + // Create and write a file. This should succeed before any landlock + // restrictions are applied. + const TEXT: &str = "foo"; + let tmpfile = tempfile::NamedTempFile::new().unwrap(); + let path = tmpfile.path(); + + fs::write(path, TEXT).unwrap(); + + // Apply Landlock with all exceptions under the current ABI. + let status = try_restrict(vec![(path, AccessFs::from_all(LANDLOCK_ABI))]); + if !matches!(status, Ok(RulesetStatus::FullyEnforced)) { + panic!( + "Ruleset should be enforced since we checked if landlock is enabled: {:?}", + status + ); + } + + // Try to truncate the file. + let result = tmpfile.as_file().set_len(0); + assert!(result.is_ok()); + }); + + assert!(handle.join().is_ok()); + } +} diff --git a/polkadot/node/core/pvf/common/src/worker/security/mod.rs b/polkadot/node/core/pvf/common/src/worker/security/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..9a38ed172773dff10f533434c1c928d8ed99868d --- /dev/null +++ b/polkadot/node/core/pvf/common/src/worker/security/mod.rs @@ -0,0 +1,189 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Functionality for securing workers. +//! +//! This is needed because workers are used to compile and execute untrusted code (PVFs). +//! +//! We currently employ the following security measures: +//! +//! - Restrict filesystem +//! - Use Landlock to remove all unnecessary FS access rights. +//! - Unshare the user and mount namespaces. +//! - Change the root directory to a worker-specific temporary directory. +//! - Restrict networking by blocking socket creation and io_uring. +//! - Remove env vars + +use crate::{worker::WorkerKind, LOG_TARGET}; + +#[cfg(target_os = "linux")] +pub mod landlock; + +#[cfg(all(target_os = "linux", target_arch = "x86_64"))] +pub mod seccomp; + +/// Unshare the user namespace and change root to be the artifact directory. +/// +/// NOTE: This should not be called in a multi-threaded context. `unshare(2)`: +/// "CLONE_NEWUSER requires that the calling process is not threaded." +#[cfg(target_os = "linux")] +pub fn unshare_user_namespace_and_change_root( + worker_kind: WorkerKind, + worker_pid: u32, + worker_dir_path: &std::path::Path, +) -> Result<(), String> { + use std::{env, ffi::CString, os::unix::ffi::OsStrExt, path::Path, ptr}; + + // TODO: Remove this once this is stable: https://github.com/rust-lang/rust/issues/105723 + macro_rules! 
cstr_ptr { + ($e:expr) => { + concat!($e, "\0").as_ptr().cast::() + }; + } + + gum::trace!( + target: LOG_TARGET, + %worker_kind, + %worker_pid, + ?worker_dir_path, + "unsharing the user namespace and calling pivot_root", + ); + + let worker_dir_path_c = CString::new(worker_dir_path.as_os_str().as_bytes()) + .expect("on unix; the path will never contain 0 bytes; qed"); + + // Wrapper around all the work to prevent repetitive error handling. + // + // # Errors + // + // It's the caller's responsibility to call `Error::last_os_error`. Note that that alone does + // not give the context of which call failed, so we return a &str error. + || -> Result<(), &'static str> { + // SAFETY: We pass null-terminated C strings and use the APIs as documented. In fact, steps + // (2) and (3) are adapted from the example in pivot_root(2), with the additional + // change described in the `pivot_root(".", ".")` section. + unsafe { + // 1. `unshare` the user and the mount namespaces. + if libc::unshare(libc::CLONE_NEWUSER | libc::CLONE_NEWNS) < 0 { + return Err("unshare user and mount namespaces") + } + + // 2. Setup mounts. + // + // Ensure that new root and its parent mount don't have shared propagation (which would + // cause pivot_root() to return an error), and prevent propagation of mount events to + // the initial mount namespace. + if libc::mount( + ptr::null(), + cstr_ptr!("/"), + ptr::null(), + libc::MS_REC | libc::MS_PRIVATE, + ptr::null(), + ) < 0 + { + return Err("mount MS_PRIVATE") + } + // Ensure that the new root is a mount point. + let additional_flags = + if let WorkerKind::Execute | WorkerKind::CheckPivotRoot = worker_kind { + libc::MS_RDONLY + } else { + 0 + }; + if libc::mount( + worker_dir_path_c.as_ptr(), + worker_dir_path_c.as_ptr(), + ptr::null(), // ignored when MS_BIND is used + libc::MS_BIND | + libc::MS_REC | libc::MS_NOEXEC | + libc::MS_NODEV | libc::MS_NOSUID | + libc::MS_NOATIME | additional_flags, + ptr::null(), // ignored when MS_BIND is used + ) < 0 + { + return Err("mount MS_BIND") + } + + // 3. `pivot_root` to the artifact directory. + if libc::chdir(worker_dir_path_c.as_ptr()) < 0 { + return Err("chdir to worker dir path") + } + if libc::syscall(libc::SYS_pivot_root, cstr_ptr!("."), cstr_ptr!(".")) < 0 { + return Err("pivot_root") + } + if libc::umount2(cstr_ptr!("."), libc::MNT_DETACH) < 0 { + return Err("umount the old root mount point") + } + } + + Ok(()) + }() + .map_err(|err_ctx| { + let err = std::io::Error::last_os_error(); + format!("{}: {}", err_ctx, err) + })?; + + // Do some assertions. + if env::current_dir().map_err(|err| err.to_string())? != Path::new("/") { + return Err("expected current dir after pivot_root to be `/`".into()) + } + env::set_current_dir("..").map_err(|err| err.to_string())?; + if env::current_dir().map_err(|err| err.to_string())? != Path::new("/") { + return Err("expected not to be able to break out of new root by doing `..`".into()) + } + + Ok(()) +} + +/// Require env vars to have been removed when spawning the process, to prevent malicious code from +/// accessing them. +pub fn check_env_vars_were_cleared(worker_kind: WorkerKind, worker_pid: u32) -> bool { + gum::trace!( + target: LOG_TARGET, + %worker_kind, + %worker_pid, + "clearing env vars in worker", + ); + + let mut ok = true; + + for (key, value) in std::env::vars_os() { + // TODO: *theoretically* the value (or mere presence) of `RUST_LOG` can be a source of + // randomness for malicious code. In the future we can remove it also and log in the host; + // see . 
+ if key == "RUST_LOG" { + continue + } + // An exception for MacOS. This is not a secure platform anyway, so we let it slide. + #[cfg(target_os = "macos")] + if key == "__CF_USER_TEXT_ENCODING" { + continue + } + + gum::error!( + target: LOG_TARGET, + %worker_kind, + %worker_pid, + ?key, + ?value, + "env var was present that should have been removed", + ); + + ok = false; + } + + ok +} diff --git a/polkadot/node/core/pvf/common/src/worker/security/seccomp.rs b/polkadot/node/core/pvf/common/src/worker/security/seccomp.rs new file mode 100644 index 0000000000000000000000000000000000000000..c3822d3c4c698834c4a92de5a9204398641e016f --- /dev/null +++ b/polkadot/node/core/pvf/common/src/worker/security/seccomp.rs @@ -0,0 +1,199 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Functionality for sandboxing workers by restricting their capabilities by blocking certain +//! syscalls with seccomp. +//! +//! For security we block the following: +//! +//! - creation of new sockets - these are unneeded in PVF jobs, and we can safely block them without +//! affecting consensus. +//! +//! - `io_uring` - allows for networking and needs to be blocked. See below for a discussion on the +//! safety of doing this. +//! +//! # Safety of blocking io_uring +//! +//! `io_uring` is just a way of issuing system calls in an async manner, and there is nothing +//! stopping wasmtime from legitimately using it. Fortunately, at the moment it does not. Generally, +//! not many applications use `io_uring` in production yet, because of the numerous kernel CVEs +//! discovered. It's still under a lot of development. Android outright banned `io_uring` for these +//! reasons. +//! +//! Considering `io_uring`'s status discussed above, and that it very likely would get detected +//! either by our [static analysis](https://github.com/paritytech/polkadot-sdk/pull/1663) or by +//! testing, we think it is safe to block it. +//! +//! ## Consensus analysis +//! +//! If execution hits an edge case code path unique to a given machine, it's already taken a +//! non-deterministic branch anyway. After all, we just care that the majority of validators reach +//! the same result and preserve consensus. So worst-case scenario, there's a dispute, and we can +//! always admit fault and refund the wrong validator. On the other hand, if all validators take the +//! code path that results in a seccomp violation, then they would all vote against the current +//! candidate, which is also fine. The violation would get logged (in big scary letters) and +//! hopefully some validator reports it to us. +//! +//! Actually, a worst-worse-case scenario is that 50% of validators vote against, so that there is +//! no consensus. But so many things would have to go wrong for that to happen: +//! +//! 1. An update to `wasmtime` is introduced that uses io_uring (unlikely as io_uring is mainly for +//! 
IO-heavy applications) +//! +//! 2. The new syscall is not detected by our static analysis +//! +//! 3. It is never triggered in any of our tests +//! +//! 4. It then gets triggered on some super edge case in production on 50% of validators causing a +//! stall (bad but very unlikely) +//! +//! 5. Or, it triggers on only a few validators causing a dispute (more likely but not as bad) +//! +//! Considering how many things would have to go wrong here, we believe it's safe to block +//! `io_uring`. +//! +//! # Action on syscall violations +//! +//! When a forbidden syscall is attempted we immediately kill the process in order to prevent the +//! attacker from doing anything else. In execution, this will result in voting against the +//! candidate. + +use crate::{ + worker::{stringify_panic_payload, WorkerKind}, + LOG_TARGET, +}; +use seccompiler::*; +use std::{collections::BTreeMap, path::Path}; + +/// The action to take on caught syscalls. +#[cfg(not(test))] +const CAUGHT_ACTION: SeccompAction = SeccompAction::KillProcess; +/// Don't kill the process when testing. +#[cfg(test)] +const CAUGHT_ACTION: SeccompAction = SeccompAction::Errno(libc::EACCES as u32); + +#[derive(thiserror::Error, Debug)] +pub enum Error { + #[error(transparent)] + Seccomp(#[from] seccompiler::Error), + #[error(transparent)] + Backend(#[from] seccompiler::BackendError), + #[error("A panic occurred in try_restrict: {0}")] + Panic(String), +} + +pub type Result = std::result::Result; + +/// Try to enable seccomp for the given kind of worker. +pub fn enable_for_worker( + worker_kind: WorkerKind, + worker_pid: u32, + worker_dir_path: &Path, +) -> Result<()> { + gum::trace!( + target: LOG_TARGET, + %worker_kind, + %worker_pid, + ?worker_dir_path, + "enabling seccomp", + ); + + try_restrict() +} + +/// Runs a check for seccomp and returns a single bool indicating whether seccomp with our rules is +/// fully enabled on the current Linux environment. +pub fn check_is_fully_enabled() -> bool { + let status_from_thread: Result<()> = match std::thread::spawn(|| try_restrict()).join() { + Ok(Ok(())) => Ok(()), + Ok(Err(err)) => Err(err.into()), + Err(err) => Err(Error::Panic(stringify_panic_payload(err))), + }; + + matches!(status_from_thread, Ok(())) +} + +/// Applies a `seccomp` filter to disable networking for the PVF threads. +pub fn try_restrict() -> Result<()> { + // Build a `seccomp` filter which by default allows all syscalls except those blocked in the + // blacklist. + let mut blacklisted_rules = BTreeMap::default(); + + // Restrict the creation of sockets. + blacklisted_rules.insert(libc::SYS_socketpair, vec![]); + blacklisted_rules.insert(libc::SYS_socket, vec![]); + + // Prevent connecting to sockets for extra safety. + blacklisted_rules.insert(libc::SYS_connect, vec![]); + + // Restrict io_uring. + blacklisted_rules.insert(libc::SYS_io_uring_setup, vec![]); + blacklisted_rules.insert(libc::SYS_io_uring_enter, vec![]); + blacklisted_rules.insert(libc::SYS_io_uring_register, vec![]); + + let filter = SeccompFilter::new( + blacklisted_rules, + // Mismatch action: what to do if not in rule list. + SeccompAction::Allow, + // Match action: what to do if in rule list. + CAUGHT_ACTION, + TargetArch::x86_64, + )?; + + let bpf_prog: BpfProgram = filter.try_into()?; + + // Applies filter (runs seccomp) to the calling thread. 
+ seccompiler::apply_filter(&bpf_prog)?; + + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + use std::{io::ErrorKind, net::TcpListener, thread}; + + #[test] + fn sandboxed_thread_cannot_use_sockets() { + // TODO: This would be nice: . + if !check_is_fully_enabled() { + return + } + + let handle = thread::spawn(|| { + // Open a socket, this should succeed before seccomp is applied. + TcpListener::bind("127.0.0.1:0").unwrap(); + + let status = try_restrict(); + if !matches!(status, Ok(())) { + panic!("Ruleset should be enforced since we checked if seccomp is enabled"); + } + + // Try to open a socket after seccomp. + assert!(matches!( + TcpListener::bind("127.0.0.1:0"), + Err(err) if matches!(err.kind(), ErrorKind::PermissionDenied) + )); + + // Other syscalls should still work. + unsafe { + assert!(libc::getppid() > 0); + } + }); + + assert!(handle.join().is_ok()); + } +} diff --git a/polkadot/node/core/pvf/execute-worker/Cargo.toml b/polkadot/node/core/pvf/execute-worker/Cargo.toml index 23678d95696e7ec10f7ac9a69a6d433b5e9275bb..40e0ff4f0a195cc06c41f9fa4295ea4d42acb569 100644 --- a/polkadot/node/core/pvf/execute-worker/Cargo.toml +++ b/polkadot/node/core/pvf/execute-worker/Cargo.toml @@ -8,10 +8,10 @@ license.workspace = true [dependencies] cpu-time = "1.0.0" -futures = "0.3.21" gum = { package = "tracing-gum", path = "../../../gum" } -rayon = "1.5.1" -tokio = { version = "1.24.2", features = ["fs", "process"] } +os_pipe = "1.1.4" +nix = { version = "0.27.1", features = ["resource", "process"]} +libc = "0.2.139" parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } @@ -19,9 +19,5 @@ polkadot-node-core-pvf-common = { path = "../common" } polkadot-parachain-primitives = { path = "../../../../parachain" } polkadot-primitives = { path = "../../../../primitives" } -sp-core = { path = "../../../../../substrate/primitives/core" } -sp-maybe-compressed-blob = { path = "../../../../../substrate/primitives/maybe-compressed-blob" } -sp-tracing = { path = "../../../../../substrate/primitives/tracing" } - [features] builder = [] diff --git a/polkadot/node/core/pvf/execute-worker/src/lib.rs b/polkadot/node/core/pvf/execute-worker/src/lib.rs index 02eaedb96f28e8f04e06d0f034c7657ddcb4620a..d82a8fca65d34c73c085b2ad2e2b87eb91fa3d47 100644 --- a/polkadot/node/core/pvf/execute-worker/src/lib.rs +++ b/polkadot/node/core/pvf/execute-worker/src/lib.rs @@ -16,33 +16,45 @@ //! Contains the logic for executing PVFs. Used by the polkadot-execute-worker binary. -pub use polkadot_node_core_pvf_common::{executor_intf::Executor, worker_dir, SecurityStatus}; +pub use polkadot_node_core_pvf_common::{ + executor_intf::execute_artifact, worker_dir, SecurityStatus, +}; // NOTE: Initializing logging in e.g. tests will not have an effect in the workers, as they are // separate spawned processes. Run with e.g. `RUST_LOG=parachain::pvf-execute-worker=trace`. 
const LOG_TARGET: &str = "parachain::pvf-execute-worker"; use cpu_time::ProcessTime; +use nix::{ + errno::Errno, + sys::{ + resource::{Usage, UsageWho}, + wait::WaitStatus, + }, + unistd::{ForkResult, Pid}, +}; +use os_pipe::{self, PipeReader, PipeWriter}; use parity_scale_codec::{Decode, Encode}; use polkadot_node_core_pvf_common::{ error::InternalValidationError, - execute::{Handshake, Response}, - executor_intf::NATIVE_STACK_MAX, + execute::{Handshake, JobError, JobResponse, JobResult, WorkerResponse}, framed_recv_blocking, framed_send_blocking, worker::{ - cpu_time_monitor_loop, stringify_panic_payload, + cpu_time_monitor_loop, run_worker, stringify_panic_payload, thread::{self, WaitOutcome}, - worker_event_loop, WorkerKind, + WorkerKind, }, }; use polkadot_parachain_primitives::primitives::ValidationResult; +use polkadot_primitives::{executor_params::DEFAULT_NATIVE_STACK_MAX, ExecutorParams}; use std::{ + io::{self, Read}, os::unix::net::UnixStream, path::PathBuf, + process, sync::{mpsc::channel, Arc}, time::Duration, }; -use tokio::io; // Wasmtime powers the Substrate Executor. It compiles the wasm bytecode into native code. // That native code does not create any stacks and just reuses the stack of the thread that @@ -69,7 +81,7 @@ use tokio::io; // // Typically on Linux the main thread gets the stack size specified by the `ulimit` and // typically it's configured to 8 MiB. Rust's spawned threads are 2 MiB. OTOH, the -// NATIVE_STACK_MAX is set to 256 MiB. Not nearly enough. +// DEFAULT_NATIVE_STACK_MAX is set to 256 MiB. Not nearly enough. // // Hence we need to increase it. The simplest way to fix that is to spawn a thread with the desired // stack limit. @@ -78,7 +90,7 @@ use tokio::io; // // The default Rust thread stack limit 2 MiB + 256 MiB wasm stack. /// The stack size for the execute thread. 
-pub const EXECUTE_THREAD_STACK_SIZE: usize = 2 * 1024 * 1024 + NATIVE_STACK_MAX as usize; +pub const EXECUTE_THREAD_STACK_SIZE: usize = 2 * 1024 * 1024 + DEFAULT_NATIVE_STACK_MAX as usize; fn recv_handshake(stream: &mut UnixStream) -> io::Result { let handshake_enc = framed_recv_blocking(stream)?; @@ -103,7 +115,7 @@ fn recv_request(stream: &mut UnixStream) -> io::Result<(Vec, Duration)> { Ok((params, execution_timeout)) } -fn send_response(stream: &mut UnixStream, response: Response) -> io::Result<()> { +fn send_response(stream: &mut UnixStream, response: WorkerResponse) -> io::Result<()> { framed_send_blocking(stream, &response.encode()) } @@ -129,21 +141,18 @@ pub fn worker_entrypoint( worker_version: Option<&str>, security_status: SecurityStatus, ) { - worker_event_loop( + run_worker( WorkerKind::Execute, socket_path, worker_dir_path, node_version, worker_version, &security_status, - |mut stream, worker_dir_path| async move { - let worker_pid = std::process::id(); + |mut stream, worker_dir_path| { + let worker_pid = process::id(); let artifact_path = worker_dir::execute_artifact(&worker_dir_path); let Handshake { executor_params } = recv_handshake(&mut stream)?; - let executor = Executor::new(executor_params).map_err(|e| { - io::Error::new(io::ErrorKind::Other, format!("cannot create executor: {}", e)) - })?; loop { let (params, execution_timeout) = recv_request(&mut stream)?; @@ -158,7 +167,7 @@ pub fn worker_entrypoint( let compiled_artifact_blob = match std::fs::read(&artifact_path) { Ok(bytes) => bytes, Err(err) => { - let response = Response::InternalError( + let response = WorkerResponse::InternalError( InternalValidationError::CouldNotOpenFile(err.to_string()), ); send_response(&mut stream, response)?; @@ -166,89 +175,53 @@ pub fn worker_entrypoint( }, }; - // Conditional variable to notify us when a thread is done. - let condvar = thread::get_condvar(); + let (pipe_reader, pipe_writer) = os_pipe::pipe()?; + + let usage_before = match nix::sys::resource::getrusage(UsageWho::RUSAGE_CHILDREN) { + Ok(usage) => usage, + Err(errno) => { + let response = internal_error_from_errno("getrusage before", errno); + send_response(&mut stream, response)?; + continue + }, + }; - let cpu_time_start = ProcessTime::now(); + // SAFETY: new process is spawned within a single threaded process. This invariant + // is enforced by tests. + let response = match unsafe { nix::unistd::fork() } { + Err(errno) => internal_error_from_errno("fork", errno), + Ok(ForkResult::Child) => { + // Dropping the stream closes the underlying socket. We want to make sure + // that the sandboxed child can't get any kind of information from the + // outside world. The only IPC it should be able to do is sending its + // response over the pipe. + drop(stream); + // Drop the read end so we don't have too many FDs open. + drop(pipe_reader); - // Spawn a new thread that runs the CPU time monitor. 
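The comment block above explains why the execute thread needs an enlarged stack: wasmtime keeps the wasm value stack on the native stack, so the default 2 MiB thread stack cannot accommodate `DEFAULT_NATIVE_STACK_MAX`. The worker goes through the internal `spawn_worker_thread_with_stack_size` helper; the underlying technique is simply a thread builder with an explicit stack size, sketched here on the assumption that `EXECUTE_THREAD_STACK_SIZE` is in scope:

```rust
// Sketch: spawn the execute thread with 2 MiB of Rust stack plus the wasm stack budget.
use std::thread;

fn spawn_execute_thread<F, T>(f: F) -> std::io::Result<thread::JoinHandle<T>>
where
    F: FnOnce() -> T + Send + 'static,
    T: Send + 'static,
{
    thread::Builder::new()
        .name("execute thread".into())
        .stack_size(EXECUTE_THREAD_STACK_SIZE)
        .spawn(f)
}
```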
- let (cpu_time_monitor_tx, cpu_time_monitor_rx) = channel::<()>(); - let cpu_time_monitor_thread = thread::spawn_worker_thread( - "cpu time monitor thread", - move || { - cpu_time_monitor_loop( - cpu_time_start, + handle_child_process( + pipe_writer, + compiled_artifact_blob, + executor_params, + params, execution_timeout, - cpu_time_monitor_rx, - ) - }, - Arc::clone(&condvar), - WaitOutcome::TimedOut, - )?; - let executor_2 = executor.clone(); - let execute_thread = thread::spawn_worker_thread_with_stack_size( - "execute thread", - move || { - validate_using_artifact( - &compiled_artifact_blob, - ¶ms, - executor_2, - cpu_time_start, ) }, - Arc::clone(&condvar), - WaitOutcome::Finished, - EXECUTE_THREAD_STACK_SIZE, - )?; - - let outcome = thread::wait_for_threads(condvar); - - let response = match outcome { - WaitOutcome::Finished => { - let _ = cpu_time_monitor_tx.send(()); - execute_thread - .join() - .unwrap_or_else(|e| Response::Panic(stringify_panic_payload(e))) - }, - // If the CPU thread is not selected, we signal it to end, the join handle is - // dropped and the thread will finish in the background. - WaitOutcome::TimedOut => { - match cpu_time_monitor_thread.join() { - Ok(Some(cpu_time_elapsed)) => { - // Log if we exceed the timeout and the other thread hasn't - // finished. - gum::warn!( - target: LOG_TARGET, - %worker_pid, - "execute job took {}ms cpu time, exceeded execute timeout {}ms", - cpu_time_elapsed.as_millis(), - execution_timeout.as_millis(), - ); - Response::TimedOut - }, - Ok(None) => Response::InternalError( - InternalValidationError::CpuTimeMonitorThread( - "error communicating over finished channel".into(), - ), - ), - Err(e) => Response::InternalError( - InternalValidationError::CpuTimeMonitorThread( - stringify_panic_payload(e), - ), - ), - } + Ok(ForkResult::Parent { child }) => { + // the read end will wait until all write ends have been closed, + // this drop is necessary to avoid deadlock + drop(pipe_writer); + + handle_parent_process( + pipe_reader, + child, + worker_pid, + usage_before, + execution_timeout, + )? }, - WaitOutcome::Pending => unreachable!( - "we run wait_while until the outcome is no longer pending; qed" - ), }; - gum::trace!( - target: LOG_TARGET, - %worker_pid, - "worker: sending response to host: {:?}", - response - ); send_response(&mut stream, response)?; } }, @@ -257,29 +230,283 @@ pub fn worker_entrypoint( fn validate_using_artifact( compiled_artifact_blob: &[u8], + executor_params: &ExecutorParams, params: &[u8], - executor: Executor, - cpu_time_start: ProcessTime, -) -> Response { +) -> JobResponse { let descriptor_bytes = match unsafe { // SAFETY: this should be safe since the compiled artifact passed here comes from the // file created by the prepare workers. These files are obtained by calling // [`executor_intf::prepare`]. - executor.execute(compiled_artifact_blob, params) + execute_artifact(compiled_artifact_blob, executor_params, params) } { - Err(err) => return Response::format_invalid("execute", &err), + Err(err) => return JobResponse::format_invalid("execute", &err), Ok(d) => d, }; let result_descriptor = match ValidationResult::decode(&mut &descriptor_bytes[..]) { Err(err) => - return Response::format_invalid("validation result decoding failed", &err.to_string()), + return JobResponse::format_invalid( + "validation result decoding failed", + &err.to_string(), + ), Ok(r) => r, }; - // Include the decoding in the measured time, to prevent any potential attacks exploiting some - // bug in decoding. 
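Both workers now follow the job-isolation pattern introduced above: fork a child per job, stream the result back over an anonymous pipe, and charge CPU time to the job by diffing `getrusage(RUSAGE_CHILDREN)` around `waitpid`, since the child cannot be trusted to report its own elapsed time. A condensed, self-contained sketch of that pattern (the payload, framing and error handling of the real worker are simplified away):

```rust
// Sketch of the fork + pipe + waitpid + rusage-delta pattern.
use std::io::{Read, Write};
use std::time::Duration;

use nix::sys::resource::{getrusage, Usage, UsageWho};
use nix::sys::wait::waitpid;
use nix::unistd::ForkResult;

/// User + system CPU time, same accounting as `get_total_cpu_usage` below.
fn cpu_time(usage: Usage) -> Duration {
    let micros = (((usage.user_time().tv_sec() + usage.system_time().tv_sec()) * 1_000_000) +
        (usage.user_time().tv_usec() + usage.system_time().tv_usec()) as i64) as u64;
    Duration::from_micros(micros)
}

/// Returns `Ok(None)` on timeout, `Ok(Some(bytes))` with the job's output otherwise.
fn run_job_in_child(timeout: Duration) -> nix::Result<Option<Vec<u8>>> {
    let (mut reader, mut writer) = os_pipe::pipe().expect("pipe creation failed");
    let usage_before = getrusage(UsageWho::RUSAGE_CHILDREN)?;

    // SAFETY: assumes the process is single-threaded at this point, as the worker enforces.
    match unsafe { nix::unistd::fork()? } {
        ForkResult::Child => {
            // The child keeps only the write end; the real worker also drops the host socket here.
            drop(reader);
            let _ = writer.write_all(b"job result stand-in");
            std::process::exit(0)
        },
        ForkResult::Parent { child } => {
            // Drop our write end, otherwise `read_to_end` would never see EOF.
            drop(writer);
            let mut received = Vec::new();
            let _ = reader.read_to_end(&mut received);
            let _status = waitpid(child, None)?;
            let usage_after = getrusage(UsageWho::RUSAGE_CHILDREN)?;
            if cpu_time(usage_after) - cpu_time(usage_before) >= timeout {
                return Ok(None)
            }
            Ok(Some(received))
        },
    }
}
```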
- let duration = cpu_time_start.elapsed(); + JobResponse::Ok { result_descriptor } +} + +/// This is used to handle child process during pvf execute worker. +/// It execute the artifact and pipes back the response to the parent process +/// +/// # Arguments +/// +/// - `pipe_write`: A `PipeWriter` structure, the writing end of a pipe. +/// +/// - `compiled_artifact_blob`: The artifact bytes from compiled by the prepare worker`. +/// +/// - `executor_params`: Deterministically serialized execution environment semantics. +/// +/// - `params`: Validation parameters. +/// +/// - `execution_timeout`: The timeout in `Duration`. +/// +/// # Returns +/// +/// - pipe back `JobResponse` to the parent process. +fn handle_child_process( + mut pipe_write: PipeWriter, + compiled_artifact_blob: Vec, + executor_params: ExecutorParams, + params: Vec, + execution_timeout: Duration, +) -> ! { + gum::debug!( + target: LOG_TARGET, + worker_job_pid = %process::id(), + "worker job: executing artifact", + ); + + // Conditional variable to notify us when a thread is done. + let condvar = thread::get_condvar(); + let cpu_time_start = ProcessTime::now(); + + // Spawn a new thread that runs the CPU time monitor. + let (cpu_time_monitor_tx, cpu_time_monitor_rx) = channel::<()>(); + let cpu_time_monitor_thread = thread::spawn_worker_thread( + "cpu time monitor thread", + move || cpu_time_monitor_loop(cpu_time_start, execution_timeout, cpu_time_monitor_rx), + Arc::clone(&condvar), + WaitOutcome::TimedOut, + ) + .unwrap_or_else(|err| { + send_child_response(&mut pipe_write, Err(JobError::CouldNotSpawnThread(err.to_string()))) + }); + + let executor_params_2 = executor_params.clone(); + let execute_thread = thread::spawn_worker_thread_with_stack_size( + "execute thread", + move || validate_using_artifact(&compiled_artifact_blob, &executor_params_2, ¶ms), + Arc::clone(&condvar), + WaitOutcome::Finished, + EXECUTE_THREAD_STACK_SIZE, + ) + .unwrap_or_else(|err| { + send_child_response(&mut pipe_write, Err(JobError::CouldNotSpawnThread(err.to_string()))) + }); + + let outcome = thread::wait_for_threads(condvar); + + let response = match outcome { + WaitOutcome::Finished => { + let _ = cpu_time_monitor_tx.send(()); + execute_thread.join().map_err(|e| JobError::Panic(stringify_panic_payload(e))) + }, + // If the CPU thread is not selected, we signal it to end, the join handle is + // dropped and the thread will finish in the background. + WaitOutcome::TimedOut => match cpu_time_monitor_thread.join() { + Ok(Some(_cpu_time_elapsed)) => Err(JobError::TimedOut), + Ok(None) => Err(JobError::CpuTimeMonitorThread( + "error communicating over finished channel".into(), + )), + Err(e) => Err(JobError::CpuTimeMonitorThread(stringify_panic_payload(e))), + }, + WaitOutcome::Pending => + unreachable!("we run wait_while until the outcome is no longer pending; qed"), + }; + + send_child_response(&mut pipe_write, response); +} + +/// Waits for child process to finish and handle child response from pipe. +/// +/// # Arguments +/// +/// - `pipe_read`: A `PipeReader` used to read data from the child process. +/// +/// - `child`: The child pid. +/// +/// - `usage_before`: Resource usage statistics before executing the child process. +/// +/// - `timeout`: The maximum allowed time for the child process to finish, in `Duration`. +/// +/// # Returns +/// +/// - The response, either `Ok` or some error state. 
+fn handle_parent_process( + mut pipe_read: PipeReader, + job_pid: Pid, + worker_pid: u32, + usage_before: Usage, + timeout: Duration, +) -> io::Result { + // Read from the child. Don't decode unless the process exited normally, which we check later. + let mut received_data = Vec::new(); + pipe_read + .read_to_end(&mut received_data) + // Could not decode job response. There is either a bug or the job was hijacked. + // Should retry at any rate. + .map_err(|err| io::Error::new(io::ErrorKind::Other, err.to_string()))?; + + let status = nix::sys::wait::waitpid(job_pid, None); + gum::trace!( + target: LOG_TARGET, + %worker_pid, + %job_pid, + "execute worker received wait status from job: {:?}", + status, + ); + + let usage_after = match nix::sys::resource::getrusage(UsageWho::RUSAGE_CHILDREN) { + Ok(usage) => usage, + Err(errno) => return Ok(internal_error_from_errno("getrusage after", errno)), + }; + + // Using `getrusage` is needed to check whether child has timedout since we cannot rely on + // child to report its own time. + // As `getrusage` returns resource usage from all terminated child processes, + // it is necessary to subtract the usage before the current child process to isolate its cpu + // time + let cpu_tv = get_total_cpu_usage(usage_after) - get_total_cpu_usage(usage_before); + if cpu_tv >= timeout { + gum::warn!( + target: LOG_TARGET, + %worker_pid, + %job_pid, + "execute job took {}ms cpu time, exceeded execute timeout {}ms", + cpu_tv.as_millis(), + timeout.as_millis(), + ); + return Ok(WorkerResponse::JobTimedOut) + } + + match status { + Ok(WaitStatus::Exited(_, exit_status)) => { + let mut reader = io::BufReader::new(received_data.as_slice()); + let result = match recv_child_response(&mut reader) { + Ok(result) => result, + Err(err) => return Ok(WorkerResponse::JobError(err.to_string())), + }; + + match result { + Ok(JobResponse::Ok { result_descriptor }) => { + // The exit status should have been zero if no error occurred. + if exit_status != 0 { + return Ok(WorkerResponse::JobError(format!( + "unexpected exit status: {}", + exit_status + ))) + } + + Ok(WorkerResponse::Ok { result_descriptor, duration: cpu_tv }) + }, + Ok(JobResponse::InvalidCandidate(err)) => Ok(WorkerResponse::InvalidCandidate(err)), + Err(job_error) => { + gum::warn!( + target: LOG_TARGET, + %worker_pid, + %job_pid, + "execute job error: {}", + job_error, + ); + if matches!(job_error, JobError::TimedOut) { + Ok(WorkerResponse::JobTimedOut) + } else { + Ok(WorkerResponse::JobError(job_error.to_string())) + } + }, + } + }, + // The job was killed by the given signal. + // + // The job gets SIGSYS on seccomp violations, but this signal may have been sent for some + // other reason, so we still need to check for seccomp violations elsewhere. + Ok(WaitStatus::Signaled(_pid, signal, _core_dump)) => Ok(WorkerResponse::JobDied { + err: format!("received signal: {signal:?}"), + job_pid: job_pid.as_raw(), + }), + Err(errno) => Ok(internal_error_from_errno("waitpid", errno)), + + // It is within an attacker's power to send an unexpected exit status. So we cannot treat + // this as an internal error (which would make us abstain), but must vote against. 
+ Ok(unexpected_wait_status) => Ok(WorkerResponse::JobDied { + err: format!("unexpected status from wait: {unexpected_wait_status:?}"), + job_pid: job_pid.as_raw(), + }), + } +} + +/// Calculate the total CPU time from the given `usage` structure, returned from +/// [`nix::sys::resource::getrusage`], and calculates the total CPU time spent, including both user +/// and system time. +/// +/// # Arguments +/// +/// - `rusage`: Contains resource usage information. +/// +/// # Returns +/// +/// Returns a `Duration` representing the total CPU time. +fn get_total_cpu_usage(rusage: Usage) -> Duration { + let micros = (((rusage.user_time().tv_sec() + rusage.system_time().tv_sec()) * 1_000_000) + + (rusage.system_time().tv_usec() + rusage.user_time().tv_usec()) as i64) as u64; + + return Duration::from_micros(micros) +} + +/// Get a job response. +fn recv_child_response(received_data: &mut io::BufReader<&[u8]>) -> io::Result { + let response_bytes = framed_recv_blocking(received_data)?; + JobResult::decode(&mut response_bytes.as_slice()).map_err(|e| { + io::Error::new( + io::ErrorKind::Other, + format!("execute pvf recv_child_response: decode error: {:?}", e), + ) + }) +} + +/// Write response to the pipe and exit process after. +/// +/// # Arguments +/// +/// - `pipe_write`: A `PipeWriter` structure, the writing end of a pipe. +/// +/// - `response`: Child process response, or error. +fn send_child_response(pipe_write: &mut PipeWriter, response: JobResult) -> ! { + framed_send_blocking(pipe_write, response.encode().as_slice()) + .unwrap_or_else(|_| process::exit(libc::EXIT_FAILURE)); + + if response.is_ok() { + process::exit(libc::EXIT_SUCCESS) + } else { + process::exit(libc::EXIT_FAILURE) + } +} - Response::Ok { result_descriptor, duration } +fn internal_error_from_errno(context: &'static str, errno: Errno) -> WorkerResponse { + WorkerResponse::InternalError(InternalValidationError::Kernel(format!( + "{}: {}: {}", + context, + errno, + io::Error::last_os_error() + ))) } diff --git a/polkadot/node/core/pvf/prepare-worker/Cargo.toml b/polkadot/node/core/pvf/prepare-worker/Cargo.toml index 886209b78c329f062e674a96992509d0390a4220..005f2e935117a329f817aac89c0b8f712c4e57c1 100644 --- a/polkadot/node/core/pvf/prepare-worker/Cargo.toml +++ b/polkadot/node/core/pvf/prepare-worker/Cargo.toml @@ -7,33 +7,42 @@ edition.workspace = true license.workspace = true [dependencies] +blake3 = "1.5" cfg-if = "1.0" -futures = "0.3.21" gum = { package = "tracing-gum", path = "../../../gum" } libc = "0.2.139" rayon = "1.5.1" +tracking-allocator = { package = "staging-tracking-allocator", path = "../../../tracking-allocator" } tikv-jemalloc-ctl = { version = "0.5.0", optional = true } -tokio = { version = "1.24.2", features = ["fs", "process"] } +tikv-jemallocator = { version = "0.5.0", optional = true } +os_pipe = "1.1.4" +nix = { version = "0.27.1", features = ["resource", "process"]} parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } polkadot-node-core-pvf-common = { path = "../common" } -polkadot-parachain-primitives = { path = "../../../../parachain" } polkadot-primitives = { path = "../../../../primitives" } -sc-executor = { path = "../../../../../substrate/client/executor" } sc-executor-common = { path = "../../../../../substrate/client/executor/common" } sc-executor-wasmtime = { path = "../../../../../substrate/client/executor/wasmtime" } -sp-io = { path = "../../../../../substrate/primitives/io" } -sp-maybe-compressed-blob = { path = 
"../../../../../substrate/primitives/maybe-compressed-blob" } -sp-tracing = { path = "../../../../../substrate/primitives/tracing" } [target.'cfg(target_os = "linux")'.dependencies] +tikv-jemallocator = "0.5.0" tikv-jemalloc-ctl = "0.5.0" [features] builder = [] jemalloc-allocator = [ "dep:tikv-jemalloc-ctl", + "dep:tikv-jemallocator", "polkadot-node-core-pvf-common/jemalloc-allocator", ] + +[dev-dependencies] +criterion = { version = "0.4.0", default-features = false, features = ["cargo_bench_support"] } +rococo-runtime = { path = "../../../../runtime/rococo" } +sp-maybe-compressed-blob = { path = "../../../../../substrate/primitives/maybe-compressed-blob" } + +[[bench]] +name = "prepare_rococo_runtime" +harness = false diff --git a/polkadot/node/core/pvf/prepare-worker/benches/prepare_rococo_runtime.rs b/polkadot/node/core/pvf/prepare-worker/benches/prepare_rococo_runtime.rs new file mode 100644 index 0000000000000000000000000000000000000000..ba2568cd80cc634c16e43065a9ffd1fd31137e6c --- /dev/null +++ b/polkadot/node/core/pvf/prepare-worker/benches/prepare_rococo_runtime.rs @@ -0,0 +1,65 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
+ +use criterion::{criterion_group, criterion_main, Criterion, SamplingMode}; +use polkadot_node_core_pvf_common::{ + executor_intf::{prepare, prevalidate}, + prepare::PrepareJobKind, + pvf::PvfPrepData, +}; +use polkadot_primitives::ExecutorParams; +use std::time::Duration; + +fn do_prepare_runtime(pvf: PvfPrepData) { + let blob = match prevalidate(&pvf.code()) { + Err(err) => panic!("{:?}", err), + Ok(b) => b, + }; + + match prepare(blob, &pvf.executor_params()) { + Ok(_) => (), + Err(err) => panic!("{:?}", err), + } +} + +fn prepare_rococo_runtime(c: &mut Criterion) { + let blob = rococo_runtime::WASM_BINARY.unwrap(); + let pvf = match sp_maybe_compressed_blob::decompress(&blob, 64 * 1024 * 1024) { + Ok(code) => PvfPrepData::from_code( + code.into_owned(), + ExecutorParams::default(), + Duration::from_secs(360), + PrepareJobKind::Compilation, + ), + Err(e) => { + panic!("Cannot decompress blob: {:?}", e); + }, + }; + + let mut group = c.benchmark_group("rococo"); + group.sampling_mode(SamplingMode::Flat); + group.sample_size(20); + group.measurement_time(Duration::from_secs(240)); + group.bench_function("prepare Rococo runtime", |b| { + // `PvfPrepData` is designed to be cheap to clone, so cloning shouldn't affect the + // benchmark accuracy + b.iter(|| do_prepare_runtime(pvf.clone())) + }); + group.finish(); +} + +criterion_group!(preparation, prepare_rococo_runtime); +criterion_main!(preparation); diff --git a/polkadot/node/core/pvf/prepare-worker/src/executor_intf.rs b/polkadot/node/core/pvf/prepare-worker/src/executor_intf.rs deleted file mode 100644 index 1f88f6a6dd6e9a07b99c4832bd3f4dda1a71cf20..0000000000000000000000000000000000000000 --- a/polkadot/node/core/pvf/prepare-worker/src/executor_intf.rs +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Interface to the Substrate Executor - -use polkadot_node_core_pvf_common::executor_intf::params_to_wasmtime_semantics; -use polkadot_primitives::ExecutorParams; -use sc_executor_common::runtime_blob::RuntimeBlob; - -/// Runs the prevalidation on the given code. Returns a [`RuntimeBlob`] if it succeeds. -pub fn prevalidate(code: &[u8]) -> Result { - let blob = RuntimeBlob::new(code)?; - // It's assumed this function will take care of any prevalidation logic - // that needs to be done. - // - // Do nothing for now. - Ok(blob) -} - -/// Runs preparation on the given runtime blob. If successful, it returns a serialized compiled -/// artifact which can then be used to pass into `Executor::execute` after writing it to the disk. 
-pub fn prepare( - blob: RuntimeBlob, - executor_params: &ExecutorParams, -) -> Result, sc_executor_common::error::WasmError> { - let semantics = params_to_wasmtime_semantics(executor_params) - .map_err(|e| sc_executor_common::error::WasmError::Other(e))?; - sc_executor_wasmtime::prepare_runtime_artifact(blob, &semantics) -} diff --git a/polkadot/node/core/pvf/prepare-worker/src/lib.rs b/polkadot/node/core/pvf/prepare-worker/src/lib.rs index fcc7f6754a7e2e8236dcc73b9d0a978814acc492..499e87c6e207998f9fcf583f37b243ccea1a7921 100644 --- a/polkadot/node/core/pvf/prepare-worker/src/lib.rs +++ b/polkadot/node/core/pvf/prepare-worker/src/lib.rs @@ -16,10 +16,9 @@ //! Contains the logic for preparing PVFs. Used by the polkadot-prepare-worker binary. -mod executor_intf; mod memory_stats; -pub use executor_intf::{prepare, prevalidate}; +use polkadot_node_core_pvf_common::executor_intf::{prepare, prevalidate}; // NOTE: Initializing logging in e.g. tests will not have an effect in the workers, as they are // separate spawned processes. Run with e.g. `RUST_LOG=parachain::pvf-prepare-worker=trace`. @@ -29,30 +28,56 @@ const LOG_TARGET: &str = "parachain::pvf-prepare-worker"; use crate::memory_stats::max_rss_stat::{extract_max_rss_stat, get_max_rss_thread}; #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] use crate::memory_stats::memory_tracker::{get_memory_tracker_loop_stats, memory_tracker_loop}; +use libc; +use nix::{ + errno::Errno, + sys::{ + resource::{Usage, UsageWho}, + wait::WaitStatus, + }, + unistd::{ForkResult, Pid}, +}; +use os_pipe::{self, PipeReader, PipeWriter}; use parity_scale_codec::{Decode, Encode}; use polkadot_node_core_pvf_common::{ - error::{PrepareError, PrepareResult}, - executor_intf::Executor, + error::{PrepareError, PrepareWorkerResult}, + executor_intf::create_runtime_from_artifact_bytes, framed_recv_blocking, framed_send_blocking, - prepare::{MemoryStats, PrepareJobKind, PrepareStats}, + prepare::{MemoryStats, PrepareJobKind, PrepareStats, PrepareWorkerSuccess}, pvf::PvfPrepData, worker::{ - cpu_time_monitor_loop, stringify_panic_payload, - thread::{self, WaitOutcome}, - worker_event_loop, WorkerKind, + cpu_time_monitor_loop, run_worker, stringify_panic_payload, + thread::{self, spawn_worker_thread, WaitOutcome}, + WorkerKind, }, worker_dir, ProcessTime, SecurityStatus, }; use polkadot_primitives::ExecutorParams; use std::{ - os::unix::net::UnixStream, + fs, + io::{self, Read}, + os::{ + fd::{AsRawFd, RawFd}, + unix::net::UnixStream, + }, path::PathBuf, + process, sync::{mpsc::channel, Arc}, time::Duration, }; -use tokio::io; +use tracking_allocator::TrackingAllocator; + +#[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] +#[global_allocator] +static ALLOC: TrackingAllocator = + TrackingAllocator(tikv_jemallocator::Jemalloc); + +#[cfg(not(any(target_os = "linux", feature = "jemalloc-allocator")))] +#[global_allocator] +static ALLOC: TrackingAllocator = TrackingAllocator(std::alloc::System); /// Contains the bytes for a successfully compiled artifact. +#[derive(Encode, Decode)] pub struct CompiledArtifact(Vec); impl CompiledArtifact { @@ -68,6 +93,7 @@ impl AsRef<[u8]> for CompiledArtifact { } } +/// Get a worker request. 
fn recv_request(stream: &mut UnixStream) -> io::Result { let pvf = framed_recv_blocking(stream)?; let pvf = PvfPrepData::decode(&mut &pvf[..]).map_err(|e| { @@ -79,10 +105,53 @@ fn recv_request(stream: &mut UnixStream) -> io::Result { Ok(pvf) } -fn send_response(stream: &mut UnixStream, result: PrepareResult) -> io::Result<()> { +/// Send a worker response. +fn send_response(stream: &mut UnixStream, result: PrepareWorkerResult) -> io::Result<()> { framed_send_blocking(stream, &result.encode()) } +fn start_memory_tracking(fd: RawFd, limit: Option) { + unsafe { + // SAFETY: Inside the failure handler, the allocator is locked and no allocations or + // deallocations are possible. For Linux, that always holds for the code below, so it's + // safe. For MacOS, that technically holds at the time of writing, but there are no future + // guarantees. + // The arguments of unsafe `libc` calls are valid, the payload validity is covered with + // a test. + ALLOC.start_tracking( + limit, + Some(Box::new(move || { + #[cfg(target_os = "linux")] + { + // Syscalls never allocate or deallocate, so this is safe. + libc::syscall(libc::SYS_write, fd, OOM_PAYLOAD.as_ptr(), OOM_PAYLOAD.len()); + libc::syscall(libc::SYS_close, fd); + // Make sure we exit from all threads. Copied from glibc. + libc::syscall(libc::SYS_exit_group, 1); + loop { + libc::syscall(libc::SYS_exit, 1); + } + } + #[cfg(not(target_os = "linux"))] + { + // Syscalls are not available on MacOS, so we have to use `libc` wrappers. + // Technically, there may be allocations inside, although they shouldn't be + // there. In that case, we'll see deadlocks on MacOS after the OOM condition + // triggered. As we consider running a validator on MacOS unsafe, and this + // code is only run by a validator, it's a lesser evil. + libc::write(fd, OOM_PAYLOAD.as_ptr().cast(), OOM_PAYLOAD.len()); + libc::close(fd); + libc::_exit(1); + } + })), + ); + } +} + +fn end_memory_tracking() -> isize { + ALLOC.end_tracking() +} + /// The entrypoint that the spawned prepare worker should start with. /// /// # Parameters @@ -105,18 +174,20 @@ fn send_response(stream: &mut UnixStream, result: PrepareResult) -> io::Result<( /// /// 1. Get the code and parameters for preparation from the host. /// -/// 2. Start a memory tracker in a separate thread. +/// 2. Start a new child process /// -/// 3. Start the CPU time monitor loop and the actual preparation in two separate threads. +/// 3. Start the memory tracker and the actual preparation in two separate threads. /// /// 4. Wait on the two threads created in step 3. /// /// 5. Stop the memory tracker and get the stats. /// -/// 6. If compilation succeeded, write the compiled artifact into a temporary file. +/// 6. Pipe the result back to the parent process and exit from child process. /// -/// 7. Send the result of preparation back to the host. If any error occurred in the above steps, we -/// send that in the `PrepareResult`. +/// 7. If compilation succeeded, write the compiled artifact into a temporary file. +/// +/// 8. Send the result of preparation back to the host, including the checksum of the artifact. If +/// any error occurred in the above steps, we send that in the `PrepareWorkerResult`. 
pub fn worker_entrypoint( socket_path: PathBuf, worker_dir_path: PathBuf, @@ -124,15 +195,15 @@ pub fn worker_entrypoint( worker_version: Option<&str>, security_status: SecurityStatus, ) { - worker_event_loop( + run_worker( WorkerKind::Prepare, socket_path, worker_dir_path, node_version, worker_version, &security_status, - |mut stream, worker_dir_path| async move { - let worker_pid = std::process::id(); + |mut stream, worker_dir_path| { + let worker_pid = process::id(); let temp_artifact_dest = worker_dir::prepare_tmp_artifact(&worker_dir_path); loop { @@ -145,151 +216,60 @@ pub fn worker_entrypoint( let preparation_timeout = pvf.prep_timeout(); let prepare_job_kind = pvf.prep_kind(); - let executor_params = (*pvf.executor_params()).clone(); - - // Conditional variable to notify us when a thread is done. - let condvar = thread::get_condvar(); - - // Run the memory tracker in a regular, non-worker thread. - #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] - let condvar_memory = Arc::clone(&condvar); - #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] - let memory_tracker_thread = std::thread::spawn(|| memory_tracker_loop(condvar_memory)); - - let cpu_time_start = ProcessTime::now(); - - // Spawn a new thread that runs the CPU time monitor. - let (cpu_time_monitor_tx, cpu_time_monitor_rx) = channel::<()>(); - let cpu_time_monitor_thread = thread::spawn_worker_thread( - "cpu time monitor thread", - move || { - cpu_time_monitor_loop( - cpu_time_start, - preparation_timeout, - cpu_time_monitor_rx, - ) - }, - Arc::clone(&condvar), - WaitOutcome::TimedOut, - )?; - // Spawn another thread for preparation. - let prepare_thread = thread::spawn_worker_thread( - "prepare thread", - move || { - #[allow(unused_mut)] - let mut result = prepare_artifact(pvf, cpu_time_start); - - // Get the `ru_maxrss` stat. If supported, call getrusage for the thread. - #[cfg(target_os = "linux")] - let mut result = result - .map(|(artifact, elapsed)| (artifact, elapsed, get_max_rss_thread())); - - // If we are pre-checking, check for runtime construction errors. - // - // As pre-checking is more strict than just preparation in terms of memory - // and time, it is okay to do extra checks here. This takes negligible time - // anyway. - if let PrepareJobKind::Prechecking = prepare_job_kind { - result = result.and_then(|output| { - runtime_construction_check(output.0.as_ref(), executor_params)?; - Ok(output) - }); - } + let executor_params = pvf.executor_params(); + + let (pipe_reader, pipe_writer) = os_pipe::pipe()?; - result + let usage_before = match nix::sys::resource::getrusage(UsageWho::RUSAGE_CHILDREN) { + Ok(usage) => usage, + Err(errno) => { + let result = Err(error_from_errno("getrusage before", errno)); + send_response(&mut stream, result)?; + continue }, - Arc::clone(&condvar), - WaitOutcome::Finished, - )?; - - let outcome = thread::wait_for_threads(condvar); - - let result = match outcome { - WaitOutcome::Finished => { - let _ = cpu_time_monitor_tx.send(()); - - match prepare_thread.join().unwrap_or_else(|err| { - Err(PrepareError::Panic(stringify_panic_payload(err))) - }) { - Err(err) => { - // Serialized error will be written into the socket. - Err(err) - }, - Ok(ok) => { - cfg_if::cfg_if! { - if #[cfg(target_os = "linux")] { - let (artifact, cpu_time_elapsed, max_rss) = ok; - } else { - let (artifact, cpu_time_elapsed) = ok; - } - } - - // Stop the memory stats worker and get its observed memory stats. 
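The removed block above and its forked replacement in `handle_child_process` rely on the same synchronisation idea: the job thread and the CPU-time monitor thread race, the first to finish decides the outcome, and the waiting thread blocks in `wait_while` until the outcome is no longer pending. A standard-library-only sketch of that pattern (the real code goes through the internal `thread` helpers and measures CPU time rather than wall-clock time):

```rust
// Sketch: two threads race; the first to finish flips the shared outcome.
use std::sync::{Arc, Condvar, Mutex};
use std::thread;
use std::time::Duration;

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum Outcome {
    Pending,
    Finished,
    TimedOut,
}

fn race_job_against_monitor(budget: Duration) -> Outcome {
    // Only the first thread to finish gets to set the outcome.
    fn flip(state: &Arc<(Mutex<Outcome>, Condvar)>, outcome: Outcome) {
        let (lock, cvar) = &**state;
        let mut guard = lock.lock().unwrap();
        if *guard == Outcome::Pending {
            *guard = outcome;
        }
        cvar.notify_all();
    }

    let state = Arc::new((Mutex::new(Outcome::Pending), Condvar::new()));

    let job_state = Arc::clone(&state);
    thread::spawn(move || {
        // Stand-in for the actual preparation or execution work.
        thread::sleep(Duration::from_millis(10));
        flip(&job_state, Outcome::Finished);
    });

    let monitor_state = Arc::clone(&state);
    thread::spawn(move || {
        // Stand-in for `cpu_time_monitor_loop`; the losing thread keeps running
        // in the background, exactly as the comments in the worker describe.
        thread::sleep(budget);
        flip(&monitor_state, Outcome::TimedOut);
    });

    let (lock, cvar) = &*state;
    let guard = cvar
        .wait_while(lock.lock().unwrap(), |outcome| *outcome == Outcome::Pending)
        .unwrap();
    *guard
}
```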
- #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] - let memory_tracker_stats = get_memory_tracker_loop_stats(memory_tracker_thread, worker_pid) - .await; - let memory_stats = MemoryStats { - #[cfg(any( - target_os = "linux", - feature = "jemalloc-allocator" - ))] - memory_tracker_stats, - #[cfg(target_os = "linux")] - max_rss: extract_max_rss_stat(max_rss, worker_pid), - }; - - // Write the serialized artifact into a temp file. - // - // PVF host only keeps artifacts statuses in its memory, - // successfully compiled code gets stored on the disk (and - // consequently deserialized by execute-workers). The prepare worker - // is only required to send `Ok` to the pool to indicate the - // success. - - gum::debug!( - target: LOG_TARGET, - %worker_pid, - "worker: writing artifact to {}", - temp_artifact_dest.display(), - ); - tokio::fs::write(&temp_artifact_dest, &artifact).await?; - - Ok(PrepareStats { cpu_time_elapsed, memory_stats }) - }, - } + }; + + // SAFETY: new process is spawned within a single threaded process. This invariant + // is enforced by tests. + let result = match unsafe { nix::unistd::fork() } { + Err(errno) => Err(error_from_errno("fork", errno)), + Ok(ForkResult::Child) => { + // Dropping the stream closes the underlying socket. We want to make sure + // that the sandboxed child can't get any kind of information from the + // outside world. The only IPC it should be able to do is sending its + // response over the pipe. + drop(stream); + // Drop the read end so we don't have too many FDs open. + drop(pipe_reader); + + handle_child_process( + pvf, + pipe_writer, + preparation_timeout, + prepare_job_kind, + executor_params, + ) }, - // If the CPU thread is not selected, we signal it to end, the join handle is - // dropped and the thread will finish in the background. - WaitOutcome::TimedOut => { - match cpu_time_monitor_thread.join() { - Ok(Some(cpu_time_elapsed)) => { - // Log if we exceed the timeout and the other thread hasn't - // finished. - gum::warn!( - target: LOG_TARGET, - %worker_pid, - "prepare job took {}ms cpu time, exceeded prepare timeout {}ms", - cpu_time_elapsed.as_millis(), - preparation_timeout.as_millis(), - ); - Err(PrepareError::TimedOut) - }, - Ok(None) => Err(PrepareError::IoErr( - "error communicating over closed channel".into(), - )), - // Errors in this thread are independent of the PVF. 
- Err(err) => Err(PrepareError::IoErr(stringify_panic_payload(err))), - } + Ok(ForkResult::Parent { child }) => { + // the read end will wait until all write ends have been closed, + // this drop is necessary to avoid deadlock + drop(pipe_writer); + + handle_parent_process( + pipe_reader, + worker_pid, + child, + temp_artifact_dest.clone(), + usage_before, + preparation_timeout, + ) }, - WaitOutcome::Pending => unreachable!( - "we run wait_while until the outcome is no longer pending; qed" - ), }; gum::trace!( target: LOG_TARGET, %worker_pid, - "worker: sending response to host: {:?}", + "worker: sending result to host: {:?}", result ); send_response(&mut stream, result)?; @@ -298,10 +278,7 @@ pub fn worker_entrypoint( ); } -fn prepare_artifact( - pvf: PvfPrepData, - cpu_time_start: ProcessTime, -) -> Result<(CompiledArtifact, Duration), PrepareError> { +fn prepare_artifact(pvf: PvfPrepData) -> Result { let blob = match prevalidate(&pvf.code()) { Err(err) => return Err(PrepareError::Prevalidation(format!("{:?}", err))), Ok(b) => b, @@ -311,20 +288,392 @@ fn prepare_artifact( Ok(compiled_artifact) => Ok(CompiledArtifact::new(compiled_artifact)), Err(err) => Err(PrepareError::Preparation(format!("{:?}", err))), } - .map(|artifact| (artifact, cpu_time_start.elapsed())) } /// Try constructing the runtime to catch any instantiation errors during pre-checking. fn runtime_construction_check( artifact_bytes: &[u8], - executor_params: ExecutorParams, + executor_params: &ExecutorParams, ) -> Result<(), PrepareError> { - let executor = Executor::new(executor_params) - .map_err(|e| PrepareError::RuntimeConstruction(format!("cannot create executor: {}", e)))?; - // SAFETY: We just compiled this artifact. - let result = unsafe { executor.create_runtime_from_bytes(&artifact_bytes) }; + let result = unsafe { create_runtime_from_artifact_bytes(artifact_bytes, executor_params) }; result .map(|_runtime| ()) .map_err(|err| PrepareError::RuntimeConstruction(format!("{:?}", err))) } + +#[derive(Encode, Decode)] +struct JobResponse { + artifact: CompiledArtifact, + memory_stats: MemoryStats, +} + +/// This is used to handle child process during pvf prepare worker. +/// It prepares the artifact and tracks memory stats during preparation +/// and pipes back the response to the parent process +/// +/// # Arguments +/// +/// - `pvf`: `PvfPrepData` structure, containing data to prepare the artifact +/// +/// - `pipe_write`: A `PipeWriter` structure, the writing end of a pipe. +/// +/// - `preparation_timeout`: The timeout in `Duration`. +/// +/// - `prepare_job_kind`: The kind of prepare job. +/// +/// - `executor_params`: Deterministically serialized execution environment semantics. +/// +/// # Returns +/// +/// - If any error occur, pipe response back with `PrepareError`. +/// +/// - If success, pipe back `JobResponse`. +fn handle_child_process( + pvf: PvfPrepData, + mut pipe_write: PipeWriter, + preparation_timeout: Duration, + prepare_job_kind: PrepareJobKind, + executor_params: Arc, +) -> ! { + let worker_job_pid = process::id(); + gum::debug!( + target: LOG_TARGET, + %worker_job_pid, + ?prepare_job_kind, + ?preparation_timeout, + "worker job: preparing artifact", + ); + + // Conditional variable to notify us when a thread is done. + let condvar = thread::get_condvar(); + + // Run the memory tracker in a regular, non-worker thread. 
+ #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] + let condvar_memory = Arc::clone(&condvar); + #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] + let memory_tracker_thread = std::thread::spawn(|| memory_tracker_loop(condvar_memory)); + + start_memory_tracking( + pipe_write.as_raw_fd(), + executor_params.prechecking_max_memory().map(|v| { + v.try_into().unwrap_or_else(|_| { + gum::warn!( + LOG_TARGET, + %worker_job_pid, + "Illegal pre-checking max memory value {} discarded", + v, + ); + 0 + }) + }), + ); + + let cpu_time_start = ProcessTime::now(); + + // Spawn a new thread that runs the CPU time monitor. + let (cpu_time_monitor_tx, cpu_time_monitor_rx) = channel::<()>(); + let cpu_time_monitor_thread = thread::spawn_worker_thread( + "cpu time monitor thread", + move || cpu_time_monitor_loop(cpu_time_start, preparation_timeout, cpu_time_monitor_rx), + Arc::clone(&condvar), + WaitOutcome::TimedOut, + ) + .unwrap_or_else(|err| { + send_child_response(&mut pipe_write, Err(PrepareError::IoErr(err.to_string()))) + }); + + let prepare_thread = spawn_worker_thread( + "prepare worker", + move || { + #[allow(unused_mut)] + let mut result = prepare_artifact(pvf); + + // Get the `ru_maxrss` stat. If supported, call getrusage for the thread. + #[cfg(target_os = "linux")] + let mut result = result.map(|artifact| (artifact, get_max_rss_thread())); + + // If we are pre-checking, check for runtime construction errors. + // + // As pre-checking is more strict than just preparation in terms of memory + // and time, it is okay to do extra checks here. This takes negligible time + // anyway. + if let PrepareJobKind::Prechecking = prepare_job_kind { + result = result.and_then(|output| { + runtime_construction_check(output.0.as_ref(), &executor_params)?; + Ok(output) + }); + } + result + }, + Arc::clone(&condvar), + WaitOutcome::Finished, + ) + .unwrap_or_else(|err| { + send_child_response(&mut pipe_write, Err(PrepareError::IoErr(err.to_string()))) + }); + + let outcome = thread::wait_for_threads(condvar); + + let peak_alloc = { + let peak = end_memory_tracking(); + gum::debug!( + target: LOG_TARGET, + %worker_job_pid, + "prepare job peak allocation is {} bytes", + peak, + ); + peak + }; + + let result = match outcome { + WaitOutcome::Finished => { + let _ = cpu_time_monitor_tx.send(()); + + match prepare_thread.join().unwrap_or_else(|err| { + send_child_response( + &mut pipe_write, + Err(PrepareError::JobError(stringify_panic_payload(err))), + ) + }) { + Err(err) => Err(err), + Ok(ok) => { + cfg_if::cfg_if! { + if #[cfg(target_os = "linux")] { + let (artifact, max_rss) = ok; + } else { + let artifact = ok; + } + } + + // Stop the memory stats worker and get its observed memory stats. + #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] + let memory_tracker_stats = get_memory_tracker_loop_stats(memory_tracker_thread, process::id()); + + let memory_stats = MemoryStats { + #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] + memory_tracker_stats, + #[cfg(target_os = "linux")] + max_rss: extract_max_rss_stat(max_rss, process::id()), + // Negative peak allocation values are legit; they are narrow + // corner cases and shouldn't affect overall statistics + // significantly + peak_tracked_alloc: if peak_alloc > 0 { peak_alloc as u64 } else { 0u64 }, + }; + + Ok(JobResponse { artifact, memory_stats }) + }, + } + }, + + // If the CPU thread is not selected, we signal it to end, the join handle is + // dropped and the thread will finish in the background. 
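`handle_child_process` brackets the whole preparation with `start_memory_tracking` and `end_memory_tracking`: an optional byte limit arms a failure handler that must not allocate (it writes the pre-encoded out-of-memory payload straight to the pipe's file descriptor and terminates), and the value returned at the end feeds `peak_tracked_alloc`. A minimal sketch of that flow, assuming the `ALLOC` static and the `TrackingAllocator` methods introduced earlier in this file are in scope; the limit type here is an assumption and the marker bytes stand in for the real `OOM_PAYLOAD`:

```rust
// Sketch: arm the tracking allocator, run the job, read back the peak allocation.
use std::os::fd::RawFd;

const OOM_MARKER: &[u8] = b"OOM";

fn with_memory_tracking<R>(
    report_fd: RawFd,
    limit: Option<isize>,
    job: impl FnOnce() -> R,
) -> (R, isize) {
    unsafe {
        // SAFETY: the allocator is locked while the handler runs, so the handler
        // must not allocate; it only performs raw `libc` calls (the real Linux
        // handler uses direct syscalls for the same reason).
        ALLOC.start_tracking(
            limit,
            Some(Box::new(move || {
                libc::write(report_fd, OOM_MARKER.as_ptr().cast(), OOM_MARKER.len());
                libc::close(report_fd);
                libc::_exit(1);
            })),
        );
    }

    let result = job();

    // Peak number of bytes tracked while the job ran; can be negative in narrow
    // corner cases, as noted above, so callers clamp it to zero.
    let peak = ALLOC.end_tracking();
    (result, peak)
}
```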
+ WaitOutcome::TimedOut => match cpu_time_monitor_thread.join() { + Ok(Some(_cpu_time_elapsed)) => Err(PrepareError::TimedOut), + Ok(None) => Err(PrepareError::IoErr("error communicating over closed channel".into())), + Err(err) => Err(PrepareError::IoErr(stringify_panic_payload(err))), + }, + WaitOutcome::Pending => + unreachable!("we run wait_while until the outcome is no longer pending; qed"), + }; + + send_child_response(&mut pipe_write, result); +} + +/// Waits for child process to finish and handle child response from pipe. +/// +/// # Arguments +/// +/// - `pipe_read`: A `PipeReader` used to read data from the child process. +/// +/// - `child`: The child pid. +/// +/// - `temp_artifact_dest`: The destination `PathBuf` to write the temporary artifact file. +/// +/// - `worker_pid`: The PID of the child process. +/// +/// - `usage_before`: Resource usage statistics before executing the child process. +/// +/// - `timeout`: The maximum allowed time for the child process to finish, in `Duration`. +/// +/// # Returns +/// +/// - If the child send response without an error, this function returns `Ok(PrepareStats)` +/// containing memory and CPU usage statistics. +/// +/// - If the child send response with an error, it returns a `PrepareError` with that error. +/// +/// - If the child process timeout, it returns `PrepareError::TimedOut`. +fn handle_parent_process( + mut pipe_read: PipeReader, + worker_pid: u32, + job_pid: Pid, + temp_artifact_dest: PathBuf, + usage_before: Usage, + timeout: Duration, +) -> Result { + // Read from the child. Don't decode unless the process exited normally, which we check later. + let mut received_data = Vec::new(); + pipe_read + .read_to_end(&mut received_data) + .map_err(|err| PrepareError::IoErr(err.to_string()))?; + + let status = nix::sys::wait::waitpid(job_pid, None); + gum::trace!( + target: LOG_TARGET, + %worker_pid, + %job_pid, + "prepare worker received wait status from job: {:?}", + status, + ); + + let usage_after = nix::sys::resource::getrusage(UsageWho::RUSAGE_CHILDREN) + .map_err(|errno| error_from_errno("getrusage after", errno))?; + + // Using `getrusage` is needed to check whether child has timedout since we cannot rely on + // child to report its own time. + // As `getrusage` returns resource usage from all terminated child processes, + // it is necessary to subtract the usage before the current child process to isolate its cpu + // time + let cpu_tv = get_total_cpu_usage(usage_after) - get_total_cpu_usage(usage_before); + if cpu_tv >= timeout { + gum::warn!( + target: LOG_TARGET, + %worker_pid, + %job_pid, + "prepare job took {}ms cpu time, exceeded prepare timeout {}ms", + cpu_tv.as_millis(), + timeout.as_millis(), + ); + return Err(PrepareError::TimedOut) + } + + match status { + Ok(WaitStatus::Exited(_pid, exit_status)) => { + let mut reader = io::BufReader::new(received_data.as_slice()); + let result = recv_child_response(&mut reader) + .map_err(|err| PrepareError::JobError(err.to_string()))?; + + match result { + Err(err) => Err(err), + Ok(JobResponse { artifact, memory_stats }) => { + // The exit status should have been zero if no error occurred. + if exit_status != 0 { + return Err(PrepareError::JobError(format!( + "unexpected exit status: {}", + exit_status + ))) + } + + // Write the serialized artifact into a temp file. + // + // PVF host only keeps artifacts statuses in its memory, + // successfully compiled code gets stored on the disk (and + // consequently deserialized by execute-workers). 
The prepare worker + // is only required to send `Ok` to the pool to indicate the + // success. + gum::debug!( + target: LOG_TARGET, + %worker_pid, + %job_pid, + "worker: writing artifact to {}", + temp_artifact_dest.display(), + ); + // Write to the temp file created by the host. + if let Err(err) = fs::write(&temp_artifact_dest, &artifact) { + return Err(PrepareError::IoErr(err.to_string())) + }; + + let checksum = blake3::hash(&artifact.as_ref()).to_hex().to_string(); + Ok(PrepareWorkerSuccess { + checksum, + stats: PrepareStats { memory_stats, cpu_time_elapsed: cpu_tv }, + }) + }, + } + }, + // The job was killed by the given signal. + // + // The job gets SIGSYS on seccomp violations, but this signal may have been sent for some + // other reason, so we still need to check for seccomp violations elsewhere. + Ok(WaitStatus::Signaled(_pid, signal, _core_dump)) => Err(PrepareError::JobDied { + err: format!("received signal: {signal:?}"), + job_pid: job_pid.as_raw(), + }), + Err(errno) => Err(error_from_errno("waitpid", errno)), + + // An attacker can make the child process return any exit status it wants. So we can treat + // all unexpected cases the same way. + Ok(unexpected_wait_status) => Err(PrepareError::JobDied { + err: format!("unexpected status from wait: {unexpected_wait_status:?}"), + job_pid: job_pid.as_raw(), + }), + } +} + +/// Calculate the total CPU time from the given `usage` structure, returned from +/// [`nix::sys::resource::getrusage`], and calculates the total CPU time spent, including both user +/// and system time. +/// +/// # Arguments +/// +/// - `rusage`: Contains resource usage information. +/// +/// # Returns +/// +/// Returns a `Duration` representing the total CPU time. +fn get_total_cpu_usage(rusage: Usage) -> Duration { + let micros = (((rusage.user_time().tv_sec() + rusage.system_time().tv_sec()) * 1_000_000) + + (rusage.system_time().tv_usec() + rusage.user_time().tv_usec()) as i64) as u64; + + return Duration::from_micros(micros) +} + +/// Get a job response. +fn recv_child_response(received_data: &mut io::BufReader<&[u8]>) -> io::Result { + let response_bytes = framed_recv_blocking(received_data)?; + JobResult::decode(&mut response_bytes.as_slice()).map_err(|e| { + io::Error::new( + io::ErrorKind::Other, + format!("prepare pvf recv_child_response: decode error: {:?}", e), + ) + }) +} + +/// Write a job response to the pipe and exit process after. +/// +/// # Arguments +/// +/// - `pipe_write`: A `PipeWriter` structure, the writing end of a pipe. +/// +/// - `response`: Child process response +fn send_child_response(pipe_write: &mut PipeWriter, response: JobResult) -> ! { + framed_send_blocking(pipe_write, response.encode().as_slice()) + .unwrap_or_else(|_| process::exit(libc::EXIT_FAILURE)); + + if response.is_ok() { + process::exit(libc::EXIT_SUCCESS) + } else { + process::exit(libc::EXIT_FAILURE) + } +} + +fn error_from_errno(context: &'static str, errno: Errno) -> PrepareError { + PrepareError::Kernel(format!("{}: {}: {}", context, errno, io::Error::last_os_error())) +} + +type JobResult = Result; + +/// Pre-encoded length-prefixed `JobResult::Err(PrepareError::OutOfMemory)` +const OOM_PAYLOAD: &[u8] = b"\x02\x00\x00\x00\x00\x00\x00\x00\x01\x08"; + +#[test] +fn pre_encoded_payloads() { + // NOTE: This must match the type of `response` in `send_child_response`. + let oom_unencoded: JobResult = JobResult::Err(PrepareError::OutOfMemory); + let oom_encoded = oom_unencoded.encode(); + // The payload is prefixed with its length in `framed_send`. 
+ let mut oom_payload = oom_encoded.len().to_le_bytes().to_vec(); + oom_payload.extend(oom_encoded); + assert_eq!(oom_payload, OOM_PAYLOAD); +} diff --git a/polkadot/node/core/pvf/prepare-worker/src/memory_stats.rs b/polkadot/node/core/pvf/prepare-worker/src/memory_stats.rs index c70ff56fc84dbb00439d0c461179425809749739..5f577b0901c252159aed9035c9f2b0cd7765d3c6 100644 --- a/polkadot/node/core/pvf/prepare-worker/src/memory_stats.rs +++ b/polkadot/node/core/pvf/prepare-worker/src/memory_stats.rs @@ -122,7 +122,7 @@ pub mod memory_tracker { } /// Helper function to get the stats from the memory tracker. Helps isolate this error handling. - pub async fn get_memory_tracker_loop_stats( + pub fn get_memory_tracker_loop_stats( thread: JoinHandle>, worker_pid: u32, ) -> Option { diff --git a/polkadot/node/core/pvf/src/artifacts.rs b/polkadot/node/core/pvf/src/artifacts.rs index 5a1767af75b778394b31d03f226e016006f3d042..79b53467b4e3305e2148d990b829ee9e77f98a48 100644 --- a/polkadot/node/core/pvf/src/artifacts.rs +++ b/polkadot/node/core/pvf/src/artifacts.rs @@ -16,10 +16,10 @@ //! PVF artifacts (final compiled code blobs). //! -//! # Lifecycle of an artifact +//! # Lifecycle of an artifact //! -//! 1. During node start-up, the artifacts cache is cleaned up. This means that all local artifacts -//! stored on-disk are cleared, and we start with an empty [`Artifacts`] table. +//! 1. During node start-up, we will check the cached artifacts, if any. The stale and corrupted +//! ones are pruned. The valid ones are registered in the [`Artifacts`] table. //! //! 2. In order to be executed, a PVF should be prepared first. This means that artifacts should //! have an [`ArtifactState::Prepared`] entry for that artifact in the table. If not, the @@ -55,17 +55,29 @@ //! older by a predefined parameter. This process is run very rarely (say, once a day). Once the //! artifact is expired it is removed from disk eagerly atomically. -use crate::host::PrepareResultSender; +use crate::{host::PrecheckResultSender, LOG_TARGET}; use always_assert::always; -use polkadot_node_core_pvf_common::{error::PrepareError, prepare::PrepareStats, pvf::PvfPrepData}; +use polkadot_core_primitives::Hash; +use polkadot_node_core_pvf_common::{ + error::PrepareError, prepare::PrepareStats, pvf::PvfPrepData, RUNTIME_VERSION, +}; +use polkadot_node_primitives::NODE_VERSION; use polkadot_parachain_primitives::primitives::ValidationCodeHash; use polkadot_primitives::ExecutorParamsHash; use std::{ collections::HashMap, path::{Path, PathBuf}, + str::FromStr as _, time::{Duration, SystemTime}, }; +const RUNTIME_PREFIX: &str = "wasmtime_v"; +const NODE_PREFIX: &str = "polkadot_v"; + +fn artifact_prefix() -> String { + format!("{}{}_{}{}", RUNTIME_PREFIX, RUNTIME_VERSION, NODE_PREFIX, NODE_VERSION) +} + /// Identifier of an artifact. Encodes a code hash of the PVF and a hash of executor parameter set. #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct ArtifactId { @@ -74,8 +86,6 @@ pub struct ArtifactId { } impl ArtifactId { - const PREFIX: &'static str = "wasmtime_"; - /// Creates a new artifact ID with the given hash. pub fn new(code_hash: ValidationCodeHash, executor_params_hash: ExecutorParamsHash) -> Self { Self { code_hash, executor_params_hash } @@ -86,26 +96,33 @@ impl ArtifactId { Self::new(pvf.code_hash(), pvf.executor_params().hash()) } - /// Tries to recover the artifact id from the given file name. 
- #[cfg(test)] - pub fn from_file_name(file_name: &str) -> Option { - use polkadot_core_primitives::Hash; - use std::str::FromStr as _; - - let file_name = file_name.strip_prefix(Self::PREFIX)?; - let (code_hash_str, executor_params_hash_str) = file_name.split_once('_')?; - let code_hash = Hash::from_str(code_hash_str).ok()?.into(); - let executor_params_hash = - ExecutorParamsHash::from_hash(Hash::from_str(executor_params_hash_str).ok()?); - - Some(Self { code_hash, executor_params_hash }) + /// Returns the canonical path to the concluded artifact. + pub(crate) fn path(&self, cache_path: &Path, checksum: &str) -> PathBuf { + let file_name = format!( + "{}_{:#x}_{:#x}_0x{}", + artifact_prefix(), + self.code_hash, + self.executor_params_hash, + checksum + ); + cache_path.join(file_name) } - /// Returns the expected path to this artifact given the root of the cache. - pub fn path(&self, cache_path: &Path) -> PathBuf { - let file_name = - format!("{}{:#x}_{:#x}", Self::PREFIX, self.code_hash, self.executor_params_hash); - cache_path.join(file_name) + /// Tries to recover the artifact id from the given file name. + /// Return `None` if the given file name is invalid. + /// VALID_NAME := _ _ _ + fn from_file_name(file_name: &str) -> Option { + let file_name = file_name.strip_prefix(&artifact_prefix())?.strip_prefix('_')?; + let parts: Vec<&str> = file_name.split('_').collect(); + + if let [code_hash, param_hash, _checksum] = parts[..] { + let code_hash = Hash::from_str(code_hash).ok()?.into(); + let executor_params_hash = + ExecutorParamsHash::from_hash(Hash::from_str(param_hash).ok()?); + return Some(Self { code_hash, executor_params_hash }) + } + + None } } @@ -123,17 +140,20 @@ pub struct ArtifactPathId { } impl ArtifactPathId { - pub(crate) fn new(artifact_id: ArtifactId, cache_path: &Path) -> Self { - Self { path: artifact_id.path(cache_path), id: artifact_id } + pub(crate) fn new(artifact_id: ArtifactId, path: &Path) -> Self { + Self { id: artifact_id, path: path.to_owned() } } } +#[derive(Debug)] pub enum ArtifactState { /// The artifact is ready to be used by the executor. /// /// That means that the artifact should be accessible through the path obtained by the artifact /// id (unless, it was removed externally). Prepared { + /// The path of the compiled artifact. + path: PathBuf, /// The time when the artifact was last needed. /// /// This is updated when we get the heads up for this artifact or when we just discover @@ -145,7 +165,7 @@ pub enum ArtifactState { /// A task to prepare this artifact is scheduled. Preparing { /// List of result senders that are waiting for a response. - waiting_for_response: Vec, + waiting_for_response: Vec, /// The number of times this artifact has failed to prepare. num_failures: u32, }, @@ -163,32 +183,148 @@ pub enum ArtifactState { /// A container of all known artifact ids and their states. pub struct Artifacts { - artifacts: HashMap, + inner: HashMap, } impl Artifacts { - /// Initialize a blank cache at the given path. This will clear everything present at the - /// given path, to be populated over time. - /// - /// The recognized artifacts will be filled in the table and unrecognized will be removed. - pub async fn new(cache_path: &Path) -> Self { - // First delete the entire cache. This includes artifacts and any leftover worker dirs (see - // [`WorkerDir`]). Nodes are long-running so this should populate shortly. 
- let _ = tokio::fs::remove_dir_all(cache_path).await; + #[cfg(test)] + pub(crate) fn empty() -> Self { + Self { inner: HashMap::new() } + } + + #[cfg(test)] + pub(crate) fn len(&self) -> usize { + self.inner.len() + } + + /// Create an empty table and populate it with valid artifacts as [`ArtifactState::Prepared`], + /// if any. The existing caches will be checked by their file name to determine whether they are + /// valid, e.g., matching the current node version. The ones deemed invalid will be pruned. + pub async fn new_and_prune(cache_path: &Path) -> Self { + let mut artifacts = Self { inner: HashMap::new() }; + artifacts.insert_and_prune(cache_path).await; + artifacts + } + + async fn insert_and_prune(&mut self, cache_path: &Path) { + async fn is_corrupted(path: &Path) -> bool { + let checksum = match tokio::fs::read(path).await { + Ok(bytes) => blake3::hash(&bytes), + Err(err) => { + // just remove the file if we cannot read it + gum::warn!( + target: LOG_TARGET, + ?err, + "unable to read artifact {:?} when checking integrity, removing...", + path, + ); + return true + }, + }; + + if let Some(file_name) = path.file_name() { + if let Some(file_name) = file_name.to_str() { + return !file_name.ends_with(checksum.to_hex().as_str()) + } + } + true + } + + // Insert the entry into the artifacts table if it is valid. + // Otherwise, prune it. + async fn insert_or_prune( + artifacts: &mut Artifacts, + entry: &tokio::fs::DirEntry, + cache_path: &Path, + ) { + let file_type = entry.file_type().await; + let file_name = entry.file_name(); + + match file_type { + Ok(file_type) => + if !file_type.is_file() { + return + }, + Err(err) => { + gum::warn!( + target: LOG_TARGET, + ?err, + "unable to get file type for {:?}", + file_name, + ); + return + }, + } + + if let Some(file_name) = file_name.to_str() { + let id = ArtifactId::from_file_name(file_name); + let path = cache_path.join(file_name); + + if id.is_none() || is_corrupted(&path).await { + gum::warn!( + target: LOG_TARGET, + "discarding invalid artifact {:?}", + &path, + ); + let _ = tokio::fs::remove_file(&path).await; + return + } + + if let Some(id) = id { + gum::debug!( + target: LOG_TARGET, + "reusing existing {:?} for node version v{}", + &path, + NODE_VERSION, + ); + artifacts.insert_prepared(id, path, SystemTime::now(), Default::default()); + } + } else { + gum::warn!( + target: LOG_TARGET, + "non-Unicode file name {:?} found in {:?}", + file_name, + cache_path, + ); + } + } + // Make sure that the cache path directory and all its parents are created. let _ = tokio::fs::create_dir_all(cache_path).await; - Self { artifacts: HashMap::new() } - } + let mut dir = match tokio::fs::read_dir(cache_path).await { + Ok(dir) => dir, + Err(err) => { + gum::error!( + target: LOG_TARGET, + ?err, + "failed to read dir {:?}", + cache_path, + ); + return + }, + }; - #[cfg(test)] - pub(crate) fn empty() -> Self { - Self { artifacts: HashMap::new() } + loop { + match dir.next_entry().await { + Ok(Some(entry)) => insert_or_prune(self, &entry, cache_path).await, + Ok(None) => break, + Err(err) => { + gum::warn!( + target: LOG_TARGET, + ?err, + "error processing artifacts in {:?}", + cache_path, + ); + break + }, + } + } } /// Returns the state of the given artifact by its ID. pub fn artifact_state_mut(&mut self, artifact_id: &ArtifactId) -> Option<&mut ArtifactState> { - self.artifacts.get_mut(artifact_id) + self.inner.get_mut(artifact_id) } /// Inform the table about the artifact with the given ID. The state will be set to "preparing". 
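For reference, the two conditions the pruning pass above checks before reusing a cached artifact can be collapsed into a single predicate: the file name must parse back into an `ArtifactId` (current runtime/node prefix plus both hashes), and its trailing checksum must match the blake3 hash of the file contents. A sketch assuming it sits in this module (so the private `ArtifactId::from_file_name` is reachable) and using blocking `std::fs` where the real code is async:

```rust
// Sketch: the validity test applied to an on-disk artifact at startup.
use std::path::Path;

fn looks_like_valid_artifact(path: &Path) -> bool {
    let Some(file_name) = path.file_name().and_then(|n| n.to_str()) else { return false };

    // Same parsing as `ArtifactId::from_file_name`: prefix, code hash,
    // executor-params hash, checksum.
    let id_is_parseable = ArtifactId::from_file_name(file_name).is_some();

    // Same idea as `is_corrupted`: recompute the checksum and compare it with
    // the suffix embedded in the file name.
    let checksum_matches = match std::fs::read(path) {
        Ok(bytes) => file_name.ends_with(blake3::hash(&bytes).to_hex().as_str()),
        Err(_) => false,
    };

    id_is_parseable && checksum_matches
}
```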
@@ -198,53 +334,52 @@ impl Artifacts { pub fn insert_preparing( &mut self, artifact_id: ArtifactId, - waiting_for_response: Vec, + waiting_for_response: Vec, ) { // See the precondition. always!(self - .artifacts + .inner .insert(artifact_id, ArtifactState::Preparing { waiting_for_response, num_failures: 0 }) .is_none()); } /// Insert an artifact with the given ID as "prepared". /// - /// This function must be used only for brand-new artifacts and should never be used for - /// replacing existing ones. - #[cfg(test)] - pub fn insert_prepared( + /// This function should only be used to build the artifact table at startup with valid + /// artifact caches. + pub(crate) fn insert_prepared( &mut self, artifact_id: ArtifactId, + path: PathBuf, last_time_needed: SystemTime, prepare_stats: PrepareStats, ) { // See the precondition. always!(self - .artifacts - .insert(artifact_id, ArtifactState::Prepared { last_time_needed, prepare_stats }) + .inner + .insert(artifact_id, ArtifactState::Prepared { path, last_time_needed, prepare_stats }) .is_none()); } - /// Remove and retrieve the artifacts from the table that are older than the supplied - /// Time-To-Live. - pub fn prune(&mut self, artifact_ttl: Duration) -> Vec { + /// Remove artifacts older than the given TTL and return id and path of the removed ones. + pub fn prune(&mut self, artifact_ttl: Duration) -> Vec<(ArtifactId, PathBuf)> { let now = SystemTime::now(); let mut to_remove = vec![]; - for (k, v) in self.artifacts.iter() { - if let ArtifactState::Prepared { last_time_needed, .. } = *v { + for (k, v) in self.inner.iter() { + if let ArtifactState::Prepared { last_time_needed, ref path, .. } = *v { if now .duration_since(last_time_needed) .map(|age| age > artifact_ttl) .unwrap_or(false) { - to_remove.push(k.clone()); + to_remove.push((k.clone(), path.clone())); } } } for artifact in &to_remove { - self.artifacts.remove(artifact); + self.inner.remove(&artifact.0); } to_remove @@ -253,20 +388,87 @@ impl Artifacts { #[cfg(test)] mod tests { - use super::{ArtifactId, Artifacts}; + use super::{artifact_prefix as prefix, ArtifactId, Artifacts, NODE_VERSION, RUNTIME_VERSION}; use polkadot_primitives::ExecutorParamsHash; + use rand::Rng; use sp_core::H256; - use std::{path::Path, str::FromStr}; + use std::{ + fs, + io::Write, + path::{Path, PathBuf}, + str::FromStr, + }; + + fn rand_hash(len: usize) -> String { + let mut rng = rand::thread_rng(); + let hex: Vec<_> = "0123456789abcdef".chars().collect(); + (0..len).map(|_| hex[rng.gen_range(0..hex.len())]).collect() + } + + fn file_name(code_hash: &str, param_hash: &str, checksum: &str) -> String { + format!("{}_0x{}_0x{}_0x{}", prefix(), code_hash, param_hash, checksum) + } + + fn create_artifact( + dir: impl AsRef, + prefix: &str, + code_hash: impl AsRef, + params_hash: impl AsRef, + ) -> (PathBuf, String) { + fn artifact_path_without_checksum( + dir: impl AsRef, + prefix: &str, + code_hash: impl AsRef, + params_hash: impl AsRef, + ) -> PathBuf { + let mut path = dir.as_ref().to_path_buf(); + let file_name = + format!("{}_0x{}_0x{}", prefix, code_hash.as_ref(), params_hash.as_ref(),); + path.push(file_name); + path + } + + let (code_hash, params_hash) = (code_hash.as_ref(), params_hash.as_ref()); + let path = artifact_path_without_checksum(dir, prefix, code_hash, params_hash); + let mut file = fs::File::create(&path).unwrap(); + + let content = format!("{}{}", code_hash, params_hash).into_bytes(); + file.write_all(&content).unwrap(); + let checksum = blake3::hash(&content).to_hex().to_string(); + + 
(path, checksum) + } + + fn create_rand_artifact(dir: impl AsRef, prefix: &str) -> (PathBuf, String) { + create_artifact(dir, prefix, rand_hash(64), rand_hash(64)) + } + + fn concluded_path(path: impl AsRef, checksum: &str) -> PathBuf { + let path = path.as_ref(); + let mut file_name = path.file_name().unwrap().to_os_string(); + file_name.push("_0x"); + file_name.push(checksum); + path.with_file_name(file_name) + } + + #[test] + fn artifact_prefix() { + assert_eq!(prefix(), format!("wasmtime_v{}_polkadot_v{}", RUNTIME_VERSION, NODE_VERSION)); + } #[test] fn from_file_name() { assert!(ArtifactId::from_file_name("").is_none()); assert!(ArtifactId::from_file_name("junk").is_none()); + let file_name = file_name( + "0022800000000000000000000000000000000000000000000000000000000000", + "0033900000000000000000000000000000000000000000000000000000000000", + "00000000000000000000000000000000", + ); + assert_eq!( - ArtifactId::from_file_name( - "wasmtime_0x0022800000000000000000000000000000000000000000000000000000000000_0x0033900000000000000000000000000000000000000000000000000000000000" - ), + ArtifactId::from_file_name(&file_name), Some(ArtifactId::new( hex_literal::hex![ "0022800000000000000000000000000000000000000000000000000000000000" @@ -281,40 +483,56 @@ mod tests { #[test] fn path() { - let path = Path::new("/test"); - let hash = - H256::from_str("1234567890123456789012345678901234567890123456789012345678901234") - .unwrap(); - - assert_eq!( - ArtifactId::new(hash.into(), ExecutorParamsHash::from_hash(hash)).path(path).to_str(), - Some( - "/test/wasmtime_0x1234567890123456789012345678901234567890123456789012345678901234_0x1234567890123456789012345678901234567890123456789012345678901234" - ), - ); + let dir = Path::new("/test"); + let code_hash = "1234567890123456789012345678901234567890123456789012345678901234"; + let params_hash = "4321098765432109876543210987654321098765432109876543210987654321"; + let checksum = "34567890123456789012345678901234"; + let file_name = file_name(code_hash, params_hash, checksum); + + let code_hash = H256::from_str(code_hash).unwrap(); + let params_hash = H256::from_str(params_hash).unwrap(); + let path = ArtifactId::new(code_hash.into(), ExecutorParamsHash::from_hash(params_hash)) + .path(dir, checksum); + + assert_eq!(path.to_str().unwrap(), format!("/test/{}", file_name)); } #[tokio::test] - async fn artifacts_removes_cache_on_startup() { - let fake_cache_path = crate::worker_intf::tmppath("test-cache").await.unwrap(); - let fake_artifact_path = { - let mut p = fake_cache_path.clone(); - p.push("wasmtime_0x1234567890123456789012345678901234567890123456789012345678901234"); - p - }; + async fn remove_stale_cache_on_startup() { + let cache_dir = tempfile::Builder::new().prefix("test-cache-").tempdir().unwrap(); + + // invalid prefix + create_rand_artifact(&cache_dir, ""); + create_rand_artifact(&cache_dir, "wasmtime_polkadot_v"); + create_rand_artifact(&cache_dir, "wasmtime_v8.0.0_polkadot_v1.0.0"); + + let prefix = prefix(); + + // no checksum + create_rand_artifact(&cache_dir, &prefix); + + // invalid hashes + let (path, checksum) = create_artifact(&cache_dir, &prefix, "000", "000001"); + let new_path = concluded_path(&path, &checksum); + fs::rename(&path, &new_path).unwrap(); - // create a tmp cache with 1 artifact. 
+ // checksum tampered + let (path, checksum) = create_rand_artifact(&cache_dir, &prefix); + let new_path = concluded_path(&path, checksum.chars().rev().collect::().as_str()); + fs::rename(&path, &new_path).unwrap(); - std::fs::create_dir_all(&fake_cache_path).unwrap(); - std::fs::File::create(fake_artifact_path).unwrap(); + // valid + let (path, checksum) = create_rand_artifact(&cache_dir, &prefix); + let new_path = concluded_path(&path, &checksum); + fs::rename(&path, &new_path).unwrap(); - // this should remove it and re-create. + assert_eq!(fs::read_dir(&cache_dir).unwrap().count(), 7); - let p = &fake_cache_path; - Artifacts::new(p).await; + let artifacts = Artifacts::new_and_prune(cache_dir.path()).await; - assert_eq!(std::fs::read_dir(&fake_cache_path).unwrap().count(), 0); + assert_eq!(fs::read_dir(&cache_dir).unwrap().count(), 1); + assert_eq!(artifacts.len(), 1); - std::fs::remove_dir_all(fake_cache_path).unwrap(); + fs::remove_dir_all(cache_dir).unwrap(); } } diff --git a/polkadot/node/core/pvf/src/error.rs b/polkadot/node/core/pvf/src/error.rs index 87ef0b54a0406776b865ea143185fd8012ef82e1..442443f326e9815fe3b9e56813356989748a8d79 100644 --- a/polkadot/node/core/pvf/src/error.rs +++ b/polkadot/node/core/pvf/src/error.rs @@ -19,55 +19,69 @@ use polkadot_node_core_pvf_common::error::{InternalValidationError, PrepareError /// A error raised during validation of the candidate. #[derive(Debug, Clone)] pub enum ValidationError { - /// The error was raised because the candidate is invalid. + /// Deterministic preparation issue. In practice, most of the problems should be caught by + /// prechecking, so this may be a sign of internal conditions. /// - /// Whenever we are unsure if the error was due to the candidate or not, we must vote invalid. - InvalidCandidate(InvalidCandidate), - /// Some internal error occurred. - InternalError(InternalValidationError), + /// In principle if preparation of the `WASM` fails, the current candidate cannot be the + /// reason for that. So we can't say whether it is invalid or not. In addition, with + /// pre-checking enabled only valid runtimes should ever get enacted, so we can be + /// reasonably sure that this is some local problem on the current node. However, as this + /// particular error *seems* to indicate a deterministic error, we raise a warning. + Preparation(PrepareError), + /// The error was raised because the candidate is invalid. Should vote against. + Invalid(InvalidCandidate), + /// Possibly transient issue that may resolve after retries. Should vote against when retries + /// fail. + PossiblyInvalid(PossiblyInvalidError), + /// Preparation or execution issue caused by an internal condition. Should not vote against. + Internal(InternalValidationError), } /// A description of an error raised during executing a PVF and can be attributed to the combination /// of the candidate [`polkadot_parachain_primitives::primitives::ValidationParams`] and the PVF. #[derive(Debug, Clone)] pub enum InvalidCandidate { - /// PVF preparation ended up with a deterministic error. - PrepareError(String), - /// The failure is reported by the execution worker. The string contains the error message. - WorkerReportedError(String), - /// The worker has died during validation of a candidate. That may fall in one of the following - /// categories, which we cannot distinguish programmatically: + /// The candidate is reported to be invalid by the execution worker. The string contains the + /// error message. 
+ WorkerReportedInvalid(String), + /// PVF execution (compilation is not included) took more time than was allotted. + HardTimeout, +} + +/// Possibly transient issue that may resolve after retries. +#[derive(Debug, Clone)] +pub enum PossiblyInvalidError { + /// The worker process (not the job) has died during validation of a candidate. /// - /// (a) Some sort of transient glitch caused the worker process to abort. An example would be - /// that the host machine ran out of free memory and the OOM killer started killing the - /// processes, and in order to save the parent it will "sacrifice child" first. + /// It's unlikely that this is caused by malicious code since workers spawn separate job + /// processes, and those job processes are sandboxed. But, it is possible. We retry in this + /// case, and if the error persists, we assume it's caused by the candidate and vote against. + AmbiguousWorkerDeath, + /// The job process (not the worker) has died for one of the following reasons: /// - /// (b) The candidate triggered a code path that has lead to the process death. For example, - /// the PVF found a way to consume unbounded amount of resources and then it either - /// exceeded an `rlimit` (if set) or, again, invited OOM killer. Another possibility is a - /// bug in wasmtime allowed the PVF to gain control over the execution worker. + /// (a) A seccomp violation occurred, most likely due to an attempt by malicious code to + /// execute arbitrary code. Note that there is no foolproof way to detect this if the operator + /// has seccomp auditing disabled. /// - /// We attribute such an event to an *invalid candidate* in either case. + /// (b) The host machine ran out of free memory and the OOM killer started killing the + /// processes, and in order to save the parent it will "sacrifice child" first. /// - /// The rationale for this is that a glitch may lead to unfair rejecting candidate by a single - /// validator. If the glitch is somewhat more persistent the validator will reject all - /// candidate thrown at it and hopefully the operator notices it by decreased reward - /// performance of the validator. On the other hand, if the worker died because of (b) we would - /// have better chances to stop the attack. - AmbiguousWorkerDeath, - /// PVF execution (compilation is not included) took more time than was allotted. - HardTimeout, - /// A panic occurred and we can't be sure whether the candidate is really invalid or some - /// internal glitch occurred. Whenever we are unsure, we can never treat an error as internal - /// as we would abstain from voting. This is bad because if the issue was due to the candidate, - /// then all validators would abstain, stalling finality on the chain. So we will first retry - /// the candidate, and if the issue persists we are forced to vote invalid. - Panic(String), + /// (c) Some other reason, perhaps transient or perhaps caused by malicious code. + /// + /// We cannot treat this as an internal error because malicious code may have caused this. + AmbiguousJobDeath(String), + /// An unexpected error occurred in the job process and we can't be sure whether the candidate + /// is really invalid or some internal glitch occurred. Whenever we are unsure, we can never + /// treat an error as internal as we would abstain from voting. This is bad because if the + /// issue was due to the candidate, then all validators would abstain, stalling finality on the + /// chain. 
So we will first retry the candidate, and if the issue persists we are forced to + /// vote invalid. + JobError(String), } impl From for ValidationError { fn from(error: InternalValidationError) -> Self { - Self::InternalError(error) + Self::Internal(error) } } @@ -76,9 +90,9 @@ impl From for ValidationError { // Here we need to classify the errors into two errors: deterministic and non-deterministic. // See [`PrepareError::is_deterministic`]. if error.is_deterministic() { - Self::InvalidCandidate(InvalidCandidate::PrepareError(error.to_string())) + Self::Preparation(error) } else { - Self::InternalError(InternalValidationError::NonDeterministicPrepareError(error)) + Self::Internal(InternalValidationError::NonDeterministicPrepareError(error)) } } } diff --git a/polkadot/node/core/pvf/src/execute/queue.rs b/polkadot/node/core/pvf/src/execute/queue.rs index aca604f0de21dd805e10c68c1a8d80c202010e9b..a0c24fd44323037ebd1572a30767be349a26a7a7 100644 --- a/polkadot/node/core/pvf/src/execute/queue.rs +++ b/polkadot/node/core/pvf/src/execute/queue.rs @@ -22,7 +22,7 @@ use crate::{ host::ResultSender, metrics::Metrics, worker_intf::{IdleWorker, WorkerHandle}, - InvalidCandidate, ValidationError, LOG_TARGET, + InvalidCandidate, PossiblyInvalidError, ValidationError, LOG_TARGET, }; use futures::{ channel::mpsc, @@ -342,20 +342,27 @@ fn handle_job_finish( }, Outcome::InvalidCandidate { err, idle_worker } => ( Some(idle_worker), - Err(ValidationError::InvalidCandidate(InvalidCandidate::WorkerReportedError(err))), + Err(ValidationError::Invalid(InvalidCandidate::WorkerReportedInvalid(err))), None, ), - Outcome::InternalError { err } => (None, Err(ValidationError::InternalError(err)), None), + Outcome::InternalError { err } => (None, Err(ValidationError::Internal(err)), None), + // Either the worker or the job timed out. Kill the worker in either case. Treated as + // definitely-invalid, because if we timed out, there's no time left for a retry. Outcome::HardTimeout => - (None, Err(ValidationError::InvalidCandidate(InvalidCandidate::HardTimeout)), None), + (None, Err(ValidationError::Invalid(InvalidCandidate::HardTimeout)), None), // "Maybe invalid" errors (will retry). 
- Outcome::IoErr => ( + Outcome::WorkerIntfErr => ( None, - Err(ValidationError::InvalidCandidate(InvalidCandidate::AmbiguousWorkerDeath)), + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)), None, ), - Outcome::Panic { err } => - (None, Err(ValidationError::InvalidCandidate(InvalidCandidate::Panic(err))), None), + Outcome::JobDied { err } => ( + None, + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousJobDeath(err))), + None, + ), + Outcome::JobError { err } => + (None, Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::JobError(err))), None), }; queue.metrics.execute_finished(); diff --git a/polkadot/node/core/pvf/src/execute/worker_intf.rs b/polkadot/node/core/pvf/src/execute/worker_intf.rs index 783c7c7abbc8c7526819d85e3e06bd1ae6e9209c..fc6cd8fc7256501b0bfac1c8be720463babbb74c 100644 --- a/polkadot/node/core/pvf/src/execute/worker_intf.rs +++ b/polkadot/node/core/pvf/src/execute/worker_intf.rs @@ -18,6 +18,7 @@ use crate::{ artifacts::ArtifactPathId, + security, worker_intf::{ clear_worker_dir_path, framed_recv, framed_send, spawn_with_program_path, IdleWorker, SpawnErr, WorkerDir, WorkerHandle, JOB_TIMEOUT_WALL_CLOCK_FACTOR, @@ -29,10 +30,10 @@ use futures_timer::Delay; use parity_scale_codec::{Decode, Encode}; use polkadot_node_core_pvf_common::{ error::InternalValidationError, - execute::{Handshake, Response}, + execute::{Handshake, WorkerResponse}, worker_dir, SecurityStatus, }; -use polkadot_parachain_primitives::primitives::ValidationResult; +use polkadot_parachain_primitives::primitives::{ValidationCodeHash, ValidationResult}; use polkadot_primitives::ExecutorParams; use std::{path::Path, time::Duration}; use tokio::{io, net::UnixStream}; @@ -87,26 +88,33 @@ pub enum Outcome { /// a trap. Errors related to the preparation process are not expected to be encountered by the /// execution workers. InvalidCandidate { err: String, idle_worker: IdleWorker }, + /// The execution time exceeded the hard limit. The worker is terminated. + HardTimeout, + /// An I/O error happened during communication with the worker. This may mean that the worker + /// process already died. The token is not returned in any case. + WorkerIntfErr, + /// The job process has died. We must kill the worker just in case. + /// + /// We cannot treat this as an internal error because malicious code may have caused this. + JobDied { err: String }, + /// An unexpected error occurred in the job process. + /// + /// Because malicious code can cause a job error, we must not treat it as an internal error. + JobError { err: String }, + /// An internal error happened during the validation. Such an error is most likely related to /// some transient glitch. /// /// Should only ever be used for errors independent of the candidate and PVF. Therefore it may /// be a problem with the worker, so we terminate it. InternalError { err: InternalValidationError }, - /// The execution time exceeded the hard limit. The worker is terminated. - HardTimeout, - /// An I/O error happened during communication with the worker. This may mean that the worker - /// process already died. The token is not returned in any case. - IoErr, - /// An unexpected panic has occurred in the execution worker. - Panic { err: String }, } /// Given the idle token of a worker and parameters of work, communicates with the worker and /// returns the outcome. /// /// NOTE: Not returning the idle worker token in `Outcome` will trigger the child process being -/// killed. +/// killed, if it's still alive. 
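The doc comments on the reworked error taxonomy describe the intended dispute behaviour; the hypothetical caller-side helper below only restates that mapping (the actual policy lives in candidate validation, not in this crate): `Invalid` means vote against, `PossiblyInvalid` means retry first and vote against only if the error persists, and `Internal` must never lead to a vote against.

use polkadot_node_core_pvf::ValidationError;

// Hypothetical summary of the dispute-voting intent documented on `ValidationError`.
enum VotingIntent {
    /// Definitely invalid: vote against.
    Against,
    /// Possibly invalid: retry first, vote against only if the error persists.
    RetryThenAgainst,
    /// Local/internal problem: do not vote against.
    Abstain,
}

fn intent(err: &ValidationError) -> VotingIntent {
    match err {
        ValidationError::Invalid(_) => VotingIntent::Against,
        ValidationError::PossiblyInvalid(_) => VotingIntent::RetryThenAgainst,
        ValidationError::Internal(_) => VotingIntent::Abstain,
        // The comments above only note that a preparation failure looks deterministic and
        // deserves a warning; treating it as "do not vote against" is an assumption here.
        ValidationError::Preparation(_) => VotingIntent::Abstain,
    }
}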
pub async fn start_work( worker: IdleWorker, artifact: ArtifactPathId, @@ -124,7 +132,10 @@ pub async fn start_work( artifact.path.display(), ); + let artifact_path = artifact.path.clone(); with_worker_dir_setup(worker_dir, pid, &artifact.path, |worker_dir| async move { + let audit_log_file = security::AuditLogFile::try_open_and_seek_to_end().await; + if let Err(error) = send_request(&mut stream, &validation_params, execution_timeout).await { gum::warn!( target: LOG_TARGET, @@ -133,7 +144,7 @@ pub async fn start_work( ?error, "failed to send an execute request", ); - return Outcome::IoErr + return Outcome::WorkerIntfErr } // We use a generous timeout here. This is in addition to the one in the child process, in @@ -145,6 +156,16 @@ pub async fn start_work( let response = futures::select! { response = recv_response(&mut stream).fuse() => { match response { + Ok(response) => + handle_response( + response, + pid, + &artifact.id.code_hash, + &artifact_path, + execution_timeout, + audit_log_file + ) + .await, Err(error) => { gum::warn!( target: LOG_TARGET, @@ -153,26 +174,8 @@ pub async fn start_work( ?error, "failed to recv an execute response", ); - return Outcome::IoErr - }, - Ok(response) => { - if let Response::Ok{duration, ..} = response { - if duration > execution_timeout { - // The job didn't complete within the timeout. - gum::warn!( - target: LOG_TARGET, - worker_pid = %pid, - "execute job took {}ms cpu time, exceeded execution timeout {}ms.", - duration.as_millis(), - execution_timeout.as_millis(), - ); - - // Return a timeout error. - return Outcome::HardTimeout; - } - } - response + return Outcome::WorkerIntfErr }, } }, @@ -183,28 +186,80 @@ pub async fn start_work( validation_code_hash = ?artifact.id.code_hash, "execution worker exceeded lenient timeout for execution, child worker likely stalled", ); - Response::TimedOut + WorkerResponse::JobTimedOut }, }; match response { - Response::Ok { result_descriptor, duration } => Outcome::Ok { + WorkerResponse::Ok { result_descriptor, duration } => Outcome::Ok { result_descriptor, duration, idle_worker: IdleWorker { stream, pid, worker_dir }, }, - Response::InvalidCandidate(err) => Outcome::InvalidCandidate { + WorkerResponse::InvalidCandidate(err) => Outcome::InvalidCandidate { err, idle_worker: IdleWorker { stream, pid, worker_dir }, }, - Response::TimedOut => Outcome::HardTimeout, - Response::Panic(err) => Outcome::Panic { err }, - Response::InternalError(err) => Outcome::InternalError { err }, + WorkerResponse::JobTimedOut => Outcome::HardTimeout, + WorkerResponse::JobDied { err, job_pid: _ } => Outcome::JobDied { err }, + WorkerResponse::JobError(err) => Outcome::JobError { err }, + + WorkerResponse::InternalError(err) => Outcome::InternalError { err }, } }) .await } +/// Handles the case where we successfully received response bytes on the host from the child. +/// +/// Here we know the artifact exists, but is still located in a temporary file which will be cleared +/// by [`with_worker_dir_setup`]. +async fn handle_response( + response: WorkerResponse, + worker_pid: u32, + validation_code_hash: &ValidationCodeHash, + artifact_path: &Path, + execution_timeout: Duration, + audit_log_file: Option, +) -> WorkerResponse { + if let WorkerResponse::Ok { duration, .. } = response { + if duration > execution_timeout { + // The job didn't complete within the timeout. 
+ gum::warn!( + target: LOG_TARGET, + worker_pid, + "execute job took {}ms cpu time, exceeded execution timeout {}ms.", + duration.as_millis(), + execution_timeout.as_millis(), + ); + + // Return a timeout error. + return WorkerResponse::JobTimedOut + } + } + + if let WorkerResponse::JobDied { err: _, job_pid } = response { + // The job died. Check if it was due to a seccomp violation. + // + // NOTE: Log, but don't change the outcome. Not all validators may have + // auditing enabled, so we don't want attackers to abuse a non-deterministic + // outcome. + for syscall in security::check_seccomp_violations_for_job(audit_log_file, job_pid).await { + gum::error!( + target: LOG_TARGET, + %worker_pid, + %job_pid, + %syscall, + ?validation_code_hash, + ?artifact_path, + "A forbidden syscall was attempted! This is a violation of our seccomp security policy. Report an issue ASAP!" + ); + } + } + + response +} + /// Create a temporary file for an artifact in the worker cache, execute the given future/closure /// passing the file path in, and clean up the worker cache. /// @@ -223,7 +278,7 @@ where // Cheaply create a hard link to the artifact. The artifact is always at a known location in the // worker cache, and the child can't access any other artifacts or gain any information from the // original filename. - let link_path = worker_dir::execute_artifact(&worker_dir.path); + let link_path = worker_dir::execute_artifact(worker_dir.path()); if let Err(err) = tokio::fs::hard_link(artifact_path, link_path).await { gum::warn!( target: LOG_TARGET, @@ -237,7 +292,7 @@ where } } - let worker_dir_path = worker_dir.path.clone(); + let worker_dir_path = worker_dir.path().to_owned(); let outcome = f(worker_dir).await; // Try to clear the worker dir. @@ -273,9 +328,9 @@ async fn send_request( framed_send(stream, &execution_timeout.encode()).await } -async fn recv_response(stream: &mut UnixStream) -> io::Result { +async fn recv_response(stream: &mut UnixStream) -> io::Result { let response_bytes = framed_recv(stream).await?; - Response::decode(&mut &response_bytes[..]).map_err(|e| { + WorkerResponse::decode(&mut response_bytes.as_slice()).map_err(|e| { io::Error::new( io::ErrorKind::Other, format!("execute pvf recv_response: decode error: {:?}", e), diff --git a/polkadot/node/core/pvf/src/host.rs b/polkadot/node/core/pvf/src/host.rs index 81695829122b9d3ba03307f1c597ac5877dc7023..be8f7aee778477973ef800a48b84d025ab254765 100644 --- a/polkadot/node/core/pvf/src/host.rs +++ b/polkadot/node/core/pvf/src/host.rs @@ -24,7 +24,7 @@ use crate::{ artifacts::{ArtifactId, ArtifactPathId, ArtifactState, Artifacts}, execute::{self, PendingExecutionRequest}, metrics::Metrics, - prepare, Priority, ValidationError, LOG_TARGET, + prepare, security, Priority, SecurityStatus, ValidationError, LOG_TARGET, }; use always_assert::never; use futures::{ @@ -32,14 +32,15 @@ use futures::{ Future, FutureExt, SinkExt, StreamExt, }; use polkadot_node_core_pvf_common::{ - error::{PrepareError, PrepareResult}, + error::{PrecheckResult, PrepareError}, + prepare::PrepareSuccess, pvf::PvfPrepData, - SecurityStatus, }; +use polkadot_node_subsystem::SubsystemResult; use polkadot_parachain_primitives::primitives::ValidationResult; use std::{ collections::HashMap, - path::{Path, PathBuf}, + path::PathBuf, time::{Duration, SystemTime}, }; @@ -63,12 +64,14 @@ pub const EXECUTE_BINARY_NAME: &str = "polkadot-execute-worker"; pub(crate) type ResultSender = oneshot::Sender>; /// Transmission end used for sending the PVF preparation result. 
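The `select!` between `recv_response` and a `Delay` in `start_work` above is a belt-and-braces timeout: the child already enforces a CPU-time limit, and the host additionally enforces a lenient wall-clock deadline (scaled by `JOB_TIMEOUT_WALL_CLOCK_FACTOR`) in case the child stalls. A stripped-down sketch of that pattern, with hypothetical names:

use futures::{future::FutureExt, pin_mut, select};
use futures_timer::Delay;
use std::{future::Future, time::Duration};

// Race a response future against a lenient wall-clock deadline; `None` means the
// deadline fired first and the worker should be treated as stalled/timed out.
async fn recv_or_deadline<F, T>(recv: F, lenient_timeout: Duration) -> Option<T>
where
    F: Future<Output = T>,
{
    pin_mut!(recv);
    let mut recv = recv.fuse();
    let mut deadline = Delay::new(lenient_timeout).fuse();
    select! {
        response = recv => Some(response),
        _ = deadline => None,
    }
}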
-pub(crate) type PrepareResultSender = oneshot::Sender; +pub(crate) type PrecheckResultSender = oneshot::Sender; /// A handle to the async process serving the validation host requests. #[derive(Clone)] pub struct ValidationHost { to_host_tx: mpsc::Sender, + /// Available security features, detected by the host during startup. + pub security_status: SecurityStatus, } impl ValidationHost { @@ -83,7 +86,7 @@ impl ValidationHost { pub async fn precheck_pvf( &mut self, pvf: PvfPrepData, - result_tx: PrepareResultSender, + result_tx: PrecheckResultSender, ) -> Result<(), String> { self.to_host_tx .send(ToHost::PrecheckPvf { pvf, result_tx }) @@ -133,7 +136,7 @@ impl ValidationHost { } enum ToHost { - PrecheckPvf { pvf: PvfPrepData, result_tx: PrepareResultSender }, + PrecheckPvf { pvf: PvfPrepData, result_tx: PrecheckResultSender }, ExecutePvf(ExecutePvfInputs), HeadsUp { active_pvfs: Vec }, } @@ -153,6 +156,7 @@ pub struct Config { pub cache_path: PathBuf, /// The version of the node. `None` can be passed to skip the version check (only for tests). pub node_version: Option, + /// The path to the program that can be used to spawn the prepare workers. pub prepare_worker_program_path: PathBuf, /// The time allotted for a prepare worker to spawn and report to the host. @@ -162,6 +166,7 @@ pub struct Config { pub prepare_workers_soft_max_num: usize, /// The absolute number of workers that can be spawned in the prepare pool. pub prepare_workers_hard_max_num: usize, + /// The path to the program that can be used to spawn the execute workers. pub execute_worker_program_path: PathBuf, /// The time allotted for an execute worker to spawn and report to the host. @@ -181,10 +186,12 @@ impl Config { Self { cache_path, node_version, + prepare_worker_program_path, prepare_worker_spawn_timeout: Duration::from_secs(3), prepare_workers_soft_max_num: 1, prepare_workers_hard_max_num: 1, + execute_worker_program_path, execute_worker_spawn_timeout: Duration::from_secs(3), execute_workers_max_num: 2, @@ -200,20 +207,18 @@ impl Config { /// The future should not return normally but if it does then that indicates an unrecoverable error. /// In that case all pending requests will be canceled, dropping the result senders and new ones /// will be rejected. -pub fn start(config: Config, metrics: Metrics) -> (ValidationHost, impl Future) { +pub async fn start( + config: Config, + metrics: Metrics, +) -> SubsystemResult<(ValidationHost, impl Future)> { gum::debug!(target: LOG_TARGET, ?config, "starting PVF validation host"); // Run checks for supported security features once per host startup. Warn here if not enabled. 
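Since `start` is now `async` and fallible, callers obtain both the host handle and the background task from it and can read the detected `SecurityStatus` directly off the handle. A minimal caller-side sketch, assuming a pre-built `Config`/`Metrics` and that the returned background future is a `Send` future that can simply be spawned:

use polkadot_node_core_pvf::{Config, Metrics, ValidationHost};
use polkadot_node_subsystem::SubsystemResult;

async fn launch_pvf_host(config: Config, metrics: Metrics) -> SubsystemResult<ValidationHost> {
    let (host, background_task) = polkadot_node_core_pvf::start(config, metrics).await?;
    // Security capabilities detected during startup are exposed on the handle.
    let _security_status = host.security_status.clone();
    // The background task must be driven for the host to make progress.
    tokio::spawn(background_task);
    Ok(host)
}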
- let security_status = { - let can_enable_landlock = check_landlock(&config.prepare_worker_program_path); - let can_unshare_user_namespace_and_change_root = - check_can_unshare_user_namespace_and_change_root(&config.prepare_worker_program_path); - SecurityStatus { can_enable_landlock, can_unshare_user_namespace_and_change_root } - }; + let security_status = security::check_security_status(&config).await; let (to_host_tx, to_host_rx) = mpsc::channel(10); - let validation_host = ValidationHost { to_host_tx }; + let validation_host = ValidationHost { to_host_tx, security_status: security_status.clone() }; let (to_prepare_pool, from_prepare_pool, run_prepare_pool) = prepare::start_pool( metrics.clone(), @@ -247,10 +252,9 @@ pub fn start(config: Config, metrics: Metrics) -> (ValidationHost, impl Future (ValidationHost, impl Future, execute_queue: &mut mpsc::Sender, @@ -424,15 +422,8 @@ async fn handle_to_host( handle_precheck_pvf(artifacts, prepare_queue, pvf, result_tx).await?; }, ToHost::ExecutePvf(inputs) => { - handle_execute_pvf( - cache_path, - artifacts, - prepare_queue, - execute_queue, - awaiting_prepare, - inputs, - ) - .await?; + handle_execute_pvf(artifacts, prepare_queue, execute_queue, awaiting_prepare, inputs) + .await?; }, ToHost::HeadsUp { active_pvfs } => handle_heads_up(artifacts, prepare_queue, active_pvfs).await?, @@ -446,27 +437,27 @@ async fn handle_to_host( /// This tries to prepare the PVF by compiling the WASM blob within a timeout set in /// `PvfPrepData`. /// -/// If the prepare job failed previously, we may retry it under certain conditions. +/// We don't retry artifacts that previously failed preparation. We don't expect multiple +/// pre-checking requests. async fn handle_precheck_pvf( artifacts: &mut Artifacts, prepare_queue: &mut mpsc::Sender, pvf: PvfPrepData, - result_sender: PrepareResultSender, + result_sender: PrecheckResultSender, ) -> Result<(), Fatal> { let artifact_id = ArtifactId::from_pvf_prep_data(&pvf); if let Some(state) = artifacts.artifact_state_mut(&artifact_id) { match state { - ArtifactState::Prepared { last_time_needed, prepare_stats } => { + ArtifactState::Prepared { last_time_needed, .. } => { *last_time_needed = SystemTime::now(); - let _ = result_sender.send(Ok(prepare_stats.clone())); + let _ = result_sender.send(Ok(())); }, ArtifactState::Preparing { waiting_for_response, num_failures: _ } => waiting_for_response.push(result_sender), ArtifactState::FailedToProcess { error, .. } => { - // Do not retry failed preparation if another pre-check request comes in. We do not - // retry pre-checking, anyway. - let _ = result_sender.send(PrepareResult::Err(error.clone())); + // Do not retry an artifact that previously failed preparation. + let _ = result_sender.send(PrecheckResult::Err(error.clone())); }, } } else { @@ -489,7 +480,6 @@ async fn handle_precheck_pvf( /// When preparing for execution, we use a more lenient timeout ([`LENIENT_PREPARATION_TIMEOUT`]) /// than when prechecking. async fn handle_execute_pvf( - cache_path: &Path, artifacts: &mut Artifacts, prepare_queue: &mut mpsc::Sender, execute_queue: &mut mpsc::Sender, @@ -502,8 +492,8 @@ async fn handle_execute_pvf( if let Some(state) = artifacts.artifact_state_mut(&artifact_id) { match state { - ArtifactState::Prepared { last_time_needed, .. } => { - let file_metadata = std::fs::metadata(artifact_id.path(cache_path)); + ArtifactState::Prepared { ref path, last_time_needed, .. 
} => { + let file_metadata = std::fs::metadata(path); if file_metadata.is_ok() { *last_time_needed = SystemTime::now(); @@ -512,7 +502,7 @@ async fn handle_execute_pvf( send_execute( execute_queue, execute::ToQueue::Enqueue { - artifact: ArtifactPathId::new(artifact_id, cache_path), + artifact: ArtifactPathId::new(artifact_id, path), pending_execution_request: PendingExecutionRequest { exec_timeout, params, @@ -675,7 +665,6 @@ async fn handle_heads_up( } async fn handle_prepare_done( - cache_path: &Path, artifacts: &mut Artifacts, execute_queue: &mut mpsc::Sender, awaiting_prepare: &mut AwaitingPrepare, @@ -716,7 +705,8 @@ async fn handle_prepare_done( state { for result_sender in waiting_for_response.drain(..) { - let _ = result_sender.send(result.clone()); + let result = result.clone().map(|_| ()); + let _ = result_sender.send(result); } num_failures } else { @@ -736,16 +726,18 @@ async fn handle_prepare_done( continue } - // Don't send failed artifacts to the execution's queue. - if let Err(ref error) = result { - let _ = result_tx.send(Err(ValidationError::from(error.clone()))); - continue - } + let path = match &result { + Ok(success) => success.path.clone(), + Err(error) => { + let _ = result_tx.send(Err(ValidationError::from(error.clone()))); + continue + }, + }; send_execute( execute_queue, execute::ToQueue::Enqueue { - artifact: ArtifactPathId::new(artifact_id.clone(), cache_path), + artifact: ArtifactPathId::new(artifact_id.clone(), &path), pending_execution_request: PendingExecutionRequest { exec_timeout, params, @@ -758,13 +750,13 @@ async fn handle_prepare_done( } *state = match result { - Ok(prepare_stats) => - ArtifactState::Prepared { last_time_needed: SystemTime::now(), prepare_stats }, + Ok(PrepareSuccess { path, stats: prepare_stats }) => + ArtifactState::Prepared { path, last_time_needed: SystemTime::now(), prepare_stats }, Err(error) => { let last_time_failed = SystemTime::now(); let num_failures = *num_failures + 1; - gum::warn!( + gum::error!( target: LOG_TARGET, ?artifact_id, time_failed = ?last_time_failed, @@ -812,7 +804,6 @@ async fn enqueue_prepare_for_execute( } async fn handle_cleanup_pulse( - cache_path: &Path, sweeper_tx: &mut mpsc::Sender, artifacts: &mut Artifacts, artifact_ttl: Duration, @@ -823,14 +814,13 @@ async fn handle_cleanup_pulse( "PVF pruning: {} artifacts reached their end of life", to_remove.len(), ); - for artifact_id in to_remove { + for (artifact_id, path) in to_remove { gum::debug!( target: LOG_TARGET, validation_code_hash = ?artifact_id.code_hash, "pruning artifact", ); - let artifact_path = artifact_id.path(cache_path); - sweeper_tx.send(artifact_path).await.map_err(|_| Fatal)?; + sweeper_tx.send(path).await.map_err(|_| Fatal)?; } Ok(()) @@ -846,7 +836,7 @@ async fn sweeper_task(mut sweeper_rx: mpsc::Receiver) { gum::trace!( target: LOG_TARGET, ?result, - "Sweeping the artifact file {}", + "Sweeped the artifact file {}", condemned.display(), ); }, @@ -882,112 +872,17 @@ fn pulse_every(interval: std::time::Duration) -> impl futures::Stream .map(|_| ()) } -/// Check if we can sandbox the root and emit a warning if not. -/// -/// We do this check by spawning a new process and trying to sandbox it. To get as close as possible -/// to running the check in a worker, we try it... in a worker. The expected return status is 0 on -/// success and -1 on failure. -fn check_can_unshare_user_namespace_and_change_root( - #[cfg_attr(not(target_os = "linux"), allow(unused_variables))] - prepare_worker_program_path: &Path, -) -> bool { - cfg_if::cfg_if! 
{ - if #[cfg(target_os = "linux")] { - let output = std::process::Command::new(prepare_worker_program_path) - .arg("--check-can-unshare-user-namespace-and-change-root") - .output(); - - match output { - Ok(output) if output.status.success() => true, - Ok(output) => { - let stderr = std::str::from_utf8(&output.stderr) - .expect("child process writes a UTF-8 string to stderr; qed") - .trim(); - gum::warn!( - target: LOG_TARGET, - ?prepare_worker_program_path, - // Docs say to always print status using `Display` implementation. - status = %output.status, - %stderr, - "Cannot unshare user namespace and change root, which are Linux-specific kernel security features. Running validation of malicious PVF code has a higher risk of compromising this machine. Consider running with support for unsharing user namespaces for maximum security." - ); - false - }, - Err(err) => { - gum::warn!( - target: LOG_TARGET, - ?prepare_worker_program_path, - "Could not start child process: {}", - err - ); - false - }, - } - } else { - gum::warn!( - target: LOG_TARGET, - "Cannot unshare user namespace and change root, which are Linux-specific kernel security features. Running validation of malicious PVF code has a higher risk of compromising this machine. Consider running on Linux with support for unsharing user namespaces for maximum security." - ); - false - } - } -} - -/// Check if landlock is supported and emit a warning if not. -/// -/// We do this check by spawning a new process and trying to sandbox it. To get as close as possible -/// to running the check in a worker, we try it... in a worker. The expected return status is 0 on -/// success and -1 on failure. -fn check_landlock( - #[cfg_attr(not(target_os = "linux"), allow(unused_variables))] - prepare_worker_program_path: &Path, -) -> bool { - cfg_if::cfg_if! { - if #[cfg(target_os = "linux")] { - match std::process::Command::new(prepare_worker_program_path) - .arg("--check-can-enable-landlock") - .status() - { - Ok(status) if status.success() => true, - Ok(status) => { - let abi = - polkadot_node_core_pvf_common::worker::security::landlock::LANDLOCK_ABI as u8; - gum::warn!( - target: LOG_TARGET, - ?prepare_worker_program_path, - ?status, - %abi, - "Cannot fully enable landlock, a Linux-specific kernel security feature. Running validation of malicious PVF code has a higher risk of compromising this machine. Consider upgrading the kernel version for maximum security." - ); - false - }, - Err(err) => { - gum::warn!( - target: LOG_TARGET, - ?prepare_worker_program_path, - "Could not start child process: {}", - err - ); - false - }, - } - } else { - gum::warn!( - target: LOG_TARGET, - "Cannot enable landlock, a Linux-specific kernel security feature. Running validation of malicious PVF code has a higher risk of compromising this machine. Consider running on Linux with landlock support for maximum security." 
- ); - false - } - } -} - #[cfg(test)] pub(crate) mod tests { use super::*; - use crate::InvalidCandidate; + use crate::PossiblyInvalidError; use assert_matches::assert_matches; use futures::future::BoxFuture; - use polkadot_node_core_pvf_common::{error::PrepareError, prepare::PrepareStats}; + use polkadot_node_core_pvf_common::{ + error::PrepareError, + prepare::{PrepareStats, PrepareSuccess}, + }; + use sp_core::hexdisplay::AsBytesRef; const TEST_EXECUTION_TIMEOUT: Duration = Duration::from_secs(3); pub(crate) const TEST_PREPARATION_TIMEOUT: Duration = Duration::from_secs(30); @@ -1007,12 +902,16 @@ pub(crate) mod tests { } /// Creates a new PVF which artifact id can be uniquely identified by the given number. - fn artifact_id(descriminator: u32) -> ArtifactId { - ArtifactId::from_pvf_prep_data(&PvfPrepData::from_discriminator(descriminator)) + fn artifact_id(discriminator: u32) -> ArtifactId { + ArtifactId::from_pvf_prep_data(&PvfPrepData::from_discriminator(discriminator)) } - fn artifact_path(descriminator: u32) -> PathBuf { - artifact_id(descriminator).path(&PathBuf::from(std::env::temp_dir())).to_owned() + fn artifact_path(discriminator: u32) -> PathBuf { + let pvf = PvfPrepData::from_discriminator(discriminator); + let checksum = blake3::hash(pvf.code().as_bytes_ref()); + artifact_id(discriminator) + .path(&PathBuf::from(std::env::temp_dir()), checksum.to_hex().as_str()) + .to_owned() } struct Builder { @@ -1050,8 +949,6 @@ pub(crate) mod tests { impl Test { fn new(Builder { cleanup_pulse_interval, artifact_ttl, artifacts }: Builder) -> Self { - let cache_path = PathBuf::from(std::env::temp_dir()); - let (to_host_tx, to_host_rx) = mpsc::channel(10); let (to_prepare_queue_tx, to_prepare_queue_rx) = mpsc::channel(10); let (from_prepare_queue_tx, from_prepare_queue_rx) = mpsc::unbounded(); @@ -1059,7 +956,6 @@ pub(crate) mod tests { let (to_sweeper_tx, to_sweeper_rx) = mpsc::channel(10); let run = run(Inner { - cache_path, cleanup_pulse_interval, artifact_ttl, artifacts, @@ -1084,7 +980,8 @@ pub(crate) mod tests { fn host_handle(&mut self) -> ValidationHost { let to_host_tx = self.to_host_tx.take().unwrap(); - ValidationHost { to_host_tx } + let security_status = Default::default(); + ValidationHost { to_host_tx, security_status } } async fn poll_and_recv_result(&mut self, result_rx: oneshot::Receiver) -> T @@ -1208,12 +1105,18 @@ pub(crate) mod tests { let mut builder = Builder::default(); builder.cleanup_pulse_interval = Duration::from_millis(100); builder.artifact_ttl = Duration::from_millis(500); - builder - .artifacts - .insert_prepared(artifact_id(1), mock_now, PrepareStats::default()); - builder - .artifacts - .insert_prepared(artifact_id(2), mock_now, PrepareStats::default()); + builder.artifacts.insert_prepared( + artifact_id(1), + artifact_path(1), + mock_now, + PrepareStats::default(), + ); + builder.artifacts.insert_prepared( + artifact_id(2), + artifact_path(2), + mock_now, + PrepareStats::default(), + ); let mut test = builder.build(); let mut host = test.host_handle(); @@ -1285,7 +1188,7 @@ pub(crate) mod tests { test.from_prepare_queue_tx .send(prepare::FromQueue { artifact_id: artifact_id(1), - result: Ok(PrepareStats::default()), + result: Ok(PrepareSuccess::default()), }) .await .unwrap(); @@ -1301,7 +1204,7 @@ pub(crate) mod tests { test.from_prepare_queue_tx .send(prepare::FromQueue { artifact_id: artifact_id(2), - result: Ok(PrepareStats::default()), + result: Ok(PrepareSuccess::default()), }) .await .unwrap(); @@ -1311,27 +1214,27 @@ pub(crate) mod tests { ); 
result_tx_pvf_1_1 - .send(Err(ValidationError::InvalidCandidate(InvalidCandidate::AmbiguousWorkerDeath))) + .send(Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath))) .unwrap(); assert_matches!( result_rx_pvf_1_1.now_or_never().unwrap().unwrap(), - Err(ValidationError::InvalidCandidate(InvalidCandidate::AmbiguousWorkerDeath)) + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)) ); result_tx_pvf_1_2 - .send(Err(ValidationError::InvalidCandidate(InvalidCandidate::AmbiguousWorkerDeath))) + .send(Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath))) .unwrap(); assert_matches!( result_rx_pvf_1_2.now_or_never().unwrap().unwrap(), - Err(ValidationError::InvalidCandidate(InvalidCandidate::AmbiguousWorkerDeath)) + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)) ); result_tx_pvf_2 - .send(Err(ValidationError::InvalidCandidate(InvalidCandidate::AmbiguousWorkerDeath))) + .send(Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath))) .unwrap(); assert_matches!( result_rx_pvf_2.now_or_never().unwrap().unwrap(), - Err(ValidationError::InvalidCandidate(InvalidCandidate::AmbiguousWorkerDeath)) + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)) ); } @@ -1355,7 +1258,7 @@ pub(crate) mod tests { test.from_prepare_queue_tx .send(prepare::FromQueue { artifact_id: artifact_id(1), - result: Ok(PrepareStats::default()), + result: Ok(PrepareSuccess::default()), }) .await .unwrap(); @@ -1437,7 +1340,7 @@ pub(crate) mod tests { assert_matches!(result_rx.now_or_never().unwrap().unwrap(), Err(PrepareError::TimedOut)); assert_matches!( result_rx_execute.now_or_never().unwrap().unwrap(), - Err(ValidationError::InternalError(_)) + Err(ValidationError::Internal(_)) ); // Reversed case: first send multiple precheck requests, then ask for an execution. @@ -1468,7 +1371,7 @@ pub(crate) mod tests { test.from_prepare_queue_tx .send(prepare::FromQueue { artifact_id: artifact_id(2), - result: Ok(PrepareStats::default()), + result: Ok(PrepareSuccess::default()), }) .await .unwrap(); @@ -1579,7 +1482,7 @@ pub(crate) mod tests { // The result should contain the error. let result = test.poll_and_recv_result(result_rx).await; - assert_matches!(result, Err(ValidationError::InternalError(_))); + assert_matches!(result, Err(ValidationError::Internal(_))); // Submit another execute request. We shouldn't try to prepare again, yet. let (result_tx_2, result_rx_2) = oneshot::channel(); @@ -1598,7 +1501,7 @@ pub(crate) mod tests { // The result should contain the original error. let result = test.poll_and_recv_result(result_rx_2).await; - assert_matches!(result, Err(ValidationError::InternalError(_))); + assert_matches!(result, Err(ValidationError::Internal(_))); // Pause for enough time to reset the cooldown for this failed prepare request. futures_timer::Delay::new(PREPARE_FAILURE_COOLDOWN).await; @@ -1624,7 +1527,7 @@ pub(crate) mod tests { test.from_prepare_queue_tx .send(prepare::FromQueue { artifact_id: artifact_id(1), - result: Ok(PrepareStats::default()), + result: Ok(PrepareSuccess::default()), }) .await .unwrap(); @@ -1638,11 +1541,11 @@ pub(crate) mod tests { // Send an error for the execution here, just so we can check the result receiver is still // alive. 
result_tx_3 - .send(Err(ValidationError::InvalidCandidate(InvalidCandidate::AmbiguousWorkerDeath))) + .send(Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath))) .unwrap(); assert_matches!( result_rx_3.now_or_never().unwrap().unwrap(), - Err(ValidationError::InvalidCandidate(InvalidCandidate::AmbiguousWorkerDeath)) + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)) ); } @@ -1681,10 +1584,7 @@ pub(crate) mod tests { // The result should contain the error. let result = test.poll_and_recv_result(result_rx).await; - assert_matches!( - result, - Err(ValidationError::InvalidCandidate(InvalidCandidate::PrepareError(_))) - ); + assert_matches!(result, Err(ValidationError::Preparation(_))); // Submit another execute request. let (result_tx_2, result_rx_2) = oneshot::channel(); @@ -1703,10 +1603,7 @@ pub(crate) mod tests { // The result should contain the original error. let result = test.poll_and_recv_result(result_rx_2).await; - assert_matches!( - result, - Err(ValidationError::InvalidCandidate(InvalidCandidate::PrepareError(_))) - ); + assert_matches!(result, Err(ValidationError::Preparation(_))); // Pause for enough time to reset the cooldown for this failed prepare request. futures_timer::Delay::new(PREPARE_FAILURE_COOLDOWN).await; @@ -1728,10 +1625,7 @@ pub(crate) mod tests { // The result should still contain the original error. let result = test.poll_and_recv_result(result_rx_3).await; - assert_matches!( - result, - Err(ValidationError::InvalidCandidate(InvalidCandidate::PrepareError(_))) - ); + assert_matches!(result, Err(ValidationError::Preparation(_))); } // Test that multiple heads-up requests trigger preparation retries if the first one failed. @@ -1800,7 +1694,7 @@ pub(crate) mod tests { test.from_prepare_queue_tx .send(prepare::FromQueue { artifact_id: artifact_id(1), - result: Ok(PrepareStats::default()), + result: Ok(PrepareSuccess::default()), }) .await .unwrap(); diff --git a/polkadot/node/core/pvf/src/lib.rs b/polkadot/node/core/pvf/src/lib.rs index 1b8d83773813a9619321607eb49ddf83eebd28ed..3306a2461c155cad4384a73a7993712b00b3a1d7 100644 --- a/polkadot/node/core/pvf/src/lib.rs +++ b/polkadot/node/core/pvf/src/lib.rs @@ -19,8 +19,10 @@ //! The PVF validation host. Responsible for coordinating preparation and execution of PVFs. //! //! For more background, refer to the Implementer's Guide: [PVF -//! Pre-checking](https://paritytech.github.io/polkadot/book/pvf-prechecking.html) and [Candidate -//! Validation](https://paritytech.github.io/polkadot/book/node/utility/candidate-validation.html#pvf-host). +//! Pre-checking](https://paritytech.github.io/polkadot-sdk/book/pvf-prechecking.html), [Candidate +//! Validation](https://paritytech.github.io/polkadot-sdk/book/node/utility/candidate-validation.html) +//! and [PVF Host and Workers](https://paritytech.github.io/polkadot-sdk/book/node/utility/pvf-host-and-workers.html). +//! //! //! # Entrypoint //! @@ -82,7 +84,7 @@ //! A pruning task will run at a fixed interval of time. This task will remove all artifacts that //! weren't used or received a heads up signal for a while. //! -//! ## Execution +//! ## Execution //! //! The execute workers will be fed by the requests from the execution queue, which is basically a //! 
combination of a path to the compiled artifact and the @@ -95,12 +97,13 @@ mod host; mod metrics; mod prepare; mod priority; +mod security; mod worker_intf; #[cfg(feature = "test-utils")] pub mod testing; -pub use error::{InvalidCandidate, ValidationError}; +pub use error::{InvalidCandidate, PossiblyInvalidError, ValidationError}; pub use host::{start, Config, ValidationHost, EXECUTE_BINARY_NAME, PREPARE_BINARY_NAME}; pub use metrics::Metrics; pub use priority::Priority; @@ -114,5 +117,18 @@ pub use polkadot_node_core_pvf_common::{ SecurityStatus, }; +use std::{path::Path, process::Command}; + /// The log target for this crate. pub const LOG_TARGET: &str = "parachain::pvf"; + +/// Utility to get the version of a worker, used for version checks. +/// +/// The worker's existence at the given path must be checked separately. +pub fn get_worker_version(worker_path: &Path) -> std::io::Result { + let worker_version = Command::new(worker_path).args(["--version"]).output()?.stdout; + Ok(std::str::from_utf8(&worker_version) + .expect("version is printed as a string; qed") + .trim() + .to_string()) +} diff --git a/polkadot/node/core/pvf/src/metrics.rs b/polkadot/node/core/pvf/src/metrics.rs index 3d792793498b5ef69967a4337c4adc8425623fee..7fd876cf17405f4815173ab289d34f41d7e1e1cc 100644 --- a/polkadot/node/core/pvf/src/metrics.rs +++ b/polkadot/node/core/pvf/src/metrics.rs @@ -93,6 +93,10 @@ impl Metrics { metrics.preparation_max_resident.observe(max_resident_kb); metrics.preparation_max_allocated.observe(max_allocated_kb); } + + metrics + .preparation_peak_tracked_allocation + .observe((memory_stats.peak_tracked_alloc / 1024) as f64); } } } @@ -110,10 +114,14 @@ struct MetricsInner { execution_time: prometheus::Histogram, #[cfg(target_os = "linux")] preparation_max_rss: prometheus::Histogram, + // Max. allocated memory, tracked by Jemallocator, polling-based #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] preparation_max_allocated: prometheus::Histogram, + // Max. resident memory, tracked by Jemallocator, polling-based #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] preparation_max_resident: prometheus::Histogram, + // Peak allocation value, tracked by tracking-allocator + preparation_peak_tracked_allocation: prometheus::Histogram, } impl metrics::Metrics for Metrics { @@ -271,6 +279,18 @@ impl metrics::Metrics for Metrics { )?, registry, )?, + preparation_peak_tracked_allocation: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "polkadot_pvf_preparation_peak_tracked_allocation", + "peak allocation observed for preparation (in kilobytes)", + ).buckets( + prometheus::exponential_buckets(8192.0, 2.0, 10) + .expect("arguments are always valid; qed"), + ), + )?, + registry, + )?, }; Ok(Metrics(Some(inner))) } diff --git a/polkadot/node/core/pvf/src/prepare/pool.rs b/polkadot/node/core/pvf/src/prepare/pool.rs index 7933b0319a6f9fcd9fe83cad9f47950827a6ecc9..4901be9fe1b7d8a41e6390f4b9b9eaa4fff398ae 100644 --- a/polkadot/node/core/pvf/src/prepare/pool.rs +++ b/polkadot/node/core/pvf/src/prepare/pool.rs @@ -68,7 +68,7 @@ pub enum ToPool { /// /// In either case, the worker is considered busy and no further `StartWork` messages should be /// sent until either `Concluded` or `Rip` message is received. - StartWork { worker: Worker, pvf: PvfPrepData, artifact_path: PathBuf }, + StartWork { worker: Worker, pvf: PvfPrepData, cache_path: PathBuf }, } /// A message sent from pool to its client. 
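A small usage sketch of `get_worker_version` above (the caller, error strings and mismatch policy here are hypothetical): the worker binary's existence is checked separately, then the version it reports is compared against the node's own and a mismatch is rejected.

use std::path::Path;

fn check_worker_version(worker_path: &Path, node_version: &str) -> Result<(), String> {
    let worker_version = polkadot_node_core_pvf::get_worker_version(worker_path)
        .map_err(|err| format!("could not run {}: {}", worker_path.display(), err))?;
    if worker_version != node_version {
        return Err(format!(
            "version mismatch: worker {} reports {}, node is {}",
            worker_path.display(),
            worker_version,
            node_version,
        ))
    }
    Ok(())
}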
@@ -232,7 +232,7 @@ fn handle_to_pool( .boxed(), ); }, - ToPool::StartWork { worker, pvf, artifact_path } => { + ToPool::StartWork { worker, pvf, cache_path } => { if let Some(data) = spawned.get_mut(worker) { if let Some(idle) = data.idle.take() { let preparation_timer = metrics.time_preparation(); @@ -242,7 +242,7 @@ fn handle_to_pool( worker, idle, pvf, - artifact_path, + cache_path, preparation_timer, ) .boxed(), @@ -303,10 +303,10 @@ async fn start_work_task( worker: Worker, idle: IdleWorker, pvf: PvfPrepData, - artifact_path: PathBuf, + cache_path: PathBuf, _preparation_timer: Option, ) -> PoolEvent { - let outcome = worker_intf::start_work(&metrics, idle, pvf, artifact_path).await; + let outcome = worker_intf::start_work(&metrics, idle, pvf, cache_path).await; PoolEvent::StartWork(worker, outcome) } @@ -339,17 +339,17 @@ fn handle_mux( spawned, worker, idle, - Err(PrepareError::CreateTmpFileErr(err)), + Err(PrepareError::CreateTmpFile(err)), ), // Return `Concluded`, but do not kill the worker since the error was on the host // side. - Outcome::RenameTmpFileErr { worker: idle, result: _, err, src, dest } => + Outcome::RenameTmpFile { worker: idle, result: _, err, src, dest } => handle_concluded_no_rip( from_pool, spawned, worker, idle, - Err(PrepareError::RenameTmpFileErr { err, src, dest }), + Err(PrepareError::RenameTmpFile { err, src, dest }), ), // Could not clear worker cache. Kill the worker so other jobs can't see the data. Outcome::ClearWorkerDir { err } => { @@ -387,6 +387,21 @@ fn handle_mux( Ok(()) }, + // The worker might still be usable, but we kill it just in case. + Outcome::JobDied { err, job_pid } => { + if attempt_retire(metrics, spawned, worker) { + reply( + from_pool, + FromPool::Concluded { + worker, + rip: true, + result: Err(PrepareError::JobDied { err, job_pid }), + }, + )?; + } + + Ok(()) + }, Outcome::TimedOut => { if attempt_retire(metrics, spawned, worker) { reply( @@ -399,6 +414,20 @@ fn handle_mux( )?; } + Ok(()) + }, + Outcome::OutOfMemory => { + if attempt_retire(metrics, spawned, worker) { + reply( + from_pool, + FromPool::Concluded { + worker, + rip: true, + result: Err(PrepareError::OutOfMemory), + }, + )?; + } + Ok(()) }, } diff --git a/polkadot/node/core/pvf/src/prepare/queue.rs b/polkadot/node/core/pvf/src/prepare/queue.rs index c38012d745482e66fa4d74647e34663447b8f0d5..c140a6cafda08ed26aa9556935a591dfa0bd2f0e 100644 --- a/polkadot/node/core/pvf/src/prepare/queue.rs +++ b/polkadot/node/core/pvf/src/prepare/queue.rs @@ -268,12 +268,12 @@ fn find_idle_worker(queue: &mut Queue) -> Option { } async fn handle_from_pool(queue: &mut Queue, from_pool: pool::FromPool) -> Result<(), Fatal> { - use pool::FromPool::*; + use pool::FromPool; match from_pool { - Spawned(worker) => handle_worker_spawned(queue, worker).await?, - Concluded { worker, rip, result } => + FromPool::Spawned(worker) => handle_worker_spawned(queue, worker).await?, + FromPool::Concluded { worker, rip, result } => handle_worker_concluded(queue, worker, rip, result).await?, - Rip(worker) => handle_worker_rip(queue, worker).await?, + FromPool::Rip(worker) => handle_worker_rip(queue, worker).await?, } Ok(()) } @@ -424,17 +424,17 @@ async fn spawn_extra_worker(queue: &mut Queue, critical: bool) -> Result<(), Fat /// Attaches the work to the given worker telling the poll about the job. 
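The switch from handing the pool a precomputed `artifact_path` to handing it the `cache_path` follows from the new naming scheme: the final artifact file name embeds the blake3 checksum of the compiled artifact, which only becomes known once preparation has finished. A hypothetical, string-only helper makes that dependency explicit (it mirrors `ArtifactId::path` without the hash types):

use std::path::{Path, PathBuf};

// The destination path can only be computed after preparation, because it depends on
// the checksum of the compiled bytes.
fn concluded_artifact_path(
    cache_path: &Path,
    prefix: &str,
    code_hash: &str,
    params_hash: &str,
    compiled_artifact: &[u8],
) -> PathBuf {
    let checksum = blake3::hash(compiled_artifact).to_hex();
    cache_path.join(format!("{prefix}_0x{code_hash}_0x{params_hash}_0x{checksum}"))
}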
async fn assign(queue: &mut Queue, worker: Worker, job: Job) -> Result<(), Fatal> { let job_data = &mut queue.jobs[job]; - - let artifact_id = ArtifactId::from_pvf_prep_data(&job_data.pvf); - let artifact_path = artifact_id.path(&queue.cache_path); - job_data.worker = Some(worker); queue.workers[worker].job = Some(job); send_pool( &mut queue.to_pool_tx, - pool::ToPool::StartWork { worker, pvf: job_data.pvf.clone(), artifact_path }, + pool::ToPool::StartWork { + worker, + pvf: job_data.pvf.clone(), + cache_path: queue.cache_path.clone(), + }, ) .await?; @@ -491,7 +491,7 @@ mod tests { use crate::host::tests::TEST_PREPARATION_TIMEOUT; use assert_matches::assert_matches; use futures::{future::BoxFuture, FutureExt}; - use polkadot_node_core_pvf_common::{error::PrepareError, prepare::PrepareStats}; + use polkadot_node_core_pvf_common::{error::PrepareError, prepare::PrepareSuccess}; use slotmap::SlotMap; use std::task::Poll; @@ -612,7 +612,7 @@ mod tests { test.send_from_pool(pool::FromPool::Concluded { worker: w, rip: false, - result: Ok(PrepareStats::default()), + result: Ok(PrepareSuccess::default()), }); assert_eq!( @@ -651,7 +651,7 @@ mod tests { test.send_from_pool(pool::FromPool::Concluded { worker: w1, rip: false, - result: Ok(PrepareStats::default()), + result: Ok(PrepareSuccess::default()), }); assert_matches!(test.poll_and_recv_to_pool().await, pool::ToPool::StartWork { .. }); @@ -697,7 +697,7 @@ mod tests { test.send_from_pool(pool::FromPool::Concluded { worker: w1, rip: false, - result: Ok(PrepareStats::default()), + result: Ok(PrepareSuccess::default()), }); assert_eq!(test.poll_and_recv_to_pool().await, pool::ToPool::Kill(w1)); } @@ -731,7 +731,7 @@ mod tests { test.send_from_pool(pool::FromPool::Concluded { worker: w1, rip: true, - result: Ok(PrepareStats::default()), + result: Ok(PrepareSuccess::default()), }); // Since there is still work, the queue requested one extra worker to spawn to handle the diff --git a/polkadot/node/core/pvf/src/prepare/worker_intf.rs b/polkadot/node/core/pvf/src/prepare/worker_intf.rs index b66c3604434363cdffefaaddf9091397c25b49a0..f978005068c8e06d9446124be4284feda86f386e 100644 --- a/polkadot/node/core/pvf/src/prepare/worker_intf.rs +++ b/polkadot/node/core/pvf/src/prepare/worker_intf.rs @@ -17,7 +17,9 @@ //! Host interface to the prepare worker. use crate::{ + artifacts::ArtifactId, metrics::Metrics, + security, worker_intf::{ clear_worker_dir_path, framed_recv, framed_send, spawn_with_program_path, IdleWorker, SpawnErr, WorkerDir, WorkerHandle, JOB_TIMEOUT_WALL_CLOCK_FACTOR, @@ -26,8 +28,8 @@ use crate::{ }; use parity_scale_codec::{Decode, Encode}; use polkadot_node_core_pvf_common::{ - error::{PrepareError, PrepareResult}, - prepare::PrepareStats, + error::{PrepareError, PrepareResult, PrepareWorkerResult}, + prepare::{PrepareStats, PrepareSuccess, PrepareWorkerSuccess}, pvf::PvfPrepData, worker_dir, SecurityStatus, }; @@ -78,9 +80,9 @@ pub enum Outcome { CreateTmpFileErr { worker: IdleWorker, err: String }, /// The response from the worker is received, but the tmp file cannot be renamed (moved) to the /// final destination location. - RenameTmpFileErr { + RenameTmpFile { worker: IdleWorker, - result: PrepareResult, + result: PrepareWorkerResult, err: String, // Unfortunately `PathBuf` doesn't implement `Encode`/`Decode`, so we do a fallible // conversion to `Option`. @@ -97,6 +99,12 @@ pub enum Outcome { /// /// This doesn't return an idle worker instance, thus this worker is no longer usable. 
IoErr(String), + /// The worker ran out of memory and is aborting. The worker should be ripped. + OutOfMemory, + /// The preparation job process died, due to OOM, a seccomp violation, or some other factor. + /// + /// The worker might still be usable, but we kill it just in case. + JobDied { err: String, job_pid: i32 }, } /// Given the idle token of a worker and parameters of work, communicates with the worker and @@ -108,7 +116,7 @@ pub async fn start_work( metrics: &Metrics, worker: IdleWorker, pvf: PvfPrepData, - artifact_path: PathBuf, + cache_path: PathBuf, ) -> Outcome { let IdleWorker { stream, pid, worker_dir } = worker; @@ -116,8 +124,8 @@ pub async fn start_work( target: LOG_TARGET, worker_pid = %pid, ?worker_dir, - "starting prepare for {}", - artifact_path.display(), + "starting prepare for {:?}", + pvf, ); with_worker_dir_setup( @@ -126,7 +134,9 @@ pub async fn start_work( pid, |tmp_artifact_file, mut stream, worker_dir| async move { let preparation_timeout = pvf.prep_timeout(); - if let Err(err) = send_request(&mut stream, pvf).await { + let audit_log_file = security::AuditLogFile::try_open_and_seek_to_end().await; + + if let Err(err) = send_request(&mut stream, &pvf).await { gum::warn!( target: LOG_TARGET, worker_pid = %pid, @@ -150,15 +160,17 @@ pub async fn start_work( match result { // Received bytes from worker within the time limit. - Ok(Ok(prepare_result)) => + Ok(Ok(prepare_worker_result)) => handle_response( metrics, IdleWorker { stream, pid, worker_dir }, - prepare_result, + prepare_worker_result, pid, tmp_artifact_file, - artifact_path, + &pvf, + &cache_path, preparation_timeout, + audit_log_file, ) .await, Ok(Err(err)) => { @@ -193,18 +205,43 @@ pub async fn start_work( async fn handle_response( metrics: &Metrics, worker: IdleWorker, - result: PrepareResult, + result: PrepareWorkerResult, worker_pid: u32, tmp_file: PathBuf, - artifact_path: PathBuf, + pvf: &PvfPrepData, + cache_path: &Path, preparation_timeout: Duration, + audit_log_file: Option, ) -> Outcome { - let PrepareStats { cpu_time_elapsed, memory_stats } = match result.clone() { - Ok(result) => result, - // Timed out on the child. This should already be logged by the child. - Err(PrepareError::TimedOut) => return Outcome::TimedOut, - Err(_) => return Outcome::Concluded { worker, result }, - }; + let PrepareWorkerSuccess { checksum, stats: PrepareStats { cpu_time_elapsed, memory_stats } } = + match result.clone() { + Ok(result) => result, + // Timed out on the child. This should already be logged by the child. + Err(PrepareError::TimedOut) => return Outcome::TimedOut, + Err(PrepareError::JobDied { err, job_pid }) => { + // The job died. Check if it was due to a seccomp violation. + // + // NOTE: Log, but don't change the outcome. Not all validators may have + // auditing enabled, so we don't want attackers to abuse a non-deterministic + // outcome. + for syscall in + security::check_seccomp_violations_for_job(audit_log_file, job_pid).await + { + gum::error!( + target: LOG_TARGET, + %worker_pid, + %job_pid, + %syscall, + ?pvf, + "A forbidden syscall was attempted! This is a violation of our seccomp security policy. Report an issue ASAP!" + ); + } + + return Outcome::JobDied { err, job_pid } + }, + Err(PrepareError::OutOfMemory) => return Outcome::OutOfMemory, + Err(err) => return Outcome::Concluded { worker, result: Err(err) }, + }; if cpu_time_elapsed > preparation_timeout { // The job didn't complete within the timeout. 
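// The host double-checks the CPU time reported by the worker against the preparation timeout:
// if it was exceeded, the result is treated as a timeout even though the job itself reported
// success.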
@@ -219,6 +256,9 @@ async fn handle_response( return Outcome::TimedOut } + let artifact_id = ArtifactId::from_pvf_prep_data(pvf); + let artifact_path = artifact_id.path(cache_path, &checksum); + gum::debug!( target: LOG_TARGET, %worker_pid, @@ -228,7 +268,13 @@ async fn handle_response( ); let outcome = match tokio::fs::rename(&tmp_file, &artifact_path).await { - Ok(()) => Outcome::Concluded { worker, result }, + Ok(()) => Outcome::Concluded { + worker, + result: Ok(PrepareSuccess { + path: artifact_path, + stats: PrepareStats { cpu_time_elapsed, memory_stats: memory_stats.clone() }, + }), + }, Err(err) => { gum::warn!( target: LOG_TARGET, @@ -238,7 +284,7 @@ async fn handle_response( artifact_path.display(), err, ); - Outcome::RenameTmpFileErr { + Outcome::RenameTmpFile { worker, result, err: format!("{:?}", err), @@ -272,7 +318,7 @@ where { // Create the tmp file here so that the child doesn't need any file creation rights. This will // be cleared at the end of this function. - let tmp_file = worker_dir::prepare_tmp_artifact(&worker_dir.path); + let tmp_file = worker_dir::prepare_tmp_artifact(worker_dir.path()); if let Err(err) = tokio::fs::File::create(&tmp_file).await { gum::warn!( target: LOG_TARGET, @@ -287,7 +333,7 @@ where } }; - let worker_dir_path = worker_dir.path.clone(); + let worker_dir_path = worker_dir.path().to_owned(); let outcome = f(tmp_file, stream, worker_dir).await; // Try to clear the worker dir. @@ -305,14 +351,14 @@ where outcome } -async fn send_request(stream: &mut UnixStream, pvf: PvfPrepData) -> io::Result<()> { +async fn send_request(stream: &mut UnixStream, pvf: &PvfPrepData) -> io::Result<()> { framed_send(stream, &pvf.encode()).await?; Ok(()) } -async fn recv_response(stream: &mut UnixStream, pid: u32) -> io::Result { +async fn recv_response(stream: &mut UnixStream, pid: u32) -> io::Result { let result = framed_recv(stream).await?; - let result = PrepareResult::decode(&mut &result[..]).map_err(|e| { + let result = PrepareWorkerResult::decode(&mut &result[..]).map_err(|e| { // We received invalid bytes from the worker. let bound_bytes = &result[..result.len().min(4)]; gum::warn!( diff --git a/polkadot/node/core/pvf/src/security.rs b/polkadot/node/core/pvf/src/security.rs new file mode 100644 index 0000000000000000000000000000000000000000..ef8714f58c81f7f0f311c3425d39cd13d4e068da --- /dev/null +++ b/polkadot/node/core/pvf/src/security.rs @@ -0,0 +1,449 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use crate::{Config, SecurityStatus, LOG_TARGET}; +use futures::join; +use std::{fmt, path::Path}; +use tokio::{ + fs::{File, OpenOptions}, + io::{AsyncReadExt, AsyncSeekExt, SeekFrom}, +}; + +const SECURE_MODE_ANNOUNCEMENT: &'static str = + "In the next release this will be a hard error by default. 
+ \nMore information: https://wiki.polkadot.network/docs/maintain-guides-secure-validator#secure-validator-mode"; + +/// Run checks for supported security features. +/// +/// # Returns +/// +/// Returns the set of security features that we were able to enable. If an error occurs while +/// enabling a security feature we set the corresponding status to `false`. +pub async fn check_security_status(config: &Config) -> SecurityStatus { + let Config { prepare_worker_program_path, cache_path, .. } = config; + + // TODO: add check that syslog is available and that seccomp violations are logged? + let (landlock, seccomp, change_root) = join!( + check_landlock(prepare_worker_program_path), + check_seccomp(prepare_worker_program_path), + check_can_unshare_user_namespace_and_change_root(prepare_worker_program_path, cache_path) + ); + + let security_status = SecurityStatus { + can_enable_landlock: landlock.is_ok(), + can_enable_seccomp: seccomp.is_ok(), + can_unshare_user_namespace_and_change_root: change_root.is_ok(), + }; + + let errs: Vec = [landlock, seccomp, change_root] + .into_iter() + .filter_map(|result| result.err()) + .collect(); + let err_occurred = print_secure_mode_message(errs); + if err_occurred { + gum::error!( + target: LOG_TARGET, + "{}", + SECURE_MODE_ANNOUNCEMENT, + ); + } + + security_status +} + +type SecureModeResult = std::result::Result<(), SecureModeError>; + +/// Errors related to enabling Secure Validator Mode. +#[derive(Debug)] +enum SecureModeError { + CannotEnableLandlock(String), + CannotEnableSeccomp(String), + CannotUnshareUserNamespaceAndChangeRoot(String), +} + +impl SecureModeError { + /// Whether this error is allowed with Secure Validator Mode enabled. + fn is_allowed_in_secure_mode(&self) -> bool { + use SecureModeError::*; + match self { + CannotEnableLandlock(_) => true, + CannotEnableSeccomp(_) => false, + CannotUnshareUserNamespaceAndChangeRoot(_) => false, + } + } +} + +impl fmt::Display for SecureModeError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + use SecureModeError::*; + match self { + CannotEnableLandlock(err) => write!(f, "Cannot enable landlock, a Linux 5.13+ kernel security feature: {err}"), + CannotEnableSeccomp(err) => write!(f, "Cannot enable seccomp, a Linux-specific kernel security feature: {err}"), + CannotUnshareUserNamespaceAndChangeRoot(err) => write!(f, "Cannot unshare user namespace and change root, which are Linux-specific kernel security features: {err}"), + } + } +} + +/// Errors if Secure Validator Mode and some mandatory errors occurred, warn otherwise. +/// +/// # Returns +/// +/// `true` if an error was printed, `false` otherwise. +fn print_secure_mode_message(errs: Vec) -> bool { + // Trying to run securely and some mandatory errors occurred. + const SECURE_MODE_ERROR: &'static str = "🚨 Your system cannot securely run a validator. \ + \nRunning validation of malicious PVF code has a higher risk of compromising this machine."; + // Some errors occurred when running insecurely, or some optional errors occurred when running + // securely. + const SECURE_MODE_WARNING: &'static str = "🚨 Some security issues have been detected. 
\ + \nRunning validation of malicious PVF code has a higher risk of compromising this machine."; + + if errs.is_empty() { + return false + } + + let errs_allowed = errs.iter().all(|err| err.is_allowed_in_secure_mode()); + let errs_string: String = errs + .iter() + .map(|err| { + format!( + "\n - {}{}", + if err.is_allowed_in_secure_mode() { "Optional: " } else { "" }, + err + ) + }) + .collect(); + + if errs_allowed { + gum::warn!( + target: LOG_TARGET, + "{}{}", + SECURE_MODE_WARNING, + errs_string, + ); + false + } else { + gum::error!( + target: LOG_TARGET, + "{}{}", + SECURE_MODE_ERROR, + errs_string, + ); + true + } +} + +/// Check if we can change root to a new, sandboxed root and return an error if not. +/// +/// We do this check by spawning a new process and trying to sandbox it. To get as close as possible +/// to running the check in a worker, we try it... in a worker. The expected return status is 0 on +/// success and -1 on failure. +async fn check_can_unshare_user_namespace_and_change_root( + #[cfg_attr(not(target_os = "linux"), allow(unused_variables))] + prepare_worker_program_path: &Path, + #[cfg_attr(not(target_os = "linux"), allow(unused_variables))] cache_path: &Path, +) -> SecureModeResult { + cfg_if::cfg_if! { + if #[cfg(target_os = "linux")] { + let cache_dir_tempdir = tempfile::Builder::new() + .prefix("check-can-unshare-") + .tempdir_in(cache_path) + .map_err(|err| SecureModeError::CannotUnshareUserNamespaceAndChangeRoot( + format!("could not create a temporary directory in {:?}: {}", cache_path, err) + ))?; + match tokio::process::Command::new(prepare_worker_program_path) + .arg("--check-can-unshare-user-namespace-and-change-root") + .arg(cache_dir_tempdir.path()) + .output() + .await + { + Ok(output) if output.status.success() => Ok(()), + Ok(output) => { + let stderr = std::str::from_utf8(&output.stderr) + .expect("child process writes a UTF-8 string to stderr; qed") + .trim(); + if stderr.is_empty() { + Err(SecureModeError::CannotUnshareUserNamespaceAndChangeRoot( + "not available".into() + )) + } else { + Err(SecureModeError::CannotUnshareUserNamespaceAndChangeRoot( + format!("not available: {}", stderr) + )) + } + }, + Err(err) => + Err(SecureModeError::CannotUnshareUserNamespaceAndChangeRoot( + format!("could not start child process: {}", err) + )), + } + } else { + Err(SecureModeError::CannotUnshareUserNamespaceAndChangeRoot( + "only available on Linux".into() + )) + } + } +} + +/// Check if landlock is supported and return an error if not. +/// +/// We do this check by spawning a new process and trying to sandbox it. To get as close as possible +/// to running the check in a worker, we try it... in a worker. The expected return status is 0 on +/// success and -1 on failure. +async fn check_landlock( + #[cfg_attr(not(target_os = "linux"), allow(unused_variables))] + prepare_worker_program_path: &Path, +) -> SecureModeResult { + cfg_if::cfg_if! 
{ + if #[cfg(target_os = "linux")] { + match tokio::process::Command::new(prepare_worker_program_path) + .arg("--check-can-enable-landlock") + .output() + .await + { + Ok(output) if output.status.success() => Ok(()), + Ok(output) => { + let abi = + polkadot_node_core_pvf_common::worker::security::landlock::LANDLOCK_ABI as u8; + let stderr = std::str::from_utf8(&output.stderr) + .expect("child process writes a UTF-8 string to stderr; qed") + .trim(); + if stderr.is_empty() { + Err(SecureModeError::CannotEnableLandlock( + format!("landlock ABI {} not available", abi) + )) + } else { + Err(SecureModeError::CannotEnableLandlock( + format!("not available: {}", stderr) + )) + } + }, + Err(err) => + Err(SecureModeError::CannotEnableLandlock( + format!("could not start child process: {}", err) + )), + } + } else { + Err(SecureModeError::CannotEnableLandlock( + "only available on Linux".into() + )) + } + } +} + +/// Check if seccomp is supported and return an error if not. +/// +/// We do this check by spawning a new process and trying to sandbox it. To get as close as possible +/// to running the check in a worker, we try it... in a worker. The expected return status is 0 on +/// success and -1 on failure. +async fn check_seccomp( + #[cfg_attr(not(all(target_os = "linux", target_arch = "x86_64")), allow(unused_variables))] + prepare_worker_program_path: &Path, +) -> SecureModeResult { + cfg_if::cfg_if! { + if #[cfg(target_os = "linux")] { + cfg_if::cfg_if! { + if #[cfg(target_arch = "x86_64")] { + match tokio::process::Command::new(prepare_worker_program_path) + .arg("--check-can-enable-seccomp") + .output() + .await + { + Ok(output) if output.status.success() => Ok(()), + Ok(output) => { + let stderr = std::str::from_utf8(&output.stderr) + .expect("child process writes a UTF-8 string to stderr; qed") + .trim(); + if stderr.is_empty() { + Err(SecureModeError::CannotEnableSeccomp( + "not available".into() + )) + } else { + Err(SecureModeError::CannotEnableSeccomp( + format!("not available: {}", stderr) + )) + } + }, + Err(err) => + Err(SecureModeError::CannotEnableSeccomp( + format!("could not start child process: {}", err) + )), + } + } else { + Err(SecureModeError::CannotEnableSeccomp( + "only supported on CPUs from the x86_64 family (usually Intel or AMD)".into() + )) + } + } + } else { + cfg_if::cfg_if! { + if #[cfg(target_arch = "x86_64")] { + Err(SecureModeError::CannotEnableSeccomp( + "only supported on Linux".into() + )) + } else { + Err(SecureModeError::CannotEnableSeccomp( + "only supported on Linux and on CPUs from the x86_64 family (usually Intel or AMD).".into() + )) + } + } + } + } +} + +const AUDIT_LOG_PATH: &'static str = "/var/log/audit/audit.log"; +const SYSLOG_PATH: &'static str = "/var/log/syslog"; + +/// System audit log. +pub struct AuditLogFile { + file: File, + path: &'static str, +} + +impl AuditLogFile { + /// Looks for an audit log file on the system and opens it, seeking to the end to skip any + /// events from before this was called. + /// + /// A bit of a verbose name, but it should clue future refactorers not to move calls closer to + /// where the `AuditLogFile` is used. 
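+    ///
+    /// A rough usage sketch (the `job_pid` below stands in for whatever pid the worker reports
+    /// back for the job process):
+    ///
+    /// ```ignore
+    /// // Grab the handle *before* the job starts, so only new events are considered.
+    /// let audit_log_file = AuditLogFile::try_open_and_seek_to_end().await;
+    /// // ... start the job and wait for it to conclude or die ...
+    /// let violations = check_seccomp_violations_for_job(audit_log_file, job_pid).await;
+    /// ```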
+ pub async fn try_open_and_seek_to_end() -> Option { + let mut path = AUDIT_LOG_PATH; + let mut file = match OpenOptions::new().read(true).open(AUDIT_LOG_PATH).await { + Ok(file) => Ok(file), + Err(_) => { + path = SYSLOG_PATH; + OpenOptions::new().read(true).open(SYSLOG_PATH).await + }, + } + .ok()?; + + let _pos = file.seek(SeekFrom::End(0)).await; + + Some(Self { file, path }) + } + + async fn read_new_since_open(mut self) -> String { + let mut buf = String::new(); + let _len = self.file.read_to_string(&mut buf).await; + buf + } +} + +/// Check if a seccomp violation occurred for the given job process. As the syslog may be in a +/// different location, or seccomp auditing may be disabled, this function provides a best-effort +/// attempt only. +/// +/// The `audit_log_file` must have been obtained before the job started. It only allows reading +/// entries that were written since it was obtained, so that we do not consider events from previous +/// processes with the same pid. This can still be racy, but it's unlikely and fine for a +/// best-effort attempt. +pub async fn check_seccomp_violations_for_job( + audit_log_file: Option, + job_pid: i32, +) -> Vec { + let audit_event_pid_field = format!("pid={job_pid}"); + + let audit_log_file = match audit_log_file { + Some(file) => { + gum::trace!( + target: LOG_TARGET, + %job_pid, + audit_log_path = ?file.path, + "checking audit log for seccomp violations", + ); + file + }, + None => { + gum::warn!( + target: LOG_TARGET, + %job_pid, + "could not open either {AUDIT_LOG_PATH} or {SYSLOG_PATH} for reading audit logs" + ); + return vec![] + }, + }; + let events = audit_log_file.read_new_since_open().await; + + let mut violations = vec![]; + for event in events.lines() { + if let Some(syscall) = parse_audit_log_for_seccomp_event(event, &audit_event_pid_field) { + violations.push(syscall); + } + } + + violations +} + +fn parse_audit_log_for_seccomp_event(event: &str, audit_event_pid_field: &str) -> Option { + const SECCOMP_AUDIT_EVENT_TYPE: &'static str = "type=1326"; + + // Do a series of simple .contains instead of a regex, because I'm not sure if the fields are + // guaranteed to always be in the same order. + if !event.contains(SECCOMP_AUDIT_EVENT_TYPE) || !event.contains(&audit_event_pid_field) { + return None + } + + // Get the syscall. Let's avoid a dependency on regex just for this. 
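+    // Audit events consist of space-separated `key=value` fields (see the sample lines in the
+    // tests below), so scanning for the `syscall=` prefix is sufficient here.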
+ for field in event.split(" ") { + if let Some(syscall) = field.strip_prefix("syscall=") { + return syscall.parse::().ok() + } + } + + None +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_parse_audit_log_for_seccomp_event() { + let audit_event_pid_field = "pid=2559058"; + + assert_eq!( + parse_audit_log_for_seccomp_event( + r#"Oct 24 13:15:24 build kernel: [5883980.283910] audit: type=1326 audit(1698153324.786:23): auid=0 uid=0 gid=0 ses=2162 subj=unconfined pid=2559058 comm="polkadot-prepar" exe="/root/paritytech/polkadot-sdk-2/target/debug/polkadot-prepare-worker" sig=31 arch=c000003e syscall=53 compat=0 ip=0x7f7542c80d5e code=0x80000000"#, + audit_event_pid_field + ), + Some(53) + ); + // pid is wrong + assert_eq!( + parse_audit_log_for_seccomp_event( + r#"Oct 24 13:15:24 build kernel: [5883980.283910] audit: type=1326 audit(1698153324.786:23): auid=0 uid=0 gid=0 ses=2162 subj=unconfined pid=2559057 comm="polkadot-prepar" exe="/root/paritytech/polkadot-sdk-2/target/debug/polkadot-prepare-worker" sig=31 arch=c000003e syscall=53 compat=0 ip=0x7f7542c80d5e code=0x80000000"#, + audit_event_pid_field + ), + None + ); + // type is wrong + assert_eq!( + parse_audit_log_for_seccomp_event( + r#"Oct 24 13:15:24 build kernel: [5883980.283910] audit: type=1327 audit(1698153324.786:23): auid=0 uid=0 gid=0 ses=2162 subj=unconfined pid=2559057 comm="polkadot-prepar" exe="/root/paritytech/polkadot-sdk-2/target/debug/polkadot-prepare-worker" sig=31 arch=c000003e syscall=53 compat=0 ip=0x7f7542c80d5e code=0x80000000"#, + audit_event_pid_field + ), + None + ); + // no syscall field + assert_eq!( + parse_audit_log_for_seccomp_event( + r#"Oct 24 13:15:24 build kernel: [5883980.283910] audit: type=1327 audit(1698153324.786:23): auid=0 uid=0 gid=0 ses=2162 subj=unconfined pid=2559057 comm="polkadot-prepar" exe="/root/paritytech/polkadot-sdk-2/target/debug/polkadot-prepare-worker" sig=31 arch=c000003e compat=0 ip=0x7f7542c80d5e code=0x80000000"#, + audit_event_pid_field + ), + None + ); + } +} diff --git a/polkadot/node/core/pvf/src/testing.rs b/polkadot/node/core/pvf/src/testing.rs index 4301afc3cc7ea6e736a351470b39019b74f5f16b..400b65bfe7d832123b8d77f1ffdad867293a6eb2 100644 --- a/polkadot/node/core/pvf/src/testing.rs +++ b/polkadot/node/core/pvf/src/testing.rs @@ -14,14 +14,21 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Various things for testing other crates. -//! -//! N.B. This is not guarded with some feature flag. Overexposing items here may affect the final -//! artifact even for production builds. +//! Various utilities for testing. -pub use crate::worker_intf::{spawn_with_program_path, SpawnErr}; +pub use crate::{ + host::{EXECUTE_BINARY_NAME, PREPARE_BINARY_NAME}, + worker_intf::{spawn_with_program_path, SpawnErr}, +}; +use crate::get_worker_version; +use is_executable::IsExecutable; +use polkadot_node_primitives::NODE_VERSION; use polkadot_primitives::ExecutorParams; +use std::{ + path::PathBuf, + sync::{Mutex, OnceLock}, +}; /// A function that emulates the stitches together behaviors of the preparation and the execution /// worker in a single synchronous function. 
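/// That is, it prepares the given code and then executes the resulting artifact directly in the
/// calling process, without spawning any worker processes; it is only intended for use in tests.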
@@ -29,21 +36,92 @@ pub fn validate_candidate( code: &[u8], params: &[u8], ) -> Result, Box> { - use polkadot_node_core_pvf_execute_worker::Executor; - use polkadot_node_core_pvf_prepare_worker::{prepare, prevalidate}; + use polkadot_node_core_pvf_common::executor_intf::{prepare, prevalidate}; + use polkadot_node_core_pvf_execute_worker::execute_artifact; let code = sp_maybe_compressed_blob::decompress(code, 10 * 1024 * 1024) .expect("Decompressing code failed"); let blob = prevalidate(&code)?; - let compiled_artifact_blob = prepare(blob, &ExecutorParams::default())?; + let executor_params = ExecutorParams::default(); + let compiled_artifact_blob = prepare(blob, &executor_params)?; - let executor = Executor::new(ExecutorParams::default())?; let result = unsafe { // SAFETY: This is trivially safe since the artifact is obtained by calling `prepare` // and is written into a temporary directory in an unmodified state. - executor.execute(&compiled_artifact_blob, params)? + execute_artifact(&compiled_artifact_blob, &executor_params, params)? }; Ok(result) } + +/// Retrieves the worker paths and builds workers as needed. +/// +/// NOTE: This should only be called in dev code (tests, benchmarks) as it relies on the relative +/// paths of the built workers. +pub fn build_workers_and_get_paths(is_bench: bool) -> (PathBuf, PathBuf) { + // Only needs to be called once for the current process. + static WORKER_PATHS: OnceLock> = OnceLock::new(); + + fn build_workers(is_bench: bool) { + let mut build_args = vec![ + "build", + "--package=polkadot", + "--bin=polkadot-prepare-worker", + "--bin=polkadot-execute-worker", + ]; + if is_bench { + // Benches require --release. Regular tests are debug (no flag needed). + build_args.push("--release"); + } + let mut cargo = std::process::Command::new("cargo"); + let cmd = cargo + // wasm runtime not needed + .env("SKIP_WASM_BUILD", "1") + .args(build_args) + .stdout(std::process::Stdio::piped()); + + println!("INFO: calling `{cmd:?}`"); + let exit_status = cmd.status().expect("Failed to run the build program"); + + if !exit_status.success() { + eprintln!("ERROR: Failed to build workers: {}", exit_status.code().unwrap()); + std::process::exit(1); + } + } + + let mutex = WORKER_PATHS.get_or_init(|| { + let mut workers_path = std::env::current_exe().unwrap(); + workers_path.pop(); + workers_path.pop(); + let mut prepare_worker_path = workers_path.clone(); + prepare_worker_path.push(PREPARE_BINARY_NAME); + let mut execute_worker_path = workers_path.clone(); + execute_worker_path.push(EXECUTE_BINARY_NAME); + + // explain why a build happens + if !prepare_worker_path.is_executable() { + println!("WARN: Prepare worker does not exist or is not executable. Workers directory: {:?}", workers_path); + } + if !execute_worker_path.is_executable() { + println!("WARN: Execute worker does not exist or is not executable. 
Workers directory: {:?}", workers_path); + } + if let Ok(ver) = get_worker_version(&prepare_worker_path) { + if ver != NODE_VERSION { + println!("WARN: Prepare worker version {ver} does not match node version {NODE_VERSION}; worker path: {prepare_worker_path:?}"); + } + } + if let Ok(ver) = get_worker_version(&execute_worker_path) { + if ver != NODE_VERSION { + println!("WARN: Execute worker version {ver} does not match node version {NODE_VERSION}; worker path: {execute_worker_path:?}"); + } + } + + build_workers(is_bench); + + Mutex::new((prepare_worker_path, execute_worker_path)) + }); + + let guard = mutex.lock().unwrap(); + (guard.0.clone(), guard.1.clone()) +} diff --git a/polkadot/node/core/pvf/src/worker_intf.rs b/polkadot/node/core/pvf/src/worker_intf.rs index bd85d84055ce5e6136a97fac329ee20a39744137..9d6907c10929f1f7ee8ddb16c7330262a5a9beac 100644 --- a/polkadot/node/core/pvf/src/worker_intf.rs +++ b/polkadot/node/core/pvf/src/worker_intf.rs @@ -71,6 +71,7 @@ pub async fn spawn_with_program_path( with_transient_socket_path(debug_id, |socket_path| { let socket_path = socket_path.to_owned(); + let worker_dir_path = worker_dir.path().to_owned(); async move { let listener = UnixListener::bind(&socket_path).map_err(|err| { @@ -91,7 +92,7 @@ pub async fn spawn_with_program_path( &program_path, &extra_args, &socket_path, - &worker_dir.path, + &worker_dir_path, security_status, ) .map_err(|err| { @@ -100,7 +101,7 @@ pub async fn spawn_with_program_path( %debug_id, ?program_path, ?extra_args, - ?worker_dir.path, + ?worker_dir_path, ?socket_path, "cannot spawn a worker: {:?}", err, @@ -108,7 +109,6 @@ pub async fn spawn_with_program_path( SpawnErr::ProcessSpawn })?; - let worker_dir_path = worker_dir.path.clone(); futures::select! { accept_result = listener.accept().fuse() => { let (stream, _) = accept_result.map_err(|err| { @@ -150,7 +150,42 @@ where F: FnOnce(&Path) -> Fut, Fut: futures::Future> + 'static, { - let socket_path = tmppath(&format!("pvf-host-{}", debug_id)) + /// Returns a path under [`std::env::temp_dir`]. The path name will start with the given prefix. + /// + /// There is only a certain number of retries. If exceeded this function will give up and return + /// an error. + pub async fn tmppath(prefix: &str) -> io::Result { + fn make_tmppath(prefix: &str, dir: &Path) -> PathBuf { + use rand::distributions::Alphanumeric; + + const DESCRIMINATOR_LEN: usize = 10; + + let mut buf = Vec::with_capacity(prefix.len() + DESCRIMINATOR_LEN); + buf.extend(prefix.as_bytes()); + buf.extend(rand::thread_rng().sample_iter(&Alphanumeric).take(DESCRIMINATOR_LEN)); + + let s = std::str::from_utf8(&buf) + .expect("the string is collected from a valid utf-8 sequence; qed"); + + let mut path = dir.to_owned(); + path.push(s); + path + } + + const NUM_RETRIES: usize = 50; + + let dir = std::env::temp_dir(); + for _ in 0..NUM_RETRIES { + let tmp_path = make_tmppath(prefix, &dir); + if !tmp_path.exists() { + return Ok(tmp_path) + } + } + + Err(io::Error::new(io::ErrorKind::Other, "failed to create a temporary path")) + } + + let socket_path = tmppath(&format!("pvf-host-{}-", debug_id)) .await .map_err(|_| SpawnErr::TmpPath)?; let result = f(&socket_path).await; @@ -162,46 +197,6 @@ where result } -/// Returns a path under the given `dir`. The path name will start with the given prefix. -/// -/// There is only a certain number of retries. If exceeded this function will give up and return an -/// error. 
-pub async fn tmppath_in(prefix: &str, dir: &Path) -> io::Result { - fn make_tmppath(prefix: &str, dir: &Path) -> PathBuf { - use rand::distributions::Alphanumeric; - - const DESCRIMINATOR_LEN: usize = 10; - - let mut buf = Vec::with_capacity(prefix.len() + DESCRIMINATOR_LEN); - buf.extend(prefix.as_bytes()); - buf.extend(rand::thread_rng().sample_iter(&Alphanumeric).take(DESCRIMINATOR_LEN)); - - let s = std::str::from_utf8(&buf) - .expect("the string is collected from a valid utf-8 sequence; qed"); - - let mut path = dir.to_owned(); - path.push(s); - path - } - - const NUM_RETRIES: usize = 50; - - for _ in 0..NUM_RETRIES { - let tmp_path = make_tmppath(prefix, dir); - if !tmp_path.exists() { - return Ok(tmp_path) - } - } - - Err(io::Error::new(io::ErrorKind::Other, "failed to create a temporary path")) -} - -/// The same as [`tmppath_in`], but uses [`std::env::temp_dir`] as the directory. -pub async fn tmppath(prefix: &str) -> io::Result { - let temp_dir = PathBuf::from(std::env::temp_dir()); - tmppath_in(prefix, &temp_dir).await -} - /// A struct that represents an idle worker. /// /// This struct is supposed to be used as a token that is passed by move into a subroutine that @@ -224,8 +219,6 @@ pub struct IdleWorker { pub enum SpawnErr { /// Cannot obtain a temporary path location. TmpPath, - /// An FS error occurred. - Fs(String), /// Cannot bind the socket to the given path. Bind, /// An error happened during accepting a connection to the socket. @@ -245,7 +238,7 @@ pub enum SpawnErr { /// has been terminated. Since the worker is running in another process it is obviously not /// necessary to poll this future to make the worker run, it's only for termination detection. /// -/// This future relies on the fact that a child process's stdout `fd` is closed upon it's +/// This future relies on the fact that a child process's stdout `fd` is closed upon its /// termination. #[pin_project] pub struct WorkerHandle { @@ -270,6 +263,9 @@ impl WorkerHandle { if security_status.can_enable_landlock { args.push("--can-enable-landlock".to_string()); } + if security_status.can_enable_seccomp { + args.push("--can-enable-seccomp".to_string()); + } if security_status.can_unshare_user_namespace_and_change_root { args.push("--can-unshare-user-namespace-and-change-root".to_string()); } @@ -283,6 +279,7 @@ impl WorkerHandle { if let Ok(value) = std::env::var("RUST_LOG") { command.env("RUST_LOG", value); } + let mut child = command .args(extra_args) .arg("--socket-path") @@ -415,26 +412,22 @@ pub async fn framed_recv(r: &mut (impl AsyncRead + Unpin)) -> io::Result /// ``` #[derive(Debug)] pub struct WorkerDir { - pub path: PathBuf, + tempdir: tempfile::TempDir, } impl WorkerDir { /// Creates a new, empty worker dir with a random name in the given cache dir. pub async fn new(debug_id: &'static str, cache_dir: &Path) -> Result { let prefix = format!("worker-dir-{}-", debug_id); - let path = tmppath_in(&prefix, cache_dir).await.map_err(|_| SpawnErr::TmpPath)?; - tokio::fs::create_dir(&path) - .await - .map_err(|err| SpawnErr::Fs(err.to_string()))?; - Ok(Self { path }) + let tempdir = tempfile::Builder::new() + .prefix(&prefix) + .tempdir_in(cache_dir) + .map_err(|_| SpawnErr::TmpPath)?; + Ok(Self { tempdir }) } -} -// Try to clean up the temporary worker dir at the end of the worker's lifetime. It should be wiped -// on startup, but we make a best effort not to leave it around. 
-impl Drop for WorkerDir { - fn drop(&mut self) { - let _ = std::fs::remove_dir_all(&self.path); + pub fn path(&self) -> &Path { + self.tempdir.path() } } @@ -449,7 +442,7 @@ impl Drop for WorkerDir { /// artifacts from previous jobs. pub fn clear_worker_dir_path(worker_dir_path: &Path) -> io::Result<()> { fn remove_dir_contents(path: &Path) -> io::Result<()> { - for entry in std::fs::read_dir(&path)? { + for entry in std::fs::read_dir(path)? { let entry = entry?; let path = entry.path(); diff --git a/polkadot/node/core/pvf/tests/it/adder.rs b/polkadot/node/core/pvf/tests/it/adder.rs index 8bdd09db208a92f5510f0c60856bab6a1c3103e0..9a7ddcb408909e6fcac2c812921174e81b7911bb 100644 --- a/polkadot/node/core/pvf/tests/it/adder.rs +++ b/polkadot/node/core/pvf/tests/it/adder.rs @@ -14,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . +//! PVF host integration tests checking the chain production pipeline. + use super::TestHost; use adder::{hash_state, BlockData, HeadData}; use parity_scale_codec::{Decode, Encode}; @@ -28,7 +30,7 @@ async fn execute_good_block_on_parent() { let block_data = BlockData { state: 0, add: 512 }; - let host = TestHost::new(); + let host = TestHost::new().await; let ret = host .validate_candidate( @@ -56,7 +58,7 @@ async fn execute_good_chain_on_parent() { let mut parent_hash = [0; 32]; let mut last_state = 0; - let host = TestHost::new(); + let host = TestHost::new().await; for (number, add) in (0..10).enumerate() { let parent_head = @@ -98,7 +100,7 @@ async fn execute_bad_block_on_parent() { add: 256, }; - let host = TestHost::new(); + let host = TestHost::new().await; let _err = host .validate_candidate( @@ -117,7 +119,7 @@ async fn execute_bad_block_on_parent() { #[tokio::test] async fn stress_spawn() { - let host = std::sync::Arc::new(TestHost::new()); + let host = std::sync::Arc::new(TestHost::new().await); async fn execute(host: std::sync::Arc) { let parent_head = HeadData { number: 0, parent_hash: [0; 32], post_state: hash_state(0) }; @@ -149,9 +151,12 @@ async fn stress_spawn() { // With one worker, run multiple execution jobs serially. They should not conflict. #[tokio::test] async fn execute_can_run_serially() { - let host = std::sync::Arc::new(TestHost::new_with_config(|cfg| { - cfg.execute_workers_max_num = 1; - })); + let host = std::sync::Arc::new( + TestHost::new_with_config(|cfg| { + cfg.execute_workers_max_num = 1; + }) + .await, + ); async fn execute(host: std::sync::Arc) { let parent_head = HeadData { number: 0, parent_hash: [0; 32], post_state: hash_state(0) }; diff --git a/polkadot/node/core/pvf/tests/it/main.rs b/polkadot/node/core/pvf/tests/it/main.rs index f699b5840d8faa7e0a51e859b735caed02f3e621..b53d598cd0f74fda710074d46769da1b7d4aef4e 100644 --- a/polkadot/node/core/pvf/tests/it/main.rs +++ b/polkadot/node/core/pvf/tests/it/main.rs @@ -14,27 +14,29 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -#[cfg(feature = "ci-only-tests")] +//! General PVF host integration tests checking the functionality of the PVF host itself. 
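+//!
+//! These tests build the real worker binaries on demand (see `build_workers_and_get_paths`) and
+//! drive a full PVF host against a temporary cache directory, so they exercise the actual
+//! worker-spawning, preparation and execution paths rather than mocks.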
+ use assert_matches::assert_matches; use parity_scale_codec::Encode as _; +#[cfg(all(feature = "ci-only-tests", target_os = "linux"))] +use polkadot_node_core_pvf::SecurityStatus; use polkadot_node_core_pvf::{ - start, Config, InvalidCandidate, Metrics, PrepareError, PrepareJobKind, PrepareStats, - PvfPrepData, ValidationError, ValidationHost, JOB_TIMEOUT_WALL_CLOCK_FACTOR, + start, testing::build_workers_and_get_paths, Config, InvalidCandidate, Metrics, PrepareError, + PrepareJobKind, PvfPrepData, ValidationError, ValidationHost, JOB_TIMEOUT_WALL_CLOCK_FACTOR, }; use polkadot_parachain_primitives::primitives::{BlockData, ValidationParams, ValidationResult}; -use polkadot_primitives::ExecutorParams; - -#[cfg(feature = "ci-only-tests")] -use polkadot_primitives::ExecutorParam; +use polkadot_primitives::{ExecutorParam, ExecutorParams}; use std::time::Duration; use tokio::sync::Mutex; mod adder; +#[cfg(target_os = "linux")] +mod process; mod worker_common; -const TEST_EXECUTION_TIMEOUT: Duration = Duration::from_secs(3); -const TEST_PREPARATION_TIMEOUT: Duration = Duration::from_secs(3); +const TEST_EXECUTION_TIMEOUT: Duration = Duration::from_secs(6); +const TEST_PREPARATION_TIMEOUT: Duration = Duration::from_secs(6); struct TestHost { cache_dir: tempfile::TempDir, @@ -42,21 +44,16 @@ struct TestHost { } impl TestHost { - fn new() -> Self { - Self::new_with_config(|_| ()) + async fn new() -> Self { + Self::new_with_config(|_| ()).await } - fn new_with_config(f: F) -> Self + async fn new_with_config(f: F) -> Self where F: FnOnce(&mut Config), { - let mut workers_path = std::env::current_exe().unwrap(); - workers_path.pop(); - workers_path.pop(); - let mut prepare_worker_path = workers_path.clone(); - prepare_worker_path.push("polkadot-prepare-worker"); - let mut execute_worker_path = workers_path.clone(); - execute_worker_path.push("polkadot-execute-worker"); + let (prepare_worker_path, execute_worker_path) = build_workers_and_get_paths(false); + let cache_dir = tempfile::tempdir().unwrap(); let mut config = Config::new( cache_dir.path().to_owned(), @@ -65,7 +62,7 @@ impl TestHost { execute_worker_path, ); f(&mut config); - let (host, task) = start(config, Metrics::default()); + let (host, task) = start(config, Metrics::default()).await.unwrap(); let _ = tokio::task::spawn(task); Self { cache_dir, host: Mutex::new(host) } } @@ -74,7 +71,7 @@ impl TestHost { &self, code: &[u8], executor_params: ExecutorParams, - ) -> Result { + ) -> Result<(), PrepareError> { let (result_tx, result_rx) = futures::channel::oneshot::channel(); let code = sp_maybe_compressed_blob::decompress(code, 16 * 1024 * 1024) @@ -127,11 +124,35 @@ impl TestHost { .unwrap(); result_rx.await.unwrap() } + + #[cfg(all(feature = "ci-only-tests", target_os = "linux"))] + async fn security_status(&self) -> SecurityStatus { + self.host.lock().await.security_status.clone() + } +} + +#[tokio::test] +async fn prepare_job_terminates_on_timeout() { + let host = TestHost::new().await; + + let start = std::time::Instant::now(); + let result = host + .precheck_pvf(rococo_runtime::WASM_BINARY.unwrap(), Default::default()) + .await; + + match result { + Err(PrepareError::TimedOut) => {}, + r => panic!("{:?}", r), + } + + let duration = std::time::Instant::now().duration_since(start); + assert!(duration >= TEST_PREPARATION_TIMEOUT); + assert!(duration < TEST_PREPARATION_TIMEOUT * JOB_TIMEOUT_WALL_CLOCK_FACTOR); } #[tokio::test] -async fn terminates_on_timeout() { - let host = TestHost::new(); +async fn execute_job_terminates_on_timeout() { + 
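+    // The `halt` test PVF never completes execution, so the job should be cut off by the hard
+    // wall-clock timeout and surface as `InvalidCandidate::HardTimeout`.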
let host = TestHost::new().await; let start = std::time::Instant::now(); let result = host @@ -148,7 +169,7 @@ async fn terminates_on_timeout() { .await; match result { - Err(ValidationError::InvalidCandidate(InvalidCandidate::HardTimeout)) => {}, + Err(ValidationError::Invalid(InvalidCandidate::HardTimeout)) => {}, r => panic!("{:?}", r), } @@ -161,7 +182,7 @@ async fn terminates_on_timeout() { #[tokio::test] async fn ensure_parallel_execution() { // Run some jobs that do not complete, thus timing out. - let host = TestHost::new(); + let host = TestHost::new().await; let execute_pvf_future_1 = host.validate_candidate( halt::wasm_binary_unwrap(), ValidationParams { @@ -188,8 +209,8 @@ async fn ensure_parallel_execution() { assert_matches!( (res1, res2), ( - Err(ValidationError::InvalidCandidate(InvalidCandidate::HardTimeout)), - Err(ValidationError::InvalidCandidate(InvalidCandidate::HardTimeout)) + Err(ValidationError::Invalid(InvalidCandidate::HardTimeout)), + Err(ValidationError::Invalid(InvalidCandidate::HardTimeout)) ) ); @@ -208,7 +229,8 @@ async fn ensure_parallel_execution() { async fn execute_queue_doesnt_stall_if_workers_died() { let host = TestHost::new_with_config(|cfg| { cfg.execute_workers_max_num = 5; - }); + }) + .await; // Here we spawn 8 validation jobs for the `halt` PVF and share those between 5 workers. The // first five jobs should timeout and the workers killed. For the next 3 jobs a new batch of @@ -245,7 +267,8 @@ async fn execute_queue_doesnt_stall_if_workers_died() { async fn execute_queue_doesnt_stall_with_varying_executor_params() { let host = TestHost::new_with_config(|cfg| { cfg.execute_workers_max_num = 2; - }); + }) + .await; let executor_params_1 = ExecutorParams::default(); let executor_params_2 = ExecutorParams::from(&[ExecutorParam::StackLogicalMax(1024)][..]); @@ -293,28 +316,12 @@ async fn execute_queue_doesnt_stall_with_varying_executor_params() { // Test that deleting a prepared artifact does not lead to a dispute when we try to execute it. #[tokio::test] async fn deleting_prepared_artifact_does_not_dispute() { - let host = TestHost::new(); + let host = TestHost::new().await; let cache_dir = host.cache_dir.path(); - let result = host - .validate_candidate( - halt::wasm_binary_unwrap(), - ValidationParams { - block_data: BlockData(Vec::new()), - parent_head: Default::default(), - relay_parent_number: 1, - relay_parent_storage_root: Default::default(), - }, - Default::default(), - ) - .await; - - match result { - Err(ValidationError::InvalidCandidate(InvalidCandidate::HardTimeout)) => {}, - r => panic!("{:?}", r), - } + let _stats = host.precheck_pvf(halt::wasm_binary_unwrap(), Default::default()).await.unwrap(); - // Delete the prepared artifact. + // Manually delete the prepared artifact from disk. The in-memory artifacts table won't change. { // Get the artifact path (asserting it exists). let mut cache_dir: Vec<_> = std::fs::read_dir(cache_dir).unwrap().collect(); @@ -329,7 +336,7 @@ async fn deleting_prepared_artifact_does_not_dispute() { std::fs::remove_file(artifact_path.path()).unwrap(); } - // Try to validate again, artifact should get recreated. + // Try to validate, artifact should get recreated. 
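+    // (The host's in-memory artifacts table still lists the artifact, so this specifically
+    // exercises how validation copes with an artifact that has vanished from disk.)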
let result = host .validate_candidate( halt::wasm_binary_unwrap(), @@ -344,17 +351,55 @@ async fn deleting_prepared_artifact_does_not_dispute() { .await; match result { - Err(ValidationError::InvalidCandidate(InvalidCandidate::HardTimeout)) => {}, + Err(ValidationError::Invalid(InvalidCandidate::HardTimeout)) => {}, r => panic!("{:?}", r), } } +// This test checks if the adder parachain runtime can be prepared with 10Mb preparation memory +// limit enforced. At the moment of writing, the limit if far enough to prepare the PVF. If it +// starts failing, either Wasmtime version has changed, or the PVF code itself has changed, and +// more memory is required now. Multi-threaded preparation, if ever enabled, may also affect +// memory consumption. +#[tokio::test] +async fn prechecking_within_memory_limits() { + let host = TestHost::new().await; + let result = host + .precheck_pvf( + ::adder::wasm_binary_unwrap(), + ExecutorParams::from(&[ExecutorParam::PrecheckingMaxMemory(10 * 1024 * 1024)][..]), + ) + .await; + + assert_matches!(result, Ok(_)); +} + +// This test checks if the adder parachain runtime can be prepared with 512Kb preparation memory +// limit enforced. At the moment of writing, the limit if not enough to prepare the PVF, and the +// preparation is supposed to generate an error. If the test starts failing, either Wasmtime +// version has changed, or the PVF code itself has changed, and less memory is required now. +#[tokio::test] +async fn prechecking_out_of_memory() { + use polkadot_node_core_pvf::PrepareError; + + let host = TestHost::new().await; + let result = host + .precheck_pvf( + ::adder::wasm_binary_unwrap(), + ExecutorParams::from(&[ExecutorParam::PrecheckingMaxMemory(512 * 1024)][..]), + ) + .await; + + assert_matches!(result, Err(PrepareError::OutOfMemory)); +} + // With one worker, run multiple preparation jobs serially. They should not conflict. #[tokio::test] async fn prepare_can_run_serially() { let host = TestHost::new_with_config(|cfg| { cfg.prepare_workers_hard_max_num = 1; - }); + }) + .await; let _stats = host .precheck_pvf(::adder::wasm_binary_unwrap(), Default::default()) @@ -364,3 +409,28 @@ async fn prepare_can_run_serially() { // Prepare a different wasm blob to prevent skipping work. let _stats = host.precheck_pvf(halt::wasm_binary_unwrap(), Default::default()).await.unwrap(); } + +// CI machines should be able to enable all the security features. +#[cfg(all(feature = "ci-only-tests", target_os = "linux"))] +#[tokio::test] +async fn all_security_features_work() { + // Landlock is only available starting Linux 5.13, and we may be testing on an old kernel. + let sysinfo = sc_sysinfo::gather_sysinfo(); + // The version will look something like "5.15.0-87-generic". 
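+    // Only the major and minor components matter for this check: any major version >= 6 is
+    // treated as landlock-capable, otherwise the minor version must be >= 13 (good enough for
+    // the 5.x/6.x kernels CI runs on).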
+ let version = sysinfo.linux_kernel.unwrap(); + let version_split: Vec<&str> = version.split(".").collect(); + let major: u32 = version_split[0].parse().unwrap(); + let minor: u32 = version_split[1].parse().unwrap(); + let can_enable_landlock = if major >= 6 { true } else { minor >= 13 }; + + let host = TestHost::new().await; + + assert_eq!( + host.security_status().await, + SecurityStatus { + can_enable_landlock, + can_enable_seccomp: true, + can_unshare_user_namespace_and_change_root: true, + } + ); +} diff --git a/polkadot/node/core/pvf/tests/it/process.rs b/polkadot/node/core/pvf/tests/it/process.rs new file mode 100644 index 0000000000000000000000000000000000000000..b742acb15d028caf64f2e4c4d147fa31307e6e8d --- /dev/null +++ b/polkadot/node/core/pvf/tests/it/process.rs @@ -0,0 +1,385 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Test unexpected behaviors of the spawned processes. We test both worker processes (directly +//! spawned by the host) and job processes (spawned by the workers to securely perform PVF jobs). + +use super::TestHost; +use assert_matches::assert_matches; +use polkadot_node_core_pvf::{ + InvalidCandidate, PossiblyInvalidError, PrepareError, ValidationError, +}; +use polkadot_parachain_primitives::primitives::{BlockData, ValidationParams}; +use procfs::process; +use rusty_fork::rusty_fork_test; +use std::time::Duration; + +const PREPARE_PROCESS_NAME: &'static str = "polkadot-prepare-worker"; +const EXECUTE_PROCESS_NAME: &'static str = "polkadot-execute-worker"; + +const SIGNAL_KILL: i32 = 9; +const SIGNAL_STOP: i32 = 19; + +fn send_signal_by_sid_and_name( + sid: i32, + exe_name: &'static str, + is_direct_child: bool, + signal: i32, +) { + let process = find_process_by_sid_and_name(sid, exe_name, is_direct_child); + assert_eq!(unsafe { libc::kill(process.pid(), signal) }, 0); +} +fn get_num_threads_by_sid_and_name(sid: i32, exe_name: &'static str, is_direct_child: bool) -> i64 { + let process = find_process_by_sid_and_name(sid, exe_name, is_direct_child); + process.stat().unwrap().num_threads +} + +fn find_process_by_sid_and_name( + sid: i32, + exe_name: &'static str, + is_direct_child: bool, +) -> process::Process { + let all_processes: Vec = process::all_processes() + .expect("Can't read /proc") + .filter_map(|p| match p { + Ok(p) => Some(p), // happy path + Err(e) => match e { + // process vanished during iteration, ignore it + procfs::ProcError::NotFound(_) => None, + x => { + panic!("some unknown error: {}", x); + }, + }, + }) + .collect(); + + let mut found = None; + for process in all_processes { + let stat = process.stat().unwrap(); + + if stat.session != sid || !process.exe().unwrap().to_str().unwrap().contains(exe_name) { + continue + } + // The workers are direct children of the current process, the worker job processes are not + // (they are children of the workers). 
+ let process_is_direct_child = stat.ppid as u32 == std::process::id(); + if is_direct_child != process_is_direct_child { + continue + } + + if found.is_some() { + panic!("Found more than one process") + } + found = Some(process); + } + found.expect("Should have found the expected process") +} + +// Run these tests in their own processes with rusty-fork. They work by each creating a new session, +// then doing something with the child process that matches the session ID and expected process +// name. +rusty_fork_test! { + // What happens when the prepare worker (not the job) times out? + #[test] + fn prepare_worker_timeout() { + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let host = TestHost::new().await; + + // Create a new session and get the session ID. + let sid = unsafe { libc::setsid() }; + assert!(sid > 0); + + let (result, _) = futures::join!( + // Choose a job that would normally take the entire timeout. + host.precheck_pvf(rococo_runtime::WASM_BINARY.unwrap(), Default::default()), + // Send a stop signal to pause the worker. + async { + tokio::time::sleep(Duration::from_secs(1)).await; + send_signal_by_sid_and_name(sid, PREPARE_PROCESS_NAME, true, SIGNAL_STOP); + } + ); + + assert_matches!(result, Err(PrepareError::TimedOut)); + }) + } + + // What happens when the execute worker (not the job) times out? + #[test] + fn execute_worker_timeout() { + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let host = TestHost::new().await; + + // Create a new session and get the session ID. + let sid = unsafe { libc::setsid() }; + assert!(sid > 0); + + // Prepare the artifact ahead of time. + let binary = halt::wasm_binary_unwrap(); + host.precheck_pvf(binary, Default::default()).await.unwrap(); + + let (result, _) = futures::join!( + // Choose an job that would normally take the entire timeout. + host.validate_candidate( + binary, + ValidationParams { + block_data: BlockData(Vec::new()), + parent_head: Default::default(), + relay_parent_number: 1, + relay_parent_storage_root: Default::default(), + }, + Default::default(), + ), + // Send a stop signal to pause the worker. + async { + tokio::time::sleep(Duration::from_secs(1)).await; + send_signal_by_sid_and_name(sid, EXECUTE_PROCESS_NAME, true, SIGNAL_STOP); + } + ); + + assert_matches!( + result, + Err(ValidationError::Invalid(InvalidCandidate::HardTimeout)) + ); + }) + } + + // What happens when the prepare worker dies in the middle of a job? + #[test] + fn prepare_worker_killed_during_job() { + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let host = TestHost::new().await; + + // Create a new session and get the session ID. + let sid = unsafe { libc::setsid() }; + assert!(sid > 0); + + let (result, _) = futures::join!( + // Choose a job that would normally take the entire timeout. + host.precheck_pvf(rococo_runtime::WASM_BINARY.unwrap(), Default::default()), + // Run a future that kills the job while it's running. + async { + tokio::time::sleep(Duration::from_secs(1)).await; + send_signal_by_sid_and_name(sid, PREPARE_PROCESS_NAME, true, SIGNAL_KILL); + } + ); + + assert_matches!(result, Err(PrepareError::IoErr(_))); + }) + } + + // What happens when the execute worker dies in the middle of a job? + #[test] + fn execute_worker_killed_during_job() { + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let host = TestHost::new().await; + + // Create a new session and get the session ID. 
+ let sid = unsafe { libc::setsid() }; + assert!(sid > 0); + + // Prepare the artifact ahead of time. + let binary = halt::wasm_binary_unwrap(); + host.precheck_pvf(binary, Default::default()).await.unwrap(); + + let (result, _) = futures::join!( + // Choose an job that would normally take the entire timeout. + host.validate_candidate( + binary, + ValidationParams { + block_data: BlockData(Vec::new()), + parent_head: Default::default(), + relay_parent_number: 1, + relay_parent_storage_root: Default::default(), + }, + Default::default(), + ), + // Run a future that kills the job while it's running. + async { + tokio::time::sleep(Duration::from_secs(1)).await; + send_signal_by_sid_and_name(sid, EXECUTE_PROCESS_NAME, true, SIGNAL_KILL); + } + ); + + assert_matches!( + result, + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)) + ); + }) + } + + // What happens when the forked prepare job dies in the middle of its job? + #[test] + fn forked_prepare_job_killed_during_job() { + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let host = TestHost::new().await; + + // Create a new session and get the session ID. + let sid = unsafe { libc::setsid() }; + assert!(sid > 0); + + let (result, _) = futures::join!( + // Choose a job that would normally take the entire timeout. + host.precheck_pvf(rococo_runtime::WASM_BINARY.unwrap(), Default::default()), + // Run a future that kills the job while it's running. + async { + tokio::time::sleep(Duration::from_secs(1)).await; + send_signal_by_sid_and_name(sid, PREPARE_PROCESS_NAME, false, SIGNAL_KILL); + } + ); + + // Note that we get a more specific error if the job died than if the whole worker died. + assert_matches!( + result, + Err(PrepareError::JobDied{ err, job_pid: _ }) if err == "received signal: SIGKILL" + ); + }) + } + + // What happens when the forked execute job dies in the middle of its job? + #[test] + fn forked_execute_job_killed_during_job() { + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let host = TestHost::new().await; + + // Create a new session and get the session ID. + let sid = unsafe { libc::setsid() }; + assert!(sid > 0); + + // Prepare the artifact ahead of time. + let binary = halt::wasm_binary_unwrap(); + host.precheck_pvf(binary, Default::default()).await.unwrap(); + + let (result, _) = futures::join!( + // Choose a job that would normally take the entire timeout. + host.validate_candidate( + binary, + ValidationParams { + block_data: BlockData(Vec::new()), + parent_head: Default::default(), + relay_parent_number: 1, + relay_parent_storage_root: Default::default(), + }, + Default::default(), + ), + // Run a future that kills the job while it's running. + async { + tokio::time::sleep(Duration::from_secs(1)).await; + send_signal_by_sid_and_name(sid, EXECUTE_PROCESS_NAME, false, SIGNAL_KILL); + } + ); + + // Note that we get a more specific error if the job died than if the whole worker died. + assert_matches!( + result, + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousJobDeath(err))) + if err == "received signal: SIGKILL" + ); + }) + } + + // Ensure that the spawned prepare worker is single-threaded. + // + // See `run_worker` for why we need this invariant. + #[test] + fn ensure_prepare_processes_have_correct_num_threads() { + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let host = TestHost::new().await; + + // Create a new session and get the session ID. 
+ let sid = unsafe { libc::setsid() }; + assert!(sid > 0); + + let _ = futures::join!( + // Choose a job that would normally take the entire timeout. + host.precheck_pvf(rococo_runtime::WASM_BINARY.unwrap(), Default::default()), + // Run a future that kills the job while it's running. + async { + tokio::time::sleep(Duration::from_secs(1)).await; + assert_eq!( + get_num_threads_by_sid_and_name(sid, PREPARE_PROCESS_NAME, true), + 1 + ); + // Child job should have three threads: main thread, execute thread, CPU time + // monitor, and memory tracking. + assert_eq!( + get_num_threads_by_sid_and_name(sid, PREPARE_PROCESS_NAME, false), + 4 + ); + + // End the test. + send_signal_by_sid_and_name(sid, PREPARE_PROCESS_NAME, true, SIGNAL_KILL); + } + ); + }) + } + + // Ensure that the spawned execute worker is single-threaded. + // + // See `run_worker` for why we need this invariant. + #[test] + fn ensure_execute_processes_have_correct_num_threads() { + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let host = TestHost::new().await; + + // Create a new session and get the session ID. + let sid = unsafe { libc::setsid() }; + assert!(sid > 0); + + // Prepare the artifact ahead of time. + let binary = halt::wasm_binary_unwrap(); + host.precheck_pvf(binary, Default::default()).await.unwrap(); + + let _ = futures::join!( + // Choose a job that would normally take the entire timeout. + host.validate_candidate( + binary, + ValidationParams { + block_data: BlockData(Vec::new()), + parent_head: Default::default(), + relay_parent_number: 1, + relay_parent_storage_root: Default::default(), + }, + Default::default(), + ), + // Run a future that tests the thread count while the worker is running. + async { + tokio::time::sleep(Duration::from_secs(1)).await; + assert_eq!( + get_num_threads_by_sid_and_name(sid, EXECUTE_PROCESS_NAME, true), + 1 + ); + // Child job should have three threads: main thread, execute thread, and CPU + // time monitor. + assert_eq!( + get_num_threads_by_sid_and_name(sid, EXECUTE_PROCESS_NAME, false), + 3 + ); + + // End the test. + send_signal_by_sid_and_name(sid, EXECUTE_PROCESS_NAME, true, SIGNAL_KILL); + } + ); + }) + } +} diff --git a/polkadot/node/core/pvf/tests/it/worker_common.rs b/polkadot/node/core/pvf/tests/it/worker_common.rs index 5379d29556c76aff3a489d7d0a967d4b6a503637..0d33af7e096cdcb4df8432498adde3a925ff1030 100644 --- a/polkadot/node/core/pvf/tests/it/worker_common.rs +++ b/polkadot/node/core/pvf/tests/it/worker_common.rs @@ -15,27 +15,21 @@ // along with Polkadot. If not, see . use polkadot_node_core_pvf::{ - testing::{spawn_with_program_path, SpawnErr}, + testing::{build_workers_and_get_paths, spawn_with_program_path, SpawnErr}, SecurityStatus, }; use std::{env, time::Duration}; -fn worker_path(name: &str) -> std::path::PathBuf { - let mut worker_path = std::env::current_exe().unwrap(); - worker_path.pop(); - worker_path.pop(); - worker_path.push(name); - worker_path -} - // Test spawning a program that immediately exits with a failure code. 
#[tokio::test] async fn spawn_immediate_exit() { + let (prepare_worker_path, _) = build_workers_and_get_paths(false); + // There's no explicit `exit` subcommand in the worker; it will panic on an unknown // subcommand anyway let result = spawn_with_program_path( "integration-test", - worker_path("polkadot-prepare-worker"), + prepare_worker_path, &env::temp_dir(), &["exit"], Duration::from_secs(2), @@ -47,9 +41,11 @@ async fn spawn_immediate_exit() { #[tokio::test] async fn spawn_timeout() { + let (_, execute_worker_path) = build_workers_and_get_paths(false); + let result = spawn_with_program_path( "integration-test", - worker_path("polkadot-execute-worker"), + execute_worker_path, &env::temp_dir(), &["test-sleep"], Duration::from_secs(2), @@ -61,9 +57,11 @@ async fn spawn_timeout() { #[tokio::test] async fn should_connect() { + let (prepare_worker_path, _) = build_workers_and_get_paths(false); + let _ = spawn_with_program_path( "integration-test", - worker_path("polkadot-prepare-worker"), + prepare_worker_path, &env::temp_dir(), &["prepare-worker"], Duration::from_secs(2), diff --git a/polkadot/node/core/runtime-api/Cargo.toml b/polkadot/node/core/runtime-api/Cargo.toml index b16a501686d3503ae476e807d859f58bdf88fbb5..f324f1e79c46b7cae80f3597dc6e7b5dcecbe637 100644 --- a/polkadot/node/core/runtime-api/Cargo.toml +++ b/polkadot/node/core/runtime-api/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "polkadot-node-core-runtime-api" version = "1.0.0" +description="Wrapper around the parachain-related runtime APIs" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/polkadot/node/core/runtime-api/src/cache.rs b/polkadot/node/core/runtime-api/src/cache.rs index 6cf7fa744d3f917ac999545725b0e4f67aa68589..8a7a3dc08b8173e7029158d3a0ca6030d898e3f7 100644 --- a/polkadot/node/core/runtime-api/src/cache.rs +++ b/polkadot/node/core/runtime-api/src/cache.rs @@ -20,7 +20,7 @@ use schnellru::{ByLength, LruMap}; use sp_consensus_babe::Epoch; use polkadot_primitives::{ - async_backing, slashing, AuthorityDiscoveryId, BlockNumber, CandidateCommitments, + async_backing, slashing, vstaging, AuthorityDiscoveryId, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, @@ -64,8 +64,10 @@ pub(crate) struct RequestResultCache { unapplied_slashes: LruMap>, key_ownership_proof: LruMap<(Hash, ValidatorId), Option>, minimum_backing_votes: LruMap, + disabled_validators: LruMap>, para_backing_state: LruMap<(Hash, ParaId), Option>, async_backing_params: LruMap, + node_features: LruMap, } impl Default for RequestResultCache { @@ -96,8 +98,10 @@ impl Default for RequestResultCache { unapplied_slashes: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), key_ownership_proof: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), minimum_backing_votes: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), + disabled_validators: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), para_backing_state: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), async_backing_params: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), + node_features: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), } } } @@ -444,6 +448,36 @@ impl RequestResultCache { self.minimum_backing_votes.insert(session_index, minimum_backing_votes); } + pub(crate) fn node_features( + &mut self, + session_index: SessionIndex, + ) -> 
Option<&vstaging::NodeFeatures> { + self.node_features.get(&session_index).map(|f| &*f) + } + + pub(crate) fn cache_node_features( + &mut self, + session_index: SessionIndex, + features: vstaging::NodeFeatures, + ) { + self.node_features.insert(session_index, features); + } + + pub(crate) fn disabled_validators( + &mut self, + relay_parent: &Hash, + ) -> Option<&Vec> { + self.disabled_validators.get(relay_parent).map(|v| &*v) + } + + pub(crate) fn cache_disabled_validators( + &mut self, + relay_parent: Hash, + disabled_validators: Vec, + ) { + self.disabled_validators.insert(relay_parent, disabled_validators); + } + pub(crate) fn para_backing_state( &mut self, key: (Hash, ParaId), @@ -520,6 +554,8 @@ pub(crate) enum RequestResult { slashing::OpaqueKeyOwnershipProof, Option<()>, ), + DisabledValidators(Hash, Vec), ParaBackingState(Hash, ParaId, Option), AsyncBackingParams(Hash, async_backing::AsyncBackingParams), + NodeFeatures(SessionIndex, vstaging::NodeFeatures), } diff --git a/polkadot/node/core/runtime-api/src/lib.rs b/polkadot/node/core/runtime-api/src/lib.rs index 1b18941e54645d57d51af049888037f8d6fd994f..8689355c4139af263d393cdd46d62f17abde66c1 100644 --- a/polkadot/node/core/runtime-api/src/lib.rs +++ b/polkadot/node/core/runtime-api/src/lib.rs @@ -166,11 +166,15 @@ where .requests_cache .cache_key_ownership_proof((relay_parent, validator_id), key_ownership_proof), SubmitReportDisputeLost(_, _, _, _) => {}, + DisabledValidators(relay_parent, disabled_validators) => + self.requests_cache.cache_disabled_validators(relay_parent, disabled_validators), ParaBackingState(relay_parent, para_id, constraints) => self .requests_cache .cache_para_backing_state((relay_parent, para_id), constraints), AsyncBackingParams(relay_parent, params) => self.requests_cache.cache_async_backing_params(relay_parent, params), + NodeFeatures(session_index, params) => + self.requests_cache.cache_node_features(session_index, params), } } @@ -296,6 +300,8 @@ where Request::SubmitReportDisputeLost(dispute_proof, key_ownership_proof, sender) }, ), + Request::DisabledValidators(sender) => query!(disabled_validators(), sender) + .map(|sender| Request::DisabledValidators(sender)), Request::ParaBackingState(para, sender) => query!(para_backing_state(para), sender) .map(|sender| Request::ParaBackingState(para, sender)), Request::AsyncBackingParams(sender) => query!(async_backing_params(), sender) @@ -309,6 +315,15 @@ where Some(Request::MinimumBackingVotes(index, sender)) } }, + Request::NodeFeatures(index, sender) => { + if let Some(value) = self.requests_cache.node_features(index) { + self.metrics.on_cached_request(); + let _ = sender.send(Ok(value.clone())); + None + } else { + Some(Request::NodeFeatures(index, sender)) + } + }, } } @@ -404,6 +419,9 @@ where macro_rules! 
query { ($req_variant:ident, $api_name:ident ($($param:expr),*), ver = $version:expr, $sender:expr) => {{ + query!($req_variant, $api_name($($param),*), ver = $version, $sender, result = ( relay_parent $(, $param )* ) ) + }}; + ($req_variant:ident, $api_name:ident ($($param:expr),*), ver = $version:expr, $sender:expr, result = ( $($results:expr),* ) ) => {{ let sender = $sender; let version: u32 = $version; // enforce type for the version expression let runtime_version = client.api_version_parachain_host(relay_parent).await @@ -437,7 +455,7 @@ where metrics.on_request(res.is_ok()); let _ = sender.send(res.clone()); - res.ok().map(|res| RequestResult::$req_variant(relay_parent, $( $param, )* res)) + res.ok().map(|res| RequestResult::$req_variant($( $results, )* res)) }} } @@ -565,6 +583,12 @@ where ver = Request::MINIMUM_BACKING_VOTES_RUNTIME_REQUIREMENT, sender ), + Request::DisabledValidators(sender) => query!( + DisabledValidators, + disabled_validators(), + ver = Request::DISABLED_VALIDATORS_RUNTIME_REQUIREMENT, + sender + ), Request::ParaBackingState(para, sender) => { query!( ParaBackingState, @@ -581,5 +605,12 @@ where sender ) }, + Request::NodeFeatures(index, sender) => query!( + NodeFeatures, + node_features(), + ver = Request::NODE_FEATURES_RUNTIME_REQUIREMENT, + sender, + result = (index) + ), } } diff --git a/polkadot/node/core/runtime-api/src/tests.rs b/polkadot/node/core/runtime-api/src/tests.rs index fb97139a802873aa928df21263e5f946f9080acd..b939bffb0e7f8c896c35f51d75ffa9e43e60df79 100644 --- a/polkadot/node/core/runtime-api/src/tests.rs +++ b/polkadot/node/core/runtime-api/src/tests.rs @@ -20,12 +20,12 @@ use polkadot_node_primitives::{BabeAllowedSlots, BabeEpoch, BabeEpochConfigurati use polkadot_node_subsystem::SpawnGlue; use polkadot_node_subsystem_test_helpers::make_subsystem_context; use polkadot_primitives::{ - async_backing, slashing, AuthorityDiscoveryId, BlockNumber, CandidateCommitments, - CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, - ExecutorParams, GroupRotationInfo, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, - OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, - SessionIndex, SessionInfo, Slot, ValidationCode, ValidationCodeHash, ValidatorId, - ValidatorIndex, ValidatorSignature, + async_backing, slashing, vstaging::NodeFeatures, AuthorityDiscoveryId, BlockNumber, + CandidateCommitments, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, + DisputeState, ExecutorParams, GroupRotationInfo, Id as ParaId, InboundDownwardMessage, + InboundHrmpMessage, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, + ScrapedOnChainVotes, SessionIndex, SessionInfo, Slot, ValidationCode, ValidationCodeHash, + ValidatorId, ValidatorIndex, ValidatorSignature, }; use sp_api::ApiError; use sp_core::testing::TaskExecutor; @@ -268,6 +268,14 @@ impl RuntimeApiSubsystemClient for MockSubsystemClient { async fn minimum_backing_votes(&self, _: Hash, _: SessionIndex) -> Result { todo!("Not required for tests") } + + async fn node_features(&self, _: Hash) -> Result { + todo!("Not required for tests") + } + + async fn disabled_validators(&self, _: Hash) -> Result, ApiError> { + todo!("Not required for tests") + } } #[test] diff --git a/polkadot/node/gum/Cargo.toml b/polkadot/node/gum/Cargo.toml index 01ed34f7a7309e01dd7a836984ddc0871c2f894c..acee9efd0e098d2e9c31c3ce250ca06484ae5b38 100644 --- a/polkadot/node/gum/Cargo.toml +++ b/polkadot/node/gum/Cargo.toml @@ -9,6 +9,5 
@@ description = "Stick logs together with the TraceID as provided by tempo" [dependencies] coarsetime = "0.1.22" tracing = "0.1.35" -jaeger = { package = "polkadot-node-jaeger" , path = "../jaeger" } -gum-proc-macro = { package = "tracing-gum-proc-macro" , path = "proc-macro" } +gum-proc-macro = { package = "tracing-gum-proc-macro", path = "proc-macro" } polkadot-primitives = { path = "../../primitives", features = ["std"] } diff --git a/polkadot/node/gum/src/lib.rs b/polkadot/node/gum/src/lib.rs index 1cc4d8dec1cbf7ad477e50edf4f57ac92de3c894..dad5887af224382ec384b4d354fd74fb2d9c31f5 100644 --- a/polkadot/node/gum/src/lib.rs +++ b/polkadot/node/gum/src/lib.rs @@ -105,8 +105,23 @@ pub use tracing::{enabled, event, Level}; -#[doc(hidden)] -pub use jaeger::hash_to_trace_identifier; +// jaeger dependency + +/// Alias for the 16 byte unique identifier used with jaeger. +pub(crate) type TraceIdentifier = u128; + +/// A helper to convert the hash to the fixed size representation +/// needed for jaeger. +#[inline] +pub fn hash_to_trace_identifier(hash: Hash) -> TraceIdentifier { + let mut buf = [0u8; 16]; + buf.copy_from_slice(&hash.as_ref()[0..16]); + // The slice bytes are copied in reading order, so if interpreted + // in string form by a human, that means lower indices have higher + // values and hence corresponds to BIG endian ordering of the individual + // bytes. + u128::from_be_bytes(buf) as TraceIdentifier +} #[doc(hidden)] pub use polkadot_primitives::{CandidateHash, Hash}; diff --git a/polkadot/node/malus/Cargo.toml b/polkadot/node/malus/Cargo.toml index 9ce725f1682211d9b98166e50438e643c942d336..f52f0cc0282f8286a717f0aeae1f283c9e93db14 100644 --- a/polkadot/node/malus/Cargo.toml +++ b/polkadot/node/malus/Cargo.toml @@ -48,7 +48,7 @@ erasure = { package = "polkadot-erasure-coding", path = "../../erasure-coding" } rand = "0.8.5" # Required for worker binaries to build. -polkadot-node-core-pvf-common = { path = "../core/pvf/common", features = ["test-utils"] } +polkadot-node-core-pvf-common = { path = "../core/pvf/common" } polkadot-node-core-pvf-execute-worker = { path = "../core/pvf/execute-worker" } polkadot-node-core-pvf-prepare-worker = { path = "../core/pvf/prepare-worker" } diff --git a/polkadot/node/malus/src/variants/common.rs b/polkadot/node/malus/src/variants/common.rs index 365b2f16ac21b67480b2dfc1c49b6092511c998d..20b6654638e7d389d6062a728597386b8f23b5ed 100644 --- a/polkadot/node/malus/src/variants/common.rs +++ b/polkadot/node/malus/src/variants/common.rs @@ -30,7 +30,7 @@ use polkadot_node_subsystem::{ use polkadot_primitives::{ CandidateCommitments, CandidateDescriptor, CandidateReceipt, PersistedValidationData, - PvfExecTimeoutKind, + PvfExecKind, }; use futures::channel::oneshot; @@ -90,10 +90,10 @@ impl FakeCandidateValidation { } } - fn should_misbehave(&self, timeout: PvfExecTimeoutKind) -> bool { + fn should_misbehave(&self, timeout: PvfExecKind) -> bool { match timeout { - PvfExecTimeoutKind::Backing => self.includes_backing(), - PvfExecTimeoutKind::Approval => self.includes_approval(), + PvfExecKind::Backing => self.includes_backing(), + PvfExecKind::Approval => self.includes_approval(), } } } @@ -273,30 +273,31 @@ where // Message sent by the approval voting subsystem FromOrchestra::Communication { msg: - CandidateValidationMessage::ValidateFromExhaustive( + CandidateValidationMessage::ValidateFromExhaustive { validation_data, validation_code, candidate_receipt, pov, executor_params, - timeout, - sender, - ), + exec_kind, + response_sender, + .. 
+ }, } => { match self.fake_validation { - x if x.misbehaves_valid() && x.should_misbehave(timeout) => { + x if x.misbehaves_valid() && x.should_misbehave(exec_kind) => { // Behave normally if the `PoV` is not known to be malicious. if pov.block_data.0.as_slice() != MALICIOUS_POV { return Some(FromOrchestra::Communication { - msg: CandidateValidationMessage::ValidateFromExhaustive( + msg: CandidateValidationMessage::ValidateFromExhaustive { validation_data, validation_code, candidate_receipt, pov, executor_params, - timeout, - sender, - ), + exec_kind, + response_sender, + }, }) } // Create the fake response with probability `p` if the `PoV` is malicious, @@ -313,7 +314,7 @@ where create_validation_response( validation_data, candidate_receipt.descriptor, - sender, + response_sender, ); None }, @@ -326,20 +327,20 @@ where ); Some(FromOrchestra::Communication { - msg: CandidateValidationMessage::ValidateFromExhaustive( + msg: CandidateValidationMessage::ValidateFromExhaustive { validation_data, validation_code, candidate_receipt, pov, executor_params, - timeout, - sender, - ), + exec_kind, + response_sender, + }, }) }, } }, - x if x.misbehaves_invalid() && x.should_misbehave(timeout) => { + x if x.misbehaves_invalid() && x.should_misbehave(exec_kind) => { // Set the validation result to invalid with probability `p` and trigger a // dispute let behave_maliciously = self.distribution.sample(&mut rand::thread_rng()); @@ -358,7 +359,7 @@ where // We're not even checking the candidate, this makes us appear // faster than honest validators. - sender.send(Ok(validation_result)).unwrap(); + response_sender.send(Ok(validation_result)).unwrap(); None }, false => { @@ -366,56 +367,57 @@ where gum::info!(target: MALUS, "😈 'Decided' to not act maliciously.",); Some(FromOrchestra::Communication { - msg: CandidateValidationMessage::ValidateFromExhaustive( + msg: CandidateValidationMessage::ValidateFromExhaustive { validation_data, validation_code, candidate_receipt, pov, executor_params, - timeout, - sender, - ), + exec_kind, + response_sender, + }, }) }, } }, // Handle FakeCandidateValidation::Disabled _ => Some(FromOrchestra::Communication { - msg: CandidateValidationMessage::ValidateFromExhaustive( + msg: CandidateValidationMessage::ValidateFromExhaustive { validation_data, validation_code, candidate_receipt, pov, executor_params, - timeout, - sender, - ), + exec_kind, + response_sender, + }, }), } }, // Behaviour related to the backing subsystem FromOrchestra::Communication { msg: - CandidateValidationMessage::ValidateFromChainState( + CandidateValidationMessage::ValidateFromChainState { candidate_receipt, pov, executor_params, - timeout, + exec_kind, response_sender, - ), + .. + }, } => { match self.fake_validation { - x if x.misbehaves_valid() && x.should_misbehave(timeout) => { + x if x.misbehaves_valid() && x.should_misbehave(exec_kind) => { // Behave normally if the `PoV` is not known to be malicious. 
if pov.block_data.0.as_slice() != MALICIOUS_POV { return Some(FromOrchestra::Communication { - msg: CandidateValidationMessage::ValidateFromChainState( + msg: CandidateValidationMessage::ValidateFromChainState { candidate_receipt, pov, executor_params, - timeout, + exec_kind, response_sender, - ), + }, }) } // If the `PoV` is malicious, back the candidate with some probability `p`, @@ -439,17 +441,17 @@ where // If the `PoV` is malicious, we behave normally with some probability // `(1-p)` false => Some(FromOrchestra::Communication { - msg: CandidateValidationMessage::ValidateFromChainState( + msg: CandidateValidationMessage::ValidateFromChainState { candidate_receipt, pov, executor_params, - timeout, + exec_kind, response_sender, - ), + }, }), } }, - x if x.misbehaves_invalid() && x.should_misbehave(timeout) => { + x if x.misbehaves_invalid() && x.should_misbehave(exec_kind) => { // Maliciously set the validation result to invalid for a valid candidate // with probability `p` let behave_maliciously = self.distribution.sample(&mut rand::thread_rng()); @@ -473,25 +475,25 @@ where gum::info!(target: MALUS, "😈 'Decided' to not act maliciously.",); Some(FromOrchestra::Communication { - msg: CandidateValidationMessage::ValidateFromChainState( + msg: CandidateValidationMessage::ValidateFromChainState { candidate_receipt, pov, executor_params, - timeout, + exec_kind, response_sender, - ), + }, }) }, } }, _ => Some(FromOrchestra::Communication { - msg: CandidateValidationMessage::ValidateFromChainState( + msg: CandidateValidationMessage::ValidateFromChainState { candidate_receipt, pov, executor_params, - timeout, + exec_kind, response_sender, - ), + }, }), } }, diff --git a/polkadot/node/network/approval-distribution/Cargo.toml b/polkadot/node/network/approval-distribution/Cargo.toml index e19a1b83a62fe33b0db6989a3abadd18d768a29a..f8a7cc15f87ee8354cc6bc464d42a7867e0d5c74 100644 --- a/polkadot/node/network/approval-distribution/Cargo.toml +++ b/polkadot/node/network/approval-distribution/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "polkadot-approval-distribution" version = "1.0.0" +description = "Polkadot Approval Distribution subsystem for the distribution of assignments and approvals for approval checks on candidates over the network." authors.workspace = true edition.workspace = true license.workspace = true @@ -14,10 +15,12 @@ polkadot-node-subsystem-util = { path = "../../subsystem-util" } polkadot-primitives = { path = "../../../primitives" } polkadot-node-jaeger = { path = "../../jaeger" } rand = "0.8" +itertools = "0.10.5" futures = "0.3.21" futures-timer = "3.0.2" gum = { package = "tracing-gum", path = "../../gum" } +bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } [dev-dependencies] sp-authority-discovery = { path = "../../../../substrate/primitives/authority-discovery" } diff --git a/polkadot/node/network/approval-distribution/src/lib.rs b/polkadot/node/network/approval-distribution/src/lib.rs index f76826d7fdf42a5366898cbdba11e221256c8c0e..47482eef764096427a1a227810b386491ba49f11 100644 --- a/polkadot/node/network/approval-distribution/src/lib.rs +++ b/polkadot/node/network/approval-distribution/src/lib.rs @@ -16,21 +16,30 @@ //! [`ApprovalDistribution`] implementation. //! -//! +//! See the documentation on [approval distribution][approval-distribution-page] in the +//! implementers' guide. +//! +//! 
[approval-distribution-page]: https://paritytech.github.io/polkadot-sdk/book/node/approval/approval-distribution.html #![warn(missing_docs)] +use self::metrics::Metrics; use futures::{channel::oneshot, select, FutureExt as _}; +use itertools::Itertools; +use net_protocol::peer_set::{ProtocolVersion, ValidationVersion}; use polkadot_node_jaeger as jaeger; use polkadot_node_network_protocol::{ - self as net_protocol, + self as net_protocol, filter_by_peer_version, grid_topology::{RandomRouting, RequiredRouting, SessionGridTopologies, SessionGridTopology}, - peer_set::{ValidationVersion, MAX_NOTIFICATION_SIZE}, - v1 as protocol_v1, v2 as protocol_v2, PeerId, UnifiedReputationChange as Rep, Versioned, - VersionedValidationProtocol, View, + peer_set::MAX_NOTIFICATION_SIZE, + v1 as protocol_v1, v2 as protocol_v2, vstaging as protocol_vstaging, PeerId, + UnifiedReputationChange as Rep, Versioned, View, }; use polkadot_node_primitives::approval::{ - AssignmentCert, BlockApprovalMeta, IndirectAssignmentCert, IndirectSignedApprovalVote, + v1::{ + AssignmentCertKind, BlockApprovalMeta, IndirectAssignmentCert, IndirectSignedApprovalVote, + }, + v2::{AsBitIndex, AssignmentCertKindV2, CandidateBitfield, IndirectAssignmentCertV2}, }; use polkadot_node_subsystem::{ messages::{ @@ -49,8 +58,6 @@ use std::{ time::Duration, }; -use self::metrics::Metrics; - mod metrics; #[cfg(test)] @@ -64,11 +71,15 @@ const COST_DUPLICATE_MESSAGE: Rep = Rep::CostMinorRepeated("Peer sent identical const COST_ASSIGNMENT_TOO_FAR_IN_THE_FUTURE: Rep = Rep::CostMinor("The vote was valid but too far in the future"); const COST_INVALID_MESSAGE: Rep = Rep::CostMajor("The vote was bad"); +const COST_OVERSIZED_BITFIELD: Rep = Rep::CostMajor("Oversized certificate or candidate bitfield"); const BENEFIT_VALID_MESSAGE: Rep = Rep::BenefitMinor("Peer sent a valid message"); const BENEFIT_VALID_MESSAGE_FIRST: Rep = Rep::BenefitMinorFirst("Valid message with new information"); +// Maximum valid size for the `CandidateBitfield` in the assignment messages. +const MAX_BITFIELD_SIZE: usize = 500; + /// The Approval Distribution subsystem. pub struct ApprovalDistribution { metrics: Metrics, @@ -97,7 +108,144 @@ impl RecentlyOutdated { } } -// In case the original gtid topology mechanisms don't work on their own, we need to trade bandwidth +// Contains topology routing information for assignments and approvals. +struct ApprovalRouting { + required_routing: RequiredRouting, + local: bool, + random_routing: RandomRouting, +} + +// This struct is responsible for tracking the full state of an assignment and grid routing +// information. +struct ApprovalEntry { + // The assignment certificate. + assignment: IndirectAssignmentCertV2, + // The candidates claimed by the certificate. A mapping between bit index and candidate index. + candidates: CandidateBitfield, + // The approval signatures for each `CandidateIndex` claimed by the assignment certificate. + approvals: HashMap, + // The validator index of the assignment signer. + validator_index: ValidatorIndex, + // Information required for gossiping to other peers using the grid topology. 
+ routing_info: ApprovalRouting, +} + +#[derive(Debug)] +enum ApprovalEntryError { + InvalidValidatorIndex, + CandidateIndexOutOfBounds, + InvalidCandidateIndex, + DuplicateApproval, +} + +impl ApprovalEntry { + pub fn new( + assignment: IndirectAssignmentCertV2, + candidates: CandidateBitfield, + routing_info: ApprovalRouting, + ) -> ApprovalEntry { + Self { + validator_index: assignment.validator, + assignment, + approvals: HashMap::with_capacity(candidates.len()), + candidates, + routing_info, + } + } + + // Create a `MessageSubject` to reference the assignment. + pub fn create_assignment_knowledge(&self, block_hash: Hash) -> (MessageSubject, MessageKind) { + ( + MessageSubject(block_hash, self.candidates.clone(), self.validator_index), + MessageKind::Assignment, + ) + } + + // Create a `MessageSubject` to reference the approval. + pub fn create_approval_knowledge( + &self, + block_hash: Hash, + candidate_index: CandidateIndex, + ) -> (MessageSubject, MessageKind) { + ( + MessageSubject(block_hash, candidate_index.into(), self.validator_index), + MessageKind::Approval, + ) + } + + // Get a mutable reference to the routing information. + pub fn routing_info_mut(&mut self) -> &mut ApprovalRouting { + &mut self.routing_info + } + + // Get the routing information. + pub fn routing_info(&self) -> &ApprovalRouting { + &self.routing_info + } + + // Update the required routing. + pub fn update_required_routing(&mut self, required_routing: RequiredRouting) { + self.routing_info.required_routing = required_routing; + } + + // Records a new approval. Returns error if the claimed candidate is not found or we already + // have received the approval. + pub fn note_approval( + &mut self, + approval: IndirectSignedApprovalVote, + ) -> Result<(), ApprovalEntryError> { + // First do some sanity checks: + // - check validator index matches + // - check claimed candidate + // - check for duplicate approval + if self.validator_index != approval.validator { + return Err(ApprovalEntryError::InvalidValidatorIndex) + } + + if self.candidates.len() <= approval.candidate_index as usize { + return Err(ApprovalEntryError::CandidateIndexOutOfBounds) + } + + if !self.candidates.bit_at(approval.candidate_index.as_bit_index()) { + return Err(ApprovalEntryError::InvalidCandidateIndex) + } + + if self.approvals.contains_key(&approval.candidate_index) { + return Err(ApprovalEntryError::DuplicateApproval) + } + + self.approvals.insert(approval.candidate_index, approval); + Ok(()) + } + + // Get the assignment certificate and claimed candidates. + pub fn assignment(&self) -> (IndirectAssignmentCertV2, CandidateBitfield) { + (self.assignment.clone(), self.candidates.clone()) + } + + // Get all approvals for all candidates claimed by the assignment. + pub fn approvals(&self) -> Vec<IndirectSignedApprovalVote> { + self.approvals.values().cloned().collect::<Vec<_>>() + } + + // Get the approval for a specific candidate index. + pub fn approval(&self, candidate_index: CandidateIndex) -> Option<IndirectSignedApprovalVote> { + self.approvals.get(&candidate_index).cloned() + } + + // Get validator index. + pub fn validator_index(&self) -> ValidatorIndex { + self.validator_index + } +} + +// We keep track of each peer view and protocol version using this struct. +struct PeerEntry { + pub view: View, + pub version: ProtocolVersion, +} + +// In case the original grid topology mechanisms don't work on their own, we need to trade bandwidth +// for protocol liveliness by introducing aggression.
// // Aggression has 3 levels: @@ -117,7 +265,6 @@ impl RecentlyOutdated { // not to all blocks older than the threshold. Most likely, a few assignments struggle to // be propagated in a single block and this holds up all of its descendants blocks. // Accordingly, we only step on the gas for the block which is most obviously holding up finality. - /// Aggression configuration representation #[derive(Clone)] struct AggressionConfig { @@ -160,15 +307,6 @@ enum Resend { No, } -/// Data stored on a per-peer basis. -#[derive(Debug)] -struct PeerData { - /// The peer's view. - view: View, - /// The peer's protocol version. - version: ValidationVersion, -} - /// The [`State`] struct is responsible for tracking the overall state of the subsystem. /// /// It tracks metadata about our view of the unfinalized chain, @@ -189,7 +327,7 @@ struct State { pending_known: HashMap>, /// Peer data is partially stored here, and partially inline within the [`BlockEntry`]s - peer_data: HashMap, + peer_views: HashMap, /// Keeps a topology for various different sessions. topologies: SessionGridTopologies, @@ -197,12 +335,12 @@ struct State { /// Tracks recently finalized blocks. recent_outdated_blocks: RecentlyOutdated, - /// Config for aggression. - aggression_config: AggressionConfig, - - /// `HashMap` from active leaves to spans + /// HashMap from active leaves to spans spans: HashMap, + /// Aggression configuration. + aggression_config: AggressionConfig, + /// Current approval checking finality lag. approval_checking_lag: BlockNumber, @@ -216,8 +354,11 @@ enum MessageKind { Approval, } +// Utility structure to identify assignments and approvals for specific candidates. +// Assignments can span multiple candidates, while approvals refer to only one candidate. +// #[derive(Debug, Clone, Hash, PartialEq, Eq)] -struct MessageSubject(Hash, CandidateIndex, ValidatorIndex); +struct MessageSubject(Hash, pub CandidateBitfield, ValidatorIndex); #[derive(Debug, Clone, Default)] struct Knowledge { @@ -238,9 +379,11 @@ impl Knowledge { } fn insert(&mut self, message: MessageSubject, kind: MessageKind) -> bool { - match self.known_messages.entry(message) { + let mut success = match self.known_messages.entry(message.clone()) { hash_map::Entry::Vacant(vacant) => { vacant.insert(kind); + // If there are multiple candidates assigned in the message, create + // separate entries for each one. true }, hash_map::Entry::Occupied(mut occupied) => match (*occupied.get(), kind) { @@ -252,7 +395,25 @@ impl Knowledge { true }, }, + }; + + // In case of successful insertion of multiple candidate assignments, create additional + // entries for each assigned candidate. This fakes knowledge of individual assignments, but + // we need to share the same `MessageSubject` with the followup approval candidate index. + if kind == MessageKind::Assignment && success && message.1.count_ones() > 1 { + for candidate_index in message.1.iter_ones() { + success = success && + self.insert( + MessageSubject( + message.0, + vec![candidate_index as u32].try_into().expect("Non-empty vec; qed"), + message.2, + ), + kind, + ); + } } + success } } @@ -286,47 +447,97 @@ struct BlockEntry { candidates: Vec, /// The session index of this block. session: SessionIndex, + /// Approval entries for whole block. These also contain all approvals in the case of multiple + /// candidates being claimed by assignments.
+ approval_entries: HashMap<(ValidatorIndex, CandidateBitfield), ApprovalEntry>, } -#[derive(Debug)] -enum ApprovalState { - Assigned(AssignmentCert), - Approved(AssignmentCert, ValidatorSignature), -} +impl BlockEntry { + // Returns the peers which currently know this block. + pub fn known_by(&self) -> Vec<PeerId> { + self.known_by.keys().cloned().collect::<Vec<PeerId>>() + } -impl ApprovalState { - fn assignment_cert(&self) -> &AssignmentCert { - match *self { - ApprovalState::Assigned(ref cert) => cert, - ApprovalState::Approved(ref cert, _) => cert, + pub fn insert_approval_entry(&mut self, entry: ApprovalEntry) -> &mut ApprovalEntry { + // First map one entry per candidate to the same key we will use in `approval_entries`. + // Key is (ValidatorIndex, CandidateBitfield) that links the `ApprovalEntry` to the (K,V) + // entry in `candidate_entry.messages`. + for claimed_candidate_index in entry.candidates.iter_ones() { + match self.candidates.get_mut(claimed_candidate_index) { + Some(candidate_entry) => { + candidate_entry + .messages + .entry(entry.validator_index()) + .or_insert(entry.candidates.clone()); + }, + None => { + // This should never happen, but if it happens, it means the subsystem is + // broken. + gum::warn!( + target: LOG_TARGET, + hash = ?entry.assignment.block_hash, + ?claimed_candidate_index, + "Missing candidate entry on `import_and_circulate_assignment`", + ); + }, + }; } + + self.approval_entries + .entry((entry.validator_index, entry.candidates.clone())) + .or_insert(entry) } - fn approval_signature(&self) -> Option { - match *self { - ApprovalState::Assigned(_) => None, - ApprovalState::Approved(_, ref sig) => Some(sig.clone()), - } + // Returns a mutable reference to the `ApprovalEntry` for `candidate_index` from validator + // `validator_index`. + pub fn approval_entry( + &mut self, + candidate_index: CandidateIndex, + validator_index: ValidatorIndex, + ) -> Option<&mut ApprovalEntry> { + self.candidates + .get(candidate_index as usize) + .map_or(None, |candidate_entry| candidate_entry.messages.get(&validator_index)) + .map_or(None, |candidate_indices| { + self.approval_entries.get_mut(&(validator_index, candidate_indices.clone())) + }) } -} -// routing state bundled with messages for the candidate. Corresponding assignments -// and approvals are stored together and should be routed in the same way, with -// assignments preceding approvals in all cases. -#[derive(Debug)] -struct MessageState { - required_routing: RequiredRouting, - local: bool, - random_routing: RandomRouting, - approval_state: ApprovalState, + // Get all approval entries for a given candidate. + pub fn approval_entries(&self, candidate_index: CandidateIndex) -> Vec<&ApprovalEntry> { + // Get the keys for fetching `ApprovalEntry` from `self.approval_entries`. + let approval_entry_keys = self + .candidates + .get(candidate_index as usize) + .map(|candidate_entry| &candidate_entry.messages); + + if let Some(approval_entry_keys) = approval_entry_keys { + // Ensure no duplicates. + let approval_entry_keys = approval_entry_keys.iter().unique().collect::<Vec<_>>(); + + let mut entries = Vec::new(); + for (validator_index, candidate_indices) in approval_entry_keys { + if let Some(entry) = + self.approval_entries.get(&(*validator_index, candidate_indices.clone())) + { + entries.push(entry); + } + } + entries + } else { + vec![] + } + } } -/// Information about candidates in the context of a particular block they are included in.
-/// In other words, multiple `CandidateEntry`s may exist for the same candidate, -/// if it is included by multiple blocks - this is likely the case when there are forks. +// Information about candidates in the context of a particular block they are included in. +// In other words, multiple `CandidateEntry`s may exist for the same candidate, +// if it is included by multiple blocks - this is likely the case when there are forks. #[derive(Debug, Default)] struct CandidateEntry { - messages: HashMap, + // The value represents part of the lookup key in `approval_entries` to fetch the assignment + // and existing votes. + messages: HashMap, } #[derive(Debug, Clone, PartialEq)] @@ -345,7 +556,7 @@ impl MessageSource { } enum PendingMessage { - Assignment(IndirectAssignmentCert, CandidateIndex), + Assignment(IndirectAssignmentCertV2, CandidateBitfield), Approval(IndirectSignedApprovalVote), } @@ -362,27 +573,13 @@ impl State { NetworkBridgeEvent::PeerConnected(peer_id, role, version, _) => { // insert a blank view if none already present gum::trace!(target: LOG_TARGET, ?peer_id, ?role, "Peer connected"); - let version = match ValidationVersion::try_from(version).ok() { - Some(v) => v, - None => { - // sanity: network bridge is supposed to detect this already. - gum::error!( - target: LOG_TARGET, - ?peer_id, - ?version, - "Unsupported protocol version" - ); - return - }, - }; - - self.peer_data + self.peer_views .entry(peer_id) - .or_insert_with(|| PeerData { version, view: Default::default() }); + .or_insert(PeerEntry { view: Default::default(), version }); }, NetworkBridgeEvent::PeerDisconnected(peer_id) => { gum::trace!(target: LOG_TARGET, ?peer_id, "Peer disconnected"); - self.peer_data.remove(&peer_id); + self.peer_views.remove(&peer_id); self.blocks.iter_mut().for_each(|(_hash, entry)| { entry.known_by.remove(&peer_id); }) @@ -419,8 +616,8 @@ impl State { live }); }, - NetworkBridgeEvent::PeerMessage(peer_id, msg) => { - self.process_incoming_peer_message(ctx, metrics, peer_id, msg, rng).await; + NetworkBridgeEvent::PeerMessage(peer_id, message) => { + self.process_incoming_peer_message(ctx, metrics, peer_id, message, rng).await; }, NetworkBridgeEvent::UpdatedAuthorityIds { .. } => { // The approval-distribution subsystem doesn't deal with `AuthorityDiscoveryId`s. 
@@ -459,6 +656,7 @@ impl State { knowledge: Knowledge::default(), candidates, session: meta.session, + approval_entries: HashMap::new(), }); self.topologies.inc_session_refs(meta.session); @@ -481,18 +679,17 @@ impl State { { let sender = ctx.sender(); - for (peer_id, data) in self.peer_data.iter() { - let intersection = data.view.iter().filter(|h| new_hashes.contains(h)); - let view_intersection = - View::new(intersection.cloned(), data.view.finalized_number); + for (peer_id, PeerEntry { view, version }) in self.peer_views.iter() { + let intersection = view.iter().filter(|h| new_hashes.contains(h)); + let view_intersection = View::new(intersection.cloned(), view.finalized_number); Self::unify_with_peer( sender, metrics, &mut self.blocks, &self.topologies, - self.peer_data.len(), + self.peer_views.len(), *peer_id, - data.version, + *version, view_intersection, rng, ) @@ -530,13 +727,13 @@ impl State { for (peer_id, message) in to_import { match message { - PendingMessage::Assignment(assignment, claimed_index) => { + PendingMessage::Assignment(assignment, claimed_indices) => { self.import_and_circulate_assignment( ctx, metrics, MessageSource::Peer(peer_id), assignment, - claimed_index, + claimed_indices, rng, ) .await; @@ -575,32 +772,100 @@ impl State { adjust_required_routing_and_propagate( ctx, - &self.peer_data, &mut self.blocks, &self.topologies, |block_entry| block_entry.session == session, |required_routing, local, validator_index| { - if *required_routing == RequiredRouting::PendingTopology { - *required_routing = topology + if required_routing == &RequiredRouting::PendingTopology { + topology .local_grid_neighbors() - .required_routing_by_index(*validator_index, local); + .required_routing_by_index(*validator_index, local) + } else { + *required_routing } }, + &self.peer_views, ) .await; } + async fn process_incoming_assignments( + &mut self, + ctx: &mut Context, + metrics: &Metrics, + peer_id: PeerId, + assignments: Vec<(IndirectAssignmentCertV2, CandidateBitfield)>, + rng: &mut R, + ) where + R: CryptoRng + Rng, + { + for (assignment, claimed_indices) in assignments { + if let Some(pending) = self.pending_known.get_mut(&assignment.block_hash) { + let block_hash = &assignment.block_hash; + let validator_index = assignment.validator; + + gum::trace!( + target: LOG_TARGET, + %peer_id, + ?block_hash, + ?claimed_indices, + ?validator_index, + "Pending assignment", + ); + + pending.push((peer_id, PendingMessage::Assignment(assignment, claimed_indices))); + + continue + } + + self.import_and_circulate_assignment( + ctx, + metrics, + MessageSource::Peer(peer_id), + assignment, + claimed_indices, + rng, + ) + .await; + } + } + async fn process_incoming_peer_message( &mut self, ctx: &mut Context, metrics: &Metrics, peer_id: PeerId, - msg: net_protocol::ApprovalDistributionMessage, + msg: Versioned< + protocol_v1::ApprovalDistributionMessage, + protocol_v2::ApprovalDistributionMessage, + protocol_vstaging::ApprovalDistributionMessage, + >, rng: &mut R, ) where R: CryptoRng + Rng, { match msg { + Versioned::VStaging(protocol_vstaging::ApprovalDistributionMessage::Assignments( + assignments, + )) => { + gum::trace!( + target: LOG_TARGET, + peer_id = %peer_id, + num = assignments.len(), + "Processing assignments from a peer", + ); + let sanitized_assignments = + self.sanitize_v2_assignments(peer_id, ctx.sender(), assignments).await; + + self.process_incoming_assignments( + ctx, + metrics, + peer_id, + sanitized_assignments, + rng, + ) + .await; + }, 
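The vstaging arm above, and the v1/v2 arm that follows, both hand incoming assignments to `process_incoming_assignments` in the v2 `(certificate, candidate bitfield)` form; a v1 assignment that claims a single `CandidateIndex` is converted so that exactly that one bit is set. Below is a minimal, self-contained sketch of that index-to-bitfield idea; the types are simplified stand-ins for illustration only, not the real `polkadot-node-primitives` definitions.

```rust
// Illustrative sketch only: simplified stand-ins for `CandidateIndex` and
// `CandidateBitfield`, showing how a v1 assignment claiming one candidate
// index maps onto the bitfield representation used by the v2 path above.
type CandidateIndex = u32;

/// A minimal candidate bitfield: bit `i` is set if candidate `i` is claimed.
#[derive(Debug, Clone)]
struct CandidateBitfield(Vec<bool>);

impl From<CandidateIndex> for CandidateBitfield {
    fn from(index: CandidateIndex) -> Self {
        // A single claimed index becomes a bitfield whose most significant
        // set bit is exactly that index (all lower bits unset).
        let mut bits = vec![false; index as usize + 1];
        bits[index as usize] = true;
        CandidateBitfield(bits)
    }
}

impl CandidateBitfield {
    fn count_ones(&self) -> usize {
        self.0.iter().filter(|b| **b).count()
    }
    fn iter_ones(&self) -> impl Iterator<Item = usize> + '_ {
        self.0.iter().enumerate().filter(|(_, b)| **b).map(|(i, _)| i)
    }
}

fn main() {
    // A v1 message carries `(cert, candidate_index)`; the v2 path instead
    // carries `(cert, candidate_bitfield)`, so the index is turned into a
    // one-bit bitfield before further processing.
    let claimed = CandidateBitfield::from(3);
    assert_eq!(claimed.count_ones(), 1);
    assert_eq!(claimed.iter_ones().collect::<Vec<_>>(), vec![3usize]);
}
```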
Versioned::V1(protocol_v1::ApprovalDistributionMessage::Assignments(assignments)) | Versioned::V2(protocol_v2::ApprovalDistributionMessage::Assignments(assignments)) => { gum::trace!( @@ -609,38 +874,22 @@ impl State { num = assignments.len(), "Processing assignments from a peer", ); - for (assignment, claimed_index) in assignments.into_iter() { - if let Some(pending) = self.pending_known.get_mut(&assignment.block_hash) { - let message_subject = MessageSubject( - assignment.block_hash, - claimed_index, - assignment.validator, - ); - gum::trace!( - target: LOG_TARGET, - %peer_id, - ?message_subject, - "Pending assignment", - ); - - pending - .push((peer_id, PendingMessage::Assignment(assignment, claimed_index))); - - continue - } + let sanitized_assignments = + self.sanitize_v1_assignments(peer_id, ctx.sender(), assignments).await; - self.import_and_circulate_assignment( - ctx, - metrics, - MessageSource::Peer(peer_id), - assignment, - claimed_index, - rng, - ) - .await; - } + self.process_incoming_assignments( + ctx, + metrics, + peer_id, + sanitized_assignments, + rng, + ) + .await; }, + Versioned::VStaging(protocol_vstaging::ApprovalDistributionMessage::Approvals( + approvals, + )) | Versioned::V1(protocol_v1::ApprovalDistributionMessage::Approvals(approvals)) | Versioned::V2(protocol_v2::ApprovalDistributionMessage::Approvals(approvals)) => { gum::trace!( @@ -651,17 +900,17 @@ impl State { ); for approval_vote in approvals.into_iter() { if let Some(pending) = self.pending_known.get_mut(&approval_vote.block_hash) { - let message_subject = MessageSubject( - approval_vote.block_hash, - approval_vote.candidate_index, - approval_vote.validator, - ); + let block_hash = approval_vote.block_hash; + let candidate_index = approval_vote.candidate_index; + let validator_index = approval_vote.validator; gum::trace!( target: LOG_TARGET, %peer_id, - ?message_subject, - "Pending approval", + ?block_hash, + ?candidate_index, + ?validator_index, + "Pending assignment", ); pending.push((peer_id, PendingMessage::Approval(approval_vote))); @@ -695,14 +944,23 @@ impl State { { gum::trace!(target: LOG_TARGET, ?view, "Peer view change"); let finalized_number = view.finalized_number; - let (peer_protocol_version, old_finalized_number) = match self - .peer_data - .get_mut(&peer_id) - .map(|d| (d.version, std::mem::replace(&mut d.view, view.clone()))) - { - Some((v, view)) => (v, view.finalized_number), - None => return, // unknown peer - }; + + let (old_view, protocol_version) = + if let Some(peer_entry) = self.peer_views.get_mut(&peer_id) { + (Some(std::mem::replace(&mut peer_entry.view, view.clone())), peer_entry.version) + } else { + // This shouldn't happen, but if it does we assume protocol version 1. 
+ gum::warn!( + target: LOG_TARGET, + ?peer_id, + ?view, + "Peer view change for missing `peer_entry`" + ); + + (None, ValidationVersion::V1.into()) + }; + + let old_finalized_number = old_view.map(|v| v.finalized_number).unwrap_or(0); // we want to prune every block known_by peer up to (including) view.finalized_number let blocks = &mut self.blocks; @@ -727,9 +985,9 @@ impl State { metrics, &mut self.blocks, &self.topologies, - self.peer_data.len(), + self.peer_views.len(), peer_id, - peer_protocol_version, + protocol_version, view, rng, ) @@ -770,8 +1028,8 @@ impl State { ctx: &mut Context, metrics: &Metrics, source: MessageSource, - assignment: IndirectAssignmentCert, - claimed_candidate_index: CandidateIndex, + assignment: IndirectAssignmentCertV2, + claimed_candidate_indices: CandidateBitfield, rng: &mut R, ) where R: CryptoRng + Rng, @@ -819,9 +1077,11 @@ impl State { }, }; - // compute metadata on the assignment. - let message_subject = MessageSubject(block_hash, claimed_candidate_index, validator_index); - let message_kind = MessageKind::Assignment; + // Compute metadata on the assignment. + let (message_subject, message_kind) = ( + MessageSubject(block_hash, claimed_candidate_indices.clone(), validator_index), + MessageKind::Assignment, + ); if let Some(peer_id) = source.peer_id() { // check if our knowledge of the peer already contains this assignment @@ -837,6 +1097,7 @@ impl State { ?message_subject, "Duplicate assignment", ); + modify_reputation( &mut self.reputation, ctx.sender(), @@ -844,6 +1105,15 @@ impl State { COST_DUPLICATE_MESSAGE, ) .await; + } else { + gum::trace!( + target: LOG_TARGET, + ?peer_id, + hash = ?block_hash, + ?validator_index, + ?message_subject, + "We sent the message to the peer while peer was sending it to us. Known race condition.", + ); } return } @@ -885,7 +1155,7 @@ impl State { ctx.send_message(ApprovalVotingMessage::CheckAndImportAssignment( assignment.clone(), - claimed_candidate_index, + claimed_candidate_indices.clone(), tx, )) .await; @@ -916,7 +1186,7 @@ impl State { BENEFIT_VALID_MESSAGE_FIRST, ) .await; - entry.knowledge.known_messages.insert(message_subject.clone(), message_kind); + entry.knowledge.insert(message_subject.clone(), message_kind); if let Some(peer_knowledge) = entry.known_by.get_mut(&peer_id) { peer_knowledge.received.insert(message_subject.clone(), message_kind); } @@ -990,7 +1260,7 @@ impl State { // Invariant: to our knowledge, none of the peers except for the `source` know about the // assignment. - metrics.on_assignment_imported(); + metrics.on_assignment_imported(&assignment.cert.kind); let topology = self.topologies.get_topology(entry.session); let local = source == MessageSource::Local; @@ -999,28 +1269,14 @@ impl State { t.local_grid_neighbors().required_routing_by_index(validator_index, local) }); - let message_state = match entry.candidates.get_mut(claimed_candidate_index as usize) { - Some(candidate_entry) => { - // set the approval state for validator_index to Assigned - // unless the approval state is set already - candidate_entry.messages.entry(validator_index).or_insert_with(|| MessageState { - required_routing, - local, - random_routing: Default::default(), - approval_state: ApprovalState::Assigned(assignment.cert.clone()), - }) - }, - None => { - gum::warn!( - target: LOG_TARGET, - hash = ?block_hash, - ?claimed_candidate_index, - "Expected a candidate entry on import_and_circulate_assignment", - ); + // All the peers that know the relay chain block. 
+ let peers_to_filter = entry.known_by(); - return - }, - }; + let approval_entry = entry.insert_approval_entry(ApprovalEntry::new( + assignment.clone(), + claimed_candidate_indices.clone(), + ApprovalRouting { required_routing, local, random_routing: Default::default() }, + )); // Dispatch the message to all peers in the routing set which // know the block. @@ -1028,83 +1284,65 @@ impl State { // If the topology isn't known yet (race with networking subsystems) // then messages will be sent when we get it. - let assignments = vec![(assignment, claimed_candidate_index)]; - let n_peers_total = self.peer_data.len(); + let assignments = vec![(assignment, claimed_candidate_indices.clone())]; + let n_peers_total = self.peer_views.len(); let source_peer = source.peer_id(); - let mut peer_filter = move |peer| { - if Some(peer) == source_peer.as_ref() { - return false + // Peers that we will send the assignment to. + let mut peers = Vec::new(); + + // Filter destination peers + for peer in peers_to_filter.into_iter() { + if Some(peer) == source_peer { + continue } if let Some(true) = topology .as_ref() - .map(|t| t.local_grid_neighbors().route_to_peer(required_routing, peer)) + .map(|t| t.local_grid_neighbors().route_to_peer(required_routing, &peer)) { - return true + peers.push(peer); + continue } // Note: at this point, we haven't received the message from any peers // other than the source peer, and we just got it, so we haven't sent it // to any peers either. - let route_random = message_state.random_routing.sample(n_peers_total, rng); + let route_random = + approval_entry.routing_info().random_routing.sample(n_peers_total, rng); if route_random { - message_state.random_routing.inc_sent(); - } - - route_random - }; - - let (v1_peers, v2_peers) = { - let peer_data = &self.peer_data; - let peers = entry - .known_by - .keys() - .filter_map(|p| peer_data.get_key_value(p)) - .filter(|(p, _)| peer_filter(p)) - .map(|(p, peer_data)| (*p, peer_data.version)) - .collect::>(); - - // Add the metadata of the assignment to the knowledge of each peer. - for (peer, _) in peers.iter() { - // we already filtered peers above, so this should always be Some - if let Some(peer_knowledge) = entry.known_by.get_mut(peer) { - peer_knowledge.sent.insert(message_subject.clone(), message_kind); - } + approval_entry.routing_info_mut().random_routing.inc_sent(); + peers.push(peer); } + } - if !peers.is_empty() { - gum::trace!( - target: LOG_TARGET, - ?block_hash, - ?claimed_candidate_index, - local = source.peer_id().is_none(), - num_peers = peers.len(), - "Sending an assignment to peers", - ); + // Add the metadata of the assignment to the knowledge of each peer. 
+ for peer in peers.iter() { + // we already filtered peers above, so this should always be Some + if let Some(peer_knowledge) = entry.known_by.get_mut(peer) { + peer_knowledge.sent.insert(message_subject.clone(), message_kind); } + } - let v1_peers = filter_peers_by_version(&peers, ValidationVersion::V1); - let v2_peers = filter_peers_by_version(&peers, ValidationVersion::V2); - - (v1_peers, v2_peers) - }; + if !peers.is_empty() { + gum::trace!( + target: LOG_TARGET, + ?block_hash, + ?claimed_candidate_indices, + local = source.peer_id().is_none(), + num_peers = peers.len(), + "Sending an assignment to peers", + ); - if !v1_peers.is_empty() { - ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - v1_peers, - versioned_assignments_packet(ValidationVersion::V1, assignments.clone()), - )) - .await; - } + let peers = peers + .iter() + .filter_map(|peer_id| { + self.peer_views.get(peer_id).map(|peer_entry| (*peer_id, peer_entry.version)) + }) + .collect::>(); - if !v2_peers.is_empty() { - ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - v2_peers, - versioned_assignments_packet(ValidationVersion::V2, assignments.clone()), - )) - .await; + send_assignments_batched(ctx.sender(), assignments, &peers).await; } } @@ -1139,6 +1377,14 @@ impl State { _ => { if let Some(peer_id) = source.peer_id() { if !self.recent_outdated_blocks.is_recent_outdated(&block_hash) { + gum::debug!( + target: LOG_TARGET, + ?peer_id, + ?block_hash, + ?validator_index, + ?candidate_index, + "Approval from a peer is out of view", + ); modify_reputation( &mut self.reputation, ctx.sender(), @@ -1153,7 +1399,7 @@ impl State { }; // compute metadata on the assignment. - let message_subject = MessageSubject(block_hash, candidate_index, validator_index); + let message_subject = MessageSubject(block_hash, candidate_index.into(), validator_index); let message_kind = MessageKind::Approval; if let Some(peer_id) = source.peer_id() { @@ -1303,71 +1549,48 @@ impl State { } } - // Invariant: to our knowledge, none of the peers except for the `source` know about the - // approval. - metrics.on_approval_imported(); - - let required_routing = match entry.candidates.get_mut(candidate_index as usize) { - Some(candidate_entry) => { - // set the approval state for validator_index to Approved - // it should be in assigned state already - match candidate_entry.messages.remove(&validator_index) { - Some(MessageState { - approval_state: ApprovalState::Assigned(cert), - required_routing, - local, - random_routing, - }) => { - candidate_entry.messages.insert( - validator_index, - MessageState { - approval_state: ApprovalState::Approved( - cert, - vote.signature.clone(), - ), - required_routing, - local, - random_routing, - }, - ); - - required_routing - }, - Some(_) => { - unreachable!( - "we only insert it after the metadata, checked the metadata above; qed" - ); - }, - None => { - // this would indicate a bug in approval-voting - gum::warn!( - target: LOG_TARGET, - hash = ?block_hash, - ?candidate_index, - ?validator_index, - "Importing an approval we don't have an assignment for", - ); + let required_routing = match entry.approval_entry(candidate_index, validator_index) { + Some(approval_entry) => { + // Invariant: to our knowledge, none of the peers except for the `source` know about + // the approval. 
+ metrics.on_approval_imported(); + + if let Err(err) = approval_entry.note_approval(vote.clone()) { + // this would indicate a bug in approval-voting: + // - validator index mismatch + // - candidate index mismatch + // - duplicate approval + gum::warn!( + target: LOG_TARGET, + hash = ?block_hash, + ?candidate_index, + ?validator_index, + ?err, + "Possible bug: Vote import failed", + ); - return - }, + return } + + approval_entry.routing_info().required_routing }, None => { + let peer_id = source.peer_id(); + // This indicates a bug in approval-distribution, since we check the knowledge at + // the beginning of the function. gum::warn!( target: LOG_TARGET, - hash = ?block_hash, - ?candidate_index, - ?validator_index, - "Expected a candidate entry on import_and_circulate_approval", + ?peer_id, + ?message_subject, + "Unknown approval assignment", ); - + // No rep change as this is caused by an issue return }, }; // Dispatch a ApprovalDistributionV1Message::Approval(vote) // to all peers required by the topology, with the exception of the source peer. - let topology = self.topologies.get_topology(entry.session); let source_peer = source.peer_id(); @@ -1391,57 +1614,32 @@ impl State { in_topology || knowledge.sent.contains(message_subject, MessageKind::Assignment) }; - let (v1_peers, v2_peers) = { - let peer_data = &self.peer_data; - let peers = entry - .known_by - .iter() - .filter_map(|(p, k)| peer_data.get(&p).map(|pd| (p, k, pd.version))) - .filter(|(p, k, _)| peer_filter(p, k)) - .map(|(p, _, v)| (*p, v)) - .collect::>(); - - // Add the metadata of the assignment to the knowledge of each peer. - for (peer, _) in peers.iter() { - // we already filtered peers above, so this should always be Some - if let Some(peer_knowledge) = entry.known_by.get_mut(peer) { - peer_knowledge.sent.insert(message_subject.clone(), message_kind); - } - } - - if !peers.is_empty() { - gum::trace!( - target: LOG_TARGET, - ?block_hash, - ?candidate_index, - local = source.peer_id().is_none(), - num_peers = peers.len(), - "Sending an approval to peers", - ); + let peers = entry + .known_by + .iter() + .filter(|(p, k)| peer_filter(p, k)) + .filter_map(|(p, _)| self.peer_views.get(p).map(|entry| (*p, entry.version))) + .collect::<Vec<_>>(); + + // Add the metadata of the assignment to the knowledge of each peer.
+ for peer in peers.iter() { + // we already filtered peers above, so this should always be Some + if let Some(entry) = entry.known_by.get_mut(&peer.0) { + entry.sent.insert(message_subject.clone(), message_kind); } - - let v1_peers = filter_peers_by_version(&peers, ValidationVersion::V1); - let v2_peers = filter_peers_by_version(&peers, ValidationVersion::V2); - - (v1_peers, v2_peers) - }; - - let approvals = vec![vote]; - - if !v1_peers.is_empty() { - ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - v1_peers, - versioned_approvals_packet(ValidationVersion::V1, approvals.clone()), - )) - .await; } - if !v2_peers.is_empty() { - ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - v2_peers, - versioned_approvals_packet(ValidationVersion::V2, approvals), - )) - .await; + if !peers.is_empty() { + let approvals = vec![vote]; + gum::trace!( + target: LOG_TARGET, + ?block_hash, + ?candidate_index, + local = source.peer_id().is_none(), + num_peers = peers.len(), + "Sending an approval to peers", + ); + send_approvals_batched(ctx.sender(), approvals, &peers).await; } } @@ -1472,25 +1670,11 @@ impl State { Some(e) => e, }; - let candidate_entry = match block_entry.candidates.get(index as usize) { - None => { - gum::debug!( - target: LOG_TARGET, - ?hash, - ?index, - "`get_approval_signatures`: could not find candidate entry for given hash and index!" - ); - continue - }, - Some(e) => e, - }; - let sigs = - candidate_entry.messages.iter().filter_map(|(validator_index, message_state)| { - match &message_state.approval_state { - ApprovalState::Approved(_, sig) => Some((*validator_index, sig.clone())), - ApprovalState::Assigned(_) => None, - } - }); + let sigs = block_entry + .approval_entries(index) + .into_iter() + .filter_map(|approval_entry| approval_entry.approval(index)) + .map(|approval| (approval.validator, approval.signature)); all_sigs.extend(sigs); } all_sigs @@ -1503,7 +1687,7 @@ impl State { topologies: &SessionGridTopologies, total_peers: usize, peer_id: PeerId, - peer_protocol_version: ValidationVersion, + protocol_version: ProtocolVersion, view: View, rng: &mut (impl CryptoRng + Rng), ) { @@ -1516,6 +1700,8 @@ impl State { let view_finalized_number = view.finalized_number; for head in view.into_iter() { let mut block = head; + + // Walk the chain back to last finalized block of the peer view. loop { let entry = match entries.get_mut(&block) { Some(entry) if entry.number > view_finalized_number => entry, @@ -1530,19 +1716,16 @@ impl State { } let peer_knowledge = entry.known_by.entry(peer_id).or_default(); - let topology = topologies.get_topology(entry.session); - // Iterate all messages in all candidates. - for (candidate_index, validator, message_state) in - entry.candidates.iter_mut().enumerate().flat_map(|(c_i, c)| { - c.messages.iter_mut().map(move |(k, v)| (c_i as _, k, v)) - }) { + // We want to iterate the `approval_entries` of the block entry as these contain all + // assignments that also link all approval votes. + for approval_entry in entry.approval_entries.values_mut() { // Propagate the message to all peers in the required routing set OR // randomly sample peers. 
{ - let random_routing = &mut message_state.random_routing; - let required_routing = message_state.required_routing; + let required_routing = approval_entry.routing_info().required_routing; + let random_routing = &mut approval_entry.routing_info_mut().random_routing; let rng = &mut *rng; let mut peer_filter = move |peer_id| { let in_topology = topology.as_ref().map_or(false, |t| { @@ -1563,39 +1746,24 @@ impl State { } } - let message_subject = MessageSubject(block, candidate_index, *validator); + let assignment_message = approval_entry.assignment(); + let approval_messages = approval_entry.approvals(); + let (assignment_knowledge, message_kind) = + approval_entry.create_assignment_knowledge(block); - let assignment_message = ( - IndirectAssignmentCert { - block_hash: block, - validator: *validator, - cert: message_state.approval_state.assignment_cert().clone(), - }, - candidate_index, - ); - - let approval_message = - message_state.approval_state.approval_signature().map(|signature| { - IndirectSignedApprovalVote { - block_hash: block, - validator: *validator, - candidate_index, - signature, - } - }); - - if !peer_knowledge.contains(&message_subject, MessageKind::Assignment) { - peer_knowledge - .sent - .insert(message_subject.clone(), MessageKind::Assignment); + // Only send stuff a peer doesn't know in the context of a relay chain block. + if !peer_knowledge.contains(&assignment_knowledge, message_kind) { + peer_knowledge.sent.insert(assignment_knowledge, message_kind); assignments_to_send.push(assignment_message); } - if let Some(approval_message) = approval_message { - if !peer_knowledge.contains(&message_subject, MessageKind::Approval) { - peer_knowledge - .sent - .insert(message_subject.clone(), MessageKind::Approval); + // Filter approval votes. 
+ for approval_message in approval_messages { + let (approval_knowledge, message_kind) = approval_entry + .create_approval_knowledge(block, approval_message.candidate_index); + + if !peer_knowledge.contains(&approval_knowledge, message_kind) { + peer_knowledge.sent.insert(approval_knowledge, message_kind); approvals_to_send.push(approval_message); } } @@ -1609,23 +1777,30 @@ impl State { gum::trace!( target: LOG_TARGET, ?peer_id, + ?protocol_version, num = assignments_to_send.len(), "Sending assignments to unified peer", ); - send_assignments_batched(sender, assignments_to_send, peer_id, peer_protocol_version) - .await; + send_assignments_batched( + sender, + assignments_to_send, + &vec![(peer_id, protocol_version)], + ) + .await; } if !approvals_to_send.is_empty() { gum::trace!( target: LOG_TARGET, ?peer_id, + ?protocol_version, num = approvals_to_send.len(), "Sending approvals to unified peer", ); - send_approvals_batched(sender, approvals_to_send, peer_id, peer_protocol_version).await; + send_approvals_batched(sender, approvals_to_send, &vec![(peer_id, protocol_version)]) + .await; } } @@ -1661,7 +1836,6 @@ impl State { adjust_required_routing_and_propagate( ctx, - &self.peer_data, &mut self.blocks, &self.topologies, |block_entry| { @@ -1683,13 +1857,13 @@ impl State { false } }, - |_, _, _| {}, + |required_routing, _, _| *required_routing, + &self.peer_views, ) .await; adjust_required_routing_and_propagate( ctx, - &self.peer_data, &mut self.blocks, &self.topologies, |block_entry| { @@ -1708,30 +1882,110 @@ impl State { lag = ?self.approval_checking_lag, "Encountered old block pending gossip topology", ); - return + return *required_routing } + let mut new_required_routing = *required_routing; + if config.l1_threshold.as_ref().map_or(false, |t| &self.approval_checking_lag >= t) { // Message originator sends to everyone. - if local && *required_routing != RequiredRouting::All { + if local && new_required_routing != RequiredRouting::All { metrics.on_aggression_l1(); - *required_routing = RequiredRouting::All; + new_required_routing = RequiredRouting::All; } } if config.l2_threshold.as_ref().map_or(false, |t| &self.approval_checking_lag >= t) { // Message originator sends to everyone. Everyone else sends to XY. - if !local && *required_routing != RequiredRouting::GridXY { + if !local && new_required_routing != RequiredRouting::GridXY { metrics.on_aggression_l2(); - *required_routing = RequiredRouting::GridXY; + new_required_routing = RequiredRouting::GridXY; } } + new_required_routing }, + &self.peer_views, ) .await; } + + // Filter out invalid candidate index and certificate core bitfields. + // For each invalid assignment we also punish the peer. + async fn sanitize_v1_assignments( + &mut self, + peer_id: PeerId, + sender: &mut impl overseer::ApprovalDistributionSenderTrait, + assignments: Vec<(IndirectAssignmentCert, CandidateIndex)>, + ) -> Vec<(IndirectAssignmentCertV2, CandidateBitfield)> { + let mut sanitized_assignments = Vec::new(); + for (cert, candidate_index) in assignments.into_iter() { + let cert_bitfield_bits = match cert.cert.kind { + AssignmentCertKind::RelayVRFDelay { core_index } => core_index.0 as usize + 1, + // We don't want to run the VRF yet, but the output is always bounded by `n_cores`. + // We assume `candidate_bitfield` length for the core bitfield and we just check + // against `MAX_BITFIELD_SIZE` later. + AssignmentCertKind::RelayVRFModulo { .. 
} => candidate_index as usize + 1, + }; + + let candidate_bitfield_bits = candidate_index as usize + 1; + + // Ensure bitfields length under hard limit. + if cert_bitfield_bits > MAX_BITFIELD_SIZE || candidate_bitfield_bits > MAX_BITFIELD_SIZE + { + // Punish the peer for the invalid message. + modify_reputation(&mut self.reputation, sender, peer_id, COST_OVERSIZED_BITFIELD) + .await; + } else { + sanitized_assignments.push((cert.into(), candidate_index.into())) + } + } + + sanitized_assignments + } + + // Filter out oversized candidate and certificate core bitfields. + // For each invalid assignment we also punish the peer. + async fn sanitize_v2_assignments( + &mut self, + peer_id: PeerId, + sender: &mut impl overseer::ApprovalDistributionSenderTrait, + assignments: Vec<(IndirectAssignmentCertV2, CandidateBitfield)>, + ) -> Vec<(IndirectAssignmentCertV2, CandidateBitfield)> { + let mut sanitized_assignments = Vec::new(); + for (cert, candidate_bitfield) in assignments.into_iter() { + let cert_bitfield_bits = match &cert.cert.kind { + AssignmentCertKindV2::RelayVRFDelay { core_index } => core_index.0 as usize + 1, + // We don't want to run the VRF yet, but the output is always bounded by `n_cores`. + // We assume `candidate_bitfield` length for the core bitfield and we just check + // against `MAX_BITFIELD_SIZE` later. + AssignmentCertKindV2::RelayVRFModulo { .. } => candidate_bitfield.len(), + AssignmentCertKindV2::RelayVRFModuloCompact { core_bitfield } => + core_bitfield.len(), + }; + + let candidate_bitfield_bits = candidate_bitfield.len(); + + // Our bitfield has `Lsb0`. + let msb = candidate_bitfield_bits - 1; + + // Ensure bitfields length under hard limit. + if cert_bitfield_bits > MAX_BITFIELD_SIZE + || candidate_bitfield_bits > MAX_BITFIELD_SIZE + // Ensure minimum bitfield size - MSB needs to be one. + || !candidate_bitfield.bit_at(msb.as_bit_index()) + { + // Punish the peer for the invalid message. + modify_reputation(&mut self.reputation, sender, peer_id, COST_OVERSIZED_BITFIELD) + .await; + } else { + sanitized_assignments.push((cert, candidate_bitfield)) + } + } + + sanitized_assignments + } } // This adjusts the required routing of messages in blocks that pass the block filter @@ -1749,14 +2003,14 @@ impl State { #[overseer::contextbounds(ApprovalDistribution, prefix = self::overseer)] async fn adjust_required_routing_and_propagate( ctx: &mut Context, - peer_data: &HashMap, blocks: &mut HashMap, topologies: &SessionGridTopologies, block_filter: BlockFilter, routing_modifier: RoutingModifier, + peer_views: &HashMap, ) where BlockFilter: Fn(&mut BlockEntry) -> bool, - RoutingModifier: Fn(&mut RequiredRouting, bool, &ValidatorIndex), + RoutingModifier: Fn(&RequiredRouting, bool, &ValidatorIndex) -> RequiredRouting, { let mut peer_assignments = HashMap::new(); let mut peer_approvals = HashMap::new(); @@ -1768,64 +2022,55 @@ async fn adjust_required_routing_and_propagate t, + None => continue, + }; - if message_state.required_routing.is_empty() { - continue - } + // We just need to iterate the `approval_entries` of the block entry as these contain all + // assignments that also link all approval votes. 
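// A compact sketch of the aggression escalation computed above, using plain types in place of
// `RequiredRouting` and the aggression config. Note the routing modifier is now a pure
// function: it receives the current routing and returns the (possibly escalated) routing, and
// the caller applies the result via `update_required_routing` instead of mutating in place.
#[derive(Clone, Copy, PartialEq, Debug)]
enum Routing {
    Default,
    GridXY,
    All,
}

fn escalate_routing(
    current: Routing,
    we_are_the_originator: bool,
    approval_checking_lag: u32,
    l1_threshold: Option<u32>,
    l2_threshold: Option<u32>,
) -> Routing {
    let mut new = current;
    // L1: the originator of the message starts sending it to every peer.
    if l1_threshold.map_or(false, |t| approval_checking_lag >= t) && we_are_the_originator {
        new = Routing::All;
    }
    // L2: everyone else widens their routing to the full X/Y grid dimensions.
    if l2_threshold.map_or(false, |t| approval_checking_lag >= t) && !we_are_the_originator {
        new = Routing::GridXY;
    }
    new
}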
+ for approval_entry in block_entry.approval_entries.values_mut() { + let new_required_routing = routing_modifier( + &approval_entry.routing_info().required_routing, + approval_entry.routing_info().local, + &approval_entry.validator_index(), + ); - let topology = match topologies.get_topology(block_entry.session) { - Some(t) => t, - None => continue, - }; + approval_entry.update_required_routing(new_required_routing); - // Propagate the message to all peers in the required routing set. - let message_subject = MessageSubject(*block_hash, candidate_index, *validator); + if approval_entry.routing_info().required_routing.is_empty() { + continue + } - let assignment_message = ( - IndirectAssignmentCert { - block_hash: *block_hash, - validator: *validator, - cert: message_state.approval_state.assignment_cert().clone(), - }, - candidate_index, - ); - let approval_message = - message_state.approval_state.approval_signature().map(|signature| { - IndirectSignedApprovalVote { - block_hash: *block_hash, - validator: *validator, - candidate_index, - signature, - } - }); + let assignment_message = approval_entry.assignment(); + let approval_messages = approval_entry.approvals(); + let (assignment_knowledge, message_kind) = + approval_entry.create_assignment_knowledge(*block_hash); for (peer, peer_knowledge) in &mut block_entry.known_by { if !topology .local_grid_neighbors() - .route_to_peer(message_state.required_routing, peer) + .route_to_peer(approval_entry.routing_info().required_routing, peer) { continue } - if !peer_knowledge.contains(&message_subject, MessageKind::Assignment) { - peer_knowledge.sent.insert(message_subject.clone(), MessageKind::Assignment); + // Only send stuff a peer doesn't know in the context of a relay chain block. + if !peer_knowledge.contains(&assignment_knowledge, message_kind) { + peer_knowledge.sent.insert(assignment_knowledge.clone(), message_kind); peer_assignments .entry(*peer) .or_insert_with(Vec::new) .push(assignment_message.clone()); } - if let Some(approval_message) = approval_message.as_ref() { - if !peer_knowledge.contains(&message_subject, MessageKind::Approval) { - peer_knowledge.sent.insert(message_subject.clone(), MessageKind::Approval); + // Filter approval votes. + for approval_message in &approval_messages { + let (approval_knowledge, message_kind) = approval_entry + .create_approval_knowledge(*block_hash, approval_message.candidate_index); + + if !peer_knowledge.contains(&approval_knowledge, message_kind) { + peer_knowledge.sent.insert(approval_knowledge, message_kind); peer_approvals .entry(*peer) .or_insert_with(Vec::new) @@ -1837,24 +2082,32 @@ async fn adjust_required_routing_and_propagate continue, - Some(v) => v, - }; - - send_assignments_batched(ctx.sender(), assignments_packet, peer, peer_protocol_version) + if let Some(peer_view) = peer_views.get(&peer) { + send_assignments_batched( + ctx.sender(), + assignments_packet, + &vec![(peer, peer_view.version)], + ) .await; + } else { + // This should never happen. 
+ gum::warn!(target: LOG_TARGET, ?peer, "Unknown protocol version for peer",); + } } for (peer, approvals_packet) in peer_approvals { - let peer_protocol_version = match peer_data.get(&peer).map(|pd| pd.version) { - None => continue, - Some(v) => v, - }; - - send_approvals_batched(ctx.sender(), approvals_packet, peer, peer_protocol_version).await; + if let Some(peer_view) = peer_views.get(&peer) { + send_approvals_batched( + ctx.sender(), + approvals_packet, + &vec![(peer, peer_view.version)], + ) + .await; + } else { + // This should never happen. + gum::warn!(target: LOG_TARGET, ?peer, "Unknown protocol version for peer",); + } } } @@ -1956,12 +2209,21 @@ impl ApprovalDistribution { ApprovalDistributionMessage::NewBlocks(metas) => { state.handle_new_blocks(ctx, metrics, metas, rng).await; }, - ApprovalDistributionMessage::DistributeAssignment(cert, candidate_index) => { + ApprovalDistributionMessage::DistributeAssignment(cert, candidate_indices) => { + let _span = state + .spans + .get(&cert.block_hash) + .map(|span| span.child("import-and-distribute-assignment")) + .unwrap_or_else(|| jaeger::Span::new(&cert.block_hash, "distribute-assignment")) + .with_string_tag("block-hash", format!("{:?}", cert.block_hash)) + .with_stage(jaeger::Stage::ApprovalDistribution); + gum::debug!( target: LOG_TARGET, - "Distributing our assignment on candidate (block={}, index={})", - cert.block_hash, - candidate_index, + ?candidate_indices, + block_hash = ?cert.block_hash, + assignment_kind = ?cert.cert.kind, + "Distributing our assignment on candidates", ); state @@ -1970,7 +2232,7 @@ impl ApprovalDistribution { &metrics, MessageSource::Local, cert, - candidate_index, + candidate_indices, rng, ) .await; @@ -2004,49 +2266,6 @@ impl ApprovalDistribution { } } -fn versioned_approvals_packet( - version: ValidationVersion, - approvals: Vec, -) -> VersionedValidationProtocol { - match version { - ValidationVersion::V1 => - Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution( - protocol_v1::ApprovalDistributionMessage::Approvals(approvals), - )), - ValidationVersion::V2 => - Versioned::V2(protocol_v2::ValidationProtocol::ApprovalDistribution( - protocol_v2::ApprovalDistributionMessage::Approvals(approvals), - )), - } -} - -fn versioned_assignments_packet( - version: ValidationVersion, - assignments: Vec<(IndirectAssignmentCert, CandidateIndex)>, -) -> VersionedValidationProtocol { - match version { - ValidationVersion::V1 => - Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution( - protocol_v1::ApprovalDistributionMessage::Assignments(assignments), - )), - ValidationVersion::V2 => - Versioned::V2(protocol_v2::ValidationProtocol::ApprovalDistribution( - protocol_v2::ApprovalDistributionMessage::Assignments(assignments), - )), - } -} - -fn filter_peers_by_version( - peers: &[(PeerId, ValidationVersion)], - version: ValidationVersion, -) -> Vec { - peers - .iter() - .filter(|(_, v)| v == &version) - .map(|(peer_id, _)| *peer_id) - .collect() -} - #[overseer::subsystem(ApprovalDistribution, error=SubsystemError, prefix=self::overseer)] impl ApprovalDistribution { fn start(self, ctx: Context) -> SpawnedSubsystem { @@ -2071,7 +2290,7 @@ const fn ensure_size_not_zero(size: usize) -> usize { /// the protocol configuration. 
pub const MAX_ASSIGNMENT_BATCH_SIZE: usize = ensure_size_not_zero( MAX_NOTIFICATION_SIZE as usize / - std::mem::size_of::<(IndirectAssignmentCert, CandidateIndex)>() / + std::mem::size_of::<(IndirectAssignmentCertV2, CandidateIndex)>() / 3, ); @@ -2080,6 +2299,57 @@ pub const MAX_APPROVAL_BATCH_SIZE: usize = ensure_size_not_zero( MAX_NOTIFICATION_SIZE as usize / std::mem::size_of::() / 3, ); +// Low level helper for sending assignments. +async fn send_assignments_batched_inner( + sender: &mut impl overseer::ApprovalDistributionSenderTrait, + batch: impl IntoIterator, + peers: Vec, + peer_version: ValidationVersion, +) { + if peer_version == ValidationVersion::VStaging { + sender + .send_message(NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::VStaging(protocol_vstaging::ValidationProtocol::ApprovalDistribution( + protocol_vstaging::ApprovalDistributionMessage::Assignments( + batch.into_iter().collect(), + ), + )), + )) + .await; + } else { + // Create a batch of v1 assignments from v2 assignments that are compatible with v1. + // `IndirectAssignmentCertV2` -> `IndirectAssignmentCert` + let batch = batch + .into_iter() + .filter_map(|(cert, candidates)| { + cert.try_into().ok().map(|cert| { + ( + cert, + // First 1 bit index is the candidate index. + candidates + .first_one() + .map(|index| index as CandidateIndex) + .expect("Assignment was checked for not being empty; qed"), + ) + }) + }) + .collect(); + let message = if peer_version == ValidationVersion::V1 { + Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution( + protocol_v1::ApprovalDistributionMessage::Assignments(batch), + )) + } else { + Versioned::V2(protocol_v2::ValidationProtocol::ApprovalDistribution( + protocol_v2::ApprovalDistributionMessage::Assignments(batch), + )) + }; + sender + .send_message(NetworkBridgeTxMessage::SendValidationMessage(peers, message)) + .await; + } +} + /// Send assignments while honoring the `max_notification_size` of the protocol. /// /// Splitting the messages into multiple notifications allows more granular processing at the @@ -2087,37 +2357,121 @@ pub const MAX_APPROVAL_BATCH_SIZE: usize = ensure_size_not_zero( /// of assignments and can `select!` other tasks. pub(crate) async fn send_assignments_batched( sender: &mut impl overseer::ApprovalDistributionSenderTrait, - assignments: Vec<(IndirectAssignmentCert, CandidateIndex)>, - peer: PeerId, - protocol_version: ValidationVersion, + v2_assignments: impl IntoIterator + Clone, + peers: &[(PeerId, ProtocolVersion)], ) { - let mut batches = assignments.into_iter().peekable(); + let v1_peers = filter_by_peer_version(peers, ValidationVersion::V1.into()); + let v2_peers = filter_by_peer_version(peers, ValidationVersion::V2.into()); + let vstaging_peers = filter_by_peer_version(peers, ValidationVersion::VStaging.into()); + + // V1 and V2 validation protocol do not have any changes with regard to + // ApprovalDistributionMessage so they can be treated the same. + if !v1_peers.is_empty() || !v2_peers.is_empty() { + // Older peers(v1) do not understand `AssignmentsV2` messages, so we have to filter these + // out. 
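// A simplified, self-contained sketch of the down-conversion performed below for older peers:
// only v2 assignments that claim exactly one candidate can be expressed on the v1/v2 wire
// format, and the single set bit becomes the `CandidateIndex`. A `Vec<bool>` stands in for the
// real candidate bitfield type; the real code additionally converts the certificate itself
// (`IndirectAssignmentCertV2` -> `IndirectAssignmentCert`) via `try_into`, dropping
// assignments that cannot be expressed in v1.
type CandidateIndex = u32;

fn to_v1_compatible<C: Clone>(v2_assignments: &[(C, Vec<bool>)]) -> Vec<(C, CandidateIndex)> {
    v2_assignments
        .iter()
        // Keep only assignments claiming exactly one candidate.
        .filter(|(_, claimed)| claimed.iter().filter(|b| **b).count() == 1)
        .map(|(cert, claimed)| {
            // Safe: the filter above guarantees exactly one bit is set.
            let index = claimed.iter().position(|b| *b).expect("exactly one bit is set; qed")
                as CandidateIndex;
            (cert.clone(), index)
        })
        .collect()
}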
+ let v1_assignments = v2_assignments + .clone() + .into_iter() + .filter(|(_, candidates)| candidates.count_ones() == 1); + + let mut v1_batches = v1_assignments.peekable(); + + while v1_batches.peek().is_some() { + let batch: Vec<_> = v1_batches.by_ref().take(MAX_ASSIGNMENT_BATCH_SIZE).collect(); + if !v1_peers.is_empty() { + send_assignments_batched_inner( + sender, + batch.clone(), + v1_peers.clone(), + ValidationVersion::V1, + ) + .await; + } - while batches.peek().is_some() { - let batch: Vec<_> = batches.by_ref().take(MAX_ASSIGNMENT_BATCH_SIZE).collect(); - let versioned = versioned_assignments_packet(protocol_version, batch); + if !v2_peers.is_empty() { + send_assignments_batched_inner( + sender, + batch, + v2_peers.clone(), + ValidationVersion::V2, + ) + .await; + } + } + } - sender - .send_message(NetworkBridgeTxMessage::SendValidationMessage(vec![peer], versioned)) + if !vstaging_peers.is_empty() { + let mut vstaging = v2_assignments.into_iter().peekable(); + + while vstaging.peek().is_some() { + let batch = vstaging.by_ref().take(MAX_ASSIGNMENT_BATCH_SIZE).collect::>(); + send_assignments_batched_inner( + sender, + batch, + vstaging_peers.clone(), + ValidationVersion::VStaging, + ) .await; + } } } -/// Send approvals while honoring the `max_notification_size` of the protocol. +/// Send approvals while honoring the `max_notification_size` of the protocol and peer version. pub(crate) async fn send_approvals_batched( sender: &mut impl overseer::ApprovalDistributionSenderTrait, - approvals: Vec, - peer: PeerId, - protocol_version: ValidationVersion, + approvals: impl IntoIterator + Clone, + peers: &[(PeerId, ProtocolVersion)], ) { - let mut batches = approvals.into_iter().peekable(); + let v1_peers = filter_by_peer_version(peers, ValidationVersion::V1.into()); + let v2_peers = filter_by_peer_version(peers, ValidationVersion::V2.into()); + let vstaging_peers = filter_by_peer_version(peers, ValidationVersion::VStaging.into()); + + if !v1_peers.is_empty() || !v2_peers.is_empty() { + let mut batches = approvals.clone().into_iter().peekable(); + + while batches.peek().is_some() { + let batch: Vec<_> = batches.by_ref().take(MAX_APPROVAL_BATCH_SIZE).collect(); + + if !v1_peers.is_empty() { + sender + .send_message(NetworkBridgeTxMessage::SendValidationMessage( + v1_peers.clone(), + Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution( + protocol_v1::ApprovalDistributionMessage::Approvals(batch.clone()), + )), + )) + .await; + } - while batches.peek().is_some() { - let batch: Vec<_> = batches.by_ref().take(MAX_APPROVAL_BATCH_SIZE).collect(); - let versioned = versioned_approvals_packet(protocol_version, batch); + if !v2_peers.is_empty() { + sender + .send_message(NetworkBridgeTxMessage::SendValidationMessage( + v2_peers.clone(), + Versioned::V2(protocol_v2::ValidationProtocol::ApprovalDistribution( + protocol_v2::ApprovalDistributionMessage::Approvals(batch), + )), + )) + .await; + } + } + } - sender - .send_message(NetworkBridgeTxMessage::SendValidationMessage(vec![peer], versioned)) - .await; + if !vstaging_peers.is_empty() { + let mut batches = approvals.into_iter().peekable(); + + while batches.peek().is_some() { + let batch: Vec<_> = batches.by_ref().take(MAX_APPROVAL_BATCH_SIZE).collect(); + + sender + .send_message(NetworkBridgeTxMessage::SendValidationMessage( + vstaging_peers.clone(), + Versioned::VStaging( + protocol_vstaging::ValidationProtocol::ApprovalDistribution( + protocol_vstaging::ApprovalDistributionMessage::Approvals(batch), + ), + ), + )) + .await; + } } 
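// The batching pattern used by both senders above, as a stand-alone sketch: drain the
// iterator in chunks of at most `max_batch` items so that no single network notification
// exceeds the protocol's `max_notification_size`.
fn into_batches<T>(items: Vec<T>, max_batch: usize) -> Vec<Vec<T>> {
    let mut it = items.into_iter().peekable();
    let mut batches = Vec::new();
    while it.peek().is_some() {
        batches.push(it.by_ref().take(max_batch).collect());
    }
    batches
}

// For example, with `MAX_APPROVAL_BATCH_SIZE == 3`, seven approvals are split into batches of
// 3, 3 and 1, each sent as its own `SendValidationMessage`.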
} diff --git a/polkadot/node/network/approval-distribution/src/metrics.rs b/polkadot/node/network/approval-distribution/src/metrics.rs index 896866ce099a18c993540877a73144fc194a9a81..6864259e6fdb90ce3be3553d101afe1023f1b523 100644 --- a/polkadot/node/network/approval-distribution/src/metrics.rs +++ b/polkadot/node/network/approval-distribution/src/metrics.rs @@ -15,6 +15,7 @@ // along with Polkadot. If not, see . use polkadot_node_metrics::metrics::{prometheus, Metrics as MetricsTrait}; +use polkadot_node_primitives::approval::v2::AssignmentCertKindV2; /// Approval Distribution metrics. #[derive(Default, Clone)] @@ -22,21 +23,34 @@ pub struct Metrics(Option); #[derive(Clone)] struct MetricsInner { - assignments_imported_total: prometheus::Counter, + assignments_imported_total: prometheus::CounterVec, approvals_imported_total: prometheus::Counter, unified_with_peer_total: prometheus::Counter, aggression_l1_messages_total: prometheus::Counter, aggression_l2_messages_total: prometheus::Counter, - time_unify_with_peer: prometheus::Histogram, time_import_pending_now_known: prometheus::Histogram, time_awaiting_approval_voting: prometheus::Histogram, } +trait AsLabel { + fn as_label(&self) -> &str; +} + +impl AsLabel for &AssignmentCertKindV2 { + fn as_label(&self) -> &str { + match self { + AssignmentCertKindV2::RelayVRFDelay { .. } => "VRF Delay", + AssignmentCertKindV2::RelayVRFModulo { .. } => "VRF Modulo", + AssignmentCertKindV2::RelayVRFModuloCompact { .. } => "VRF Modulo Compact", + } + } +} + impl Metrics { - pub(crate) fn on_assignment_imported(&self) { + pub(crate) fn on_assignment_imported(&self, kind: &AssignmentCertKindV2) { if let Some(metrics) = &self.0 { - metrics.assignments_imported_total.inc(); + metrics.assignments_imported_total.with_label_values(&[kind.as_label()]).inc(); } } @@ -89,9 +103,12 @@ impl MetricsTrait for Metrics { fn try_register(registry: &prometheus::Registry) -> Result { let metrics = MetricsInner { assignments_imported_total: prometheus::register( - prometheus::Counter::new( - "polkadot_parachain_assignments_imported_total", - "Number of valid assignments imported locally or from other peers.", + prometheus::CounterVec::new( + prometheus::Opts::new( + "polkadot_parachain_assignments_imported_total", + "Number of valid assignments imported locally or from other peers.", + ), + &["kind"], )?, registry, )?, @@ -124,10 +141,16 @@ impl MetricsTrait for Metrics { registry, )?, time_unify_with_peer: prometheus::register( - prometheus::Histogram::with_opts(prometheus::HistogramOpts::new( - "polkadot_parachain_time_unify_with_peer", - "Time spent within fn `unify_with_peer`.", - ).buckets(vec![0.000625, 0.00125,0.0025, 0.005, 0.0075, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0,]))?, + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "polkadot_parachain_time_unify_with_peer", + "Time spent within fn `unify_with_peer`.", + ) + .buckets(vec![ + 0.000625, 0.00125, 0.0025, 0.005, 0.0075, 0.01, 0.025, 0.05, 0.1, 0.25, + 0.5, 1.0, 2.5, 5.0, 10.0, + ]), + )?, registry, )?, time_import_pending_now_known: prometheus::register( diff --git a/polkadot/node/network/approval-distribution/src/tests.rs b/polkadot/node/network/approval-distribution/src/tests.rs index 29c7d8aa45daa1f88ec6737c448e9e2d8a493bb5..33c38c7c5dfa309ae7dc52ec38dd541bba52a3d4 100644 --- a/polkadot/node/network/approval-distribution/src/tests.rs +++ b/polkadot/node/network/approval-distribution/src/tests.rs @@ -24,20 +24,26 @@ use polkadot_node_network_protocol::{ view, 
ObservedRole, }; use polkadot_node_primitives::approval::{ - AssignmentCertKind, VrfOutput, VrfProof, VrfSignature, RELAY_VRF_MODULO_CONTEXT, + v1::{ + AssignmentCert, AssignmentCertKind, IndirectAssignmentCert, VrfOutput, VrfProof, + VrfSignature, + }, + v2::{ + AssignmentCertKindV2, AssignmentCertV2, CoreBitfield, IndirectAssignmentCertV2, + RELAY_VRF_MODULO_CONTEXT, + }, }; use polkadot_node_subsystem::messages::{ network_bridge_event, AllMessages, ApprovalCheckError, ReportPeerMessage, }; use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_util::{reputation::add_reputation, TimeoutExt as _}; -use polkadot_primitives::{AuthorityDiscoveryId, BlakeTwo256, HashT}; +use polkadot_primitives::{AuthorityDiscoveryId, BlakeTwo256, CoreIndex, HashT}; use polkadot_primitives_test_helpers::dummy_signature; use rand::SeedableRng; use sp_authority_discovery::AuthorityPair as AuthorityDiscoveryPair; use sp_core::crypto::Pair as PairT; use std::time::Duration; - type VirtualOverseer = test_helpers::TestSubsystemContextHandle; fn test_harness>( @@ -219,15 +225,15 @@ async fn setup_gossip_topology( async fn setup_peer_with_view( virtual_overseer: &mut VirtualOverseer, peer_id: &PeerId, - validation_version: ValidationVersion, view: View, + version: ValidationVersion, ) { overseer_send( virtual_overseer, ApprovalDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerConnected( *peer_id, ObservedRole::Full, - validation_version.into(), + version.into(), None, )), ) @@ -244,12 +250,43 @@ async fn setup_peer_with_view( async fn send_message_from_peer( virtual_overseer: &mut VirtualOverseer, peer_id: &PeerId, - msg: net_protocol::ApprovalDistributionMessage, + msg: protocol_v1::ApprovalDistributionMessage, +) { + overseer_send( + virtual_overseer, + ApprovalDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerMessage( + *peer_id, + Versioned::V1(msg), + )), + ) + .await; +} + +async fn send_message_from_peer_v2( + virtual_overseer: &mut VirtualOverseer, + peer_id: &PeerId, + msg: protocol_v2::ApprovalDistributionMessage, ) { overseer_send( virtual_overseer, ApprovalDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerMessage( - *peer_id, msg, + *peer_id, + Versioned::V2(msg), + )), + ) + .await; +} + +async fn send_message_from_peer_vstaging( + virtual_overseer: &mut VirtualOverseer, + peer_id: &PeerId, + msg: protocol_vstaging::ApprovalDistributionMessage, +) { + overseer_send( + virtual_overseer, + ApprovalDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerMessage( + *peer_id, + Versioned::VStaging(msg), )), ) .await; @@ -273,6 +310,28 @@ fn fake_assignment_cert(block_hash: Hash, validator: ValidatorIndex) -> Indirect } } +fn fake_assignment_cert_v2( + block_hash: Hash, + validator: ValidatorIndex, + core_bitfield: CoreBitfield, +) -> IndirectAssignmentCertV2 { + let ctx = schnorrkel::signing_context(RELAY_VRF_MODULO_CONTEXT); + let msg = b"WhenParachains?"; + let mut prng = rand_core::OsRng; + let keypair = schnorrkel::Keypair::generate_with(&mut prng); + let (inout, proof, _) = keypair.vrf_sign(ctx.bytes(msg)); + let out = inout.to_output(); + + IndirectAssignmentCertV2 { + block_hash, + validator, + cert: AssignmentCertV2 { + kind: AssignmentCertKindV2::RelayVRFModuloCompact { core_bitfield }, + vrf: VrfSignature { output: VrfOutput(out), proof: VrfProof(proof) }, + }, + } +} + async fn expect_reputation_change( virtual_overseer: &mut VirtualOverseer, peer_id: &PeerId, @@ -331,9 +390,9 @@ fn try_import_the_same_assignment() 
{ let _ = test_harness(state_without_reputation_delay(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; // setup peers - setup_peer_with_view(overseer, &peer_a, ValidationVersion::V1, view![]).await; - setup_peer_with_view(overseer, &peer_b, ValidationVersion::V1, view![hash]).await; - setup_peer_with_view(overseer, &peer_c, ValidationVersion::V1, view![hash]).await; + setup_peer_with_view(overseer, &peer_a, view![], ValidationVersion::V1).await; + setup_peer_with_view(overseer, &peer_b, view![hash], ValidationVersion::V1).await; + setup_peer_with_view(overseer, &peer_c, view![hash], ValidationVersion::V1).await; // new block `hash_a` with 1 candidates let meta = BlockApprovalMeta { @@ -353,7 +412,7 @@ fn try_import_the_same_assignment() { let assignments = vec![(cert.clone(), 0u32)]; let msg = protocol_v1::ApprovalDistributionMessage::Assignments(assignments.clone()); - send_message_from_peer(overseer, &peer_a, Versioned::V1(msg)).await; + send_message_from_peer(overseer, &peer_a, msg).await; expect_reputation_change(overseer, &peer_a, COST_UNEXPECTED_MESSAGE).await; @@ -362,10 +421,11 @@ fn try_import_the_same_assignment() { overseer_recv(overseer).await, AllMessages::ApprovalVoting(ApprovalVotingMessage::CheckAndImportAssignment( assignment, - 0u32, + claimed_indices, tx, )) => { - assert_eq!(assignment, cert); + assert_eq!(claimed_indices, 0u32.into()); + assert_eq!(assignment, cert.into()); tx.send(AssignmentCheckResult::Accepted).unwrap(); } ); @@ -385,12 +445,104 @@ fn try_import_the_same_assignment() { } ); - // setup new peer - setup_peer_with_view(overseer, &peer_d, ValidationVersion::V1, view![]).await; + // setup new peer with V2 + setup_peer_with_view(overseer, &peer_d, view![], ValidationVersion::VStaging).await; // send the same assignment from peer_d let msg = protocol_v1::ApprovalDistributionMessage::Assignments(assignments); - send_message_from_peer(overseer, &peer_d, Versioned::V1(msg)).await; + send_message_from_peer(overseer, &peer_d, msg).await; + + expect_reputation_change(overseer, &peer_d, COST_UNEXPECTED_MESSAGE).await; + expect_reputation_change(overseer, &peer_d, BENEFIT_VALID_MESSAGE).await; + + assert!(overseer.recv().timeout(TIMEOUT).await.is_none(), "no message should be sent"); + virtual_overseer + }); +} + +/// Just like `try_import_the_same_assignment` but use `VRFModuloCompact` assignments for multiple +/// cores. 
+#[test] +fn try_import_the_same_assignment_v2() { + let peer_a = PeerId::random(); + let peer_b = PeerId::random(); + let peer_c = PeerId::random(); + let peer_d = PeerId::random(); + let parent_hash = Hash::repeat_byte(0xFF); + let hash = Hash::repeat_byte(0xAA); + + let _ = test_harness(state_without_reputation_delay(), |mut virtual_overseer| async move { + let overseer = &mut virtual_overseer; + // setup peers + setup_peer_with_view(overseer, &peer_a, view![], ValidationVersion::VStaging).await; + setup_peer_with_view(overseer, &peer_b, view![hash], ValidationVersion::VStaging).await; + setup_peer_with_view(overseer, &peer_c, view![hash], ValidationVersion::VStaging).await; + + // new block `hash_a` with 1 candidates + let meta = BlockApprovalMeta { + hash, + parent_hash, + number: 2, + candidates: vec![Default::default(); 1], + slot: 1.into(), + session: 1, + }; + let msg = ApprovalDistributionMessage::NewBlocks(vec![meta]); + overseer_send(overseer, msg).await; + + // send the assignment related to `hash` + let validator_index = ValidatorIndex(0); + let cores = vec![1, 2, 3, 4]; + let core_bitfield: CoreBitfield = cores + .iter() + .map(|index| CoreIndex(*index)) + .collect::>() + .try_into() + .unwrap(); + + let cert = fake_assignment_cert_v2(hash, validator_index, core_bitfield.clone()); + let assignments = vec![(cert.clone(), cores.clone().try_into().unwrap())]; + + let msg = protocol_vstaging::ApprovalDistributionMessage::Assignments(assignments.clone()); + send_message_from_peer_vstaging(overseer, &peer_a, msg).await; + + expect_reputation_change(overseer, &peer_a, COST_UNEXPECTED_MESSAGE).await; + + // send an `Accept` message from the Approval Voting subsystem + assert_matches!( + overseer_recv(overseer).await, + AllMessages::ApprovalVoting(ApprovalVotingMessage::CheckAndImportAssignment( + assignment, + claimed_indices, + tx, + )) => { + assert_eq!(claimed_indices, cores.try_into().unwrap()); + assert_eq!(assignment, cert.into()); + tx.send(AssignmentCheckResult::Accepted).unwrap(); + } + ); + + expect_reputation_change(overseer, &peer_a, BENEFIT_VALID_MESSAGE_FIRST).await; + + assert_matches!( + overseer_recv(overseer).await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::VStaging(protocol_vstaging::ValidationProtocol::ApprovalDistribution( + protocol_vstaging::ApprovalDistributionMessage::Assignments(assignments) + )) + )) => { + assert_eq!(peers.len(), 2); + assert_eq!(assignments.len(), 1); + } + ); + + // setup new peer + setup_peer_with_view(overseer, &peer_d, view![], ValidationVersion::VStaging).await; + + // send the same assignment from peer_d + let msg = protocol_vstaging::ApprovalDistributionMessage::Assignments(assignments); + send_message_from_peer_vstaging(overseer, &peer_d, msg).await; expect_reputation_change(overseer, &peer_d, COST_UNEXPECTED_MESSAGE).await; expect_reputation_change(overseer, &peer_d, BENEFIT_VALID_MESSAGE).await; @@ -413,7 +565,7 @@ fn delay_reputation_change() { let overseer = &mut virtual_overseer; // Setup peers - setup_peer_with_view(overseer, &peer, ValidationVersion::V1, view![]).await; + setup_peer_with_view(overseer, &peer, view![], ValidationVersion::V1).await; // new block `hash_a` with 1 candidates let meta = BlockApprovalMeta { @@ -433,17 +585,18 @@ fn delay_reputation_change() { let assignments = vec![(cert.clone(), 0u32)]; let msg = protocol_v1::ApprovalDistributionMessage::Assignments(assignments.clone()); - send_message_from_peer(overseer, &peer, Versioned::V1(msg)).await; 
+ send_message_from_peer(overseer, &peer, msg).await; // send an `Accept` message from the Approval Voting subsystem assert_matches!( overseer_recv(overseer).await, AllMessages::ApprovalVoting(ApprovalVotingMessage::CheckAndImportAssignment( assignment, - 0u32, + claimed_candidates, tx, )) => { - assert_eq!(assignment, cert); + assert_eq!(assignment.cert, cert.cert.into()); + assert_eq!(claimed_candidates, vec![0u32].try_into().unwrap()); tx.send(AssignmentCheckResult::Accepted).unwrap(); } ); @@ -474,7 +627,7 @@ fn spam_attack_results_in_negative_reputation_change() { let _ = test_harness(state_without_reputation_delay(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; let peer = &peer_a; - setup_peer_with_view(overseer, peer, ValidationVersion::V1, view![]).await; + setup_peer_with_view(overseer, peer, view![], ValidationVersion::V1).await; // new block `hash_b` with 20 candidates let candidates_count = 20; @@ -501,7 +654,7 @@ fn spam_attack_results_in_negative_reputation_change() { .collect(); let msg = protocol_v1::ApprovalDistributionMessage::Assignments(assignments.clone()); - send_message_from_peer(overseer, peer, Versioned::V1(msg.clone())).await; + send_message_from_peer(overseer, peer, msg.clone()).await; for i in 0..candidates_count { expect_reputation_change(overseer, peer, COST_UNEXPECTED_MESSAGE).await; @@ -513,8 +666,8 @@ fn spam_attack_results_in_negative_reputation_change() { claimed_candidate_index, tx, )) => { - assert_eq!(assignment, assignments[i].0); - assert_eq!(claimed_candidate_index, assignments[i].1); + assert_eq!(assignment, assignments[i].0.clone().into()); + assert_eq!(claimed_candidate_index, assignments[i].1.into()); tx.send(AssignmentCheckResult::Accepted).unwrap(); } ); @@ -533,7 +686,7 @@ fn spam_attack_results_in_negative_reputation_change() { .await; // send the assignments again - send_message_from_peer(overseer, peer, Versioned::V1(msg.clone())).await; + send_message_from_peer(overseer, peer, msg.clone()).await; // each of them will incur `COST_UNEXPECTED_MESSAGE`, not only the first one for _ in 0..candidates_count { @@ -558,7 +711,7 @@ fn peer_sending_us_the_same_we_just_sent_them_is_ok() { let _ = test_harness(state_without_reputation_delay(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; let peer = &peer_a; - setup_peer_with_view(overseer, peer, ValidationVersion::V1, view![]).await; + setup_peer_with_view(overseer, peer, view![], ValidationVersion::V1).await; // new block `hash` with 1 candidates let meta = BlockApprovalMeta { @@ -578,7 +731,10 @@ fn peer_sending_us_the_same_we_just_sent_them_is_ok() { let cert = fake_assignment_cert(hash, validator_index); overseer_send( overseer, - ApprovalDistributionMessage::DistributeAssignment(cert.clone(), candidate_index), + ApprovalDistributionMessage::DistributeAssignment( + cert.clone().into(), + candidate_index.into(), + ), ) .await; @@ -610,12 +766,12 @@ fn peer_sending_us_the_same_we_just_sent_them_is_ok() { // the peer could send us it as well let assignments = vec![(cert, candidate_index)]; let msg = protocol_v1::ApprovalDistributionMessage::Assignments(assignments); - send_message_from_peer(overseer, peer, Versioned::V1(msg.clone())).await; + send_message_from_peer(overseer, peer, msg.clone()).await; assert!(overseer.recv().timeout(TIMEOUT).await.is_none(), "we should not punish the peer"); // send the assignments again - send_message_from_peer(overseer, peer, Versioned::V1(msg)).await; + send_message_from_peer(overseer, peer, msg).await; // 
now we should expect_reputation_change(overseer, peer, COST_DUPLICATE_MESSAGE).await; @@ -633,10 +789,10 @@ fn import_approval_happy_path() { let _ = test_harness(state_without_reputation_delay(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; - // setup peers - setup_peer_with_view(overseer, &peer_a, ValidationVersion::V1, view![]).await; - setup_peer_with_view(overseer, &peer_b, ValidationVersion::V1, view![hash]).await; - setup_peer_with_view(overseer, &peer_c, ValidationVersion::V1, view![hash]).await; + // setup peers with V1 and V2 protocol versions + setup_peer_with_view(overseer, &peer_a, view![], ValidationVersion::V1).await; + setup_peer_with_view(overseer, &peer_b, view![hash], ValidationVersion::VStaging).await; + setup_peer_with_view(overseer, &peer_c, view![hash], ValidationVersion::V1).await; // new block `hash_a` with 1 candidates let meta = BlockApprovalMeta { @@ -656,10 +812,14 @@ fn import_approval_happy_path() { let cert = fake_assignment_cert(hash, validator_index); overseer_send( overseer, - ApprovalDistributionMessage::DistributeAssignment(cert, candidate_index), + ApprovalDistributionMessage::DistributeAssignment( + cert.clone().into(), + candidate_index.into(), + ), ) .await; + // 1 peer is v1 assert_matches!( overseer_recv(overseer).await, AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( @@ -668,7 +828,21 @@ fn import_approval_happy_path() { protocol_v1::ApprovalDistributionMessage::Assignments(assignments) )) )) => { - assert_eq!(peers.len(), 2); + assert_eq!(peers.len(), 1); + assert_eq!(assignments.len(), 1); + } + ); + + // 1 peer is v2 + assert_matches!( + overseer_recv(overseer).await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::VStaging(protocol_vstaging::ValidationProtocol::ApprovalDistribution( + protocol_vstaging::ApprovalDistributionMessage::Assignments(assignments) + )) + )) => { + assert_eq!(peers.len(), 1); assert_eq!(assignments.len(), 1); } ); @@ -681,7 +855,7 @@ fn import_approval_happy_path() { signature: dummy_signature(), }; let msg = protocol_v1::ApprovalDistributionMessage::Approvals(vec![approval.clone()]); - send_message_from_peer(overseer, &peer_b, Versioned::V1(msg)).await; + send_message_from_peer(overseer, &peer_b, msg).await; assert_matches!( overseer_recv(overseer).await, @@ -722,8 +896,8 @@ fn import_approval_bad() { let _ = test_harness(state_without_reputation_delay(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; // setup peers - setup_peer_with_view(overseer, &peer_a, ValidationVersion::V1, view![]).await; - setup_peer_with_view(overseer, &peer_b, ValidationVersion::V1, view![hash]).await; + setup_peer_with_view(overseer, &peer_a, view![], ValidationVersion::V1).await; + setup_peer_with_view(overseer, &peer_b, view![hash], ValidationVersion::V1).await; // new block `hash_a` with 1 candidates let meta = BlockApprovalMeta { @@ -749,14 +923,14 @@ fn import_approval_bad() { signature: dummy_signature(), }; let msg = protocol_v1::ApprovalDistributionMessage::Approvals(vec![approval.clone()]); - send_message_from_peer(overseer, &peer_b, Versioned::V1(msg)).await; + send_message_from_peer(overseer, &peer_b, msg).await; expect_reputation_change(overseer, &peer_b, COST_UNEXPECTED_MESSAGE).await; // now import an assignment from peer_b let assignments = vec![(cert.clone(), candidate_index)]; let msg = protocol_v1::ApprovalDistributionMessage::Assignments(assignments); - send_message_from_peer(overseer, 
&peer_b, Versioned::V1(msg)).await; + send_message_from_peer(overseer, &peer_b, msg).await; assert_matches!( overseer_recv(overseer).await, @@ -765,8 +939,8 @@ fn import_approval_bad() { i, tx, )) => { - assert_eq!(assignment, cert); - assert_eq!(i, candidate_index); + assert_eq!(assignment, cert.into()); + assert_eq!(i, candidate_index.into()); tx.send(AssignmentCheckResult::Accepted).unwrap(); } ); @@ -775,7 +949,7 @@ fn import_approval_bad() { // and try again let msg = protocol_v1::ApprovalDistributionMessage::Approvals(vec![approval.clone()]); - send_message_from_peer(overseer, &peer_b, Versioned::V1(msg)).await; + send_message_from_peer(overseer, &peer_b, msg).await; assert_matches!( overseer_recv(overseer).await, @@ -911,12 +1085,20 @@ fn update_peer_view() { let cert_a = fake_assignment_cert(hash_a, ValidatorIndex(0)); let cert_b = fake_assignment_cert(hash_b, ValidatorIndex(0)); - overseer_send(overseer, ApprovalDistributionMessage::DistributeAssignment(cert_a, 0)).await; + overseer_send( + overseer, + ApprovalDistributionMessage::DistributeAssignment(cert_a.into(), 0.into()), + ) + .await; - overseer_send(overseer, ApprovalDistributionMessage::DistributeAssignment(cert_b, 0)).await; + overseer_send( + overseer, + ApprovalDistributionMessage::DistributeAssignment(cert_b.into(), 0.into()), + ) + .await; // connect a peer - setup_peer_with_view(overseer, peer, ValidationVersion::V1, view![hash_a]).await; + setup_peer_with_view(overseer, peer, view![hash_a], ValidationVersion::V1).await; // we should send relevant assignments to the peer assert_matches!( @@ -934,7 +1116,7 @@ fn update_peer_view() { virtual_overseer }); - assert_eq!(state.peer_data.get(peer).map(|data| data.view.finalized_number), Some(0)); + assert_eq!(state.peer_views.get(peer).map(|v| v.view.finalized_number), Some(0)); assert_eq!( state .blocks @@ -965,7 +1147,7 @@ fn update_peer_view() { overseer_send( overseer, - ApprovalDistributionMessage::DistributeAssignment(cert_c.clone(), 0), + ApprovalDistributionMessage::DistributeAssignment(cert_c.clone().into(), 0.into()), ) .await; @@ -986,7 +1168,7 @@ fn update_peer_view() { virtual_overseer }); - assert_eq!(state.peer_data.get(peer).map(|data| data.view.finalized_number), Some(2)); + assert_eq!(state.peer_views.get(peer).map(|v| v.view.finalized_number), Some(2)); assert_eq!( state .blocks @@ -1016,10 +1198,7 @@ fn update_peer_view() { virtual_overseer }); - assert_eq!( - state.peer_data.get(peer).map(|data| data.view.finalized_number), - Some(finalized_number) - ); + assert_eq!(state.peer_views.get(peer).map(|v| v.view.finalized_number), Some(finalized_number)); assert!(state.blocks.get(&hash_c).unwrap().known_by.get(peer).is_none()); } @@ -1034,7 +1213,7 @@ fn import_remotely_then_locally() { let _ = test_harness(state_without_reputation_delay(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; // setup the peer - setup_peer_with_view(overseer, peer, ValidationVersion::V1, view![hash]).await; + setup_peer_with_view(overseer, peer, view![hash], ValidationVersion::V1).await; // new block `hash_a` with 1 candidates let meta = BlockApprovalMeta { @@ -1054,7 +1233,7 @@ fn import_remotely_then_locally() { let cert = fake_assignment_cert(hash, validator_index); let assignments = vec![(cert.clone(), candidate_index)]; let msg = protocol_v1::ApprovalDistributionMessage::Assignments(assignments.clone()); - send_message_from_peer(overseer, peer, Versioned::V1(msg)).await; + send_message_from_peer(overseer, peer, msg).await; // send an `Accept` 
message from the Approval Voting subsystem assert_matches!( @@ -1064,8 +1243,8 @@ fn import_remotely_then_locally() { i, tx, )) => { - assert_eq!(assignment, cert); - assert_eq!(i, candidate_index); + assert_eq!(assignment, cert.clone().into()); + assert_eq!(i, candidate_index.into()); tx.send(AssignmentCheckResult::Accepted).unwrap(); } ); @@ -1075,7 +1254,10 @@ fn import_remotely_then_locally() { // import the same assignment locally overseer_send( overseer, - ApprovalDistributionMessage::DistributeAssignment(cert, candidate_index), + ApprovalDistributionMessage::DistributeAssignment( + cert.clone().into(), + candidate_index.into(), + ), ) .await; @@ -1089,7 +1271,7 @@ fn import_remotely_then_locally() { signature: dummy_signature(), }; let msg = protocol_v1::ApprovalDistributionMessage::Approvals(vec![approval.clone()]); - send_message_from_peer(overseer, peer, Versioned::V1(msg)).await; + send_message_from_peer(overseer, peer, msg).await; assert_matches!( overseer_recv(overseer).await, @@ -1147,7 +1329,10 @@ fn sends_assignments_even_when_state_is_approved() { overseer_send( overseer, - ApprovalDistributionMessage::DistributeAssignment(cert.clone(), candidate_index), + ApprovalDistributionMessage::DistributeAssignment( + cert.clone().into(), + candidate_index.into(), + ), ) .await; @@ -1155,7 +1340,7 @@ fn sends_assignments_even_when_state_is_approved() { .await; // connect the peer. - setup_peer_with_view(overseer, peer, ValidationVersion::V1, view![hash]).await; + setup_peer_with_view(overseer, peer, view![hash], ValidationVersion::V1).await; let assignments = vec![(cert.clone(), candidate_index)]; let approvals = vec![approval.clone()]; @@ -1191,6 +1376,112 @@ fn sends_assignments_even_when_state_is_approved() { }); } +/// Same as `sends_assignments_even_when_state_is_approved_v2` but with `VRFModuloCompact` +/// assignemnts. +#[test] +fn sends_assignments_even_when_state_is_approved_v2() { + let peer_a = PeerId::random(); + let parent_hash = Hash::repeat_byte(0xFF); + let hash = Hash::repeat_byte(0xAA); + let peer = &peer_a; + + let _ = test_harness(State::default(), |mut virtual_overseer| async move { + let overseer = &mut virtual_overseer; + + // new block `hash_a` with 1 candidates + let meta = BlockApprovalMeta { + hash, + parent_hash, + number: 1, + candidates: vec![Default::default(); 4], + slot: 1.into(), + session: 1, + }; + let msg = ApprovalDistributionMessage::NewBlocks(vec![meta]); + overseer_send(overseer, msg).await; + + let validator_index = ValidatorIndex(0); + let cores = vec![0, 1, 2, 3]; + let candidate_bitfield: CandidateBitfield = cores.clone().try_into().unwrap(); + + let core_bitfield: CoreBitfield = cores + .iter() + .map(|index| CoreIndex(*index)) + .collect::>() + .try_into() + .unwrap(); + + let cert = fake_assignment_cert_v2(hash, validator_index, core_bitfield.clone()); + + // Assumes candidate index == core index. + let approvals = cores + .iter() + .map(|core| IndirectSignedApprovalVote { + block_hash: hash, + candidate_index: *core, + validator: validator_index, + signature: dummy_signature(), + }) + .collect::>(); + + overseer_send( + overseer, + ApprovalDistributionMessage::DistributeAssignment( + cert.clone().into(), + candidate_bitfield.clone(), + ), + ) + .await; + + for approval in &approvals { + overseer_send( + overseer, + ApprovalDistributionMessage::DistributeApproval(approval.clone()), + ) + .await; + } + + // connect the peer. 
+ setup_peer_with_view(overseer, peer, view![hash], ValidationVersion::VStaging).await; + + let assignments = vec![(cert.clone(), candidate_bitfield.clone())]; + + assert_matches!( + overseer_recv(overseer).await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::VStaging(protocol_vstaging::ValidationProtocol::ApprovalDistribution( + protocol_vstaging::ApprovalDistributionMessage::Assignments(sent_assignments) + )) + )) => { + assert_eq!(peers, vec![*peer]); + assert_eq!(sent_assignments, assignments); + } + ); + + assert_matches!( + overseer_recv(overseer).await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::VStaging(protocol_vstaging::ValidationProtocol::ApprovalDistribution( + protocol_vstaging::ApprovalDistributionMessage::Approvals(sent_approvals) + )) + )) => { + // Construct a hashmaps of approvals for comparison. Approval distribution reorders messages because they are kept in a + // hashmap as well. + let sent_approvals = sent_approvals.into_iter().map(|approval| (approval.candidate_index, approval)).collect::>(); + let approvals = approvals.into_iter().map(|approval| (approval.candidate_index, approval)).collect::>(); + + assert_eq!(peers, vec![*peer]); + assert_eq!(sent_approvals, approvals); + } + ); + + assert!(overseer.recv().timeout(TIMEOUT).await.is_none(), "no message should be sent"); + virtual_overseer + }); +} + /// /// /// 1. Receive remote peer view update with an unknown head @@ -1219,7 +1510,7 @@ fn race_condition_in_local_vs_remote_view_update() { }; // This will send a peer view that is ahead of our view - setup_peer_with_view(overseer, peer, ValidationVersion::V1, view![hash_b]).await; + setup_peer_with_view(overseer, peer, view![hash_b], ValidationVersion::V1).await; // Send our view update to include a new head overseer_send( @@ -1240,7 +1531,7 @@ fn race_condition_in_local_vs_remote_view_update() { .collect(); let msg = protocol_v1::ApprovalDistributionMessage::Assignments(assignments.clone()); - send_message_from_peer(overseer, peer, Versioned::V1(msg.clone())).await; + send_message_from_peer(overseer, peer, msg.clone()).await; // This will handle pending messages being processed let msg = ApprovalDistributionMessage::NewBlocks(vec![meta]); @@ -1257,8 +1548,8 @@ fn race_condition_in_local_vs_remote_view_update() { claimed_candidate_index, tx, )) => { - assert_eq!(assignment, assignments[i].0); - assert_eq!(claimed_candidate_index, assignments[i].1); + assert_eq!(assignment, assignments[i].0.clone().into()); + assert_eq!(claimed_candidate_index, assignments[i].1.into()); tx.send(AssignmentCheckResult::Accepted).unwrap(); } ); @@ -1283,7 +1574,7 @@ fn propagates_locally_generated_assignment_to_both_dimensions() { // Connect all peers. for (peer, _) in &peers { - setup_peer_with_view(overseer, peer, ValidationVersion::V1, view![hash]).await; + setup_peer_with_view(overseer, peer, view![hash], ValidationVersion::V1).await; } // Set up a gossip topology. @@ -1325,7 +1616,10 @@ fn propagates_locally_generated_assignment_to_both_dimensions() { overseer_send( overseer, - ApprovalDistributionMessage::DistributeAssignment(cert.clone(), candidate_index), + ApprovalDistributionMessage::DistributeAssignment( + cert.clone().into(), + candidate_index.into(), + ), ) .await; @@ -1388,7 +1682,7 @@ fn propagates_assignments_along_unshared_dimension() { // Connect all peers. 
for (peer, _) in &peers { - setup_peer_with_view(overseer, peer, ValidationVersion::V1, view![hash]).await; + setup_peer_with_view(overseer, peer, view![hash], ValidationVersion::V1).await; } // Set up a gossip topology. @@ -1424,7 +1718,7 @@ fn propagates_assignments_along_unshared_dimension() { // Issuer of the message is important, not the peer we receive from. // 99 deliberately chosen because it's not in X or Y. - send_message_from_peer(overseer, &peers[99].0, Versioned::V1(msg)).await; + send_message_from_peer(overseer, &peers[99].0, msg).await; assert_matches!( overseer_recv(overseer).await, AllMessages::ApprovalVoting(ApprovalVotingMessage::CheckAndImportAssignment( @@ -1473,7 +1767,7 @@ fn propagates_assignments_along_unshared_dimension() { // Issuer of the message is important, not the peer we receive from. // 99 deliberately chosen because it's not in X or Y. - send_message_from_peer(overseer, &peers[99].0, Versioned::V1(msg)).await; + send_message_from_peer(overseer, &peers[99].0, msg).await; assert_matches!( overseer_recv(overseer).await, AllMessages::ApprovalVoting(ApprovalVotingMessage::CheckAndImportAssignment( @@ -1530,7 +1824,7 @@ fn propagates_to_required_after_connect() { // Connect all peers except omitted. for (i, (peer, _)) in peers.iter().enumerate() { if !omitted.contains(&i) { - setup_peer_with_view(overseer, peer, ValidationVersion::V1, view![hash]).await; + setup_peer_with_view(overseer, peer, view![hash], ValidationVersion::V1).await; } } @@ -1573,7 +1867,10 @@ fn propagates_to_required_after_connect() { overseer_send( overseer, - ApprovalDistributionMessage::DistributeAssignment(cert.clone(), candidate_index), + ApprovalDistributionMessage::DistributeAssignment( + cert.clone().into(), + candidate_index.into(), + ), ) .await; @@ -1619,7 +1916,7 @@ fn propagates_to_required_after_connect() { ); for i in omitted.iter().copied() { - setup_peer_with_view(overseer, &peers[i].0, ValidationVersion::V1, view![hash]).await; + setup_peer_with_view(overseer, &peers[i].0, view![hash], ValidationVersion::V1).await; assert_matches!( overseer_recv(overseer).await, @@ -1668,7 +1965,7 @@ fn sends_to_more_peers_after_getting_topology() { // Connect all peers except omitted. for (peer, _) in &peers { - setup_peer_with_view(overseer, peer, ValidationVersion::V1, view![hash]).await; + setup_peer_with_view(overseer, peer, view![hash], ValidationVersion::V1).await; } // new block `hash_a` with 1 candidates @@ -1698,7 +1995,10 @@ fn sends_to_more_peers_after_getting_topology() { overseer_send( overseer, - ApprovalDistributionMessage::DistributeAssignment(cert.clone(), candidate_index), + ApprovalDistributionMessage::DistributeAssignment( + cert.clone().into(), + candidate_index.into(), + ), ) .await; @@ -1820,7 +2120,7 @@ fn originator_aggression_l1() { // Connect all peers except omitted. for (peer, _) in &peers { - setup_peer_with_view(overseer, peer, ValidationVersion::V1, view![hash]).await; + setup_peer_with_view(overseer, peer, view![hash], ValidationVersion::V1).await; } // new block `hash_a` with 1 candidates @@ -1857,7 +2157,10 @@ fn originator_aggression_l1() { overseer_send( overseer, - ApprovalDistributionMessage::DistributeAssignment(cert.clone(), candidate_index), + ApprovalDistributionMessage::DistributeAssignment( + cert.clone().into(), + candidate_index.into(), + ), ) .await; @@ -1979,7 +2282,7 @@ fn non_originator_aggression_l1() { // Connect all peers except omitted. 
for (peer, _) in &peers { - setup_peer_with_view(overseer, peer, ValidationVersion::V1, view![hash]).await; + setup_peer_with_view(overseer, peer, view![hash], ValidationVersion::V1).await; } // new block `hash_a` with 1 candidates @@ -2008,12 +2311,12 @@ fn non_originator_aggression_l1() { ) .await; - let assignments = vec![(cert.clone(), candidate_index)]; + let assignments = vec![(cert.clone().into(), candidate_index)]; let msg = protocol_v1::ApprovalDistributionMessage::Assignments(assignments.clone()); // Issuer of the message is important, not the peer we receive from. // 99 deliberately chosen because it's not in X or Y. - send_message_from_peer(overseer, &peers[99].0, Versioned::V1(msg)).await; + send_message_from_peer(overseer, &peers[99].0, msg).await; assert_matches!( overseer_recv(overseer).await, AllMessages::ApprovalVoting(ApprovalVotingMessage::CheckAndImportAssignment( @@ -2084,7 +2387,7 @@ fn non_originator_aggression_l2() { // Connect all peers except omitted. for (peer, _) in &peers { - setup_peer_with_view(overseer, peer, ValidationVersion::V1, view![hash]).await; + setup_peer_with_view(overseer, peer, view![hash], ValidationVersion::V1).await; } // new block `hash_a` with 1 candidates @@ -2118,7 +2421,7 @@ fn non_originator_aggression_l2() { // Issuer of the message is important, not the peer we receive from. // 99 deliberately chosen because it's not in X or Y. - send_message_from_peer(overseer, &peers[99].0, Versioned::V1(msg)).await; + send_message_from_peer(overseer, &peers[99].0, msg).await; assert_matches!( overseer_recv(overseer).await, AllMessages::ApprovalVoting(ApprovalVotingMessage::CheckAndImportAssignment( @@ -2249,7 +2552,7 @@ fn resends_messages_periodically() { // Connect all peers. for (peer, _) in &peers { - setup_peer_with_view(overseer, peer, ValidationVersion::V1, view![hash]).await; + setup_peer_with_view(overseer, peer, view![hash], ValidationVersion::V1).await; } // Set up a gossip topology. @@ -2284,7 +2587,7 @@ fn resends_messages_periodically() { // Issuer of the message is important, not the peer we receive from. // 99 deliberately chosen because it's not in X or Y. - send_message_from_peer(overseer, &peers[99].0, Versioned::V1(msg)).await; + send_message_from_peer(overseer, &peers[99].0, msg).await; assert_matches!( overseer_recv(overseer).await, AllMessages::ApprovalVoting(ApprovalVotingMessage::CheckAndImportAssignment( @@ -2388,9 +2691,9 @@ fn import_versioned_approval() { let _ = test_harness(state, |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; // All peers are aware of relay parent. 
- setup_peer_with_view(overseer, &peer_a, ValidationVersion::V2, view![hash]).await; - setup_peer_with_view(overseer, &peer_b, ValidationVersion::V1, view![hash]).await; - setup_peer_with_view(overseer, &peer_c, ValidationVersion::V2, view![hash]).await; + setup_peer_with_view(overseer, &peer_a, view![hash], ValidationVersion::V2).await; + setup_peer_with_view(overseer, &peer_b, view![hash], ValidationVersion::V1).await; + setup_peer_with_view(overseer, &peer_c, view![hash], ValidationVersion::V2).await; // new block `hash_a` with 1 candidates let meta = BlockApprovalMeta { @@ -2410,7 +2713,7 @@ fn import_versioned_approval() { let cert = fake_assignment_cert(hash, validator_index); overseer_send( overseer, - ApprovalDistributionMessage::DistributeAssignment(cert, candidate_index), + ApprovalDistributionMessage::DistributeAssignment(cert.into(), candidate_index.into()), ) .await; @@ -2451,7 +2754,7 @@ fn import_versioned_approval() { signature: dummy_signature(), }; let msg = protocol_v2::ApprovalDistributionMessage::Approvals(vec![approval.clone()]); - send_message_from_peer(overseer, &peer_a, Versioned::V2(msg)).await; + send_message_from_peer_v2(overseer, &peer_a, msg).await; assert_matches!( overseer_recv(overseer).await, @@ -2512,7 +2815,9 @@ fn batch_test_round(message_count: usize) { let validators = 0..message_count; let assignments: Vec<_> = validators .clone() - .map(|index| (fake_assignment_cert(Hash::zero(), ValidatorIndex(index as u32)), 0)) + .map(|index| { + (fake_assignment_cert(Hash::zero(), ValidatorIndex(index as u32)).into(), 0.into()) + }) .collect(); let approvals: Vec<_> = validators @@ -2525,9 +2830,18 @@ fn batch_test_round(message_count: usize) { .collect(); let peer = PeerId::random(); - send_assignments_batched(&mut sender, assignments.clone(), peer, ValidationVersion::V1) - .await; - send_approvals_batched(&mut sender, approvals.clone(), peer, ValidationVersion::V1).await; + send_assignments_batched( + &mut sender, + assignments.clone(), + &vec![(peer, ValidationVersion::V1.into())], + ) + .await; + send_approvals_batched( + &mut sender, + approvals.clone(), + &vec![(peer, ValidationVersion::V1.into())], + ) + .await; // Check expected assignments batches. for assignment_index in (0..assignments.len()).step_by(super::MAX_ASSIGNMENT_BATCH_SIZE) { @@ -2549,7 +2863,7 @@ fn batch_test_round(message_count: usize) { assert_eq!(peers.len(), 1); for (message_index, assignment) in sent_assignments.iter().enumerate() { - assert_eq!(assignment.0, assignments[assignment_index + message_index].0); + assert_eq!(assignment.0, assignments[assignment_index + message_index].0.clone().try_into().unwrap()); assert_eq!(assignment.1, 0); } } diff --git a/polkadot/node/network/availability-distribution/Cargo.toml b/polkadot/node/network/availability-distribution/Cargo.toml index c3c7aa4e0ea517b68e6e69d7aef45870d94efb90..91ed1026e41e8e2abed16489edbe1cd15258adce 100644 --- a/polkadot/node/network/availability-distribution/Cargo.toml +++ b/polkadot/node/network/availability-distribution/Cargo.toml @@ -1,5 +1,6 @@ [package] name = "polkadot-availability-distribution" +description = "The Availability Distribution subsystem. Requests the required availability data. Also distributes availability data and chunks to requesters." 
version = "1.0.0" authors.workspace = true edition.workspace = true diff --git a/polkadot/node/network/availability-distribution/src/requester/mod.rs b/polkadot/node/network/availability-distribution/src/requester/mod.rs index 446988f7cc0dd72797f4e804a7e9fdc31290befc..97e80d696e7ef2adabdbc24dda76172603e462f0 100644 --- a/polkadot/node/network/availability-distribution/src/requester/mod.rs +++ b/polkadot/node/network/availability-distribution/src/requester/mod.rs @@ -35,7 +35,7 @@ use futures::{ use polkadot_node_subsystem::{ jaeger, messages::{ChainApiMessage, RuntimeApiMessage}, - overseer, ActivatedLeaf, ActiveLeavesUpdate, LeafStatus, + overseer, ActivatedLeaf, ActiveLeavesUpdate, }; use polkadot_node_subsystem_util::runtime::{get_occupied_cores, RuntimeInfo}; use polkadot_primitives::{CandidateHash, Hash, OccupiedCore, SessionIndex}; @@ -105,8 +105,7 @@ impl Requester { ) -> Result<()> { gum::trace!(target: LOG_TARGET, ?update, "Update fetching heads"); let ActiveLeavesUpdate { activated, deactivated } = update; - // Stale leaves happen after a reversion - we don't want to re-run availability there. - if let Some(leaf) = activated.filter(|leaf| leaf.status == LeafStatus::Fresh) { + if let Some(leaf) = activated { let span = spans .get(&leaf.hash) .map(|span| span.child("update-fetching-heads")) diff --git a/polkadot/node/network/availability-recovery/Cargo.toml b/polkadot/node/network/availability-recovery/Cargo.toml index 42c3abef547b93e4187ed92764a91b4415602f06..6048e6323cb45e4ef82256a94414e04bfda3c263 100644 --- a/polkadot/node/network/availability-recovery/Cargo.toml +++ b/polkadot/node/network/availability-recovery/Cargo.toml @@ -1,5 +1,6 @@ [package] name = "polkadot-availability-recovery" +description = "The Availability Recovery subsystem. Handles requests for recovering the availability data of included candidates." version = "1.0.0" authors.workspace = true edition.workspace = true diff --git a/polkadot/node/network/availability-recovery/src/lib.rs b/polkadot/node/network/availability-recovery/src/lib.rs index e2146981da926d2c7027c902ba5be4716382f4da..4a658449f09c3ac195cc35846493f44fb353e533 100644 --- a/polkadot/node/network/availability-recovery/src/lib.rs +++ b/polkadot/node/network/availability-recovery/src/lib.rs @@ -105,6 +105,17 @@ pub struct AvailabilityRecoverySubsystem { req_receiver: IncomingRequestReceiver, /// Metrics for this subsystem. metrics: Metrics, + /// The type of check to perform after available data was recovered. + post_recovery_check: PostRecoveryCheck, +} + +#[derive(Clone, PartialEq, Debug)] +/// The type of check to perform after available data was recovered. +pub enum PostRecoveryCheck { + /// Reencode the data and check erasure root. For validators. + Reencode, + /// Only check the pov hash. For collators only. + PovHash, } /// Expensive erasure coding computations that we want to run on a blocking thread. 
@@ -344,6 +355,7 @@ async fn launch_recovery_task( metrics: &Metrics, recovery_strategies: VecDeque::Sender>>>, bypass_availability_store: bool, + post_recovery_check: PostRecoveryCheck, ) -> error::Result<()> { let candidate_hash = receipt.hash(); let params = RecoveryParams { @@ -354,6 +366,8 @@ async fn launch_recovery_task( erasure_root: receipt.descriptor.erasure_root, metrics: metrics.clone(), bypass_availability_store, + post_recovery_check, + pov_hash: receipt.descriptor.pov_hash, }; let recovery_task = RecoveryTask::new(ctx.sender().clone(), params, recovery_strategies); @@ -390,6 +404,7 @@ async fn handle_recover( erasure_task_tx: futures::channel::mpsc::Sender, recovery_strategy_kind: RecoveryStrategyKind, bypass_availability_store: bool, + post_recovery_check: PostRecoveryCheck, ) -> error::Result<()> { let candidate_hash = receipt.hash(); @@ -486,6 +501,7 @@ async fn handle_recover( metrics, recovery_strategies, bypass_availability_store, + post_recovery_check, ) .await }, @@ -527,15 +543,17 @@ async fn query_chunk_size( #[overseer::contextbounds(AvailabilityRecovery, prefix = self::overseer)] impl AvailabilityRecoverySubsystem { - /// Create a new instance of `AvailabilityRecoverySubsystem` which never requests the - /// `AvailabilityStoreSubsystem` subsystem. - pub fn with_availability_store_skip( + /// Create a new instance of `AvailabilityRecoverySubsystem` suitable for collator nodes, + /// which never requests the `AvailabilityStoreSubsystem` subsystem and only checks the POV hash + /// instead of reencoding the available data. + pub fn for_collator( req_receiver: IncomingRequestReceiver, metrics: Metrics, ) -> Self { Self { recovery_strategy_kind: RecoveryStrategyKind::BackersFirstIfSizeLower(SMALL_POV_LIMIT), bypass_availability_store: true, + post_recovery_check: PostRecoveryCheck::PovHash, req_receiver, metrics, } @@ -550,6 +568,7 @@ impl AvailabilityRecoverySubsystem { Self { recovery_strategy_kind: RecoveryStrategyKind::BackersFirstAlways, bypass_availability_store: false, + post_recovery_check: PostRecoveryCheck::Reencode, req_receiver, metrics, } @@ -563,6 +582,7 @@ impl AvailabilityRecoverySubsystem { Self { recovery_strategy_kind: RecoveryStrategyKind::ChunksAlways, bypass_availability_store: false, + post_recovery_check: PostRecoveryCheck::Reencode, req_receiver, metrics, } @@ -577,6 +597,7 @@ impl AvailabilityRecoverySubsystem { Self { recovery_strategy_kind: RecoveryStrategyKind::BackersFirstIfSizeLower(SMALL_POV_LIMIT), bypass_availability_store: false, + post_recovery_check: PostRecoveryCheck::Reencode, req_receiver, metrics, } @@ -584,8 +605,13 @@ impl AvailabilityRecoverySubsystem { async fn run(self, mut ctx: Context) -> SubsystemResult<()> { let mut state = State::default(); - let Self { mut req_receiver, metrics, recovery_strategy_kind, bypass_availability_store } = - self; + let Self { + mut req_receiver, + metrics, + recovery_strategy_kind, + bypass_availability_store, + post_recovery_check, + } = self; let (erasure_task_tx, erasure_task_rx) = futures::channel::mpsc::channel(16); let mut erasure_task_rx = erasure_task_rx.fuse(); @@ -675,7 +701,8 @@ impl AvailabilityRecoverySubsystem { &metrics, erasure_task_tx.clone(), recovery_strategy_kind.clone(), - bypass_availability_store + bypass_availability_store, + post_recovery_check.clone() ).await { gum::warn!( target: LOG_TARGET, @@ -822,6 +849,7 @@ async fn erasure_task_thread( target: LOG_TARGET, "Erasure task channel closed. 
Node shutting down ?", ); + break }, } } diff --git a/polkadot/node/network/availability-recovery/src/task.rs b/polkadot/node/network/availability-recovery/src/task.rs index d5bc2da84944a3403f9d4a3bf9d5a11b0e772f40..f705d5c0e4cfb15eae8fd873312fc1946d55a216 100644 --- a/polkadot/node/network/availability-recovery/src/task.rs +++ b/polkadot/node/network/availability-recovery/src/task.rs @@ -20,7 +20,7 @@ use crate::{ futures_undead::FuturesUndead, is_chunk_valid, is_unavailable, metrics::Metrics, ErasureTask, - LOG_TARGET, + PostRecoveryCheck, LOG_TARGET, }; use futures::{channel::oneshot, SinkExt}; #[cfg(not(test))] @@ -95,6 +95,12 @@ pub struct RecoveryParams { /// Do not request data from availability-store. Useful for collators. pub bypass_availability_store: bool, + + /// The type of check to perform after available data was recovered. + pub post_recovery_check: PostRecoveryCheck, + + /// The blake2-256 hash of the PoV. + pub pov_hash: Hash, } /// Intermediate/common data that must be passed between `RecoveryStrategy`s belonging to the @@ -501,39 +507,48 @@ impl RecoveryStrategy match response.await { Ok(req_res::v1::AvailableDataFetchingResponse::AvailableData(data)) => { - let (reencode_tx, reencode_rx) = oneshot::channel(); - self.params - .erasure_task_tx - .send(ErasureTask::Reencode( - common_params.n_validators, - common_params.erasure_root, - data, - reencode_tx, - )) - .await - .map_err(|_| RecoveryError::ChannelClosed)?; - - let reencode_response = - reencode_rx.await.map_err(|_| RecoveryError::ChannelClosed)?; - - if let Some(data) = reencode_response { - gum::trace!( - target: LOG_TARGET, - candidate_hash = ?common_params.candidate_hash, - "Received full data", - ); + let maybe_data = match common_params.post_recovery_check { + PostRecoveryCheck::Reencode => { + let (reencode_tx, reencode_rx) = oneshot::channel(); + self.params + .erasure_task_tx + .send(ErasureTask::Reencode( + common_params.n_validators, + common_params.erasure_root, + data, + reencode_tx, + )) + .await + .map_err(|_| RecoveryError::ChannelClosed)?; + + reencode_rx.await.map_err(|_| RecoveryError::ChannelClosed)? + }, + PostRecoveryCheck::PovHash => + (data.pov.hash() == common_params.pov_hash).then_some(data), + }; - return Ok(data) - } else { - gum::debug!( - target: LOG_TARGET, - candidate_hash = ?common_params.candidate_hash, - ?validator_index, - "Invalid data response", - ); + match maybe_data { + Some(data) => { + gum::trace!( + target: LOG_TARGET, + candidate_hash = ?common_params.candidate_hash, + "Received full data", + ); - // it doesn't help to report the peer with req/res. - } + return Ok(data) + }, + None => { + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?common_params.candidate_hash, + ?validator_index, + "Invalid data response", + ); + + // it doesn't help to report the peer with req/res. + // we'll try the next backer. + }, + }; }, Ok(req_res::v1::AvailableDataFetchingResponse::NoSuchData) => {}, Err(e) => gum::debug!( @@ -647,22 +662,43 @@ impl FetchChunks { match available_data_response { Ok(data) => { - // Send request to re-encode the chunks and check merkle root. 
- let (reencode_tx, reencode_rx) = oneshot::channel(); - self.erasure_task_tx - .send(ErasureTask::Reencode( - common_params.n_validators, - common_params.erasure_root, - data, - reencode_tx, - )) - .await - .map_err(|_| RecoveryError::ChannelClosed)?; - - let reencode_response = - reencode_rx.await.map_err(|_| RecoveryError::ChannelClosed)?; - - if let Some(data) = reencode_response { + let maybe_data = match common_params.post_recovery_check { + PostRecoveryCheck::Reencode => { + // Send request to re-encode the chunks and check merkle root. + let (reencode_tx, reencode_rx) = oneshot::channel(); + self.erasure_task_tx + .send(ErasureTask::Reencode( + common_params.n_validators, + common_params.erasure_root, + data, + reencode_tx, + )) + .await + .map_err(|_| RecoveryError::ChannelClosed)?; + + reencode_rx.await.map_err(|_| RecoveryError::ChannelClosed)?.or_else(|| { + gum::trace!( + target: LOG_TARGET, + candidate_hash = ?common_params.candidate_hash, + erasure_root = ?common_params.erasure_root, + "Data recovery error - root mismatch", + ); + None + }) + }, + PostRecoveryCheck::PovHash => + (data.pov.hash() == common_params.pov_hash).then_some(data).or_else(|| { + gum::trace!( + target: LOG_TARGET, + candidate_hash = ?common_params.candidate_hash, + pov_hash = ?common_params.pov_hash, + "Data recovery error - PoV hash mismatch", + ); + None + }), + }; + + if let Some(data) = maybe_data { gum::trace!( target: LOG_TARGET, candidate_hash = ?common_params.candidate_hash, @@ -673,12 +709,6 @@ impl FetchChunks { Ok(data) } else { recovery_duration.map(|rd| rd.stop_and_discard()); - gum::trace!( - target: LOG_TARGET, - candidate_hash = ?common_params.candidate_hash, - erasure_root = ?common_params.erasure_root, - "Data recovery error - root mismatch", - ); Err(RecoveryError::Invalid) } diff --git a/polkadot/node/network/bitfield-distribution/Cargo.toml b/polkadot/node/network/bitfield-distribution/Cargo.toml index 32b0ba2545e69159c1abf6cbe336de84218ca79d..0e61e9cf6209a4e99d2e51d05a0315b4128aff25 100644 --- a/polkadot/node/network/bitfield-distribution/Cargo.toml +++ b/polkadot/node/network/bitfield-distribution/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "polkadot-availability-bitfield-distribution" version = "1.0.0" +description = "Polkadot Bitfiled Distribution subsystem, which gossips signed availability bitfields used to compactly determine which backed candidates are available or not based on a 2/3+ quorum." 
authors.workspace = true edition.workspace = true license.workspace = true diff --git a/polkadot/node/network/bitfield-distribution/src/lib.rs b/polkadot/node/network/bitfield-distribution/src/lib.rs index 68e381ab6be51d65e4ed0a45eb971b84089cb62e..9cc79aee8490705893a5065cb1fbde6124e4f61c 100644 --- a/polkadot/node/network/bitfield-distribution/src/lib.rs +++ b/polkadot/node/network/bitfield-distribution/src/lib.rs @@ -25,14 +25,15 @@ use always_assert::never; use futures::{channel::oneshot, FutureExt}; +use net_protocol::filter_by_peer_version; use polkadot_node_network_protocol::{ self as net_protocol, grid_topology::{ GridNeighbors, RandomRouting, RequiredRouting, SessionBoundGridTopologyStorage, }, peer_set::{ProtocolVersion, ValidationVersion}, - v1 as protocol_v1, v2 as protocol_v2, OurView, PeerId, UnifiedReputationChange as Rep, - Versioned, View, + v1 as protocol_v1, v2 as protocol_v2, vstaging as protocol_vstaging, OurView, PeerId, + UnifiedReputationChange as Rep, Versioned, View, }; use polkadot_node_subsystem::{ jaeger, messages::*, overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, PerLeafSpan, @@ -101,6 +102,11 @@ impl BitfieldGossipMessage { self.relay_parent, self.signed_availability.into(), )), + Some(ValidationVersion::VStaging) => + Versioned::VStaging(protocol_vstaging::BitfieldDistributionMessage::Bitfield( + self.relay_parent, + self.signed_availability.into(), + )), None => { never!("Peers should only have supported protocol versions."); @@ -131,9 +137,9 @@ pub struct PeerData { /// Data used to track information of peers and relay parents the /// overseer ordered us to work on. -#[derive(Default, Debug)] +#[derive(Default)] struct ProtocolState { - /// Track all active peers and their views + /// Track all active peer views and protocol versions /// to determine what is relevant to them. 
peer_data: HashMap, @@ -492,17 +498,13 @@ async fn relay_message( } else { let _span = span.child("gossip"); - let filter_by_version = |peers: &[(PeerId, ProtocolVersion)], - version: ValidationVersion| { - peers - .iter() - .filter(|(_, v)| v == &version.into()) - .map(|(peer_id, _)| *peer_id) - .collect::>() - }; + let v1_interested_peers = + filter_by_peer_version(&interested_peers, ValidationVersion::V1.into()); + let v2_interested_peers = + filter_by_peer_version(&interested_peers, ValidationVersion::V2.into()); - let v1_interested_peers = filter_by_version(&interested_peers, ValidationVersion::V1); - let v2_interested_peers = filter_by_version(&interested_peers, ValidationVersion::V2); + let vstaging_interested_peers = + filter_by_peer_version(&interested_peers, ValidationVersion::VStaging.into()); if !v1_interested_peers.is_empty() { ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( @@ -515,7 +517,15 @@ async fn relay_message( if !v2_interested_peers.is_empty() { ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( v2_interested_peers, - message.into_validation_protocol(ValidationVersion::V2.into()), + message.clone().into_validation_protocol(ValidationVersion::V2.into()), + )) + .await + } + + if !vstaging_interested_peers.is_empty() { + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( + vstaging_interested_peers, + message.into_validation_protocol(ValidationVersion::VStaging.into()), )) .await } @@ -540,6 +550,10 @@ async fn process_incoming_peer_message( Versioned::V2(protocol_v2::BitfieldDistributionMessage::Bitfield( relay_parent, bitfield, + )) | + Versioned::VStaging(protocol_vstaging::BitfieldDistributionMessage::Bitfield( + relay_parent, + bitfield, )) => (relay_parent, bitfield), }; @@ -774,9 +788,11 @@ async fn handle_network_msg( handle_peer_view_change(ctx, state, new_peer, old_view, rng).await; } }, - NetworkBridgeEvent::PeerViewChange(peerid, new_view) => { - gum::trace!(target: LOG_TARGET, ?peerid, ?new_view, "Peer view change"); - handle_peer_view_change(ctx, state, peerid, new_view, rng).await; + NetworkBridgeEvent::PeerViewChange(peer_id, new_view) => { + gum::trace!(target: LOG_TARGET, ?peer_id, ?new_view, "Peer view change"); + if state.peer_data.get(&peer_id).is_some() { + handle_peer_view_change(ctx, state, peer_id, new_view, rng).await; + } }, NetworkBridgeEvent::OurViewChange(new_view) => { gum::trace!(target: LOG_TARGET, ?new_view, "Our view change"); diff --git a/polkadot/node/network/bridge/Cargo.toml b/polkadot/node/network/bridge/Cargo.toml index df8e881234dc56dc364478040a87363d104de4c1..6ae765c252f2d53b80da52c7509e725b2a9d19a8 100644 --- a/polkadot/node/network/bridge/Cargo.toml +++ b/polkadot/node/network/bridge/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "polkadot-network-bridge" version = "1.0.0" +description = "The Network Bridge Subsystem — protocol multiplexer for Polkadot." 
authors.workspace = true edition.workspace = true license.workspace = true diff --git a/polkadot/node/network/bridge/src/network.rs b/polkadot/node/network/bridge/src/network.rs index 823e1254612f8cae6980c518ffd848f0b61f2e88..c264c94cc19bf6f0d6973b418671b4b13c0bf2ac 100644 --- a/polkadot/node/network/bridge/src/network.rs +++ b/polkadot/node/network/bridge/src/network.rs @@ -28,23 +28,129 @@ use sc_network::{ }; use polkadot_node_network_protocol::{ - peer_set::{PeerSet, PeerSetProtocolNames, ProtocolVersion}, + peer_set::{ + CollationVersion, PeerSet, PeerSetProtocolNames, ProtocolVersion, ValidationVersion, + }, request_response::{OutgoingRequest, Recipient, ReqProtocolNames, Requests}, - PeerId, + v1 as protocol_v1, v2 as protocol_v2, vstaging as protocol_vstaging, PeerId, }; use polkadot_primitives::{AuthorityDiscoveryId, Block, Hash}; -use crate::validator_discovery::AuthorityDiscovery; +use crate::{metrics::Metrics, validator_discovery::AuthorityDiscovery, WireMessage}; // network bridge network abstraction log target const LOG_TARGET: &'static str = "parachain::network-bridge-net"; -/// Send a message to the network. +// Helper function to send a validation v1 message to a list of peers. +// Messages are always sent via the main protocol, even legacy protocol messages. +pub(crate) fn send_validation_message_v1( + net: &mut impl Network, + peers: Vec, + peerset_protocol_names: &PeerSetProtocolNames, + message: WireMessage, + metrics: &Metrics, +) { + gum::trace!(target: LOG_TARGET, ?peers, ?message, "Sending validation v1 message to peers",); + + send_message( + net, + peers, + PeerSet::Validation, + ValidationVersion::V1.into(), + peerset_protocol_names, + message, + metrics, + ); +} + +// Helper function to send a validation vstaging message to a list of peers. +// Messages are always sent via the main protocol, even legacy protocol messages. +pub(crate) fn send_validation_message_vstaging( + net: &mut impl Network, + peers: Vec, + peerset_protocol_names: &PeerSetProtocolNames, + message: WireMessage, + metrics: &Metrics, +) { + gum::trace!(target: LOG_TARGET, ?peers, ?message, "Sending validation vstaging message to peers",); + + send_message( + net, + peers, + PeerSet::Validation, + ValidationVersion::VStaging.into(), + peerset_protocol_names, + message, + metrics, + ); +} + +// Helper function to send a validation v2 message to a list of peers. +// Messages are always sent via the main protocol, even legacy protocol messages. +pub(crate) fn send_validation_message_v2( + net: &mut impl Network, + peers: Vec, + protocol_names: &PeerSetProtocolNames, + message: WireMessage, + metrics: &Metrics, +) { + send_message( + net, + peers, + PeerSet::Validation, + ValidationVersion::V2.into(), + protocol_names, + message, + metrics, + ); +} + +// Helper function to send a collation v1 message to a list of peers. +// Messages are always sent via the main protocol, even legacy protocol messages. +pub(crate) fn send_collation_message_v1( + net: &mut impl Network, + peers: Vec, + peerset_protocol_names: &PeerSetProtocolNames, + message: WireMessage, + metrics: &Metrics, +) { + send_message( + net, + peers, + PeerSet::Collation, + CollationVersion::V1.into(), + peerset_protocol_names, + message, + metrics, + ); +} + +// Helper function to send a collation v2 message to a list of peers. +// Messages are always sent via the main protocol, even legacy protocol messages. 
+pub(crate) fn send_collation_message_v2( + net: &mut impl Network, + peers: Vec, + peerset_protocol_names: &PeerSetProtocolNames, + message: WireMessage, + metrics: &Metrics, +) { + send_message( + net, + peers, + PeerSet::Collation, + CollationVersion::V2.into(), + peerset_protocol_names, + message, + metrics, + ); +} + +/// Lower level function that sends a message to the network using the main protocol version. /// /// This function is only used internally by the network-bridge, which is responsible to only send /// messages that are compatible with the passed peer set, as that is currently not enforced by /// this function. These are messages of type `WireMessage` parameterized on the matching type. -pub(crate) fn send_message( +fn send_message( net: &mut impl Network, mut peers: Vec, peer_set: PeerSet, @@ -65,6 +171,17 @@ pub(crate) fn send_message( encoded }; + // optimization: generate the protocol name once. + let protocol_name = protocol_names.get_name(peer_set, version); + gum::trace!( + target: LOG_TARGET, + ?peers, + ?version, + ?protocol_name, + ?message, + "Sending message to peers", + ); + // optimization: avoid cloning the message for the last peer in the // list. The message payload can be quite large. If the underlying // network used `Bytes` this would not be necessary. diff --git a/polkadot/node/network/bridge/src/rx/mod.rs b/polkadot/node/network/bridge/src/rx/mod.rs index 7e86b46a7e0397ed78e5796afd9a9979688350fa..06be57ead0060749227c01f18eb142c9db5375b2 100644 --- a/polkadot/node/network/bridge/src/rx/mod.rs +++ b/polkadot/node/network/bridge/src/rx/mod.rs @@ -21,6 +21,7 @@ use super::*; use always_assert::never; use bytes::Bytes; use futures::stream::{BoxStream, StreamExt}; +use net_protocol::filter_by_peer_version; use parity_scale_codec::{Decode, DecodeAll}; use sc_network::Event as NetworkEvent; @@ -33,8 +34,8 @@ use polkadot_node_network_protocol::{ CollationVersion, PeerSet, PeerSetProtocolNames, PerPeerSet, ProtocolVersion, ValidationVersion, }, - v1 as protocol_v1, v2 as protocol_v2, ObservedRole, OurView, PeerId, - UnifiedReputationChange as Rep, View, + v1 as protocol_v1, v2 as protocol_v2, vstaging as protocol_vstaging, ObservedRole, OurView, + PeerId, UnifiedReputationChange as Rep, View, }; use polkadot_node_subsystem::{ @@ -64,9 +65,11 @@ use super::validator_discovery; /// Actual interfacing to the network based on the `Network` trait. /// /// Defines the `Network` trait with an implementation for an `Arc`. 
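A minimal, self-contained sketch (not part of the patch) of the pattern the helpers above support in `update_our_view` and in bitfield-distribution's `relay_message`: peers are grouped by negotiated protocol version via `filter_by_peer_version`, and one versioned wire message is sent per non-empty group. `PeerId`, `ProtocolVersion` and the numeric version constants below are placeholders, not the real network-protocol types.

type PeerId = u64;
type ProtocolVersion = u32;

// Placeholder version identifiers for V1, V2 and VStaging.
const V1: ProtocolVersion = 1;
const V2: ProtocolVersion = 2;
const VSTAGING: ProtocolVersion = 3;

fn filter_by_peer_version(
    peers: &[(PeerId, ProtocolVersion)],
    version: ProtocolVersion,
) -> Vec<PeerId> {
    // Same shape as the local closures this diff removes in favour of a shared helper.
    peers.iter().filter(|(_, v)| *v == version).map(|(p, _)| *p).collect()
}

fn send_view_update(peers: Vec<PeerId>, version: ProtocolVersion, view: &str) {
    // Mirrors the `if !xs.is_empty()` guards at the call sites: skip empty groups.
    if peers.is_empty() {
        return
    }
    println!("sending view update {view:?} over protocol v{version} to {peers:?}");
}

fn update_view(peers: &[(PeerId, ProtocolVersion)], view: &str) {
    // One send per protocol version group.
    for version in [V1, V2, VSTAGING] {
        send_view_update(filter_by_peer_version(peers, version), version, view);
    }
}

fn main() {
    let peers = vec![(1, V1), (2, V2), (3, VSTAGING), (4, V2)];
    update_view(&peers, "view#42");
}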
-use crate::network::{send_message, Network}; - -use crate::network::get_peer_id_by_authority_id; +use crate::network::{ + send_collation_message_v1, send_collation_message_v2, send_validation_message_v1, + send_validation_message_v2, send_validation_message_vstaging, Network, +}; +use crate::{network::get_peer_id_by_authority_id, WireMessage}; use super::metrics::Metrics; @@ -251,22 +254,27 @@ where match ValidationVersion::try_from(version) .expect("try_get_protocol has already checked version is known; qed") { - ValidationVersion::V1 => send_message( + ValidationVersion::V1 => send_validation_message_v1( &mut network_service, vec![peer], - PeerSet::Validation, - version, &peerset_protocol_names, WireMessage::::ViewUpdate( local_view, ), &metrics, ), - ValidationVersion::V2 => send_message( + ValidationVersion::VStaging => send_validation_message_vstaging( + &mut network_service, + vec![peer], + &peerset_protocol_names, + WireMessage::::ViewUpdate( + local_view, + ), + &metrics, + ), + ValidationVersion::V2 => send_validation_message_v2( &mut network_service, vec![peer], - PeerSet::Validation, - version, &peerset_protocol_names, WireMessage::::ViewUpdate( local_view, @@ -293,22 +301,18 @@ where match CollationVersion::try_from(version) .expect("try_get_protocol has already checked version is known; qed") { - CollationVersion::V1 => send_message( + CollationVersion::V1 => send_collation_message_v1( &mut network_service, vec![peer], - PeerSet::Collation, - version, &peerset_protocol_names, WireMessage::::ViewUpdate( local_view, ), &metrics, ), - CollationVersion::V2 => send_message( + CollationVersion::V2 => send_collation_message_v2( &mut network_service, vec![peer], - PeerSet::Collation, - version, &peerset_protocol_names, WireMessage::::ViewUpdate( local_view, @@ -386,8 +390,16 @@ where .filter_map(|(protocol, msg_bytes)| { // version doesn't matter because we always receive on the 'correct' // protocol name, not the negotiated fallback. 
- let (peer_set, _version) = + let (peer_set, version) = peerset_protocol_names.try_get_protocol(protocol)?; + gum::trace!( + target: LOG_TARGET, + ?peer_set, + ?protocol, + ?version, + "Received notification" + ); + if peer_set == PeerSet::Validation { if expected_versions[PeerSet::Validation].is_none() { return Some(Err(UNCONNECTED_PEERSET_COST)) @@ -474,6 +486,16 @@ where v_messages, &metrics, ) + } else if expected_versions[PeerSet::Validation] == + Some(ValidationVersion::VStaging.into()) + { + handle_peer_messages::( + remote, + PeerSet::Validation, + &mut shared.0.lock().validation_peers, + v_messages, + &metrics, + ) } else { gum::warn!( target: LOG_TARGET, @@ -551,6 +573,15 @@ where let mut peers = Vec::with_capacity(neighbors.len()); for (discovery_id, validator_index) in neighbors { let addr = get_peer_id_by_authority_id(ads, discovery_id.clone()).await; + if addr.is_none() { + // See on why is not good in https://github.com/paritytech/polkadot-sdk/issues/2138 + gum::debug!( + target: LOG_TARGET, + ?validator_index, + "Could not determine peer_id for validator, let the team know in \n + https://github.com/paritytech/polkadot-sdk/issues/2138" + ) + } peers.push(TopologyPeerInfo { peer_ids: addr.into_iter().collect(), validator_index, @@ -806,15 +837,16 @@ fn update_our_view( ) }; - let filter_by_version = |peers: &[(PeerId, ProtocolVersion)], version| { - peers.iter().filter(|(_, v)| v == &version).map(|(p, _)| *p).collect::>() - }; + let v1_validation_peers = + filter_by_peer_version(&validation_peers, ValidationVersion::V1.into()); + let v1_collation_peers = filter_by_peer_version(&collation_peers, CollationVersion::V1.into()); - let v1_validation_peers = filter_by_version(&validation_peers, ValidationVersion::V1.into()); - let v1_collation_peers = filter_by_version(&collation_peers, CollationVersion::V1.into()); + let v2_validation_peers = + filter_by_peer_version(&validation_peers, ValidationVersion::V2.into()); + let v2_collation_peers = filter_by_peer_version(&collation_peers, CollationVersion::V2.into()); - let v2_validation_peers = filter_by_version(&validation_peers, ValidationVersion::V2.into()); - let v2_collation_peers = filter_by_version(&collation_peers, ValidationVersion::V2.into()); + let vstaging_validation_peers = + filter_by_peer_version(&validation_peers, ValidationVersion::VStaging.into()); send_validation_message_v1( net, @@ -844,7 +876,15 @@ fn update_our_view( net, v2_collation_peers, peerset_protocol_names, - WireMessage::ViewUpdate(new_view), + WireMessage::ViewUpdate(new_view.clone()), + metrics, + ); + + send_validation_message_vstaging( + net, + vstaging_validation_peers, + peerset_protocol_names, + WireMessage::ViewUpdate(new_view.clone()), metrics, ); @@ -917,78 +957,6 @@ fn handle_peer_messages>( (outgoing_events, reports) } -fn send_validation_message_v1( - net: &mut impl Network, - peers: Vec, - peerset_protocol_names: &PeerSetProtocolNames, - message: WireMessage, - metrics: &Metrics, -) { - send_message( - net, - peers, - PeerSet::Validation, - ValidationVersion::V1.into(), - peerset_protocol_names, - message, - metrics, - ); -} - -fn send_collation_message_v1( - net: &mut impl Network, - peers: Vec, - peerset_protocol_names: &PeerSetProtocolNames, - message: WireMessage, - metrics: &Metrics, -) { - send_message( - net, - peers, - PeerSet::Collation, - CollationVersion::V1.into(), - peerset_protocol_names, - message, - metrics, - ); -} - -fn send_validation_message_v2( - net: &mut impl Network, - peers: Vec, - protocol_names: 
&PeerSetProtocolNames, - message: WireMessage, - metrics: &Metrics, -) { - send_message( - net, - peers, - PeerSet::Validation, - ValidationVersion::V2.into(), - protocol_names, - message, - metrics, - ); -} - -fn send_collation_message_v2( - net: &mut impl Network, - peers: Vec, - protocol_names: &PeerSetProtocolNames, - message: WireMessage, - metrics: &Metrics, -) { - send_message( - net, - peers, - PeerSet::Collation, - CollationVersion::V2.into(), - protocol_names, - message, - metrics, - ); -} - async fn dispatch_validation_event_to_all( event: NetworkBridgeEvent, ctx: &mut impl overseer::NetworkBridgeRxSenderTrait, diff --git a/polkadot/node/network/bridge/src/rx/tests.rs b/polkadot/node/network/bridge/src/rx/tests.rs index 7c69cce48391b65294970232b1a277d7c261ba05..f784e78a7f2154c92b9ee85f5b6a1d05ea51c82f 100644 --- a/polkadot/node/network/bridge/src/rx/tests.rs +++ b/polkadot/node/network/bridge/src/rx/tests.rs @@ -1248,6 +1248,9 @@ fn network_protocol_versioning_view_update() { ValidationVersion::V2 => WireMessage::::ViewUpdate(view.clone()) .encode(), + ValidationVersion::VStaging => + WireMessage::::ViewUpdate(view.clone()) + .encode(), }; assert_network_actions_contains( &actions, diff --git a/polkadot/node/network/bridge/src/tx/mod.rs b/polkadot/node/network/bridge/src/tx/mod.rs index f15635f1f41c528f6b15a07870f1f348a6b777a3..5f222ad59c75e229f8d8df0c0fb0a080aa2e1505 100644 --- a/polkadot/node/network/bridge/src/tx/mod.rs +++ b/polkadot/node/network/bridge/src/tx/mod.rs @@ -18,9 +18,7 @@ use super::*; use polkadot_node_network_protocol::{ - peer_set::{CollationVersion, PeerSet, PeerSetProtocolNames, ValidationVersion}, - request_response::ReqProtocolNames, - v1 as protocol_v1, v2 as protocol_v2, PeerId, Versioned, + peer_set::PeerSetProtocolNames, request_response::ReqProtocolNames, Versioned, }; use polkadot_node_subsystem::{ @@ -41,7 +39,10 @@ use crate::validator_discovery; /// Actual interfacing to the network based on the `Network` trait. /// /// Defines the `Network` trait with an implementation for an `Arc`. 
-use crate::network::{send_message, Network}; +use crate::network::{ + send_collation_message_v1, send_collation_message_v2, send_validation_message_v1, + send_validation_message_v2, send_validation_message_vstaging, Network, +}; use crate::metrics::Metrics; @@ -187,6 +188,7 @@ where gum::trace!( target: LOG_TARGET, action = "SendValidationMessages", + ?msg, num_messages = 1usize, ); @@ -198,6 +200,13 @@ where WireMessage::ProtocolMessage(msg), &metrics, ), + Versioned::VStaging(msg) => send_validation_message_vstaging( + &mut network_service, + peers, + peerset_protocol_names, + WireMessage::ProtocolMessage(msg), + &metrics, + ), Versioned::V2(msg) => send_validation_message_v2( &mut network_service, peers, @@ -212,6 +221,7 @@ where target: LOG_TARGET, action = "SendValidationMessages", num_messages = %msgs.len(), + ?msgs, ); for (peers, msg) in msgs { @@ -223,6 +233,13 @@ where WireMessage::ProtocolMessage(msg), &metrics, ), + Versioned::VStaging(msg) => send_validation_message_vstaging( + &mut network_service, + peers, + peerset_protocol_names, + WireMessage::ProtocolMessage(msg), + &metrics, + ), Versioned::V2(msg) => send_validation_message_v2( &mut network_service, peers, @@ -248,7 +265,7 @@ where WireMessage::ProtocolMessage(msg), &metrics, ), - Versioned::V2(msg) => send_collation_message_v2( + Versioned::V2(msg) | Versioned::VStaging(msg) => send_collation_message_v2( &mut network_service, peers, peerset_protocol_names, @@ -273,7 +290,7 @@ where WireMessage::ProtocolMessage(msg), &metrics, ), - Versioned::V2(msg) => send_collation_message_v2( + Versioned::V2(msg) | Versioned::VStaging(msg) => send_collation_message_v2( &mut network_service, peers, peerset_protocol_names, @@ -386,75 +403,3 @@ where Ok(()) } - -fn send_validation_message_v1( - net: &mut impl Network, - peers: Vec, - protocol_names: &PeerSetProtocolNames, - message: WireMessage, - metrics: &Metrics, -) { - send_message( - net, - peers, - PeerSet::Validation, - ValidationVersion::V1.into(), - protocol_names, - message, - metrics, - ); -} - -fn send_collation_message_v1( - net: &mut impl Network, - peers: Vec, - protocol_names: &PeerSetProtocolNames, - message: WireMessage, - metrics: &Metrics, -) { - send_message( - net, - peers, - PeerSet::Collation, - CollationVersion::V1.into(), - protocol_names, - message, - metrics, - ); -} - -fn send_validation_message_v2( - net: &mut impl Network, - peers: Vec, - protocol_names: &PeerSetProtocolNames, - message: WireMessage, - metrics: &Metrics, -) { - send_message( - net, - peers, - PeerSet::Validation, - ValidationVersion::V2.into(), - protocol_names, - message, - metrics, - ); -} - -fn send_collation_message_v2( - net: &mut impl Network, - peers: Vec, - protocol_names: &PeerSetProtocolNames, - message: WireMessage, - metrics: &Metrics, -) { - send_message( - net, - peers, - PeerSet::Collation, - CollationVersion::V2.into(), - protocol_names, - message, - metrics, - ); -} diff --git a/polkadot/node/network/bridge/src/tx/tests.rs b/polkadot/node/network/bridge/src/tx/tests.rs index 48287f8b74c91a526263fed1d176f916753ce985..1a2d9a7a4240cb64d545f3657804ada5155463bf 100644 --- a/polkadot/node/network/bridge/src/tx/tests.rs +++ b/polkadot/node/network/bridge/src/tx/tests.rs @@ -25,9 +25,9 @@ use std::collections::HashSet; use sc_network::{Event as NetworkEvent, IfDisconnected, ProtocolName, ReputationChange}; use polkadot_node_network_protocol::{ - peer_set::PeerSetProtocolNames, + peer_set::{PeerSetProtocolNames, ValidationVersion}, request_response::{outgoing::Requests, 
ReqProtocolNames}, - ObservedRole, Versioned, + v1 as protocol_v1, v2 as protocol_v2, ObservedRole, Versioned, }; use polkadot_node_subsystem::{FromOrchestra, OverseerSignal}; use polkadot_node_subsystem_test_helpers::TestSubsystemContextHandle; @@ -356,7 +356,6 @@ fn network_protocol_versioning_send() { } // send a validation protocol message. - { let approval_distribution_message = protocol_v2::ApprovalDistributionMessage::Approvals(Vec::new()); diff --git a/polkadot/node/network/collator-protocol/Cargo.toml b/polkadot/node/network/collator-protocol/Cargo.toml index e5328cf16629f4c7bdb2180e90fbe888dbfb8f42..367a97f35d994c566564247cf093e73e15ec4396 100644 --- a/polkadot/node/network/collator-protocol/Cargo.toml +++ b/polkadot/node/network/collator-protocol/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "polkadot-collator-protocol" version = "1.0.0" +description = "Polkadot Collator Protocol subsystem. Allows collators and validators to talk to each other." authors.workspace = true edition.workspace = true license.workspace = true diff --git a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs index 304cabbaac80c1a540a352da9ed74faa5f4dbf32..b3a396e1be3488e34424681132e1ffde11097717 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs @@ -93,13 +93,18 @@ const COST_APPARENT_FLOOD: Rep = /// For considerations on this value, see: https://github.com/paritytech/polkadot/issues/4386 const MAX_UNSHARED_UPLOAD_TIME: Duration = Duration::from_millis(150); -/// Ensure that collator issues a connection request at least once every this many seconds. -/// Usually it's done when advertising new collation. However, if the core stays occupied or -/// it's not our turn to produce a candidate, it's important to disconnect from previous -/// peers. +/// Ensure that collator updates its connection requests to validators +/// this long after the most recent leaf. +/// +/// The timeout is designed for substreams to be properly closed if they need to be +/// reopened shortly after the next leaf. +/// +/// Collators also update their connection requests on every new collation. +/// This timeout is mostly about removing stale connections while avoiding races +/// with new collations which may want to reactivate them. /// /// Validators are obtained from [`ValidatorGroupsBuffer::validators_to_connect`]. -const RECONNECT_TIMEOUT: Duration = Duration::from_secs(12); +const RECONNECT_AFTER_LEAF_TIMEOUT: Duration = Duration::from_secs(4); /// Future that when resolved indicates that we should update reserved peer-set /// of validators we want to be connected to. @@ -108,6 +113,13 @@ const RECONNECT_TIMEOUT: Duration = Duration::from_secs(12); /// connected. type ReconnectTimeout = Fuse; +#[derive(Debug)] +enum ShouldAdvertiseTo { + Yes, + NotAuthority, + AlreadyAdvertised, +} + /// Info about validators we are currently connected to. /// /// It keeps track to which validators we advertised our collation. 
@@ -129,10 +141,10 @@ impl ValidatorGroup { candidate_hash: &CandidateHash, peer_ids: &HashMap>, peer: &PeerId, - ) -> bool { + ) -> ShouldAdvertiseTo { let authority_ids = match peer_ids.get(peer) { Some(authority_ids) => authority_ids, - None => return false, + None => return ShouldAdvertiseTo::NotAuthority, }; for id in authority_ids { @@ -151,11 +163,13 @@ impl ValidatorGroup { .get(candidate_hash) .map_or(true, |advertised| !advertised[validator_index]) { - return true + return ShouldAdvertiseTo::Yes + } else { + return ShouldAdvertiseTo::AlreadyAdvertised } } - false + ShouldAdvertiseTo::NotAuthority } /// Should be called after we advertised our collation to the given `peer` to keep track of it. @@ -255,8 +269,8 @@ struct State { /// Tracks which validators we want to stay connected to. validator_groups_buf: ValidatorGroupsBuffer, - /// Timeout-future that enforces collator to update the peer-set at least once - /// every [`RECONNECT_TIMEOUT`] seconds. + /// Timeout-future which is reset after every leaf to [`RECONNECT_AFTER_LEAF_TIMEOUT`] seconds. + /// When it fires, we update our reserved peers. reconnect_timeout: ReconnectTimeout, /// Metrics. @@ -443,7 +457,7 @@ async fn distribute_collation( } // Update a set of connected validators if necessary. - state.reconnect_timeout = connect_to_validators(ctx, &state.validator_groups_buf).await; + connect_to_validators(ctx, &state.validator_groups_buf).await; if let Some(result_sender) = result_sender { state.collation_result_senders.insert(candidate_hash, result_sender); @@ -619,15 +633,12 @@ async fn declare( /// Updates a set of connected validators based on their advertisement-bits /// in a validators buffer. -/// -/// Should be called again once a returned future resolves. #[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)] async fn connect_to_validators( ctx: &mut Context, validator_groups_buf: &ValidatorGroupsBuffer, -) -> ReconnectTimeout { +) { let validator_ids = validator_groups_buf.validators_to_connect(); - let is_disconnect = validator_ids.is_empty(); // ignore address resolution failure // will reissue a new request on new collation @@ -638,14 +649,6 @@ async fn connect_to_validators( failed, }) .await; - - if is_disconnect { - gum::trace!(target: LOG_TARGET, "Disconnecting from all peers"); - // Never resolves. - Fuse::terminated() - } else { - futures_timer::Delay::new(RECONNECT_TIMEOUT).fuse() - } } /// Advertise collation to the given `peer`. 
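A minimal, self-contained sketch (not part of the patch) of the decision flow behind the new `ShouldAdvertiseTo` enum: returning a reason instead of a `bool` lets `advertise_collation` log why a peer was skipped. The helper below is a simplified stand-in for `ValidatorGroup::should_advertise_to`, which additionally checks the peer's authority ids against the validator group.

#[derive(Debug)]
enum ShouldAdvertiseTo {
    Yes,
    NotAuthority,
    AlreadyAdvertised,
}

fn should_advertise_to(is_authority: bool, already_advertised: bool) -> ShouldAdvertiseTo {
    if !is_authority {
        return ShouldAdvertiseTo::NotAuthority
    }
    if already_advertised {
        return ShouldAdvertiseTo::AlreadyAdvertised
    }
    ShouldAdvertiseTo::Yes
}

fn advertise(peer: &str, decision: ShouldAdvertiseTo) {
    match decision {
        ShouldAdvertiseTo::Yes => println!("advertising collation to {peer}"),
        // The real code logs the skip reason via `gum::trace!(reason = ?should_advertise, ..)`.
        other => println!("not advertising to {peer}: {other:?}"),
    }
}

fn main() {
    advertise("peer-a", should_advertise_to(true, false));
    advertise("peer-b", should_advertise_to(true, true));
    advertise("peer-c", should_advertise_to(false, false));
}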
@@ -685,22 +688,29 @@ async fn advertise_collation( .validator_group .should_advertise_to(candidate_hash, peer_ids, &peer); - if !should_advertise { - gum::debug!( - target: LOG_TARGET, - ?relay_parent, - peer_id = %peer, - "Not advertising collation since validator is not interested", - ); - continue + match should_advertise { + ShouldAdvertiseTo::Yes => {}, + ShouldAdvertiseTo::NotAuthority | ShouldAdvertiseTo::AlreadyAdvertised => { + gum::trace!( + target: LOG_TARGET, + ?relay_parent, + ?candidate_hash, + peer_id = %peer, + reason = ?should_advertise, + "Not advertising collation" + ); + continue + }, } gum::debug!( target: LOG_TARGET, ?relay_parent, + ?candidate_hash, peer_id = %peer, "Advertising collation.", ); + collation.status.advance_to_advertised(); let collation_message = match protocol_version { @@ -870,7 +880,9 @@ async fn handle_incoming_peer_message( use protocol_v2::CollatorProtocolMessage as V2; match msg { - Versioned::V1(V1::Declare(..)) | Versioned::V2(V2::Declare(..)) => { + Versioned::V1(V1::Declare(..)) | + Versioned::V2(V2::Declare(..)) | + Versioned::VStaging(V2::Declare(..)) => { gum::trace!( target: LOG_TARGET, ?origin, @@ -881,7 +893,9 @@ async fn handle_incoming_peer_message( ctx.send_message(NetworkBridgeTxMessage::DisconnectPeer(origin, PeerSet::Collation)) .await; }, - Versioned::V1(V1::AdvertiseCollation(_)) | Versioned::V2(V2::AdvertiseCollation { .. }) => { + Versioned::V1(V1::AdvertiseCollation(_)) | + Versioned::V2(V2::AdvertiseCollation { .. }) | + Versioned::VStaging(V2::AdvertiseCollation { .. }) => { gum::trace!( target: LOG_TARGET, ?origin, @@ -896,7 +910,8 @@ async fn handle_incoming_peer_message( .await; }, Versioned::V1(V1::CollationSeconded(relay_parent, statement)) | - Versioned::V2(V2::CollationSeconded(relay_parent, statement)) => { + Versioned::V2(V2::CollationSeconded(relay_parent, statement)) | + Versioned::VStaging(V2::CollationSeconded(relay_parent, statement)) => { if !matches!(statement.unchecked_payload(), Statement::Seconded(_)) { gum::warn!( target: LOG_TARGET, @@ -1149,7 +1164,7 @@ async fn handle_network_msg( PeerConnected(peer_id, observed_role, protocol_version, maybe_authority) => { // If it is possible that a disconnected validator would attempt a reconnect // it should be handled here. - gum::trace!(target: LOG_TARGET, ?peer_id, ?observed_role, "Peer connected"); + gum::trace!(target: LOG_TARGET, ?peer_id, ?observed_role, ?maybe_authority, "Peer connected"); let version = match protocol_version.try_into() { Ok(version) => version, @@ -1200,7 +1215,11 @@ async fn handle_network_msg( }, UpdatedAuthorityIds(peer_id, authority_ids) => { gum::trace!(target: LOG_TARGET, ?peer_id, ?authority_ids, "Updated authority ids"); - state.peer_ids.insert(peer_id, authority_ids); + if let Some(version) = state.peer_data.get(&peer_id).map(|d| d.version) { + if state.peer_ids.insert(peer_id, authority_ids).is_none() { + declare(ctx, state, &peer_id, version).await; + } + } }, NewGossipTopology { .. } => { // impossible! @@ -1369,7 +1388,11 @@ async fn run_inner( "Failed to process message" )?; }, - FromOrchestra::Signal(ActiveLeaves(_update)) => {} + FromOrchestra::Signal(ActiveLeaves(update)) => { + if update.activated.is_some() { + *reconnect_timeout = futures_timer::Delay::new(RECONNECT_AFTER_LEAF_TIMEOUT).fuse(); + } + } FromOrchestra::Signal(BlockFinalized(..)) => {} FromOrchestra::Signal(Conclude) => return Ok(()), }, @@ -1390,7 +1413,7 @@ async fn run_inner( // The request it still alive, it should be kept in a waiting queue. 
} else { for authority_id in state.peer_ids.get(&peer_id).into_iter().flatten() { - // Timeout not hit, this peer is no longer interested in this relay parent. + // This peer has received the candidate. Not interested anymore. state.validator_groups_buf.reset_validator_interest(candidate_hash, authority_id); } waiting.waiting_peers.remove(&(peer_id, candidate_hash)); @@ -1446,12 +1469,11 @@ async fn run_inner( } } _ = reconnect_timeout => { - state.reconnect_timeout = - connect_to_validators(&mut ctx, &state.validator_groups_buf).await; + connect_to_validators(&mut ctx, &state.validator_groups_buf).await; gum::trace!( target: LOG_TARGET, - timeout = ?RECONNECT_TIMEOUT, + timeout = ?RECONNECT_AFTER_LEAF_TIMEOUT, "Peer-set updated due to a timeout" ); }, diff --git a/polkadot/node/network/collator-protocol/src/collator_side/validators_buffer.rs b/polkadot/node/network/collator-protocol/src/collator_side/validators_buffer.rs index cfa7627038481264e942ae5fb60e13e437713e23..5b88efc99d8307594c5ac726d1af5ab141e4718d 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/validators_buffer.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/validators_buffer.rs @@ -133,7 +133,7 @@ impl ValidatorGroupsBuffer { } } - /// Note that a validator is no longer interested in a given relay parent. + /// Note that a validator is no longer interested in a given candidate. pub fn reset_validator_interest( &mut self, candidate_hash: CandidateHash, diff --git a/polkadot/node/network/collator-protocol/src/validator_side/collation.rs b/polkadot/node/network/collator-protocol/src/validator_side/collation.rs index a53e0028b9e70455f3dd918bad58fc5ec472c173..d6f34fc81b82568f77835a31706abeab5ac51d13 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/collation.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/collation.rs @@ -31,6 +31,7 @@ use std::{collections::VecDeque, future::Future, pin::Pin, task::Poll}; use futures::{future::BoxFuture, FutureExt}; use polkadot_node_network_protocol::{ + peer_set::CollationVersion, request_response::{outgoing::RequestError, v1 as request_v1, OutgoingResult}, PeerId, }; @@ -160,6 +161,8 @@ pub fn fetched_collation_sanity_check( pub struct CollationEvent { /// Collator id. pub collator_id: CollatorId, + /// The network protocol version the collator is using. + pub collator_protocol_version: CollationVersion, /// The requested collation data. pub pending_collation: PendingCollation, } @@ -307,6 +310,8 @@ pub(super) struct CollationFetchRequest { pub pending_collation: PendingCollation, /// Collator id. pub collator_id: CollatorId, + /// The network protocol version the collator is using. + pub collator_protocol_version: CollationVersion, /// Responses from collator. pub from_collator: BoxFuture<'static, OutgoingResult>, /// Handle used for checking if this request was cancelled. 
@@ -334,6 +339,7 @@ impl Future for CollationFetchRequest { self.span.as_mut().map(|s| s.add_string_tag("success", "false")); return Poll::Ready(( CollationEvent { + collator_protocol_version: self.collator_protocol_version, collator_id: self.collator_id.clone(), pending_collation: self.pending_collation, }, @@ -344,6 +350,7 @@ impl Future for CollationFetchRequest { let res = self.from_collator.poll_unpin(cx).map(|res| { ( CollationEvent { + collator_protocol_version: self.collator_protocol_version, collator_id: self.collator_id.clone(), pending_collation: self.pending_collation, }, diff --git a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs index fcb408d54b1b72b856b2439bd2eb1823020fb513..20b3b9ea1d265ba242e6dd6dabf5999ae46b286c 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs @@ -85,6 +85,8 @@ const COST_NETWORK_ERROR: Rep = Rep::CostMinor("Some network error"); const COST_INVALID_SIGNATURE: Rep = Rep::Malicious("Invalid network message signature"); const COST_REPORT_BAD: Rep = Rep::Malicious("A collator was reported by another subsystem"); const COST_WRONG_PARA: Rep = Rep::Malicious("A collator provided a collation for the wrong para"); +const COST_PROTOCOL_MISUSE: Rep = + Rep::Malicious("A collator advertising a collation for an async backing relay parent using V1"); const COST_UNNEEDED_COLLATOR: Rep = Rep::CostMinor("An unneeded collator connected"); const BENEFIT_NOTIFY_GOOD: Rep = Rep::BenefitMinor("A collator was noted good by another subsystem"); @@ -144,9 +146,6 @@ enum InsertAdvertisementError { UndeclaredCollator, /// A limit for announcements per peer is reached. PeerLimitReached, - /// Mismatch of relay parent mode and advertisement arguments. - /// An internal error that should not happen. - ProtocolMismatch, } #[derive(Debug)] @@ -252,23 +251,41 @@ impl PeerData { }, ( ProspectiveParachainsMode::Enabled { max_candidate_depth, .. 
}, - Some(candidate_hash), + candidate_hash, ) => { - if state - .advertisements - .get(&on_relay_parent) - .map_or(false, |candidates| candidates.contains(&candidate_hash)) - { - return Err(InsertAdvertisementError::Duplicate) - } - let candidates = state.advertisements.entry(on_relay_parent).or_default(); - - if candidates.len() > max_candidate_depth { - return Err(InsertAdvertisementError::PeerLimitReached) - } - candidates.insert(candidate_hash); + if let Some(candidate_hash) = candidate_hash { + if state + .advertisements + .get(&on_relay_parent) + .map_or(false, |candidates| candidates.contains(&candidate_hash)) + { + return Err(InsertAdvertisementError::Duplicate) + } + + let candidates = + state.advertisements.entry(on_relay_parent).or_default(); + + if candidates.len() > max_candidate_depth { + return Err(InsertAdvertisementError::PeerLimitReached) + } + candidates.insert(candidate_hash); + } else { + if self.version != CollationVersion::V1 { + gum::error!( + target: LOG_TARGET, + "Programming error, `candidate_hash` can not be `None` \ + for non `V1` networking.", + ); + } + + if state.advertisements.contains_key(&on_relay_parent) { + return Err(InsertAdvertisementError::Duplicate) + } + state + .advertisements + .insert(on_relay_parent, HashSet::from_iter(candidate_hash)); + }; }, - _ => return Err(InsertAdvertisementError::ProtocolMismatch), } state.last_active = Instant::now(); @@ -705,6 +722,7 @@ async fn request_collation( let collation_request = CollationFetchRequest { pending_collation, collator_id: collator_id.clone(), + collator_protocol_version: peer_protocol_version, from_collator: response_recv.boxed(), cancellation_token: cancellation_token.clone(), span: state @@ -758,7 +776,8 @@ async fn process_incoming_peer_message( match msg { Versioned::V1(V1::Declare(collator_id, para_id, signature)) | - Versioned::V2(V2::Declare(collator_id, para_id, signature)) => { + Versioned::V2(V2::Declare(collator_id, para_id, signature)) | + Versioned::VStaging(V2::Declare(collator_id, para_id, signature)) => { if collator_peer_id(&state.peer_data, &collator_id).is_some() { modify_reputation( &mut state.reputation, @@ -874,6 +893,11 @@ async fn process_incoming_peer_message( relay_parent, candidate_hash, parent_head_data_hash, + }) | + Versioned::VStaging(V2::AdvertiseCollation { + relay_parent, + candidate_hash, + parent_head_data_hash, }) => if let Err(err) = handle_advertisement( ctx.sender(), @@ -897,7 +921,9 @@ async fn process_incoming_peer_message( modify_reputation(&mut state.reputation, ctx.sender(), origin, rep).await; } }, - Versioned::V1(V1::CollationSeconded(..)) | Versioned::V2(V2::CollationSeconded(..)) => { + Versioned::V1(V1::CollationSeconded(..)) | + Versioned::V2(V2::CollationSeconded(..)) | + Versioned::VStaging(V2::CollationSeconded(..)) => { gum::warn!( target: LOG_TARGET, peer_id = ?origin, @@ -920,10 +946,11 @@ enum AdvertisementError { UndeclaredCollator, /// We're assigned to a different para at the given relay parent. InvalidAssignment, - /// An advertisement format doesn't match the relay parent. - ProtocolMismatch, /// Para reached a limit of seconded candidates for this relay parent. SecondedLimitReached, + /// Collator trying to advertise a collation using V1 protocol for an async backing relay + /// parent. + ProtocolMisuse, /// Advertisement is invalid. 
Invalid(InsertAdvertisementError), } @@ -933,8 +960,9 @@ impl AdvertisementError { use AdvertisementError::*; match self { InvalidAssignment => Some(COST_WRONG_PARA), + ProtocolMisuse => Some(COST_PROTOCOL_MISUSE), RelayParentUnknown | UndeclaredCollator | Invalid(_) => Some(COST_UNEXPECTED_MESSAGE), - UnknownPeer | ProtocolMismatch | SecondedLimitReached => None, + UnknownPeer | SecondedLimitReached => None, } } } @@ -1042,6 +1070,13 @@ where .get(&relay_parent) .map(|s| s.child("advertise-collation")); + let peer_data = state.peer_data.get_mut(&peer_id).ok_or(AdvertisementError::UnknownPeer)?; + + if peer_data.version == CollationVersion::V1 && !state.active_leaves.contains_key(&relay_parent) + { + return Err(AdvertisementError::ProtocolMisuse) + } + let per_relay_parent = state .per_relay_parent .get(&relay_parent) @@ -1050,20 +1085,12 @@ where let relay_parent_mode = per_relay_parent.prospective_parachains_mode; let assignment = &per_relay_parent.assignment; - let peer_data = state.peer_data.get_mut(&peer_id).ok_or(AdvertisementError::UnknownPeer)?; let collator_para_id = peer_data.collating_para().ok_or(AdvertisementError::UndeclaredCollator)?; - match assignment.current { - Some(id) if id == collator_para_id => { - // Our assignment. - }, - _ => return Err(AdvertisementError::InvalidAssignment), - }; - - if relay_parent_mode.is_enabled() && prospective_candidate.is_none() { - // Expected v2 advertisement. - return Err(AdvertisementError::ProtocolMismatch) + // Check if this is assigned to us. + if assignment.current.map_or(true, |id| id != collator_para_id) { + return Err(AdvertisementError::InvalidAssignment) } // Always insert advertisements that pass all the checks for spam protection. @@ -1077,13 +1104,17 @@ where &state.active_leaves, ) .map_err(AdvertisementError::Invalid)?; + if !per_relay_parent.collations.is_seconded_limit_reached(relay_parent_mode) { return Err(AdvertisementError::SecondedLimitReached) } if let Some((candidate_hash, parent_head_data_hash)) = prospective_candidate { - let is_seconding_allowed = !relay_parent_mode.is_enabled() || - can_second( + // We need to queue the advertisement if we are not allowed to second it. + // + // This is also only important when async backing is enabled. + let queue_advertisement = relay_parent_mode.is_enabled() && + !can_second( sender, collator_para_id, relay_parent, @@ -1092,7 +1123,7 @@ where ) .await; - if !is_seconding_allowed { + if queue_advertisement { gum::debug!( target: LOG_TARGET, relay_parent = ?relay_parent, @@ -1125,6 +1156,7 @@ where prospective_candidate, ) .await; + if let Err(fetch_error) = result { gum::debug!( target: LOG_TARGET, @@ -1477,7 +1509,7 @@ async fn process_msg( }, }; let fetched_collation = FetchedCollation::from(&receipt.to_plain()); - if let Some(CollationEvent { collator_id, pending_collation }) = + if let Some(CollationEvent { collator_id, pending_collation, .. }) = state.fetched_candidates.remove(&fetched_collation) { let PendingCollation { relay_parent, peer_id, prospective_candidate, .. } = @@ -1635,7 +1667,7 @@ async fn run_inner( Ok(res) => res }; - let CollationEvent {collator_id, pending_collation} = res.collation_event.clone(); + let CollationEvent {collator_id, pending_collation, .. 
} = res.collation_event.clone(); if let Err(err) = kick_off_seconding(&mut ctx, &mut state, res).await { gum::warn!( target: LOG_TARGET, @@ -1783,39 +1815,39 @@ async fn kick_off_seconding( }, }; let collations = &mut per_relay_parent.collations; - let relay_parent_mode = per_relay_parent.prospective_parachains_mode; let fetched_collation = FetchedCollation::from(&candidate_receipt); if let Entry::Vacant(entry) = state.fetched_candidates.entry(fetched_collation) { collation_event.pending_collation.commitments_hash = Some(candidate_receipt.commitments_hash); - let pvd = - match (relay_parent_mode, collation_event.pending_collation.prospective_candidate) { - ( - ProspectiveParachainsMode::Enabled { .. }, - Some(ProspectiveCandidate { parent_head_data_hash, .. }), - ) => - request_prospective_validation_data( - ctx.sender(), - relay_parent, - parent_head_data_hash, - pending_collation.para_id, - ) - .await?, - (ProspectiveParachainsMode::Disabled, _) => - request_persisted_validation_data( - ctx.sender(), - candidate_receipt.descriptor().relay_parent, - candidate_receipt.descriptor().para_id, - ) - .await?, - _ => { - // `handle_advertisement` checks for protocol mismatch. - return Ok(()) - }, - } - .ok_or(SecondingError::PersistedValidationDataNotFound)?; + let pvd = match ( + collation_event.collator_protocol_version, + collation_event.pending_collation.prospective_candidate, + ) { + (CollationVersion::V2, Some(ProspectiveCandidate { parent_head_data_hash, .. })) + if per_relay_parent.prospective_parachains_mode.is_enabled() => + request_prospective_validation_data( + ctx.sender(), + relay_parent, + parent_head_data_hash, + pending_collation.para_id, + ) + .await?, + // Support V2 collators without async backing enabled. + (CollationVersion::V2, Some(_)) | (CollationVersion::V1, _) => + request_persisted_validation_data( + ctx.sender(), + candidate_receipt.descriptor().relay_parent, + candidate_receipt.descriptor().para_id, + ) + .await?, + _ => { + // `handle_advertisement` checks for protocol mismatch. + return Ok(()) + }, + } + .ok_or(SecondingError::PersistedValidationDataNotFound)?; fetched_collation_sanity_check( &collation_event.pending_collation, @@ -1864,7 +1896,8 @@ async fn handle_collation_fetch_response( network_error_freq: &mut gum::Freq, canceled_freq: &mut gum::Freq, ) -> std::result::Result> { - let (CollationEvent { collator_id, pending_collation }, response) = response; + let (CollationEvent { collator_id, collator_protocol_version, pending_collation }, response) = + response; // Remove the cancellation handle, as the future already completed. 
state.collation_requests_cancel_handles.remove(&pending_collation); @@ -1970,7 +2003,11 @@ async fn handle_collation_fetch_response( metrics_result = Ok(()); Ok(PendingCollationFetch { - collation_event: CollationEvent { collator_id, pending_collation }, + collation_event: CollationEvent { + collator_id, + pending_collation, + collator_protocol_version, + }, candidate_receipt, pov, }) diff --git a/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs index 9812998aab763cee54e1dac5232c1c9bddb4651b..3a9740149948f4796df9d5f9d775861b4fe901de 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs @@ -269,15 +269,15 @@ async fn assert_candidate_backing_second( expected_relay_parent: Hash, expected_para_id: ParaId, expected_pov: &PoV, - mode: ProspectiveParachainsMode, + version: CollationVersion, ) -> CandidateReceipt { let pvd = dummy_pvd(); // Depending on relay parent mode pvd will be either requested // from the Runtime API or Prospective Parachains. let msg = overseer_recv(virtual_overseer).await; - match mode { - ProspectiveParachainsMode::Disabled => assert_matches!( + match version { + CollationVersion::V1 => assert_matches!( msg, AllMessages::RuntimeApi(RuntimeApiMessage::Request( hash, @@ -289,7 +289,7 @@ async fn assert_candidate_backing_second( tx.send(Ok(Some(pvd.clone()))).unwrap(); } ), - ProspectiveParachainsMode::Enabled { .. } => assert_matches!( + CollationVersion::V2 => assert_matches!( msg, AllMessages::ProspectiveParachains( ProspectiveParachainsMessage::GetProspectiveValidationData(request, tx), @@ -532,7 +532,14 @@ fn act_on_advertisement_v2() { ) .await; - let candidate_hash = CandidateHash::default(); + let pov = PoV { block_data: BlockData(vec![]) }; + let mut candidate_a = + dummy_candidate_receipt_bad_sig(dummy_hash(), Some(Default::default())); + candidate_a.descriptor.para_id = test_state.chain_ids[0]; + candidate_a.descriptor.relay_parent = test_state.relay_parent; + candidate_a.descriptor.persisted_validation_data_hash = dummy_pvd().hash(); + + let candidate_hash = candidate_a.hash(); let parent_head_data_hash = Hash::zero(); // v2 advertisement. advertise_collation( @@ -543,7 +550,7 @@ fn act_on_advertisement_v2() { ) .await; - assert_fetch_collation_request( + let response_channel = assert_fetch_collation_request( &mut virtual_overseer, test_state.relay_parent, test_state.chain_ids[0], @@ -551,6 +558,24 @@ fn act_on_advertisement_v2() { ) .await; + response_channel + .send(Ok(request_v1::CollationFetchingResponse::Collation( + candidate_a.clone(), + pov.clone(), + ) + .encode())) + .expect("Sending response should succeed"); + + assert_candidate_backing_second( + &mut virtual_overseer, + test_state.relay_parent, + test_state.chain_ids[0], + &pov, + // Async backing isn't enabled and thus it should do it the old way. 
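+ // I.e. the persisted validation data is requested via the Runtime API rather than Prospective Parachains.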
+ CollationVersion::V1, + ) + .await; + virtual_overseer }); } @@ -748,7 +773,7 @@ fn fetch_one_collation_at_a_time() { test_state.relay_parent, test_state.chain_ids[0], &pov, - ProspectiveParachainsMode::Disabled, + CollationVersion::V1, ) .await; @@ -880,7 +905,7 @@ fn fetches_next_collation() { second, test_state.chain_ids[0], &pov, - ProspectiveParachainsMode::Disabled, + CollationVersion::V1, ) .await; @@ -1010,7 +1035,7 @@ fn fetch_next_collation_on_invalid_collation() { test_state.relay_parent, test_state.chain_ids[0], &pov, - ProspectiveParachainsMode::Disabled, + CollationVersion::V1, ) .await; diff --git a/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs b/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs index 4da0f11da39002fe035c618608e44ffca2939e94..c5236ef3eb211eedd5b95afb57a609c9b082c6d6 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs @@ -74,7 +74,7 @@ async fn assert_assign_incoming( } /// Handle a view update. -async fn update_view( +pub(super) async fn update_view( virtual_overseer: &mut VirtualOverseer, test_state: &TestState, new_view: Vec<(Hash, u32)>, // Hash and block number. @@ -212,6 +212,7 @@ async fn assert_collation_seconded( virtual_overseer: &mut VirtualOverseer, relay_parent: Hash, peer_id: PeerId, + version: CollationVersion, ) { assert_matches!( overseer_recv(virtual_overseer).await, @@ -222,29 +223,51 @@ async fn assert_collation_seconded( assert_eq!(rep.value, BENEFIT_NOTIFY_GOOD.cost_or_benefit()); } ); - assert_matches!( - overseer_recv(virtual_overseer).await, - AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendCollationMessage( - peers, - Versioned::V2(protocol_v2::CollationProtocol::CollatorProtocol( - protocol_v2::CollatorProtocolMessage::CollationSeconded( - _relay_parent, - .., - ), - )), - )) => { - assert_eq!(peers, vec![peer_id]); - assert_eq!(relay_parent, _relay_parent); - } - ); + + match version { + CollationVersion::V1 => { + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendCollationMessage( + peers, + Versioned::V1(protocol_v1::CollationProtocol::CollatorProtocol( + protocol_v1::CollatorProtocolMessage::CollationSeconded( + _relay_parent, + .., + ), + )), + )) => { + assert_eq!(peers, vec![peer_id]); + assert_eq!(relay_parent, _relay_parent); + } + ); + }, + CollationVersion::V2 => { + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendCollationMessage( + peers, + Versioned::V2(protocol_v2::CollationProtocol::CollatorProtocol( + protocol_v2::CollatorProtocolMessage::CollationSeconded( + _relay_parent, + .., + ), + )), + )) => { + assert_eq!(peers, vec![peer_id]); + assert_eq!(relay_parent, _relay_parent); + } + ); + }, + } } #[test] -fn v1_advertisement_rejected() { +fn v1_advertisement_accepted_and_seconded() { let test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { - let TestHarness { mut virtual_overseer, .. } = test_harness; + let TestHarness { mut virtual_overseer, keystore } = test_harness; let pair_a = CollatorPair::generate().0; @@ -267,9 +290,94 @@ fn v1_advertisement_rejected() { advertise_collation(&mut virtual_overseer, peer_a, head_b, None).await; - // Not reported. 
- test_helpers::Yield::new().await; - assert_matches!(virtual_overseer.recv().now_or_never(), None); + let response_channel = assert_fetch_collation_request( + &mut virtual_overseer, + head_b, + test_state.chain_ids[0], + None, + ) + .await; + + let mut candidate = dummy_candidate_receipt_bad_sig(head_b, Some(Default::default())); + candidate.descriptor.para_id = test_state.chain_ids[0]; + candidate.descriptor.persisted_validation_data_hash = dummy_pvd().hash(); + let commitments = CandidateCommitments { + head_data: HeadData(vec![1 as u8]), + horizontal_messages: Default::default(), + upward_messages: Default::default(), + new_validation_code: None, + processed_downward_messages: 0, + hrmp_watermark: 0, + }; + candidate.commitments_hash = commitments.hash(); + + let pov = PoV { block_data: BlockData(vec![1]) }; + + response_channel + .send(Ok(request_v2::CollationFetchingResponse::Collation( + candidate.clone(), + pov.clone(), + ) + .encode())) + .expect("Sending response should succeed"); + + assert_candidate_backing_second( + &mut virtual_overseer, + head_b, + test_state.chain_ids[0], + &pov, + CollationVersion::V1, + ) + .await; + + let candidate = CommittedCandidateReceipt { descriptor: candidate.descriptor, commitments }; + + send_seconded_statement(&mut virtual_overseer, keystore.clone(), &candidate).await; + + assert_collation_seconded(&mut virtual_overseer, head_b, peer_a, CollationVersion::V1) + .await; + + virtual_overseer + }); +} + +#[test] +fn v1_advertisement_rejected_on_non_active_leave() { + let test_state = TestState::default(); + + test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { + let TestHarness { mut virtual_overseer, .. } = test_harness; + + let pair_a = CollatorPair::generate().0; + + let head_b = Hash::from_low_u64_be(128); + let head_b_num: u32 = 5; + + update_view(&mut virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await; + + let peer_a = PeerId::random(); + + // Accept both collators from the implicit view. + connect_and_declare_collator( + &mut virtual_overseer, + peer_a, + pair_a.clone(), + test_state.chain_ids[0], + CollationVersion::V1, + ) + .await; + + advertise_collation(&mut virtual_overseer, peer_a, get_parent_hash(head_b), None).await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::NetworkBridgeTx( + NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(peer, rep)), + ) => { + assert_eq!(peer, peer_a); + assert_eq!(rep.value, COST_PROTOCOL_MISUSE.cost_or_benefit()); + } + ); virtual_overseer }); @@ -469,10 +577,7 @@ fn second_multiple_candidates_per_relay_parent() { head_c, test_state.chain_ids[0], &pov, - ProspectiveParachainsMode::Enabled { - max_candidate_depth: ASYNC_BACKING_PARAMETERS.max_candidate_depth as _, - allowed_ancestry_len: ASYNC_BACKING_PARAMETERS.allowed_ancestry_len as _, - }, + CollationVersion::V2, ) .await; @@ -481,7 +586,8 @@ fn second_multiple_candidates_per_relay_parent() { send_seconded_statement(&mut virtual_overseer, keystore.clone(), &candidate).await; - assert_collation_seconded(&mut virtual_overseer, head_c, peer_a).await; + assert_collation_seconded(&mut virtual_overseer, head_c, peer_a, CollationVersion::V2) + .await; } // No more advertisements can be made for this relay parent. 
diff --git a/polkadot/node/network/dispute-distribution/Cargo.toml b/polkadot/node/network/dispute-distribution/Cargo.toml index 5d8e245d289a7637f9c64c069f6038ab41946c49..f4ea358c41b513c29c24e477bd9ba009f6c55b9a 100644 --- a/polkadot/node/network/dispute-distribution/Cargo.toml +++ b/polkadot/node/network/dispute-distribution/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "polkadot-dispute-distribution" version = "1.0.0" +description = "Polkadot Dispute Distribution subsystem, which ensures all concerned validators are aware of a dispute and have the relevant votes." authors.workspace = true edition.workspace = true license.workspace = true diff --git a/polkadot/node/network/gossip-support/Cargo.toml b/polkadot/node/network/gossip-support/Cargo.toml index a5bfda73833f32d354bd111948104d9aa73e709f..a9f68261addf4b0fd349bc212287009e1936ab5c 100644 --- a/polkadot/node/network/gossip-support/Cargo.toml +++ b/polkadot/node/network/gossip-support/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "polkadot-gossip-support" version = "1.0.0" +description = "Polkadot Gossip Support subsystem. Responsible for keeping track of session changes and issuing a connection request to the relevant validators on every new session." authors.workspace = true edition.workspace = true license.workspace = true diff --git a/polkadot/node/network/gossip-support/src/lib.rs b/polkadot/node/network/gossip-support/src/lib.rs index 4fa23507e86bd145782d068b0aa84d0cf2c98ba6..674c86e5ce27a122253a3363364375af5bd0f8dc 100644 --- a/polkadot/node/network/gossip-support/src/lib.rs +++ b/polkadot/node/network/gossip-support/src/lib.rs @@ -104,7 +104,7 @@ pub struct GossipSupport { /// By `PeerId`. /// /// Needed for efficient handling of disconnect events. - connected_authorities_by_peer_id: HashMap>, + connected_peers: HashMap>, /// Authority discovery service. authority_discovery: AD, @@ -130,7 +130,7 @@ where failure_start: None, resolved_authorities: HashMap::new(), connected_authorities: HashMap::new(), - connected_authorities_by_peer_id: HashMap::new(), + connected_peers: HashMap::new(), authority_discovery, metrics, } @@ -407,19 +407,42 @@ where } } - for (peer_id, auths) in authority_ids { - if self.connected_authorities_by_peer_id.get(&peer_id) != Some(&auths) { + // peer was authority and now isn't + for (peer_id, current) in self.connected_peers.iter_mut() { + // empty -> nonempty is handled in the next loop + if !current.is_empty() && !authority_ids.contains_key(peer_id) { + sender + .send_message(NetworkBridgeRxMessage::UpdatedAuthorityIds { + peer_id: *peer_id, + authority_ids: HashSet::new(), + }) + .await; + + for a in current.drain() { + self.connected_authorities.remove(&a); + } + } + } + + // peer has new authority set. + for (peer_id, new) in authority_ids { + // If the peer is connected _and_ the authority IDs have changed. 
+ if let Some(prev) = self.connected_peers.get(&peer_id).filter(|x| x != &&new) { sender .send_message(NetworkBridgeRxMessage::UpdatedAuthorityIds { peer_id, - authority_ids: auths.clone(), + authority_ids: new.clone(), }) .await; - auths.iter().for_each(|a| { + prev.iter().for_each(|a| { + self.connected_authorities.remove(a); + }); + new.iter().for_each(|a| { self.connected_authorities.insert(a.clone(), peer_id); }); - self.connected_authorities_by_peer_id.insert(peer_id, auths); + + self.connected_peers.insert(peer_id, new); } } } @@ -431,12 +454,13 @@ where authority_ids.iter().for_each(|a| { self.connected_authorities.insert(a.clone(), peer_id); }); - self.connected_authorities_by_peer_id.insert(peer_id, authority_ids); + self.connected_peers.insert(peer_id, authority_ids); + } else { + self.connected_peers.insert(peer_id, HashSet::new()); } }, NetworkBridgeEvent::PeerDisconnected(peer_id) => { - if let Some(authority_ids) = self.connected_authorities_by_peer_id.remove(&peer_id) - { + if let Some(authority_ids) = self.connected_peers.remove(&peer_id) { authority_ids.into_iter().for_each(|a| { self.connected_authorities.remove(&a); }); @@ -453,6 +477,7 @@ where match message { Versioned::V1(m) => match m {}, Versioned::V2(m) => match m {}, + Versioned::VStaging(m) => match m {}, } }, } diff --git a/polkadot/node/network/protocol/Cargo.toml b/polkadot/node/network/protocol/Cargo.toml index 379334ded24acde67100de5b61069d5cb5909ce4..c33b9eae3252606e8d2fcbd954a0e180f4a47acb 100644 --- a/polkadot/node/network/protocol/Cargo.toml +++ b/polkadot/node/network/protocol/Cargo.toml @@ -27,3 +27,6 @@ bitvec = "1" [dev-dependencies] rand_chacha = "0.3.1" + +[features] +network-protocol-staging = [] diff --git a/polkadot/node/network/protocol/src/lib.rs b/polkadot/node/network/protocol/src/lib.rs index 901ac99b66933a77b95ccb1c55fd22930fc5a34a..9aeeb98ea9d6f6217668aa5dc3376cadc3acfb53 100644 --- a/polkadot/node/network/protocol/src/lib.rs +++ b/polkadot/node/network/protocol/src/lib.rs @@ -253,25 +253,29 @@ impl View { /// A protocol-versioned type. #[derive(Debug, Clone, PartialEq, Eq)] -pub enum Versioned { +pub enum Versioned { /// V1 type. V1(V1), /// V2 type. V2(V2), + /// VStaging type + VStaging(VStaging), } -impl Versioned<&'_ V1, &'_ V2> { +impl Versioned<&'_ V1, &'_ V2, &'_ VStaging> { /// Convert to a fully-owned version of the message. - pub fn clone_inner(&self) -> Versioned { + pub fn clone_inner(&self) -> Versioned { match *self { Versioned::V1(inner) => Versioned::V1(inner.clone()), Versioned::V2(inner) => Versioned::V2(inner.clone()), + Versioned::VStaging(inner) => Versioned::VStaging(inner.clone()), } } } /// All supported versions of the validation protocol message. -pub type VersionedValidationProtocol = Versioned; +pub type VersionedValidationProtocol = + Versioned; impl From for VersionedValidationProtocol { fn from(v1: v1::ValidationProtocol) -> Self { @@ -285,6 +289,12 @@ impl From for VersionedValidationProtocol { } } +impl From for VersionedValidationProtocol { + fn from(vstaging: vstaging::ValidationProtocol) -> Self { + VersionedValidationProtocol::VStaging(vstaging) + } +} + /// All supported versions of the collation protocol message. pub type VersionedCollationProtocol = Versioned; @@ -307,12 +317,12 @@ macro_rules! 
impl_versioned_full_protocol_from { match versioned_from { Versioned::V1(x) => Versioned::V1(x.into()), Versioned::V2(x) => Versioned::V2(x.into()), + Versioned::VStaging(x) => Versioned::VStaging(x.into()), } } } }; } - /// Implement `TryFrom` for one versioned enum variant into the inner type. /// `$m_ty::$variant(inner) -> Ok(inner)` macro_rules! impl_versioned_try_from { @@ -320,7 +330,8 @@ macro_rules! impl_versioned_try_from { $from:ty, $out:ty, $v1_pat:pat => $v1_out:expr, - $v2_pat:pat => $v2_out:expr + $v2_pat:pat => $v2_out:expr, + $vstaging_pat:pat => $vstaging_out:expr ) => { impl TryFrom<$from> for $out { type Error = crate::WrongVariant; @@ -330,6 +341,7 @@ macro_rules! impl_versioned_try_from { match x { Versioned::V1($v1_pat) => Ok(Versioned::V1($v1_out)), Versioned::V2($v2_pat) => Ok(Versioned::V2($v2_out)), + Versioned::VStaging($vstaging_pat) => Ok(Versioned::VStaging($vstaging_out)), _ => Err(crate::WrongVariant), } } @@ -343,6 +355,8 @@ macro_rules! impl_versioned_try_from { match x { Versioned::V1($v1_pat) => Ok(Versioned::V1($v1_out.clone())), Versioned::V2($v2_pat) => Ok(Versioned::V2($v2_out.clone())), + Versioned::VStaging($vstaging_pat) => + Ok(Versioned::VStaging($vstaging_out.clone())), _ => Err(crate::WrongVariant), } } @@ -351,8 +365,11 @@ macro_rules! impl_versioned_try_from { } /// Version-annotated messages used by the bitfield distribution subsystem. -pub type BitfieldDistributionMessage = - Versioned; +pub type BitfieldDistributionMessage = Versioned< + v1::BitfieldDistributionMessage, + v2::BitfieldDistributionMessage, + vstaging::BitfieldDistributionMessage, +>; impl_versioned_full_protocol_from!( BitfieldDistributionMessage, VersionedValidationProtocol, @@ -362,12 +379,16 @@ impl_versioned_try_from!( VersionedValidationProtocol, BitfieldDistributionMessage, v1::ValidationProtocol::BitfieldDistribution(x) => x, - v2::ValidationProtocol::BitfieldDistribution(x) => x + v2::ValidationProtocol::BitfieldDistribution(x) => x, + vstaging::ValidationProtocol::BitfieldDistribution(x) => x ); /// Version-annotated messages used by the statement distribution subsystem. -pub type StatementDistributionMessage = - Versioned; +pub type StatementDistributionMessage = Versioned< + v1::StatementDistributionMessage, + v2::StatementDistributionMessage, + vstaging::StatementDistributionMessage, +>; impl_versioned_full_protocol_from!( StatementDistributionMessage, VersionedValidationProtocol, @@ -377,12 +398,16 @@ impl_versioned_try_from!( VersionedValidationProtocol, StatementDistributionMessage, v1::ValidationProtocol::StatementDistribution(x) => x, - v2::ValidationProtocol::StatementDistribution(x) => x + v2::ValidationProtocol::StatementDistribution(x) => x, + vstaging::ValidationProtocol::StatementDistribution(x) => x ); /// Version-annotated messages used by the approval distribution subsystem. 
-pub type ApprovalDistributionMessage = - Versioned; +pub type ApprovalDistributionMessage = Versioned< + v1::ApprovalDistributionMessage, + v2::ApprovalDistributionMessage, + vstaging::ApprovalDistributionMessage, +>; impl_versioned_full_protocol_from!( ApprovalDistributionMessage, VersionedValidationProtocol, @@ -392,13 +417,18 @@ impl_versioned_try_from!( VersionedValidationProtocol, ApprovalDistributionMessage, v1::ValidationProtocol::ApprovalDistribution(x) => x, - v2::ValidationProtocol::ApprovalDistribution(x) => x + v2::ValidationProtocol::ApprovalDistribution(x) => x, + vstaging::ValidationProtocol::ApprovalDistribution(x) => x ); /// Version-annotated messages used by the gossip-support subsystem (this is void). -pub type GossipSupportNetworkMessage = - Versioned; +pub type GossipSupportNetworkMessage = Versioned< + v1::GossipSupportNetworkMessage, + v2::GossipSupportNetworkMessage, + vstaging::GossipSupportNetworkMessage, +>; + // This is a void enum placeholder, so never gets sent over the wire. impl TryFrom for GossipSupportNetworkMessage { type Error = WrongVariant; @@ -426,6 +456,7 @@ impl_versioned_try_from!( VersionedCollationProtocol, CollatorProtocolMessage, v1::CollationProtocol::CollatorProtocol(x) => x, + v2::CollationProtocol::CollatorProtocol(x) => x, v2::CollationProtocol::CollatorProtocol(x) => x ); @@ -439,7 +470,7 @@ pub mod v1 { }; use polkadot_node_primitives::{ - approval::{IndirectAssignmentCert, IndirectSignedApprovalVote}, + approval::v1::{IndirectAssignmentCert, IndirectSignedApprovalVote}, UncheckedSignedFullStatement, }; @@ -598,7 +629,7 @@ pub mod v2 { }; use polkadot_node_primitives::{ - approval::{IndirectAssignmentCert, IndirectSignedApprovalVote}, + approval::v1::{IndirectAssignmentCert, IndirectSignedApprovalVote}, UncheckedSignedFullStatement, }; @@ -839,3 +870,64 @@ pub mod v2 { payload } } + +/// vstaging network protocol types, intended to become v3. +/// The initial purpose is changing ApprovalDistributionMessage to +/// include more than one assignment in the message. +pub mod vstaging { + use parity_scale_codec::{Decode, Encode}; + + use polkadot_node_primitives::approval::{ + v1::IndirectSignedApprovalVote, + v2::{CandidateBitfield, IndirectAssignmentCertV2}, + }; + + /// These parts of the protocol did not change from v2, so they are just aliased in vstaging; + /// there is no reason they can't be changed until vstaging becomes v3 and is released. + pub use super::v2::{ + declare_signature_payload, BackedCandidateAcknowledgement, BackedCandidateManifest, + BitfieldDistributionMessage, GossipSupportNetworkMessage, StatementDistributionMessage, + StatementFilter, + }; + + /// Network messages used by the approval distribution subsystem. + #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] + pub enum ApprovalDistributionMessage { + /// Assignments for candidates in recent, unfinalized blocks. + /// We use a bitfield to reference claimed candidates, where the bit index is equal to the + /// candidate index. + /// + /// Actually checking the assignment may yield a different result. + /// TODO: Look at getting rid of bitfield in the future. + #[codec(index = 0)] + Assignments(Vec<(IndirectAssignmentCertV2, CandidateBitfield)>), + /// Approvals for candidates in some recent, unfinalized block. + #[codec(index = 1)] + Approvals(Vec), + } + + /// All network messages on the validation peer-set.
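+ /// Apart from approval distribution, these wrap the v2 message types re-exported above.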
+ #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq, derive_more::From)] + pub enum ValidationProtocol { + /// Bitfield distribution messages + #[codec(index = 1)] + #[from] + BitfieldDistribution(BitfieldDistributionMessage), + /// Statement distribution messages + #[codec(index = 3)] + #[from] + StatementDistribution(StatementDistributionMessage), + /// Approval distribution messages + #[codec(index = 4)] + #[from] + ApprovalDistribution(ApprovalDistributionMessage), + } +} + +/// Returns the subset of `peers` with the specified `version`. +pub fn filter_by_peer_version( + peers: &[(PeerId, peer_set::ProtocolVersion)], + version: peer_set::ProtocolVersion, +) -> Vec { + peers.iter().filter(|(_, v)| v == &version).map(|(p, _)| *p).collect::>() +} diff --git a/polkadot/node/network/protocol/src/peer_set.rs b/polkadot/node/network/protocol/src/peer_set.rs index 8dd68b297e3000ec4fc0847b047a1323aae97353..eb483dec9709a249b31e150f846085759b21319e 100644 --- a/polkadot/node/network/protocol/src/peer_set.rs +++ b/polkadot/node/network/protocol/src/peer_set.rs @@ -118,10 +118,17 @@ impl PeerSet { /// Networking layer relies on `get_main_version()` being the version /// of the main protocol name reported by [`PeerSetProtocolNames::get_main_name()`]. pub fn get_main_version(self) -> ProtocolVersion { + #[cfg(not(feature = "network-protocol-staging"))] match self { PeerSet::Validation => ValidationVersion::V2.into(), PeerSet::Collation => CollationVersion::V2.into(), } + + #[cfg(feature = "network-protocol-staging")] + match self { + PeerSet::Validation => ValidationVersion::VStaging.into(), + PeerSet::Collation => CollationVersion::V2.into(), + } } /// Get the max notification size for this peer set. @@ -147,6 +154,8 @@ impl PeerSet { Some("validation/1") } else if version == ValidationVersion::V2.into() { Some("validation/2") + } else if version == ValidationVersion::VStaging.into() { + Some("validation/3") } else { None }, @@ -218,6 +227,9 @@ pub enum ValidationVersion { V1 = 1, /// The second version. V2 = 2, + /// The staging version to gather changes + /// that before the release become v3. + VStaging = 3, } /// Supported collation protocol versions. Only versions defined here must be used in the codebase. diff --git a/polkadot/node/network/statement-distribution/src/legacy_v1/mod.rs b/polkadot/node/network/statement-distribution/src/legacy_v1/mod.rs index fc2aff0da305edbb7f6f39834ffd017e2701a550..d9866af1ee233627911fc2a6b2ff2e5a63a3d93e 100644 --- a/polkadot/node/network/statement-distribution/src/legacy_v1/mod.rs +++ b/polkadot/node/network/statement-distribution/src/legacy_v1/mod.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . 
+use net_protocol::{filter_by_peer_version, peer_set::ProtocolVersion}; use parity_scale_codec::Encode; use polkadot_node_network_protocol::{ @@ -21,7 +22,8 @@ use polkadot_node_network_protocol::{ grid_topology::{GridNeighbors, RequiredRouting, SessionBoundGridTopologyStorage}, peer_set::{IsAuthority, PeerSet, ValidationVersion}, v1::{self as protocol_v1, StatementMetadata}, - v2 as protocol_v2, IfDisconnected, PeerId, UnifiedReputationChange as Rep, Versioned, View, + v2 as protocol_v2, vstaging as protocol_vstaging, IfDisconnected, PeerId, + UnifiedReputationChange as Rep, Versioned, View, }; use polkadot_node_primitives::{ SignedFullStatement, Statement, StatementWithPVD, UncheckedSignedFullStatement, @@ -1061,7 +1063,7 @@ async fn circulate_statement<'a, Context>( "We filter out duplicates above. qed.", ); - let (v1_peers_to_send, v2_peers_to_send) = peers_to_send + let (v1_peers_to_send, non_v1_peers_to_send) = peers_to_send .into_iter() .map(|peer_id| { let peer_data = @@ -1073,7 +1075,7 @@ async fn circulate_statement<'a, Context>( }) .partition::, _>(|(_, _, version)| match version { ValidationVersion::V1 => true, - ValidationVersion::V2 => false, + ValidationVersion::V2 | ValidationVersion::VStaging => false, }); // partition is handy here but not if we add more protocol versions let payload = v1_statement_message(relay_parent, stored.statement.clone(), metrics); @@ -1093,6 +1095,22 @@ async fn circulate_statement<'a, Context>( )) .await; } + + let peers_to_send: Vec<(PeerId, ProtocolVersion)> = non_v1_peers_to_send + .iter() + .map(|(p, _, version)| (*p, (*version).into())) + .collect(); + + let peer_needs_dependent_statement = v1_peers_to_send + .into_iter() + .chain(non_v1_peers_to_send) + .filter_map(|(peer, needs_dependent, _)| if needs_dependent { Some(peer) } else { None }) + .collect(); + + let v2_peers_to_send = filter_by_peer_version(&peers_to_send, ValidationVersion::V2.into()); + let vstaging_to_send = + filter_by_peer_version(&peers_to_send, ValidationVersion::VStaging.into()); + if !v2_peers_to_send.is_empty() { gum::trace!( target: LOG_TARGET, @@ -1102,17 +1120,28 @@ async fn circulate_statement<'a, Context>( "Sending statement to v2 peers", ); ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - v2_peers_to_send.iter().map(|(p, _, _)| *p).collect(), + v2_peers_to_send, compatible_v1_message(ValidationVersion::V2, payload.clone()).into(), )) .await; } - v1_peers_to_send - .into_iter() - .chain(v2_peers_to_send) - .filter_map(|(peer, needs_dependent, _)| if needs_dependent { Some(peer) } else { None }) - .collect() + if !vstaging_to_send.is_empty() { + gum::trace!( + target: LOG_TARGET, + ?vstaging_to_send, + ?relay_parent, + statement = ?stored.statement, + "Sending statement to vstaging peers", + ); + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( + vstaging_to_send, + compatible_v1_message(ValidationVersion::VStaging, payload.clone()).into(), + )) + .await; + } + + peer_needs_dependent_statement } /// Send all statements about a given candidate hash to a peer. 
@@ -1442,8 +1471,11 @@ async fn handle_incoming_message<'a, Context>( let message = match message { Versioned::V1(m) => m, - Versioned::V2(protocol_v2::StatementDistributionMessage::V1Compatibility(m)) => m, - Versioned::V2(_) => { + Versioned::V2(protocol_v2::StatementDistributionMessage::V1Compatibility(m)) | + Versioned::VStaging(protocol_vstaging::StatementDistributionMessage::V1Compatibility( + m, + )) => m, + Versioned::V2(_) | Versioned::VStaging(_) => { // The higher-level subsystem code is supposed to filter out // all non v1 messages. gum::debug!( @@ -2169,5 +2201,8 @@ fn compatible_v1_message( ValidationVersion::V1 => Versioned::V1(message), ValidationVersion::V2 => Versioned::V2(protocol_v2::StatementDistributionMessage::V1Compatibility(message)), + ValidationVersion::VStaging => Versioned::VStaging( + protocol_vstaging::StatementDistributionMessage::V1Compatibility(message), + ), } } diff --git a/polkadot/node/network/statement-distribution/src/lib.rs b/polkadot/node/network/statement-distribution/src/lib.rs index eead7df5224d57ddf92bc90a3ba0b18e70a72c9e..0a80c1491a901068a0838bfb69463b699e0c2c97 100644 --- a/polkadot/node/network/statement-distribution/src/lib.rs +++ b/polkadot/node/network/statement-distribution/src/lib.rs @@ -27,7 +27,7 @@ use std::time::Duration; use polkadot_node_network_protocol::{ request_response::{v1 as request_v1, v2::AttestedCandidateRequest, IncomingRequestReceiver}, - v2 as protocol_v2, Versioned, + v2 as protocol_v2, vstaging as protocol_vstaging, Versioned, }; use polkadot_node_primitives::StatementWithPVD; use polkadot_node_subsystem::{ @@ -320,6 +320,7 @@ impl StatementDistributionSubsystem { let mode = prospective_parachains_mode(ctx.sender(), activated.hash).await?; if let ProspectiveParachainsMode::Enabled { .. 
} = mode { v2::handle_active_leaves_update(ctx, state, activated, mode).await?; + v2::handle_deactivate_leaves(state, &deactivated); } else if let ProspectiveParachainsMode::Disabled = mode { for deactivated in &deactivated { crate::legacy_v1::handle_deactivate_leaf(legacy_v1_state, *deactivated); @@ -398,9 +399,12 @@ impl StatementDistributionSubsystem { NetworkBridgeEvent::PeerMessage(_, message) => match message { Versioned::V2( protocol_v2::StatementDistributionMessage::V1Compatibility(_), + ) | + Versioned::VStaging( + protocol_vstaging::StatementDistributionMessage::V1Compatibility(_), ) => VersionTarget::Legacy, Versioned::V1(_) => VersionTarget::Legacy, - Versioned::V2(_) => VersionTarget::Current, + Versioned::V2(_) | Versioned::VStaging(_) => VersionTarget::Current, }, _ => VersionTarget::Both, }; diff --git a/polkadot/node/network/statement-distribution/src/v2/candidates.rs b/polkadot/node/network/statement-distribution/src/v2/candidates.rs index e660df5da173e24dce8be7afdbe3ea55c17f94c9..ad56ad4a2365b94e6ced0e43ce0747d99fb6a80d 100644 --- a/polkadot/node/network/statement-distribution/src/v2/candidates.rs +++ b/polkadot/node/network/statement-distribution/src/v2/candidates.rs @@ -353,7 +353,13 @@ impl Candidates { ); c.has_claims() }, - }) + }); + + gum::trace!( + target: crate::LOG_TARGET, + "Candidates remaining after cleanup: {}", + self.candidates.len(), + ); } } diff --git a/polkadot/node/network/statement-distribution/src/v2/cluster.rs b/polkadot/node/network/statement-distribution/src/v2/cluster.rs index 8adb8353ca929d75219252c2ea33e894dea41002..619114de9670c8aef50ce129b6c26bf6ff206f70 100644 --- a/polkadot/node/network/statement-distribution/src/v2/cluster.rs +++ b/polkadot/node/network/statement-distribution/src/v2/cluster.rs @@ -331,6 +331,13 @@ impl ClusterTracker { self.validator_seconded(validator, candidate_hash) } + /// Whether a validator can request a candidate from us. + pub fn can_request(&self, target: ValidatorIndex, candidate_hash: CandidateHash) -> bool { + self.validators.contains(&target) && + self.we_sent_seconded(target, candidate_hash) && + !self.they_sent_seconded(target, candidate_hash) + } + /// Returns a Vec of pending statements to be sent to a particular validator /// index. `Seconded` statements are sorted to the front of the vector. 
/// diff --git a/polkadot/node/network/statement-distribution/src/v2/grid.rs b/polkadot/node/network/statement-distribution/src/v2/grid.rs index 3d53ff6d321e2321c7b559aaaf7adc5504e18f49..19bad34c44ff9de34596595480c0b1bb92cd0d3e 100644 --- a/polkadot/node/network/statement-distribution/src/v2/grid.rs +++ b/polkadot/node/network/statement-distribution/src/v2/grid.rs @@ -2245,4 +2245,73 @@ mod tests { ); assert_eq!(tracker.all_pending_statements_for(counterparty), vec![]); } + + #[test] + fn session_grid_topology_consistent() { + let n_validators = 300; + let group_size = 5; + + let validator_indices = + (0..n_validators).map(|i| ValidatorIndex(i as u32)).collect::>(); + let groups = validator_indices.chunks(group_size).map(|x| x.to_vec()).collect::>(); + + let topology = SessionGridTopology::new( + (0..n_validators).collect::>(), + (0..n_validators) + .map(|i| TopologyPeerInfo { + peer_ids: Vec::new(), + validator_index: ValidatorIndex(i as u32), + discovery_id: AuthorityDiscoveryPair::generate().0.public(), + }) + .collect(), + ); + + let computed_topologies = validator_indices + .iter() + .cloned() + .map(|v| build_session_topology(groups.iter(), &topology, Some(v))) + .collect::>(); + + let pairwise_check_topologies = |i, j| { + let v_i = ValidatorIndex(i); + let v_j = ValidatorIndex(j); + + for group in (0..groups.len()).map(|i| GroupIndex(i as u32)) { + let g_i = computed_topologies[i as usize].group_views.get(&group).unwrap(); + let g_j = computed_topologies[j as usize].group_views.get(&group).unwrap(); + + if g_i.sending.contains(&v_j) { + assert!( + g_j.receiving.contains(&v_i), + "{:?}: {:?}, sending but not receiving", + group, + &(i, j) + ); + } + + if g_j.sending.contains(&v_i) { + assert!( + g_i.receiving.contains(&v_j), + "{:?}: {:?}, sending but not receiving", + group, + &(j, i) + ); + } + + if g_i.receiving.contains(&v_j) { + assert!(g_j.sending.contains(&v_i), "{:?}, receiving but not sending", &(i, j)); + } + + if g_j.receiving.contains(&v_i) { + assert!(g_i.sending.contains(&v_j), "{:?}, receiving but not sending", &(j, i)); + } + } + }; + + for i in 0..n_validators { + for j in (i + 1)..n_validators { + pairwise_check_topologies(i as u32, j as u32); + } + } + } } diff --git a/polkadot/node/network/statement-distribution/src/v2/mod.rs b/polkadot/node/network/statement-distribution/src/v2/mod.rs index e11d66c41a04b62377008bb7c0e2b5f9f44cde5b..406f1130590902f12caa9058c55a80b0fbb54c8a 100644 --- a/polkadot/node/network/statement-distribution/src/v2/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/mod.rs @@ -17,6 +17,7 @@ //! Implementation of the v2 statement distribution protocol, //! designed for asynchronous backing. 
+use net_protocol::{filter_by_peer_version, peer_set::ProtocolVersion}; use polkadot_node_network_protocol::{ self as net_protocol, grid_topology::SessionGridTopology, @@ -28,7 +29,8 @@ use polkadot_node_network_protocol::{ MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS, }, v2::{self as protocol_v2, StatementFilter}, - IfDisconnected, PeerId, UnifiedReputationChange as Rep, Versioned, View, + vstaging as protocol_vstaging, IfDisconnected, PeerId, UnifiedReputationChange as Rep, + Versioned, View, }; use polkadot_node_primitives::{ SignedFullStatementWithPVD, StatementWithPVD as FullStatementWithPVD, @@ -100,6 +102,8 @@ const COST_UNEXPECTED_MANIFEST_MISSING_KNOWLEDGE: Rep = Rep::CostMinor("Unexpected Manifest, missing knowlege for relay parent"); const COST_UNEXPECTED_MANIFEST_DISALLOWED: Rep = Rep::CostMinor("Unexpected Manifest, Peer Disallowed"); +const COST_UNEXPECTED_MANIFEST_PEER_UNKNOWN: Rep = + Rep::CostMinor("Unexpected Manifest, Peer Unknown"); const COST_CONFLICTING_MANIFEST: Rep = Rep::CostMajor("Manifest conflicts with previous"); const COST_INSUFFICIENT_MANIFEST: Rep = Rep::CostMajor("Manifest statements insufficient to back candidate"); @@ -138,8 +142,27 @@ struct PerRelayParentState { session: SessionIndex, } +impl PerRelayParentState { + fn active_validator_state(&self) -> Option<&ActiveValidatorState> { + self.local_validator.as_ref().and_then(|local| local.active.as_ref()) + } + + fn active_validator_state_mut(&mut self) -> Option<&mut ActiveValidatorState> { + self.local_validator.as_mut().and_then(|local| local.active.as_mut()) + } +} + // per-relay-parent local validator state. struct LocalValidatorState { + // the grid-level communication at this relay-parent. + grid_tracker: GridTracker, + // additional fields in case local node is an active validator. + active: Option, + // local index actually exists in case node is inactive validator, however, + // it's not needed outside of `build_session_topology`, where it's known. +} + +struct ActiveValidatorState { // The index of the validator. index: ValidatorIndex, // our validator group @@ -148,8 +171,14 @@ struct LocalValidatorState { assignment: Option, // the 'direct-in-group' communication at this relay-parent. cluster_tracker: ClusterTracker, - // the grid-level communication at this relay-parent. - grid_tracker: GridTracker, +} + +#[derive(Debug, Copy, Clone)] +enum LocalValidatorIndex { + // Local node is an active validator. + Active(ValidatorIndex), + // Local node is not in active validator set. 
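+ // Its key may still appear in the gossip topology, since discovery covers a superset of the active validators.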
+ Inactive, } #[derive(Debug)] @@ -160,7 +189,7 @@ struct PerSessionState { // is only `None` in the time between seeing a session and // getting the topology from the gossip-support subsystem grid_view: Option, - local_validator: Option, + local_validator: Option, } impl PerSessionState { @@ -174,25 +203,37 @@ impl PerSessionState { let local_validator = polkadot_node_subsystem_util::signing_key_and_index( session_info.validators.iter(), keystore, - ); + ) + .map(|(_, index)| LocalValidatorIndex::Active(index)); - PerSessionState { - session_info, - groups, - authority_lookup, - grid_view: None, - local_validator: local_validator.map(|(_key, index)| index), - } + PerSessionState { session_info, groups, authority_lookup, grid_view: None, local_validator } } - fn supply_topology(&mut self, topology: &SessionGridTopology) { + fn supply_topology( + &mut self, + topology: &SessionGridTopology, + local_index: Option, + ) { + // Note: we use the local index rather than `self.local_validator`, as the + // former may be `Some` when the latter is `None`, due to the set of nodes in + // discovery being a superset of the active validators for consensus. let grid_view = grid::build_session_topology( self.session_info.validator_groups.iter(), topology, - self.local_validator, + local_index, ); self.grid_view = Some(grid_view); + if local_index.is_some() { + self.local_validator.get_or_insert(LocalValidatorIndex::Inactive); + } + } + + /// Returns `true` if the local node is neither an active nor an inactive validator. + /// + /// `false` is also returned if the session topology is not known yet. + fn is_not_validator(&self) -> bool { + self.grid_view.is_some() && self.local_validator.is_none() } } @@ -251,6 +292,7 @@ fn connected_validator_peer( struct PeerState { view: View, + protocol_version: ValidationVersion, implicit_view: HashSet, discovery_ids: Option>, } @@ -323,9 +365,13 @@ pub(crate) async fn handle_network_update( NetworkBridgeEvent::PeerConnected(peer_id, role, protocol_version, mut authority_ids) => { gum::trace!(target: LOG_TARGET, ?peer_id, ?role, ?protocol_version, "Peer connected"); - if protocol_version != ValidationVersion::V2.into() { + let versioned_protocol = if protocol_version != ValidationVersion::V2.into() && + protocol_version != ValidationVersion::VStaging.into() + { return - } + } else { + protocol_version.try_into().expect("Qed, we checked above") + }; if let Some(ref mut authority_ids) = authority_ids { authority_ids.retain(|a| match state.authorities.entry(a.clone()) { @@ -334,7 +380,7 @@ pub(crate) async fn handle_network_update( true }, Entry::Occupied(e) => { - gum::trace!( + gum::debug!( target: LOG_TARGET, authority_id = ?a, existing_peer = ?e.get(), @@ -352,6 +398,7 @@ pub(crate) async fn handle_network_update( PeerState { view: View::default(), implicit_view: HashSet::new(), + protocol_version: versioned_protocol, discovery_ids: authority_ids, }, ); @@ -366,9 +413,10 @@ pub(crate) async fn handle_network_update( NetworkBridgeEvent::NewGossipTopology(topology) => { let new_session_index = topology.session; let new_topology = topology.topology; + let local_index = topology.local_index; if let Some(per_session) = state.per_session.get_mut(&new_session_index) { - per_session.supply_topology(&new_topology); + per_session.supply_topology(&new_topology, local_index); } // TODO [https://github.com/paritytech/polkadot/issues/6194] @@ -383,17 +431,29 @@ pub(crate) async fn handle_network_update( net_protocol::StatementDistributionMessage::V1(_) => return,
net_protocol::StatementDistributionMessage::V2( protocol_v2::StatementDistributionMessage::V1Compatibility(_), + ) | + net_protocol::StatementDistributionMessage::VStaging( + protocol_vstaging::StatementDistributionMessage::V1Compatibility(_), ) => return, net_protocol::StatementDistributionMessage::V2( protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), + ) | + net_protocol::StatementDistributionMessage::VStaging( + protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), ) => handle_incoming_statement(ctx, state, peer_id, relay_parent, statement, reputation) .await, net_protocol::StatementDistributionMessage::V2( protocol_v2::StatementDistributionMessage::BackedCandidateManifest(inner), + ) | + net_protocol::StatementDistributionMessage::VStaging( + protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(inner), ) => handle_incoming_manifest(ctx, state, peer_id, inner, reputation).await, net_protocol::StatementDistributionMessage::V2( protocol_v2::StatementDistributionMessage::BackedCandidateKnown(inner), + ) | + net_protocol::StatementDistributionMessage::VStaging( + protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(inner), ) => handle_incoming_acknowledgement(ctx, state, peer_id, inner, reputation).await, }, NetworkBridgeEvent::PeerViewChange(peer_id, view) => @@ -409,6 +469,12 @@ pub(crate) async fn handle_network_update( "Updated `AuthorityDiscoveryId`s" ); + // defensive: ensure peers are actually connected + let peer_state = match state.peers.get_mut(&peer_id) { + None => return, + Some(p) => p, + }; + // Remove the authority IDs which were previously mapped to the peer // but aren't part of the new set. state.authorities.retain(|a, p| p != &peer_id || authority_ids.contains(a)); @@ -418,9 +484,7 @@ pub(crate) async fn handle_network_update( state.authorities.insert(a, peer_id); } - if let Some(peer_state) = state.peers.get_mut(&peer_id) { - peer_state.discovery_ids = Some(authority_ids); - } + peer_state.discovery_ids = Some(authority_ids); }, } } @@ -520,13 +584,17 @@ pub(crate) async fn handle_active_leaves_update( .expect("either existed or just inserted; qed"); let local_validator = per_session.local_validator.and_then(|v| { - find_local_validator_state( - v, - &per_session.groups, - &availability_cores, - &group_rotation_info, - seconding_limit, - ) + if let LocalValidatorIndex::Active(idx) = v { + find_active_validator_state( + idx, + &per_session.groups, + &availability_cores, + &group_rotation_info, + seconding_limit, + ) + } else { + Some(LocalValidatorState { grid_tracker: GridTracker::default(), active: None }) + } }); state.per_relay_parent.insert( @@ -542,6 +610,13 @@ pub(crate) async fn handle_active_leaves_update( ); } + gum::debug!( + target: LOG_TARGET, + "Activated leaves. Now tracking {} relay-parents across {} sessions", + state.per_relay_parent.len(), + state.per_session.len(), + ); + // Reconcile all peers' views with the active leaf and any relay parents // it implies. If they learned about the block before we did, this reconciliation will give // non-empty results and we should send them messages concerning all activated relay-parents. 
@@ -566,7 +641,7 @@ pub(crate) async fn handle_active_leaves_update( Ok(()) } -fn find_local_validator_state( +fn find_active_validator_state( validator_index: ValidatorIndex, groups: &Groups, availability_cores: &[CoreState], @@ -587,11 +662,13 @@ fn find_local_validator_state( let group_validators = groups.get(our_group)?.to_owned(); Some(LocalValidatorState { - index: validator_index, - group: our_group, - assignment: para, - cluster_tracker: ClusterTracker::new(group_validators, seconding_limit) - .expect("group is non-empty because we are in it; qed"), + active: Some(ActiveValidatorState { + index: validator_index, + group: our_group, + assignment: para, + cluster_tracker: ClusterTracker::new(group_validators, seconding_limit) + .expect("group is non-empty because we are in it; qed"), + }), grid_tracker: GridTracker::default(), }) } @@ -599,25 +676,18 @@ fn find_local_validator_state( pub(crate) fn handle_deactivate_leaves(state: &mut State, leaves: &[Hash]) { // deactivate the leaf in the implicit view. for leaf in leaves { - state.implicit_view.deactivate_leaf(*leaf); - } - - let relay_parents = state.implicit_view.all_allowed_relay_parents().collect::>(); - - // fast exit for no-op. - if relay_parents.len() == state.per_relay_parent.len() { - return - } - - // clean up per-relay-parent data based on everything removed. - state.per_relay_parent.retain(|r, _| relay_parents.contains(r)); - - // Clean up all requests - for leaf in leaves { - state.request_manager.remove_by_relay_parent(*leaf); + let pruned = state.implicit_view.deactivate_leaf(*leaf); + for pruned_rp in pruned { + // clean up per-relay-parent data based on everything removed. + state.per_relay_parent.remove(&pruned_rp); + // clean up requests related to this relay parent. + state.request_manager.remove_by_relay_parent(*leaf); + } } - state.candidates.on_deactivate_leaves(&leaves, |h| relay_parents.contains(h)); + state + .candidates + .on_deactivate_leaves(&leaves, |h| state.per_relay_parent.contains_key(h)); // clean up sessions based on everything remaining. 
let sessions: HashSet<_> = state.per_relay_parent.values().map(|r| r.session).collect(); @@ -691,13 +761,17 @@ async fn send_peer_messages_for_relay_parent( for validator_id in find_validator_ids(peer_data.iter_known_discovery_ids(), |a| { per_session_state.authority_lookup.get(a) }) { - if let Some(local_validator_state) = relay_parent_state.local_validator.as_mut() { + if let Some(active) = relay_parent_state + .local_validator + .as_mut() + .and_then(|local| local.active.as_mut()) + { send_pending_cluster_statements( ctx, relay_parent, - &peer, + &(peer, peer_data.protocol_version), validator_id, - &mut local_validator_state.cluster_tracker, + &mut active.cluster_tracker, &state.candidates, &relay_parent_state.statement_store, ) @@ -707,7 +781,7 @@ async fn send_peer_messages_for_relay_parent( send_pending_grid_messages( ctx, relay_parent, - &peer, + &(peer, peer_data.protocol_version), validator_id, &per_session_state.groups, relay_parent_state, @@ -720,15 +794,34 @@ async fn send_peer_messages_for_relay_parent( fn pending_statement_network_message( statement_store: &StatementStore, relay_parent: Hash, - peer: &PeerId, + peer: &(PeerId, ValidationVersion), originator: ValidatorIndex, compact: CompactStatement, ) -> Option<(Vec, net_protocol::VersionedValidationProtocol)> { - statement_store - .validator_statement(originator, compact) - .map(|s| s.as_unchecked().clone()) - .map(|signed| protocol_v2::StatementDistributionMessage::Statement(relay_parent, signed)) - .map(|msg| (vec![*peer], Versioned::V2(msg).into())) + match peer.1 { + ValidationVersion::V2 => statement_store + .validator_statement(originator, compact) + .map(|s| s.as_unchecked().clone()) + .map(|signed| { + protocol_v2::StatementDistributionMessage::Statement(relay_parent, signed) + }) + .map(|msg| (vec![peer.0], Versioned::V2(msg).into())), + ValidationVersion::VStaging => statement_store + .validator_statement(originator, compact) + .map(|s| s.as_unchecked().clone()) + .map(|signed| { + protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, signed) + }) + .map(|msg| (vec![peer.0], Versioned::VStaging(msg).into())), + ValidationVersion::V1 => { + gum::error!( + target: LOG_TARGET, + "Bug ValidationVersion::V1 should not be used in statement-distribution v2, + legacy should have handled this" + ); + None + }, + } } /// Send a peer all pending cluster statements for a relay parent. 
@@ -736,7 +829,7 @@ fn pending_statement_network_message( async fn send_pending_cluster_statements( ctx: &mut Context, relay_parent: Hash, - peer_id: &PeerId, + peer_id: &(PeerId, ValidationVersion), peer_validator_id: ValidatorIndex, cluster_tracker: &mut ClusterTracker, candidates: &Candidates, @@ -780,7 +873,7 @@ async fn send_pending_cluster_statements( async fn send_pending_grid_messages( ctx: &mut Context, relay_parent: Hash, - peer_id: &PeerId, + peer_id: &(PeerId, ValidationVersion), peer_validator_id: ValidatorIndex, groups: &Groups, relay_parent_state: &mut PerRelayParentState, @@ -842,20 +935,37 @@ async fn send_pending_grid_messages( candidate_hash, local_knowledge.clone(), ); - - messages.push(( - vec![*peer_id], - Versioned::V2( - protocol_v2::StatementDistributionMessage::BackedCandidateManifest( - manifest, - ), - ) - .into(), - )); + match peer_id.1 { + ValidationVersion::V2 => messages.push(( + vec![peer_id.0], + Versioned::V2( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( + manifest, + ), + ) + .into(), + )), + ValidationVersion::VStaging => messages.push(( + vec![peer_id.0], + Versioned::VStaging( + protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + manifest, + ), + ) + .into(), + )), + ValidationVersion::V1 => { + gum::error!( + target: LOG_TARGET, + "Bug ValidationVersion::V1 should not be used in statement-distribution v2, + legacy should have handled this" + ); + } + }; }, grid::ManifestKind::Acknowledgement => { messages.extend(acknowledgement_and_statement_messages( - *peer_id, + peer_id, peer_validator_id, groups, relay_parent_state, @@ -939,7 +1049,7 @@ pub(crate) async fn share_local_statement( }; let (local_index, local_assignment, local_group) = - match per_relay_parent.local_validator.as_ref() { + match per_relay_parent.active_validator_state() { None => return Err(JfyiError::InvalidShare), Some(l) => (l.index, l.assignment, l.group), }; @@ -1016,7 +1126,7 @@ pub(crate) async fn share_local_statement( } { - let l = per_relay_parent.local_validator.as_mut().expect("checked above; qed"); + let l = per_relay_parent.active_validator_state_mut().expect("checked above; qed"); l.cluster_tracker.note_issued(local_index, compact_statement.payload().clone()); } @@ -1103,31 +1213,41 @@ async fn circulate_statement( // We're not meant to circulate statements in the cluster until we have the confirmed // candidate. - let cluster_relevant = Some(local_validator.group) == statement_group; - let cluster_targets = if is_confirmed && cluster_relevant { - Some( - local_validator - .cluster_tracker - .targets() - .iter() - .filter(|&&v| { - local_validator + // + // Cluster is only relevant if local node is an active validator. 
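+ // For an inactive validator there is no cluster view, so only the grid targets computed below apply.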
+ let (cluster_relevant, cluster_targets, all_cluster_targets) = local_validator + .active + .as_mut() + .map(|active| { + let cluster_relevant = Some(active.group) == statement_group; + let cluster_targets = if is_confirmed && cluster_relevant { + Some( + active .cluster_tracker - .can_send(v, originator, compact_statement.clone()) - .is_ok() - }) - .filter(|&v| v != &local_validator.index) - .map(|v| (*v, DirectTargetKind::Cluster)), - ) - } else { - None - }; + .targets() + .iter() + .filter(|&&v| { + active + .cluster_tracker + .can_send(v, originator, compact_statement.clone()) + .is_ok() + }) + .filter(|&v| v != &active.index) + .map(|v| (*v, DirectTargetKind::Cluster)), + ) + } else { + None + }; + let all_cluster_targets = active.cluster_tracker.targets(); + (cluster_relevant, cluster_targets, all_cluster_targets) + }) + .unwrap_or((false, None, &[])); let grid_targets = local_validator .grid_tracker .direct_statement_targets(&per_session.groups, originator, &compact_statement) .into_iter() - .filter(|v| !cluster_relevant || !local_validator.cluster_tracker.targets().contains(v)) + .filter(|v| !cluster_relevant || !all_cluster_targets.contains(v)) .map(|v| (v, DirectTargetKind::Grid)); let targets = cluster_targets @@ -1142,33 +1262,39 @@ async fn circulate_statement( (local_validator, targets) }; - let mut statement_to = Vec::new(); + let mut statement_to_peers: Vec<(PeerId, ProtocolVersion)> = Vec::new(); for (target, authority_id, kind) in targets { // Find peer ID based on authority ID, and also filter to connected. - let peer_id: PeerId = match authorities.get(&authority_id) { - Some(p) if peers.get(p).map_or(false, |p| p.knows_relay_parent(&relay_parent)) => *p, + let peer_id: (PeerId, ProtocolVersion) = match authorities.get(&authority_id) { + Some(p) if peers.get(p).map_or(false, |p| p.knows_relay_parent(&relay_parent)) => ( + *p, + peers + .get(p) + .expect("Qed, can't fail because it was checked above") + .protocol_version + .into(), + ), None | Some(_) => continue, }; match kind { DirectTargetKind::Cluster => { + let active = local_validator + .active + .as_mut() + .expect("cluster target means local is active validator; qed"); + // At this point, all peers in the cluster should 'know' // the candidate, so we don't expect for this to fail. - if let Ok(()) = local_validator.cluster_tracker.can_send( - target, - originator, - compact_statement.clone(), - ) { - local_validator.cluster_tracker.note_sent( - target, - originator, - compact_statement.clone(), - ); - statement_to.push(peer_id); + if let Ok(()) = + active.cluster_tracker.can_send(target, originator, compact_statement.clone()) + { + active.cluster_tracker.note_sent(target, originator, compact_statement.clone()); + statement_to_peers.push(peer_id); } }, DirectTargetKind::Grid => { - statement_to.push(peer_id); + statement_to_peers.push(peer_id); local_validator.grid_tracker.sent_or_received_direct_statement( &per_session.groups, originator, @@ -1179,17 +1305,23 @@ async fn circulate_statement( } } + let statement_to_v2_peers = + filter_by_peer_version(&statement_to_peers, ValidationVersion::V2.into()); + + let statement_to_vstaging_peers = + filter_by_peer_version(&statement_to_peers, ValidationVersion::VStaging.into()); + // ship off the network messages to the network bridge. 
- if !statement_to.is_empty() { + if !statement_to_v2_peers.is_empty() { gum::debug!( target: LOG_TARGET, ?compact_statement, - n_peers = ?statement_to.len(), - "Sending statement to peers", + n_peers = ?statement_to_v2_peers.len(), + "Sending statement to v2 peers", ); ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - statement_to, + statement_to_v2_peers, Versioned::V2(protocol_v2::StatementDistributionMessage::Statement( relay_parent, statement.as_unchecked().clone(), @@ -1198,6 +1330,25 @@ async fn circulate_statement( )) .await; } + + if !statement_to_vstaging_peers.is_empty() { + gum::debug!( + target: LOG_TARGET, + ?compact_statement, + n_peers = ?statement_to_peers.len(), + "Sending statement to vstaging peers", + ); + + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( + statement_to_vstaging_peers, + Versioned::VStaging(protocol_vstaging::StatementDistributionMessage::Statement( + relay_parent, + statement.as_unchecked().clone(), + )) + .into(), + )) + .await; + } } /// Check a statement signature under this parent hash. fn check_statement_signature( @@ -1285,7 +1436,9 @@ async fn handle_incoming_statement( None => { // we shouldn't be receiving statements unless we're a validator // this session. - modify_reputation(reputation, ctx.sender(), peer, COST_UNEXPECTED_STATEMENT).await; + if per_session.is_not_validator() { + modify_reputation(reputation, ctx.sender(), peer, COST_UNEXPECTED_STATEMENT).await; + } return }, Some(l) => l, @@ -1300,73 +1453,81 @@ async fn handle_incoming_statement( }, }; - let cluster_sender_index = { + let (active, cluster_sender_index) = { // This block of code only returns `Some` when both the originator and // the sending peer are in the cluster. + let active = local_validator.active.as_mut(); - let allowed_senders = local_validator - .cluster_tracker - .senders_for_originator(statement.unchecked_validator_index()); + let allowed_senders = active + .as_ref() + .map(|active| { + active + .cluster_tracker + .senders_for_originator(statement.unchecked_validator_index()) + }) + .unwrap_or_default(); - allowed_senders + let idx = allowed_senders .iter() .filter_map(|i| session_info.discovery_keys.get(i.0 as usize).map(|ad| (*i, ad))) .filter(|(_, ad)| peer_state.is_authority(ad)) .map(|(i, _)| i) - .next() - }; - - let checked_statement = if let Some(cluster_sender_index) = cluster_sender_index { - match handle_cluster_statement( - relay_parent, - &mut local_validator.cluster_tracker, - per_relay_parent.session, - &per_session.session_info, - statement, - cluster_sender_index, - ) { - Ok(Some(s)) => s, - Ok(None) => return, - Err(rep) => { - modify_reputation(reputation, ctx.sender(), peer, rep).await; - return - }, - } - } else { - let grid_sender_index = local_validator - .grid_tracker - .direct_statement_providers( - &per_session.groups, - statement.unchecked_validator_index(), - statement.unchecked_payload(), - ) - .into_iter() - .filter_map(|i| session_info.discovery_keys.get(i.0 as usize).map(|ad| (i, ad))) - .filter(|(_, ad)| peer_state.is_authority(ad)) - .map(|(i, _)| i) .next(); + (active, idx) + }; - if let Some(grid_sender_index) = grid_sender_index { - match handle_grid_statement( + let checked_statement = + if let Some((active, cluster_sender_index)) = active.zip(cluster_sender_index) { + match handle_cluster_statement( relay_parent, - &mut local_validator.grid_tracker, + &mut active.cluster_tracker, per_relay_parent.session, - &per_session, + &per_session.session_info, statement, - grid_sender_index, + 
cluster_sender_index, ) { - Ok(s) => s, + Ok(Some(s)) => s, + Ok(None) => return, Err(rep) => { modify_reputation(reputation, ctx.sender(), peer, rep).await; return }, } } else { - // Not a cluster or grid peer. - modify_reputation(reputation, ctx.sender(), peer, COST_UNEXPECTED_STATEMENT).await; - return - } - }; + let grid_sender_index = local_validator + .grid_tracker + .direct_statement_providers( + &per_session.groups, + statement.unchecked_validator_index(), + statement.unchecked_payload(), + ) + .into_iter() + .filter_map(|i| session_info.discovery_keys.get(i.0 as usize).map(|ad| (i, ad))) + .filter(|(_, ad)| peer_state.is_authority(ad)) + .map(|(i, _)| i) + .next(); + + if let Some(grid_sender_index) = grid_sender_index { + match handle_grid_statement( + relay_parent, + &mut local_validator.grid_tracker, + per_relay_parent.session, + &per_session, + statement, + grid_sender_index, + ) { + Ok(s) => s, + Err(rep) => { + modify_reputation(reputation, ctx.sender(), peer, rep).await; + return + }, + } + } else { + // Not a cluster or grid peer. + modify_reputation(reputation, ctx.sender(), peer, COST_UNEXPECTED_STATEMENT).await; + return + } + }; let statement = checked_statement.payload().clone(); let originator_index = checked_statement.validator_index(); @@ -1434,7 +1595,7 @@ async fn handle_incoming_statement( local_validator.grid_tracker.learned_fresh_statement( &per_session.groups, session_topology, - local_validator.index, + originator_index, &statement, ); } @@ -1683,14 +1844,8 @@ async fn provide_candidate_to_grid( statement_knowledge: filter.clone(), }; - let manifest_message = - Versioned::V2(protocol_v2::StatementDistributionMessage::BackedCandidateManifest(manifest)); - let ack_message = Versioned::V2( - protocol_v2::StatementDistributionMessage::BackedCandidateKnown(acknowledgement), - ); - - let mut manifest_peers = Vec::new(); - let mut ack_peers = Vec::new(); + let mut manifest_peers: Vec<(PeerId, ProtocolVersion)> = Vec::new(); + let mut ack_peers: Vec<(PeerId, ProtocolVersion)> = Vec::new(); let mut post_statements = Vec::new(); for (v, action) in actions { @@ -1698,7 +1853,7 @@ async fn provide_candidate_to_grid( None => continue, Some(p) => if peers.get(&p).map_or(false, |d| d.knows_relay_parent(&relay_parent)) { - p + (p, peers.get(&p).expect("Qed, was checked above").protocol_version.into()) } else { continue }, @@ -1724,42 +1879,95 @@ async fn provide_candidate_to_grid( &per_session.groups, group_index, candidate_hash, + &(p.0, p.1.try_into().expect("Qed, can not fail was checked above")), ) .into_iter() - .map(|m| (vec![p], m)), + .map(|m| (vec![p.0], m)), ); } - if !manifest_peers.is_empty() { + let manifest_peers_v2 = filter_by_peer_version(&manifest_peers, ValidationVersion::V2.into()); + let manifest_peers_vstaging = + filter_by_peer_version(&manifest_peers, ValidationVersion::VStaging.into()); + if !manifest_peers_v2.is_empty() { gum::debug!( target: LOG_TARGET, ?candidate_hash, - n_peers = manifest_peers.len(), - "Sending manifest to peers" + local_validator = ?per_session.local_validator, + n_peers = manifest_peers_v2.len(), + "Sending manifest to v2 peers" ); ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - manifest_peers, - manifest_message.into(), + manifest_peers_v2, + Versioned::V2(protocol_v2::StatementDistributionMessage::BackedCandidateManifest( + manifest.clone(), + )) + .into(), )) .await; } - if !ack_peers.is_empty() { + if !manifest_peers_vstaging.is_empty() { gum::debug!( target: LOG_TARGET, ?candidate_hash, - n_peers = 
ack_peers.len(), - "Sending acknowledgement to peers" + local_validator = ?per_session.local_validator, + n_peers = manifest_peers_vstaging.len(), + "Sending manifest to vstaging peers" ); ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - ack_peers, - ack_message.into(), + manifest_peers_vstaging, + Versioned::VStaging( + protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest), + ) + .into(), )) .await; } + let ack_peers_v2 = filter_by_peer_version(&ack_peers, ValidationVersion::V2.into()); + let ack_peers_vstaging = filter_by_peer_version(&ack_peers, ValidationVersion::VStaging.into()); + if !ack_peers_v2.is_empty() { + gum::debug!( + target: LOG_TARGET, + ?candidate_hash, + local_validator = ?per_session.local_validator, + n_peers = ack_peers_v2.len(), + "Sending acknowledgement to v2 peers" + ); + + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( + ack_peers_v2, + Versioned::V2(protocol_v2::StatementDistributionMessage::BackedCandidateKnown( + acknowledgement.clone(), + )) + .into(), + )) + .await; + } + + if !ack_peers_vstaging.is_empty() { + gum::debug!( + target: LOG_TARGET, + ?candidate_hash, + local_validator = ?per_session.local_validator, + n_peers = ack_peers_vstaging.len(), + "Sending acknowledgement to vstaging peers" + ); + + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( + ack_peers_vstaging, + Versioned::VStaging( + protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown( + acknowledgement, + ), + ) + .into(), + )) + .await; + } if !post_statements.is_empty() { ctx.send_message(NetworkBridgeTxMessage::SendValidationMessages(post_statements)) .await; @@ -1937,13 +2145,15 @@ async fn handle_incoming_manifest_common<'a, Context>( let local_validator = match relay_parent_state.local_validator.as_mut() { None => { - modify_reputation( - reputation, - ctx.sender(), - peer, - COST_UNEXPECTED_MANIFEST_MISSING_KNOWLEDGE, - ) - .await; + if per_session.is_not_validator() { + modify_reputation( + reputation, + ctx.sender(), + peer, + COST_UNEXPECTED_MANIFEST_MISSING_KNOWLEDGE, + ) + .await; + } return None }, Some(x) => x, @@ -1974,8 +2184,13 @@ async fn handle_incoming_manifest_common<'a, Context>( let sender_index = match sender_index { None => { - modify_reputation(reputation, ctx.sender(), peer, COST_UNEXPECTED_MANIFEST_DISALLOWED) - .await; + modify_reputation( + reputation, + ctx.sender(), + peer, + COST_UNEXPECTED_MANIFEST_PEER_UNKNOWN, + ) + .await; return None }, Some(s) => s, @@ -2029,6 +2244,17 @@ async fn handle_incoming_manifest_common<'a, Context>( return None } + if acknowledge { + gum::trace!( + target: LOG_TARGET, + ?candidate_hash, + from = ?sender_index, + local_index = ?per_session.local_validator, + ?manifest_kind, + "immediate ack, known candidate" + ); + } + Some(ManifestImportSuccess { relay_parent_state, per_session, acknowledge, sender_index }) } @@ -2042,6 +2268,7 @@ fn post_acknowledgement_statement_messages( groups: &Groups, group_index: GroupIndex, candidate_hash: CandidateHash, + peer: &(PeerId, ValidationVersion), ) -> Vec { let sending_filter = match grid_tracker.pending_statements_for(recipient, candidate_hash) { None => return Vec::new(), @@ -2058,14 +2285,29 @@ fn post_acknowledgement_statement_messages( recipient, statement.payload(), ); - - messages.push(Versioned::V2( - protocol_v2::StatementDistributionMessage::Statement( - relay_parent, - statement.as_unchecked().clone(), - ) - .into(), - )); + match peer.1.into() { + ValidationVersion::V2 => 
messages.push(Versioned::V2( + protocol_v2::StatementDistributionMessage::Statement( + relay_parent, + statement.as_unchecked().clone(), + ) + .into(), + )), + ValidationVersion::VStaging => messages.push(Versioned::VStaging( + protocol_vstaging::StatementDistributionMessage::Statement( + relay_parent, + statement.as_unchecked().clone(), + ) + .into(), + )), + ValidationVersion::V1 => { + gum::error!( + target: LOG_TARGET, + "Bug ValidationVersion::V1 should not be used in statement-distribution v2, + legacy should have handled this" + ); + }, + }; } messages @@ -2135,7 +2377,15 @@ async fn handle_incoming_manifest( }; let messages = acknowledgement_and_statement_messages( - peer, + &( + peer, + state + .peers + .get(&peer) + .map(|val| val.protocol_version) + // Assume the latest stable version, if we don't have info about peer version. + .unwrap_or(ValidationVersion::V2), + ), sender_index, &per_session.groups, relay_parent_state, @@ -2166,7 +2416,7 @@ async fn handle_incoming_manifest( /// Produces acknowledgement and statement messages to be sent over the network, /// noting that they have been sent within the grid topology tracker as well. fn acknowledgement_and_statement_messages( - peer: PeerId, + peer: &(PeerId, ValidationVersion), validator_index: ValidatorIndex, groups: &Groups, relay_parent_state: &mut PerRelayParentState, @@ -2185,11 +2435,28 @@ fn acknowledgement_and_statement_messages( statement_knowledge: local_knowledge.clone(), }; - let msg = Versioned::V2(protocol_v2::StatementDistributionMessage::BackedCandidateKnown( - acknowledgement, + let msg_v2 = Versioned::V2(protocol_v2::StatementDistributionMessage::BackedCandidateKnown( + acknowledgement.clone(), )); - let mut messages = vec![(vec![peer], msg.into())]; + let mut messages = match peer.1 { + ValidationVersion::V2 => vec![(vec![peer.0], msg_v2.into())], + ValidationVersion::VStaging => vec![( + vec![peer.0], + Versioned::VStaging(protocol_v2::StatementDistributionMessage::BackedCandidateKnown( + acknowledgement, + )) + .into(), + )], + ValidationVersion::V1 => { + gum::error!( + target: LOG_TARGET, + "Bug ValidationVersion::V1 should not be used in statement-distribution v2, + legacy should have handled this" + ); + return Vec::new() + }, + }; local_validator.grid_tracker.manifest_sent_to( groups, @@ -2206,9 +2473,10 @@ fn acknowledgement_and_statement_messages( &groups, group_index, candidate_hash, + peer, ); - messages.extend(statement_messages.into_iter().map(|m| (vec![peer], m))); + messages.extend(statement_messages.into_iter().map(|m| (vec![peer.0], m))); messages } @@ -2288,6 +2556,15 @@ async fn handle_incoming_acknowledgement( &per_session.groups, group_index, candidate_hash, + &( + peer, + state + .peers + .get(&peer) + .map(|val| val.protocol_version) + // Assume the latest stable version, if we don't have info about peer version. + .unwrap_or(ValidationVersion::V2), + ), ); if !messages.is_empty() { @@ -2377,7 +2654,7 @@ async fn send_cluster_candidate_statements( Some(s) => s, }; - let local_group = match relay_parent_state.local_validator.as_mut() { + let local_group = match relay_parent_state.active_validator_state_mut() { None => return, Some(v) => v.group, }; @@ -2464,11 +2741,10 @@ pub(crate) async fn dispatch_requests(ctx: &mut Context, state: &mut St }) { // For cluster members, they haven't advertised any statements in particular, // but have surely sent us some. 
- if local_validator - .cluster_tracker - .knows_candidate(validator_id, identifier.candidate_hash) - { - return Some(StatementFilter::blank(local_validator.cluster_tracker.targets().len())) + if let Some(active) = local_validator.active.as_ref() { + if active.cluster_tracker.knows_candidate(validator_id, identifier.candidate_hash) { + return Some(StatementFilter::blank(active.cluster_tracker.targets().len())) + } } let filter = local_validator @@ -2499,7 +2775,11 @@ pub(crate) async fn dispatch_requests(ctx: &mut Context, state: &mut St } // don't require a backing threshold for cluster candidates. - let require_backing = relay_parent_state.local_validator.as_ref()?.group != group_index; + let local_validator = relay_parent_state.local_validator.as_ref()?; + let require_backing = local_validator + .active + .as_ref() + .map_or(true, |active| active.group != group_index); Some(RequestProperties { unwanted_mask, @@ -2558,6 +2838,13 @@ pub(crate) async fn handle_response( let &requests::CandidateIdentifier { relay_parent, candidate_hash, group_index } = response.candidate_identifier(); + gum::trace!( + target: LOG_TARGET, + ?candidate_hash, + peer = ?response.requested_peer(), + "Received response", + ); + let post_confirmation = { let relay_parent_state = match state.per_relay_parent.get_mut(&relay_parent) { None => return, @@ -2596,12 +2883,29 @@ pub(crate) async fn handle_response( let (candidate, pvd, statements) = match res.request_status { requests::CandidateRequestStatus::Outdated => return, - requests::CandidateRequestStatus::Incomplete => return, + requests::CandidateRequestStatus::Incomplete => { + gum::trace!( + target: LOG_TARGET, + ?candidate_hash, + "Response incomplete. Retrying" + ); + + return + }, requests::CandidateRequestStatus::Complete { candidate, persisted_validation_data, statements, - } => (candidate, persisted_validation_data, statements), + } => { + gum::trace!( + target: LOG_TARGET, + ?candidate_hash, + n_statements = statements.len(), + "Successfully received candidate" + ); + + (candidate, persisted_validation_data, statements) + }, }; for statement in statements { @@ -2673,6 +2977,13 @@ pub(crate) fn answer_request(state: &mut State, message: ResponderMessage) { let ResponderMessage { request, sent_feedback } = message; let AttestedCandidateRequest { candidate_hash, ref mask } = &request.payload; + gum::trace!( + target: LOG_TARGET, + ?candidate_hash, + peer = ?request.peer, + "Received request" + ); + // Signal to the responder that we started processing this request. let _ = sent_feedback.send(()); @@ -2681,12 +2992,12 @@ pub(crate) fn answer_request(state: &mut State, message: ResponderMessage) { Some(c) => c, }; - let relay_parent_state = match state.per_relay_parent.get(&confirmed.relay_parent()) { + let relay_parent_state = match state.per_relay_parent.get_mut(&confirmed.relay_parent()) { None => return, Some(s) => s, }; - let local_validator = match relay_parent_state.local_validator.as_ref() { + let local_validator = match relay_parent_state.local_validator.as_mut() { None => return, Some(s) => s, }; @@ -2718,28 +3029,43 @@ pub(crate) fn answer_request(state: &mut State, message: ResponderMessage) { return } - // check peer is allowed to request the candidate (i.e. we've sent them a manifest) - { - let mut can_request = false; - for validator_id in find_validator_ids(peer_data.iter_known_discovery_ids(), |a| { + // check peer is allowed to request the candidate (i.e. 
they're in the cluster or we've sent + // them a manifest) + let (validator_id, is_cluster) = { + let mut validator_id = None; + let mut is_cluster = false; + for v in find_validator_ids(peer_data.iter_known_discovery_ids(), |a| { per_session.authority_lookup.get(a) }) { - if local_validator.grid_tracker.can_request(validator_id, *candidate_hash) { - can_request = true; + if local_validator + .active + .as_ref() + .map_or(false, |active| active.cluster_tracker.can_request(v, *candidate_hash)) + { + validator_id = Some(v); + is_cluster = true; + break + } + + if local_validator.grid_tracker.can_request(v, *candidate_hash) { + validator_id = Some(v); break } } - if !can_request { - let _ = request.send_outgoing_response(OutgoingResponse { - result: Err(()), - reputation_changes: vec![COST_UNEXPECTED_REQUEST], - sent_feedback: None, - }); + match validator_id { + Some(v) => (v, is_cluster), + None => { + let _ = request.send_outgoing_response(OutgoingResponse { + result: Err(()), + reputation_changes: vec![COST_UNEXPECTED_REQUEST], + sent_feedback: None, + }); - return + return + }, } - } + }; // Transform mask with 'OR' semantics into one with 'AND' semantics for the API used // below. @@ -2748,19 +3074,39 @@ pub(crate) fn answer_request(state: &mut State, message: ResponderMessage) { validated_in_group: !mask.validated_in_group.clone(), }; + let statements: Vec<_> = relay_parent_state + .statement_store + .group_statements(&per_session.groups, confirmed.group_index(), *candidate_hash, &and_mask) + .map(|s| s.as_unchecked().clone()) + .collect(); + + // Update bookkeeping about which statements peers have received. + for statement in &statements { + if is_cluster { + local_validator + .active + .as_mut() + .expect("cluster peer means local is active validator; qed") + .cluster_tracker + .note_sent( + validator_id, + statement.unchecked_validator_index(), + statement.unchecked_payload().clone(), + ); + } else { + local_validator.grid_tracker.sent_or_received_direct_statement( + &per_session.groups, + statement.unchecked_validator_index(), + validator_id, + statement.unchecked_payload(), + ); + } + } + let response = AttestedCandidateResponse { candidate_receipt: (&**confirmed.candidate_receipt()).clone(), persisted_validation_data: confirmed.persisted_validation_data().clone(), - statements: relay_parent_state - .statement_store - .group_statements( - &per_session.groups, - confirmed.group_index(), - *candidate_hash, - &and_mask, - ) - .map(|s| s.as_unchecked().clone()) - .collect(), + statements, }; let _ = request.send_response(response); diff --git a/polkadot/node/network/statement-distribution/src/v2/requests.rs b/polkadot/node/network/statement-distribution/src/v2/requests.rs index f13496024fcfee52f48a3cdb2377c2e24eadda79..8507a4b827690acb6b7fca2a4e2de427245b437d 100644 --- a/polkadot/node/network/statement-distribution/src/v2/requests.rs +++ b/polkadot/node/network/statement-distribution/src/v2/requests.rs @@ -265,6 +265,12 @@ impl RequestManager { HEntry::Vacant(_) => (), } } + + gum::debug!( + target: LOG_TARGET, + "Requests remaining after cleanup: {}", + self.by_priority.len(), + ); } /// Returns true if there are pending requests that are dispatchable. 
@@ -355,6 +361,13 @@ impl RequestManager { Some(t) => t, }; + gum::debug!( + target: crate::LOG_TARGET, + candidate_hash = ?id.candidate_hash, + peer = ?target, + "Issuing candidate request" + ); + let (request, response_fut) = OutgoingRequest::new( RequestRecipient::Peer(target), AttestedCandidateRequest { @@ -498,6 +511,11 @@ impl UnhandledResponse { &self.response.identifier } + /// Get the peer we made the request to. + pub fn requested_peer(&self) -> &PeerId { + &self.response.requested_peer + } + /// Validate the response. If the response is valid, this will yield the /// candidate, the [`PersistedValidationData`] of the candidate, and requested /// checked statements. @@ -582,12 +600,19 @@ impl UnhandledResponse { request_status: CandidateRequestStatus::Incomplete, } }, - Err(RequestError::NetworkError(_) | RequestError::Canceled(_)) => + Err(e @ RequestError::NetworkError(_) | e @ RequestError::Canceled(_)) => { + gum::trace!( + target: LOG_TARGET, + err = ?e, + peer = ?requested_peer, + "Request error" + ); return ResponseValidationOutput { requested_peer, reputation_changes: vec![], request_status: CandidateRequestStatus::Incomplete, - }, + } + }, Ok(response) => response, }; diff --git a/polkadot/node/network/statement-distribution/src/v2/statement_store.rs b/polkadot/node/network/statement-distribution/src/v2/statement_store.rs index 74db431eda1d214e0031e98ab304cb0fd0fabd96..96d976e22cd518e350b9c7de19aac127b3eeccc9 100644 --- a/polkadot/node/network/statement-distribution/src/v2/statement_store.rs +++ b/polkadot/node/network/statement-distribution/src/v2/statement_store.rs @@ -212,6 +212,7 @@ impl StatementStore { } /// Get an iterator over all statements marked as being unknown by the backing subsystem. + /// This provides `Seconded` statements prior to `Valid` statements. pub fn fresh_statements_for_backing<'a>( &'a self, validators: &'a [ValidatorIndex], @@ -220,14 +221,15 @@ impl StatementStore { let s_st = CompactStatement::Seconded(candidate_hash); let v_st = CompactStatement::Valid(candidate_hash); - validators - .iter() - .flat_map(move |v| { - let a = self.known_statements.get(&(*v, s_st.clone())); - let b = self.known_statements.get(&(*v, v_st.clone())); + let fresh_seconded = + validators.iter().map(move |v| self.known_statements.get(&(*v, s_st.clone()))); - a.into_iter().chain(b) - }) + let fresh_valid = + validators.iter().map(move |v| self.known_statements.get(&(*v, v_st.clone()))); + + fresh_seconded + .chain(fresh_valid) + .flatten() .filter(|stored| !stored.known_by_backing) .map(|stored| &stored.statement) } @@ -250,6 +252,7 @@ impl StatementStore { } /// Error indicating that the validator was unknown. 
+#[derive(Debug)] pub struct ValidatorUnknown; type Fingerprint = (ValidatorIndex, CompactStatement); @@ -281,3 +284,78 @@ impl GroupStatements { self.valid.set(within_group_index, true); } } + +#[cfg(test)] +mod tests { + use super::*; + + use polkadot_primitives::v6::{Hash, SigningContext, ValidatorPair}; + use sp_application_crypto::Pair as PairT; + + #[test] + fn always_provides_fresh_statements_in_order() { + let validator_a = ValidatorIndex(1); + let validator_b = ValidatorIndex(2); + let candidate_hash = CandidateHash(Hash::repeat_byte(42)); + + let valid_statement = CompactStatement::Valid(candidate_hash); + let seconded_statement = CompactStatement::Seconded(candidate_hash); + let signing_context = + SigningContext { parent_hash: Hash::repeat_byte(0), session_index: 1 }; + + let groups = Groups::new(vec![vec![validator_a, validator_b]].into(), 2); + + let mut store = StatementStore::new(&groups); + + // import a Valid statement from A and a Seconded statement from B. + let signed_valid_by_a = { + let payload = valid_statement.signing_payload(&signing_context); + let pair = ValidatorPair::generate().0; + let signature = pair.sign(&payload[..]); + + SignedStatement::new( + valid_statement.clone(), + validator_a, + signature, + &signing_context, + &pair.public(), + ) + .unwrap() + }; + store.insert(&groups, signed_valid_by_a, StatementOrigin::Remote).unwrap(); + + let signed_seconded_by_b = { + let payload = seconded_statement.signing_payload(&signing_context); + let pair = ValidatorPair::generate().0; + let signature = pair.sign(&payload[..]); + + SignedStatement::new( + seconded_statement.clone(), + validator_b, + signature, + &signing_context, + &pair.public(), + ) + .unwrap() + }; + store.insert(&groups, signed_seconded_by_b, StatementOrigin::Remote).unwrap(); + + // Regardless of the order statements are requested, + // we will get them in the order [B, A] because seconded statements must be first. 
+ let vals = &[validator_a, validator_b]; + let statements = + store.fresh_statements_for_backing(vals, candidate_hash).collect::<Vec<_>>(); + + assert_eq!(statements.len(), 2); + assert_eq!(statements[0].payload(), &seconded_statement); + assert_eq!(statements[1].payload(), &valid_statement); + + let vals = &[validator_b, validator_a]; + let statements = + store.fresh_statements_for_backing(vals, candidate_hash).collect::<Vec<_>>(); + + assert_eq!(statements.len(), 2); + assert_eq!(statements[0].payload(), &seconded_statement); + assert_eq!(statements[1].payload(), &valid_statement); + } +} diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs b/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs index 80dec1d75ab98be43b820bf7382b4436ef87c3b2..a9f5b537b3238ab8b13ff37a0e952b35ba92a066 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs @@ -23,7 +23,7 @@ fn share_seconded_circulated_to_cluster() { let config = TestConfig { validator_count: 20, group_size: 3, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -34,7 +34,8 @@ fn share_seconded_circulated_to_cluster() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -52,7 +53,7 @@ fn share_seconded_circulated_to_cluster() { // peer B is in group, has no relay parent in view. // peer C is not in group, has relay parent in view. { - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); connect_peer( &mut overseer, @@ -130,7 +131,7 @@ fn cluster_valid_statement_before_seconded_ignored() { let config = TestConfig { validator_count: 20, group_size: 3, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -139,12 +140,13 @@ fn cluster_valid_statement_before_seconded_ignored() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); let candidate_hash = CandidateHash(Hash::repeat_byte(42)); let test_leaf = state.make_dummy_leaf(relay_parent); // peer A is in group, has relay parent in view. - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let v_a = other_group_validators[0]; connect_peer( &mut overseer, @@ -197,7 +199,7 @@ fn cluster_statement_bad_signature() { let config = TestConfig { validator_count: 20, group_size: 3, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -206,12 +208,13 @@ fn cluster_statement_bad_signature() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); let candidate_hash = CandidateHash(Hash::repeat_byte(42)); let test_leaf = state.make_dummy_leaf(relay_parent); // peer A is in group, has relay parent in view. 
- let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let v_a = other_group_validators[0]; let v_b = other_group_validators[1]; @@ -277,7 +280,7 @@ fn useful_cluster_statement_from_non_cluster_peer_rejected() { let config = TestConfig { validator_count: 20, group_size: 3, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -286,13 +289,13 @@ fn useful_cluster_statement_from_non_cluster_peer_rejected() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); let candidate_hash = CandidateHash(Hash::repeat_byte(42)); let test_leaf = state.make_dummy_leaf(relay_parent); // peer A is not in group, has relay parent in view. - let not_our_group = - if local_validator.group_index.0 == 0 { GroupIndex(1) } else { GroupIndex(0) }; + let not_our_group = if local_group_index.0 == 0 { GroupIndex(1) } else { GroupIndex(0) }; let that_group_validators = state.group_validators(not_our_group, false); let v_non = that_group_validators[0]; @@ -346,7 +349,7 @@ fn statement_from_non_cluster_originator_unexpected() { let config = TestConfig { validator_count: 20, group_size: 3, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -355,12 +358,13 @@ fn statement_from_non_cluster_originator_unexpected() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); let candidate_hash = CandidateHash(Hash::repeat_byte(42)); let test_leaf = state.make_dummy_leaf(relay_parent); // peer A is not in group, has relay parent in view. - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let v_a = other_group_validators[0]; connect_peer(&mut overseer, peer_a.clone(), None).await; @@ -408,7 +412,7 @@ fn seconded_statement_leads_to_request() { let config = TestConfig { validator_count: 20, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -417,7 +421,8 @@ fn seconded_statement_leads_to_request() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -432,7 +437,7 @@ fn seconded_statement_leads_to_request() { let candidate_hash = candidate.hash(); // peer A is in group, has relay parent in view. 
- let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let v_a = other_group_validators[0]; connect_peer( @@ -503,7 +508,7 @@ fn cluster_statements_shared_seconded_first() { let config = TestConfig { validator_count: 20, group_size: 3, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -512,7 +517,8 @@ fn cluster_statements_shared_seconded_first() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -528,7 +534,7 @@ fn cluster_statements_shared_seconded_first() { // peer A is in group, no relay parent in view. { - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); connect_peer( &mut overseer, @@ -624,7 +630,7 @@ fn cluster_accounts_for_implicit_view() { let config = TestConfig { validator_count: 20, group_size: 3, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -634,7 +640,8 @@ fn cluster_accounts_for_implicit_view() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -651,7 +658,7 @@ fn cluster_accounts_for_implicit_view() { // peer A is in group, has relay parent in view. // peer B is in group, has no relay parent in view. { - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); connect_peer( &mut overseer, @@ -775,7 +782,7 @@ fn cluster_messages_imported_after_confirmed_candidate_importable_check() { let config = TestConfig { validator_count: 20, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -784,7 +791,8 @@ fn cluster_messages_imported_after_confirmed_candidate_importable_check() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -799,7 +807,7 @@ fn cluster_messages_imported_after_confirmed_candidate_importable_check() { let candidate_hash = candidate.hash(); // peer A is in group, has relay parent in view. 
- let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let v_a = other_group_validators[0]; { connect_peer( @@ -907,7 +915,7 @@ fn cluster_messages_imported_after_new_leaf_importable_check() { let config = TestConfig { validator_count: 20, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -916,7 +924,8 @@ fn cluster_messages_imported_after_new_leaf_importable_check() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -931,7 +940,7 @@ fn cluster_messages_imported_after_new_leaf_importable_check() { let candidate_hash = candidate.hash(); // peer A is in group, has relay parent in view. - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let v_a = other_group_validators[0]; { connect_peer( @@ -1048,7 +1057,7 @@ fn ensure_seconding_limit_is_respected() { let config = TestConfig { validator_count: 20, group_size: 4, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: Some(AsyncBackingParams { max_candidate_depth: 1, allowed_ancestry_len: 3, @@ -1060,7 +1069,8 @@ fn ensure_seconding_limit_is_respected() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -1092,7 +1102,7 @@ fn ensure_seconding_limit_is_respected() { let candidate_hash_2 = candidate_2.hash(); let candidate_hash_3 = candidate_3.hash(); - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let v_a = other_group_validators[0]; // peers A,B,C are in group, have relay parent in view. 
diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs b/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs index a0af9579823597514383f57c9cbe691a7282bb20..9802db060821407ef9c14650701ef3a82a74f858 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs @@ -29,7 +29,7 @@ fn backed_candidate_leads_to_advertisement() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -41,7 +41,8 @@ fn backed_candidate_leads_to_advertisement() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -55,9 +56,9 @@ fn backed_candidate_leads_to_advertisement() { ); let candidate_hash = candidate.hash(); - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let target_group_validators = - state.group_validators((local_validator.group_index.0 + 1).into(), true); + state.group_validators((local_group_index.0 + 1).into(), true); let v_a = other_group_validators[0]; let v_b = other_group_validators[1]; let v_c = target_group_validators[0]; @@ -219,7 +220,7 @@ fn backed_candidate_leads_to_advertisement() { assert_eq!(manifest, BackedCandidateManifest { relay_parent, candidate_hash, - group_index: local_validator.group_index, + group_index: local_group_index, para_id: local_para, parent_head_data_hash: pvd.parent_head.hash(), statement_knowledge: StatementFilter { @@ -244,7 +245,7 @@ fn received_advertisement_before_confirmation_leads_to_request() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -256,9 +257,9 @@ fn received_advertisement_before_confirmation_leads_to_request() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); let other_para = ParaId::from(other_group.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -273,7 +274,7 @@ fn received_advertisement_before_confirmation_leads_to_request() { ); let candidate_hash = candidate.hash(); - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let target_group_validators = state.group_validators(other_group, true); let v_a = other_group_validators[0]; let v_b = other_group_validators[1]; @@ -424,7 +425,7 @@ fn received_advertisement_after_backing_leads_to_acknowledgement() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -435,9 +436,9 @@ fn received_advertisement_after_backing_leads_to_acknowledgement() { test_harness(config, |state, mut overseer| async move { let local_validator = 
state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); let other_para = ParaId::from(other_group.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -672,7 +673,7 @@ fn received_advertisement_after_confirmation_before_backing() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -683,9 +684,9 @@ fn received_advertisement_after_confirmation_before_backing() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); let other_para = ParaId::from(other_group.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -858,7 +859,7 @@ fn additional_statements_are_shared_after_manifest_exchange() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -869,9 +870,9 @@ fn additional_statements_are_shared_after_manifest_exchange() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); let other_para = ParaId::from(other_group.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -1155,7 +1156,7 @@ fn advertisement_sent_when_peer_enters_relay_parent_view() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -1167,7 +1168,8 @@ fn advertisement_sent_when_peer_enters_relay_parent_view() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -1181,9 +1183,9 @@ fn advertisement_sent_when_peer_enters_relay_parent_view() { ); let candidate_hash = candidate.hash(); - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let target_group_validators = - state.group_validators((local_validator.group_index.0 + 1).into(), true); + state.group_validators((local_group_index.0 + 1).into(), true); let v_a = other_group_validators[0]; let v_b = other_group_validators[1]; let v_c = target_group_validators[0]; @@ -1336,7 +1338,7 @@ fn advertisement_sent_when_peer_enters_relay_parent_view() { let expected_manifest = BackedCandidateManifest { relay_parent, candidate_hash, - group_index: local_validator.group_index, + group_index: local_group_index, para_id: local_para, parent_head_data_hash: pvd.parent_head.hash(), statement_knowledge: StatementFilter { @@ -1377,7 +1379,7 @@ fn 
advertisement_not_re_sent_when_peer_re_enters_view() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -1389,7 +1391,8 @@ fn advertisement_not_re_sent_when_peer_re_enters_view() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -1403,9 +1406,9 @@ fn advertisement_not_re_sent_when_peer_re_enters_view() { ); let candidate_hash = candidate.hash(); - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let target_group_validators = - state.group_validators((local_validator.group_index.0 + 1).into(), true); + state.group_validators((local_group_index.0 + 1).into(), true); let v_a = other_group_validators[0]; let v_b = other_group_validators[1]; let v_c = target_group_validators[0]; @@ -1567,7 +1570,7 @@ fn advertisement_not_re_sent_when_peer_re_enters_view() { assert_eq!(manifest, BackedCandidateManifest { relay_parent, candidate_hash, - group_index: local_validator.group_index, + group_index: local_group_index, para_id: local_para, parent_head_data_hash: pvd.parent_head.hash(), statement_knowledge: StatementFilter { @@ -1599,7 +1602,7 @@ fn grid_statements_imported_to_backing() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -1610,9 +1613,9 @@ fn grid_statements_imported_to_backing() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); let other_para = ParaId::from(other_group.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -1797,12 +1800,13 @@ fn grid_statements_imported_to_backing() { #[test] fn advertisements_rejected_from_incorrect_peers() { + sp_tracing::try_init_simple(); let validator_count = 6; let group_size = 3; let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -1814,9 +1818,9 @@ fn advertisements_rejected_from_incorrect_peers() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); let other_para = ParaId::from(other_group.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -1831,7 +1835,7 @@ fn advertisements_rejected_from_incorrect_peers() { ); let candidate_hash = candidate.hash(); - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let target_group_validators = state.group_validators(other_group, true); let v_a = 
other_group_validators[0]; let v_b = other_group_validators[1]; @@ -1911,10 +1915,11 @@ fn advertisements_rejected_from_incorrect_peers() { ) .await; + // Message not expected from peers of our own group. assert_matches!( overseer.recv().await, AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))) - if p == peer_a && r == COST_UNEXPECTED_MANIFEST_DISALLOWED.into() => { } + if p == peer_a && r == COST_UNEXPECTED_MANIFEST_PEER_UNKNOWN.into() => { } ); } @@ -1927,10 +1932,11 @@ fn advertisements_rejected_from_incorrect_peers() { ) .await; + // Message not expected from peers of our own group. assert_matches!( overseer.recv().await, AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))) - if p == peer_b && r == COST_UNEXPECTED_MANIFEST_DISALLOWED.into() => { } + if p == peer_b && r == COST_UNEXPECTED_MANIFEST_PEER_UNKNOWN.into() => { } ); } @@ -1945,7 +1951,7 @@ fn manifest_rejected_with_unknown_relay_parent() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -1956,9 +1962,9 @@ fn manifest_rejected_with_unknown_relay_parent() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); let other_para = ParaId::from(other_group.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -2051,7 +2057,7 @@ fn manifest_rejected_when_not_a_validator() { let config = TestConfig { validator_count, group_size, - local_validator: false, + local_validator: LocalRole::None, async_backing_params: None, }; @@ -2153,7 +2159,7 @@ fn manifest_rejected_when_group_does_not_match_para() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -2163,9 +2169,9 @@ fn manifest_rejected_when_group_does_not_match_para() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); // Create a mismatch between group and para. 
let other_para = next_group_index(other_group, validator_count, group_size); let other_para = ParaId::from(other_para.0); @@ -2260,7 +2266,7 @@ fn peer_reported_for_advertisement_conflicting_with_confirmed_candidate() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -2271,9 +2277,9 @@ fn peer_reported_for_advertisement_conflicting_with_confirmed_candidate() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); let other_para = ParaId::from(other_group.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -2451,3 +2457,141 @@ fn peer_reported_for_advertisement_conflicting_with_confirmed_candidate() { overseer }); } + +#[test] +fn inactive_local_participates_in_grid() { + let validator_count = 11; + let group_size = 3; + let config = TestConfig { + validator_count, + group_size, + local_validator: LocalRole::InactiveValidator, + async_backing_params: None, + }; + + let dummy_relay_parent = Hash::repeat_byte(2); + let relay_parent = Hash::repeat_byte(1); + let peer_a = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + assert_eq!(local_validator.validator_index.0, validator_count as u32); + + let group_idx = GroupIndex::from(0); + let para = ParaId::from(0); + + // Dummy leaf is needed to update topology. + let dummy_leaf = state.make_dummy_leaf(Hash::repeat_byte(2)); + let test_leaf = state.make_dummy_leaf(relay_parent); + + let (candidate, pvd) = make_candidate( + relay_parent, + 1, + para, + test_leaf.para_data(para).head_data.clone(), + vec![4, 5, 6].into(), + Hash::repeat_byte(42).into(), + ); + let candidate_hash = candidate.hash(); + + let first_group = state.group_validators(group_idx, true); + let v_a = first_group.last().unwrap().clone(); + let v_b = first_group.first().unwrap().clone(); + + { + connect_peer( + &mut overseer, + peer_a.clone(), + Some(vec![state.discovery_id(v_a)].into_iter().collect()), + ) + .await; + + send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; + } + + activate_leaf(&mut overseer, &dummy_leaf, &state, true).await; + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(dummy_relay_parent), + false, + ) + .await; + + // Send gossip topology. + send_new_topology(&mut overseer, state.make_dummy_topology()).await; + activate_leaf(&mut overseer, &test_leaf, &state, false).await; + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + // Receive an advertisement from A. 
+ let manifest = BackedCandidateManifest { + relay_parent, + candidate_hash, + group_index: group_idx, + para_id: para, + parent_head_data_hash: pvd.parent_head.hash(), + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], + }, + }; + send_peer_message( + &mut overseer, + peer_a.clone(), + protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest), + ) + .await; + + let statements = vec![ + state + .sign_statement( + v_a, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(), + state + .sign_statement( + v_b, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(), + ]; + // Inactive node requests this candidate. + handle_sent_request( + &mut overseer, + peer_a, + candidate_hash, + StatementFilter::blank(group_size), + candidate.clone(), + pvd.clone(), + statements, + ) + .await; + + for _ in 0..2 { + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))) + if p == peer_a && r == BENEFIT_VALID_STATEMENT.into() => { } + ); + } + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))) + if p == peer_a && r == BENEFIT_VALID_RESPONSE.into() => { } + ); + answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await; + + overseer + }); +} diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs index 4150377a0c6c219832104024f6d6c046dccec709..4e6269775245dde88f9debd19c60dce9717c09af 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs @@ -61,19 +61,30 @@ const DEFAULT_ASYNC_BACKING_PARAMETERS: AsyncBackingParams = // Some deterministic genesis hash for req/res protocol names const GENESIS_HASH: Hash = Hash::repeat_byte(0xff); +#[derive(Debug, Copy, Clone)] +enum LocalRole { + /// Active validator. + Validator, + /// Authority, not in active validator set. + InactiveValidator, + /// Not a validator. + None, +} + struct TestConfig { + // number of active validators. validator_count: usize, // how many validators to place in each group. group_size: usize, // whether the local node should be a validator - local_validator: bool, + local_validator: LocalRole, async_backing_params: Option<AsyncBackingParams>, } #[derive(Debug, Clone)] struct TestLocalValidator { validator_index: ValidatorIndex, - group_index: GroupIndex, + group_index: Option<GroupIndex>, } struct TestState { @@ -99,7 +110,7 @@ impl TestState { let mut assignment_keys = Vec::new(); let mut validator_groups = Vec::new(); - let local_validator_pos = if config.local_validator { + let local_validator_pos = if let LocalRole::Validator = config.local_validator { // ensure local validator is always in a full group. 
Some(rng.gen_range(0..config.validator_count).saturating_sub(config.group_size - 1)) } else { @@ -128,13 +139,19 @@ impl TestState { } } - let local = if let Some(local_pos) = local_validator_pos { - Some(TestLocalValidator { + let local = match (config.local_validator, local_validator_pos) { + (LocalRole::Validator, Some(local_pos)) => Some(TestLocalValidator { validator_index: ValidatorIndex(local_pos as _), - group_index: GroupIndex((local_pos / config.group_size) as _), - }) - } else { - None + group_index: Some(GroupIndex((local_pos / config.group_size) as _)), + }), + (LocalRole::InactiveValidator, None) => { + discovery_keys.push(AuthorityDiscoveryPair::generate().0.public()); + Some(TestLocalValidator { + validator_index: ValidatorIndex(config.validator_count as u32), + group_index: None, + }) + }, + _ => None, }; let validator_public = validator_pubkeys(&validators); @@ -181,15 +198,23 @@ impl TestState { fn make_dummy_topology(&self) -> NewGossipTopology { let validator_count = self.config.validator_count; + let is_local_inactive = matches!(self.config.local_validator, LocalRole::InactiveValidator); + + let mut indices: Vec<usize> = (0..validator_count).collect(); + if is_local_inactive { + indices.push(validator_count); + } + NewGossipTopology { session: 1, topology: SessionGridTopology::new( - (0..validator_count).collect(), - (0..validator_count) + indices.clone(), + indices + .into_iter() .map(|i| TopologyPeerInfo { peer_ids: Vec::new(), validator_index: ValidatorIndex(i as u32), - discovery_id: AuthorityDiscoveryPair::generate().0.public(), + discovery_id: self.session_info.discovery_keys[i].clone(), }) .collect(), ), @@ -276,7 +301,7 @@ fn test_harness<T: Future<Output = VirtualOverseer>>( test: impl FnOnce(TestState, VirtualOverseer) -> T, ) { let pool = sp_core::testing::TaskExecutor::new(); - let keystore = if config.local_validator { + let keystore = if let LocalRole::Validator = config.local_validator { test_helpers::mock::make_ferdie_keystore() } else { Arc::new(LocalKeystore::in_memory()) as KeystorePtr diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs index 0734b75c9712d50dc8be0f78bde52001ae26b8f0..1eec8290fabaeec37c1dea2b53de3d8c32385336 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs @@ -32,7 +32,7 @@ fn cluster_peer_allowed_to_send_incomplete_statements() { let config = TestConfig { validator_count: 20, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -43,7 +43,8 @@ fn cluster_peer_allowed_to_send_incomplete_statements() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -57,7 +58,7 @@ fn cluster_peer_allowed_to_send_incomplete_statements() { ); let candidate_hash = candidate.hash(); - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let v_a = other_group_validators[0]; let v_b = other_group_validators[1]; @@ -188,7 +189,7 @@ fn peer_reported_for_providing_statements_meant_to_be_masked_out() { let config = TestConfig { 
validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: Some(AsyncBackingParams { // Makes `seconding_limit: 2` (easier to hit the limit). max_candidate_depth: 1, @@ -203,9 +204,9 @@ fn peer_reported_for_providing_statements_meant_to_be_masked_out() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); let other_para = ParaId::from(other_group.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -475,7 +476,7 @@ fn peer_reported_for_not_enough_statements() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -486,9 +487,9 @@ fn peer_reported_for_not_enough_statements() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); let other_para = ParaId::from(other_group.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -670,7 +671,7 @@ fn peer_reported_for_duplicate_statements() { let config = TestConfig { validator_count: 20, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -681,7 +682,8 @@ fn peer_reported_for_duplicate_statements() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -695,7 +697,7 @@ fn peer_reported_for_duplicate_statements() { ); let candidate_hash = candidate.hash(); - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let v_a = other_group_validators[0]; let v_b = other_group_validators[1]; @@ -830,7 +832,7 @@ fn peer_reported_for_providing_statements_with_invalid_signatures() { let config = TestConfig { validator_count: 20, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -841,7 +843,8 @@ fn peer_reported_for_providing_statements_with_invalid_signatures() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -855,8 +858,8 @@ fn peer_reported_for_providing_statements_with_invalid_signatures() { ); let candidate_hash = candidate.hash(); - let other_group_validators = state.group_validators(local_validator.group_index, true); - state.group_validators((local_validator.group_index.0 + 1).into(), true); + let other_group_validators = state.group_validators(local_group_index, true); + 
state.group_validators((local_group_index.0 + 1).into(), true); let v_a = other_group_validators[0]; let v_b = other_group_validators[1]; @@ -968,7 +971,7 @@ fn peer_reported_for_providing_statements_with_wrong_validator_id() { let config = TestConfig { validator_count: 20, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -979,7 +982,8 @@ fn peer_reported_for_providing_statements_with_wrong_validator_id() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -993,9 +997,8 @@ fn peer_reported_for_providing_statements_with_wrong_validator_id() { ); let candidate_hash = candidate.hash(); - let other_group_validators = state.group_validators(local_validator.group_index, true); - let next_group_validators = - state.group_validators((local_validator.group_index.0 + 1).into(), true); + let other_group_validators = state.group_validators(local_group_index, true); + let next_group_validators = state.group_validators((local_group_index.0 + 1).into(), true); let v_a = other_group_validators[0]; let v_c = next_group_validators[0]; @@ -1105,7 +1108,7 @@ fn local_node_sanity_checks_incoming_requests() { let config = TestConfig { validator_count: 20, group_size: 3, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -1117,7 +1120,8 @@ fn local_node_sanity_checks_incoming_requests() { test_harness(config, |mut state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -1135,7 +1139,7 @@ fn local_node_sanity_checks_incoming_requests() { // peer B is in group, has no relay parent in view. // peer C is not in group, has relay parent in view. { - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); connect_peer( &mut overseer, @@ -1306,6 +1310,209 @@ fn local_node_sanity_checks_incoming_requests() { }); } +#[test] +fn local_node_checks_that_peer_can_request_before_responding() { + let config = TestConfig { + validator_count: 20, + group_size: 3, + local_validator: LocalRole::Validator, + async_backing_params: None, + }; + + let relay_parent = Hash::repeat_byte(1); + let peer_a = PeerId::random(); + let peer_b = PeerId::random(); + + test_harness(config, |mut state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); + + let test_leaf = state.make_dummy_leaf(relay_parent); + + let (candidate, pvd) = make_candidate( + relay_parent, + 1, + local_para, + test_leaf.para_data(local_para).head_data.clone(), + vec![4, 5, 6].into(), + Hash::repeat_byte(42).into(), + ); + let candidate_hash = candidate.hash(); + + // Peers A and B are in group and have relay parent in view. 
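As an aside on the `LocalRole` change that these tests now rely on (introduced earlier in this file's `TestState` changes): the harness distinguishes an active validator, which belongs to a backing group, from an inactive validator, which sits in the authority set with an index past the active range and no group. A minimal, self-contained sketch of that selection logic, with simplified names rather than the actual `TestState` code:

// Simplified sketch of the role selection performed by the test state.
// `LocalRole` and the field shapes mirror the diff; the rest is illustrative.
enum LocalRole {
    None,
    Validator,
    InactiveValidator,
}

struct TestLocalValidator {
    validator_index: u32,
    // `None` for an inactive validator that is not part of any backing group.
    group_index: Option<u32>,
}

fn select_local(
    role: LocalRole,
    local_pos: Option<usize>,
    validator_count: usize,
    group_size: usize,
) -> Option<TestLocalValidator> {
    match (role, local_pos) {
        // Active validator: its group is derived from its position.
        (LocalRole::Validator, Some(pos)) => Some(TestLocalValidator {
            validator_index: pos as u32,
            group_index: Some((pos / group_size) as u32),
        }),
        // Inactive validator: gets an index past the active set and no group.
        (LocalRole::InactiveValidator, None) => Some(TestLocalValidator {
            validator_index: validator_count as u32,
            group_index: None,
        }),
        _ => None,
    }
}

fn main() {
    let v = select_local(LocalRole::Validator, Some(7), 20, 3).unwrap();
    assert_eq!(v.group_index, Some(2));
    assert!(select_local(LocalRole::None, None, 20, 3).is_none());
}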
+ let other_group_validators = state.group_validators(local_group_index, true); + + connect_peer( + &mut overseer, + peer_a.clone(), + Some(vec![state.discovery_id(other_group_validators[0])].into_iter().collect()), + ) + .await; + + connect_peer( + &mut overseer, + peer_b.clone(), + Some(vec![state.discovery_id(other_group_validators[1])].into_iter().collect()), + ) + .await; + let peer_b_index = other_group_validators[1]; + + send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; + send_peer_view_change(&mut overseer, peer_b.clone(), view![relay_parent]).await; + + // Finish setup + activate_leaf(&mut overseer, &test_leaf, &state, true).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + let mask = StatementFilter::blank(state.config.group_size); + + // Confirm candidate. + let signed = state.sign_statement( + local_validator.validator_index, + CompactStatement::Seconded(candidate_hash), + &SigningContext { session_index: 1, parent_hash: relay_parent }, + ); + let full_signed = signed + .clone() + .convert_to_superpayload(StatementWithPVD::Seconded(candidate.clone(), pvd.clone())) + .unwrap(); + + overseer + .send(FromOrchestra::Communication { + msg: StatementDistributionMessage::Share(relay_parent, full_signed), + }) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::V2(protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::Statement( + r, + s, + ) + )) + )) => { + assert_eq!(peers, vec![peer_a.clone(), peer_b.clone()]); + assert_eq!(r, relay_parent); + assert_eq!(s.unchecked_payload(), &CompactStatement::Seconded(candidate_hash)); + assert_eq!(s.unchecked_validator_index(), local_validator.validator_index); + } + ); + + answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await; + + // Local node should respond to requests from peers in the same group + // which appear to not have already seen the candidate + { + // Peer requests candidate and local responds + let response = state + .send_request( + peer_a, + request_v2::AttestedCandidateRequest { + candidate_hash: candidate.hash(), + mask: mask.clone(), + }, + ) + .await + .await; + + let expected_statements = vec![signed.into_unchecked()]; + assert_matches!(response, full_response => { + // Response is the same for vstaging. 
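The behaviour this new test exercises, answering an `AttestedCandidateRequest` only when the requesting peer does not already appear to know the candidate, and otherwise charging `COST_UNEXPECTED_REQUEST`, can be summarised with a short standalone sketch. The names below are hypothetical stand-ins, not the subsystem's real types:

use std::collections::HashSet;

/// Outcome of an incoming attested-candidate request, mirroring the test's
/// expectations: either serve the statements or punish the requester.
#[derive(Debug, PartialEq)]
enum RequestOutcome {
    Serve,
    // Maps onto COST_UNEXPECTED_REQUEST in the real code.
    RejectUnexpected,
}

/// Peers that already sent us a `Seconded` statement for the candidate
/// evidently know it, so a request from them is treated as unexpected.
fn handle_request(requester: &str, peers_known_to_have_candidate: &HashSet<&str>) -> RequestOutcome {
    if peers_known_to_have_candidate.contains(requester) {
        RequestOutcome::RejectUnexpected
    } else {
        RequestOutcome::Serve
    }
}

fn main() {
    let mut known = HashSet::new();
    // Peer B sent us a Seconded statement first, as in the test above.
    known.insert("peer_b");

    assert_eq!(handle_request("peer_a", &known), RequestOutcome::Serve);
    assert_eq!(handle_request("peer_b", &known), RequestOutcome::RejectUnexpected);
}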
+ let request_v2::AttestedCandidateResponse { candidate_receipt, persisted_validation_data, statements } = + request_v2::AttestedCandidateResponse::decode( + &mut full_response.result.expect("We should have a proper answer").as_ref(), + ).expect("Decoding should work"); + assert_eq!(candidate_receipt, candidate); + assert_eq!(persisted_validation_data, pvd); + assert_eq!(statements, expected_statements); + }); + } + + // Local node should reject requests if the requester appears to know + // the candidate (has sent them a Seconded statement) + { + let statement = state + .sign_statement( + peer_b_index, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + + send_peer_message( + &mut overseer, + peer_b.clone(), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))) + if p == peer_b && r == BENEFIT_VALID_STATEMENT_FIRST.into() => { } + ); + + let response = state + .send_request( + peer_b, + request_v2::AttestedCandidateRequest { + candidate_hash: candidate.hash(), + mask: mask.clone(), + }, + ) + .await + .await; + + // Peer already knows about this candidate. Should reject. + assert_matches!( + response, + RawOutgoingResponse { + result, + reputation_changes, + sent_feedback + } => { + assert_matches!(result, Err(())); + assert_eq!(reputation_changes, vec![COST_UNEXPECTED_REQUEST.into()]); + assert_matches!(sent_feedback, None); + } + ); + + // Handling leftover statement distribution message + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::V2(protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::Statement( + r, + s, + ) + )) + )) => { + assert_eq!(peers, vec![peer_a.clone()]); + assert_eq!(r, relay_parent); + assert_eq!(s.unchecked_payload(), &CompactStatement::Seconded(candidate_hash)); + assert_eq!(s.unchecked_validator_index(), peer_b_index); + } + ); + } + + overseer + }); +} + #[test] fn local_node_respects_statement_mask() { let validator_count = 6; @@ -1313,7 +1520,7 @@ fn local_node_respects_statement_mask() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -1325,7 +1532,8 @@ fn local_node_respects_statement_mask() { test_harness(config, |mut state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -1339,9 +1547,9 @@ fn local_node_respects_statement_mask() { ); let candidate_hash = candidate.hash(); - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let target_group_validators = - state.group_validators((local_validator.group_index.0 + 1).into(), true); + state.group_validators((local_group_index.0 + 1).into(), true); let v_a = other_group_validators[0]; let v_b = other_group_validators[1]; let v_c = target_group_validators[0]; @@ -1505,7 +1713,7 @@ fn 
local_node_respects_statement_mask() { assert_eq!(manifest, BackedCandidateManifest { relay_parent, candidate_hash, - group_index: local_validator.group_index, + group_index: local_group_index, para_id: local_para, parent_head_data_hash: pvd.parent_head.hash(), statement_knowledge: StatementFilter { @@ -1559,7 +1767,7 @@ fn should_delay_before_retrying_dropped_requests() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -1570,9 +1778,9 @@ fn should_delay_before_retrying_dropped_requests() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); let other_para = ParaId::from(other_group.0); let test_leaf = state.make_dummy_leaf(relay_parent); diff --git a/polkadot/node/overseer/Cargo.toml b/polkadot/node/overseer/Cargo.toml index 5d41407ef83a832351ff29a4026352afbb21bf5c..ac1e4443f0c8d92eea96d01496df71d7ff2b856a 100644 --- a/polkadot/node/overseer/Cargo.toml +++ b/polkadot/node/overseer/Cargo.toml @@ -4,6 +4,7 @@ version = "1.0.0" authors.workspace = true edition.workspace = true license.workspace = true +description = "System overseer of the Polkadot node" [dependencies] client = { package = "sc-client-api", path = "../../../substrate/client/api" } @@ -18,7 +19,6 @@ polkadot-node-metrics = { path = "../metrics" } polkadot-primitives = { path = "../../primitives" } orchestra = { version = "0.3.3", default-features = false, features=["futures_channel"] } gum = { package = "tracing-gum", path = "../gum" } -schnellru = "0.2.1" sp-core = { path = "../../../substrate/primitives/core" } async-trait = "0.1.57" tikv-jemalloc-ctl = { version = "0.5.0", optional = true } diff --git a/polkadot/node/overseer/examples/minimal-example.rs b/polkadot/node/overseer/examples/minimal-example.rs index e78941776d5ec1d79d3cd52096e3bb2e649c6e54..857cdba673db267d2824d8a995257316fc67225f 100644 --- a/polkadot/node/overseer/examples/minimal-example.rs +++ b/polkadot/node/overseer/examples/minimal-example.rs @@ -32,7 +32,7 @@ use polkadot_overseer::{ gen::{FromOrchestra, SpawnedSubsystem}, HeadSupportsParachains, SubsystemError, }; -use polkadot_primitives::{CandidateReceipt, Hash, PvfExecTimeoutKind}; +use polkadot_primitives::{CandidateReceipt, Hash, PvfExecKind}; struct AlwaysSupportsParachains; @@ -73,13 +73,13 @@ impl Subsystem1 { commitments_hash: Hash::zero(), }; - let msg = CandidateValidationMessage::ValidateFromChainState( + let msg = CandidateValidationMessage::ValidateFromChainState { candidate_receipt, - PoV { block_data: BlockData(Vec::new()) }.into(), - Default::default(), - PvfExecTimeoutKind::Backing, - tx, - ); + pov: PoV { block_data: BlockData(Vec::new()) }.into(), + executor_params: Default::default(), + exec_kind: PvfExecKind::Backing, + response_sender: tx, + }; ctx.send_message(msg).await; } () @@ -163,7 +163,6 @@ fn main() { .unwrap(); let overseer_fut = overseer.run().fuse(); - let timer_stream = timer_stream; pin_mut!(timer_stream); pin_mut!(overseer_fut); diff --git a/polkadot/node/overseer/src/dummy.rs b/polkadot/node/overseer/src/dummy.rs index fea96e5dbab73a05e72fa90e8d4cef1765b9921e..fc5f0070773b74e671d6510b380bd27b861ef84c 100644 --- a/polkadot/node/overseer/src/dummy.rs +++ 
b/polkadot/node/overseer/src/dummy.rs @@ -17,11 +17,9 @@ use crate::{ prometheus::Registry, HeadSupportsParachains, InitializedOverseerBuilder, MetricsTrait, Overseer, OverseerMetrics, OverseerSignal, OverseerSubsystemContext, SpawnGlue, - KNOWN_LEAVES_CACHE_SIZE, }; use orchestra::{FromOrchestra, SpawnedSubsystem, Subsystem, SubsystemContext}; use polkadot_node_subsystem_types::{errors::SubsystemError, messages::*}; -use schnellru::{ByLength, LruMap}; // Generated dummy messages use crate::messages::*; @@ -193,7 +191,6 @@ where .activation_external_listeners(Default::default()) .span_per_active_leaf(Default::default()) .active_leaves(Default::default()) - .known_leaves(LruMap::new(ByLength::new(KNOWN_LEAVES_CACHE_SIZE))) .spawner(SpawnGlue(spawner)) .metrics(metrics) .supports_parachains(supports_parachains); diff --git a/polkadot/node/overseer/src/lib.rs b/polkadot/node/overseer/src/lib.rs index 84d5d19c3b93c708110fc35d2f124b94984f8a86..5207bb830d8c1506eb92ce702900a39c44b74834 100644 --- a/polkadot/node/overseer/src/lib.rs +++ b/polkadot/node/overseer/src/lib.rs @@ -17,7 +17,7 @@ //! # Overseer //! //! `overseer` implements the Overseer architecture described in the -//! [implementers-guide](https://w3f.github.io/parachain-implementers-guide/node/index.html). +//! [implementers' guide][overseer-page]. //! For the motivations behind implementing the overseer itself you should //! check out that guide, documentation in this crate will be mostly discussing //! technical stuff. @@ -53,6 +53,8 @@ //! . +--------------------+ +---------------------+ . //! .................................................................. //! ``` +//! +//! [overseer-page]: https://paritytech.github.io/polkadot-sdk/book/node/overseer.html // #![deny(unused_results)] // unused dependencies can not work for test and examples at the same time @@ -68,7 +70,6 @@ use std::{ }; use futures::{channel::oneshot, future::BoxFuture, select, Future, FutureExt, StreamExt}; -use schnellru::LruMap; use client::{BlockImportNotification, BlockchainEvents, FinalityNotification}; use polkadot_primitives::{Block, BlockNumber, Hash}; @@ -86,8 +87,8 @@ use polkadot_node_subsystem_types::messages::{ pub use polkadot_node_subsystem_types::{ errors::{SubsystemError, SubsystemResult}, - jaeger, ActivatedLeaf, ActiveLeavesUpdate, LeafStatus, OverseerSignal, - RuntimeApiSubsystemClient, UnpinHandle, + jaeger, ActivatedLeaf, ActiveLeavesUpdate, OverseerSignal, RuntimeApiSubsystemClient, + UnpinHandle, }; pub mod metrics; @@ -110,10 +111,6 @@ pub use orchestra::{ SubsystemSender, TimeoutExt, ToOrchestra, TrySendError, }; -/// Store 2 days worth of blocks, not accounting for forks, -/// in the LRU cache. Assumes a 6-second block time. -pub const KNOWN_LEAVES_CACHE_SIZE: u32 = 2 * 24 * 3600 / 6; - #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] mod memory_stats; #[cfg(test)] @@ -281,6 +278,11 @@ impl From> for BlockInfo { /// as the substrate framework or user interaction. pub enum Event { /// A new block was imported. + /// + /// This event is not sent if the block was already known + /// and we reorged to it e.g. due to a reversion. + /// + /// Also, these events are not sent during a major sync. BlockImported(BlockInfo), /// A block was finalized with i.e. babe or another consensus algorithm. 
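A brief note on the `ValidateFromChainState` change visible in the example and tests of this crate: the tuple variant is replaced by a struct variant with named fields. A rough, self-contained illustration of why this helps call sites (hypothetical message type, not the actual `CandidateValidationMessage`):

// Before: positional arguments are easy to mix up at the call site.
// enum Msg { Validate(Receipt, Pov, Params, Kind, Sender) }
//
// After: named fields document each argument where the message is built.
struct Receipt(u64);
struct Pov(Vec<u8>);

enum ExecKind {
    Backing,
}

enum Msg {
    Validate {
        candidate_receipt: Receipt,
        pov: Pov,
        exec_kind: ExecKind,
    },
}

fn main() {
    // The call site now reads like documentation.
    let _msg = Msg::Validate {
        candidate_receipt: Receipt(1),
        pov: Pov(vec![]),
        exec_kind: ExecKind::Backing,
    };
}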
BlockFinalized(BlockInfo), @@ -496,7 +498,6 @@ pub struct Overseer { #[subsystem(AvailabilityDistributionMessage, sends: [ AvailabilityStoreMessage, - AvailabilityRecoveryMessage, ChainApiMessage, RuntimeApiMessage, NetworkBridgeTxMessage, @@ -639,9 +640,6 @@ pub struct Overseer { /// An implementation for checking whether a header supports parachain consensus. pub supports_parachains: SupportsParachains, - /// An LRU cache for keeping track of relay-chain heads that have already been seen. - pub known_leaves: LruMap, - /// Various Prometheus metrics. pub metrics: OverseerMetrics, } @@ -800,10 +798,9 @@ where }; let mut update = match self.on_head_activated(&block.hash, Some(block.parent_hash)).await { - Some((span, status)) => ActiveLeavesUpdate::start_work(ActivatedLeaf { + Some(span) => ActiveLeavesUpdate::start_work(ActivatedLeaf { hash: block.hash, number: block.number, - status, unpin_handle: block.unpin_handle, span, }), @@ -862,7 +859,7 @@ where &mut self, hash: &Hash, parent_hash: Option, - ) -> Option<(Arc, LeafStatus)> { + ) -> Option> { if !self.supports_parachains.head_supports_parachains(hash).await { return None } @@ -889,14 +886,7 @@ where let span = Arc::new(span); self.span_per_active_leaf.insert(*hash, span.clone()); - let status = if self.known_leaves.get(hash).is_some() { - LeafStatus::Stale - } else { - self.known_leaves.insert(*hash, ()); - LeafStatus::Fresh - }; - - Some((span, status)) + Some(span) } fn on_head_deactivated(&mut self, hash: &Hash) { diff --git a/polkadot/node/overseer/src/tests.rs b/polkadot/node/overseer/src/tests.rs index c17613fb7ea5dc3d12f04a6d3a9162b7de4dbbe6..0494274367d953146ad06e93c3ee00cfc9c3b983 100644 --- a/polkadot/node/overseer/src/tests.rs +++ b/polkadot/node/overseer/src/tests.rs @@ -30,7 +30,7 @@ use polkadot_node_subsystem_types::messages::{ }; use polkadot_primitives::{ CandidateHash, CandidateReceipt, CollatorPair, Id as ParaId, InvalidDisputeStatementKind, - PvfExecTimeoutKind, SessionIndex, ValidDisputeStatementKind, ValidatorIndex, + PvfExecKind, SessionIndex, ValidDisputeStatementKind, ValidatorIndex, }; use crate::{ @@ -102,13 +102,13 @@ where }; let (tx, _) = oneshot::channel(); - ctx.send_message(CandidateValidationMessage::ValidateFromChainState( + ctx.send_message(CandidateValidationMessage::ValidateFromChainState { candidate_receipt, - PoV { block_data: BlockData(Vec::new()) }.into(), - Default::default(), - PvfExecTimeoutKind::Backing, - tx, - )) + pov: PoV { block_data: BlockData(Vec::new()) }.into(), + executor_params: Default::default(), + exec_kind: PvfExecKind::Backing, + response_sender: tx, + }) .await; c += 1; continue @@ -793,20 +793,20 @@ where } fn test_candidate_validation_msg() -> CandidateValidationMessage { - let (sender, _) = oneshot::channel(); + let (response_sender, _) = oneshot::channel(); let pov = Arc::new(PoV { block_data: BlockData(Vec::new()) }); let candidate_receipt = CandidateReceipt { descriptor: dummy_candidate_descriptor(dummy_hash()), commitments_hash: Hash::zero(), }; - CandidateValidationMessage::ValidateFromChainState( + CandidateValidationMessage::ValidateFromChainState { candidate_receipt, pov, - Default::default(), - PvfExecTimeoutKind::Backing, - sender, - ) + executor_params: Default::default(), + exec_kind: PvfExecKind::Backing, + response_sender, + } } fn test_candidate_backing_msg() -> CandidateBackingMessage { diff --git a/polkadot/node/primitives/Cargo.toml b/polkadot/node/primitives/Cargo.toml index 
55dfa67387094198c2126b79b5a31abc23dc2b4e..c39fd5947b0b7b80b2bf78bce9f1bb76e364302b 100644 --- a/polkadot/node/primitives/Cargo.toml +++ b/polkadot/node/primitives/Cargo.toml @@ -20,6 +20,7 @@ sp-runtime = { path = "../../../substrate/primitives/runtime" } polkadot-parachain-primitives = { path = "../../parachain", default-features = false } schnorrkel = "0.9.1" thiserror = "1.0.48" +bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } serde = { version = "1.0.188", features = ["derive"] } [target.'cfg(not(target_os = "unknown"))'.dependencies] diff --git a/polkadot/node/primitives/src/approval.rs b/polkadot/node/primitives/src/approval.rs index 01f45a20787461c2cf6a0f39f88c6d082ac87468..e5ae24f7a51e2a3f000b25a11e6c62d4592ae904 100644 --- a/polkadot/node/primitives/src/approval.rs +++ b/polkadot/node/primitives/src/approval.rs @@ -16,190 +16,515 @@ //! Types relevant for approval. -pub use sp_consensus_babe::{Randomness, Slot, VrfOutput, VrfProof, VrfSignature, VrfTranscript}; - -use parity_scale_codec::{Decode, Encode}; -use polkadot_primitives::{ - BlockNumber, CandidateHash, CandidateIndex, CoreIndex, Hash, Header, SessionIndex, - ValidatorIndex, ValidatorSignature, -}; -use sp_application_crypto::ByteArray; -use sp_consensus_babe as babe_primitives; - -/// Validators assigning to check a particular candidate are split up into tranches. -/// Earlier tranches of validators check first, with later tranches serving as backup. -pub type DelayTranche = u32; - -/// A static context used to compute the Relay VRF story based on the -/// VRF output included in the header-chain. -pub const RELAY_VRF_STORY_CONTEXT: &[u8] = b"A&V RC-VRF"; - -/// A static context used for all relay-vrf-modulo VRFs. -pub const RELAY_VRF_MODULO_CONTEXT: &[u8] = b"A&V MOD"; - -/// A static context used for all relay-vrf-modulo VRFs. -pub const RELAY_VRF_DELAY_CONTEXT: &[u8] = b"A&V DELAY"; - -/// A static context used for transcripts indicating assigned availability core. -pub const ASSIGNED_CORE_CONTEXT: &[u8] = b"A&V ASSIGNED"; - -/// A static context associated with producing randomness for a core. -pub const CORE_RANDOMNESS_CONTEXT: &[u8] = b"A&V CORE"; - -/// A static context associated with producing randomness for a tranche. -pub const TRANCHE_RANDOMNESS_CONTEXT: &[u8] = b"A&V TRANCHE"; - -/// random bytes derived from the VRF submitted within the block by the -/// block author as a credential and used as input to approval assignment criteria. -#[derive(Debug, Clone, Encode, Decode, PartialEq)] -pub struct RelayVRFStory(pub [u8; 32]); - -/// Different kinds of input data or criteria that can prove a validator's assignment -/// to check a particular parachain. -#[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] -pub enum AssignmentCertKind { - /// An assignment story based on the VRF that authorized the relay-chain block where the - /// candidate was included combined with a sample number. +/// A list of primitives introduced in v1. +pub mod v1 { + use sp_consensus_babe as babe_primitives; + pub use sp_consensus_babe::{ + Randomness, Slot, VrfOutput, VrfProof, VrfSignature, VrfTranscript, + }; + + use parity_scale_codec::{Decode, Encode}; + use polkadot_primitives::{ + BlockNumber, CandidateHash, CandidateIndex, CoreIndex, Hash, Header, SessionIndex, + ValidatorIndex, ValidatorSignature, + }; + use sp_application_crypto::ByteArray; + + /// Validators assigning to check a particular candidate are split up into tranches. 
+ /// Earlier tranches of validators check first, with later tranches serving as backup. + pub type DelayTranche = u32; + + /// A static context used to compute the Relay VRF story based on the + /// VRF output included in the header-chain. + pub const RELAY_VRF_STORY_CONTEXT: &[u8] = b"A&V RC-VRF"; + + /// A static context used for all relay-vrf-modulo VRFs. + pub const RELAY_VRF_MODULO_CONTEXT: &[u8] = b"A&V MOD"; + + /// A static context used for all relay-vrf-modulo VRFs. + pub const RELAY_VRF_DELAY_CONTEXT: &[u8] = b"A&V DELAY"; + + /// A static context used for transcripts indicating assigned availability core. + pub const ASSIGNED_CORE_CONTEXT: &[u8] = b"A&V ASSIGNED"; + + /// A static context associated with producing randomness for a core. + pub const CORE_RANDOMNESS_CONTEXT: &[u8] = b"A&V CORE"; + + /// A static context associated with producing randomness for a tranche. + pub const TRANCHE_RANDOMNESS_CONTEXT: &[u8] = b"A&V TRANCHE"; + + /// random bytes derived from the VRF submitted within the block by the + /// block author as a credential and used as input to approval assignment criteria. + #[derive(Debug, Clone, Encode, Decode, PartialEq)] + pub struct RelayVRFStory(pub [u8; 32]); + + /// Different kinds of input data or criteria that can prove a validator's assignment + /// to check a particular parachain. + #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] + pub enum AssignmentCertKind { + /// An assignment story based on the VRF that authorized the relay-chain block where the + /// candidate was included combined with a sample number. + /// + /// The context used to produce bytes is [`RELAY_VRF_MODULO_CONTEXT`] + RelayVRFModulo { + /// The sample number used in this cert. + sample: u32, + }, + /// An assignment story based on the VRF that authorized the relay-chain block where the + /// candidate was included combined with the index of a particular core. + /// + /// The context is [`RELAY_VRF_DELAY_CONTEXT`] + RelayVRFDelay { + /// The core index chosen in this cert. + core_index: CoreIndex, + }, + } + + /// A certification of assignment. + #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] + pub struct AssignmentCert { + /// The criterion which is claimed to be met by this cert. + pub kind: AssignmentCertKind, + /// The VRF signature showing the criterion is met. + pub vrf: VrfSignature, + } + + /// An assignment criterion which refers to the candidate under which the assignment is + /// relevant by block hash. + #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] + pub struct IndirectAssignmentCert { + /// A block hash where the candidate appears. + pub block_hash: Hash, + /// The validator index. + pub validator: ValidatorIndex, + /// The cert itself. + pub cert: AssignmentCert, + } + + /// A signed approval vote which references the candidate indirectly via the block. /// - /// The context used to produce bytes is [`RELAY_VRF_MODULO_CONTEXT`] - RelayVRFModulo { - /// The sample number used in this cert. - sample: u32, - }, - /// An assignment story based on the VRF that authorized the relay-chain block where the - /// candidate was included combined with the index of a particular core. + /// In practice, we have a look-up from block hash and candidate index to candidate hash, + /// so this can be transformed into a `SignedApprovalVote`. + #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] + pub struct IndirectSignedApprovalVote { + /// A block hash where the candidate appears. 
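As a small illustration of how the v1 assignment kinds relate to the static VRF transcript contexts declared above (a standalone sketch; the real lookup lives in the assignment criteria code, not in this module):

// Mirrors the v1 constants and certificate kinds from the module above.
const RELAY_VRF_MODULO_CONTEXT: &[u8] = b"A&V MOD";
const RELAY_VRF_DELAY_CONTEXT: &[u8] = b"A&V DELAY";

struct CoreIndex(u32);

enum AssignmentCertKind {
    RelayVRFModulo { sample: u32 },
    RelayVRFDelay { core_index: CoreIndex },
}

/// Pick the static transcript context matching the certificate kind, as the
/// doc comments on the kinds describe.
fn vrf_context(kind: &AssignmentCertKind) -> &'static [u8] {
    match kind {
        AssignmentCertKind::RelayVRFModulo { .. } => RELAY_VRF_MODULO_CONTEXT,
        AssignmentCertKind::RelayVRFDelay { .. } => RELAY_VRF_DELAY_CONTEXT,
    }
}

fn main() {
    let kind = AssignmentCertKind::RelayVRFModulo { sample: 0 };
    assert_eq!(vrf_context(&kind), RELAY_VRF_MODULO_CONTEXT);
}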
+ pub block_hash: Hash, + /// The index of the candidate in the list of candidates fully included as-of the block. + pub candidate_index: CandidateIndex, + /// The validator index. + pub validator: ValidatorIndex, + /// The signature by the validator. + pub signature: ValidatorSignature, + } + + /// Metadata about a block which is now live in the approval protocol. + #[derive(Debug)] + pub struct BlockApprovalMeta { + /// The hash of the block. + pub hash: Hash, + /// The number of the block. + pub number: BlockNumber, + /// The hash of the parent block. + pub parent_hash: Hash, + /// The candidates included by the block. + /// Note that these are not the same as the candidates that appear within the block body. + pub candidates: Vec, + /// The consensus slot of the block. + pub slot: Slot, + /// The session of the block. + pub session: SessionIndex, + } + + /// Errors that can occur during the approvals protocol. + #[derive(Debug, thiserror::Error)] + #[allow(missing_docs)] + pub enum ApprovalError { + #[error("Schnorrkel signature error")] + SchnorrkelSignature(schnorrkel::errors::SignatureError), + #[error("Authority index {0} out of bounds")] + AuthorityOutOfBounds(usize), + } + + /// An unsafe VRF output. Provide BABE Epoch info to create a `RelayVRFStory`. + pub struct UnsafeVRFOutput { + vrf_output: VrfOutput, + slot: Slot, + authority_index: u32, + } + + impl UnsafeVRFOutput { + /// Get the slot. + pub fn slot(&self) -> Slot { + self.slot + } + + /// Compute the randomness associated with this VRF output. + pub fn compute_randomness( + self, + authorities: &[(babe_primitives::AuthorityId, babe_primitives::BabeAuthorityWeight)], + randomness: &babe_primitives::Randomness, + epoch_index: u64, + ) -> Result { + let author = match authorities.get(self.authority_index as usize) { + None => return Err(ApprovalError::AuthorityOutOfBounds(self.authority_index as _)), + Some(x) => &x.0, + }; + + let pubkey = schnorrkel::PublicKey::from_bytes(author.as_slice()) + .map_err(ApprovalError::SchnorrkelSignature)?; + + let transcript = + sp_consensus_babe::make_vrf_transcript(randomness, self.slot, epoch_index); + + let inout = self + .vrf_output + .0 + .attach_input_hash(&pubkey, transcript.0) + .map_err(ApprovalError::SchnorrkelSignature)?; + Ok(RelayVRFStory(inout.make_bytes(super::v1::RELAY_VRF_STORY_CONTEXT))) + } + } + + /// Extract the slot number and relay VRF from a header. /// - /// The context is [`RELAY_VRF_DELAY_CONTEXT`] - RelayVRFDelay { - /// The core index chosen in this cert. - core_index: CoreIndex, - }, -} + /// This fails if either there is no BABE `PreRuntime` digest or + /// the digest has type `SecondaryPlain`, which Substrate nodes do + /// not produce or accept anymore. + pub fn babe_unsafe_vrf_info(header: &Header) -> Option { + use babe_primitives::digests::CompatibleDigestItem; -/// A certification of assignment. -#[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] -pub struct AssignmentCert { - /// The criterion which is claimed to be met by this cert. - pub kind: AssignmentCertKind, - /// The VRF signature showing the criterion is met. - pub vrf: VrfSignature, -} + for digest in &header.digest.logs { + if let Some(pre) = digest.as_babe_pre_digest() { + let slot = pre.slot(); + let authority_index = pre.authority_index(); -/// An assignment criterion which refers to the candidate under which the assignment is -/// relevant by block hash. 
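The doc comment above notes that an `IndirectSignedApprovalVote` can be turned into a direct vote through a look-up from block hash and candidate index to candidate hash. A self-contained sketch of that resolution step, with hypothetical types standing in for the real primitives:

use std::collections::HashMap;

type Hash = [u8; 32];
type CandidateIndex = u32;
type ValidatorIndex = u32;

/// References the candidate indirectly via the block it appeared in.
struct IndirectVote {
    block_hash: Hash,
    candidate_index: CandidateIndex,
    validator: ValidatorIndex,
}

/// References the candidate directly by hash.
struct DirectVote {
    candidate_hash: Hash,
    validator: ValidatorIndex,
}

/// Resolve an indirect vote using the per-block candidate table.
fn resolve(
    vote: IndirectVote,
    included: &HashMap<(Hash, CandidateIndex), Hash>,
) -> Option<DirectVote> {
    let candidate_hash = *included.get(&(vote.block_hash, vote.candidate_index))?;
    Some(DirectVote { candidate_hash, validator: vote.validator })
}

fn main() {
    let block = [1u8; 32];
    let candidate = [2u8; 32];
    let mut included = HashMap::new();
    included.insert((block, 0), candidate);

    let vote = IndirectVote { block_hash: block, candidate_index: 0, validator: 7 };
    assert!(resolve(vote, &included).is_some());
}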
-#[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] -pub struct IndirectAssignmentCert { - /// A block hash where the candidate appears. - pub block_hash: Hash, - /// The validator index. - pub validator: ValidatorIndex, - /// The cert itself. - pub cert: AssignmentCert, -} + return pre.vrf_signature().map(|sig| UnsafeVRFOutput { + vrf_output: sig.output.clone(), + slot, + authority_index, + }) + } + } -/// A signed approval vote which references the candidate indirectly via the block. -/// -/// In practice, we have a look-up from block hash and candidate index to candidate hash, -/// so this can be transformed into a `SignedApprovalVote`. -#[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] -pub struct IndirectSignedApprovalVote { - /// A block hash where the candidate appears. - pub block_hash: Hash, - /// The index of the candidate in the list of candidates fully included as-of the block. - pub candidate_index: CandidateIndex, - /// The validator index. - pub validator: ValidatorIndex, - /// The signature by the validator. - pub signature: ValidatorSignature, + None + } } -/// Metadata about a block which is now live in the approval protocol. -#[derive(Debug)] -pub struct BlockApprovalMeta { - /// The hash of the block. - pub hash: Hash, - /// The number of the block. - pub number: BlockNumber, - /// The hash of the parent block. - pub parent_hash: Hash, - /// The candidates included by the block. - /// Note that these are not the same as the candidates that appear within the block body. - pub candidates: Vec, - /// The consensus slot of the block. - pub slot: Slot, - /// The session of the block. - pub session: SessionIndex, -} +/// A list of primitives introduced by v2. +pub mod v2 { + use parity_scale_codec::{Decode, Encode}; + pub use sp_consensus_babe::{ + Randomness, Slot, VrfOutput, VrfProof, VrfSignature, VrfTranscript, + }; + use std::ops::BitOr; -/// Errors that can occur during the approvals protocol. -#[derive(Debug, thiserror::Error)] -#[allow(missing_docs)] -pub enum ApprovalError { - #[error("Schnorrkel signature error")] - SchnorrkelSignature(schnorrkel::errors::SignatureError), - #[error("Authority index {0} out of bounds")] - AuthorityOutOfBounds(usize), -} + use bitvec::{prelude::Lsb0, vec::BitVec}; + use polkadot_primitives::{CandidateIndex, CoreIndex, Hash, ValidatorIndex}; -/// An unsafe VRF output. Provide BABE Epoch info to create a `RelayVRFStory`. -pub struct UnsafeVRFOutput { - vrf_output: VrfOutput, - slot: Slot, - authority_index: u32, -} + /// A static context associated with producing randomness for a core. + pub const CORE_RANDOMNESS_CONTEXT: &[u8] = b"A&V CORE v2"; + /// A static context associated with producing randomness for v2 multi-core assignments. + pub const ASSIGNED_CORE_CONTEXT: &[u8] = b"A&V ASSIGNED v2"; + /// A static context used for all relay-vrf-modulo VRFs for v2 multi-core assignments. + pub const RELAY_VRF_MODULO_CONTEXT: &[u8] = b"A&V MOD v2"; + /// A read-only bitvec wrapper + #[derive(Clone, Debug, Encode, Decode, Hash, PartialEq, Eq)] + pub struct Bitfield(BitVec, std::marker::PhantomData); + + /// A `read-only`, `non-zero` bitfield. + /// Each 1 bit identifies a candidate by the bitfield bit index. + pub type CandidateBitfield = Bitfield; + /// A bitfield of core assignments. + pub type CoreBitfield = Bitfield; + + /// Errors that can occur when creating and manipulating bitfields. + #[derive(Debug)] + pub enum BitfieldError { + /// All bits are zero. + NullAssignment, + } + + /// A bit index in `Bitfield`. 
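Since the new `Bitfield` wrapper below is built on the `bitvec` crate added to this crate's dependencies, its `TryFrom<Vec<T>>` behaviour, rejecting empty input (`BitfieldError::NullAssignment` in the real type) and otherwise OR-ing the per-index bitfields together, can be illustrated with a short standalone sketch. This assumes a plain `bitvec` dependency and is not the wrapper itself:

use bitvec::prelude::{BitVec, Lsb0};

/// Build a bitfield with one bit set per index, or `None` if `indices` is
/// empty (the real type reports `BitfieldError::NullAssignment` instead).
fn bitfield_from_indices(indices: &[usize]) -> Option<BitVec<u8, Lsb0>> {
    let max = *indices.iter().max()?;
    let mut bits: BitVec<u8, Lsb0> = BitVec::repeat(false, max + 1);
    for &i in indices {
        bits.set(i, true);
    }
    Some(bits)
}

fn main() {
    let bits = bitfield_from_indices(&[1, 7, 3]).unwrap();
    assert_eq!(bits.count_ones(), 3);
    assert_eq!(bits.first_one(), Some(1));
    // The most significant set bit defines the length, as in the tests below.
    assert_eq!(bits.len(), 8);
    assert!(bitfield_from_indices(&[]).is_none());
}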
+ #[cfg_attr(test, derive(PartialEq, Clone))] + pub struct BitIndex(pub usize); + + /// Helper trait to convert primitives to `BitIndex`. + pub trait AsBitIndex { + /// Returns the index of the corresponding bit in `Bitfield`. + fn as_bit_index(&self) -> BitIndex; + } + + impl Bitfield { + /// Returns the bit value at specified `index`. If `index` is greater than bitfield size, + /// returns `false`. + pub fn bit_at(&self, index: BitIndex) -> bool { + if self.0.len() <= index.0 { + false + } else { + self.0[index.0] + } + } + + /// Returns number of bits. + pub fn len(&self) -> usize { + self.0.len() + } + + /// Returns the number of 1 bits. + pub fn count_ones(&self) -> usize { + self.0.count_ones() + } + + /// Returns the index of the first 1 bit. + pub fn first_one(&self) -> Option { + self.0.first_one() + } + + /// Returns an iterator over inner bits. + pub fn iter_ones(&self) -> bitvec::slice::IterOnes { + self.0.iter_ones() + } -impl UnsafeVRFOutput { - /// Get the slot. - pub fn slot(&self) -> Slot { - self.slot + /// For testing purpose, we want a inner mutable ref. + #[cfg(test)] + pub fn inner_mut(&mut self) -> &mut BitVec { + &mut self.0 + } + + /// Returns the inner bitfield and consumes `self`. + pub fn into_inner(self) -> BitVec { + self.0 + } } - /// Compute the randomness associated with this VRF output. - pub fn compute_randomness( - self, - authorities: &[(babe_primitives::AuthorityId, babe_primitives::BabeAuthorityWeight)], - randomness: &babe_primitives::Randomness, - epoch_index: u64, - ) -> Result { - let author = match authorities.get(self.authority_index as usize) { - None => return Err(ApprovalError::AuthorityOutOfBounds(self.authority_index as _)), - Some(x) => &x.0, - }; + impl AsBitIndex for CandidateIndex { + fn as_bit_index(&self) -> BitIndex { + BitIndex(*self as usize) + } + } - let pubkey = schnorrkel::PublicKey::from_bytes(author.as_slice()) - .map_err(ApprovalError::SchnorrkelSignature)?; + impl AsBitIndex for CoreIndex { + fn as_bit_index(&self) -> BitIndex { + BitIndex(self.0 as usize) + } + } - let transcript = sp_consensus_babe::make_vrf_transcript(randomness, self.slot, epoch_index); + impl AsBitIndex for usize { + fn as_bit_index(&self) -> BitIndex { + BitIndex(*self) + } + } - let inout = self - .vrf_output - .0 - .attach_input_hash(&pubkey, transcript.0) - .map_err(ApprovalError::SchnorrkelSignature)?; - Ok(RelayVRFStory(inout.make_bytes(RELAY_VRF_STORY_CONTEXT))) + impl From for Bitfield + where + T: AsBitIndex, + { + fn from(value: T) -> Self { + Self( + { + let mut bv = bitvec::bitvec![u8, Lsb0; 0; value.as_bit_index().0 + 1]; + bv.set(value.as_bit_index().0, true); + bv + }, + Default::default(), + ) + } + } + + impl TryFrom> for Bitfield + where + T: Into>, + { + type Error = BitfieldError; + + fn try_from(mut value: Vec) -> Result { + if value.is_empty() { + return Err(BitfieldError::NullAssignment) + } + + let initial_bitfield = + value.pop().expect("Just checked above it's not empty; qed").into(); + + Ok(Self( + value.into_iter().fold(initial_bitfield.0, |initial_bitfield, element| { + let mut bitfield: Bitfield = element.into(); + bitfield + .0 + .resize(std::cmp::max(initial_bitfield.len(), bitfield.0.len()), false); + bitfield.0.bitor(initial_bitfield) + }), + Default::default(), + )) + } + } + + /// Certificate is changed compared to `AssignmentCertKind`: + /// - introduced RelayVRFModuloCompact + #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] + pub enum AssignmentCertKindV2 { + /// Multiple assignment stories based on the VRF 
that authorized the relay-chain block + /// where the candidates were included. + /// + /// The context is [`super::v2::RELAY_VRF_MODULO_CONTEXT`] + #[codec(index = 0)] + RelayVRFModuloCompact { + /// A bitfield representing the core indices claimed by this assignment. + core_bitfield: CoreBitfield, + }, + /// An assignment story based on the VRF that authorized the relay-chain block where the + /// candidate was included combined with the index of a particular core. + /// + /// The context is [`super::v1::RELAY_VRF_DELAY_CONTEXT`] + #[codec(index = 1)] + RelayVRFDelay { + /// The core index chosen in this cert. + core_index: CoreIndex, + }, + /// Deprectated assignment. Soon to be removed. + /// An assignment story based on the VRF that authorized the relay-chain block where the + /// candidate was included combined with a sample number. + /// + /// The context used to produce bytes is [`super::v1::RELAY_VRF_MODULO_CONTEXT`] + #[codec(index = 2)] + RelayVRFModulo { + /// The sample number used in this cert. + sample: u32, + }, + } + + /// A certification of assignment. + #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] + pub struct AssignmentCertV2 { + /// The criterion which is claimed to be met by this cert. + pub kind: AssignmentCertKindV2, + /// The VRF showing the criterion is met. + pub vrf: VrfSignature, + } + + impl From for AssignmentCertV2 { + fn from(cert: super::v1::AssignmentCert) -> Self { + Self { + kind: match cert.kind { + super::v1::AssignmentCertKind::RelayVRFDelay { core_index } => + AssignmentCertKindV2::RelayVRFDelay { core_index }, + super::v1::AssignmentCertKind::RelayVRFModulo { sample } => + AssignmentCertKindV2::RelayVRFModulo { sample }, + }, + vrf: cert.vrf, + } + } } -} -/// Extract the slot number and relay VRF from a header. -/// -/// This fails if either there is no BABE `PreRuntime` digest or -/// the digest has type `SecondaryPlain`, which Substrate nodes do -/// not produce or accept anymore. -pub fn babe_unsafe_vrf_info(header: &Header) -> Option { - use babe_primitives::digests::CompatibleDigestItem; - - for digest in &header.digest.logs { - if let Some(pre) = digest.as_babe_pre_digest() { - let slot = pre.slot(); - let authority_index = pre.authority_index(); - - return pre.vrf_signature().map(|sig| UnsafeVRFOutput { - vrf_output: sig.output.clone(), - slot, - authority_index, + /// Errors that can occur when trying to convert to/from assignment v1/v2 + #[derive(Debug)] + pub enum AssignmentConversionError { + /// Assignment certificate is not supported in v1. + CertificateNotSupported, + } + + impl TryFrom for super::v1::AssignmentCert { + type Error = AssignmentConversionError; + fn try_from(cert: AssignmentCertV2) -> Result { + Ok(Self { + kind: match cert.kind { + AssignmentCertKindV2::RelayVRFDelay { core_index } => + super::v1::AssignmentCertKind::RelayVRFDelay { core_index }, + AssignmentCertKindV2::RelayVRFModulo { sample } => + super::v1::AssignmentCertKind::RelayVRFModulo { sample }, + // Not supported + _ => return Err(AssignmentConversionError::CertificateNotSupported), + }, + vrf: cert.vrf, }) } } - None + /// An assignment criterion which refers to the candidate under which the assignment is + /// relevant by block hash. + #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] + pub struct IndirectAssignmentCertV2 { + /// A block hash where the candidate appears. + pub block_hash: Hash, + /// The validator index. + pub validator: ValidatorIndex, + /// The cert itself. 
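The fallible downgrade from v2 to v1 certificates above only succeeds for kinds that exist in both versions; `RelayVRFModuloCompact` has no v1 counterpart, so the conversion returns `CertificateNotSupported`. A compact standalone illustration of that pattern, using simplified enums rather than the real types:

#[derive(Debug)]
enum KindV1 {
    Modulo { sample: u32 },
    Delay { core_index: u32 },
}

#[derive(Debug)]
enum KindV2 {
    ModuloCompact { core_bitfield: Vec<u32> },
    Delay { core_index: u32 },
    Modulo { sample: u32 },
}

#[derive(Debug)]
struct NotSupported;

impl TryFrom<KindV2> for KindV1 {
    type Error = NotSupported;

    fn try_from(kind: KindV2) -> Result<Self, Self::Error> {
        match kind {
            KindV2::Delay { core_index } => Ok(KindV1::Delay { core_index }),
            KindV2::Modulo { sample } => Ok(KindV1::Modulo { sample }),
            // Multi-core assignments cannot be represented in v1.
            KindV2::ModuloCompact { .. } => Err(NotSupported),
        }
    }
}

fn main() {
    assert!(KindV1::try_from(KindV2::Delay { core_index: 3 }).is_ok());
    assert!(KindV1::try_from(KindV2::ModuloCompact { core_bitfield: vec![0, 1] }).is_err());
}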
+ pub cert: AssignmentCertV2, + } + + impl From for IndirectAssignmentCertV2 { + fn from(indirect_cert: super::v1::IndirectAssignmentCert) -> Self { + Self { + block_hash: indirect_cert.block_hash, + validator: indirect_cert.validator, + cert: indirect_cert.cert.into(), + } + } + } + + impl TryFrom for super::v1::IndirectAssignmentCert { + type Error = AssignmentConversionError; + fn try_from( + indirect_cert: IndirectAssignmentCertV2, + ) -> Result { + Ok(Self { + block_hash: indirect_cert.block_hash, + validator: indirect_cert.validator, + cert: indirect_cert.cert.try_into()?, + }) + } + } +} + +#[cfg(test)] +mod test { + use super::v2::{BitIndex, Bitfield}; + + use polkadot_primitives::{CandidateIndex, CoreIndex}; + + #[test] + fn test_assignment_bitfield_from_vec() { + let candidate_indices = vec![1u32, 7, 3, 10, 45, 8, 200, 2]; + let max_index = *candidate_indices.iter().max().unwrap(); + let bitfield = Bitfield::try_from(candidate_indices.clone()).unwrap(); + let candidate_indices = + candidate_indices.into_iter().map(|i| BitIndex(i as usize)).collect::>(); + + // Test 1 bits. + for index in candidate_indices.clone() { + assert!(bitfield.bit_at(index)); + } + + // Test 0 bits. + for index in 0..max_index { + if candidate_indices.contains(&BitIndex(index as usize)) { + continue + } + assert!(!bitfield.bit_at(BitIndex(index as usize))); + } + } + + #[test] + fn test_assignment_bitfield_invariant_msb() { + let core_indices = vec![CoreIndex(1), CoreIndex(3), CoreIndex(10), CoreIndex(20)]; + let mut bitfield = Bitfield::try_from(core_indices.clone()).unwrap(); + assert!(bitfield.inner_mut().pop().unwrap()); + + for i in 0..1024 { + assert!(Bitfield::try_from(CoreIndex(i)).unwrap().inner_mut().pop().unwrap()); + assert!(Bitfield::try_from(i).unwrap().inner_mut().pop().unwrap()); + } + } + + #[test] + fn test_assignment_bitfield_basic() { + let bitfield = Bitfield::try_from(CoreIndex(0)).unwrap(); + assert!(bitfield.bit_at(BitIndex(0))); + assert!(!bitfield.bit_at(BitIndex(1))); + assert_eq!(bitfield.len(), 1); + + let mut bitfield = Bitfield::try_from(20 as CandidateIndex).unwrap(); + assert!(bitfield.bit_at(BitIndex(20))); + assert_eq!(bitfield.inner_mut().count_ones(), 1); + assert_eq!(bitfield.len(), 21); + } } diff --git a/polkadot/node/primitives/src/lib.rs b/polkadot/node/primitives/src/lib.rs index 463c7f960ba089c26bda8d3dad916cd1641f5ef5..dab72bb2a5ed80b1842d4ca341c609109dee8c75 100644 --- a/polkadot/node/primitives/src/lib.rs +++ b/polkadot/node/primitives/src/lib.rs @@ -53,6 +53,13 @@ pub use disputes::{ ValidDisputeVote, ACTIVE_DURATION_SECS, }; +/// The current node version, which takes the basic SemVer form `..`. +/// In general, minor should be bumped on every release while major or patch releases are +/// relatively rare. +/// +/// The associated worker binaries should use the same version as the node that spawns them. 
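Given the requirement stated below that worker binaries use the same version as the node that spawns them, a version check before launching a worker could look roughly like the following. The helper name is hypothetical; the actual check lives in the PVF worker code, which is not part of this hunk:

/// Matches the constant introduced below.
const NODE_VERSION: &str = "1.1.0";

/// Refuse a worker binary whose embedded version differs from ours. A plain
/// string comparison suffices because both sides are built from the same
/// constant.
fn worker_version_ok(worker_version: &str) -> bool {
    worker_version == NODE_VERSION
}

fn main() {
    assert!(worker_version_ok("1.1.0"));
    assert!(!worker_version_ok("1.0.0"));
}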
+pub const NODE_VERSION: &'static str = "1.1.0"; + // For a 16-ary Merkle Prefix Trie, we can expect at most 16 32-byte hashes per node // plus some overhead: // header 1 + bitmap 2 + max partial_key 8 + children 16 * (32 + len 1) + value 32 + value len 1 diff --git a/polkadot/node/service/Cargo.toml b/polkadot/node/service/Cargo.toml index ba5976fdceef89683150d68ec38e2d90ff803c6c..e7a4f4a825c0bc37713a95447b8c6336358c3c62 100644 --- a/polkadot/node/service/Cargo.toml +++ b/polkadot/node/service/Cargo.toml @@ -5,6 +5,7 @@ version = "1.0.0" authors.workspace = true edition.workspace = true license.workspace = true +description = "Utils to tie different Polkadot components together and allow instantiation of a node." [dependencies] # Substrate Client @@ -74,22 +75,21 @@ frame-benchmarking-cli = { path = "../../../substrate/utils/frame/benchmarking-c frame-benchmarking = { path = "../../../substrate/frame/benchmarking" } # External Crates +async-trait = "0.1.57" futures = "0.3.21" hex-literal = "0.4.1" +is_executable = "1.0.1" gum = { package = "tracing-gum", path = "../gum" } +log = "0.4.17" +schnellru = "0.2.1" serde = { version = "1.0.188", features = ["derive"] } -serde_json = "1.0.107" +serde_json = "1.0.108" thiserror = "1.0.48" kvdb = "0.13.0" kvdb-rocksdb = { version = "0.19.0", optional = true } parity-db = { version = "0.4.8", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1" } -async-trait = "0.1.57" -schnellru = "0.2.1" -log = "0.4.17" -is_executable = "1.0.1" - # Polkadot polkadot-core-primitives = { path = "../../core-primitives" } polkadot-node-core-parachains-inherent = { path = "../core/parachains-inherent" } @@ -140,6 +140,7 @@ polkadot-statement-distribution = { path = "../network/statement-distribution", [dev-dependencies] polkadot-test-client = { path = "../test/client" } polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" } +test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../primitives/test-helpers" } env_logger = "0.9.0" assert_matches = "1.5.0" serial_test = "2.0.0" @@ -223,3 +224,7 @@ runtime-metrics = [ "rococo-runtime?/runtime-metrics", "westend-runtime?/runtime-metrics", ] + +network-protocol-staging = [ + "polkadot-node-network-protocol/network-protocol-staging", +] diff --git a/polkadot/node/service/chain-specs/kusama.json b/polkadot/node/service/chain-specs/kusama.json index 4ee48cf7d9840caaa434f27767957b3f6cfb98da..6676bbe154b0591f4271aa727b64a9bdaf9e82ce 100644 --- a/polkadot/node/service/chain-specs/kusama.json +++ b/polkadot/node/service/chain-specs/kusama.json @@ -2,23 +2,12 @@ "name": "Kusama", "id": "ksmcc3", "bootNodes": [ - "/dns/kusama-connect-0.parity.io/tcp/443/wss/p2p/12D3KooWBjxpFhVNM9poSsMEfdnXJaSWSZQ7otK9aV1SPA9zJp5W", - "/dns/kusama-connect-1.parity.io/tcp/443/wss/p2p/12D3KooWAJRVca93jLm4zft4rtTLLxNV4ZrHPMBkbGy5XkXooBFt", - "/dns/kusama-connect-2.parity.io/tcp/443/wss/p2p/12D3KooWLn22TSPR3HXMRSSmWoK4pkDtspdCVi5j86QyyUNViDeL", - "/dns/kusama-connect-3.parity.io/tcp/443/wss/p2p/12D3KooWSwnJSP3QJ6cnFCTpcXq4EEFotVEiQuCWVprzCnWj5e4G", - "/dns/kusama-connect-4.parity.io/tcp/443/wss/p2p/12D3KooWHi7zHUev7n1zs9kSQwh4KMPJcS8Jky2JN58cNabcXGvK", - "/dns/kusama-connect-5.parity.io/tcp/443/wss/p2p/12D3KooWMBF6DXADrNLg6kNt1A1zmKzw478gJw79NmTQhSDxuZvR", - "/dns/kusama-connect-6.parity.io/tcp/443/wss/p2p/12D3KooWNnG7YqYB9eEoACRuSEax8qhuPQzRn878AWKN4vUUtQXd", - "/dns/kusama-connect-7.parity.io/tcp/443/wss/p2p/12D3KooWMmtoLnkVCGyuCpsWw4zoNtWPH4nsVLn92mutvjQknEqR", - 
"/dns/p2p.0.kusama.network/tcp/30333/p2p/12D3KooWJDohybWd7FvRmyeGjgi56yy36mRWLHmgRprFdUadUt6b", - "/dns/p2p.1.kusama.network/tcp/30333/p2p/12D3KooWC7dnTvDY97afoLrvQSBrh7dDFEkWniTwyxAsBjfpaZk6", - "/dns/p2p.2.kusama.network/tcp/30333/p2p/12D3KooWGGK6Mj1pWF1bk4R1HjBQ4E7bgkfSJ5gmEfVRuwRZapT5", - "/dns/p2p.3.kusama.network/tcp/30333/p2p/12D3KooWRp4qgusMiUobJ9Uw1XAwtsokqx9YwgHDv5wQXjxqETji", - "/dns/p2p.4.kusama.network/tcp/30333/p2p/12D3KooWMVXPbqWR1erNKRSWDVPjcAQ9XtxqLTVzV4ccox9Y8KNL", - "/dns/p2p.5.kusama.network/tcp/30333/p2p/12D3KooWBsJKGJFuv83ixryzMsUS53A8JzEVeTA8PGi4U6T2dnif", - "/dns/kusama-bootnode-0.paritytech.net/tcp/30333/p2p/12D3KooWSueCPH3puP2PcvqPJdNaDNF3jMZjtJtDiSy35pWrbt5h", - "/dns/kusama-bootnode-0.paritytech.net/tcp/30334/ws/p2p/12D3KooWSueCPH3puP2PcvqPJdNaDNF3jMZjtJtDiSy35pWrbt5h", - "/dns/kusama-bootnode-1.paritytech.net/tcp/30333/p2p/12D3KooWQKqane1SqWJNWMQkbia9qiMWXkcHtAdfW5eVF8hbwEDw", + "/dns/kusama-bootnode-0.polkadot.io/tcp/30333/p2p/12D3KooWSueCPH3puP2PcvqPJdNaDNF3jMZjtJtDiSy35pWrbt5h", + "/dns/kusama-bootnode-0.polkadot.io/tcp/30334/ws/p2p/12D3KooWSueCPH3puP2PcvqPJdNaDNF3jMZjtJtDiSy35pWrbt5h", + "/dns/kusama-bootnode-0.polkadot.io/tcp/443/wss/p2p/12D3KooWSueCPH3puP2PcvqPJdNaDNF3jMZjtJtDiSy35pWrbt5h", + "/dns/kusama-bootnode-1.polkadot.io/tcp/30333/p2p/12D3KooWQKqane1SqWJNWMQkbia9qiMWXkcHtAdfW5eVF8hbwEDw", + "/dns/kusama-bootnode-1.polkadot.io/tcp/30334/ws/p2p/12D3KooWQKqane1SqWJNWMQkbia9qiMWXkcHtAdfW5eVF8hbwEDw", + "/dns/kusama-bootnode-1.polkadot.io/tcp/443/wss/p2p/12D3KooWQKqane1SqWJNWMQkbia9qiMWXkcHtAdfW5eVF8hbwEDw", "/dns/kusama-boot.dwellir.com/tcp/30333/ws/p2p/12D3KooWFj2ndawdYyk2spc42Y2arYwb2TUoHLHFAsKuHRzWXwoJ", "/dns/kusama-boot.dwellir.com/tcp/443/wss/p2p/12D3KooWFj2ndawdYyk2spc42Y2arYwb2TUoHLHFAsKuHRzWXwoJ", "/dns/boot.stake.plus/tcp/31333/p2p/12D3KooWLa1UyG5xLPds2GbiRBCTJjpsVwRWHWN7Dff14yiNJRpR", diff --git a/polkadot/node/service/chain-specs/polkadot.json b/polkadot/node/service/chain-specs/polkadot.json index 6452c892715cf3525d1d033de6d59e420b86eb78..533492088161b3fb7e3862b706ef22716b30ee71 100644 --- a/polkadot/node/service/chain-specs/polkadot.json +++ b/polkadot/node/service/chain-specs/polkadot.json @@ -3,22 +3,12 @@ "id": "polkadot", "chainType": "Live", "bootNodes": [ - "/dns/polkadot-connect-0.parity.io/tcp/443/wss/p2p/12D3KooWEPmjoRpDSUuiTjvyNDd8fejZ9eNWH5bE965nyBMDrB4o", - "/dns/polkadot-connect-1.parity.io/tcp/443/wss/p2p/12D3KooWLvcA24g6sT9YTaQyinwowMbLF5z7iMLoxZpEiV9pSmNf", - "/dns/polkadot-connect-2.parity.io/tcp/443/wss/p2p/12D3KooWDhp18HYzJuVX2jLhtjQgAhT1XWGqah42StoUJpkLvh2o", - "/dns/polkadot-connect-3.parity.io/tcp/443/wss/p2p/12D3KooWEsPEadSjLAPyxckqVJkp54aVdPuX3DD6a1FTL2y5cB9x", - "/dns/polkadot-connect-4.parity.io/tcp/443/wss/p2p/12D3KooWFfG1SQvcPoUK2N41cx7r52KYXKpRtZxfLZk8xtVzpp4d", - "/dns/polkadot-connect-5.parity.io/tcp/443/wss/p2p/12D3KooWDmQPkBvQGg9wjBdFThtWj3QCDVQyHJ1apfWrHvjwbYS8", - "/dns/polkadot-connect-6.parity.io/tcp/443/wss/p2p/12D3KooWBKtPpCnVTTzD7fPpCdFsrsYZ5K8fwmsLabb1JBuCycYs", - "/dns/polkadot-connect-7.parity.io/tcp/443/wss/p2p/12D3KooWP3BsFY6UaiLjEJ3YbDp6q6SMQgAHB15qKj41DUZQLMqD", - "/dns/p2p.0.polkadot.network/tcp/30333/p2p/12D3KooWHsvEicXjWWraktbZ4MQBizuyADQtuEGr3NbDvtm5rFA5", - "/dns/p2p.1.polkadot.network/tcp/30333/p2p/12D3KooWQz2q2UWVCiy9cFX1hHYEmhSKQB2hjEZCccScHLGUPjcc", - "/dns/p2p.2.polkadot.network/tcp/30333/p2p/12D3KooWNHxjYbDLLbDNZ2tq1kXgif5MSiLTUWJKcDdedKu4KaG8", - "/dns/p2p.3.polkadot.network/tcp/30333/p2p/12D3KooWGJQysxrQcSvUWWNw88RkqYvJhH3ZcDpWJ8zrXKhLP5Vr", - 
"/dns/p2p.4.polkadot.network/tcp/30333/p2p/12D3KooWKer8bYqpYjwurVABu13mkELpX2X7mSpEicpjShLeg7D6", - "/dns/p2p.5.polkadot.network/tcp/30333/p2p/12D3KooWSRjL9LcEQd5u2fQTbyLxTEHq1tUFgQ6amXSp8Eu7TfKP", - "/dns/cc1-0.parity.tech/tcp/30333/p2p/12D3KooWSz8r2WyCdsfWHgPyvD8GKQdJ1UAiRmrcrs8sQB3fe2KU", - "/dns/cc1-1.parity.tech/tcp/30333/p2p/12D3KooWFN2mhgpkJsDBuNuE5427AcDrsib8EoqGMZmkxWwx3Md4", + "/dns/polkadot-bootnode-0.polkadot.io/tcp/30333/p2p/12D3KooWSz8r2WyCdsfWHgPyvD8GKQdJ1UAiRmrcrs8sQB3fe2KU", + "/dns/polkadot-bootnode-0.polkadot.io/tcp/30334/ws/p2p/12D3KooWSz8r2WyCdsfWHgPyvD8GKQdJ1UAiRmrcrs8sQB3fe2KU", + "/dns/polkadot-bootnode-0.polkadot.io/tcp/443/wss/p2p/12D3KooWSz8r2WyCdsfWHgPyvD8GKQdJ1UAiRmrcrs8sQB3fe2KU", + "/dns/polkadot-bootnode-1.polkadot.io/tcp/30333/p2p/12D3KooWFN2mhgpkJsDBuNuE5427AcDrsib8EoqGMZmkxWwx3Md4", + "/dns/polkadot-bootnode-1.polkadot.io/tcp/30334/ws/p2p/12D3KooWFN2mhgpkJsDBuNuE5427AcDrsib8EoqGMZmkxWwx3Md4", + "/dns/polkadot-bootnode-1.polkadot.io/tcp/443/wss/p2p/12D3KooWFN2mhgpkJsDBuNuE5427AcDrsib8EoqGMZmkxWwx3Md4", "/dns/polkadot-boot.dwellir.com/tcp/30334/ws/p2p/12D3KooWKvdDyRKqUfSAaUCbYiLwKY8uK3wDWpCuy2FiDLbkPTDJ", "/dns/polkadot-boot.dwellir.com/tcp/443/wss/p2p/12D3KooWKvdDyRKqUfSAaUCbYiLwKY8uK3wDWpCuy2FiDLbkPTDJ", "/dns/boot.stake.plus/tcp/30333/p2p/12D3KooWKT4ZHNxXH4icMjdrv7EwWBkfbz5duxE5sdJKKeWFYi5n", diff --git a/polkadot/node/service/chain-specs/rococo.json b/polkadot/node/service/chain-specs/rococo.json index 43dc959b57677145629019e0b57924f5a251f280..e16962981e19af5d3c66af5a4157458b2faabf60 100644 --- a/polkadot/node/service/chain-specs/rococo.json +++ b/polkadot/node/service/chain-specs/rococo.json @@ -3,22 +3,12 @@ "id": "rococo_v2_2", "chainType": "Live", "bootNodes": [ - "/dns/rococo-bootnode-0.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWGikJMBmRiG5ofCqn8aijCijgfmZR5H9f53yUF3srm6Nm", - "/dns/rococo-bootnode-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWLDfH9mHRCidrd5NfQjp7rRMUcJSEUwSvEKyu7xU2cG3d", - "/dns/rococo-bootnode-2.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWSikgbrcWjVgSed7r1uXk4TeAieDnHKtrPDVZBu5XkQha", - "/dns/rococo-bootnode-3.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWPeKuW1BBPv4pNr8xqEv7jqy7rQnS3oq9U7xTCvj9qt2k", - "/dns/rococo-bootnode-4.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWNy7K8TNaP2Whcp3tsjBVUg2HcKMUvAArsimjvd1g31w4", - "/dns/rococo-bootnode-5.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWAVV9DZfvJp2brvs5zcQDTBFxNmEFJKy2dsvezWL4Bmy8", - "/dns/rococo-bootnode-6.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWM3hvXvaShyp7drQCavFHuwobkYdnCp2uHU5iRRAQwsw2", - "/dns/rococo-bootnode-7.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWSbGtxfWCwn1tdmfZYESbmxzbTG2LKwKUrioDaZBcdMY4", + "/dns/rococo-bootnode-0.polkadot.io/tcp/30333/p2p/12D3KooWGikJMBmRiG5ofCqn8aijCijgfmZR5H9f53yUF3srm6Nm", + "/dns/rococo-bootnode-1.polkadot.io/tcp/30333/p2p/12D3KooWLDfH9mHRCidrd5NfQjp7rRMUcJSEUwSvEKyu7xU2cG3d", + "/dns/rococo-bootnode-0.polkadot.io/tcp/30334/ws/p2p/12D3KooWGikJMBmRiG5ofCqn8aijCijgfmZR5H9f53yUF3srm6Nm", + "/dns/rococo-bootnode-1.polkadot.io/tcp/30334/ws/p2p/12D3KooWLDfH9mHRCidrd5NfQjp7rRMUcJSEUwSvEKyu7xU2cG3d", "/dns/rococo-bootnode-0.polkadot.io/tcp/443/wss/p2p/12D3KooWGikJMBmRiG5ofCqn8aijCijgfmZR5H9f53yUF3srm6Nm", - "/dns/rococo-bootnode-1.polkadot.io/tcp/443/wss/p2p/12D3KooWLDfH9mHRCidrd5NfQjp7rRMUcJSEUwSvEKyu7xU2cG3d", - "/dns/rococo-bootnode-2.polkadot.io/tcp/443/wss/p2p/12D3KooWSikgbrcWjVgSed7r1uXk4TeAieDnHKtrPDVZBu5XkQha", - 
"/dns/rococo-bootnode-3.polkadot.io/tcp/443/wss/p2p/12D3KooWPeKuW1BBPv4pNr8xqEv7jqy7rQnS3oq9U7xTCvj9qt2k", - "/dns/rococo-bootnode-4.polkadot.io/tcp/443/wss/p2p/12D3KooWNy7K8TNaP2Whcp3tsjBVUg2HcKMUvAArsimjvd1g31w4", - "/dns/rococo-bootnode-5.polkadot.io/tcp/443/wss/p2p/12D3KooWAVV9DZfvJp2brvs5zcQDTBFxNmEFJKy2dsvezWL4Bmy8", - "/dns/rococo-bootnode-6.polkadot.io/tcp/443/wss/p2p/12D3KooWM3hvXvaShyp7drQCavFHuwobkYdnCp2uHU5iRRAQwsw2", - "/dns/rococo-bootnode-7.polkadot.io/tcp/443/wss/p2p/12D3KooWSbGtxfWCwn1tdmfZYESbmxzbTG2LKwKUrioDaZBcdMY4" + "/dns/rococo-bootnode-1.polkadot.io/tcp/443/wss/p2p/12D3KooWLDfH9mHRCidrd5NfQjp7rRMUcJSEUwSvEKyu7xU2cG3d" ], "telemetryEndpoints": [ [ diff --git a/polkadot/node/service/chain-specs/westend.json b/polkadot/node/service/chain-specs/westend.json index e57786f78a6419748ab85f5c0a9f02f6e9a48029..b2ffba9304bcc840a2b869a57bc549d2cdf95bda 100644 --- a/polkadot/node/service/chain-specs/westend.json +++ b/polkadot/node/service/chain-specs/westend.json @@ -2,16 +2,12 @@ "name": "Westend", "id": "westend2", "bootNodes": [ - "/dns/0.westend.paritytech.net/tcp/30333/p2p/12D3KooWKer94o1REDPtAhjtYR4SdLehnSrN8PEhBnZm5NBoCrMC", - "/dns/0.westend.paritytech.net/tcp/30334/ws/p2p/12D3KooWKer94o1REDPtAhjtYR4SdLehnSrN8PEhBnZm5NBoCrMC", - "/dns/1.westend.paritytech.net/tcp/30333/p2p/12D3KooWPVPzs42GvRBShdUMtFsk4SvnByrSdWqb6aeAAHvLMSLS", - "/dns/1.westend.paritytech.net/tcp/30334/ws/p2p/12D3KooWPVPzs42GvRBShdUMtFsk4SvnByrSdWqb6aeAAHvLMSLS", - "/dns/2.westend.paritytech.net/tcp/30333/p2p/12D3KooWByVpK92hMi9CzTjyFg9cPHDU5ariTM3EPMq9vdh5S5Po", - "/dns/2.westend.paritytech.net/tcp/30334/ws/p2p/12D3KooWByVpK92hMi9CzTjyFg9cPHDU5ariTM3EPMq9vdh5S5Po", - "/dns/3.westend.paritytech.net/tcp/30333/p2p/12D3KooWGi1tCpKXLMYED9y28QXLnwgD4neYb1Arqq4QpeV1Sv3K", - "/dns/3.westend.paritytech.net/tcp/30334/ws/p2p/12D3KooWGi1tCpKXLMYED9y28QXLnwgD4neYb1Arqq4QpeV1Sv3K", - "/dns/westend-connect-0.polkadot.io/tcp/443/wss/p2p/12D3KooWNg8iUqhux7X7voNU9Nty5pzehrFJwkQwg1CJnqN3CTzE", - "/dns/westend-connect-1.polkadot.io/tcp/443/wss/p2p/12D3KooWAq2A7UNFS6725XFatD5QW7iYBezTLdAUx1SmRkxN79Ne", + "/dns/westend-bootnode-0.polkadot.io/tcp/30333/p2p/12D3KooWKer94o1REDPtAhjtYR4SdLehnSrN8PEhBnZm5NBoCrMC", + "/dns/westend-bootnode-0.polkadot.io/tcp/30334/ws/p2p/12D3KooWKer94o1REDPtAhjtYR4SdLehnSrN8PEhBnZm5NBoCrMC", + "/dns/westend-bootnode-0.polkadot.io/tcp/443/wss/p2p/12D3KooWKer94o1REDPtAhjtYR4SdLehnSrN8PEhBnZm5NBoCrMC", + "/dns/westend-bootnode-1.polkadot.io/tcp/30333/p2p/12D3KooWPVPzs42GvRBShdUMtFsk4SvnByrSdWqb6aeAAHvLMSLS", + "/dns/westend-bootnode-1.polkadot.io/tcp/30334/ws/p2p/12D3KooWPVPzs42GvRBShdUMtFsk4SvnByrSdWqb6aeAAHvLMSLS", + "/dns/westend-bootnode-1.polkadot.io/tcp/443/wss/p2p/12D3KooWPVPzs42GvRBShdUMtFsk4SvnByrSdWqb6aeAAHvLMSLS", "/dns/boot.stake.plus/tcp/32333/p2p/12D3KooWK8fjVoSvMq5copQYMsdYreSGPGgcMbGMgbMDPfpf3sm7", "/dns/boot.stake.plus/tcp/32334/wss/p2p/12D3KooWK8fjVoSvMq5copQYMsdYreSGPGgcMbGMgbMDPfpf3sm7", "/dns/boot-node.helikon.io/tcp/7080/p2p/12D3KooWRFDPyT8vA8mLzh6dJoyujn4QNjeqi6Ch79eSMz9beKXC", diff --git a/polkadot/node/service/src/chain_spec.rs b/polkadot/node/service/src/chain_spec.rs index ce25c08b8773059497cdc9624f9fb1cd65d20031..871d7e82911ab2494dd5aac22a625bd2938222c3 100644 --- a/polkadot/node/service/src/chain_spec.rs +++ b/polkadot/node/service/src/chain_spec.rs @@ -75,7 +75,7 @@ pub type GenericChainSpec = service::GenericChainSpec<(), Extensions>; /// The `ChainSpec` parameterized for the westend runtime. 
#[cfg(feature = "westend-native")] -pub type WestendChainSpec = service::GenericChainSpec; +pub type WestendChainSpec = service::GenericChainSpec<(), Extensions>; /// The `ChainSpec` parameterized for the westend runtime. // Dummy chain spec, but that is fine when we don't have the native runtime. @@ -84,12 +84,7 @@ pub type WestendChainSpec = GenericChainSpec; /// The `ChainSpec` parameterized for the rococo runtime. #[cfg(feature = "rococo-native")] -pub type RococoChainSpec = service::GenericChainSpec; - -/// The `ChainSpec` parameterized for the `versi` runtime. -/// -/// As of now `Versi` will just be a clone of `Rococo`, until we need it to differ. -pub type VersiChainSpec = RococoChainSpec; +pub type RococoChainSpec = service::GenericChainSpec<(), Extensions>; /// The `ChainSpec` parameterized for the rococo runtime. // Dummy chain spec, but that is fine when we don't have the native runtime. @@ -206,7 +201,7 @@ fn rococo_session_keys( } #[cfg(feature = "westend-native")] -fn westend_staging_testnet_config_genesis(wasm_binary: &[u8]) -> westend::RuntimeGenesisConfig { +fn westend_staging_testnet_config_genesis() -> serde_json::Value { use hex_literal::hex; use sp_core::crypto::UncheckedInto; @@ -344,19 +339,16 @@ fn westend_staging_testnet_config_genesis(wasm_binary: &[u8]) -> westend::Runtim const ENDOWMENT: u128 = 1_000_000 * WND; const STASH: u128 = 100 * WND; - westend::RuntimeGenesisConfig { - system: westend::SystemConfig { code: wasm_binary.to_vec(), ..Default::default() }, - balances: westend::BalancesConfig { - balances: endowed_accounts + serde_json::json!({ + "balances": { + "balances": endowed_accounts .iter() .map(|k: &AccountId| (k.clone(), ENDOWMENT)) .chain(initial_authorities.iter().map(|x| (x.0.clone(), STASH))) - .collect(), + .collect::>(), }, - beefy: Default::default(), - indices: westend::IndicesConfig { indices: vec![] }, - session: westend::SessionConfig { - keys: initial_authorities + "session": { + "keys": initial_authorities .iter() .map(|x| { ( @@ -375,51 +367,32 @@ fn westend_staging_testnet_config_genesis(wasm_binary: &[u8]) -> westend::Runtim }) .collect::>(), }, - staking: westend::StakingConfig { - validator_count: 50, - minimum_validator_count: 4, - stakers: initial_authorities + "staking": { + "validatorCount": 50, + "minimumValidatorCount": 4, + "stakers": initial_authorities .iter() - .map(|x| (x.0.clone(), x.0.clone(), STASH, westend::StakerStatus::Validator)) - .collect(), - invulnerables: initial_authorities.iter().map(|x| x.0.clone()).collect(), - force_era: Forcing::ForceNone, - slash_reward_fraction: Perbill::from_percent(10), - ..Default::default() - }, - babe: westend::BabeConfig { - authorities: Default::default(), - epoch_config: Some(westend::BABE_GENESIS_EPOCH_CONFIG), - ..Default::default() + .map(|x| (x.0.clone(), x.0.clone(), STASH, westend::StakerStatus::::Validator)) + .collect::>(), + "invulnerables": initial_authorities.iter().map(|x| x.0.clone()).collect::>(), + "forceEra": Forcing::ForceNone, + "slashRewardFraction": Perbill::from_percent(10), }, - grandpa: Default::default(), - im_online: Default::default(), - authority_discovery: westend::AuthorityDiscoveryConfig { - keys: vec![], - ..Default::default() + "babe": { + "epochConfig": Some(westend::BABE_GENESIS_EPOCH_CONFIG), }, - vesting: westend::VestingConfig { vesting: vec![] }, - sudo: westend::SudoConfig { key: Some(endowed_accounts[0].clone()) }, - hrmp: Default::default(), - treasury: Default::default(), - configuration: westend::ConfigurationConfig { - config: 
default_parachains_host_configuration(), + "sudo": { "key": Some(endowed_accounts[0].clone()) }, + "configuration": { + "config": default_parachains_host_configuration(), }, - paras: Default::default(), - registrar: westend_runtime::RegistrarConfig { - next_free_para_id: polkadot_primitives::LOWEST_PUBLIC_ID, - ..Default::default() + "registrar": { + "nextFreeParaId": polkadot_primitives::LOWEST_PUBLIC_ID, }, - xcm_pallet: Default::default(), - nomination_pools: Default::default(), - assigned_slots: Default::default(), - } + }) } #[cfg(feature = "rococo-native")] -fn rococo_staging_testnet_config_genesis( - wasm_binary: &[u8], -) -> rococo_runtime::RuntimeGenesisConfig { +fn rococo_staging_testnet_config_genesis() -> serde_json::Value { use hex_literal::hex; use sp_core::crypto::UncheckedInto; @@ -662,19 +635,16 @@ fn rococo_staging_testnet_config_genesis( const ENDOWMENT: u128 = 1_000_000 * ROC; const STASH: u128 = 100 * ROC; - rococo_runtime::RuntimeGenesisConfig { - system: rococo_runtime::SystemConfig { code: wasm_binary.to_vec(), ..Default::default() }, - balances: rococo_runtime::BalancesConfig { - balances: endowed_accounts + serde_json::json!({ + "balances": { + "balances": endowed_accounts .iter() .map(|k: &AccountId| (k.clone(), ENDOWMENT)) .chain(initial_authorities.iter().map(|x| (x.0.clone(), STASH))) - .collect(), + .collect::>(), }, - beefy: Default::default(), - indices: rococo_runtime::IndicesConfig { indices: vec![] }, - session: rococo_runtime::SessionConfig { - keys: initial_authorities + "session": { + "keys": initial_authorities .iter() .map(|x| { ( @@ -693,80 +663,55 @@ fn rococo_staging_testnet_config_genesis( }) .collect::>(), }, - babe: rococo_runtime::BabeConfig { - authorities: Default::default(), - epoch_config: Some(rococo_runtime::BABE_GENESIS_EPOCH_CONFIG), - ..Default::default() + "babe": { + "epochConfig": Some(rococo_runtime::BABE_GENESIS_EPOCH_CONFIG), }, - grandpa: Default::default(), - im_online: Default::default(), - treasury: Default::default(), - authority_discovery: rococo_runtime::AuthorityDiscoveryConfig { - keys: vec![], - ..Default::default() + "sudo": { "key": Some(endowed_accounts[0].clone()) }, + "configuration": { + "config": default_parachains_host_configuration(), }, - claims: rococo::ClaimsConfig { claims: vec![], vesting: vec![] }, - vesting: rococo::VestingConfig { vesting: vec![] }, - sudo: rococo_runtime::SudoConfig { key: Some(endowed_accounts[0].clone()) }, - paras: rococo_runtime::ParasConfig { paras: vec![], ..Default::default() }, - hrmp: Default::default(), - configuration: rococo_runtime::ConfigurationConfig { - config: default_parachains_host_configuration(), + "registrar": { + "nextFreeParaId": polkadot_primitives::LOWEST_PUBLIC_ID, }, - registrar: rococo_runtime::RegistrarConfig { - next_free_para_id: polkadot_primitives::LOWEST_PUBLIC_ID, - ..Default::default() - }, - xcm_pallet: Default::default(), - nis_counterpart_balances: Default::default(), - assigned_slots: Default::default(), - } + }) } /// Westend staging testnet config. 
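With genesis now supplied as a serde_json patch instead of a fully populated `RuntimeGenesisConfig`, only the pallets that deviate from the runtime defaults need to appear, and field names use the camelCase serde form (`validatorCount`, `nextFreeParaId`, and so on), as in the patches above. A minimal sketch of such a patch, with a deliberately truncated placeholder account value:

    // Hypothetical minimal patch: anything not mentioned keeps the runtime's default genesis.
    let patch = serde_json::json!({
        "balances": { "balances": [["5GrwvaEF...", 1_000_000u64]] },
        "sudo": { "key": "5GrwvaEF..." },
    });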
#[cfg(feature = "westend-native")] pub fn westend_staging_testnet_config() -> Result { - let wasm_binary = westend::WASM_BINARY.ok_or("Westend development wasm not available")?; - let boot_nodes = vec![]; - - Ok(WestendChainSpec::from_genesis( - "Westend Staging Testnet", - "westend_staging_testnet", - ChainType::Live, - move || westend_staging_testnet_config_genesis(wasm_binary), - boot_nodes, - Some( - TelemetryEndpoints::new(vec![(WESTEND_STAGING_TELEMETRY_URL.to_string(), 0)]) - .expect("Westend Staging telemetry url is valid; qed"), - ), - Some(DEFAULT_PROTOCOL_ID), - None, - None, + Ok(WestendChainSpec::builder( + westend::WASM_BINARY.ok_or("Westend development wasm not available")?, Default::default(), - )) + ) + .with_name("Westend Staging Testnet") + .with_id("westend_staging_testnet") + .with_chain_type(ChainType::Live) + .with_genesis_config_patch(westend_staging_testnet_config_genesis()) + .with_telemetry_endpoints( + TelemetryEndpoints::new(vec![(WESTEND_STAGING_TELEMETRY_URL.to_string(), 0)]) + .expect("Westend Staging telemetry url is valid; qed"), + ) + .with_protocol_id(DEFAULT_PROTOCOL_ID) + .build()) } /// Rococo staging testnet config. #[cfg(feature = "rococo-native")] pub fn rococo_staging_testnet_config() -> Result { - let wasm_binary = rococo::WASM_BINARY.ok_or("Rococo development wasm not available")?; - let boot_nodes = vec![]; - - Ok(RococoChainSpec::from_genesis( - "Rococo Staging Testnet", - "rococo_staging_testnet", - ChainType::Live, - move || rococo_staging_testnet_config_genesis(wasm_binary), - boot_nodes, - Some( - TelemetryEndpoints::new(vec![(ROCOCO_STAGING_TELEMETRY_URL.to_string(), 0)]) - .expect("Rococo Staging telemetry url is valid; qed"), - ), - Some(DEFAULT_PROTOCOL_ID), - None, - None, + Ok(RococoChainSpec::builder( + rococo::WASM_BINARY.ok_or("Rococo development wasm not available")?, Default::default(), - )) + ) + .with_name("Rococo Staging Testnet") + .with_id("rococo_staging_testnet") + .with_chain_type(ChainType::Live) + .with_genesis_config_patch(rococo_staging_testnet_config_genesis()) + .with_telemetry_endpoints( + TelemetryEndpoints::new(vec![(ROCOCO_STAGING_TELEMETRY_URL.to_string(), 0)]) + .expect("Rococo Staging telemetry url is valid; qed"), + ) + .with_protocol_id(DEFAULT_PROTOCOL_ID) + .build()) } pub fn versi_chain_spec_properties() -> serde_json::map::Map { @@ -783,24 +728,21 @@ pub fn versi_chain_spec_properties() -> serde_json::map::Map Result { - let wasm_binary = rococo::WASM_BINARY.ok_or("Versi development wasm not available")?; - let boot_nodes = vec![]; - - Ok(RococoChainSpec::from_genesis( - "Versi Staging Testnet", - "versi_staging_testnet", - ChainType::Live, - move || rococo_staging_testnet_config_genesis(wasm_binary), - boot_nodes, - Some( - TelemetryEndpoints::new(vec![(VERSI_STAGING_TELEMETRY_URL.to_string(), 0)]) - .expect("Versi Staging telemetry url is valid; qed"), - ), - Some("versi"), - None, - Some(versi_chain_spec_properties()), + Ok(RococoChainSpec::builder( + rococo::WASM_BINARY.ok_or("Versi development wasm not available")?, Default::default(), - )) + ) + .with_name("Versi Staging Testnet") + .with_id("versi_staging_testnet") + .with_chain_type(ChainType::Live) + .with_genesis_config_patch(rococo_staging_testnet_config_genesis()) + .with_telemetry_endpoints( + TelemetryEndpoints::new(vec![(VERSI_STAGING_TELEMETRY_URL.to_string(), 0)]) + .expect("Versi Staging telemetry url is valid; qed"), + ) + .with_protocol_id("versi") + .with_properties(versi_chain_spec_properties()) + .build()) } /// Helper function 
to generate a crypto pair from seed @@ -879,10 +821,9 @@ fn testnet_accounts() -> Vec { ] } -/// Helper function to create westend `RuntimeGenesisConfig` for testing +/// Helper function to create westend runtime `GenesisConfig` patch for testing #[cfg(feature = "westend-native")] pub fn westend_testnet_genesis( - wasm_binary: &[u8], initial_authorities: Vec<( AccountId, AccountId, @@ -896,21 +837,18 @@ pub fn westend_testnet_genesis( )>, root_key: AccountId, endowed_accounts: Option>, -) -> westend::RuntimeGenesisConfig { +) -> serde_json::Value { let endowed_accounts: Vec = endowed_accounts.unwrap_or_else(testnet_accounts); const ENDOWMENT: u128 = 1_000_000 * WND; const STASH: u128 = 100 * WND; - westend::RuntimeGenesisConfig { - system: westend::SystemConfig { code: wasm_binary.to_vec(), ..Default::default() }, - indices: westend::IndicesConfig { indices: vec![] }, - balances: westend::BalancesConfig { - balances: endowed_accounts.iter().map(|k| (k.clone(), ENDOWMENT)).collect(), + serde_json::json!({ + "balances": { + "balances": endowed_accounts.iter().map(|k| (k.clone(), ENDOWMENT)).collect::>(), }, - beefy: Default::default(), - session: westend::SessionConfig { - keys: initial_authorities + "session": { + "keys": initial_authorities .iter() .map(|x| { ( @@ -929,51 +867,33 @@ pub fn westend_testnet_genesis( }) .collect::>(), }, - staking: westend::StakingConfig { - minimum_validator_count: 1, - validator_count: initial_authorities.len() as u32, - stakers: initial_authorities + "staking": { + "minimumValidatorCount": 1, + "validatorCount": initial_authorities.len() as u32, + "stakers": initial_authorities .iter() - .map(|x| (x.0.clone(), x.0.clone(), STASH, westend::StakerStatus::Validator)) - .collect(), - invulnerables: initial_authorities.iter().map(|x| x.0.clone()).collect(), - force_era: Forcing::NotForcing, - slash_reward_fraction: Perbill::from_percent(10), - ..Default::default() - }, - babe: westend::BabeConfig { - authorities: Default::default(), - epoch_config: Some(westend::BABE_GENESIS_EPOCH_CONFIG), - ..Default::default() + .map(|x| (x.0.clone(), x.0.clone(), STASH, westend::StakerStatus::::Validator)) + .collect::>(), + "invulnerables": initial_authorities.iter().map(|x| x.0.clone()).collect::>(), + "forceEra": Forcing::NotForcing, + "slashRewardFraction": Perbill::from_percent(10), }, - grandpa: Default::default(), - im_online: Default::default(), - authority_discovery: westend::AuthorityDiscoveryConfig { - keys: vec![], - ..Default::default() + "babe": { + "epochConfig": Some(westend::BABE_GENESIS_EPOCH_CONFIG), }, - vesting: westend::VestingConfig { vesting: vec![] }, - sudo: westend::SudoConfig { key: Some(root_key) }, - hrmp: Default::default(), - treasury: Default::default(), - configuration: westend::ConfigurationConfig { - config: default_parachains_host_configuration(), + "sudo": { "key": Some(root_key) }, + "configuration": { + "config": default_parachains_host_configuration(), }, - paras: Default::default(), - registrar: westend_runtime::RegistrarConfig { - next_free_para_id: polkadot_primitives::LOWEST_PUBLIC_ID, - ..Default::default() + "registrar": { + "nextFreeParaId": polkadot_primitives::LOWEST_PUBLIC_ID, }, - xcm_pallet: Default::default(), - nomination_pools: Default::default(), - assigned_slots: Default::default(), - } + }) } -/// Helper function to create rococo `RuntimeGenesisConfig` for testing +/// Helper function to create rococo runtime `GenesisConfig` patch for testing #[cfg(feature = "rococo-native")] pub fn rococo_testnet_genesis( - 
wasm_binary: &[u8], initial_authorities: Vec<( AccountId, AccountId, @@ -987,20 +907,17 @@ pub fn rococo_testnet_genesis( )>, root_key: AccountId, endowed_accounts: Option>, -) -> rococo_runtime::RuntimeGenesisConfig { +) -> serde_json::Value { let endowed_accounts: Vec = endowed_accounts.unwrap_or_else(testnet_accounts); const ENDOWMENT: u128 = 1_000_000 * ROC; - rococo_runtime::RuntimeGenesisConfig { - system: rococo_runtime::SystemConfig { code: wasm_binary.to_vec(), ..Default::default() }, - beefy: Default::default(), - indices: rococo_runtime::IndicesConfig { indices: vec![] }, - balances: rococo_runtime::BalancesConfig { - balances: endowed_accounts.iter().map(|k| (k.clone(), ENDOWMENT)).collect(), + serde_json::json!({ + "balances": { + "balances": endowed_accounts.iter().map(|k| (k.clone(), ENDOWMENT)).collect::>(), }, - session: rococo_runtime::SessionConfig { - keys: initial_authorities + "session": { + "keys": initial_authorities .iter() .map(|x| { ( @@ -1019,43 +936,25 @@ pub fn rococo_testnet_genesis( }) .collect::>(), }, - babe: rococo_runtime::BabeConfig { - authorities: Default::default(), - epoch_config: Some(rococo_runtime::BABE_GENESIS_EPOCH_CONFIG), - ..Default::default() - }, - grandpa: Default::default(), - im_online: Default::default(), - treasury: Default::default(), - claims: rococo::ClaimsConfig { claims: vec![], vesting: vec![] }, - vesting: rococo::VestingConfig { vesting: vec![] }, - authority_discovery: rococo_runtime::AuthorityDiscoveryConfig { - keys: vec![], - ..Default::default() + "babe": { + "epochConfig": Some(rococo_runtime::BABE_GENESIS_EPOCH_CONFIG), }, - sudo: rococo_runtime::SudoConfig { key: Some(root_key.clone()) }, - hrmp: Default::default(), - configuration: rococo_runtime::ConfigurationConfig { - config: polkadot_runtime_parachains::configuration::HostConfiguration { + "sudo": { "key": Some(root_key.clone()) }, + "configuration": { + "config": polkadot_runtime_parachains::configuration::HostConfiguration { max_validators_per_core: Some(1), ..default_parachains_host_configuration() }, }, - paras: rococo_runtime::ParasConfig { paras: vec![], ..Default::default() }, - registrar: rococo_runtime::RegistrarConfig { - next_free_para_id: polkadot_primitives::LOWEST_PUBLIC_ID, - ..Default::default() - }, - xcm_pallet: Default::default(), - nis_counterpart_balances: Default::default(), - assigned_slots: Default::default(), - } + "registrar": { + "nextFreeParaId": polkadot_primitives::LOWEST_PUBLIC_ID, + } + }) } #[cfg(feature = "westend-native")] -fn westend_development_config_genesis(wasm_binary: &[u8]) -> westend::RuntimeGenesisConfig { +fn westend_development_config_genesis() -> serde_json::Value { westend_testnet_genesis( - wasm_binary, vec![get_authority_keys_from_seed("Alice")], get_account_id_from_seed::("Alice"), None, @@ -1063,9 +962,8 @@ fn westend_development_config_genesis(wasm_binary: &[u8]) -> westend::RuntimeGen } #[cfg(feature = "rococo-native")] -fn rococo_development_config_genesis(wasm_binary: &[u8]) -> rococo_runtime::RuntimeGenesisConfig { +fn rococo_development_config_genesis() -> serde_json::Value { rococo_testnet_genesis( - wasm_binary, vec![get_authority_keys_from_seed("Alice")], get_account_id_from_seed::("Alice"), None, @@ -1075,84 +973,67 @@ fn rococo_development_config_genesis(wasm_binary: &[u8]) -> rococo_runtime::Runt /// Westend development config (single validator Alice) #[cfg(feature = "westend-native")] pub fn westend_development_config() -> Result { - let wasm_binary = westend::WASM_BINARY.ok_or("Westend development 
wasm not available")?; - - Ok(WestendChainSpec::from_genesis( - "Development", - "westend_dev", - ChainType::Development, - move || westend_development_config_genesis(wasm_binary), - vec![], - None, - Some(DEFAULT_PROTOCOL_ID), - None, - None, + Ok(WestendChainSpec::builder( + westend::WASM_BINARY.ok_or("Westend development wasm not available")?, Default::default(), - )) + ) + .with_name("Development") + .with_id("westend_dev") + .with_chain_type(ChainType::Development) + .with_genesis_config_patch(westend_development_config_genesis()) + .with_protocol_id(DEFAULT_PROTOCOL_ID) + .build()) } /// Rococo development config (single validator Alice) #[cfg(feature = "rococo-native")] pub fn rococo_development_config() -> Result { - let wasm_binary = rococo::WASM_BINARY.ok_or("Rococo development wasm not available")?; - - Ok(RococoChainSpec::from_genesis( - "Development", - "rococo_dev", - ChainType::Development, - move || rococo_development_config_genesis(wasm_binary), - vec![], - None, - Some(DEFAULT_PROTOCOL_ID), - None, - None, + Ok(RococoChainSpec::builder( + rococo::WASM_BINARY.ok_or("Rococo development wasm not available")?, Default::default(), - )) + ) + .with_name("Development") + .with_id("rococo_dev") + .with_chain_type(ChainType::Development) + .with_genesis_config_patch(rococo_development_config_genesis()) + .with_protocol_id(DEFAULT_PROTOCOL_ID) + .build()) } /// `Versi` development config (single validator Alice) #[cfg(feature = "rococo-native")] pub fn versi_development_config() -> Result { - let wasm_binary = rococo::WASM_BINARY.ok_or("Versi development wasm not available")?; - - Ok(RococoChainSpec::from_genesis( - "Development", - "versi_dev", - ChainType::Development, - move || rococo_development_config_genesis(wasm_binary), - vec![], - None, - Some("versi"), - None, - None, + Ok(RococoChainSpec::builder( + rococo::WASM_BINARY.ok_or("Versi development wasm not available")?, Default::default(), - )) + ) + .with_name("Development") + .with_id("versi_dev") + .with_chain_type(ChainType::Development) + .with_genesis_config_patch(rococo_development_config_genesis()) + .with_protocol_id("versi") + .build()) } /// Wococo development config (single validator Alice) #[cfg(feature = "rococo-native")] pub fn wococo_development_config() -> Result { const WOCOCO_DEV_PROTOCOL_ID: &str = "woco"; - let wasm_binary = rococo::WASM_BINARY.ok_or("Wococo development wasm not available")?; - - Ok(RococoChainSpec::from_genesis( - "Development", - "wococo_dev", - ChainType::Development, - move || rococo_development_config_genesis(wasm_binary), - vec![], - None, - Some(WOCOCO_DEV_PROTOCOL_ID), - None, - None, + Ok(RococoChainSpec::builder( + rococo::WASM_BINARY.ok_or("Wococo development wasm not available")?, Default::default(), - )) + ) + .with_name("Development") + .with_id("wococo_dev") + .with_chain_type(ChainType::Development) + .with_genesis_config_patch(rococo_development_config_genesis()) + .with_protocol_id(WOCOCO_DEV_PROTOCOL_ID) + .build()) } #[cfg(feature = "westend-native")] -fn westend_local_testnet_genesis(wasm_binary: &[u8]) -> westend::RuntimeGenesisConfig { +fn westend_local_testnet_genesis() -> serde_json::Value { westend_testnet_genesis( - wasm_binary, vec![get_authority_keys_from_seed("Alice"), get_authority_keys_from_seed("Bob")], get_account_id_from_seed::("Alice"), None, @@ -1162,26 +1043,21 @@ fn westend_local_testnet_genesis(wasm_binary: &[u8]) -> westend::RuntimeGenesisC /// Westend local testnet config (multivalidator Alice + Bob) #[cfg(feature = "westend-native")] pub fn 
westend_local_testnet_config() -> Result { - let wasm_binary = westend::WASM_BINARY.ok_or("Westend development wasm not available")?; - - Ok(WestendChainSpec::from_genesis( - "Westend Local Testnet", - "westend_local_testnet", - ChainType::Local, - move || westend_local_testnet_genesis(wasm_binary), - vec![], - None, - Some(DEFAULT_PROTOCOL_ID), - None, - None, + Ok(WestendChainSpec::builder( + westend::WASM_BINARY.ok_or("Westend development wasm not available")?, Default::default(), - )) + ) + .with_name("Westend Local Testnet") + .with_id("westend_local_testnet") + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(westend_local_testnet_genesis()) + .with_protocol_id(DEFAULT_PROTOCOL_ID) + .build()) } #[cfg(feature = "rococo-native")] -fn rococo_local_testnet_genesis(wasm_binary: &[u8]) -> rococo_runtime::RuntimeGenesisConfig { +fn rococo_local_testnet_genesis() -> serde_json::Value { rococo_testnet_genesis( - wasm_binary, vec![get_authority_keys_from_seed("Alice"), get_authority_keys_from_seed("Bob")], get_account_id_from_seed::("Alice"), None, @@ -1191,27 +1067,22 @@ fn rococo_local_testnet_genesis(wasm_binary: &[u8]) -> rococo_runtime::RuntimeGe /// Rococo local testnet config (multivalidator Alice + Bob) #[cfg(feature = "rococo-native")] pub fn rococo_local_testnet_config() -> Result { - let wasm_binary = rococo::WASM_BINARY.ok_or("Rococo development wasm not available")?; - - Ok(RococoChainSpec::from_genesis( - "Rococo Local Testnet", - "rococo_local_testnet", - ChainType::Local, - move || rococo_local_testnet_genesis(wasm_binary), - vec![], - None, - Some(DEFAULT_PROTOCOL_ID), - None, - None, + Ok(RococoChainSpec::builder( + rococo::fast_runtime_binary::WASM_BINARY.ok_or("Rococo development wasm not available")?, Default::default(), - )) + ) + .with_name("Rococo Local Testnet") + .with_id("rococo_local_testnet") + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(rococo_local_testnet_genesis()) + .with_protocol_id(DEFAULT_PROTOCOL_ID) + .build()) } /// Wococo is a temporary testnet that uses almost the same runtime as rococo. #[cfg(feature = "rococo-native")] -fn wococo_local_testnet_genesis(wasm_binary: &[u8]) -> rococo_runtime::RuntimeGenesisConfig { +fn wococo_local_testnet_genesis() -> serde_json::Value { rococo_testnet_genesis( - wasm_binary, vec![ get_authority_keys_from_seed("Alice"), get_authority_keys_from_seed("Bob"), @@ -1226,27 +1097,22 @@ fn wococo_local_testnet_genesis(wasm_binary: &[u8]) -> rococo_runtime::RuntimeGe /// Wococo local testnet config (multivalidator Alice + Bob + Charlie + Dave) #[cfg(feature = "rococo-native")] pub fn wococo_local_testnet_config() -> Result { - let wasm_binary = rococo::WASM_BINARY.ok_or("Wococo development wasm not available")?; - - Ok(RococoChainSpec::from_genesis( - "Wococo Local Testnet", - "wococo_local_testnet", - ChainType::Local, - move || wococo_local_testnet_genesis(wasm_binary), - vec![], - None, - Some(DEFAULT_PROTOCOL_ID), - None, - None, + Ok(RococoChainSpec::builder( + rococo::WASM_BINARY.ok_or("Rococo development wasm (used for wococo) not available")?, Default::default(), - )) + ) + .with_name("Wococo Local Testnet") + .with_id("wococo_local_testnet") + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(wococo_local_testnet_genesis()) + .with_protocol_id(DEFAULT_PROTOCOL_ID) + .build()) } /// `Versi` is a temporary testnet that uses the same runtime as rococo. 
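All of these configs now go through the same `ChainSpec::builder` entry point: it takes the runtime wasm and the chain-spec `Extensions`, and everything else is an optional `with_*` call rather than a positional argument to `from_genesis`. A sketch of the pattern for a hypothetical extra config (the function name and the `String` error type are assumptions; the builder calls mirror those used above):

    // Hypothetical additional config reusing the helpers defined above.
    pub fn my_local_testnet_config() -> Result<RococoChainSpec, String> {
        Ok(RococoChainSpec::builder(
            rococo::WASM_BINARY.ok_or("Rococo development wasm not available")?,
            Default::default(), // chain-spec `Extensions`
        )
        .with_name("My Local Testnet")
        .with_id("my_local_testnet")
        .with_chain_type(ChainType::Local)
        .with_genesis_config_patch(rococo_local_testnet_genesis())
        .with_protocol_id(DEFAULT_PROTOCOL_ID)
        .build())
    }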
#[cfg(feature = "rococo-native")] -fn versi_local_testnet_genesis(wasm_binary: &[u8]) -> rococo_runtime::RuntimeGenesisConfig { +fn versi_local_testnet_genesis() -> serde_json::Value { rococo_testnet_genesis( - wasm_binary, vec![ get_authority_keys_from_seed("Alice"), get_authority_keys_from_seed("Bob"), @@ -1261,18 +1127,14 @@ fn versi_local_testnet_genesis(wasm_binary: &[u8]) -> rococo_runtime::RuntimeGen /// `Versi` local testnet config (multivalidator Alice + Bob + Charlie + Dave) #[cfg(feature = "rococo-native")] pub fn versi_local_testnet_config() -> Result { - let wasm_binary = rococo::WASM_BINARY.ok_or("Versi development wasm not available")?; - - Ok(RococoChainSpec::from_genesis( - "Versi Local Testnet", - "versi_local_testnet", - ChainType::Local, - move || versi_local_testnet_genesis(wasm_binary), - vec![], - None, - Some("versi"), - None, - None, + Ok(RococoChainSpec::builder( + rococo::WASM_BINARY.ok_or("Rococo development wasm (used for versi) not available")?, Default::default(), - )) + ) + .with_name("Versi Local Testnet") + .with_id("versi_local_testnet") + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(versi_local_testnet_genesis()) + .with_protocol_id("versi") + .build()) } diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs index 5991744dc3afce5cbc63af412d4c00c6417f8e93..0ed7940b3e80abcb32e11618ba21a8c5fb909d17 100644 --- a/polkadot/node/service/src/lib.rs +++ b/polkadot/node/service/src/lib.rs @@ -749,11 +749,6 @@ pub fn new_full( Some(backoff) }; - // Warn the user that BEEFY is still experimental for Polkadot. - if enable_beefy && config.chain_spec.is_polkadot() { - gum::warn!("BEEFY is still experimental, usage on Polkadot network is discouraged."); - } - let disable_grandpa = config.disable_grandpa; let name = config.network.node_name.clone(); @@ -970,11 +965,15 @@ pub fn new_full( if let Some(hwbench) = hwbench { sc_sysinfo::print_hwbench(&hwbench); - if !SUBSTRATE_REFERENCE_HARDWARE.check_hardware(&hwbench) && role.is_authority() { - log::warn!( - "⚠️ The hardware does not meet the minimal requirements for role 'Authority' find out more at:\n\ - https://wiki.polkadot.network/docs/maintain-guides-how-to-validate-polkadot#reference-hardware" + match SUBSTRATE_REFERENCE_HARDWARE.check_hardware(&hwbench) { + Err(err) if role.is_authority() => { + log::warn!( + "⚠️ The hardware does not meet the minimal requirements {} for role 'Authority' find out more at:\n\ + https://wiki.polkadot.network/docs/maintain-guides-how-to-validate-polkadot#reference-hardware", + err ); + }, + _ => {}, } if let Some(ref mut telemetry) = telemetry { diff --git a/polkadot/node/service/src/overseer.rs b/polkadot/node/service/src/overseer.rs index 7d1add118241b123ad122b305264f95b46b8ee78..fd618863eeaa737ec4be26b24a0db82e0bdbf307 100644 --- a/polkadot/node/service/src/overseer.rs +++ b/polkadot/node/service/src/overseer.rs @@ -40,7 +40,6 @@ use polkadot_overseer::{ metrics::Metrics as OverseerMetrics, InitializedOverseerBuilder, MetricsTrait, Overseer, OverseerConnector, OverseerHandle, SpawnGlue, }; -use schnellru::{ByLength, LruMap}; use polkadot_primitives::runtime_api::ParachainHost; use sc_authority_discovery::Service as AuthorityDiscoveryService; @@ -342,7 +341,6 @@ where .span_per_active_leaf(Default::default()) .active_leaves(Default::default()) .supports_parachains(runtime_api_client) - .known_leaves(LruMap::new(ByLength::new(KNOWN_LEAVES_CACHE_SIZE))) .metrics(metrics) .spawner(spawner); @@ -380,8 +378,6 @@ pub trait OverseerGen { // as 
consequence make this rather annoying to implement and use. } -use polkadot_overseer::KNOWN_LEAVES_CACHE_SIZE; - /// The regular set of subsystems. pub struct RealOverseerGen; diff --git a/polkadot/node/service/src/parachains_db/mod.rs b/polkadot/node/service/src/parachains_db/mod.rs index 519afbe0ccd13b088ae7a0efb5aa3c7fa3f16905..92f3f167f22fb6468b53681b4a7c33283cdb29c6 100644 --- a/polkadot/node/service/src/parachains_db/mod.rs +++ b/polkadot/node/service/src/parachains_db/mod.rs @@ -41,7 +41,15 @@ pub(crate) mod columns { pub const COL_SESSION_WINDOW_DATA: u32 = 5; } + // Version 4 only changed structures in approval voting, so we can re-export the v4 definitions. pub mod v3 { + pub use super::v4::{ + COL_APPROVAL_DATA, COL_AVAILABILITY_DATA, COL_AVAILABILITY_META, + COL_CHAIN_SELECTION_DATA, COL_DISPUTE_COORDINATOR_DATA, NUM_COLUMNS, ORDERED_COL, + }; + } + + pub mod v4 { pub const NUM_COLUMNS: u32 = 5; pub const COL_AVAILABILITY_DATA: u32 = 0; pub const COL_AVAILABILITY_META: u32 = 1; @@ -73,14 +81,14 @@ pub struct ColumnsConfig { /// The real columns used by the parachains DB. #[cfg(any(test, feature = "full-node"))] pub const REAL_COLUMNS: ColumnsConfig = ColumnsConfig { - col_availability_data: columns::v3::COL_AVAILABILITY_DATA, - col_availability_meta: columns::v3::COL_AVAILABILITY_META, - col_approval_data: columns::v3::COL_APPROVAL_DATA, - col_chain_selection_data: columns::v3::COL_CHAIN_SELECTION_DATA, - col_dispute_coordinator_data: columns::v3::COL_DISPUTE_COORDINATOR_DATA, + col_availability_data: columns::v4::COL_AVAILABILITY_DATA, + col_availability_meta: columns::v4::COL_AVAILABILITY_META, + col_approval_data: columns::v4::COL_APPROVAL_DATA, + col_chain_selection_data: columns::v4::COL_CHAIN_SELECTION_DATA, + col_dispute_coordinator_data: columns::v4::COL_DISPUTE_COORDINATOR_DATA, }; -#[derive(PartialEq)] +#[derive(PartialEq, Copy, Clone)] pub(crate) enum DatabaseKind { ParityDB, RocksDB, @@ -125,28 +133,28 @@ pub fn open_creating_rocksdb( let path = root.join("parachains").join("db"); - let mut db_config = DatabaseConfig::with_columns(columns::v3::NUM_COLUMNS); + let mut db_config = DatabaseConfig::with_columns(columns::v4::NUM_COLUMNS); let _ = db_config .memory_budget - .insert(columns::v3::COL_AVAILABILITY_DATA, cache_sizes.availability_data); + .insert(columns::v4::COL_AVAILABILITY_DATA, cache_sizes.availability_data); let _ = db_config .memory_budget - .insert(columns::v3::COL_AVAILABILITY_META, cache_sizes.availability_meta); + .insert(columns::v4::COL_AVAILABILITY_META, cache_sizes.availability_meta); let _ = db_config .memory_budget - .insert(columns::v3::COL_APPROVAL_DATA, cache_sizes.approval_data); + .insert(columns::v4::COL_APPROVAL_DATA, cache_sizes.approval_data); let path_str = path .to_str() .ok_or_else(|| other_io_error(format!("Bad database path: {:?}", path)))?; std::fs::create_dir_all(&path_str)?; - upgrade::try_upgrade_db(&path, DatabaseKind::RocksDB)?; + upgrade::try_upgrade_db(&path, DatabaseKind::RocksDB, upgrade::CURRENT_VERSION)?; let db = Database::open(&db_config, &path_str)?; let db = polkadot_node_subsystem_util::database::kvdb_impl::DbAdapter::new( db, - columns::v3::ORDERED_COL, + columns::v4::ORDERED_COL, ); Ok(Arc::new(db)) @@ -164,14 +172,14 @@ pub fn open_creating_paritydb( .ok_or_else(|| other_io_error(format!("Bad database path: {:?}", path)))?; std::fs::create_dir_all(&path_str)?; - upgrade::try_upgrade_db(&path, DatabaseKind::ParityDB)?; + upgrade::try_upgrade_db(&path, DatabaseKind::ParityDB, upgrade::CURRENT_VERSION)?; let db = 
parity_db::Db::open_or_create(&upgrade::paritydb_version_3_config(&path)) .map_err(|err| io::Error::new(io::ErrorKind::Other, format!("{:?}", err)))?; let db = polkadot_node_subsystem_util::database::paritydb_impl::DbAdapter::new( db, - columns::v3::ORDERED_COL, + columns::v4::ORDERED_COL, ); Ok(Arc::new(db)) } diff --git a/polkadot/node/service/src/parachains_db/upgrade.rs b/polkadot/node/service/src/parachains_db/upgrade.rs index 54ef97afd71c6ffacb643a08e59709838974ae3e..1d76c79d3e32320e29dd781d99eedbd9669320d9 100644 --- a/polkadot/node/service/src/parachains_db/upgrade.rs +++ b/polkadot/node/service/src/parachains_db/upgrade.rs @@ -22,13 +22,17 @@ use std::{ str::FromStr, }; +use polkadot_node_core_approval_voting::approval_db::v2::{ + migration_helpers::v1_to_v2, Config as ApprovalDbConfig, +}; type Version = u32; /// Version file name. const VERSION_FILE_NAME: &'static str = "parachain_db_version"; /// Current db version. -const CURRENT_VERSION: Version = 3; +/// Version 4 changes approval db format for `OurAssignment`. +pub(crate) const CURRENT_VERSION: Version = 4; #[derive(thiserror::Error, Debug)] pub enum Error { @@ -38,6 +42,10 @@ pub enum Error { CorruptedVersionFile, #[error("Parachains DB has a future version (expected {current:?}, found {got:?})")] FutureVersion { current: Version, got: Version }, + #[error("Parachain DB migration failed")] + MigrationFailed, + #[error("Parachain DB migration would take forever")] + MigrationLoop, } impl From<Error> for io::Error { @@ -49,10 +57,42 @@ impl From<Error> for io::Error { } } -/// Try upgrading parachain's database to the current version. -pub(crate) fn try_upgrade_db(db_path: &Path, db_kind: DatabaseKind) -> Result<(), Error> { +/// Try upgrading parachain's database to a target version. +pub(crate) fn try_upgrade_db( + db_path: &Path, + db_kind: DatabaseKind, + target_version: Version, +) -> Result<(), Error> { + // Ensure we don't loop forever below because of a bug. + const MAX_MIGRATIONS: u32 = 30; + + #[cfg(test)] + remove_file_lock(&db_path); + + // Loop migrations until we reach the target version. + for _ in 0..MAX_MIGRATIONS { + let version = try_upgrade_db_to_next_version(db_path, db_kind)?; + + #[cfg(test)] + remove_file_lock(&db_path); + + if version == target_version { + return Ok(()) + } + } + + Err(Error::MigrationLoop) +} + +/// Try upgrading parachain's database to the next version. +/// If successful, it returns the current version. +pub(crate) fn try_upgrade_db_to_next_version( + db_path: &Path, + db_kind: DatabaseKind, +) -> Result<Version, Error> { + let is_empty = db_path.read_dir().map_or(true, |mut d| d.next().is_none()); - if !is_empty { + + let new_version = if !is_empty { match get_db_version(db_path)? { // 0 -> 1 migration Some(0) => migrate_from_version_0_to_1(db_path, db_kind)?, @@ -60,21 +100,26 @@ pub(crate) fn try_upgrade_db(db_path: &Path, db_kind: DatabaseKind) -> Result<() Some(1) => migrate_from_version_1_to_2(db_path, db_kind)?, // 2 -> 3 migration Some(2) => migrate_from_version_2_to_3(db_path, db_kind)?, + // 3 -> 4 migration + Some(3) => migrate_from_version_3_to_4(db_path, db_kind)?, // Already at current version, do nothing. - Some(CURRENT_VERSION) => (), + Some(CURRENT_VERSION) => CURRENT_VERSION, // This is an arbitrary future version, we don't handle it. Some(v) => return Err(Error::FutureVersion { current: CURRENT_VERSION, got: v }), // No version file. For `RocksDB` we don't need to do anything.
- None if db_kind == DatabaseKind::RocksDB => (), + None if db_kind == DatabaseKind::RocksDB => CURRENT_VERSION, // No version file. `ParityDB` did not previously have a version defined. // We handle this as a `0 -> 1` migration. None if db_kind == DatabaseKind::ParityDB => migrate_from_version_0_to_1(db_path, db_kind)?, None => unreachable!(), } - } + } else { + CURRENT_VERSION + }; - update_version(db_path) + update_version(db_path, new_version)?; + Ok(new_version) } /// Reads current database version from the file at given path. @@ -91,9 +136,9 @@ fn get_db_version(path: &Path) -> Result<Option<Version>, Error> { /// Writes current database version to the file. /// Creates a new file if the version file does not exist yet. -fn update_version(path: &Path) -> Result<(), Error> { +fn update_version(path: &Path, new_version: Version) -> Result<(), Error> { fs::create_dir_all(path)?; - fs::write(version_file_path(path), CURRENT_VERSION.to_string()).map_err(Into::into) + fs::write(version_file_path(path), new_version.to_string()).map_err(Into::into) } /// Returns the version file path. @@ -103,7 +148,7 @@ fn version_file_path(path: &Path) -> PathBuf { file_path } -fn migrate_from_version_0_to_1(path: &Path, db_kind: DatabaseKind) -> Result<(), Error> { +fn migrate_from_version_0_to_1(path: &Path, db_kind: DatabaseKind) -> Result<Version, Error> { gum::info!(target: LOG_TARGET, "Migrating parachains db from version 0 to version 1 ..."); match db_kind { @@ -116,7 +161,7 @@ fn migrate_from_version_0_to_1(path: &Path, db_kind: DatabaseKind) -> Result<(), }) } -fn migrate_from_version_1_to_2(path: &Path, db_kind: DatabaseKind) -> Result<(), Error> { +fn migrate_from_version_1_to_2(path: &Path, db_kind: DatabaseKind) -> Result<Version, Error> { gum::info!(target: LOG_TARGET, "Migrating parachains db from version 1 to version 2 ..."); match db_kind { @@ -129,7 +174,48 @@ fn migrate_from_version_1_to_2(path: &Path, db_kind: DatabaseKind) -> Result<(), }) } -fn migrate_from_version_2_to_3(path: &Path, db_kind: DatabaseKind) -> Result<(), Error> { +// Migrate approval voting database. `OurAssignment` has been changed to support the v2 assignments. +// As these are backwards compatible, we'll convert the old entries to the new format. +fn migrate_from_version_3_to_4(path: &Path, db_kind: DatabaseKind) -> Result<Version, Error> { + gum::info!(target: LOG_TARGET, "Migrating parachains db from version 3 to version 4 ..."); + use polkadot_node_subsystem_util::database::{ + kvdb_impl::DbAdapter as RocksDbAdapter, paritydb_impl::DbAdapter as ParityDbAdapter, + }; + use std::sync::Arc; + + let approval_db_config = + ApprovalDbConfig { col_approval_data: super::REAL_COLUMNS.col_approval_data }; + + let _result = match db_kind { + DatabaseKind::ParityDB => { + let db = ParityDbAdapter::new( + parity_db::Db::open(&paritydb_version_3_config(path)) + .map_err(|e| other_io_error(format!("Error opening db {:?}", e)))?, + super::columns::v3::ORDERED_COL, + ); + + v1_to_v2(Arc::new(db), approval_db_config).map_err(|_| Error::MigrationFailed)?; + }, + DatabaseKind::RocksDB => { + let db_path = path + .to_str() + .ok_or_else(|| super::other_io_error("Invalid database path".into()))?; + let db_cfg = + kvdb_rocksdb::DatabaseConfig::with_columns(super::columns::v3::NUM_COLUMNS); + let db = RocksDbAdapter::new( + kvdb_rocksdb::Database::open(&db_cfg, db_path)?, + &super::columns::v3::ORDERED_COL, + ); + + v1_to_v2(Arc::new(db), approval_db_config).map_err(|_| Error::MigrationFailed)?; + }, + }; + + gum::info!(target: LOG_TARGET, "Migration complete! 
"); + Ok(CURRENT_VERSION) +} + +fn migrate_from_version_2_to_3(path: &Path, db_kind: DatabaseKind) -> Result { gum::info!(target: LOG_TARGET, "Migrating parachains db from version 2 to version 3 ..."); match db_kind { DatabaseKind::ParityDB => paritydb_migrate_from_version_2_to_3(path), @@ -143,7 +229,7 @@ fn migrate_from_version_2_to_3(path: &Path, db_kind: DatabaseKind) -> Result<(), /// Migration from version 0 to version 1: /// * the number of columns has changed from 3 to 5; -fn rocksdb_migrate_from_version_0_to_1(path: &Path) -> Result<(), Error> { +fn rocksdb_migrate_from_version_0_to_1(path: &Path) -> Result { use kvdb_rocksdb::{Database, DatabaseConfig}; let db_path = path @@ -155,12 +241,12 @@ fn rocksdb_migrate_from_version_0_to_1(path: &Path) -> Result<(), Error> { db.add_column()?; db.add_column()?; - Ok(()) + Ok(1) } /// Migration from version 1 to version 2: /// * the number of columns has changed from 5 to 6; -fn rocksdb_migrate_from_version_1_to_2(path: &Path) -> Result<(), Error> { +fn rocksdb_migrate_from_version_1_to_2(path: &Path) -> Result { use kvdb_rocksdb::{Database, DatabaseConfig}; let db_path = path @@ -171,10 +257,10 @@ fn rocksdb_migrate_from_version_1_to_2(path: &Path) -> Result<(), Error> { db.add_column()?; - Ok(()) + Ok(2) } -fn rocksdb_migrate_from_version_2_to_3(path: &Path) -> Result<(), Error> { +fn rocksdb_migrate_from_version_2_to_3(path: &Path) -> Result { use kvdb_rocksdb::{Database, DatabaseConfig}; let db_path = path @@ -185,7 +271,7 @@ fn rocksdb_migrate_from_version_2_to_3(path: &Path) -> Result<(), Error> { db.remove_last_column()?; - Ok(()) + Ok(3) } // This currently clears columns which had their configs altered between versions. @@ -249,7 +335,7 @@ fn paritydb_fix_columns( pub(crate) fn paritydb_version_1_config(path: &Path) -> parity_db::Options { let mut options = parity_db::Options::with_columns(&path, super::columns::v1::NUM_COLUMNS as u8); - for i in columns::v3::ORDERED_COL { + for i in columns::v4::ORDERED_COL { options.columns[*i as usize].btree_index = true; } @@ -260,7 +346,7 @@ pub(crate) fn paritydb_version_1_config(path: &Path) -> parity_db::Options { pub(crate) fn paritydb_version_2_config(path: &Path) -> parity_db::Options { let mut options = parity_db::Options::with_columns(&path, super::columns::v2::NUM_COLUMNS as u8); - for i in columns::v3::ORDERED_COL { + for i in columns::v4::ORDERED_COL { options.columns[*i as usize].btree_index = true; } @@ -278,48 +364,161 @@ pub(crate) fn paritydb_version_3_config(path: &Path) -> parity_db::Options { options } +/// Database configuration for version 0. This is useful just for testing. +#[cfg(test)] +pub(crate) fn paritydb_version_0_config(path: &Path) -> parity_db::Options { + let mut options = + parity_db::Options::with_columns(&path, super::columns::v0::NUM_COLUMNS as u8); + options.columns[super::columns::v4::COL_AVAILABILITY_META as usize].btree_index = true; + + options +} + /// Migration from version 0 to version 1. /// Cases covered: /// - upgrading from v0.9.23 or earlier -> the `dispute coordinator column` was changed /// - upgrading from v0.9.24+ -> this is a no op assuming the DB has been manually fixed as per /// release notes -fn paritydb_migrate_from_version_0_to_1(path: &Path) -> Result<(), Error> { +fn paritydb_migrate_from_version_0_to_1(path: &Path) -> Result { // Delete the `dispute coordinator` column if needed (if column configuration is changed). 
paritydb_fix_columns( path, paritydb_version_1_config(path), - vec![super::columns::v3::COL_DISPUTE_COORDINATOR_DATA], + vec![super::columns::v4::COL_DISPUTE_COORDINATOR_DATA], )?; - Ok(()) + Ok(1) } /// Migration from version 1 to version 2: /// - add a new column for session information storage -fn paritydb_migrate_from_version_1_to_2(path: &Path) -> Result<(), Error> { +fn paritydb_migrate_from_version_1_to_2(path: &Path) -> Result { let mut options = paritydb_version_1_config(path); // Adds the session info column. parity_db::Db::add_column(&mut options, Default::default()) .map_err(|e| other_io_error(format!("Error adding column {:?}", e)))?; - Ok(()) + Ok(2) } /// Migration from version 2 to version 3: /// - drop the column used by `RollingSessionWindow` -fn paritydb_migrate_from_version_2_to_3(path: &Path) -> Result<(), Error> { +fn paritydb_migrate_from_version_2_to_3(path: &Path) -> Result { parity_db::Db::drop_last_column(&mut paritydb_version_2_config(path)) .map_err(|e| other_io_error(format!("Error removing COL_SESSION_WINDOW_DATA {:?}", e)))?; - Ok(()) + Ok(3) +} + +/// Remove the lock file. If file is locked, it will wait up to 1s. +#[cfg(test)] +pub fn remove_file_lock(path: &std::path::Path) { + use std::{io::ErrorKind, thread::sleep, time::Duration}; + + let mut lock_path = std::path::PathBuf::from(path); + lock_path.push("lock"); + + for _ in 0..10 { + let result = std::fs::remove_file(lock_path.as_path()); + match result { + Err(error) => match error.kind() { + ErrorKind::WouldBlock => { + sleep(Duration::from_millis(100)); + continue + }, + _ => return, + }, + Ok(_) => {}, + } + } + + unreachable!("Database is locked, waited 1s for lock file: {:?}", lock_path); } #[cfg(test)] mod tests { use super::{ - columns::{v2::COL_SESSION_WINDOW_DATA, v3::*}, + columns::{v2::COL_SESSION_WINDOW_DATA, v4::*}, *, }; + use polkadot_node_core_approval_voting::approval_db::v2::migration_helpers::v1_to_v2_fill_test_data; + use test_helpers::dummy_candidate_receipt; + + #[test] + fn test_paritydb_migrate_0_to_1() { + use parity_db::Db; + + let db_dir = tempfile::tempdir().unwrap(); + let path = db_dir.path(); + { + let db = Db::open_or_create(&paritydb_version_0_config(&path)).unwrap(); + + db.commit(vec![( + COL_AVAILABILITY_META as u8, + b"5678".to_vec(), + Some(b"somevalue".to_vec()), + )]) + .unwrap(); + } + + try_upgrade_db(&path, DatabaseKind::ParityDB, 1).unwrap(); + + let db = Db::open(&paritydb_version_1_config(&path)).unwrap(); + assert_eq!( + db.get(COL_AVAILABILITY_META as u8, b"5678").unwrap(), + Some("somevalue".as_bytes().to_vec()) + ); + } + + #[test] + fn test_paritydb_migrate_1_to_2() { + use parity_db::Db; + + let db_dir = tempfile::tempdir().unwrap(); + let path = db_dir.path(); + + // We need to properly set db version for upgrade to work. 
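    // Illustrative sketch (hypothetical values): the version marker is just a plain text
    // file named "parachain_db_version" inside the DB directory holding the bare number;
    // `update_version` writes it and `get_db_version` parses it back, roughly:
    //
    //     std::fs::write(path.join("parachain_db_version"), 1u32.to_string()).unwrap();
    //     let v: u32 = std::fs::read_to_string(path.join("parachain_db_version"))
    //         .unwrap()
    //         .parse()
    //         .unwrap();
    //     assert_eq!(v, 1);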
+ fs::write(version_file_path(path), "1").expect("Failed to write DB version"); + + { + let db = Db::open_or_create(&paritydb_version_1_config(&path)).unwrap(); + + // Write some dummy data + db.commit(vec![( + COL_DISPUTE_COORDINATOR_DATA as u8, + b"1234".to_vec(), + Some(b"somevalue".to_vec()), + )]) + .unwrap(); + + assert_eq!(db.num_columns(), columns::v1::NUM_COLUMNS as u8); + } + + try_upgrade_db(&path, DatabaseKind::ParityDB, 2).unwrap(); + + let db = Db::open(&paritydb_version_2_config(&path)).unwrap(); + + assert_eq!(db.num_columns(), columns::v2::NUM_COLUMNS as u8); + + assert_eq!( + db.get(COL_DISPUTE_COORDINATOR_DATA as u8, b"1234").unwrap(), + Some("somevalue".as_bytes().to_vec()) + ); + + // Test we can write the new column. + db.commit(vec![( + COL_SESSION_WINDOW_DATA as u8, + b"1337".to_vec(), + Some(b"0xdeadb00b".to_vec()), + )]) + .unwrap(); + + // Read back data from new column. + assert_eq!( + db.get(COL_SESSION_WINDOW_DATA as u8, b"1337").unwrap(), + Some("0xdeadb00b".as_bytes().to_vec()) + ); + } #[test] fn test_rocksdb_migrate_1_to_2() { @@ -338,7 +537,7 @@ mod tests { // We need to properly set db version for upgrade to work. fs::write(version_file_path(db_dir.path()), "1").expect("Failed to write DB version"); { - let db = DbAdapter::new(db, columns::v3::ORDERED_COL); + let db = DbAdapter::new(db, columns::v4::ORDERED_COL); db.write(DBTransaction { ops: vec![DBOp::Insert { col: COL_DISPUTE_COORDINATOR_DATA, @@ -349,14 +548,14 @@ mod tests { .unwrap(); } - try_upgrade_db(&db_dir.path(), DatabaseKind::RocksDB).unwrap(); + try_upgrade_db(&db_dir.path(), DatabaseKind::RocksDB, 2).unwrap(); let db_cfg = DatabaseConfig::with_columns(super::columns::v2::NUM_COLUMNS); let db = Database::open(&db_cfg, db_path).unwrap(); assert_eq!(db.num_columns(), super::columns::v2::NUM_COLUMNS); - let db = DbAdapter::new(db, columns::v3::ORDERED_COL); + let db = DbAdapter::new(db, columns::v4::ORDERED_COL); assert_eq!( db.get(COL_DISPUTE_COORDINATOR_DATA, b"1234").unwrap(), @@ -380,6 +579,109 @@ mod tests { ); } + #[test] + fn test_migrate_3_to_4() { + use kvdb_rocksdb::{Database, DatabaseConfig}; + use polkadot_node_core_approval_voting::approval_db::v2::migration_helpers::v1_to_v2_sanity_check; + use polkadot_node_subsystem_util::database::kvdb_impl::DbAdapter; + + let db_dir = tempfile::tempdir().unwrap(); + let db_path = db_dir.path().to_str().unwrap(); + let db_cfg: DatabaseConfig = DatabaseConfig::with_columns(super::columns::v3::NUM_COLUMNS); + + let approval_cfg = ApprovalDbConfig { + col_approval_data: crate::parachains_db::REAL_COLUMNS.col_approval_data, + }; + + // We need to properly set db version for upgrade to work. + fs::write(version_file_path(db_dir.path()), "3").expect("Failed to write DB version"); + let expected_candidates = { + let db = Database::open(&db_cfg, db_path).unwrap(); + assert_eq!(db.num_columns(), super::columns::v3::NUM_COLUMNS as u32); + let db = DbAdapter::new(db, columns::v3::ORDERED_COL); + // Fill the approval voting column with test data. 
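    // The helper seeds the approval-voting column with entries in the old (v1) format and
    // returns the candidates it wrote; after `try_upgrade_db` runs the 3 -> 4 step,
    // `v1_to_v2_sanity_check` below verifies those same candidates survived the conversion.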
+ v1_to_v2_fill_test_data(std::sync::Arc::new(db), approval_cfg, dummy_candidate_receipt) + .unwrap() + }; + + try_upgrade_db(&db_dir.path(), DatabaseKind::RocksDB, 4).unwrap(); + + let db_cfg = DatabaseConfig::with_columns(super::columns::v4::NUM_COLUMNS); + let db = Database::open(&db_cfg, db_path).unwrap(); + let db = DbAdapter::new(db, columns::v4::ORDERED_COL); + + v1_to_v2_sanity_check(std::sync::Arc::new(db), approval_cfg, expected_candidates).unwrap(); + } + + #[test] + fn test_rocksdb_migrate_0_to_4() { + use kvdb_rocksdb::{Database, DatabaseConfig}; + + let db_dir = tempfile::tempdir().unwrap(); + let db_path = db_dir.path().to_str().unwrap(); + + fs::write(version_file_path(db_dir.path()), "0").expect("Failed to write DB version"); + try_upgrade_db(&db_dir.path(), DatabaseKind::RocksDB, 4).unwrap(); + + let db_cfg = DatabaseConfig::with_columns(super::columns::v4::NUM_COLUMNS); + let db = Database::open(&db_cfg, db_path).unwrap(); + + assert_eq!(db.num_columns(), columns::v4::NUM_COLUMNS); + } + + #[test] + fn test_paritydb_migrate_0_to_4() { + use parity_db::Db; + + let db_dir = tempfile::tempdir().unwrap(); + let path = db_dir.path(); + + // We need to properly set db version for upgrade to work. + fs::write(version_file_path(path), "0").expect("Failed to write DB version"); + + { + let db = Db::open_or_create(&paritydb_version_0_config(&path)).unwrap(); + assert_eq!(db.num_columns(), columns::v0::NUM_COLUMNS as u8); + } + + try_upgrade_db(&path, DatabaseKind::ParityDB, 4).unwrap(); + + let db = Db::open(&paritydb_version_3_config(&path)).unwrap(); + assert_eq!(db.num_columns(), columns::v4::NUM_COLUMNS as u8); + } + + #[test] + fn test_paritydb_migrate_2_to_3() { + use parity_db::Db; + + let db_dir = tempfile::tempdir().unwrap(); + let path = db_dir.path(); + let test_key = b"1337"; + + // We need to properly set db version for upgrade to work. + fs::write(version_file_path(path), "2").expect("Failed to write DB version"); + + { + let db = Db::open_or_create(&paritydb_version_2_config(&path)).unwrap(); + + // Write some dummy data + db.commit(vec![( + COL_SESSION_WINDOW_DATA as u8, + test_key.to_vec(), + Some(b"0xdeadb00b".to_vec()), + )]) + .unwrap(); + + assert_eq!(db.num_columns(), columns::v2::NUM_COLUMNS as u8); + } + + try_upgrade_db(&path, DatabaseKind::ParityDB, 3).unwrap(); + + let db = Db::open(&paritydb_version_3_config(&path)).unwrap(); + + assert_eq!(db.num_columns(), columns::v3::NUM_COLUMNS as u8); + } + #[test] fn test_rocksdb_migrate_2_to_3() { use kvdb_rocksdb::{Database, DatabaseConfig}; @@ -387,6 +689,7 @@ mod tests { let db_dir = tempfile::tempdir().unwrap(); let db_path = db_dir.path().to_str().unwrap(); let db_cfg = DatabaseConfig::with_columns(super::columns::v2::NUM_COLUMNS); + { let db = Database::open(&db_cfg, db_path).unwrap(); assert_eq!(db.num_columns(), super::columns::v2::NUM_COLUMNS as u32); @@ -395,7 +698,7 @@ mod tests { // We need to properly set db version for upgrade to work. 
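    // Starting from version "0", the `try_upgrade_db(.., 4)` call below walks the whole
    // migration chain one step per loop iteration (0 -> 1 -> 2 -> 3 -> 4), so this test
    // also exercises the MAX_MIGRATIONS-bounded loop in `try_upgrade_db`.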
fs::write(version_file_path(db_dir.path()), "2").expect("Failed to write DB version"); - try_upgrade_db(&db_dir.path(), DatabaseKind::RocksDB).unwrap(); + try_upgrade_db(&db_dir.path(), DatabaseKind::RocksDB, 3).unwrap(); let db_cfg = DatabaseConfig::with_columns(super::columns::v3::NUM_COLUMNS); let db = Database::open(&db_cfg, db_path).unwrap(); diff --git a/polkadot/node/service/src/relay_chain_selection.rs b/polkadot/node/service/src/relay_chain_selection.rs index 189073783f0dc53eb06ceafe9015e9fd806b1f9e..5fae6a96de4ea73996042be8446220df51fc2e30 100644 --- a/polkadot/node/service/src/relay_chain_selection.rs +++ b/polkadot/node/service/src/relay_chain_selection.rs @@ -31,7 +31,7 @@ //! leaf returned from the chain selection subsystem by calling into other //! subsystems which yield information about approvals and disputes. //! -//! [chain-selection-guide]: https://w3f.github.io/parachain-implementers-guide/protocol-chain-selection.html +//! [chain-selection-guide]: https://paritytech.github.io/polkadot-sdk/book/protocol-chain-selection.html #![cfg(feature = "full-node")] diff --git a/polkadot/node/service/src/tests.rs b/polkadot/node/service/src/tests.rs index 86119662d9bc08e7ed33999e1b7c78cd0749e154..26c8083185d84e9f18c27bfbc00779fce6a0b91e 100644 --- a/polkadot/node/service/src/tests.rs +++ b/polkadot/node/service/src/tests.rs @@ -17,7 +17,7 @@ use super::{relay_chain_selection::*, *}; use futures::channel::oneshot::Receiver; -use polkadot_node_primitives::approval::VrfSignature; +use polkadot_node_primitives::approval::v2::VrfSignature; use polkadot_node_subsystem::messages::{AllMessages, BlockDescription}; use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_util::TimeoutExt; diff --git a/polkadot/node/service/src/workers.rs b/polkadot/node/service/src/workers.rs index 5f7cc1c2ed49acf0bf170843033f2c5cdbc73701..b35bb8302fdc4405f542fd0cc119066653c76cc9 100644 --- a/polkadot/node/service/src/workers.rs +++ b/polkadot/node/service/src/workers.rs @@ -18,7 +18,7 @@ use super::Error; use is_executable::IsExecutable; -use std::{path::PathBuf, process::Command}; +use std::path::PathBuf; #[cfg(test)] use std::sync::{Mutex, OnceLock}; @@ -75,11 +75,7 @@ pub fn determine_workers_paths( // Do the version check. if let Some(node_version) = node_version { - let worker_version = Command::new(&prep_worker_path).args(["--version"]).output()?.stdout; - let worker_version = std::str::from_utf8(&worker_version) - .expect("version is printed as a string; qed") - .trim() - .to_string(); + let worker_version = polkadot_node_core_pvf::get_worker_version(&prep_worker_path)?; if worker_version != node_version { return Err(Error::WorkerBinaryVersionMismatch { worker_version, @@ -87,11 +83,8 @@ pub fn determine_workers_paths( worker_path: prep_worker_path, }) } - let worker_version = Command::new(&exec_worker_path).args(["--version"]).output()?.stdout; - let worker_version = std::str::from_utf8(&worker_version) - .expect("version is printed as a string; qed") - .trim() - .to_string(); + + let worker_version = polkadot_node_core_pvf::get_worker_version(&exec_worker_path)?; if worker_version != node_version { return Err(Error::WorkerBinaryVersionMismatch { worker_version, @@ -215,11 +208,11 @@ mod tests { use serial_test::serial; use std::{env::temp_dir, fs, os::unix::fs::PermissionsExt, path::Path}; - const NODE_VERSION: &'static str = "v0.1.2"; + const TEST_NODE_VERSION: &'static str = "v0.1.2"; /// Write a dummy executable to the path which satisfies the version check. 
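    // Judging by the `echo {}` fragment visible in the surrounding context, `get_program(version)`
    // presumably renders a small shell script that prints the given version, so that the
    // worker version probe (previously a hand-rolled `--version` invocation, now
    // `polkadot_node_core_pvf::get_worker_version`) reads back the expected string, e.g.
    // something along the lines of:
    //
    //     #!/bin/sh
    //     echo v0.1.2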
fn write_worker_exe(path: impl AsRef) -> Result<(), Box> { - let program = get_program(NODE_VERSION); + let program = get_program(TEST_NODE_VERSION); fs::write(&path, program)?; Ok(fs::set_permissions(&path, fs::Permissions::from_mode(0o744))?) } @@ -287,7 +280,7 @@ echo {} // Try with provided workers path that has missing binaries. assert_matches!( - determine_workers_paths(Some(given_workers_path.clone()), None, Some(NODE_VERSION.into())), + determine_workers_paths(Some(given_workers_path.clone()), None, Some(TEST_NODE_VERSION.into())), Err(Error::MissingWorkerBinaries { given_workers_path: Some(p1), current_exe_path: p2, workers_names: None }) if p1 == given_workers_path && p2 == exe_path ); @@ -299,7 +292,7 @@ echo {} write_worker_exe(&execute_worker_path)?; fs::set_permissions(&execute_worker_path, fs::Permissions::from_mode(0o644))?; assert_matches!( - determine_workers_paths(Some(given_workers_path.clone()), None, Some(NODE_VERSION.into())), + determine_workers_paths(Some(given_workers_path.clone()), None, Some(TEST_NODE_VERSION.into())), Err(Error::InvalidWorkerBinaries { prep_worker_path: p1, exec_worker_path: p2 }) if p1 == prepare_worker_path && p2 == execute_worker_path ); @@ -307,15 +300,15 @@ echo {} fs::set_permissions(&prepare_worker_path, fs::Permissions::from_mode(0o744))?; fs::set_permissions(&execute_worker_path, fs::Permissions::from_mode(0o744))?; assert_matches!( - determine_workers_paths(Some(given_workers_path), None, Some(NODE_VERSION.into())), + determine_workers_paths(Some(given_workers_path), None, Some(TEST_NODE_VERSION.into())), Ok((p1, p2)) if p1 == prepare_worker_path && p2 == execute_worker_path ); // Try with valid provided workers path that is a binary file. - let given_workers_path = tempdir.join("usr/local/bin/puppet-worker"); + let given_workers_path = tempdir.join("usr/local/bin/test-worker"); write_worker_exe(&given_workers_path)?; assert_matches!( - determine_workers_paths(Some(given_workers_path.clone()), None, Some(NODE_VERSION.into())), + determine_workers_paths(Some(given_workers_path.clone()), None, Some(TEST_NODE_VERSION.into())), Ok((p1, p2)) if p1 == given_workers_path && p2 == given_workers_path ); @@ -330,7 +323,7 @@ echo {} with_temp_dir_structure(|tempdir, exe_path| { // Try with both binaries missing. 
assert_matches!( - determine_workers_paths(None, None, Some(NODE_VERSION.into())), + determine_workers_paths(None, None, Some(TEST_NODE_VERSION.into())), Err(Error::MissingWorkerBinaries { given_workers_path: None, current_exe_path: p, workers_names: None }) if p == exe_path ); @@ -338,7 +331,7 @@ echo {} let prepare_worker_path = tempdir.join("usr/bin/polkadot-prepare-worker"); write_worker_exe(&prepare_worker_path)?; assert_matches!( - determine_workers_paths(None, None, Some(NODE_VERSION.into())), + determine_workers_paths(None, None, Some(TEST_NODE_VERSION.into())), Err(Error::MissingWorkerBinaries { given_workers_path: None, current_exe_path: p, workers_names: None }) if p == exe_path ); @@ -347,7 +340,7 @@ echo {} let execute_worker_path = tempdir.join("usr/bin/polkadot-execute-worker"); write_worker_exe(&execute_worker_path)?; assert_matches!( - determine_workers_paths(None, None, Some(NODE_VERSION.into())), + determine_workers_paths(None, None, Some(TEST_NODE_VERSION.into())), Err(Error::MissingWorkerBinaries { given_workers_path: None, current_exe_path: p, workers_names: None }) if p == exe_path ); @@ -356,7 +349,7 @@ echo {} let prepare_worker_path = tempdir.join("usr/lib/polkadot/polkadot-prepare-worker"); write_worker_exe(&prepare_worker_path)?; assert_matches!( - determine_workers_paths(None, None, Some(NODE_VERSION.into())), + determine_workers_paths(None, None, Some(TEST_NODE_VERSION.into())), Err(Error::MissingWorkerBinaries { given_workers_path: None, current_exe_path: p, workers_names: None }) if p == exe_path ); @@ -365,7 +358,7 @@ echo {} let execute_worker_path = tempdir.join("usr/lib/polkadot/polkadot-execute-worker"); write_worker_exe(execute_worker_path)?; assert_matches!( - determine_workers_paths(None, None, Some(NODE_VERSION.into())), + determine_workers_paths(None, None, Some(TEST_NODE_VERSION.into())), Err(Error::MissingWorkerBinaries { given_workers_path: None, current_exe_path: p, workers_names: None }) if p == exe_path ); @@ -440,8 +433,8 @@ echo {} write_worker_exe_invalid_version(&prepare_worker_bin_path, bad_version)?; write_worker_exe(&execute_worker_bin_path)?; assert_matches!( - determine_workers_paths(None, None, Some(NODE_VERSION.into())), - Err(Error::WorkerBinaryVersionMismatch { worker_version: v1, node_version: v2, worker_path: p }) if v1 == bad_version && v2 == NODE_VERSION && p == prepare_worker_bin_path + determine_workers_paths(None, None, Some(TEST_NODE_VERSION.into())), + Err(Error::WorkerBinaryVersionMismatch { worker_version: v1, node_version: v2, worker_path: p }) if v1 == bad_version && v2 == TEST_NODE_VERSION && p == prepare_worker_bin_path ); // Workers at lib location return bad version. @@ -452,8 +445,8 @@ echo {} write_worker_exe(&prepare_worker_lib_path)?; write_worker_exe_invalid_version(&execute_worker_lib_path, bad_version)?; assert_matches!( - determine_workers_paths(None, None, Some(NODE_VERSION.into())), - Err(Error::WorkerBinaryVersionMismatch { worker_version: v1, node_version: v2, worker_path: p }) if v1 == bad_version && v2 == NODE_VERSION && p == execute_worker_lib_path + determine_workers_paths(None, None, Some(TEST_NODE_VERSION.into())), + Err(Error::WorkerBinaryVersionMismatch { worker_version: v1, node_version: v2, worker_path: p }) if v1 == bad_version && v2 == TEST_NODE_VERSION && p == execute_worker_lib_path ); // Workers at provided workers location return bad version. 
@@ -463,16 +456,16 @@ echo {} write_worker_exe_invalid_version(&prepare_worker_path, bad_version)?; write_worker_exe_invalid_version(&execute_worker_path, bad_version)?; assert_matches!( - determine_workers_paths(Some(given_workers_path), None, Some(NODE_VERSION.into())), - Err(Error::WorkerBinaryVersionMismatch { worker_version: v1, node_version: v2, worker_path: p }) if v1 == bad_version && v2 == NODE_VERSION && p == prepare_worker_path + determine_workers_paths(Some(given_workers_path), None, Some(TEST_NODE_VERSION.into())), + Err(Error::WorkerBinaryVersionMismatch { worker_version: v1, node_version: v2, worker_path: p }) if v1 == bad_version && v2 == TEST_NODE_VERSION && p == prepare_worker_path ); // Given worker binary returns bad version. - let given_workers_path = tempdir.join("usr/local/bin/puppet-worker"); + let given_workers_path = tempdir.join("usr/local/bin/test-worker"); write_worker_exe_invalid_version(&given_workers_path, bad_version)?; assert_matches!( - determine_workers_paths(Some(given_workers_path.clone()), None, Some(NODE_VERSION.into())), - Err(Error::WorkerBinaryVersionMismatch { worker_version: v1, node_version: v2, worker_path: p }) if v1 == bad_version && v2 == NODE_VERSION && p == given_workers_path + determine_workers_paths(Some(given_workers_path.clone()), None, Some(TEST_NODE_VERSION.into())), + Err(Error::WorkerBinaryVersionMismatch { worker_version: v1, node_version: v2, worker_path: p }) if v1 == bad_version && v2 == TEST_NODE_VERSION && p == given_workers_path ); Ok(()) @@ -492,7 +485,7 @@ echo {} write_worker_exe(&execute_worker_bin_path)?; assert_matches!( - determine_workers_paths(None, None, Some(NODE_VERSION.into())), + determine_workers_paths(None, None, Some(TEST_NODE_VERSION.into())), Ok((p1, p2)) if p1 == prepare_worker_bin_path && p2 == execute_worker_bin_path ); @@ -509,7 +502,7 @@ echo {} write_worker_exe(&execute_worker_lib_path)?; assert_matches!( - determine_workers_paths(None, None, Some(NODE_VERSION.into())), + determine_workers_paths(None, None, Some(TEST_NODE_VERSION.into())), Ok((p1, p2)) if p1 == prepare_worker_lib_path && p2 == execute_worker_lib_path ); diff --git a/polkadot/node/subsystem-test-helpers/src/mock.rs b/polkadot/node/subsystem-test-helpers/src/mock.rs index 35d74e27c9c1c237e6cb71382bae9c1cd730ccd6..522bc3c2cc4f4ec91357de9b373f3588a96bde33 100644 --- a/polkadot/node/subsystem-test-helpers/src/mock.rs +++ b/polkadot/node/subsystem-test-helpers/src/mock.rs @@ -16,7 +16,7 @@ use std::sync::Arc; -use polkadot_node_subsystem::{jaeger, ActivatedLeaf, LeafStatus}; +use polkadot_node_subsystem::{jaeger, ActivatedLeaf}; use sc_client_api::UnpinHandle; use sc_keystore::LocalKeystore; use sc_utils::mpsc::tracing_unbounded; @@ -55,7 +55,6 @@ pub fn new_leaf(hash: Hash, number: BlockNumber) -> ActivatedLeaf { ActivatedLeaf { hash, number, - status: LeafStatus::Fresh, unpin_handle: dummy_unpin_handle(hash), span: Arc::new(jaeger::Span::Disabled), } diff --git a/polkadot/node/subsystem-types/Cargo.toml b/polkadot/node/subsystem-types/Cargo.toml index a1c00cb0652e2538c590d1c3892eed8a168e1014..9fd3775da5917dd7a571ae2e5f7acec7ecdb29ed 100644 --- a/polkadot/node/subsystem-types/Cargo.toml +++ b/polkadot/node/subsystem-types/Cargo.toml @@ -25,3 +25,4 @@ smallvec = "1.8.0" substrate-prometheus-endpoint = { path = "../../../substrate/utils/prometheus" } thiserror = "1.0.48" async-trait = "0.1.57" +bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } diff --git a/polkadot/node/subsystem-types/src/lib.rs 
b/polkadot/node/subsystem-types/src/lib.rs index 02651ace1e5b32829804c62cd2d192c9a0e35fd3..e3d6e4decf20e1c28e99b7d821d04421a1a2211b 100644 --- a/polkadot/node/subsystem-types/src/lib.rs +++ b/polkadot/node/subsystem-types/src/lib.rs @@ -51,35 +51,6 @@ pub use polkadot_node_jaeger as jaeger; /// If there are greater than this number of slots, then we fall back to a heap vector. const ACTIVE_LEAVES_SMALLVEC_CAPACITY: usize = 8; -/// The status of an activated leaf. -#[derive(Clone, Debug, PartialEq)] -pub enum LeafStatus { - /// A leaf is fresh when it's the first time the leaf has been encountered. - /// Most leaves should be fresh. - Fresh, - /// A leaf is stale when it's encountered for a subsequent time. This will happen - /// when the chain is reverted or the fork-choice rule abandons some chain. - Stale, -} - -impl LeafStatus { - /// Returns a `bool` indicating fresh status. - pub fn is_fresh(&self) -> bool { - match *self { - LeafStatus::Fresh => true, - LeafStatus::Stale => false, - } - } - - /// Returns a `bool` indicating stale status. - pub fn is_stale(&self) -> bool { - match *self { - LeafStatus::Fresh => false, - LeafStatus::Stale => true, - } - } -} - /// Activated leaf. #[derive(Debug, Clone)] pub struct ActivatedLeaf { @@ -87,8 +58,6 @@ pub struct ActivatedLeaf { pub hash: Hash, /// The block number. pub number: BlockNumber, - /// The status of the leaf. - pub status: LeafStatus, /// A handle to unpin the block on drop. pub unpin_handle: UnpinHandle, /// An associated [`jaeger::Span`]. diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs index 01ccee3add904b4b6179b99322f650646dbde152..44c6f27b17cca227b4517c4d6ff47cb3e13f05db 100644 --- a/polkadot/node/subsystem-types/src/messages.rs +++ b/polkadot/node/subsystem-types/src/messages.rs @@ -32,19 +32,22 @@ use polkadot_node_network_protocol::{ self as net_protocol, peer_set::PeerSet, request_response::Requests, PeerId, }; use polkadot_node_primitives::{ - approval::{BlockApprovalMeta, IndirectAssignmentCert, IndirectSignedApprovalVote}, + approval::{ + v1::{BlockApprovalMeta, IndirectSignedApprovalVote}, + v2::{CandidateBitfield, IndirectAssignmentCertV2}, + }, AvailableData, BabeEpoch, BlockWeight, CandidateVotes, CollationGenerationConfig, CollationSecondedSignal, DisputeMessage, DisputeStatus, ErasureChunk, PoV, SignedDisputeStatement, SignedFullStatement, SignedFullStatementWithPVD, SubmitCollationParams, ValidationResult, }; use polkadot_primitives::{ - async_backing, slashing, AuthorityDiscoveryId, BackedCandidate, BlockNumber, CandidateEvent, - CandidateHash, CandidateIndex, CandidateReceipt, CollatorId, CommittedCandidateReceipt, - CoreState, DisputeState, ExecutorParams, GroupIndex, GroupRotationInfo, Hash, - Header as BlockHeader, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, - MultiDisputeStatementSet, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, - PvfExecTimeoutKind, SessionIndex, SessionInfo, SignedAvailabilityBitfield, + async_backing, slashing, vstaging::NodeFeatures, AuthorityDiscoveryId, BackedCandidate, + BlockNumber, CandidateEvent, CandidateHash, CandidateIndex, CandidateReceipt, CollatorId, + CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupIndex, + GroupRotationInfo, Hash, Header as BlockHeader, Id as ParaId, InboundDownwardMessage, + InboundHrmpMessage, MultiDisputeStatementSet, OccupiedCoreAssumption, PersistedValidationData, + PvfCheckStatement, PvfExecKind, SessionIndex, SessionInfo, 
SignedAvailabilityBitfield, SignedAvailabilityBitfields, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, }; @@ -140,14 +143,18 @@ pub enum CandidateValidationMessage { /// /// If there is no state available which can provide this data or the core for /// the para is not free at the relay-parent, an error is returned. - ValidateFromChainState( - CandidateReceipt, - Arc, - ExecutorParams, - /// Execution timeout - PvfExecTimeoutKind, - oneshot::Sender>, - ), + ValidateFromChainState { + /// The candidate receipt + candidate_receipt: CandidateReceipt, + /// The proof-of-validity + pov: Arc, + /// Session's executor parameters + executor_params: ExecutorParams, + /// Execution kind, used for timeouts and retries (backing/approvals) + exec_kind: PvfExecKind, + /// The sending side of the response channel + response_sender: oneshot::Sender>, + }, /// Validate a candidate with provided, exhaustive parameters for validation. /// /// Explicitly provide the `PersistedValidationData` and `ValidationCode` so this can do full @@ -157,27 +164,35 @@ pub enum CandidateValidationMessage { /// cases where the validity of the candidate is established. This is the case for the typical /// use-case: secondary checkers would use this request relying on the full prior checks /// performed by the relay-chain. - ValidateFromExhaustive( - PersistedValidationData, - ValidationCode, - CandidateReceipt, - Arc, - ExecutorParams, - /// Execution timeout - PvfExecTimeoutKind, - oneshot::Sender>, - ), + ValidateFromExhaustive { + /// Persisted validation data + validation_data: PersistedValidationData, + /// Validation code + validation_code: ValidationCode, + /// The candidate receipt + candidate_receipt: CandidateReceipt, + /// The proof-of-validity + pov: Arc, + /// Session's executor parameters + executor_params: ExecutorParams, + /// Execution kind, used for timeouts and retries (backing/approvals) + exec_kind: PvfExecKind, + /// The sending side of the response channel + response_sender: oneshot::Sender>, + }, /// Try to compile the given validation code and send back /// the outcome. /// /// The validation code is specified by the hash and will be queried from the runtime API at /// the given relay-parent. - PreCheck( - // Relay-parent - Hash, - ValidationCodeHash, - oneshot::Sender, - ), + PreCheck { + /// Relay-parent + relay_parent: Hash, + /// Validation code hash + validation_code_hash: ValidationCodeHash, + /// The sending side of the response channel + response_sender: oneshot::Sender, + }, } /// Messages received by the Collator Protocol subsystem. @@ -695,12 +710,16 @@ pub enum RuntimeApiRequest { ), /// Get the minimum required backing votes. MinimumBackingVotes(SessionIndex, RuntimeApiSender), + /// Returns all disabled validators at a given block height. + DisabledValidators(RuntimeApiSender>), /// Get the backing state of the given para. ParaBackingState(ParaId, RuntimeApiSender>), /// Get candidate's acceptance limitations for asynchronous backing for a relay parent. /// /// If it's not supported by the Runtime, the async backing is said to be disabled. AsyncBackingParams(RuntimeApiSender), + /// Get the node features. + NodeFeatures(SessionIndex, RuntimeApiSender), } impl RuntimeApiRequest { @@ -726,6 +745,12 @@ impl RuntimeApiRequest { /// Minimum version to enable asynchronous backing: `AsyncBackingParams` and `ParaBackingState`. 
pub const ASYNC_BACKING_STATE_RUNTIME_REQUIREMENT: u32 = 7; + + /// `DisabledValidators` + pub const DISABLED_VALIDATORS_RUNTIME_REQUIREMENT: u32 = 8; + + /// `Node features` + pub const NODE_FEATURES_RUNTIME_REQUIREMENT: u32 = 9; } /// A message to the Runtime API subsystem. @@ -836,6 +861,8 @@ pub enum AssignmentCheckError { InvalidCert(ValidatorIndex, String), #[error("Internal state mismatch: {0:?}, {1:?}")] Internal(Hash, CandidateHash), + #[error("Oversized candidate or core bitfield >= {0}")] + InvalidBitfield(usize), } /// The result type of [`ApprovalVotingMessage::CheckAndImportApproval`] request. @@ -901,8 +928,8 @@ pub enum ApprovalVotingMessage { /// Check if the assignment is valid and can be accepted by our view of the protocol. /// Should not be sent unless the block hash is known. CheckAndImportAssignment( - IndirectAssignmentCert, - CandidateIndex, + IndirectAssignmentCertV2, + CandidateBitfield, oneshot::Sender, ), /// Check if the approval vote is valid and can be accepted by our view of the @@ -937,7 +964,7 @@ pub enum ApprovalDistributionMessage { NewBlocks(Vec), /// Distribute an assignment cert from the local validator. The cert is assumed /// to be valid, relevant, and for the given relay-parent and validator index. - DistributeAssignment(IndirectAssignmentCert, CandidateIndex), + DistributeAssignment(IndirectAssignmentCertV2, CandidateBitfield), /// Distribute an approval vote for the local validator. The approval vote is assumed to be /// valid, relevant, and the corresponding approval already issued. /// If not, the subsystem is free to drop the message. diff --git a/polkadot/node/subsystem-types/src/runtime_client.rs b/polkadot/node/subsystem-types/src/runtime_client.rs index 3007e985b4f7b2ec9a561d65ae7435a06959c478..8369fd215f4f05e7e165e5d0629a187cf0202029 100644 --- a/polkadot/node/subsystem-types/src/runtime_client.rs +++ b/polkadot/node/subsystem-types/src/runtime_client.rs @@ -16,12 +16,12 @@ use async_trait::async_trait; use polkadot_primitives::{ - async_backing, runtime_api::ParachainHost, slashing, Block, BlockNumber, CandidateCommitments, - CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, - ExecutorParams, GroupRotationInfo, Hash, Id, InboundDownwardMessage, InboundHrmpMessage, - OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, - SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, - ValidatorSignature, + async_backing, runtime_api::ParachainHost, slashing, vstaging, Block, BlockNumber, + CandidateCommitments, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, + DisputeState, ExecutorParams, GroupRotationInfo, Hash, Id, InboundDownwardMessage, + InboundHrmpMessage, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, + ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, + ValidatorId, ValidatorIndex, ValidatorSignature, }; use sc_transaction_pool_api::OffchainTransactionPoolFactory; use sp_api::{ApiError, ApiExt, ProvideRuntimeApi}; @@ -255,6 +255,16 @@ pub trait RuntimeApiSubsystemClient { at: Hash, para_id: Id, ) -> Result, ApiError>; + + // === v8 === + + /// Gets the disabled validators at a specific block height + async fn disabled_validators(&self, at: Hash) -> Result, ApiError>; + + // === v9 === + + /// Get the node features. + async fn node_features(&self, at: Hash) -> Result; } /// Default implementation of [`RuntimeApiSubsystemClient`] using the client. 
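The `CandidateValidationMessage` variants reworked above now carry named fields instead of positional tuples, and the former `PvfExecTimeoutKind` discriminator is passed as `PvfExecKind`. A minimal sketch of how a caller might build the new `PreCheck` request, assuming the `futures` oneshot channel the subsystems normally use (illustrative only, not part of this change):

use futures::channel::oneshot;
use polkadot_node_subsystem_types::messages::CandidateValidationMessage;
use polkadot_primitives::{Hash, ValidationCodeHash};

fn precheck_message(
    relay_parent: Hash,
    validation_code_hash: ValidationCodeHash,
) -> CandidateValidationMessage {
    // The response type of the channel is inferred from the `response_sender` field;
    // a real caller would keep the receiver and await the pre-check outcome.
    let (response_sender, _response_receiver) = oneshot::channel();
    CandidateValidationMessage::PreCheck { relay_parent, validation_code_hash, response_sender }
}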
@@ -497,11 +507,18 @@ where self.client.runtime_api().para_backing_state(at, para_id) } - /// Returns candidate's acceptance limitations for asynchronous backing for a relay parent. async fn async_backing_params( &self, at: Hash, ) -> Result { self.client.runtime_api().async_backing_params(at) } + + async fn node_features(&self, at: Hash) -> Result { + self.client.runtime_api().node_features(at) + } + + async fn disabled_validators(&self, at: Hash) -> Result, ApiError> { + self.client.runtime_api().disabled_validators(at) + } } diff --git a/polkadot/node/subsystem-util/src/lib.rs b/polkadot/node/subsystem-util/src/lib.rs index 57e4f9cde09af09374214a01b570be5e25cdfa4d..a5f3e9d4a0c0e08ebb3b738054ae758f1c98e822 100644 --- a/polkadot/node/subsystem-util/src/lib.rs +++ b/polkadot/node/subsystem-util/src/lib.rs @@ -226,6 +226,7 @@ specialize_requests! { fn request_unapplied_slashes() -> Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)>; UnappliedSlashes; fn request_key_ownership_proof(validator_id: ValidatorId) -> Option; KeyOwnershipProof; fn request_submit_report_dispute_lost(dp: slashing::DisputeProof, okop: slashing::OpaqueKeyOwnershipProof) -> Option<()>; SubmitReportDisputeLost; + fn request_disabled_validators() -> Vec; DisabledValidators; fn request_async_backing_params() -> AsyncBackingParams; AsyncBackingParams; } diff --git a/polkadot/node/subsystem-util/src/runtime/mod.rs b/polkadot/node/subsystem-util/src/runtime/mod.rs index 8d7cef88a70e082779cd6e0892502f27e486c7c6..aada7a5d77abb5470294e220604a7b4e0c523773 100644 --- a/polkadot/node/subsystem-util/src/runtime/mod.rs +++ b/polkadot/node/subsystem-util/src/runtime/mod.rs @@ -30,8 +30,8 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_types::UnpinHandle; use polkadot_primitives::{ - slashing, AsyncBackingParams, CandidateEvent, CandidateHash, CoreState, EncodeAs, - ExecutorParams, GroupIndex, GroupRotationInfo, Hash, IndexedVec, OccupiedCore, + slashing, vstaging::NodeFeatures, AsyncBackingParams, CandidateEvent, CandidateHash, CoreState, + EncodeAs, ExecutorParams, GroupIndex, GroupRotationInfo, Hash, IndexedVec, OccupiedCore, ScrapedOnChainVotes, SessionIndex, SessionInfo, Signed, SigningContext, UncheckedSigned, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, LEGACY_MIN_BACKING_VOTES, }; @@ -507,3 +507,32 @@ pub async fn request_min_backing_votes( min_backing_votes_res } } + +/// Request the node features enabled in the runtime. +/// Pass in the session index for caching purposes, as it should only change on session boundaries. +/// Prior to runtime API version 9, just return `None`. +pub async fn request_node_features( + parent: Hash, + session_index: SessionIndex, + sender: &mut impl overseer::SubsystemSender, +) -> Result> { + let res = recv_runtime( + request_from_runtime(parent, sender, |tx| { + RuntimeApiRequest::NodeFeatures(session_index, tx) + }) + .await, + ) + .await; + + if let Err(Error::RuntimeRequest(RuntimeApiError::NotSupported { .. 
})) = res { + gum::trace!( + target: LOG_TARGET, + ?parent, + "Querying the node features from the runtime is not supported by the current Runtime API", + ); + + Ok(None) + } else { + res.map(Some) + } +} diff --git a/polkadot/node/subsystem/src/lib.rs b/polkadot/node/subsystem/src/lib.rs index df379f2d97b6ab9e2ac29608d64e145638e559b9..8b407c75a0c82722419920e3288e2f83d8fb8419 100644 --- a/polkadot/node/subsystem/src/lib.rs +++ b/polkadot/node/subsystem/src/lib.rs @@ -28,7 +28,7 @@ pub use polkadot_overseer::{self as overseer, *}; pub use polkadot_node_subsystem_types::{ errors::{self, *}, - ActivatedLeaf, LeafStatus, + ActivatedLeaf, }; /// Re-export of all messages type, including the wrapper type. diff --git a/polkadot/node/test/client/src/block_builder.rs b/polkadot/node/test/client/src/block_builder.rs index b4ff050ff1529f29886f281a1a0f0ebffed2a13a..57e6008917af9cf6a5ceb6d173f680f9d90e5ebb 100644 --- a/polkadot/node/test/client/src/block_builder.rs +++ b/polkadot/node/test/client/src/block_builder.rs @@ -14,12 +14,12 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use crate::{Client, FullBackend}; +use crate::Client; use parity_scale_codec::{Decode, Encode}; use polkadot_primitives::{Block, InherentData as ParachainsInherentData}; use polkadot_test_runtime::UncheckedExtrinsic; use polkadot_test_service::GetLastTimestamp; -use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; +use sc_block_builder::{BlockBuilder, BlockBuilderBuilder}; use sp_api::ProvideRuntimeApi; use sp_consensus_babe::{ digests::{PreDigest, SecondaryPlainPreDigest}, @@ -34,9 +34,7 @@ pub trait InitPolkadotBlockBuilder { /// /// This will automatically create and push the inherents for you to make the block valid for /// the test runtime. - fn init_polkadot_block_builder( - &self, - ) -> sc_block_builder::BlockBuilder; + fn init_polkadot_block_builder(&self) -> sc_block_builder::BlockBuilder; /// Init a Polkadot specific block builder at a specific block that works for the test runtime. 
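The `request_node_features` helper introduced above degrades gracefully on older runtimes: a `NotSupported` runtime API error is mapped to `Ok(None)` rather than bubbling up. A small sketch of how node-side code might consume the result and probe a single feature bit (the helper call is shown as a comment; the bit index is hypothetical):

use polkadot_primitives::vstaging::NodeFeatures;

/// Treat a missing API, or a bit beyond the runtime-provided length, as "disabled".
fn is_feature_enabled(features: Option<&NodeFeatures>, bit: usize) -> bool {
    features.and_then(|f| f.get(bit)).map(|b| *b).unwrap_or(false)
}

// Inside a subsystem, after resolving the session index for a leaf:
// let features = request_node_features(leaf_hash, session_index, &mut sender).await?;
// let enabled = is_feature_enabled(features.as_ref(), SOME_FEATURE_BIT);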
/// @@ -45,11 +43,11 @@ pub trait InitPolkadotBlockBuilder { fn init_polkadot_block_builder_at( &self, hash: ::Hash, - ) -> sc_block_builder::BlockBuilder; + ) -> sc_block_builder::BlockBuilder; } impl InitPolkadotBlockBuilder for Client { - fn init_polkadot_block_builder(&self) -> BlockBuilder { + fn init_polkadot_block_builder(&self) -> BlockBuilder { let chain_info = self.chain_info(); self.init_polkadot_block_builder_at(chain_info.best_hash) } @@ -57,7 +55,7 @@ impl InitPolkadotBlockBuilder for Client { fn init_polkadot_block_builder_at( &self, hash: ::Hash, - ) -> BlockBuilder { + ) -> BlockBuilder { let last_timestamp = self.runtime_api().get_last_timestamp(hash).expect("Get last timestamp"); @@ -90,8 +88,12 @@ impl InitPolkadotBlockBuilder for Client { )], }; - let mut block_builder = self - .new_block_at(hash, digest, false) + let mut block_builder = BlockBuilderBuilder::new(self) + .on_parent_block(hash) + .fetch_parent_block_number(&self) + .expect("Fetches parent block number") + .with_inherent_digests(digest) + .build() .expect("Creates new block builder for test runtime"); let mut inherent_data = sp_inherents::InherentData::new(); @@ -144,7 +146,7 @@ pub trait BlockBuilderExt { ) -> Result<(), sp_blockchain::Error>; } -impl BlockBuilderExt for BlockBuilder<'_, Block, Client, FullBackend> { +impl BlockBuilderExt for BlockBuilder<'_, Block, Client> { fn push_polkadot_extrinsic( &mut self, ext: UncheckedExtrinsic, diff --git a/polkadot/node/test/client/src/lib.rs b/polkadot/node/test/client/src/lib.rs index 5d97ffcdf1da4d4ec42ead64f9b8e78d98227368..6b205c09f2f3297cb8b53bfb62cc80a1ed25c3a7 100644 --- a/polkadot/node/test/client/src/lib.rs +++ b/polkadot/node/test/client/src/lib.rs @@ -51,7 +51,7 @@ pub struct GenesisParameters; impl substrate_test_client::GenesisInit for GenesisParameters { fn genesis_storage(&self) -> Storage { - polkadot_test_service::chain_spec::polkadot_local_testnet_genesis() + polkadot_test_service::chain_spec::polkadot_local_testnet_config() .build_storage() .expect("Builds test runtime genesis storage") } diff --git a/polkadot/node/test/service/Cargo.toml b/polkadot/node/test/service/Cargo.toml index 1924418a48565e12fa5f1b628943f604ba9c4116..437fa66b75a2218efdc7069ab5fe001db3b71c52 100644 --- a/polkadot/node/test/service/Cargo.toml +++ b/polkadot/node/test/service/Cargo.toml @@ -11,7 +11,7 @@ futures = "0.3.21" hex = "0.4.3" gum = { package = "tracing-gum", path = "../../gum" } rand = "0.8.5" -serde_json = "1.0.106" +serde_json = "1.0.108" tempfile = "3.2.0" tokio = "1.24.2" @@ -59,7 +59,6 @@ substrate-test-client = { path = "../../../../substrate/test-utils/client" } [dev-dependencies] pallet-balances = { path = "../../../../substrate/frame/balances", default-features = false } -serde_json = "1.0.107" substrate-test-utils = { path = "../../../../substrate/test-utils" } tokio = { version = "1.24.2", features = ["macros"] } diff --git a/polkadot/node/test/service/src/chain_spec.rs b/polkadot/node/test/service/src/chain_spec.rs index bedb1250b2a9853a7bcf96867b717cee92f272c5..0295090b9521551533d012addeda064595dbeac3 100644 --- a/polkadot/node/test/service/src/chain_spec.rs +++ b/polkadot/node/test/service/src/chain_spec.rs @@ -31,8 +31,7 @@ use test_runtime_constants::currency::DOTS; const DEFAULT_PROTOCOL_ID: &str = "dot"; /// The `ChainSpec` parameterized for polkadot test runtime. 
-pub type PolkadotChainSpec = - sc_service::GenericChainSpec; +pub type PolkadotChainSpec = sc_service::GenericChainSpec<(), Extensions>; /// Returns the properties for the [`PolkadotChainSpec`]. pub fn polkadot_chain_spec_properties() -> serde_json::map::Map { @@ -46,22 +45,21 @@ pub fn polkadot_chain_spec_properties() -> serde_json::map::Map PolkadotChainSpec { - PolkadotChainSpec::from_genesis( - "Local Testnet", - "local_testnet", - ChainType::Local, - || polkadot_local_testnet_genesis(), - vec![], - None, - Some(DEFAULT_PROTOCOL_ID), - None, - Some(polkadot_chain_spec_properties()), + PolkadotChainSpec::builder( + polkadot_test_runtime::WASM_BINARY.expect("Wasm binary must be built for testing"), Default::default(), ) + .with_name("Local Testnet") + .with_id("local_testnet") + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(polkadot_local_testnet_genesis()) + .with_protocol_id(DEFAULT_PROTOCOL_ID) + .with_properties(polkadot_chain_spec_properties()) + .build() } /// Local testnet genesis config (multivalidator Alice + Bob) -pub fn polkadot_local_testnet_genesis() -> polkadot_test_runtime::RuntimeGenesisConfig { +pub fn polkadot_local_testnet_genesis() -> serde_json::Value { polkadot_testnet_genesis( vec![get_authority_keys_from_seed("Alice"), get_authority_keys_from_seed("Bob")], get_account_id_from_seed::("Alice"), @@ -114,7 +112,7 @@ fn polkadot_testnet_genesis( )>, root_key: AccountId, endowed_accounts: Option>, -) -> polkadot_test_runtime::RuntimeGenesisConfig { +) -> serde_json::Value { use polkadot_test_runtime as runtime; let endowed_accounts: Vec = endowed_accounts.unwrap_or_else(testnet_accounts); @@ -122,17 +120,12 @@ fn polkadot_testnet_genesis( const ENDOWMENT: u128 = 1_000_000 * DOTS; const STASH: u128 = 100 * DOTS; - runtime::RuntimeGenesisConfig { - system: runtime::SystemConfig { - code: runtime::WASM_BINARY.expect("Wasm binary must be built for testing").to_vec(), - ..Default::default() - }, - indices: runtime::IndicesConfig { indices: vec![] }, - balances: runtime::BalancesConfig { - balances: endowed_accounts.iter().map(|k| (k.clone(), ENDOWMENT)).collect(), + serde_json::json!({ + "balances": { + "balances": endowed_accounts.iter().map(|k| (k.clone(), ENDOWMENT)).collect::>(), }, - session: runtime::SessionConfig { - keys: initial_authorities + "session": { + "keys": initial_authorities .iter() .map(|x| { ( @@ -149,33 +142,23 @@ fn polkadot_testnet_genesis( }) .collect::>(), }, - staking: runtime::StakingConfig { - minimum_validator_count: 1, - validator_count: 2, - stakers: initial_authorities + "staking": { + "minimumValidatorCount": 1, + "validatorCount": 2, + "stakers": initial_authorities .iter() - .map(|x| (x.0.clone(), x.0.clone(), STASH, runtime::StakerStatus::Validator)) - .collect(), - invulnerables: initial_authorities.iter().map(|x| x.0.clone()).collect(), - force_era: Forcing::NotForcing, - slash_reward_fraction: Perbill::from_percent(10), - ..Default::default() - }, - babe: runtime::BabeConfig { - authorities: vec![], - epoch_config: Some(BABE_GENESIS_EPOCH_CONFIG), - ..Default::default() + .map(|x| (x.0.clone(), x.0.clone(), STASH, runtime::StakerStatus::::Validator)) + .collect::>(), + "invulnerables": initial_authorities.iter().map(|x| x.0.clone()).collect::>(), + "forceEra": Forcing::NotForcing, + "slashRewardFraction": Perbill::from_percent(10), }, - grandpa: Default::default(), - authority_discovery: runtime::AuthorityDiscoveryConfig { - keys: vec![], - ..Default::default() + "babe": { + "epochConfig": 
Some(BABE_GENESIS_EPOCH_CONFIG), }, - claims: runtime::ClaimsConfig { claims: vec![], vesting: vec![] }, - vesting: runtime::VestingConfig { vesting: vec![] }, - sudo: runtime::SudoConfig { key: Some(root_key) }, - configuration: runtime::ConfigurationConfig { - config: polkadot_runtime_parachains::configuration::HostConfiguration { + "sudo": { "key": Some(root_key) }, + "configuration": { + "config": polkadot_runtime_parachains::configuration::HostConfiguration { validation_upgrade_cooldown: 10u32, validation_upgrade_delay: 5, code_retention_period: 1200, @@ -188,8 +171,8 @@ fn polkadot_testnet_genesis( minimum_validation_upgrade_delay: 5, ..Default::default() }, - }, - } + } + }) } /// Can be called for a `Configuration` to check if it is a configuration for the `Test` network. diff --git a/polkadot/node/tracking-allocator/Cargo.toml b/polkadot/node/tracking-allocator/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..b1b330913440bdae890c4b0a6092d0c191ff0ced --- /dev/null +++ b/polkadot/node/tracking-allocator/Cargo.toml @@ -0,0 +1,7 @@ +[package] +name = "staging-tracking-allocator" +description = "Tracking allocator to control the amount of memory consumed by the process" +version = "1.0.0" +authors.workspace = true +edition.workspace = true +license.workspace = true diff --git a/polkadot/node/tracking-allocator/src/lib.rs b/polkadot/node/tracking-allocator/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..ab8597b5c382d80dc90e55d7f2a2e0ef2c906ea6 --- /dev/null +++ b/polkadot/node/tracking-allocator/src/lib.rs @@ -0,0 +1,247 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Tracking/limiting global allocator. Calculates the peak allocation between two checkpoints for +//! the whole process. Accepts an optional limit and a failure handler which is called if the limit +//! is overflown. + +use core::{ + alloc::{GlobalAlloc, Layout}, + ops::{Deref, DerefMut}, +}; +use std::{ + cell::UnsafeCell, + ptr::null_mut, + sync::atomic::{AtomicBool, Ordering}, +}; + +struct Spinlock { + lock: AtomicBool, + data: UnsafeCell, +} + +struct SpinlockGuard<'a, T: 'a> { + lock: &'a Spinlock, +} + +// SAFETY: We require that the data inside of the `SpinLock` is `Send`, so it can be sent +// and accessed by any thread as long as it's accessed by only one thread at a time. +// The `SpinLock` provides an exclusive lock over it, so it guarantees that multiple +// threads cannot access it at the same time, hence it implements `Sync` (that is, it can be +// accessed concurrently from multiple threads, even though the `T` itself might not +// necessarily be `Sync` too). 
+unsafe impl Sync for Spinlock {} + +impl Spinlock { + pub const fn new(t: T) -> Spinlock { + Spinlock { lock: AtomicBool::new(false), data: UnsafeCell::new(t) } + } + + #[inline] + pub fn lock(&self) -> SpinlockGuard { + loop { + // Try to acquire the lock. + if self + .lock + .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed) + .is_ok() + { + return SpinlockGuard { lock: self } + } + // We failed to acquire the lock; wait until it's unlocked. + // + // In theory this should result in less coherency traffic as unlike `compare_exchange` + // it is a read-only operation, so multiple cores can execute it simultaneously + // without taking an exclusive lock over the cache line. + while self.lock.load(Ordering::Relaxed) { + std::hint::spin_loop(); + } + } + } + + // SAFETY: It should be only called from the guard's destructor. Calling it explicitly while + // the guard is alive is undefined behavior, as it breaks the security contract of `Deref` and + // `DerefMut`, which implies that lock is held at the moment of dereferencing. + #[inline] + unsafe fn unlock(&self) { + self.lock.store(false, Ordering::Release); + } +} + +impl Deref for SpinlockGuard<'_, T> { + type Target = T; + + fn deref(&self) -> &T { + // SAFETY: It is safe to dereference a guard to the `UnsafeCell` underlying data as the + // presence of the guard means the data is already locked. + unsafe { &*self.lock.data.get() } + } +} + +impl DerefMut for SpinlockGuard<'_, T> { + fn deref_mut(&mut self) -> &mut T { + // SAFETY: Same as for `Deref::deref`. + unsafe { &mut *self.lock.data.get() } + } +} + +impl Drop for SpinlockGuard<'_, T> { + fn drop(&mut self) { + // SAFETY: Calling `unlock` is only safe when it's guaranteed no guard outlives the + // unlocking point; here, the guard is dropped, so it is safe. + unsafe { self.lock.unlock() } + } +} + +struct TrackingAllocatorData { + current: isize, + peak: isize, + limit: isize, + failure_handler: Option>, +} + +impl TrackingAllocatorData { + fn start_tracking( + mut guard: SpinlockGuard, + limit: isize, + failure_handler: Option>, + ) { + guard.current = 0; + guard.peak = 0; + guard.limit = limit; + // Cannot drop it yet, as it would trigger a deallocation + let old_handler = guard.failure_handler.take(); + guard.failure_handler = failure_handler; + drop(guard); + drop(old_handler); + } + + fn end_tracking(mut guard: SpinlockGuard) -> isize { + let peak = guard.peak; + guard.limit = 0; + // Cannot drop it yet, as it would trigger a deallocation + let old_handler = guard.failure_handler.take(); + drop(guard); + drop(old_handler); + peak + } + + #[inline] + fn track_and_check_limits( + mut guard: SpinlockGuard, + alloc: isize, + ) -> Option> { + guard.current += alloc; + if guard.current > guard.peak { + guard.peak = guard.current; + } + if guard.limit == 0 || guard.peak <= guard.limit { + None + } else { + Some(guard) + } + } +} + +static ALLOCATOR_DATA: Spinlock = + Spinlock::new(TrackingAllocatorData { current: 0, peak: 0, limit: 0, failure_handler: None }); + +pub struct TrackingAllocator(pub A); + +impl TrackingAllocator { + /// Start tracking memory allocations and deallocations. + /// + /// # Safety + /// + /// Failure handler is called with the allocator being in the locked state. Thus, no + /// allocations or deallocations are allowed inside the failure handler; otherwise, a + /// deadlock will occur. 
+ pub unsafe fn start_tracking( + &self, + limit: Option, + failure_handler: Option>, + ) { + TrackingAllocatorData::start_tracking( + ALLOCATOR_DATA.lock(), + limit.unwrap_or(0), + failure_handler, + ); + } + + /// End tracking and return the peak allocation value in bytes (as `isize`). The peak allocation + /// value is not guaranteed to be either non-zero or positive. + pub fn end_tracking(&self) -> isize { + TrackingAllocatorData::end_tracking(ALLOCATOR_DATA.lock()) + } +} + +#[cold] +#[inline(never)] +unsafe fn fail_allocation(guard: SpinlockGuard) -> *mut u8 { + if let Some(failure_handler) = &guard.failure_handler { + failure_handler() + } + null_mut() +} + +unsafe impl GlobalAlloc for TrackingAllocator { + // SAFETY: + // * The wrapped methods are as safe as the underlying allocator implementation is + + #[inline] + unsafe fn alloc(&self, layout: Layout) -> *mut u8 { + let guard = ALLOCATOR_DATA.lock(); + if let Some(guard) = + TrackingAllocatorData::track_and_check_limits(guard, layout.size() as isize) + { + fail_allocation(guard) + } else { + self.0.alloc(layout) + } + } + + #[inline] + unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 { + let guard = ALLOCATOR_DATA.lock(); + if let Some(guard) = + TrackingAllocatorData::track_and_check_limits(guard, layout.size() as isize) + { + fail_allocation(guard) + } else { + self.0.alloc_zeroed(layout) + } + } + + #[inline] + unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) -> () { + let guard = ALLOCATOR_DATA.lock(); + TrackingAllocatorData::track_and_check_limits(guard, -(layout.size() as isize)); + self.0.dealloc(ptr, layout) + } + + #[inline] + unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 { + let guard = ALLOCATOR_DATA.lock(); + if let Some(guard) = TrackingAllocatorData::track_and_check_limits( + guard, + (new_size as isize) - (layout.size() as isize), + ) { + fail_allocation(guard) + } else { + self.0.realloc(ptr, layout, new_size) + } + } +} diff --git a/polkadot/node/zombienet-backchannel/Cargo.toml b/polkadot/node/zombienet-backchannel/Cargo.toml index 9bf56b550bbcd7803806bf37d2a40b9ac5a92e01..c1b08b4a2bb94a0a5fce172f54f64dcdf3cd653c 100644 --- a/polkadot/node/zombienet-backchannel/Cargo.toml +++ b/polkadot/node/zombienet-backchannel/Cargo.toml @@ -19,4 +19,4 @@ reqwest = { version = "0.11", features = ["rustls-tls"], default-features = fals thiserror = "1.0.48" gum = { package = "tracing-gum", path = "../gum" } serde = { version = "1.0", features = ["derive"] } -serde_json = "1" +serde_json = "1.0.108" diff --git a/polkadot/parachain/Cargo.toml b/polkadot/parachain/Cargo.toml index c44ba02e3aeb0f7ada07698ab69703e9ff2ecae7..27aa117a87f0bb932617cf159bd9e8814ed87a3d 100644 --- a/polkadot/parachain/Cargo.toml +++ b/polkadot/parachain/Cargo.toml @@ -11,11 +11,11 @@ version = "1.0.0" # this crate for WASM. This is critical to avoid forcing all parachain WASM into implementing # various unnecessary Substrate-specific endpoints.
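A minimal usage sketch for the tracking allocator added above: install it as the global allocator wrapping `std::alloc::System`, then measure the peak allocation between two checkpoints. The crate is referenced under its package name from the new `Cargo.toml`; the workload is illustrative only.

use staging_tracking_allocator::TrackingAllocator;
use std::alloc::System;

#[global_allocator]
static ALLOCATOR: TrackingAllocator<System> = TrackingAllocator(System);

fn main() {
    // SAFETY: no failure handler is installed, so nothing can allocate while the
    // allocator's internal spinlock is held.
    unsafe { ALLOCATOR.start_tracking(None, None) };

    // Allocation-heavy work whose peak footprint we want to know.
    let buf = vec![0u8; 8 * 1024 * 1024];
    drop(buf);

    let peak = ALLOCATOR.end_tracking();
    println!("peak allocation between checkpoints: {peak} bytes");
}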
parity-scale-codec = { version = "3.6.1", default-features = false, features = [ "derive" ] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive", "serde"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } sp-std = { path = "../../substrate/primitives/std", default-features = false } sp-runtime = { path = "../../substrate/primitives/runtime", default-features = false, features = ["serde"] } sp-core = { path = "../../substrate/primitives/core", default-features = false, features = ["serde"] } -frame-support = { path = "../../substrate/frame/support", default-features = false } +sp-weights = { path = "../../substrate/primitives/weights", default-features = false } polkadot-core-primitives = { path = "../core-primitives", default-features = false } derive_more = "0.99.11" bounded-collections = { version = "0.1.8", default-features = false, features = ["serde"] } @@ -28,7 +28,6 @@ default = [ "std" ] wasm-api = [] std = [ "bounded-collections/std", - "frame-support/std", "parity-scale-codec/std", "polkadot-core-primitives/std", "scale-info/std", @@ -36,8 +35,6 @@ std = [ "sp-core/std", "sp-runtime/std", "sp-std/std", + "sp-weights/std", ] -runtime-benchmarks = [ - "frame-support/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", -] +runtime-benchmarks = [ "sp-runtime/runtime-benchmarks" ] diff --git a/polkadot/parachain/src/primitives.rs b/polkadot/parachain/src/primitives.rs index 5f77810f5c235adffc5bde8c377b1058ef7a8644..5a1efdf898217a18972cab8c9ea49a41e5a2280f 100644 --- a/polkadot/parachain/src/primitives.rs +++ b/polkadot/parachain/src/primitives.rs @@ -20,12 +20,12 @@ use sp_std::vec::Vec; use bounded_collections::{BoundedVec, ConstU32}; -use frame_support::weights::Weight; use parity_scale_codec::{CompactAs, Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; use serde::{Deserialize, Serialize}; use sp_core::{bytes, RuntimeDebug, TypeId}; use sp_runtime::traits::Hash as _; +use sp_weights::Weight; use polkadot_core_primitives::{Hash, OutboundHrmpMessage}; @@ -333,7 +333,7 @@ impl DmpMessageHandler for () { } /// The aggregate XCMP message format. -#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, TypeInfo)] +#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, TypeInfo, RuntimeDebug)] pub enum XcmpMessageFormat { /// Encoded `VersionedXcm` messages, all concatenated. 
ConcatenatedVersionedXcm, diff --git a/polkadot/parachain/test-parachains/Cargo.toml b/polkadot/parachain/test-parachains/Cargo.toml index a30be9c678af83871182389e2c391e041c61bdc1..3252d1f83cd3ab2e72b9fb8945ee14b060c037f8 100644 --- a/polkadot/parachain/test-parachains/Cargo.toml +++ b/polkadot/parachain/test-parachains/Cargo.toml @@ -19,4 +19,4 @@ sp-core = { path = "../../../substrate/primitives/core" } [features] default = [ "std" ] -std = [ "adder/std", "halt/std" ] +std = [ "adder/std", "halt/std", "parity-scale-codec/std" ] diff --git a/polkadot/parachain/test-parachains/adder/Cargo.toml b/polkadot/parachain/test-parachains/adder/Cargo.toml index f22b5ccfdcc9894bd1e6cd29212648a1cdd58e16..1a47328b28e9caedc93b99d8c752f7fd0a397342 100644 --- a/polkadot/parachain/test-parachains/adder/Cargo.toml +++ b/polkadot/parachain/test-parachains/adder/Cargo.toml @@ -23,4 +23,4 @@ substrate-wasm-builder = { path = "../../../../substrate/utils/wasm-builder" } [features] default = [ "std" ] -std = [ "parachain/std", "sp-io/std", "sp-std/std" ] +std = [ "parachain/std", "parity-scale-codec/std", "sp-io/std", "sp-std/std" ] diff --git a/polkadot/parachain/test-parachains/undying/Cargo.toml b/polkadot/parachain/test-parachains/undying/Cargo.toml index 8690da5a4bdd36ffce083032bed0200d2c9960d7..273eef4b63a0e8de50ff4a9e1b4ecb3762510d85 100644 --- a/polkadot/parachain/test-parachains/undying/Cargo.toml +++ b/polkadot/parachain/test-parachains/undying/Cargo.toml @@ -24,4 +24,10 @@ substrate-wasm-builder = { path = "../../../../substrate/utils/wasm-builder" } [features] default = [ "std" ] -std = [ "parachain/std", "sp-io/std", "sp-std/std" ] +std = [ + "log/std", + "parachain/std", + "parity-scale-codec/std", + "sp-io/std", + "sp-std/std", +] diff --git a/polkadot/parachain/test-parachains/undying/collator/Cargo.toml b/polkadot/parachain/test-parachains/undying/collator/Cargo.toml index 4569d4e153b19b6858f26acc77089d027e18be13..578c3d6715dc30c1130833680465a0398f4c495c 100644 --- a/polkadot/parachain/test-parachains/undying/collator/Cargo.toml +++ b/polkadot/parachain/test-parachains/undying/collator/Cargo.toml @@ -39,3 +39,6 @@ sc-service = { path = "../../../../../substrate/client/service" } sp-keyring = { path = "../../../../../substrate/primitives/keyring" } tokio = { version = "1.24.2", features = ["macros"] } + +[features] +network-protocol-staging = [ "polkadot-cli/network-protocol-staging" ] diff --git a/polkadot/parachain/test-parachains/undying/collator/src/cli.rs b/polkadot/parachain/test-parachains/undying/collator/src/cli.rs index cd16133dbf197019bac15e0bca381759eabce5b6..d04122f2f6898a053d5cb74902fa840341465038 100644 --- a/polkadot/parachain/test-parachains/undying/collator/src/cli.rs +++ b/polkadot/parachain/test-parachains/undying/collator/src/cli.rs @@ -18,6 +18,7 @@ use clap::Parser; use sc_cli::SubstrateCli; +use std::path::PathBuf; /// Sub-commands supported by the collator. #[derive(Debug, Parser)] @@ -34,6 +35,10 @@ pub enum Subcommand { /// Command for exporting the genesis state of the parachain #[derive(Debug, Parser)] pub struct ExportGenesisStateCommand { + /// Output file name or stdout if unspecified. + #[arg()] + pub output: Option, + /// Id of the parachain this collator collates for. #[arg(long, default_value_t = 100)] pub parachain_id: u32, @@ -50,7 +55,11 @@ pub struct ExportGenesisStateCommand { /// Command for exporting the genesis wasm file. 
#[derive(Debug, Parser)] -pub struct ExportGenesisWasmCommand {} +pub struct ExportGenesisWasmCommand { + /// Output file name or stdout if unspecified. + #[arg()] + pub output: Option, +} #[allow(missing_docs)] #[derive(Debug, Parser)] diff --git a/polkadot/parachain/test-parachains/undying/collator/src/main.rs b/polkadot/parachain/test-parachains/undying/collator/src/main.rs index e564e221f01376ff8b573ec6a1fd1f2937156f77..c6b338a20dc051f1a899a51eccb64a24a72663f5 100644 --- a/polkadot/parachain/test-parachains/undying/collator/src/main.rs +++ b/polkadot/parachain/test-parachains/undying/collator/src/main.rs @@ -22,6 +22,10 @@ use polkadot_node_subsystem::messages::{CollationGenerationMessage, CollatorProt use polkadot_primitives::Id as ParaId; use sc_cli::{Error as SubstrateCliError, SubstrateCli}; use sp_core::hexdisplay::HexDisplay; +use std::{ + fs, + io::{self, Write}, +}; use test_parachain_undying_collator::Collator; mod cli; @@ -35,14 +39,30 @@ fn main() -> Result<()> { // `pov_size` and `pvf_complexity` need to match the ones that we start the collator // with. let collator = Collator::new(params.pov_size, params.pvf_complexity); - println!("0x{:?}", HexDisplay::from(&collator.genesis_head())); + + let output_buf = + format!("0x{:?}", HexDisplay::from(&collator.genesis_head())).into_bytes(); + + if let Some(output) = params.output { + std::fs::write(output, output_buf)?; + } else { + std::io::stdout().write_all(&output_buf)?; + } Ok::<_, Error>(()) }, - Some(cli::Subcommand::ExportGenesisWasm(_params)) => { + Some(cli::Subcommand::ExportGenesisWasm(params)) => { // We pass some dummy values for `pov_size` and `pvf_complexity` as these don't // matter for `wasm` export. - println!("0x{:?}", HexDisplay::from(&Collator::default().validation_code())); + let output_buf = + format!("0x{:?}", HexDisplay::from(&Collator::default().validation_code())) + .into_bytes(); + + if let Some(output) = params.output { + fs::write(output, output_buf)?; + } else { + io::stdout().write_all(&output_buf)?; + } Ok(()) }, diff --git a/polkadot/primitives/Cargo.toml b/polkadot/primitives/Cargo.toml index 9d17b70b817432fae385d5b96a30105d87a77e68..316644a372d60519b4568c3f27310bf935f90643 100644 --- a/polkadot/primitives/Cargo.toml +++ b/polkadot/primitives/Cargo.toml @@ -4,12 +4,13 @@ version = "1.0.0" authors.workspace = true edition.workspace = true license.workspace = true +description = "Shared primitives used by Polkadot runtime" [dependencies] -bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } +bitvec = { version = "1.0.0", default-features = false, features = ["alloc", "serde"] } hex-literal = "0.4.1" parity-scale-codec = { version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] } -scale-info = { version = "2.5.0", default-features = false, features = ["bit-vec", "derive", "serde"] } +scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive", "serde"] } serde = { version = "1.0.188", default-features = false, features = ["derive", "alloc"] } application-crypto = { package = "sp-application-crypto", path = "../../substrate/primitives/application-crypto", default-features = false, features = ["serde"] } diff --git a/polkadot/primitives/src/lib.rs b/polkadot/primitives/src/lib.rs index 5adb6d25313475cb7ca389d80ba510edecdc58c4..2570bcadf606ab8ef0809a1f258a6068263f1db1 100644 --- a/polkadot/primitives/src/lib.rs +++ b/polkadot/primitives/src/lib.rs @@ -35,24 +35,24 @@ pub mod runtime_api; // Primitives requiring 
versioning must not be exported and must be referred by an exact version. pub use v6::{ async_backing, byzantine_threshold, check_candidate_backing, collator_signature_payload, - effective_minimum_backing_votes, metric_definitions, slashing, supermajority_threshold, - well_known_keys, AbridgedHostConfiguration, AbridgedHrmpChannel, AccountId, AccountIndex, - AccountPublic, ApprovalVote, AssignmentId, AsyncBackingParams, AuthorityDiscoveryId, - AvailabilityBitfield, BackedCandidate, Balance, BlakeTwo256, Block, BlockId, BlockNumber, - CandidateCommitments, CandidateDescriptor, CandidateEvent, CandidateHash, CandidateIndex, - CandidateReceipt, CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, CollatorId, - CollatorSignature, CommittedCandidateReceipt, CompactStatement, ConsensusLog, CoreIndex, - CoreState, DisputeState, DisputeStatement, DisputeStatementSet, DownwardMessage, EncodeAs, - ExecutorParam, ExecutorParams, ExecutorParamsHash, ExplicitDisputeStatement, GroupIndex, - GroupRotationInfo, Hash, HashT, HeadData, Header, HorizontalMessages, HrmpChannelId, Id, - InboundDownwardMessage, InboundHrmpMessage, IndexedVec, InherentData, - InvalidDisputeStatementKind, Moment, MultiDisputeStatementSet, Nonce, OccupiedCore, - OccupiedCoreAssumption, OutboundHrmpMessage, ParathreadClaim, ParathreadEntry, - PersistedValidationData, PvfCheckStatement, PvfExecTimeoutKind, PvfPrepTimeoutKind, - RuntimeMetricLabel, RuntimeMetricLabelValue, RuntimeMetricLabelValues, RuntimeMetricLabels, - RuntimeMetricOp, RuntimeMetricUpdate, ScheduledCore, ScrapedOnChainVotes, SessionIndex, - SessionInfo, Signature, Signed, SignedAvailabilityBitfield, SignedAvailabilityBitfields, - SignedStatement, SigningContext, Slot, UncheckedSigned, UncheckedSignedAvailabilityBitfield, + effective_minimum_backing_votes, executor_params, metric_definitions, slashing, + supermajority_threshold, well_known_keys, AbridgedHostConfiguration, AbridgedHrmpChannel, + AccountId, AccountIndex, AccountPublic, ApprovalVote, AssignmentId, AsyncBackingParams, + AuthorityDiscoveryId, AvailabilityBitfield, BackedCandidate, Balance, BlakeTwo256, Block, + BlockId, BlockNumber, CandidateCommitments, CandidateDescriptor, CandidateEvent, CandidateHash, + CandidateIndex, CandidateReceipt, CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, + CollatorId, CollatorSignature, CommittedCandidateReceipt, CompactStatement, ConsensusLog, + CoreIndex, CoreState, DisputeState, DisputeStatement, DisputeStatementSet, DownwardMessage, + EncodeAs, ExecutorParam, ExecutorParamError, ExecutorParams, ExecutorParamsHash, + ExplicitDisputeStatement, GroupIndex, GroupRotationInfo, Hash, HashT, HeadData, Header, + HorizontalMessages, HrmpChannelId, Id, InboundDownwardMessage, InboundHrmpMessage, IndexedVec, + InherentData, InvalidDisputeStatementKind, Moment, MultiDisputeStatementSet, Nonce, + OccupiedCore, OccupiedCoreAssumption, OutboundHrmpMessage, ParathreadClaim, ParathreadEntry, + PersistedValidationData, PvfCheckStatement, PvfExecKind, PvfPrepKind, RuntimeMetricLabel, + RuntimeMetricLabelValue, RuntimeMetricLabelValues, RuntimeMetricLabels, RuntimeMetricOp, + RuntimeMetricUpdate, ScheduledCore, ScrapedOnChainVotes, SessionIndex, SessionInfo, Signature, + Signed, SignedAvailabilityBitfield, SignedAvailabilityBitfields, SignedStatement, + SigningContext, Slot, UncheckedSigned, UncheckedSignedAvailabilityBitfield, UncheckedSignedAvailabilityBitfields, UncheckedSignedStatement, UpgradeGoAhead, UpgradeRestriction, UpwardMessage, 
ValidDisputeStatementKind, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, ValidityAttestation, diff --git a/polkadot/primitives/src/runtime_api.rs b/polkadot/primitives/src/runtime_api.rs index 6cb66d40204d124da3f51c966dbe7c65348ecb31..e4c1d590f4573a676c02630fd83d0c3b46bfafe1 100644 --- a/polkadot/primitives/src/runtime_api.rs +++ b/polkadot/primitives/src/runtime_api.rs @@ -114,10 +114,10 @@ //! separated from the stable primitives. use crate::{ - async_backing, slashing, AsyncBackingParams, BlockNumber, CandidateCommitments, CandidateEvent, - CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, - GroupRotationInfo, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, - ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidatorId, ValidatorIndex, + async_backing, slashing, vstaging, AsyncBackingParams, BlockNumber, CandidateCommitments, + CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, + ExecutorParams, GroupRotationInfo, OccupiedCoreAssumption, PersistedValidationData, + PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidatorId, ValidatorIndex, ValidatorSignature, }; use parity_scale_codec::{Decode, Encode}; @@ -248,6 +248,7 @@ sp_api::decl_runtime_apis! { #[api_version(6)] fn minimum_backing_votes() -> u32; + /***** Added in v7: Asynchronous backing *****/ /// Returns the state of parachain backing for a given para. @@ -257,5 +258,18 @@ sp_api::decl_runtime_apis! { /// Returns candidate's acceptance limitations for asynchronous backing for a relay parent. #[api_version(7)] fn async_backing_params() -> AsyncBackingParams; + + /***** Added in v8 *****/ + + /// Returns a list of all disabled validators at the given block. + #[api_version(8)] + fn disabled_validators() -> Vec; + + /***** Added in v9 *****/ + + /// Get node features. + /// This is a staging method! Do not use on production runtimes! + #[api_version(9)] + fn node_features() -> vstaging::NodeFeatures; } } diff --git a/polkadot/primitives/src/v6/executor_params.rs b/polkadot/primitives/src/v6/executor_params.rs index 6fbf3037fd6cd8c99780edda33763817dfb11941..112a529f62b0570e9270d1f09e397e6b9dc358b0 100644 --- a/polkadot/primitives/src/v6/executor_params.rs +++ b/polkadot/primitives/src/v6/executor_params.rs @@ -21,40 +21,106 @@ //! by the first element of the vector). Decoding to a usable semantics structure is //! done in `polkadot-node-core-pvf`. -use crate::{BlakeTwo256, HashT as _, PvfExecTimeoutKind, PvfPrepTimeoutKind}; +use crate::{BlakeTwo256, HashT as _, PvfExecKind, PvfPrepKind}; use parity_scale_codec::{Decode, Encode}; use polkadot_core_primitives::Hash; use scale_info::TypeInfo; use serde::{Deserialize, Serialize}; -use sp_std::{ops::Deref, time::Duration, vec, vec::Vec}; +use sp_std::{collections::btree_map::BTreeMap, ops::Deref, time::Duration, vec, vec::Vec}; + +/// Default maximum number of wasm values allowed for the stack during execution of a PVF. +pub const DEFAULT_LOGICAL_STACK_MAX: u32 = 65536; +/// Default maximum number of bytes devoted for the stack during execution of a PVF. +pub const DEFAULT_NATIVE_STACK_MAX: u32 = 256 * 1024 * 1024; + +/// The limit of [`ExecutorParam::MaxMemoryPages`]. +pub const MEMORY_PAGES_MAX: u32 = 65536; +/// The lower bound of [`ExecutorParam::StackLogicalMax`]. +pub const LOGICAL_MAX_LO: u32 = 1024; +/// The upper bound of [`ExecutorParam::StackLogicalMax`]. 
+pub const LOGICAL_MAX_HI: u32 = 2 * 65536; +/// The lower bound of [`ExecutorParam::PrecheckingMaxMemory`]. +pub const PRECHECK_MEM_MAX_LO: u64 = 256 * 1024 * 1024; +/// The upper bound of [`ExecutorParam::PrecheckingMaxMemory`]. +pub const PRECHECK_MEM_MAX_HI: u64 = 16 * 1024 * 1024 * 1024; + +// Default PVF timeouts. Must never be changed! Use executor environment parameters to adjust them. +// See also `PvfPrepKind` and `PvfExecKind` docs. + +/// Default PVF preparation timeout for prechecking requests. +pub const DEFAULT_PRECHECK_PREPARATION_TIMEOUT: Duration = Duration::from_secs(60); +/// Default PVF preparation timeout for execution requests. +pub const DEFAULT_LENIENT_PREPARATION_TIMEOUT: Duration = Duration::from_secs(360); +/// Default PVF execution timeout for backing. +pub const DEFAULT_BACKING_EXECUTION_TIMEOUT: Duration = Duration::from_secs(2); +/// Default PVF execution timeout for approval or disputes. +pub const DEFAULT_APPROVAL_EXECUTION_TIMEOUT: Duration = Duration::from_secs(12); + +const DEFAULT_PRECHECK_PREPARATION_TIMEOUT_MS: u64 = + DEFAULT_PRECHECK_PREPARATION_TIMEOUT.as_millis() as u64; +const DEFAULT_LENIENT_PREPARATION_TIMEOUT_MS: u64 = + DEFAULT_LENIENT_PREPARATION_TIMEOUT.as_millis() as u64; +const DEFAULT_BACKING_EXECUTION_TIMEOUT_MS: u64 = + DEFAULT_BACKING_EXECUTION_TIMEOUT.as_millis() as u64; +const DEFAULT_APPROVAL_EXECUTION_TIMEOUT_MS: u64 = + DEFAULT_APPROVAL_EXECUTION_TIMEOUT.as_millis() as u64; /// The different executor parameters for changing the execution environment semantics. #[derive(Clone, Debug, Encode, Decode, PartialEq, Eq, TypeInfo, Serialize, Deserialize)] pub enum ExecutorParam { /// Maximum number of memory pages (64KiB bytes per page) the executor can allocate. + /// A valid value lies within (0, 65536]. #[codec(index = 1)] MaxMemoryPages(u32), - /// Wasm logical stack size limit (max. number of Wasm values on stack) + /// Wasm logical stack size limit (max. number of Wasm values on stack). + /// A valid value lies within [[`LOGICAL_MAX_LO`], [`LOGICAL_MAX_HI`]]. + /// + /// For WebAssembly, the stack limit is subject to implementations, meaning that it may vary on + /// different platforms. However, we want execution to be deterministic across machines of + /// different architectures, including failures like stack overflow. For deterministic + /// overflow, we rely on a **logical** limit, the maximum number of values allowed to be pushed + /// on the stack. #[codec(index = 2)] StackLogicalMax(u32), - /// Executor machine stack size limit, in bytes + /// Executor machine stack size limit, in bytes. + /// If `StackLogicalMax` is also present, a valid value should not fall below + /// 128 * `StackLogicalMax`. + /// + /// For deterministic overflow, `StackLogicalMax` should be reached before the native stack is + /// exhausted. #[codec(index = 3)] StackNativeMax(u32), /// Max. amount of memory the preparation worker is allowed to use during - /// pre-checking, in bytes + /// pre-checking, in bytes. + /// Valid max. memory ranges from [`PRECHECK_MEM_MAX_LO`] to [`PRECHECK_MEM_MAX_HI`]. #[codec(index = 4)] PrecheckingMaxMemory(u64), - /// PVF preparation timeouts, millisec + /// PVF preparation timeouts, in millisecond. + /// Always ensure that `precheck_timeout` < `lenient_timeout`. + /// When absent, the default values will be used. #[codec(index = 5)] - PvfPrepTimeout(PvfPrepTimeoutKind, u64), - /// PVF execution timeouts, millisec + PvfPrepTimeout(PvfPrepKind, u64), + /// PVF execution timeouts, in millisecond. 
+ /// Always ensure that `backing_timeout` < `approval_timeout`. + /// When absent, the default values will be used. #[codec(index = 6)] - PvfExecTimeout(PvfExecTimeoutKind, u64), + PvfExecTimeout(PvfExecKind, u64), /// Enables WASM bulk memory proposal #[codec(index = 7)] WasmExtBulkMemory, } +/// Possible inconsistencies of executor params. +#[derive(Debug)] +pub enum ExecutorParamError { + /// A param is duplicated. + DuplicatedParam(&'static str), + /// A param value exceeds its limitation. + OutsideLimit(&'static str), + /// Two param values are incompatible or senseless when put together. + IncompatibleValues(&'static str, &'static str), +} + /// Unit type wrapper around [`type@Hash`] that represents an execution parameter set hash. /// /// This type is produced by [`ExecutorParams::hash`]. @@ -108,7 +174,7 @@ impl ExecutorParams { } /// Returns a PVF preparation timeout, if any - pub fn pvf_prep_timeout(&self, kind: PvfPrepTimeoutKind) -> Option { + pub fn pvf_prep_timeout(&self, kind: PvfPrepKind) -> Option { for param in &self.0 { if let ExecutorParam::PvfPrepTimeout(k, timeout) = param { if kind == *k { @@ -120,7 +186,7 @@ impl ExecutorParams { } /// Returns a PVF execution timeout, if any - pub fn pvf_exec_timeout(&self, kind: PvfExecTimeoutKind) -> Option { + pub fn pvf_exec_timeout(&self, kind: PvfExecKind) -> Option { for param in &self.0 { if let ExecutorParam::PvfExecTimeout(k, timeout) = param { if kind == *k { @@ -130,6 +196,129 @@ impl ExecutorParams { } None } + + /// Returns pre-checking memory limit, if any + pub fn prechecking_max_memory(&self) -> Option { + for param in &self.0 { + if let ExecutorParam::PrecheckingMaxMemory(limit) = param { + return Some(*limit) + } + } + None + } + + /// Check params coherence. + pub fn check_consistency(&self) -> Result<(), ExecutorParamError> { + use ExecutorParam::*; + use ExecutorParamError::*; + + let mut seen = BTreeMap::<&str, u64>::new(); + + macro_rules! check { + ($param:ident, $val:expr $(,)?) => { + if seen.contains_key($param) { + return Err(DuplicatedParam($param)) + } + seen.insert($param, $val as u64); + }; + + // should check existence before range + ($param:ident, $val:expr, $out_of_limit:expr $(,)?) 
=> { + if seen.contains_key($param) { + return Err(DuplicatedParam($param)) + } + if $out_of_limit { + return Err(OutsideLimit($param)) + } + seen.insert($param, $val as u64); + }; + } + + for param in &self.0 { + // should ensure to be unique + let param_ident = match *param { + MaxMemoryPages(_) => "MaxMemoryPages", + StackLogicalMax(_) => "StackLogicalMax", + StackNativeMax(_) => "StackNativeMax", + PrecheckingMaxMemory(_) => "PrecheckingMaxMemory", + PvfPrepTimeout(kind, _) => match kind { + PvfPrepKind::Precheck => "PvfPrepKind::Precheck", + PvfPrepKind::Prepare => "PvfPrepKind::Prepare", + }, + PvfExecTimeout(kind, _) => match kind { + PvfExecKind::Backing => "PvfExecKind::Backing", + PvfExecKind::Approval => "PvfExecKind::Approval", + }, + WasmExtBulkMemory => "WasmExtBulkMemory", + }; + + match *param { + MaxMemoryPages(val) => { + check!(param_ident, val, val == 0 || val > MEMORY_PAGES_MAX,); + }, + + StackLogicalMax(val) => { + check!(param_ident, val, val < LOGICAL_MAX_LO || val > LOGICAL_MAX_HI,); + }, + + StackNativeMax(val) => { + check!(param_ident, val); + }, + + PrecheckingMaxMemory(val) => { + check!( + param_ident, + val, + val < PRECHECK_MEM_MAX_LO || val > PRECHECK_MEM_MAX_HI, + ); + }, + + PvfPrepTimeout(_, val) => { + check!(param_ident, val); + }, + + PvfExecTimeout(_, val) => { + check!(param_ident, val); + }, + + WasmExtBulkMemory => { + check!(param_ident, 1); + }, + } + } + + if let (Some(lm), Some(nm)) = ( + seen.get("StackLogicalMax").or(Some(&(DEFAULT_LOGICAL_STACK_MAX as u64))), + seen.get("StackNativeMax").or(Some(&(DEFAULT_NATIVE_STACK_MAX as u64))), + ) { + if *nm < 128 * *lm { + return Err(IncompatibleValues("StackLogicalMax", "StackNativeMax")) + } + } + + if let (Some(precheck), Some(lenient)) = ( + seen.get("PvfPrepKind::Precheck") + .or(Some(&DEFAULT_PRECHECK_PREPARATION_TIMEOUT_MS)), + seen.get("PvfPrepKind::Prepare") + .or(Some(&DEFAULT_LENIENT_PREPARATION_TIMEOUT_MS)), + ) { + if *precheck >= *lenient { + return Err(IncompatibleValues("PvfPrepKind::Precheck", "PvfPrepKind::Prepare")) + } + } + + if let (Some(backing), Some(approval)) = ( + seen.get("PvfExecKind::Backing").or(Some(&DEFAULT_BACKING_EXECUTION_TIMEOUT_MS)), + seen.get("PvfExecKind::Approval") + .or(Some(&DEFAULT_APPROVAL_EXECUTION_TIMEOUT_MS)), + ) { + if *backing >= *approval { + return Err(IncompatibleValues("PvfExecKind::Backing", "PvfExecKind::Approval")) + } + } + + Ok(()) + } } impl Deref for ExecutorParams { diff --git a/polkadot/primitives/src/v6/mod.rs b/polkadot/primitives/src/v6/mod.rs index cf90083551788c6877bd44bd509bf137f5b9bf38..83b590dc32032c4c366ea0f8723f1cc76837effa 100644 --- a/polkadot/primitives/src/v6/mod.rs +++ b/polkadot/primitives/src/v6/mod.rs @@ -62,7 +62,7 @@ pub mod executor_params; pub mod slashing; pub use async_backing::AsyncBackingParams; -pub use executor_params::{ExecutorParam, ExecutorParams, ExecutorParamsHash}; +pub use executor_params::{ExecutorParam, ExecutorParamError, ExecutorParams, ExecutorParamsHash}; mod metrics; pub use metrics::{ @@ -1781,30 +1781,22 @@ impl WellKnownKey { } } -/// Type discriminator for PVF preparation timeouts +/// Type discriminator for PVF preparation. #[derive(Encode, Decode, TypeInfo, Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub enum PvfPrepTimeoutKind { - /// For prechecking requests, the time period after which the preparation worker is considered - /// unresponsive and will be killed. +pub enum PvfPrepKind { + /// For prechecking requests. 
Precheck, - /// For execution and heads-up requests, the time period after which the preparation worker is - /// considered unresponsive and will be killed. More lenient than the timeout for prechecking - /// to prevent honest validators from timing out on valid PVFs. - Lenient, + /// For execution and heads-up requests. + Prepare, } -/// Type discriminator for PVF execution timeouts +/// Type discriminator for PVF execution. #[derive(Encode, Decode, TypeInfo, Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub enum PvfExecTimeoutKind { - /// The amount of time to spend on execution during backing. +pub enum PvfExecKind { + /// For backing requests. Backing, - - /// The amount of time to spend on execution during approval or disputes. - /// - /// This should be much longer than the backing execution timeout to ensure that in the - /// absence of extremely large disparities between hardware, blocks that pass backing are - /// considered executable by approval checkers or dispute participants. + /// For approval and dispute request. Approval, } diff --git a/polkadot/primitives/src/vstaging/mod.rs b/polkadot/primitives/src/vstaging/mod.rs index 1429b0c326aceef4b9088bd4ddef6828f8dcfbd8..083e0f42d56468b4cdc0e8b6adeb656c6a61a1ff 100644 --- a/polkadot/primitives/src/vstaging/mod.rs +++ b/polkadot/primitives/src/vstaging/mod.rs @@ -17,3 +17,8 @@ //! Staging Primitives. // Put any primitives used by staging APIs functions here + +use bitvec::vec::BitVec; + +/// Bit indices in the `HostConfiguration.node_features` that correspond to different node features. +pub type NodeFeatures = BitVec; diff --git a/polkadot/roadmap/implementers-guide/README.md b/polkadot/roadmap/implementers-guide/README.md index 996041f176bb2cbce5446ac2e800fe3e155fb884..e03c0c45ddba009821861cc5ba39a77c7c03dd59 100644 --- a/polkadot/roadmap/implementers-guide/README.md +++ b/polkadot/roadmap/implementers-guide/README.md @@ -4,7 +4,7 @@ The implementers' guide is compiled from several source files with [`mdBook`](ht ## Hosted build -This is available [here](https://paritytech.github.io/polkadot/book/). +This is available [here](https://paritytech.github.io/polkadot-sdk/book/). ## Local build diff --git a/polkadot/roadmap/implementers-guide/book.toml b/polkadot/roadmap/implementers-guide/book.toml index 1e6680f6f4b7e3b356af787fe0412fbab177e902..f677c0d59c049281bd2308b88c6e8c1f492c7871 100644 --- a/polkadot/roadmap/implementers-guide/book.toml +++ b/polkadot/roadmap/implementers-guide/book.toml @@ -17,6 +17,6 @@ renderer = ["html"] additional-css = ["last-changed.css"] additional-js = ["mermaid.min.js", "mermaid-init.js"] # Repository URL used in the last-changed link. -git-repository-url = "https://github.com/paritytech/polkadot" +git-repository-url = "https://github.com/paritytech/polkadot-sdk" [output.linkcheck] diff --git a/polkadot/roadmap/implementers-guide/src/disputes-flow.md b/polkadot/roadmap/implementers-guide/src/disputes-flow.md index f9fd8dcce351e2b7a4461c05c35a647a69cfb4fe..b5cc5611c6ff897826e128d240c38761991e0a5c 100644 --- a/polkadot/roadmap/implementers-guide/src/disputes-flow.md +++ b/polkadot/roadmap/implementers-guide/src/disputes-flow.md @@ -52,7 +52,7 @@ stateDiagram-v2 IncomingRequestDisputeAvailabilityData --> RespondUnavailable IncomingRequestDisputeAvailabilityData --> DisputeDataAvail DisputeDataAvail --> RespondWithDisputeAvailabilityData: Send - VoteGossipReceived --> Track: implies source peer has
dispute availablity data + VoteGossipReceived --> Track: implies source peer has
dispute availability data ``` --- diff --git a/polkadot/roadmap/implementers-guide/src/node/approval/approval-voting.md b/polkadot/roadmap/implementers-guide/src/node/approval/approval-voting.md index 6da2f5d9b883a08877928bf6e7c2507ec1f38604..1a17f90d9ba37e1eb8722a28cee22669d08d948e 100644 --- a/polkadot/roadmap/implementers-guide/src/node/approval/approval-voting.md +++ b/polkadot/roadmap/implementers-guide/src/node/approval/approval-voting.md @@ -75,22 +75,27 @@ struct TrancheEntry { assignments: Vec<(ValidatorIndex, Tick)>, } -struct OurAssignment { - cert: AssignmentCert, - tranche: DelayTranche, - validator_index: ValidatorIndex, - triggered: bool, +pub struct OurAssignment { + /// Our assignment certificate. + cert: AssignmentCertV2, + /// The tranche for which the assignment refers to. + tranche: DelayTranche, + /// Our validator index for the session in which the candidates were included. + validator_index: ValidatorIndex, + /// Whether the assignment has been triggered already. + triggered: bool, } -struct ApprovalEntry { - tranches: Vec, // sorted ascending by tranche number. - backing_group: GroupIndex, - our_assignment: Option, - our_approval_sig: Option, - assignments: Bitfield, // n_validators bits - approved: bool, +pub struct ApprovalEntry { + tranches: Vec, // sorted ascending by tranche number. + backing_group: GroupIndex, + our_assignment: Option, + our_approval_sig: Option, + assigned_validators: Bitfield, // `n_validators` bits. + approved: bool, } + struct CandidateEntry { candidate: CandidateReceipt, session: SessionIndex, @@ -264,19 +269,25 @@ entry. The cert itself contains information necessary to determine the candidate * Check the assignment cert * If the cert kind is `RelayVRFModulo`, then the certificate is valid as long as `sample < session_info.relay_vrf_samples` and the VRF is valid for the validator's key with the input - `block_entry.relay_vrf_story ++ sample.encode()` as described with [the approvals protocol - section](../../protocol-approval.md#assignment-criteria). We set `core_index = vrf.make_bytes().to_u32() % - session_info.n_cores`. If the `BlockEntry` causes inclusion of a candidate at `core_index`, then this is a valid - assignment for the candidate at `core_index` and has delay tranche 0. Otherwise, it can be ignored. - * If the cert kind is `RelayVRFDelay`, then we check if the VRF is valid for the validator's key with the input - `block_entry.relay_vrf_story ++ cert.core_index.encode()` as described in [the approvals protocol - section](../../protocol-approval.md#assignment-criteria). The cert can be ignored if the block did not cause - inclusion of a candidate on that core index. Otherwise, this is a valid assignment for the included candidate. The - delay tranche for the assignment is determined by reducing `(vrf.make_bytes().to_u64() % - (session_info.n_delay_tranches + - session_info.zeroth_delay_tranche_width)).saturating_sub(session_info.zeroth_delay_tranche_width)`. - * We also check that the core index derived by the output is covered by the `VRFProof` by means of an auxiliary - signature. + `block_entry.relay_vrf_story ++ sample.encode()` as described with + [the approvals protocol section](../../protocol-approval.md#assignment-criteria). We set + `core_index = vrf.make_bytes().to_u32() % session_info.n_cores`. If the `BlockEntry` causes + inclusion of a candidate at `core_index`, then this is a valid assignment for the candidate + at `core_index` and has delay tranche 0. Otherwise, it can be ignored. 
+ * If the cert kind is `RelayVRFModuloCompact`, then the certificate is valid as long as the VRF + is valid for the validator's key with the input `block_entry.relay_vrf_story ++ relay_vrf_samples.encode()` + as described with [the approvals protocol section](../../protocol-approval.md#assignment-criteria). + We enforce that all `core_bitfield` indices are included in the set of the core indices sampled from the + VRF Output. The assignment is considered a valid tranche0 assignment for all claimed candidates if all + `core_bitfield` indices match the core indices where the claimed candidates were included at. + + * If the cert kind is `RelayVRFDelay`, then we check if the VRF is valid for the validator's key with the + input `block_entry.relay_vrf_story ++ cert.core_index.encode()` as described in [the approvals protocol + section](../../protocol-approval.md#assignment-criteria). The cert can be ignored if the block did not + cause inclusion of a candidate on that core index. Otherwise, this is a valid assignment for the included + candidate. The delay tranche for the assignment is determined by reducing + `(vrf.make_bytes().to_u64() % (session_info.n_delay_tranches + session_info.zeroth_delay_tranche_width)).saturating_sub(session_info.zeroth_delay_tranche_width)`. + * We also check that the core index derived by the output is covered by the `VRFProof` by means of an auxiliary signature. * If the delay tranche is too far in the future, return `AssignmentCheckResult::TooFarInFuture`. * Import the assignment. * Load the candidate in question and access the `approval_entry` for the block hash the cert references. diff --git a/polkadot/roadmap/implementers-guide/src/node/backing/statement-distribution-legacy.md b/polkadot/roadmap/implementers-guide/src/node/backing/statement-distribution-legacy.md index e10a55010b91ca1dab6d3cf09aed409530c2bba5..710055144665cc25b1aed270fc91a7d32b3458f1 100644 --- a/polkadot/roadmap/implementers-guide/src/node/backing/statement-distribution-legacy.md +++ b/polkadot/roadmap/implementers-guide/src/node/backing/statement-distribution-legacy.md @@ -149,7 +149,7 @@ The receiver of such a message needs to request the actual payload via request/r This is necessary as distribution of a large payload (mega bytes) via gossip would make the network collapse and timely distribution of statements would no longer be possible. By using request/response it is ensured that each peer only -transferes large data once. We only take good care to detect an overloaded peer early and immediately move on to a +transfers large data once. We only take good care to detect an overloaded peer early and immediately move on to a different peer for fetching the data. This mechanism should result in a good load distribution and therefore a rather optimal distribution path. diff --git a/polkadot/roadmap/implementers-guide/src/node/backing/statement-distribution.md b/polkadot/roadmap/implementers-guide/src/node/backing/statement-distribution.md index 24f2785f8294d32ff3a890f2781d13be3a232d4e..86a1bf1214134d55a9f8f164175f49deffc528fc 100644 --- a/polkadot/roadmap/implementers-guide/src/node/backing/statement-distribution.md +++ b/polkadot/roadmap/implementers-guide/src/node/backing/statement-distribution.md @@ -3,7 +3,7 @@ This subsystem is responsible for distributing signed statements that we have generated and forwarding statements generated by our peers. 
Received candidate receipts and statements are passed to the [Candidate Backing subsystem](candidate-backing.md) to handle producing local statements. On receiving -`StatementDistributionMessage::Share`, this subsystem distributes the message across the network with redundency to +`StatementDistributionMessage::Share`, this subsystem distributes the message across the network with redundancy to ensure a fast backing process. ## Overview diff --git a/polkadot/roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md b/polkadot/roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md index daba416e2639edbd93431a9f65a13120d52d6d25..a9cb2741b0838fb9859435d3e780f16c1025100f 100644 --- a/polkadot/roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md +++ b/polkadot/roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md @@ -79,7 +79,7 @@ game, so we are not too woried about colluding approval voters getting away slas maintained anyway. There is however a separate problem, from colluding approval-voters, that is "lazy" approval voters. If it were easy and reliable for approval-voters to reconsider their vote, in case of an actual dispute, then they don't have a direct incentive (apart from playing a part in securing the network) to properly run the validation function at -all - they could just always vote "valid" totally risk free. (While they would alwasy risk a slash by voting invalid.) +all - they could just always vote "valid" totally risk free. (While they would always risk a slash by voting invalid.) So we do want to fetch approval votes from approval-voting. Importing votes is most efficient when batched. At the same @@ -125,7 +125,7 @@ moment the dispute concludes! Two concerns that come to mind, are easily address enough: We are worried about lazy approval checkers, the system does not need to be perfect. It should be enough if there is some risk of getting caught. 2. We are not worried about the dispute not concluding, as nodes will always send their own vote, regardless of it being - an explict or an already existing approval-vote. + an explicit or an already existing approval-vote. Conclusion: As long as we make sure, if our own approval vote gets imported (which would prevent dispute participation) to also distribute it via dispute-distribution, disputes can conclude. To mitigate raciness with approval-voting @@ -307,7 +307,7 @@ spam, then spam slots for the disputed candidate hash are cleared. This decremen which had voted invalid. To keep spam slots from filling up unnecessarily we want to clear spam slots whenever a candidate is seen to be backed -or included. Fortunately this behavior is acheived by clearing slots on vote import as described above. Because on chain +or included. Fortunately this behavior is achieved by clearing slots on vote import as described above. Because on chain backing votes are processed when a block backing the disputed candidate is discovered, spam slots are cleared for every backed candidate. Included candidates have also been seen as backed on the same fork, so decrementing spam slots is handled in that case as well. @@ -422,7 +422,7 @@ from them, so they would be an easy DoS target. In summary: The availability system was designed for raising disputes in a meaningful and secure way after availability was reached. 
Trying to raise disputes before does not meaningfully contribute to the systems security/might even weaken -it as attackers are warned before availability is reached, while at the same time adding signficant amount of +it as attackers are warned before availability is reached, while at the same time adding significant amount of complexity. We therefore punt on such disputes and concentrate on disputes the system was designed to handle. ### No Disputes for Already Finalized Blocks diff --git a/polkadot/roadmap/implementers-guide/src/node/disputes/dispute-distribution.md b/polkadot/roadmap/implementers-guide/src/node/disputes/dispute-distribution.md index 4547e02352ec1c44eecd81c6e03d1b92bb1e26f1..e916d246060e68731fdbf909bf46c0946ca94723 100644 --- a/polkadot/roadmap/implementers-guide/src/node/disputes/dispute-distribution.md +++ b/polkadot/roadmap/implementers-guide/src/node/disputes/dispute-distribution.md @@ -297,7 +297,7 @@ is `500ms` and above `RATE_LIMIT` is `100ms`. 1/3 of validators are malicious, so for 1000 this means around 330 malicious actors worst case. All those actors can send a message every `100ms`, that is 10 per second. This -means at the begining of an attack they can open up around 3300 batches. Each +means at the beginning of an attack they can open up around 3300 batches. Each containing two votes. So memory usage is still negligible. In reality it is even less, as we also demand 10 new votes to trickle in per batch in order to keep it alive, every `500ms`. Hence for the first second, each batch requires 20 votes diff --git a/polkadot/roadmap/implementers-guide/src/node/utility/candidate-validation.md b/polkadot/roadmap/implementers-guide/src/node/utility/candidate-validation.md index 376fa187de1bf4b18849ba8c8b134352fbe21630..e252ec237b7971116f505a8dbd4ae1f708bee577 100644 --- a/polkadot/roadmap/implementers-guide/src/node/utility/candidate-validation.md +++ b/polkadot/roadmap/implementers-guide/src/node/utility/candidate-validation.md @@ -31,7 +31,7 @@ all the necessary parameters to the validation function. These are: * The [`ValidationData`](../../types/candidate.md#validationdata). * The [`PoV`](../../types/availability.md#proofofvalidity). -The second category is for PVF pre-checking. This is primarly used by the [PVF pre-checker](pvf-prechecker.md) +The second category is for PVF pre-checking. This is primarily used by the [PVF pre-checker](pvf-prechecker.md) subsystem. ### Determining Parameters diff --git a/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md b/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md index 6a14a3a013d45f2e823be5ef8cf35617e2614dbe..56bdd48bc0c3f0438525fa59a93ff8dcdbe29701 100644 --- a/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md +++ b/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md @@ -1,7 +1,11 @@ # PVF Host and Workers The PVF host is responsible for handling requests to prepare and execute PVF -code blobs, which it sends to PVF workers running in their own child processes. +code blobs, which it sends to PVF **workers** running in their own child +processes. + +While the workers are generally long-living, they also spawn one-off secure +**job processes** that perform the jobs. See "Job Processes" section below. This system has two high-levels goals that we will touch on here: *determinism* and *security*. @@ -27,24 +31,37 @@ hopefully resolve. 
We use a more brief delay here (1 second as opposed to 15 minutes for preparation (see above)), because a successful execution must happen in a short amount of time. +If the execution fails during the backing phase, we won't retry, to reduce the chance of +supporting nondeterministic candidates. This reduces the chance of nondeterministic blocks +getting backed and honest backers getting slashed. + We currently know of the following specific cases that will lead to a retried execution request: -1. **OOM:** The host might have been temporarily low on memory due to other - processes running on the same machine. **NOTE:** This case will lead to - voting against the candidate (and possibly a dispute) if the retry is still - not successful. -2. **Artifact missing:** The prepared artifact might have been deleted due to +1. **OOM:** We have memory limits to try to prevent attackers from exhausting + host memory. If the memory limit is hit, we kill the job process and retry + the job. Alternatively, the host might have been temporarily low on memory + due to other processes running on the same machine. **NOTE:** This case will + lead to voting against the candidate (and possibly a dispute) if the retry is + still not successful. +2. **Syscall violations:** If the job attempts a system call that is blocked by + the sandbox's security policy, the job process is immediately killed and we + retry. **NOTE:** In the future, if we have a proper way to detect that the + job died due to a security violation, it might make sense not to retry in + this case. +3. **Artifact missing:** The prepared artifact might have been deleted due to operator error or some bug in the system. -3. **Panic:** The worker thread panicked for some indeterminate reason, which - may or may not be independent of the candidate or PVF. +4. **Job errors:** For example, the job process panicked for some indeterminate + reason, which may or may not be independent of the candidate or PVF. +5. **Internal errors:** See "Internal Errors" section. In this case, after the + retry we abstain from voting. ### Preparation timeouts We use timeouts for both preparation and execution jobs to limit the amount of time they can take. As the time for a job can vary depending on the machine and load on the machine, this can potentially lead to disputes where some validators -successfuly execute a PVF and others don't. +successfully execute a PVF and others don't. One dispute mitigation we have in place is a more lenient timeout for preparation during execution than during pre-checking. The rationale is that the @@ -62,10 +79,16 @@ more than the CPU time. ### Internal errors +An internal, or local, error is one that we treat as independent of the PVF +and/or candidate, i.e. local to the running machine. If this happens, then we +will first retry the job and if the error persists, then we simply do not vote. +This prevents slashes, since otherwise our vote may not agree with that of the +other validators. + In general, for errors not raising a dispute we have to be very careful. This is -only sound, if we either: +only sound, if either: -1. Ruled out that error in pre-checking. If something is not checked in +1. We ruled out that error in pre-checking. If something is not checked in pre-checking, even if independent of the candidate and PVF, we must raise a dispute. 2. 
We are 100% confident that it is a hardware/local issue: Like corrupted file, @@ -75,11 +98,11 @@ Reasoning: Otherwise it would be possible to register a PVF where candidates can not be checked, but we don't get a dispute - so nobody gets punished. Second, we end up with a finality stall that is not going to resolve! -There are some error conditions where we can't be sure whether the candidate is -really invalid or some internal glitch occurred, e.g. panics. Whenever we are -unsure, we can never treat an error as internal as we would abstain from voting. -So we will first retry the candidate, and if the issue persists we are forced to -vote invalid. +Note that any error from the job process we cannot treat as internal. The job +runs untrusted code and an attacker can therefore return arbitrary errors. If +they were to return errors that we treat as internal, they could make us abstain +from voting. Since we are unsure if such errors are legitimate, we will first +retry the candidate, and if the issue persists we are forced to vote invalid. ## Security @@ -119,6 +142,20 @@ So what are we actually worried about? Things that come to mind: 6. **Intercepting and manipulating packages** - Effect very similar to the above, hard to do without also being able to do 4 or 5. +### Job Processes + +As mentioned above, our architecture includes long-living **worker processes** +and one-off **job processes**. This separation is important so that the handling +of untrusted code can be limited to the job processes. A hijacked job process +can therefore not interfere with other jobs running in separate processes. + +Furthermore, if an unexpected execution error occurred in the worker and not the +job, we generally can be confident that it has nothing to do with the candidate, +so we can abstain from voting. On the other hand, a hijacked job can send back +erroneous responses for candidates, so we know that we should not abstain from +voting on such errors from jobs. Otherwise, an attacker could trigger a finality +stall. (See "Internal Errors" section above.) + ### Restricting file-system access A basic security mechanism is to make sure that any process directly interfacing @@ -126,6 +163,17 @@ with untrusted code does not have unnecessary access to the file-system. This provides some protection against attackers accessing sensitive data or modifying data on the host machine. +*Currently this is only supported on Linux.* + +### Restricting networking + +We also disable networking on PVF threads by disabling certain syscalls, such as +the creation of sockets. This prevents attackers from either downloading +payloads or communicating sensitive data from the validator's machine to the +outside world. + +*Currently this is only supported on Linux.* + ### Clearing env vars We clear environment variables before handling untrusted code, because why give diff --git a/polkadot/roadmap/implementers-guide/src/node/utility/pvf-prechecker.md b/polkadot/roadmap/implementers-guide/src/node/utility/pvf-prechecker.md index b722bdc6539a6cf34c5870459180dedd590d7ec5..f0de50f2267b943f65ee2d9783869cd180eb07a9 100644 --- a/polkadot/roadmap/implementers-guide/src/node/utility/pvf-prechecker.md +++ b/polkadot/roadmap/implementers-guide/src/node/utility/pvf-prechecker.md @@ -24,7 +24,7 @@ relevant. When a PVF just becomes relevant, the subsystem will send a message to the [Candidate Validation] subsystem asking for the pre-check. 
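The "Clearing env vars" mechanism described above is simple enough to sketch. The following is a minimal illustration, not the actual worker code: it assumes the worker clears its inherited environment once at startup, before any untrusted PVF code runs.

```rust
use std::env;
use std::ffi::OsString;

/// Drop every environment variable inherited from the parent process, so that untrusted
/// code executed later cannot read anything sensitive from the environment.
/// (Illustrative helper only; the real worker wires this into its own startup path.)
fn clear_env_vars() {
	// Collect the keys first: mutating the environment while iterating over it is unsound.
	let keys: Vec<OsString> = env::vars_os().map(|(key, _value)| key).collect();
	for key in keys {
		env::remove_var(key);
	}
}

fn main() {
	clear_env_vars();
	assert!(env::vars_os().next().is_none());
	// ... continue with spawning the job / running untrusted code ...
}
```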
-Upon receving a message from the candidate-validation subsystem, the pre-checker will note down that the PVF has its +Upon receiving a message from the candidate-validation subsystem, the pre-checker will note down that the PVF has its judgement and will also sign and submit a [`PvfCheckStatement`][PvfCheckStatement] via the [`submit_pvf_check_statement` runtime API][PVF pre-checking runtime API]. In case, a judgement was received for a PVF that is no longer in view it is ignored. diff --git a/polkadot/roadmap/implementers-guide/src/protocol-approval.md b/polkadot/roadmap/implementers-guide/src/protocol-approval.md index 9ba7f6da17340ab786df423c248905db7ca76b94..70bc0233d65a09edbf3bddff960d52982aa54581 100644 --- a/polkadot/roadmap/implementers-guide/src/protocol-approval.md +++ b/polkadot/roadmap/implementers-guide/src/protocol-approval.md @@ -189,9 +189,10 @@ Assignment criteria compute actual assignments using stories and the validators' Assignment criteria output a `Position` consisting of both a `ParaId` to be checked, as well as a precedence `DelayTranche` for when the assignment becomes valid. -Assignment criteria come in three flavors, `RelayVRFModulo`, `RelayVRFDelay` and `RelayEquivocation`. Among these, both -`RelayVRFModulo` and `RelayVRFDelay` run a VRF whose input is the output of a `RelayVRFStory`, while `RelayEquivocation` -runs a VRF whose input is the output of a `RelayEquivocationStory`. +Assignment criteria come in four flavors, `RelayVRFModuloCompact`, `RelayVRFDelay`, `RelayEquivocation` and the +deprecated `RelayVRFModulo`. Among these, `RelayVRFModulo`, `RelayVRFModuloCompact` and `RelayVRFDelay` run a +VRF whose input is the output of a `RelayVRFStory`, while `RelayEquivocation` runs a VRF whose input is the +output of a `RelayEquivocationStory`. Among these, we have two distinct VRF output computations: @@ -203,6 +204,12 @@ sampled availability core in this relay chain block. We choose three samples in secure and efficient by increasing this to four or five, and reducing the backing checks accordingly. All successful `RelayVRFModulo` samples are assigned delay tranche zero. +`RelayVRFModuloCompact` runs a single sample whose VRF input is the `RelayVRFStory` and the sample count. Similarly +to `RelayVRFModulo`, it introduces multiple core assignments for tranche zero. It computes the VRF output with +`schnorrkel::vrf::VRFInOut::make_bytes` using the context "A&V Core v2" and samples up to 160 bytes of the output +as an array of `u32`. It then reduces each `u32` modulo the number of availability cores, and outputs up +to `relay_vrf_modulo_samples` availability core indices. + There is no sampling process for `RelayVRFDelay` and `RelayEquivocation`. We instead run them on specific candidates and they compute a delay from their VRF output. `RelayVRFDelay` runs for all candidates included under, aka declared available by, a relay chain block, and inputs the associated VRF output via `RelayVRFStory`. `RelayEquivocation` runs @@ -223,14 +230,14 @@ We track all validators' announced approval assignments for each candidate assoc tells us which validators were assigned to which candidates. We permit at most one assignment per candidate per story per validator, so one validator could be assigned under both -the `RelayVRFDelay` and `RelayEquivocation` criteria, but not under both `RelayVRFModulo` and `RelayVRFDelay` criteria, -since those both use the same story. We permit only one approval vote per candidate per validator, which counts for any -applicable criteria. 
+the `RelayVRFDelay` and `RelayEquivocation` criteria, but not under both `RelayVRFModulo/RelayVRFModuloCompact` +and `RelayVRFDelay` criteria, since those both use the same story. We permit only one approval vote per candidate per +validator, which counts for any applicable criteria. We announce, and start checking for, our own assignments when the delay of their tranche is reached, but only if the -tracker says the assignee candidate requires more approval checkers. We never announce an assignment we believe -unnecessary because early announcements gives an adversary information. All delay tranche zero assignments always get -announced, which includes all `RelayVRFModulo` assignments. +tracker says the assignee candidate requires more approval checkers. We never announce an assignment we believe unnecessary +because early announcements gives an adversary information. All delay tranche zero assignments always get announced, +which includes all `RelayVRFModulo` and `RelayVRFModuloCompact` assignments. In other words, if some candidate `C` needs more approval checkers by the time we reach round `t` then any validators with an assignment to `C` in delay tranche `t` gossip their send assignment notice for `C`, and begin reconstruction and @@ -318,12 +325,12 @@ finality. We might explore limits on postponement too, but this sounds much har ## Parameters -We prefer doing approval checkers assignments under `RelayVRFModulo` as opposed to `RelayVRFDelay` because -`RelayVRFModulo` avoids giving individual checkers too many assignments and tranche zero assignments benefit security -the most. We suggest assigning at least 16 checkers under `RelayVRFModulo` although assignment levels have never been -properly analyzed. +We prefer doing approval checkers assignments under `RelayVRFModulo` or `RelayVRFModuloCompact` as opposed to +`RelayVRFDelay` because `RelayVRFModulo` avoids giving individual checkers too many assignments and tranche zero +assignments benefit security the most. We suggest assigning at least 16 checkers under `RelayVRFModulo` or +`RelayVRFModuloCompact` although assignment levels have never been properly analyzed. -Our delay criteria `RelayVRFDelay` and `RelayEquivocation` both have two primary paramaters, expected checkers per +Our delay criteria `RelayVRFDelay` and `RelayEquivocation` both have two primary parameters, expected checkers per tranche and the zeroth delay tranche width. We require expected checkers per tranche to be less than three because otherwise an adversary with 1/3 stake could force diff --git a/polkadot/roadmap/implementers-guide/src/runtime/hrmp.md b/polkadot/roadmap/implementers-guide/src/runtime/hrmp.md index aa31491d72f6c6e142059e7bcfbe881516772d25..69d33ca8670db942ecd8f5cb564f8d973ef6e47b 100644 --- a/polkadot/roadmap/implementers-guide/src/runtime/hrmp.md +++ b/polkadot/roadmap/implementers-guide/src/runtime/hrmp.md @@ -108,7 +108,7 @@ HrmpEgressChannelsIndex: map ParaId => Vec; /// Invariant: cannot be non-empty if the corresponding channel in `HrmpChannels` is `None`. HrmpChannelContents: map HrmpChannelId => Vec; /// Maintains a mapping that can be used to answer the question: -/// What paras sent a message at the given block number for a given reciever. +/// What paras sent a message at the given block number for a given receiver. /// Invariants: /// - The inner `Vec` is never empty. /// - The inner `Vec` cannot store two same `ParaId`. 
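To make the `RelayVRFModuloCompact` sampling and the `RelayVRFDelay` tranche formula quoted in the approval sections above a little more concrete, here is a rough sketch. The function names and the raw `vrf_bytes`/`vrf_u64` inputs are stand-ins (the real code works on `schnorrkel` VRF transcripts inside the assignment-criteria module), so treat this as an illustration of the arithmetic only.

```rust
/// Reduce up to 160 bytes of VRF output to at most `samples` distinct availability core
/// indices, mirroring the `RelayVRFModuloCompact` description above. `vrf_bytes` stands in
/// for the output of `VRFInOut::make_bytes` under the "A&V Core v2" context.
fn compact_core_indices(vrf_bytes: &[u8], n_cores: u32, samples: usize) -> Vec<u32> {
	assert!(n_cores > 0, "there must be at least one availability core");
	let mut cores: Vec<u32> = vrf_bytes
		.chunks_exact(4)
		.take(40) // 160 bytes -> at most 40 `u32` words
		.map(|word| u32::from_le_bytes([word[0], word[1], word[2], word[3]]) % n_cores)
		.collect();
	// One plausible reading of "up to `relay_vrf_modulo_samples` core indices":
	// deduplicate the sampled cores and keep at most `samples` of them.
	cores.sort_unstable();
	cores.dedup();
	cores.truncate(samples);
	cores
}

/// Delay tranche for a `RelayVRFDelay` assignment, following the formula in the
/// approval-voting section: reduce the VRF output modulo the tranche space, then shift it
/// down by the zeroth-tranche width (saturating at zero).
fn relay_vrf_delay_tranche(vrf_u64: u64, n_delay_tranches: u32, zeroth_width: u32) -> u32 {
	let tranche = vrf_u64 % (n_delay_tranches as u64 + zeroth_width as u64);
	tranche.saturating_sub(zeroth_width as u64) as u32
}
```

The important property in both cases is that the claimed cores and the tranche are derived deterministically from a single VRF output, so other validators can re-derive and check the assignment.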
diff --git a/polkadot/roadmap/implementers-guide/src/runtime/inclusion.md b/polkadot/roadmap/implementers-guide/src/runtime/inclusion.md index f73df209981af32acf3e99d8023da0d9bed9115d..f6a32a01d5025e2fb824b94f804fd1e89fd0392b 100644 --- a/polkadot/roadmap/implementers-guide/src/runtime/inclusion.md +++ b/polkadot/roadmap/implementers-guide/src/runtime/inclusion.md @@ -133,7 +133,7 @@ All failed checks should lead to an unrecoverable error making the block invalid [`UpwardMessage`s](../types/messages.md#upward-message) from the [`CandidateCommitments`](../types/candidate.md#candidate-commitments). 1. call `Dmp::prune_dmq` with the para id of the candidate and the candidate's `processed_downward_messages`. - 1. call `Hrmp::prune_hrmp` with the para id of the candiate and the candidate's `hrmp_watermark`. + 1. call `Hrmp::prune_hrmp` with the para id of the candidate and the candidate's `hrmp_watermark`. 1. call `Hrmp::queue_outbound_hrmp` with the para id of the candidate and the list of horizontal messages taken from the commitment, 1. Call `Paras::note_new_head` using the `HeadData` from the receipt and `relay_parent_number`. diff --git a/polkadot/roadmap/implementers-guide/src/runtime/paras.md b/polkadot/roadmap/implementers-guide/src/runtime/paras.md index ac9b1520c3dfb588e06142d6e40b5fe286e0b6ac..e6e0b53512fe306e59b38812fba2fb015f9a093c 100644 --- a/polkadot/roadmap/implementers-guide/src/runtime/paras.md +++ b/polkadot/roadmap/implementers-guide/src/runtime/paras.md @@ -189,7 +189,7 @@ UpgradeGoAheadSignal: map hasher(twox_64_concat) ParaId => Option)`: indicate previosuly-occupied cores which are to be considered returned +- `free_cores(Vec<(CoreIndex, FreedReason)>)`: indicate previously-occupied cores which are to be considered returned and why they are being returned. - All freed lease holding parachain cores should be assigned to their respective parachain - All freed on-demand parachain cores whose reason for freeing was `FreedReason::Concluded` should have the claim diff --git a/polkadot/roadmap/implementers-guide/src/types/approval.md b/polkadot/roadmap/implementers-guide/src/types/approval.md index bc33f024426fefcf0809feeeddef2f83882c1914..c19ffa53762a5fc9e2ac0766604efc22cf20ef4d 100644 --- a/polkadot/roadmap/implementers-guide/src/types/approval.md +++ b/polkadot/roadmap/implementers-guide/src/types/approval.md @@ -22,6 +22,35 @@ enum AssignmentCertKind { } } +enum AssignmentCertKindV2 { + /// Multiple assignment stories based on the VRF that authorized the relay-chain block where the + /// candidates were included. + /// + /// The context is [`v2::RELAY_VRF_MODULO_CONTEXT`] + RelayVRFModuloCompact { + /// A bitfield representing the core indices claimed by this assignment. + core_bitfield: CoreBitfield, + }, + /// An assignment story based on the VRF that authorized the relay-chain block where the + /// candidate was included combined with the index of a particular core. + /// + /// The context is [`v2::RELAY_VRF_DELAY_CONTEXT`] + RelayVRFDelay { + /// The core index chosen in this cert. + core_index: CoreIndex, + }, + /// Deprecated assignment. Soon to be removed. + /// + /// An assignment story based on the VRF that authorized the relay-chain block where the + /// candidate was included combined with a sample number. + /// + /// The context used to produce bytes is [`v1::RELAY_VRF_MODULO_CONTEXT`] + RelayVRFModulo { + /// The sample number used in this cert. + sample: u32, + }, +} + struct AssignmentCert { // The criterion which is claimed to be met by this cert. 
kind: AssignmentCertKind, diff --git a/polkadot/roadmap/implementers-guide/src/types/backing.md b/polkadot/roadmap/implementers-guide/src/types/backing.md index 7e43325ec5cdadcbd7d233d00f9a59dd56dd22df..bfe182383fd5136d6fc3b98c72b270354307e2e8 100644 --- a/polkadot/roadmap/implementers-guide/src/types/backing.md +++ b/polkadot/roadmap/implementers-guide/src/types/backing.md @@ -30,7 +30,7 @@ work, we extract a signed wrapper. ```rust,ignore /// A signed type which encapsulates the common desire to sign some data and validate a signature. /// -/// Note that the internal fields are not public; they are all accessable by immutable getters. +/// Note that the internal fields are not public; they are all accessible by immutable getters. /// This reduces the chance that they are accidentally mutated, invalidating the signature. struct Signed { /// The payload is part of the signed data. The rest is the signing context, diff --git a/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md b/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md index fababbff354007c5fcf8f47d6b8e52e69d0879bb..54cdc2edd12d2537c1b80d9f2e973562375fc150 100644 --- a/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md +++ b/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md @@ -32,18 +32,8 @@ Indicates a change in active leaves. Activated leaves should have jobs, whereas winding-down of work based on those leaves. ```rust -enum LeafStatus { - // A leaf is fresh when it's the first time the leaf has been encountered. - // Most leaves should be fresh. - Fresh, - // A leaf is stale when it's encountered for a subsequent time. This will - // happen when the chain is reverted or the fork-choice rule abandons some - // chain. - Stale, -} - struct ActiveLeavesUpdate { - activated: [(Hash, Number, LeafStatus)], // in practice, these should probably be a SmallVec + activated: [(Hash, Number)], deactivated: [Hash], } ``` diff --git a/polkadot/roadmap/implementers-guide/src/types/runtime.md b/polkadot/roadmap/implementers-guide/src/types/runtime.md index 79da899bd35eae6afff6dd8e6fb53bf670f2c4a2..4b97409f8df3e4f3581161ef47713bfdfbcb9654 100644 --- a/polkadot/roadmap/implementers-guide/src/types/runtime.md +++ b/polkadot/roadmap/implementers-guide/src/types/runtime.md @@ -55,7 +55,7 @@ struct HostConfiguration { pub zeroth_delay_tranche_width: u32, /// The number of validators needed to approve a block. pub needed_approvals: u32, - /// The number of samples to do of the RelayVRFModulo approval assignment criterion. + /// The number of samples to use in `RelayVRFModulo` or `RelayVRFModuloCompact` approval assignment criterions. pub relay_vrf_modulo_samples: u32, /// Total number of individual messages allowed in the parachain -> relay-chain message queue. pub max_upward_queue_count: u32, diff --git a/polkadot/rpc/Cargo.toml b/polkadot/rpc/Cargo.toml index b5f155b5a658fe25aada71fdc454966050c519f1..ce11b26e5549719e7005e026239b034478e5c2dc 100644 --- a/polkadot/rpc/Cargo.toml +++ b/polkadot/rpc/Cargo.toml @@ -4,6 +4,7 @@ version = "1.0.0" authors.workspace = true edition.workspace = true license.workspace = true +description = "Polkadot specific RPC functionality." 
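The `Signed` wrapper quoted from the backing types above encodes a small but deliberate design choice: keep the payload and signature private and expose only immutable getters, so the pair cannot drift apart after construction. A stripped-down, hypothetical version of the shape (no real crypto, names invented for illustration) could look like this:

```rust
/// Toy stand-in for a real signature type.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Signature(pub Vec<u8>);

/// A payload together with the signature vouching for it. The fields are private on
/// purpose: once constructed, neither can be mutated, so the signature stays valid for
/// exactly the payload it was produced over.
pub struct Signed<Payload> {
	payload: Payload,
	signature: Signature,
}

impl<Payload> Signed<Payload> {
	/// Construct from already-checked parts; a real implementation would verify the
	/// signature against the payload and the signing context here.
	pub fn new(payload: Payload, signature: Signature) -> Self {
		Self { payload, signature }
	}

	/// Immutable access to the payload.
	pub fn payload(&self) -> &Payload {
		&self.payload
	}

	/// Immutable access to the signature.
	pub fn signature(&self) -> &Signature {
		&self.signature
	}
}
```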
[dependencies] jsonrpsee = { version = "0.16.2", features = ["server"] } diff --git a/polkadot/runtime/common/Cargo.toml b/polkadot/runtime/common/Cargo.toml index 2d1aad6a575e7bda03220e433b728192e7ddeba6..4391b6d81eb2a65f928105af757ce758f93d2644 100644 --- a/polkadot/runtime/common/Cargo.toml +++ b/polkadot/runtime/common/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "polkadot-runtime-common" version = "1.0.0" +description = "Pallets and constants used in Relay Chain networks." authors.workspace = true edition.workspace = true license.workspace = true @@ -11,7 +12,7 @@ bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } rustc-hex = { version = "2.1.0", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.188", default-features = false, features = ["alloc"] } serde_derive = { version = "1.0.117" } static_assertions = "1.1.0" @@ -29,6 +30,7 @@ sp-npos-elections = { path = "../../../substrate/primitives/npos-elections", def pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false } pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } pallet-fast-unstake = { path = "../../../substrate/frame/fast-unstake", default-features = false } +pallet-identity = { path = "../../../substrate/frame/identity", default-features = false } pallet-session = { path = "../../../substrate/frame/session", default-features = false } frame-support = { path = "../../../substrate/frame/support", default-features = false } pallet-staking = { path = "../../../substrate/frame/staking", default-features = false } @@ -38,7 +40,7 @@ pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-featur pallet-vesting = { path = "../../../substrate/frame/vesting", default-features = false } pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false } pallet-treasury = { path = "../../../substrate/frame/treasury", default-features = false } -pallet-asset-rate = { path = "../../../substrate/frame/asset-rate", default-features = false } +pallet-asset-rate = { path = "../../../substrate/frame/asset-rate", default-features = false, optional = true } pallet-election-provider-multi-phase = { path = "../../../substrate/frame/election-provider-multi-phase", default-features = false } frame-election-provider-support = { path = "../../../substrate/frame/election-provider-support", default-features = false } @@ -51,6 +53,9 @@ runtime-parachains = { package = "polkadot-runtime-parachains", path = "../parac slot-range-helper = { path = "slot_range_helper", default-features = false } xcm = { package = "staging-xcm", path = "../../xcm", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../xcm/xcm-executor", default-features = false, optional = true } + +pallet-xcm-benchmarks = { path = "../../xcm/pallet-xcm-benchmarks", default-features = false, optional = true } xcm-builder = { package = "staging-xcm-builder", path = "../../xcm/xcm-builder", default-features = false } [dev-dependencies] @@ -60,7 +65,7 @@ pallet-babe = { path = "../../../substrate/frame/babe" } pallet-treasury = { path = "../../../substrate/frame/treasury" } 
sp-keystore = { path = "../../../substrate/primitives/keystore" } sp-keyring = { path = "../../../substrate/primitives/keyring" } -serde_json = "1.0.107" +serde_json = "1.0.108" libsecp256k1 = "0.7.0" test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../primitives/test-helpers" } @@ -76,11 +81,12 @@ std = [ "inherents/std", "libsecp256k1/std", "log/std", - "pallet-asset-rate/std", + "pallet-asset-rate?/std", "pallet-authorship/std", "pallet-balances/std", "pallet-election-provider-multi-phase/std", "pallet-fast-unstake/std", + "pallet-identity/std", "pallet-session/std", "pallet-staking-reward-fn/std", "pallet-staking/std", @@ -88,6 +94,7 @@ std = [ "pallet-transaction-payment/std", "pallet-treasury/std", "pallet-vesting/std", + "pallet-xcm-benchmarks/std", "parity-scale-codec/std", "primitives/std", "runtime-parachains/std", @@ -104,6 +111,7 @@ std = [ "sp-staking/std", "sp-std/std", "xcm-builder/std", + "xcm-executor/std", "xcm/std", ] runtime-benchmarks = [ @@ -118,15 +126,18 @@ runtime-benchmarks = [ "pallet-balances/runtime-benchmarks", "pallet-election-provider-multi-phase/runtime-benchmarks", "pallet-fast-unstake/runtime-benchmarks", + "pallet-identity/runtime-benchmarks", "pallet-staking/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "pallet-treasury/runtime-benchmarks", "pallet-vesting/runtime-benchmarks", + "pallet-xcm-benchmarks/runtime-benchmarks", "primitives/runtime-benchmarks", "runtime-parachains/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "sp-staking/runtime-benchmarks", "xcm-builder/runtime-benchmarks", + "xcm-executor/runtime-benchmarks", ] try-runtime = [ "frame-election-provider-support/try-runtime", @@ -139,6 +150,7 @@ try-runtime = [ "pallet-balances/try-runtime", "pallet-election-provider-multi-phase/try-runtime", "pallet-fast-unstake/try-runtime", + "pallet-identity/try-runtime", "pallet-session/try-runtime", "pallet-staking/try-runtime", "pallet-timestamp/try-runtime", diff --git a/polkadot/runtime/common/slot_range_helper/Cargo.toml b/polkadot/runtime/common/slot_range_helper/Cargo.toml index f65717519d5e1e715f478e37381e943e20bbdca4..59c76a6cabb3c92cd45dc739330921aca11ca9bf 100644 --- a/polkadot/runtime/common/slot_range_helper/Cargo.toml +++ b/polkadot/runtime/common/slot_range_helper/Cargo.toml @@ -4,6 +4,7 @@ version = "1.0.0" authors.workspace = true edition.workspace = true license.workspace = true +description = "Helper crate for generating slot ranges for the Polkadot runtime." 
[dependencies] paste = "1.0" diff --git a/polkadot/runtime/common/src/assigned_slots/migration.rs b/polkadot/runtime/common/src/assigned_slots/migration.rs index 0e88b27a1ff8ae7bc1c7fba7fbeb4b057fdaa5fe..ba3108c0aa38b9b9a2dabba0559d74ec3c97d0ed 100644 --- a/polkadot/runtime/common/src/assigned_slots/migration.rs +++ b/polkadot/runtime/common/src/assigned_slots/migration.rs @@ -25,8 +25,8 @@ use sp_std::vec::Vec; pub mod v1 { use super::*; - pub struct MigrateToV1(sp_std::marker::PhantomData); - impl OnRuntimeUpgrade for MigrateToV1 { + pub struct VersionUncheckedMigrateToV1(sp_std::marker::PhantomData); + impl OnRuntimeUpgrade for VersionUncheckedMigrateToV1 { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { let onchain_version = Pallet::::on_chain_storage_version(); @@ -60,13 +60,13 @@ pub mod v1 { } } - /// [`MigrateToV1`] wrapped in a + /// [`VersionUncheckedMigrateToV1`] wrapped in a /// [`VersionedMigration`](frame_support::migrations::VersionedMigration), ensuring the /// migration is only performed when on-chain version is 0. - pub type VersionCheckedMigrateToV1 = frame_support::migrations::VersionedMigration< + pub type MigrateToV1 = frame_support::migrations::VersionedMigration< 0, 1, - MigrateToV1, + VersionUncheckedMigrateToV1, Pallet, ::DbWeight, >; diff --git a/polkadot/runtime/common/src/assigned_slots/mod.rs b/polkadot/runtime/common/src/assigned_slots/mod.rs index cc8ec339c11847b60aaf74e909ab798a6cb45b51..cb2e5083b0ac64e30e3579c7a1b4bdc387bb9eba 100644 --- a/polkadot/runtime/common/src/assigned_slots/mod.rs +++ b/polkadot/runtime/common/src/assigned_slots/mod.rs @@ -720,6 +720,7 @@ mod tests { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); type MaxHolds = ConstU32<1>; type MaxFreezes = ConstU32<1>; diff --git a/polkadot/runtime/common/src/auctions.rs b/polkadot/runtime/common/src/auctions.rs index e35303912fa1dca33d175646dfe75ace3f719d19..267413eb1badda7c4e802b32c9b144451dd97f55 100644 --- a/polkadot/runtime/common/src/auctions.rs +++ b/polkadot/runtime/common/src/auctions.rs @@ -747,6 +747,7 @@ mod tests { type MaxReserves = MaxReserves; type ReserveIdentifier = [u8; 8]; type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); type MaxHolds = ConstU32<1>; type MaxFreezes = ConstU32<1>; diff --git a/polkadot/runtime/common/src/claims.rs b/polkadot/runtime/common/src/claims.rs index 0c736a632842336e30d12381697e4d638a65d74a..548adc6fbd5a77db2ab8e0ad6b41dfc20a8753ec 100644 --- a/polkadot/runtime/common/src/claims.rs +++ b/polkadot/runtime/common/src/claims.rs @@ -780,6 +780,7 @@ mod tests { type ReserveIdentifier = [u8; 8]; type WeightInfo = (); type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); type MaxHolds = ConstU32<1>; type MaxFreezes = ConstU32<1>; diff --git a/polkadot/runtime/common/src/crowdloan/mod.rs b/polkadot/runtime/common/src/crowdloan/mod.rs index 5a2939145925c453f9c86b9c6e5f7c9f71de9757..f67fc12a67f219573f70e9f0102f39de8cd83c4b 100644 --- a/polkadot/runtime/common/src/crowdloan/mod.rs +++ b/polkadot/runtime/common/src/crowdloan/mod.rs @@ -941,6 +941,7 @@ mod tests { type ReserveIdentifier = [u8; 8]; type WeightInfo = (); type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); type MaxHolds = 
ConstU32<1>; type MaxFreezes = ConstU32<1>; diff --git a/polkadot/runtime/common/src/identity_migrator.rs b/polkadot/runtime/common/src/identity_migrator.rs new file mode 100644 index 0000000000000000000000000000000000000000..cc2c3ce7773c250efb72f9d26ed7604a6bd756e9 --- /dev/null +++ b/polkadot/runtime/common/src/identity_migrator.rs @@ -0,0 +1,305 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! This pallet is designed to go into a source chain and destination chain to migrate data. The +//! design motivations are: +//! +//! - Call some function on the source chain that executes some migration (clearing state, +//! forwarding an XCM program). +//! - Call some function (probably from an XCM program) on the destination chain. +//! - Avoid cluttering the source pallet with new dispatchables that are unrelated to its +//! functionality and only used for migration. +//! +//! After the migration is complete, the pallet may be removed from both chains' runtimes as well as +//! the `polkadot-runtime-common` crate. + +use frame_support::{dispatch::DispatchResult, traits::Currency, weights::Weight}; +pub use pallet::*; +use pallet_identity; +use sp_core::Get; + +#[cfg(feature = "runtime-benchmarks")] +use frame_benchmarking::{account, impl_benchmark_test_suite, v2::*, BenchmarkError}; + +pub trait WeightInfo { + fn reap_identity(r: u32, s: u32) -> Weight; + fn poke_deposit() -> Weight; +} + +impl WeightInfo for () { + fn reap_identity(_r: u32, _s: u32) -> Weight { + Weight::MAX + } + fn poke_deposit() -> Weight { + Weight::MAX + } +} + +pub struct TestWeightInfo; +impl WeightInfo for TestWeightInfo { + fn reap_identity(_r: u32, _s: u32) -> Weight { + Weight::zero() + } + fn poke_deposit() -> Weight { + Weight::zero() + } +} + +// Must use the same `Balance` as `T`'s Identity pallet to handle deposits. +type BalanceOf = <::Currency as Currency< + ::AccountId, +>>::Balance; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::{ + dispatch::{DispatchResultWithPostInfo, PostDispatchInfo}, + pallet_prelude::*, + traits::EnsureOrigin, + }; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config + pallet_identity::Config { + /// Overarching event type. + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + + /// The origin that can reap identities. Expected to be `EnsureSigned` on the + /// source chain such that anyone can call this function. + type Reaper: EnsureOrigin; + + /// A handler for what to do when an identity is reaped. + type ReapIdentityHandler: OnReapIdentity; + + /// Weight information for the extrinsics in the pallet. + type WeightInfo: WeightInfo; + } + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// The identity and all sub accounts were reaped for `who`. 
+ IdentityReaped { who: T::AccountId }, + /// The deposits held for `who` were updated. `identity` is the new deposit held for + /// identity info, and `subs` is the new deposit held for the sub-accounts. + DepositUpdated { who: T::AccountId, identity: BalanceOf, subs: BalanceOf }, + } + + #[pallet::call] + impl Pallet { + /// Reap the `IdentityInfo` of `who` from the Identity pallet of `T`, unreserving any + /// deposits held and removing storage items associated with `who`. + #[pallet::call_index(0)] + #[pallet::weight(::WeightInfo::reap_identity( + T::MaxRegistrars::get(), + T::MaxSubAccounts::get() + ))] + pub fn reap_identity( + origin: OriginFor, + who: T::AccountId, + ) -> DispatchResultWithPostInfo { + T::Reaper::ensure_origin(origin)?; + // - number of registrars (required to calculate weight) + // - byte size of `IdentityInfo` (required to calculate remote deposit) + // - number of sub accounts (required to calculate both weight and remote deposit) + let (registrars, bytes, subs) = pallet_identity::Pallet::::reap_identity(&who)?; + T::ReapIdentityHandler::on_reap_identity(&who, bytes, subs)?; + Self::deposit_event(Event::IdentityReaped { who }); + let post = PostDispatchInfo { + actual_weight: Some(::WeightInfo::reap_identity( + registrars, subs, + )), + pays_fee: Pays::No, + }; + Ok(post) + } + + /// Update the deposit of `who`. Meant to be called by the system with an XCM `Transact` + /// Instruction. + #[pallet::call_index(1)] + #[pallet::weight(::WeightInfo::poke_deposit())] + pub fn poke_deposit(origin: OriginFor, who: T::AccountId) -> DispatchResultWithPostInfo { + ensure_root(origin)?; + let (id_deposit, subs_deposit) = pallet_identity::Pallet::::poke_deposit(&who)?; + Self::deposit_event(Event::DepositUpdated { + who, + identity: id_deposit, + subs: subs_deposit, + }); + Ok(Pays::No.into()) + } + } +} + +/// Trait to handle reaping identity from state. +pub trait OnReapIdentity { + /// What to do when an identity is reaped. For example, the implementation could send an XCM + /// program to another chain. Concretely, a type implementing this trait in the Polkadot + /// runtime would teleport enough DOT to the People Chain to cover the Identity deposit there. + /// + /// This could also directly include `Transact { poke_deposit(..), ..}`. + /// + /// Inputs + /// - `who`: Whose identity was reaped. + /// - `bytes`: The byte size of `IdentityInfo`. + /// - `subs`: The number of sub-accounts they had. + fn on_reap_identity(who: &AccountId, bytes: u32, subs: u32) -> DispatchResult; +} + +impl OnReapIdentity for () { + fn on_reap_identity(_who: &AccountId, _bytes: u32, _subs: u32) -> DispatchResult { + Ok(()) + } +} + +#[cfg(feature = "runtime-benchmarks")] +#[benchmarks] +mod benchmarks { + use super::*; + use frame_support::traits::EnsureOrigin; + use frame_system::RawOrigin; + use pallet_identity::{Data, IdentityInformationProvider, Judgement, Pallet as Identity}; + use parity_scale_codec::Encode; + use sp_runtime::{ + traits::{Bounded, Hash, StaticLookup}, + Saturating, + }; + use sp_std::{boxed::Box, vec::Vec, *}; + + const SEED: u32 = 0; + + fn assert_last_event(generic_event: ::RuntimeEvent) { + let events = frame_system::Pallet::::events(); + let system_event: ::RuntimeEvent = generic_event.into(); + let frame_system::EventRecord { event, .. 
} = &events[events.len() - 1]; + assert_eq!(event, &system_event); + } + + #[benchmark] + fn reap_identity( + r: Linear<0, { T::MaxRegistrars::get() }>, + s: Linear<0, { T::MaxSubAccounts::get() }>, + ) -> Result<(), BenchmarkError> { + // set up target + let target: T::AccountId = account("target", 0, SEED); + let target_origin = + ::RuntimeOrigin::from(RawOrigin::Signed(target.clone())); + let target_lookup = T::Lookup::unlookup(target.clone()); + let _ = T::Currency::make_free_balance_be(&target, BalanceOf::::max_value()); + + // set identity + let info = ::IdentityInformation::create_identity_info(); + Identity::::set_identity( + RawOrigin::Signed(target.clone()).into(), + Box::new(info.clone()), + )?; + + // create and set subs + let mut subs = Vec::new(); + let data = Data::Raw(vec![0; 32].try_into().unwrap()); + for ii in 0..s { + let sub_account = account("sub", ii, SEED); + subs.push((sub_account, data.clone())); + } + Identity::::set_subs(target_origin.clone(), subs.clone())?; + + // add registrars and provide judgements + let registrar_origin = T::RegistrarOrigin::try_successful_origin() + .expect("RegistrarOrigin has no successful origin required for the benchmark"); + for ii in 0..r { + // registrar account + let registrar: T::AccountId = account("registrar", ii, SEED); + let registrar_lookup = T::Lookup::unlookup(registrar.clone()); + let _ = ::Currency::make_free_balance_be( + ®istrar, + ::Currency::minimum_balance(), + ); + + // add registrar + Identity::::add_registrar(registrar_origin.clone(), registrar_lookup)?; + Identity::::set_fee(RawOrigin::Signed(registrar.clone()).into(), ii, 10u32.into())?; + let fields = ::IdentityInformation::all_fields(); + Identity::::set_fields(RawOrigin::Signed(registrar.clone()).into(), ii, fields)?; + + // request and provide judgement + Identity::::request_judgement(target_origin.clone(), ii, 10u32.into())?; + Identity::::provide_judgement( + RawOrigin::Signed(registrar).into(), + ii, + target_lookup.clone(), + Judgement::Reasonable, + ::Hashing::hash_of(&info), + )?; + } + + let origin = T::Reaper::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + + #[extrinsic_call] + _(origin as T::RuntimeOrigin, target.clone()); + + assert_last_event::(Event::::IdentityReaped { who: target.clone() }.into()); + + let fields = ::IdentityInformation::all_fields(); + assert!(!Identity::::has_identity(&target, fields)); + assert_eq!(Identity::::subs(&target).len(), 0); + + Ok(()) + } + + #[benchmark] + fn poke_deposit() -> Result<(), BenchmarkError> { + let target: T::AccountId = account("target", 0, SEED); + let _ = T::Currency::make_free_balance_be(&target, BalanceOf::::max_value()); + let info = ::IdentityInformation::create_identity_info(); + + let _ = Identity::::set_identity_no_deposit(&target, info.clone()); + + let sub_account: T::AccountId = account("sub", 0, SEED); + let _ = Identity::::set_sub_no_deposit(&target, sub_account.clone()); + + // expected deposits + let expected_id_deposit = ::BasicDeposit::get() + .saturating_add( + ::ByteDeposit::get() + .saturating_mul(>::from(info.encoded_size() as u32)), + ); + // only 1 sub + let expected_sub_deposit = ::SubAccountDeposit::get(); + + #[extrinsic_call] + _(RawOrigin::Root, target.clone()); + + assert_last_event::( + Event::::DepositUpdated { + who: target, + identity: expected_id_deposit, + subs: expected_sub_deposit, + } + .into(), + ); + + Ok(()) + } + + impl_benchmark_test_suite!( + Pallet, + crate::integration_tests::new_test_ext(), + crate::integration_tests::Test, + ); +} 
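
The `OnReapIdentity` hook above deliberately stays small: `reap_identity` only reports who was reaped, how many encoded bytes their `IdentityInfo` occupied, and how many sub-accounts they had; everything cross-chain is left to the runtime's handler. As a rough, hypothetical sketch (not part of this diff — `LogReapedIdentity` is an invented name, and a real Polkadot implementation would instead build an XCM program that teleports the deposit and `Transact`s `poke_deposit` on the destination chain), a custom handler could look like this:

// Hypothetical illustration only: a handler that just logs what was reaped.
// Assumes `OnReapIdentity` (defined above) and the `log` crate are in scope.
use frame_support::dispatch::DispatchResult;

pub struct LogReapedIdentity;

impl<AccountId: core::fmt::Debug> OnReapIdentity<AccountId> for LogReapedIdentity {
    fn on_reap_identity(who: &AccountId, bytes: u32, subs: u32) -> DispatchResult {
        // `bytes` would size the identity deposit on the destination chain and
        // `subs` the sub-account deposit; both are forwarded untouched by `reap_identity`.
        log::info!("identity of {who:?} reaped: {bytes} identity bytes, {subs} subs");
        Ok(())
    }
}

The runtime then wires the handler in via `type ReapIdentityHandler = LogReapedIdentity;` in its `identity_migrator::Config` implementation.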
diff --git a/polkadot/runtime/common/src/impls.rs b/polkadot/runtime/common/src/impls.rs index 590593745ed040652c79e5aab86877862aeff0e4..e50ffb634b305eda8feb3c1dcf87c9f3366defb5 100644 --- a/polkadot/runtime/common/src/impls.rs +++ b/polkadot/runtime/common/src/impls.rs @@ -274,6 +274,7 @@ mod tests { type ReserveIdentifier = [u8; 8]; type WeightInfo = (); type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); type MaxHolds = ConstU32<1>; type MaxFreezes = ConstU32<1>; diff --git a/polkadot/runtime/common/src/integration_tests.rs b/polkadot/runtime/common/src/integration_tests.rs index f14db68267d5b550553bd6f48819890adf011c48..793f75e79cd13b5509ff956acb73a29889b68e30 100644 --- a/polkadot/runtime/common/src/integration_tests.rs +++ b/polkadot/runtime/common/src/integration_tests.rs @@ -17,7 +17,7 @@ //! Mocking utilities for testing with real pallets. use crate::{ - auctions, crowdloan, + auctions, crowdloan, identity_migrator, mock::{conclude_pvf_checking, validators_public_keys}, paras_registrar, slot_range::SlotRange, @@ -32,6 +32,7 @@ use frame_support::{ }; use frame_support_test::TestRandomness; use frame_system::EnsureRoot; +use pallet_identity::{self, legacy::IdentityInfo}; use parity_scale_codec::Encode; use primitives::{ BlockNumber, HeadData, Id as ParaId, SessionIndex, ValidationCode, LOWEST_PUBLIC_ID, @@ -88,6 +89,10 @@ frame_support::construct_runtime!( Auctions: auctions::{Pallet, Call, Storage, Event}, Crowdloan: crowdloan::{Pallet, Call, Storage, Event}, Slots: slots::{Pallet, Call, Storage, Event}, + + // Migrators + Identity: pallet_identity::{Pallet, Call, Storage, Event}, + IdentityMigrator: identity_migrator::{Pallet, Call, Event}, } ); @@ -181,6 +186,7 @@ impl pallet_balances::Config for Test { type MaxReserves = MaxReserves; type ReserveIdentifier = [u8; 8]; type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); type MaxHolds = ConstU32<0>; type MaxFreezes = ConstU32<0>; @@ -273,6 +279,28 @@ impl crowdloan::Config for Test { type WeightInfo = crate::crowdloan::TestWeightInfo; } +impl pallet_identity::Config for Test { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type Slashed = (); + type BasicDeposit = ConstU32<100>; + type ByteDeposit = ConstU32<10>; + type SubAccountDeposit = ConstU32<100>; + type MaxSubAccounts = ConstU32<2>; + type IdentityInformation = IdentityInfo>; + type MaxRegistrars = ConstU32<20>; + type RegistrarOrigin = EnsureRoot; + type ForceOrigin = EnsureRoot; + type WeightInfo = (); +} + +impl identity_migrator::Config for Test { + type RuntimeEvent = RuntimeEvent; + type Reaper = EnsureRoot; + type ReapIdentityHandler = (); + type WeightInfo = crate::identity_migrator::TestWeightInfo; +} + /// Create a new set of test externalities. 
pub fn new_test_ext() -> TestExternalities { let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); diff --git a/polkadot/runtime/common/src/lib.rs b/polkadot/runtime/common/src/lib.rs index 46a9f7d12cd4149839547df0f3d98d42f60af9e1..bd49d3cccc9cac99f3a11b698629dfd51b847816 100644 --- a/polkadot/runtime/common/src/lib.rs +++ b/polkadot/runtime/common/src/lib.rs @@ -23,6 +23,7 @@ pub mod auctions; pub mod claims; pub mod crowdloan; pub mod elections; +pub mod identity_migrator; pub mod impls; pub mod paras_registrar; pub mod paras_sudo_wrapper; @@ -246,7 +247,8 @@ impl sp_runtime::traits::Convert for U256ToBalance { } /// Macro to set a value (e.g. when using the `parameter_types` macro) to either a production value -/// or to an environment variable or testing value (in case the `fast-runtime` feature is selected). +/// or to an environment variable or testing value (in case the `fast-runtime` feature is selected) +/// or one of two testing values depending on feature. /// Note that the environment variable is evaluated _at compile time_. /// /// Usage: @@ -255,6 +257,8 @@ impl sp_runtime::traits::Convert for U256ToBalance { /// // Note that the env variable version parameter cannot be const. /// pub LaunchPeriod: BlockNumber = prod_or_fast!(7 * DAYS, 1, "KSM_LAUNCH_PERIOD"); /// pub const VotingPeriod: BlockNumber = prod_or_fast!(7 * DAYS, 1 * MINUTES); +/// pub const EpochDuration: BlockNumber = +/// prod_or_fast!(1 * HOURS, "fast-runtime", 1 * MINUTES, "fast-runtime-10m", 10 * MINUTES); /// } /// ``` #[macro_export] diff --git a/polkadot/runtime/common/src/paras_registrar/migration.rs b/polkadot/runtime/common/src/paras_registrar/migration.rs index b767985489d3622e6da01e689609c8913929e78a..f977674a1e4e22254bcac6e68cc7bdf20711a043 100644 --- a/polkadot/runtime/common/src/paras_registrar/migration.rs +++ b/polkadot/runtime/common/src/paras_registrar/migration.rs @@ -60,11 +60,10 @@ impl> OnRuntimeUpgrade } } -pub type VersionCheckedMigrateToV1 = - frame_support::migrations::VersionedMigration< - 0, - 1, - VersionUncheckedMigrateToV1, - super::Pallet, - ::DbWeight, - >; +pub type MigrateToV1 = frame_support::migrations::VersionedMigration< + 0, + 1, + VersionUncheckedMigrateToV1, + super::Pallet, + ::DbWeight, +>; diff --git a/polkadot/runtime/common/src/paras_registrar/mod.rs b/polkadot/runtime/common/src/paras_registrar/mod.rs index f2751803a413395b11ccc7126c81ee693ab981ce..2d33cf28993dffdd8ae12f28c45464ba69608b06 100644 --- a/polkadot/runtime/common/src/paras_registrar/mod.rs +++ b/polkadot/runtime/common/src/paras_registrar/mod.rs @@ -29,7 +29,7 @@ use frame_system::{self, ensure_root, ensure_signed}; use primitives::{HeadData, Id as ParaId, ValidationCode, LOWEST_PUBLIC_ID}; use runtime_parachains::{ configuration, ensure_parachain, - paras::{self, ParaGenesisArgs}, + paras::{self, ParaGenesisArgs, SetGoAhead}, Origin, ParaLifecycle, }; use sp_std::{prelude::*, result}; @@ -412,7 +412,7 @@ pub mod pallet { new_code: ValidationCode, ) -> DispatchResult { Self::ensure_root_para_or_owner(origin, para)?; - runtime_parachains::schedule_code_upgrade::(para, new_code)?; + runtime_parachains::schedule_code_upgrade::(para, new_code, SetGoAhead::No)?; Ok(()) } @@ -792,6 +792,7 @@ mod tests { type ReserveIdentifier = [u8; 8]; type WeightInfo = (); type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); type MaxHolds = ConstU32<1>; type MaxFreezes = ConstU32<1>; diff --git 
a/polkadot/runtime/common/src/purchase.rs b/polkadot/runtime/common/src/purchase.rs index 58ca19d0288c4973fc3e7d7f325df31b785d5be6..bc95483dd7ede3a6eb29e841d75a7ec59c2595b7 100644 --- a/polkadot/runtime/common/src/purchase.rs +++ b/polkadot/runtime/common/src/purchase.rs @@ -552,6 +552,7 @@ mod tests { type ReserveIdentifier = [u8; 8]; type WeightInfo = (); type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); type MaxHolds = ConstU32<1>; type MaxFreezes = ConstU32<1>; diff --git a/polkadot/runtime/common/src/slots/mod.rs b/polkadot/runtime/common/src/slots/mod.rs index a3efd5bfa30a6c4bd8455834534514712f6458ae..01f6365b791f6fe36d6cf5260a2d86b9eada4cca 100644 --- a/polkadot/runtime/common/src/slots/mod.rs +++ b/polkadot/runtime/common/src/slots/mod.rs @@ -570,6 +570,7 @@ mod tests { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); type MaxHolds = ConstU32<1>; type MaxFreezes = ConstU32<1>; diff --git a/polkadot/runtime/common/src/xcm_sender.rs b/polkadot/runtime/common/src/xcm_sender.rs index ff529143c5094dff797c420a5040b4f62639701e..4d31c92cdd3a9500fdb3000e7a8f14891dee7087 100644 --- a/polkadot/runtime/common/src/xcm_sender.rs +++ b/polkadot/runtime/common/src/xcm_sender.rs @@ -31,38 +31,63 @@ use SendError::*; /// Simple value-bearing trait for determining/expressing the assets required to be paid for a /// messages to be delivered to a parachain. -pub trait PriceForParachainDelivery { +pub trait PriceForMessageDelivery { + /// Type used for charging different prices to different destinations + type Id; /// Return the assets required to deliver `message` to the given `para` destination. - fn price_for_parachain_delivery(para: ParaId, message: &Xcm<()>) -> MultiAssets; + fn price_for_delivery(id: Self::Id, message: &Xcm<()>) -> MultiAssets; } -impl PriceForParachainDelivery for () { - fn price_for_parachain_delivery(_: ParaId, _: &Xcm<()>) -> MultiAssets { +impl PriceForMessageDelivery for () { + type Id = (); + + fn price_for_delivery(_: Self::Id, _: &Xcm<()>) -> MultiAssets { + MultiAssets::new() + } +} + +pub struct NoPriceForMessageDelivery(PhantomData); +impl PriceForMessageDelivery for NoPriceForMessageDelivery { + type Id = Id; + + fn price_for_delivery(_: Self::Id, _: &Xcm<()>) -> MultiAssets { MultiAssets::new() } } -/// Implementation of `PriceForParachainDelivery` which returns a fixed price. +/// Implementation of [`PriceForMessageDelivery`] which returns a fixed price. pub struct ConstantPrice(sp_std::marker::PhantomData); -impl> PriceForParachainDelivery for ConstantPrice { - fn price_for_parachain_delivery(_: ParaId, _: &Xcm<()>) -> MultiAssets { +impl> PriceForMessageDelivery for ConstantPrice { + type Id = (); + + fn price_for_delivery(_: Self::Id, _: &Xcm<()>) -> MultiAssets { T::get() } } -/// Implementation of `PriceForParachainDelivery` which returns an exponentially increasing price. -/// The `A` type parameter is used to denote the asset ID that will be used for paying the delivery -/// fee. -/// +/// Implementation of [`PriceForMessageDelivery`] which returns an exponentially increasing price. /// The formula for the fee is based on the sum of a base fee plus a message length fee, multiplied -/// by a specified factor. In mathematical form, it is `F * (B + encoded_msg_len * M)`. +/// by a specified factor. 
In mathematical form: +/// +/// `F * (B + encoded_msg_len * M)` +/// +/// Thus, if F = 1 and M = 0, this type is equivalent to [`ConstantPrice`]. +/// +/// The type parameters are understood as follows: +/// +/// - `A`: Used to denote the asset ID that will be used for paying the delivery fee. +/// - `B`: The base fee to pay for message delivery. +/// - `M`: The fee to pay for each and every byte of the message after encoding it. +/// - `F`: A fee factor multiplier. It can be understood as the exponent term in the formula. pub struct ExponentialPrice(sp_std::marker::PhantomData<(A, B, M, F)>); -impl, B: Get, M: Get, F: FeeTracker> PriceForParachainDelivery +impl, B: Get, M: Get, F: FeeTracker> PriceForMessageDelivery for ExponentialPrice { - fn price_for_parachain_delivery(para: ParaId, msg: &Xcm<()>) -> MultiAssets { + type Id = F::Id; + + fn price_for_delivery(id: Self::Id, msg: &Xcm<()>) -> MultiAssets { let msg_fee = (msg.encoded_size() as u128).saturating_mul(M::get()); let fee_sum = B::get().saturating_add(msg_fee); - let amount = F::get_fee_factor(para).saturating_mul_int(fee_sum); + let amount = F::get_fee_factor(id).saturating_mul_int(fee_sum); (A::get(), amount).into() } } @@ -70,8 +95,10 @@ impl, B: Get, M: Get, F: FeeTracker> PriceForParacha /// XCM sender for relay chain. It only sends downward message. pub struct ChildParachainRouter(PhantomData<(T, W, P)>); -impl - SendXcm for ChildParachainRouter +impl SendXcm + for ChildParachainRouter +where + P: PriceForMessageDelivery, { type Ticket = (HostConfiguration>, ParaId, Vec); @@ -91,7 +118,7 @@ impl>::config(); let para = id.into(); - let price = P::price_for_parachain_delivery(para, &xcm); + let price = P::price_for_delivery(para, &xcm); let blob = W::wrap_version(&d, xcm).map_err(|()| DestinationUnsupported)?.encode(); >::can_queue_downward_message(&config, ¶, &blob) .map_err(Into::::into)?; @@ -109,6 +136,94 @@ impl( + sp_std::marker::PhantomData<( + XcmConfig, + ExistentialDeposit, + PriceForDelivery, + ParaId, + ToParaIdHelper, + )>, +); + +#[cfg(feature = "runtime-benchmarks")] +impl< + XcmConfig: xcm_executor::Config, + ExistentialDeposit: Get>, + PriceForDelivery: PriceForMessageDelivery, + Parachain: Get, + ToParachainHelper: EnsureForParachain, + > pallet_xcm_benchmarks::EnsureDelivery + for ToParachainDeliveryHelper< + XcmConfig, + ExistentialDeposit, + PriceForDelivery, + Parachain, + ToParachainHelper, + > +{ + fn ensure_successful_delivery( + origin_ref: &MultiLocation, + _dest: &MultiLocation, + fee_reason: xcm_executor::traits::FeeReason, + ) -> (Option, Option) { + use xcm_executor::{ + traits::{FeeManager, TransactAsset}, + FeesMode, + }; + + let mut fees_mode = None; + if !XcmConfig::FeeManager::is_waived(Some(origin_ref), fee_reason) { + // if not waived, we need to set up accounts for paying and receiving fees + + // mint ED to origin if needed + if let Some(ed) = ExistentialDeposit::get() { + XcmConfig::AssetTransactor::deposit_asset(&ed, &origin_ref, None).unwrap(); + } + + // overestimate delivery fee + let overestimated_xcm = vec![ClearOrigin; 128].into(); + let overestimated_fees = + PriceForDelivery::price_for_delivery(Parachain::get(), &overestimated_xcm); + + // mint overestimated fee to origin + for fee in overestimated_fees.inner() { + XcmConfig::AssetTransactor::deposit_asset(&fee, &origin_ref, None).unwrap(); + } + + // allow more initialization for target parachain + ToParachainHelper::ensure(Parachain::get()); + + // expected worst case - direct withdraw + fees_mode = Some(FeesMode { 
jit_withdraw: true }); + } + (fees_mode, None) + } +} + +/// Ensure more initialization for `ParaId`. (e.g. open HRMP channels, ...) +#[cfg(feature = "runtime-benchmarks")] +pub trait EnsureForParachain { + fn ensure(para_id: ParaId); +} +#[cfg(feature = "runtime-benchmarks")] +impl EnsureForParachain for () { + fn ensure(_: ParaId) { + // doing nothing + } +} + #[cfg(test)] mod tests { use super::*; @@ -124,7 +239,17 @@ mod tests { struct TestFeeTracker; impl FeeTracker for TestFeeTracker { - fn get_fee_factor(_: ParaId) -> FixedU128 { + type Id = ParaId; + + fn get_fee_factor(_: Self::Id) -> FixedU128 { + FixedU128::from_rational(101, 100) + } + + fn increase_fee_factor(_: Self::Id, _: FixedU128) -> FixedU128 { + FixedU128::from_rational(101, 100) + } + + fn decrease_fee_factor(_: Self::Id) -> FixedU128 { FixedU128::from_rational(101, 100) } } @@ -142,21 +267,21 @@ mod tests { // message_length = 1 let result: u128 = TestFeeTracker::get_fee_factor(id).saturating_mul_int(b + m); assert_eq!( - TestExponentialPrice::price_for_parachain_delivery(id, &Xcm(vec![])), + TestExponentialPrice::price_for_delivery(id, &Xcm(vec![])), (FeeAssetId::get(), result).into() ); // message size = 2 let result: u128 = TestFeeTracker::get_fee_factor(id).saturating_mul_int(b + (2 * m)); assert_eq!( - TestExponentialPrice::price_for_parachain_delivery(id, &Xcm(vec![ClearOrigin])), + TestExponentialPrice::price_for_delivery(id, &Xcm(vec![ClearOrigin])), (FeeAssetId::get(), result).into() ); // message size = 4 let result: u128 = TestFeeTracker::get_fee_factor(id).saturating_mul_int(b + (4 * m)); assert_eq!( - TestExponentialPrice::price_for_parachain_delivery( + TestExponentialPrice::price_for_delivery( id, &Xcm(vec![SetAppendix(Xcm(vec![ClearOrigin]))]) ), diff --git a/polkadot/runtime/metrics/Cargo.toml b/polkadot/runtime/metrics/Cargo.toml index cfa6bf3dafb2aa02e29d3ee5865aa314d0e7436b..cdfab82d00c1850ecb6c5e240bf3d4df92ccc75a 100644 --- a/polkadot/runtime/metrics/Cargo.toml +++ b/polkadot/runtime/metrics/Cargo.toml @@ -4,6 +4,7 @@ version = "1.0.0" authors.workspace = true edition.workspace = true license.workspace = true +description = "Runtime metric interface for the Polkadot node" [dependencies] sp-std = { package = "sp-std", path = "../../../substrate/primitives/std", default-features = false} diff --git a/polkadot/runtime/parachains/Cargo.toml b/polkadot/runtime/parachains/Cargo.toml index 9f1ec57257f821901c2febde5a02c0881aadc1be..b6800fc0844dfacc424ec642d7a06cbb456e360b 100644 --- a/polkadot/runtime/parachains/Cargo.toml +++ b/polkadot/runtime/parachains/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "polkadot-runtime-parachains" version = "1.0.0" +description = "Relay Chain runtime code responsible for Parachains." 
authors.workspace = true edition.workspace = true license.workspace = true @@ -11,7 +12,7 @@ bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } log = { version = "0.4.17", default-features = false } rustc-hex = { version = "2.1.0", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.188", default-features = false, features = ["derive", "alloc"] } derive_more = "0.99.17" bitflags = "1.3.2" @@ -62,7 +63,7 @@ test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../pri sp-tracing = { path = "../../../substrate/primitives/tracing" } thousands = "0.2.0" assert_matches = "1" -serde_json = "1.0.107" +serde_json = "1.0.108" [features] default = [ "std" ] @@ -88,6 +89,8 @@ std = [ "polkadot-parachain-primitives/std", "polkadot-runtime-metrics/std", "primitives/std", + "rand/std", + "rand_chacha/std", "rustc-hex/std", "scale-info/std", "serde/std", diff --git a/polkadot/runtime/parachains/src/assigner.rs b/polkadot/runtime/parachains/src/assigner.rs index b21e857a471379a57af4974d2280e9da69448ecd..9e408df61dc18d3da7205a7a4ef5f8b0d9386c03 100644 --- a/polkadot/runtime/parachains/src/assigner.rs +++ b/polkadot/runtime/parachains/src/assigner.rs @@ -53,7 +53,8 @@ impl Pallet { fn is_bulk_core(core_idx: &CoreIndex) -> bool { let parachain_cores = as AssignmentProvider>>::session_core_count(); - (0..parachain_cores).contains(&core_idx.0) + + core_idx.0 < parachain_cores } } diff --git a/polkadot/runtime/parachains/src/assigner_parachains.rs b/polkadot/runtime/parachains/src/assigner_parachains.rs index d605d86605151071b3c5dda9c825585f723ecaf1..866e8290052a8ebdaf7f154def8ab073005978d8 100644 --- a/polkadot/runtime/parachains/src/assigner_parachains.rs +++ b/polkadot/runtime/parachains/src/assigner_parachains.rs @@ -39,7 +39,7 @@ pub mod pallet { impl AssignmentProvider> for Pallet { fn session_core_count() -> u32 { - >::parachains().len() as u32 + paras::Parachains::::decode_len().unwrap_or(0) as u32 } fn pop_assignment_for_core( @@ -62,7 +62,7 @@ impl AssignmentProvider> for Pallet { max_availability_timeouts: 0, // The next assignment already goes to the same [`ParaId`], this can be any number // that's high enough to clear the time it takes to clear backing/availability. 
- ttl: BlockNumberFor::::from(10u32), + ttl: 10u32.into(), } } } diff --git a/polkadot/runtime/parachains/src/configuration.rs b/polkadot/runtime/parachains/src/configuration.rs index f53f986a553fe408b4e9087f9605508baf8e4940..bff9cc34b4fbb84c8e2b4add40d082ca3fcbbe91 100644 --- a/polkadot/runtime/parachains/src/configuration.rs +++ b/polkadot/runtime/parachains/src/configuration.rs @@ -26,8 +26,9 @@ use polkadot_parachain_primitives::primitives::{ MAX_HORIZONTAL_MESSAGE_NUM, MAX_UPWARD_MESSAGE_NUM, }; use primitives::{ - AsyncBackingParams, Balance, ExecutorParams, SessionIndex, LEGACY_MIN_BACKING_VOTES, - MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MAX_POV_SIZE, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, + vstaging::NodeFeatures, AsyncBackingParams, Balance, ExecutorParamError, ExecutorParams, + SessionIndex, LEGACY_MIN_BACKING_VOTES, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MAX_POV_SIZE, + ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, }; use sp_runtime::{traits::Zero, Perbill}; use sp_std::prelude::*; @@ -55,6 +56,7 @@ const LOG_TARGET: &str = "runtime::configuration"; serde::Serialize, serde::Deserialize, )] +#[serde(deny_unknown_fields)] pub struct HostConfiguration { // NOTE: This structure is used by parachains via merkle proofs. Therefore, this struct // requires special treatment. @@ -259,6 +261,8 @@ pub struct HostConfiguration { /// The minimum number of valid backing statements required to consider a parachain candidate /// backable. pub minimum_backing_votes: u32, + /// Node features enablement. + pub node_features: NodeFeatures, } impl> Default for HostConfiguration { @@ -310,6 +314,7 @@ impl> Default for HostConfiguration { MaxHrmpInboundChannelsExceeded, /// `minimum_backing_votes` is set to zero. ZeroMinimumBackingVotes, + /// `executor_params` are inconsistent. + InconsistentExecutorParams { inner: ExecutorParamError }, } impl HostConfiguration @@ -432,6 +439,10 @@ where return Err(ZeroMinimumBackingVotes) } + if let Err(inner) = self.executor_params.check_consistency() { + return Err(InconsistentExecutorParams { inner }) + } + Ok(()) } @@ -455,6 +466,7 @@ pub trait WeightInfo { fn set_hrmp_open_request_ttl() -> Weight; fn set_config_with_executor_params() -> Weight; fn set_config_with_perbill() -> Weight; + fn set_node_feature() -> Weight; } pub struct TestWeightInfo; @@ -480,6 +492,9 @@ impl WeightInfo for TestWeightInfo { fn set_config_with_perbill() -> Weight { Weight::MAX } + fn set_node_feature() -> Weight { + Weight::MAX + } } #[frame_support::pallet] @@ -488,18 +503,19 @@ pub mod pallet { /// The current storage version. /// - /// v0-v1: - /// v1-v2: - /// v2-v3: - /// v3-v4: - /// v4-v5: - /// + - /// + - /// v5-v6: (remove UMP dispatch queue) - /// v6-v7: - /// v7-v8: - /// v8-v9: - const STORAGE_VERSION: StorageVersion = StorageVersion::new(9); + /// v0-v1: + /// v1-v2: + /// v2-v3: + /// v3-v4: + /// v4-v5: + /// + + /// + + /// v5-v6: (remove UMP dispatch queue) + /// v6-v7: + /// v7-v8: + /// v8-v9: + /// v9-v10: + const STORAGE_VERSION: StorageVersion = StorageVersion::new(10); #[pallet::pallet] #[pallet::storage_version(STORAGE_VERSION)] @@ -1187,6 +1203,23 @@ pub mod pallet { config.minimum_backing_votes = new; }) } + /// Set/Unset a node feature. 
+ #[pallet::call_index(53)] + #[pallet::weight(( + T::WeightInfo::set_node_feature(), + DispatchClass::Operational + ))] + pub fn set_node_feature(origin: OriginFor, index: u8, value: bool) -> DispatchResult { + ensure_root(origin)?; + + Self::schedule_config_update(|config| { + let index = usize::from(index); + if config.node_features.len() <= index { + config.node_features.resize(index + 1, false); + } + config.node_features.set(index, value); + }) + } } #[pallet::hooks] diff --git a/polkadot/runtime/parachains/src/configuration/benchmarking.rs b/polkadot/runtime/parachains/src/configuration/benchmarking.rs index d9d11ab56e496980e8090a99ed88452a351e970e..67daf1c459884d42378056b6528605da29578c95 100644 --- a/polkadot/runtime/parachains/src/configuration/benchmarking.rs +++ b/polkadot/runtime/parachains/src/configuration/benchmarking.rs @@ -17,7 +17,7 @@ use crate::configuration::*; use frame_benchmarking::{benchmarks, BenchmarkError, BenchmarkResult}; use frame_system::RawOrigin; -use primitives::{ExecutorParam, ExecutorParams, PvfExecTimeoutKind, PvfPrepTimeoutKind}; +use primitives::{ExecutorParam, ExecutorParams, PvfExecKind, PvfPrepKind}; use sp_runtime::traits::One; benchmarks! { @@ -41,14 +41,16 @@ benchmarks! { ExecutorParam::StackNativeMax(256 * 1024 * 1024), ExecutorParam::WasmExtBulkMemory, ExecutorParam::PrecheckingMaxMemory(2 * 1024 * 1024 * 1024), - ExecutorParam::PvfPrepTimeout(PvfPrepTimeoutKind::Precheck, 60_000), - ExecutorParam::PvfPrepTimeout(PvfPrepTimeoutKind::Lenient, 360_000), - ExecutorParam::PvfExecTimeout(PvfExecTimeoutKind::Backing, 2_000), - ExecutorParam::PvfExecTimeout(PvfExecTimeoutKind::Approval, 12_000), + ExecutorParam::PvfPrepTimeout(PvfPrepKind::Precheck, 60_000), + ExecutorParam::PvfPrepTimeout(PvfPrepKind::Prepare, 360_000), + ExecutorParam::PvfExecTimeout(PvfExecKind::Backing, 2_000), + ExecutorParam::PvfExecTimeout(PvfExecKind::Approval, 12_000), ][..])) set_config_with_perbill {}: set_on_demand_fee_variability(RawOrigin::Root, Perbill::from_percent(100)) + set_node_feature{}: set_node_feature(RawOrigin::Root, 255, true) + impl_benchmark_test_suite!( Pallet, crate::mock::new_test_ext(Default::default()), diff --git a/polkadot/runtime/parachains/src/configuration/migration.rs b/polkadot/runtime/parachains/src/configuration/migration.rs index 26f8a85b496d5a76385c6c85b52f66d719d88c82..db323d3aad93358eeecb133aaf63257d7c973c3a 100644 --- a/polkadot/runtime/parachains/src/configuration/migration.rs +++ b/polkadot/runtime/parachains/src/configuration/migration.rs @@ -16,6 +16,7 @@ //! A module that is responsible for migration of storage. +pub mod v10; pub mod v6; pub mod v7; pub mod v8; diff --git a/polkadot/runtime/parachains/src/configuration/migration/v10.rs b/polkadot/runtime/parachains/src/configuration/migration/v10.rs new file mode 100644 index 0000000000000000000000000000000000000000..3c934082dc1e43095b3729347eafccde032427f1 --- /dev/null +++ b/polkadot/runtime/parachains/src/configuration/migration/v10.rs @@ -0,0 +1,277 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! A module that is responsible for migration of storage. + +use crate::configuration::{self, Config, Pallet}; +use frame_support::{pallet_prelude::*, traits::Defensive, weights::Weight}; +use frame_system::pallet_prelude::BlockNumberFor; +use primitives::{vstaging::NodeFeatures, SessionIndex}; +use sp_std::vec::Vec; + +use frame_support::traits::OnRuntimeUpgrade; + +use super::v9::V9HostConfiguration; + +type V10HostConfiguration = configuration::HostConfiguration; + +mod v9 { + use super::*; + + #[frame_support::storage_alias] + pub(crate) type ActiveConfig = + StorageValue, V9HostConfiguration>, OptionQuery>; + + #[frame_support::storage_alias] + pub(crate) type PendingConfigs = StorageValue< + Pallet, + Vec<(SessionIndex, V9HostConfiguration>)>, + OptionQuery, + >; +} + +mod v10 { + use super::*; + + #[frame_support::storage_alias] + pub(crate) type ActiveConfig = + StorageValue, V10HostConfiguration>, OptionQuery>; + + #[frame_support::storage_alias] + pub(crate) type PendingConfigs = StorageValue< + Pallet, + Vec<(SessionIndex, V10HostConfiguration>)>, + OptionQuery, + >; +} + +pub struct VersionUncheckedMigrateToV10(sp_std::marker::PhantomData); +impl OnRuntimeUpgrade for VersionUncheckedMigrateToV10 { + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + log::trace!(target: crate::configuration::LOG_TARGET, "Running pre_upgrade() for HostConfiguration MigrateToV10"); + Ok(Vec::new()) + } + + fn on_runtime_upgrade() -> Weight { + migrate_to_v10::() + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(_state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { + log::trace!(target: crate::configuration::LOG_TARGET, "Running post_upgrade() for HostConfiguration MigrateToV10"); + ensure!( + Pallet::::on_chain_storage_version() >= StorageVersion::new(10), + "Storage version should be >= 10 after the migration" + ); + + Ok(()) + } +} + +pub type MigrateToV10 = frame_support::migrations::VersionedMigration< + 9, + 10, + VersionUncheckedMigrateToV10, + Pallet, + ::DbWeight, +>; + +// Unusual formatting is justified: +// - make it easier to verify that fields assign what they supposed to assign. +// - this code is transient and will be removed after all migrations are done. +// - this code is important enough to optimize for legibility sacrificing consistency. 
+#[rustfmt::skip] +fn translate(pre: V9HostConfiguration>) -> V10HostConfiguration> { + V10HostConfiguration { + max_code_size : pre.max_code_size, + max_head_data_size : pre.max_head_data_size, + max_upward_queue_count : pre.max_upward_queue_count, + max_upward_queue_size : pre.max_upward_queue_size, + max_upward_message_size : pre.max_upward_message_size, + max_upward_message_num_per_candidate : pre.max_upward_message_num_per_candidate, + hrmp_max_message_num_per_candidate : pre.hrmp_max_message_num_per_candidate, + validation_upgrade_cooldown : pre.validation_upgrade_cooldown, + validation_upgrade_delay : pre.validation_upgrade_delay, + max_pov_size : pre.max_pov_size, + max_downward_message_size : pre.max_downward_message_size, + hrmp_sender_deposit : pre.hrmp_sender_deposit, + hrmp_recipient_deposit : pre.hrmp_recipient_deposit, + hrmp_channel_max_capacity : pre.hrmp_channel_max_capacity, + hrmp_channel_max_total_size : pre.hrmp_channel_max_total_size, + hrmp_max_parachain_inbound_channels : pre.hrmp_max_parachain_inbound_channels, + hrmp_max_parachain_outbound_channels : pre.hrmp_max_parachain_outbound_channels, + hrmp_channel_max_message_size : pre.hrmp_channel_max_message_size, + code_retention_period : pre.code_retention_period, + on_demand_cores : pre.on_demand_cores, + on_demand_retries : pre.on_demand_retries, + group_rotation_frequency : pre.group_rotation_frequency, + paras_availability_period : pre.paras_availability_period, + scheduling_lookahead : pre.scheduling_lookahead, + max_validators_per_core : pre.max_validators_per_core, + max_validators : pre.max_validators, + dispute_period : pre.dispute_period, + dispute_post_conclusion_acceptance_period: pre.dispute_post_conclusion_acceptance_period, + no_show_slots : pre.no_show_slots, + n_delay_tranches : pre.n_delay_tranches, + zeroth_delay_tranche_width : pre.zeroth_delay_tranche_width, + needed_approvals : pre.needed_approvals, + relay_vrf_modulo_samples : pre.relay_vrf_modulo_samples, + pvf_voting_ttl : pre.pvf_voting_ttl, + minimum_validation_upgrade_delay : pre.minimum_validation_upgrade_delay, + async_backing_params : pre.async_backing_params, + executor_params : pre.executor_params, + on_demand_queue_max_size : pre.on_demand_queue_max_size, + on_demand_base_fee : pre.on_demand_base_fee, + on_demand_fee_variability : pre.on_demand_fee_variability, + on_demand_target_queue_utilization : pre.on_demand_target_queue_utilization, + on_demand_ttl : pre.on_demand_ttl, + minimum_backing_votes : pre.minimum_backing_votes, + node_features : NodeFeatures::EMPTY + } +} + +fn migrate_to_v10() -> Weight { + let v9 = v9::ActiveConfig::::get() + .defensive_proof("Could not decode old config") + .unwrap_or_default(); + let v10 = translate::(v9); + v10::ActiveConfig::::set(Some(v10)); + + // Allowed to be empty. + let pending_v9 = v9::PendingConfigs::::get().unwrap_or_default(); + let mut pending_v10 = Vec::new(); + + for (session, v9) in pending_v9.into_iter() { + let v10 = translate::(v9); + pending_v10.push((session, v10)); + } + v10::PendingConfigs::::set(Some(pending_v10.clone())); + + let num_configs = (pending_v10.len() + 1) as u64; + T::DbWeight::get().reads_writes(num_configs, num_configs) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::mock::{new_test_ext, Test}; + use primitives::LEGACY_MIN_BACKING_VOTES; + + #[test] + fn v10_deserialized_from_actual_data() { + // Example how to get new `raw_config`: + // We'll obtain the raw_config at a specified a block + // Steps: + // 1. 
Go to Polkadot.js -> Developer -> Chain state -> Storage: https://polkadot.js.org/apps/#/chainstate + // 2. Set these parameters: + // 2.1. selected state query: configuration; activeConfig(): + // PolkadotRuntimeParachainsConfigurationHostConfiguration + // 2.2. blockhash to query at: + // 0xf89d3ab5312c5f70d396dc59612f0aa65806c798346f9db4b35278baed2e0e53 (the hash of + // the block) + // 2.3. Note the value of encoded storage key -> + // 0x06de3d8a54d27e44a9d5ce189618f22db4b49d95320d9021994c850f25b8e385 for the + // referenced block. + // 2.4. You'll also need the decoded values to update the test. + // 3. Go to Polkadot.js -> Developer -> Chain state -> Raw storage + // 3.1 Enter the encoded storage key and you get the raw config. + + // This exceeds the maximal line width length, but that's fine, since this is not code and + // doesn't need to be read and also leaving it as one line allows to easily copy it. + let raw_config = + hex_literal::hex![" + 0000300000800000080000000000100000c8000005000000050000000200000002000000000000000000000000005000000010000400000000000000000000000000000000000000000000000000000000000000000000000800000000200000040000000000100000b004000000000000000000001027000080b2e60e80c3c90180969800000000000000000000000000050000001400000004000000010000000101000000000600000064000000020000001900000000000000020000000200000002000000050000000200000000" + ]; + + let v10 = + V10HostConfiguration::::decode(&mut &raw_config[..]).unwrap(); + + // We check only a sample of the values here. If we missed any fields or messed up data + // types that would skew all the fields coming after. + assert_eq!(v10.max_code_size, 3_145_728); + assert_eq!(v10.validation_upgrade_cooldown, 2); + assert_eq!(v10.max_pov_size, 5_242_880); + assert_eq!(v10.hrmp_channel_max_message_size, 1_048_576); + assert_eq!(v10.n_delay_tranches, 25); + assert_eq!(v10.minimum_validation_upgrade_delay, 5); + assert_eq!(v10.group_rotation_frequency, 20); + assert_eq!(v10.on_demand_cores, 0); + assert_eq!(v10.on_demand_base_fee, 10_000_000); + assert_eq!(v10.minimum_backing_votes, LEGACY_MIN_BACKING_VOTES); + assert_eq!(v10.node_features, NodeFeatures::EMPTY); + } + + // Test that `migrate_to_v10`` correctly applies the `translate` function to current and pending + // configs. + #[test] + fn test_migrate_to_v10() { + // Host configuration has lots of fields. However, in this migration we only add one + // field. The most important part to check are a couple of the last fields. We also pick + // extra fields to check arbitrarily, e.g. depending on their position (i.e. the middle) and + // also their type. + // + // We specify only the picked fields and the rest should be provided by the `Default` + // implementation. That implementation is copied over between the two types and should work + // fine. + let v9 = V9HostConfiguration:: { + needed_approvals: 69, + paras_availability_period: 55, + hrmp_recipient_deposit: 1337, + max_pov_size: 1111, + minimum_validation_upgrade_delay: 20, + ..Default::default() + }; + + let mut pending_configs = Vec::new(); + pending_configs.push((100, v9.clone())); + pending_configs.push((300, v9.clone())); + + new_test_ext(Default::default()).execute_with(|| { + // Implant the v9 version in the state. 
+ v9::ActiveConfig::::set(Some(v9.clone())); + v9::PendingConfigs::::set(Some(pending_configs)); + + migrate_to_v10::(); + + let v10 = translate::(v9); + let mut configs_to_check = v10::PendingConfigs::::get().unwrap(); + configs_to_check.push((0, v10::ActiveConfig::::get().unwrap())); + + for (_, config) in configs_to_check { + assert_eq!(config, v10); + assert_eq!(config.node_features, NodeFeatures::EMPTY); + } + }); + } + + // Test that migration doesn't panic in case there're no pending configurations upgrades in + // pallet's storage. + #[test] + fn test_migrate_to_v10_no_pending() { + let v9 = V9HostConfiguration::::default(); + + new_test_ext(Default::default()).execute_with(|| { + // Implant the v9 version in the state. + v9::ActiveConfig::::set(Some(v9)); + // Ensure there're no pending configs. + v9::PendingConfigs::::set(None); + + // Shouldn't fail. + migrate_to_v10::(); + }); + } +} diff --git a/polkadot/runtime/parachains/src/configuration/migration/v9.rs b/polkadot/runtime/parachains/src/configuration/migration/v9.rs index 64d71e628f4ead4ee207c6f74b24dd42684e0fca..ca4bbd9dacef3fb645c41a060798e0298a0efc27 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v9.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v9.rs @@ -23,14 +23,116 @@ use frame_support::{ weights::Weight, }; use frame_system::pallet_prelude::BlockNumberFor; -use primitives::{SessionIndex, LEGACY_MIN_BACKING_VOTES}; +use primitives::{ + AsyncBackingParams, Balance, ExecutorParams, SessionIndex, LEGACY_MIN_BACKING_VOTES, + ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, +}; use sp_runtime::Perbill; use sp_std::vec::Vec; use frame_support::traits::OnRuntimeUpgrade; use super::v8::V8HostConfiguration; -type V9HostConfiguration = configuration::HostConfiguration; +/// All configuration of the runtime with respect to paras. 
+#[derive(Clone, Encode, Decode, Debug)] +pub struct V9HostConfiguration { + pub max_code_size: u32, + pub max_head_data_size: u32, + pub max_upward_queue_count: u32, + pub max_upward_queue_size: u32, + pub max_upward_message_size: u32, + pub max_upward_message_num_per_candidate: u32, + pub hrmp_max_message_num_per_candidate: u32, + pub validation_upgrade_cooldown: BlockNumber, + pub validation_upgrade_delay: BlockNumber, + pub async_backing_params: AsyncBackingParams, + pub max_pov_size: u32, + pub max_downward_message_size: u32, + pub hrmp_max_parachain_outbound_channels: u32, + pub hrmp_sender_deposit: Balance, + pub hrmp_recipient_deposit: Balance, + pub hrmp_channel_max_capacity: u32, + pub hrmp_channel_max_total_size: u32, + pub hrmp_max_parachain_inbound_channels: u32, + pub hrmp_channel_max_message_size: u32, + pub executor_params: ExecutorParams, + pub code_retention_period: BlockNumber, + pub on_demand_cores: u32, + pub on_demand_retries: u32, + pub on_demand_queue_max_size: u32, + pub on_demand_target_queue_utilization: Perbill, + pub on_demand_fee_variability: Perbill, + pub on_demand_base_fee: Balance, + pub on_demand_ttl: BlockNumber, + pub group_rotation_frequency: BlockNumber, + pub paras_availability_period: BlockNumber, + pub scheduling_lookahead: u32, + pub max_validators_per_core: Option, + pub max_validators: Option, + pub dispute_period: SessionIndex, + pub dispute_post_conclusion_acceptance_period: BlockNumber, + pub no_show_slots: u32, + pub n_delay_tranches: u32, + pub zeroth_delay_tranche_width: u32, + pub needed_approvals: u32, + pub relay_vrf_modulo_samples: u32, + pub pvf_voting_ttl: SessionIndex, + pub minimum_validation_upgrade_delay: BlockNumber, + pub minimum_backing_votes: u32, +} + +impl> Default for V9HostConfiguration { + fn default() -> Self { + Self { + async_backing_params: AsyncBackingParams { + max_candidate_depth: 0, + allowed_ancestry_len: 0, + }, + group_rotation_frequency: 1u32.into(), + paras_availability_period: 1u32.into(), + no_show_slots: 1u32.into(), + validation_upgrade_cooldown: Default::default(), + validation_upgrade_delay: 2u32.into(), + code_retention_period: Default::default(), + max_code_size: Default::default(), + max_pov_size: Default::default(), + max_head_data_size: Default::default(), + on_demand_cores: Default::default(), + on_demand_retries: Default::default(), + scheduling_lookahead: 1, + max_validators_per_core: Default::default(), + max_validators: None, + dispute_period: 6, + dispute_post_conclusion_acceptance_period: 100.into(), + n_delay_tranches: Default::default(), + zeroth_delay_tranche_width: Default::default(), + needed_approvals: Default::default(), + relay_vrf_modulo_samples: Default::default(), + max_upward_queue_count: Default::default(), + max_upward_queue_size: Default::default(), + max_downward_message_size: Default::default(), + max_upward_message_size: Default::default(), + max_upward_message_num_per_candidate: Default::default(), + hrmp_sender_deposit: Default::default(), + hrmp_recipient_deposit: Default::default(), + hrmp_channel_max_capacity: Default::default(), + hrmp_channel_max_total_size: Default::default(), + hrmp_max_parachain_inbound_channels: Default::default(), + hrmp_channel_max_message_size: Default::default(), + hrmp_max_parachain_outbound_channels: Default::default(), + hrmp_max_message_num_per_candidate: Default::default(), + pvf_voting_ttl: 2u32.into(), + minimum_validation_upgrade_delay: 2.into(), + executor_params: Default::default(), + on_demand_queue_max_size: 
ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, + on_demand_base_fee: 10_000_000u128, + on_demand_fee_variability: Perbill::from_percent(3), + on_demand_target_queue_utilization: Perbill::from_percent(25), + on_demand_ttl: 5u32.into(), + minimum_backing_votes: LEGACY_MIN_BACKING_VOTES, + } + } +} mod v8 { use super::*; @@ -145,11 +247,11 @@ pvf_voting_ttl : pre.pvf_voting_ttl, minimum_validation_upgrade_delay : pre.minimum_validation_upgrade_delay, async_backing_params : pre.async_backing_params, executor_params : pre.executor_params, -on_demand_queue_max_size : 10_000u32, -on_demand_base_fee : 10_000_000u128, -on_demand_fee_variability : Perbill::from_percent(3), -on_demand_target_queue_utilization : Perbill::from_percent(25), -on_demand_ttl : 5u32.into(), +on_demand_queue_max_size : pre.on_demand_queue_max_size, +on_demand_base_fee : pre.on_demand_base_fee, +on_demand_fee_variability : pre.on_demand_fee_variability, +on_demand_target_queue_utilization : pre.on_demand_target_queue_utilization, +on_demand_ttl : pre.on_demand_ttl, minimum_backing_votes : LEGACY_MIN_BACKING_VOTES } }; diff --git a/polkadot/runtime/parachains/src/configuration/tests.rs b/polkadot/runtime/parachains/src/configuration/tests.rs index ea39628c95876a92b9072bd5adc18584e48df0b5..b62a45355e1dfb486baa3585242bcce1b26fdd12 100644 --- a/polkadot/runtime/parachains/src/configuration/tests.rs +++ b/polkadot/runtime/parachains/src/configuration/tests.rs @@ -16,6 +16,7 @@ use super::*; use crate::mock::{new_test_ext, Configuration, ParasShared, RuntimeOrigin, Test}; +use bitvec::{bitvec, prelude::Lsb0}; use frame_support::{assert_err, assert_noop, assert_ok}; fn on_new_session(session_index: SessionIndex) -> (HostConfiguration, HostConfiguration) { @@ -318,6 +319,7 @@ fn setting_pending_config_members() { on_demand_target_queue_utilization: Perbill::from_percent(25), on_demand_ttl: 5u32, minimum_backing_votes: 5, + node_features: bitvec![u8, Lsb0; 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1], }; Configuration::set_validation_upgrade_cooldown( @@ -473,6 +475,12 @@ fn setting_pending_config_members() { new_config.minimum_backing_votes, ) .unwrap(); + Configuration::set_node_feature(RuntimeOrigin::root(), 1, true).unwrap(); + Configuration::set_node_feature(RuntimeOrigin::root(), 1, true).unwrap(); + Configuration::set_node_feature(RuntimeOrigin::root(), 3, true).unwrap(); + Configuration::set_node_feature(RuntimeOrigin::root(), 10, true).unwrap(); + Configuration::set_node_feature(RuntimeOrigin::root(), 10, false).unwrap(); + Configuration::set_node_feature(RuntimeOrigin::root(), 11, true).unwrap(); assert_eq!(PendingConfigs::::get(), vec![(shared::SESSION_DELAY, new_config)],); }) diff --git a/polkadot/runtime/parachains/src/disputes/slashing/benchmarking.rs b/polkadot/runtime/parachains/src/disputes/slashing/benchmarking.rs index 3ede1c908802558da7af09a7201d4e18a82f4dbc..f075ce5ca737d7597c4ed5d664050e08b9d28bf8 100644 --- a/polkadot/runtime/parachains/src/disputes/slashing/benchmarking.rs +++ b/polkadot/runtime/parachains/src/disputes/slashing/benchmarking.rs @@ -51,7 +51,7 @@ where use rand::{RngCore, SeedableRng}; let validator = T::Lookup::lookup(who).unwrap(); - let controller = pallet_staking::Pallet::::bonded(validator).unwrap(); + let controller = pallet_staking::Pallet::::bonded(&validator).unwrap(); let keys = { const SESSION_KEY_LEN: usize = 32; diff --git a/polkadot/runtime/parachains/src/dmp.rs b/polkadot/runtime/parachains/src/dmp.rs index bc7491a2c61a9c14804061acbca9702aebb3da29..15147e9210e21c6ced17d5904e60b4a633f7e902 100644 --- 
a/polkadot/runtime/parachains/src/dmp.rs +++ b/polkadot/runtime/parachains/src/dmp.rs @@ -17,12 +17,12 @@ //! To prevent Out of Memory errors on the `DownwardMessageQueue`, an //! exponential fee factor (`DeliveryFeeFactor`) is set. The fee factor //! increments exponentially after the number of messages in the -//! `DownwardMessageQueue` pass a threshold. This threshold is set as: +//! `DownwardMessageQueue` passes a threshold. This threshold is set as: //! //! ```ignore //! // Maximum max sized messages that can be send to //! // the DownwardMessageQueue before it runs out of memory -//! max_messsages = MAX_POSSIBLE_ALLOCATION / max_downward_message_size +//! max_messages = MAX_POSSIBLE_ALLOCATION / max_downward_message_size //! threshold = max_messages / THRESHOLD_FACTOR //! ``` //! Based on the THRESHOLD_FACTOR, the threshold is set as a fraction of the @@ -144,7 +144,7 @@ pub mod pallet { FixedU128::from_u32(1) } - /// The number to multiply the base delivery fee by. + /// The factor to multiply the base delivery fee by. #[pallet::storage] pub(crate) type DeliveryFeeFactor = StorageMap<_, Twox64Concat, ParaId, FixedU128, ValueQuery, InitialFactor>; @@ -243,10 +243,9 @@ impl Pallet { let threshold = Self::dmq_max_length(config.max_downward_message_size).saturating_div(THRESHOLD_FACTOR); if q_len > (threshold as usize) { - let message_size_factor = - FixedU128::from_u32(serialized_len.saturating_div(1024) as u32) - .saturating_mul(MESSAGE_SIZE_FEE_BASE); - Self::increment_fee_factor(para, message_size_factor); + let message_size_factor = FixedU128::from((serialized_len / 1024) as u128) + .saturating_mul(MESSAGE_SIZE_FEE_BASE); + Self::increase_fee_factor(para, message_size_factor); } Ok(()) @@ -304,7 +303,7 @@ impl Pallet { let threshold = Self::dmq_max_length(config.max_downward_message_size).saturating_div(THRESHOLD_FACTOR); if q_len <= (threshold as usize) { - Self::decrement_fee_factor(para); + Self::decrease_fee_factor(para); } T::DbWeight::get().reads_writes(1, 1) } @@ -337,32 +336,26 @@ impl Pallet { ) -> Vec>> { DownwardMessageQueues::::get(&recipient) } +} - /// Raise the delivery fee factor by a multiplicative factor and stores the resulting value. - /// - /// Returns the new delivery fee factor after the increment. - pub(crate) fn increment_fee_factor(para: ParaId, message_size_factor: FixedU128) -> FixedU128 { - >::mutate(para, |f| { - *f = f.saturating_mul(EXPONENTIAL_FEE_BASE + message_size_factor); +impl FeeTracker for Pallet { + type Id = ParaId; + + fn get_fee_factor(id: Self::Id) -> FixedU128 { + DeliveryFeeFactor::::get(id) + } + + fn increase_fee_factor(id: Self::Id, message_size_factor: FixedU128) -> FixedU128 { + >::mutate(id, |f| { + *f = f.saturating_mul(EXPONENTIAL_FEE_BASE.saturating_add(message_size_factor)); *f }) } - /// Reduce the delivery fee factor by a multiplicative factor and stores the resulting value. - /// - /// Does not reduce the fee factor below the initial value, which is currently set as 1. - /// - /// Returns the new delivery fee factor after the decrement. 
- pub(crate) fn decrement_fee_factor(para: ParaId) -> FixedU128 { - >::mutate(para, |f| { + fn decrease_fee_factor(id: Self::Id) -> FixedU128 { + >::mutate(id, |f| { *f = InitialFactor::get().max(*f / EXPONENTIAL_FEE_BASE); *f }) } } - -impl FeeTracker for Pallet { - fn get_fee_factor(para: ParaId) -> FixedU128 { - DeliveryFeeFactor::::get(para) - } -} diff --git a/polkadot/runtime/parachains/src/dmp/tests.rs b/polkadot/runtime/parachains/src/dmp/tests.rs index a65984840da585ec524120fb4fb95111734e4eff..f9197b156a1e1d901d55344d6b3439f39d2afbf8 100644 --- a/polkadot/runtime/parachains/src/dmp/tests.rs +++ b/polkadot/runtime/parachains/src/dmp/tests.rs @@ -233,7 +233,7 @@ fn verify_dmq_mqc_head_is_externally_accessible() { } #[test] -fn verify_fee_increment_and_decrement() { +fn verify_fee_increase_and_decrease() { let a = ParaId::from(123); let mut genesis = default_genesis_config(); genesis.configuration.config.max_downward_message_size = 16777216; diff --git a/polkadot/runtime/parachains/src/hrmp.rs b/polkadot/runtime/parachains/src/hrmp.rs index b3bbcb433c0b2da20a77fe0243292e9c80a37cd2..42592d9d9f1496c882e0dd2d98e53cf06fcefcc1 100644 --- a/polkadot/runtime/parachains/src/hrmp.rs +++ b/polkadot/runtime/parachains/src/hrmp.rs @@ -554,14 +554,26 @@ pub mod pallet { /// /// Origin must be the `ChannelManager`. #[pallet::call_index(3)] - #[pallet::weight(::WeightInfo::force_clean_hrmp(*_inbound, *_outbound))] + #[pallet::weight(::WeightInfo::force_clean_hrmp(*num_inbound, *num_outbound))] pub fn force_clean_hrmp( origin: OriginFor, para: ParaId, - _inbound: u32, - _outbound: u32, + num_inbound: u32, + num_outbound: u32, ) -> DispatchResult { T::ChannelManager::ensure_origin(origin)?; + + ensure!( + HrmpIngressChannelsIndex::::decode_len(para).unwrap_or_default() <= + num_inbound as usize, + Error::::WrongWitness + ); + ensure!( + HrmpEgressChannelsIndex::::decode_len(para).unwrap_or_default() <= + num_outbound as usize, + Error::::WrongWitness + ); + Self::clean_hrmp_after_outgoing(¶); Ok(()) } @@ -575,9 +587,16 @@ pub mod pallet { /// /// Origin must be the `ChannelManager`. #[pallet::call_index(4)] - #[pallet::weight(::WeightInfo::force_process_hrmp_open(*_channels))] - pub fn force_process_hrmp_open(origin: OriginFor, _channels: u32) -> DispatchResult { + #[pallet::weight(::WeightInfo::force_process_hrmp_open(*channels))] + pub fn force_process_hrmp_open(origin: OriginFor, channels: u32) -> DispatchResult { T::ChannelManager::ensure_origin(origin)?; + + ensure!( + HrmpOpenChannelRequestsList::::decode_len().unwrap_or_default() as u32 <= + channels, + Error::::WrongWitness + ); + let host_config = configuration::Pallet::::config(); Self::process_hrmp_open_channel_requests(&host_config); Ok(()) @@ -592,9 +611,16 @@ pub mod pallet { /// /// Origin must be the `ChannelManager`. 
#[pallet::call_index(5)] - #[pallet::weight(::WeightInfo::force_process_hrmp_close(*_channels))] - pub fn force_process_hrmp_close(origin: OriginFor, _channels: u32) -> DispatchResult { + #[pallet::weight(::WeightInfo::force_process_hrmp_close(*channels))] + pub fn force_process_hrmp_close(origin: OriginFor, channels: u32) -> DispatchResult { T::ChannelManager::ensure_origin(origin)?; + + ensure!( + HrmpCloseChannelRequestsList::::decode_len().unwrap_or_default() as u32 <= + channels, + Error::::WrongWitness + ); + Self::process_hrmp_close_channel_requests(); Ok(()) } diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index bb16c804150dc7ca35aecaa2ee5b6c16cff98e28..90af9cde00a8faaec13101b490ea3ba92f4a827c 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -21,7 +21,8 @@ use crate::{ configuration::{self, HostConfiguration}, - disputes, dmp, hrmp, paras, + disputes, dmp, hrmp, + paras::{self, SetGoAhead}, scheduler::{self, AvailabilityTimeoutStatus}, shared::{self, AllowedRelayParentsTracker}, }; @@ -29,7 +30,7 @@ use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; use frame_support::{ defensive, pallet_prelude::*, - traits::{Defensive, EnqueueMessage}, + traits::{Defensive, EnqueueMessage, Footprint, QueueFootprint}, BoundedSlice, }; use frame_system::pallet_prelude::*; @@ -230,7 +231,7 @@ pub enum AggregateMessageOrigin { /// Identifies a UMP queue inside the `MessageQueue` pallet. /// -/// It is written in verbose form since future variants like `Loopback` and `Bridged` are already +/// It is written in verbose form since future variants like `Here` and `Bridged` are already /// forseeable. #[derive(Encode, Decode, Clone, MaxEncodedLen, Eq, PartialEq, RuntimeDebug, TypeInfo)] pub enum UmpQueueId { @@ -448,8 +449,9 @@ impl fmt::Debug for UmpAcceptanceCheckErr { "the ump queue would have grown past the max size permitted by config ({} > {})", total_size, limit, ), - UmpAcceptanceCheckErr::IsOffboarding => - write!(fmt, "upward message rejected because the para is off-boarding",), + UmpAcceptanceCheckErr::IsOffboarding => { + write!(fmt, "upward message rejected because the para is off-boarding") + }, } } } @@ -885,6 +887,7 @@ impl Pallet { new_code, now, &config, + SetGoAhead::Yes, )); } @@ -922,7 +925,7 @@ impl Pallet { pub(crate) fn relay_dispatch_queue_size(para_id: ParaId) -> (u32, u32) { let fp = T::MessageQueue::footprint(AggregateMessageOrigin::Ump(UmpQueueId::Para(para_id))); - (fp.count as u32, fp.size as u32) + (fp.storage.count as u32, fp.storage.size as u32) } /// Check that all the upward messages sent by a candidate pass the acceptance criteria. @@ -1146,10 +1149,11 @@ impl AcceptanceCheckErr { impl OnQueueChanged for Pallet { // Write back the remaining queue capacity into `relay_dispatch_queue_remaining_capacity`. - fn on_queue_changed(origin: AggregateMessageOrigin, count: u64, size: u64) { + fn on_queue_changed(origin: AggregateMessageOrigin, fp: QueueFootprint) { let para = match origin { AggregateMessageOrigin::Ump(UmpQueueId::Para(p)) => p, }; + let QueueFootprint { storage: Footprint { count, size }, .. 
} = fp; let (count, size) = (count.saturated_into(), size.saturated_into()); // TODO paritytech/polkadot#6283: Remove all usages of `relay_dispatch_queue_size` #[allow(deprecated)] @@ -1326,6 +1330,6 @@ impl QueueFootprinter for Pallet { type Origin = UmpQueueId; fn message_count(origin: Self::Origin) -> u64 { - T::MessageQueue::footprint(AggregateMessageOrigin::Ump(origin)).count + T::MessageQueue::footprint(AggregateMessageOrigin::Ump(origin)).storage.count } } diff --git a/polkadot/runtime/parachains/src/inclusion/tests.rs b/polkadot/runtime/parachains/src/inclusion/tests.rs index 7677108d73decf816d62b8ea5491e058dcfac126..6bb731671f6f8afbb350dff2a2821f6ab31da5a0 100644 --- a/polkadot/runtime/parachains/src/inclusion/tests.rs +++ b/polkadot/runtime/parachains/src/inclusion/tests.rs @@ -1255,7 +1255,13 @@ fn candidate_checks() { let cfg = Configuration::config(); let expected_at = 10 + cfg.validation_upgrade_delay; assert_eq!(expected_at, 12); - Paras::schedule_code_upgrade(chain_a, vec![1, 2, 3, 4].into(), expected_at, &cfg); + Paras::schedule_code_upgrade( + chain_a, + vec![1, 2, 3, 4].into(), + expected_at, + &cfg, + SetGoAhead::Yes, + ); } assert_noop!( @@ -2235,7 +2241,7 @@ fn para_upgrade_delay_scheduled_from_inclusion() { let cause = &active_vote_state.causes()[0]; // Upgrade block is the block of inclusion, not candidate's parent. assert_matches!(cause, - paras::PvfCheckCause::Upgrade { id, included_at } + paras::PvfCheckCause::Upgrade { id, included_at, set_go_ahead: SetGoAhead::Yes } if id == &chain_a && included_at == &7 ); }); diff --git a/polkadot/runtime/parachains/src/lib.rs b/polkadot/runtime/parachains/src/lib.rs index 64365f17b7e8b83ce0aef13c00ccdbd1a8b5f744..2509edbee3cbe00420bf734fe0df920d122460cc 100644 --- a/polkadot/runtime/parachains/src/lib.rs +++ b/polkadot/runtime/parachains/src/lib.rs @@ -53,13 +53,27 @@ mod mock; mod ump_tests; pub use origin::{ensure_parachain, Origin}; -pub use paras::ParaLifecycle; +pub use paras::{ParaLifecycle, SetGoAhead}; use primitives::{HeadData, Id as ParaId, ValidationCode}; use sp_runtime::{DispatchResult, FixedU128}; /// Trait for tracking message delivery fees on a transport protocol. pub trait FeeTracker { - fn get_fee_factor(para: ParaId) -> FixedU128; + /// Type used for assigning different fee factors to different destinations + type Id; + /// Returns the evolving exponential fee factor which will be used to calculate the delivery + /// fees. + fn get_fee_factor(id: Self::Id) -> FixedU128; + /// Increases the delivery fee factor by a factor based on message size and records the result. + /// + /// Returns the new delivery fee factor after the increase. + fn increase_fee_factor(id: Self::Id, message_size_factor: FixedU128) -> FixedU128; + /// Decreases the delivery fee factor by a constant factor and records the result. + /// + /// Does not reduce the fee factor below the initial value, which is currently set as 1. + /// + /// Returns the new delivery fee factor after the decrease. + fn decrease_fee_factor(id: Self::Id) -> FixedU128; } /// Schedule a para to be initialized at the start of the next session with the given genesis data. @@ -89,8 +103,9 @@ pub fn schedule_parachain_downgrade(id: ParaId) -> Result<(), pub fn schedule_code_upgrade( id: ParaId, new_code: ValidationCode, + set_go_ahead: SetGoAhead, ) -> DispatchResult { - paras::Pallet::::schedule_code_upgrade_external(id, new_code) + paras::Pallet::::schedule_code_upgrade_external(id, new_code, set_go_ahead) } /// Sets the current parachain head with the given id. 
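
The fee factor returned by the renamed `FeeTracker` methods feeds directly into the `ExponentialPrice` formula documented in `xcm_sender.rs` above, `F * (B + encoded_msg_len * M)`. A small self-contained sketch of that arithmetic (the helper name, the constants `300`/`10`, and the test are hypothetical and only illustrate the formula; the `FixedU128` calls mirror the ones used by `ExponentialPrice`):

use sp_runtime::{FixedPointNumber, FixedU128};

/// Compute `F * (B + encoded_msg_len * M)` for a given fee factor `f`.
fn delivery_fee(f: FixedU128, base_fee: u128, byte_fee: u128, encoded_len: u128) -> u128 {
    let msg_fee = encoded_len.saturating_mul(byte_fee);
    f.saturating_mul_int(base_fee.saturating_add(msg_fee))
}

#[test]
fn delivery_fee_example() {
    // With F = 1.05 (e.g. after one `increase_fee_factor` step from the initial 1.0),
    // B = 300, M = 10 and a 12-byte encoded message:
    // fee = 1.05 * (300 + 12 * 10) = 441.
    let f = FixedU128::from_rational(105, 100);
    assert_eq!(delivery_fee(f, 300, 10, 12), 441);
}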
diff --git a/polkadot/runtime/parachains/src/mock.rs b/polkadot/runtime/parachains/src/mock.rs index ded7de08e4fa95db7fd9ba7ae172514d38a86129..9df54bf29d3ebbc04fcd21b8f76a3a0c73781c37 100644 --- a/polkadot/runtime/parachains/src/mock.rs +++ b/polkadot/runtime/parachains/src/mock.rs @@ -135,6 +135,7 @@ impl pallet_balances::Config for Test { type AccountStore = System; type WeightInfo = (); type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); type MaxHolds = ConstU32<0>; type MaxFreezes = ConstU32<0>; diff --git a/polkadot/runtime/parachains/src/paras/benchmarking.rs b/polkadot/runtime/parachains/src/paras/benchmarking.rs index 5c060547601f5869a671856b3bf3ceebf49ee812..554f0c15af24ff632e6e5ae9da34baf43aa0975e 100644 --- a/polkadot/runtime/parachains/src/paras/benchmarking.rs +++ b/polkadot/runtime/parachains/src/paras/benchmarking.rs @@ -124,7 +124,13 @@ benchmarks! { let expired = frame_system::Pallet::::block_number().saturating_sub(One::one()); let config = HostConfiguration::>::default(); generate_disordered_pruning::(); - Pallet::::schedule_code_upgrade(para_id, ValidationCode(vec![0]), expired, &config); + Pallet::::schedule_code_upgrade( + para_id, + ValidationCode(vec![0]), + expired, + &config, + SetGoAhead::Yes, + ); }: _(RawOrigin::Root, para_id, new_head) verify { assert_last_event::(Event::NewHeadNoted(para_id).into()); diff --git a/polkadot/runtime/parachains/src/paras/benchmarking/pvf_check.rs b/polkadot/runtime/parachains/src/paras/benchmarking/pvf_check.rs index ab5e13124436d076018b6e30dc2cc8a13162d5a1..05c4c9c37b4d9241a64539c13a1960f93c80d7c1 100644 --- a/polkadot/runtime/parachains/src/paras/benchmarking/pvf_check.rs +++ b/polkadot/runtime/parachains/src/paras/benchmarking/pvf_check.rs @@ -177,6 +177,7 @@ where validation_code, /* relay_parent_number */ 1u32.into(), &configuration::Pallet::::config(), + SetGoAhead::Yes, ); } else { let r = Pallet::::schedule_para_initialize( diff --git a/polkadot/runtime/parachains/src/paras/mod.rs b/polkadot/runtime/parachains/src/paras/mod.rs index 2f370b5bfe472a3994a4aabf6df9e8245d7b4f88..ef9dfedd735078a61849b9543d51b4721bb494c6 100644 --- a/polkadot/runtime/parachains/src/paras/mod.rs +++ b/polkadot/runtime/parachains/src/paras/mod.rs @@ -386,9 +386,18 @@ pub(crate) enum PvfCheckCause { /// /// See https://github.com/paritytech/polkadot/issues/4601 for detailed explanation. included_at: BlockNumber, + /// Whether or not the given para should be sent the `GoAhead` signal. + set_go_ahead: SetGoAhead, }, } +/// Should the `GoAhead` signal be set after a successful check of the new wasm binary? +#[derive(Debug, Copy, Clone, PartialEq, TypeInfo, Decode, Encode)] +pub enum SetGoAhead { + Yes, + No, +} + impl PvfCheckCause { /// Returns the ID of the para that initiated or subscribed to the pre-checking vote. fn para_id(&self) -> ParaId { @@ -888,7 +897,13 @@ pub mod pallet { ) -> DispatchResult { ensure_root(origin)?; let config = configuration::Pallet::::config(); - Self::schedule_code_upgrade(para, new_code, relay_parent_number, &config); + Self::schedule_code_upgrade( + para, + new_code, + relay_parent_number, + &config, + SetGoAhead::No, + ); Self::deposit_event(Event::CodeUpgradeScheduled(para)); Ok(()) } @@ -1186,6 +1201,7 @@ impl Pallet { pub(crate) fn schedule_code_upgrade_external( id: ParaId, new_code: ValidationCode, + set_go_ahead: SetGoAhead, ) -> DispatchResult { // Check that we can schedule an upgrade at all. 
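// A minimal sketch of driving the new `set_go_ahead` parameter from outside the `paras`
// pallet, via the public `runtime_parachains::schedule_code_upgrade` wrapper in the `lib.rs`
// hunk above. The calling function is hypothetical and assumes only `T: paras::Config`.
use primitives::{Id as ParaId, ValidationCode};
use runtime_parachains::{paras, schedule_code_upgrade, SetGoAhead};
use sp_runtime::DispatchResult;

fn upgrade_without_go_ahead<T: paras::Config>(
    id: ParaId,
    new_code: ValidationCode,
) -> DispatchResult {
    // `SetGoAhead::No` schedules the upgrade but keeps the para out of `UpcomingUpgrades`,
    // so no `UpgradeGoAheadSignal` is written for it later; `SetGoAhead::Yes` preserves the
    // previous behaviour.
    schedule_code_upgrade::<T>(id, new_code, SetGoAhead::No)
}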
ensure!(Self::can_upgrade_validation_code(id), Error::::CannotUpgradeCode); @@ -1193,7 +1209,7 @@ impl Pallet { let current_block = frame_system::Pallet::::block_number(); // Schedule the upgrade with a delay just like if a parachain triggered the upgrade. let upgrade_block = current_block.saturating_add(config.validation_upgrade_delay); - Self::schedule_code_upgrade(id, new_code, upgrade_block, &config); + Self::schedule_code_upgrade(id, new_code, upgrade_block, &config, set_go_ahead); Self::deposit_event(Event::CodeUpgradeScheduled(id)); Ok(()) } @@ -1534,8 +1550,15 @@ impl Pallet { PvfCheckCause::Onboarding(id) => { weight += Self::proceed_with_onboarding(*id, sessions_observed); }, - PvfCheckCause::Upgrade { id, included_at } => { - weight += Self::proceed_with_upgrade(*id, code_hash, now, *included_at, cfg); + PvfCheckCause::Upgrade { id, included_at, set_go_ahead } => { + weight += Self::proceed_with_upgrade( + *id, + code_hash, + now, + *included_at, + cfg, + *set_go_ahead, + ); }, } } @@ -1568,6 +1591,7 @@ impl Pallet { now: BlockNumberFor, relay_parent_number: BlockNumberFor, cfg: &configuration::HostConfiguration>, + set_go_ahead: SetGoAhead, ) -> Weight { let mut weight = Weight::zero(); @@ -1591,12 +1615,15 @@ impl Pallet { weight += T::DbWeight::get().reads_writes(1, 4); FutureCodeUpgrades::::insert(&id, expected_at); - UpcomingUpgrades::::mutate(|upcoming_upgrades| { - let insert_idx = upcoming_upgrades - .binary_search_by_key(&expected_at, |&(_, b)| b) - .unwrap_or_else(|idx| idx); - upcoming_upgrades.insert(insert_idx, (id, expected_at)); - }); + // Only set an upcoming upgrade if `GoAhead` signal should be set for the respective para. + if set_go_ahead == SetGoAhead::Yes { + UpcomingUpgrades::::mutate(|upcoming_upgrades| { + let insert_idx = upcoming_upgrades + .binary_search_by_key(&expected_at, |&(_, b)| b) + .unwrap_or_else(|idx| idx); + upcoming_upgrades.insert(insert_idx, (id, expected_at)); + }); + } let expected_at = expected_at.saturated_into(); let log = ConsensusLog::ParaScheduleUpgradeCode(id, *code_hash, expected_at); @@ -1835,6 +1862,7 @@ impl Pallet { new_code: ValidationCode, inclusion_block_number: BlockNumberFor, cfg: &configuration::HostConfiguration>, + set_go_ahead: SetGoAhead, ) -> Weight { let mut weight = T::DbWeight::get().reads(1); @@ -1884,7 +1912,7 @@ impl Pallet { }); weight += Self::kick_off_pvf_check( - PvfCheckCause::Upgrade { id, included_at: inclusion_block_number }, + PvfCheckCause::Upgrade { id, included_at: inclusion_block_number, set_go_ahead }, code_hash, new_code, cfg, @@ -2036,7 +2064,7 @@ impl Pallet { } /// Submits a given PVF check statement with corresponding signature as an unsigned transaction - /// into the memory pool. Ultimately, that disseminates the transaction accross the network. + /// into the memory pool. Ultimately, that disseminates the transaction across the network. /// /// This function expects an offchain context and cannot be callable from the on-chain logic. /// diff --git a/polkadot/runtime/parachains/src/paras/tests.rs b/polkadot/runtime/parachains/src/paras/tests.rs index c511c65d473304b230897b036c73d63daca52024..cca200c2765e361120b4c5ea8db3dfc7c82bdc13 100644 --- a/polkadot/runtime/parachains/src/paras/tests.rs +++ b/polkadot/runtime/parachains/src/paras/tests.rs @@ -436,7 +436,13 @@ fn code_upgrade_applied_after_delay() { // this parablock is in the context of block 1. 
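// A standalone illustration of the ordered-insert idiom used for `UpcomingUpgrades` above:
// `binary_search_by_key` returns `Err(idx)` carrying the insertion point when the key is not
// present, so `unwrap_or_else(|idx| idx)` always yields a position that keeps the vec sorted.
// Types are simplified to plain `u32`s instead of `ParaId` and block numbers.
fn insert_sorted(upgrades: &mut Vec<(u32, u32)>, para: u32, at_block: u32) {
    let idx = upgrades
        .binary_search_by_key(&at_block, |&(_, block)| block)
        .unwrap_or_else(|idx| idx);
    upgrades.insert(idx, (para, at_block));
}

// Starting from `[(7, 10), (9, 20)]`, inserting `(8, 15)` yields `[(7, 10), (8, 15), (9, 20)]`.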
let expected_at = 1 + validation_upgrade_delay; let next_possible_upgrade_at = 1 + validation_upgrade_cooldown; - Paras::schedule_code_upgrade(para_id, new_code.clone(), 1, &Configuration::config()); + Paras::schedule_code_upgrade( + para_id, + new_code.clone(), + 1, + &Configuration::config(), + SetGoAhead::Yes, + ); // Include votes for super-majority. submit_super_majority_pvf_votes(&new_code, EXPECTED_SESSION, true); @@ -504,6 +510,127 @@ fn code_upgrade_applied_after_delay() { }); } +#[test] +fn code_upgrade_applied_without_setting_go_ahead_signal() { + let code_retention_period = 10; + let validation_upgrade_delay = 5; + let validation_upgrade_cooldown = 10; + + let original_code = ValidationCode(vec![1, 2, 3]); + let paras = vec![( + 0u32.into(), + ParaGenesisArgs { + para_kind: ParaKind::Parachain, + genesis_head: dummy_head_data(), + validation_code: original_code.clone(), + }, + )]; + + let genesis_config = MockGenesisConfig { + paras: GenesisConfig { paras, ..Default::default() }, + configuration: crate::configuration::GenesisConfig { + config: HostConfiguration { + code_retention_period, + validation_upgrade_delay, + validation_upgrade_cooldown, + ..Default::default() + }, + }, + ..Default::default() + }; + + new_test_ext(genesis_config).execute_with(|| { + check_code_is_stored(&original_code); + + let para_id = ParaId::from(0); + let new_code = ValidationCode(vec![4, 5, 6]); + + // Wait for at least one session change to set active validators. + const EXPECTED_SESSION: SessionIndex = 1; + run_to_block(2, Some(vec![1])); + assert_eq!(Paras::current_code(¶_id), Some(original_code.clone())); + + let (expected_at, next_possible_upgrade_at) = { + // this parablock is in the context of block 1. + let expected_at = 1 + validation_upgrade_delay; + let next_possible_upgrade_at = 1 + validation_upgrade_cooldown; + // `set_go_ahead` parameter set to `false` which prevents signaling the parachain + // with the `GoAhead` signal. + Paras::schedule_code_upgrade( + para_id, + new_code.clone(), + 1, + &Configuration::config(), + SetGoAhead::No, + ); + // Include votes for super-majority. + submit_super_majority_pvf_votes(&new_code, EXPECTED_SESSION, true); + + Paras::note_new_head(para_id, Default::default(), 1); + + assert!(Paras::past_code_meta(¶_id).most_recent_change().is_none()); + assert_eq!(FutureCodeUpgrades::::get(¶_id), Some(expected_at)); + assert_eq!(FutureCodeHash::::get(¶_id), Some(new_code.hash())); + assert_eq!(UpcomingUpgrades::::get(), vec![]); + assert_eq!(UpgradeCooldowns::::get(), vec![(para_id, next_possible_upgrade_at)]); + assert_eq!(Paras::current_code(¶_id), Some(original_code.clone())); + check_code_is_stored(&original_code); + check_code_is_stored(&new_code); + + (expected_at, next_possible_upgrade_at) + }; + + run_to_block(expected_at, None); + + // the candidate is in the context of the parent of `expected_at`, + // thus does not trigger the code upgrade. However, now the `UpgradeGoAheadSignal` + // should not be set. 
+ { + Paras::note_new_head(para_id, Default::default(), expected_at - 1); + + assert!(Paras::past_code_meta(¶_id).most_recent_change().is_none()); + assert_eq!(FutureCodeUpgrades::::get(¶_id), Some(expected_at)); + assert_eq!(FutureCodeHash::::get(¶_id), Some(new_code.hash())); + assert!(UpgradeGoAheadSignal::::get(¶_id).is_none()); + assert_eq!(Paras::current_code(¶_id), Some(original_code.clone())); + check_code_is_stored(&original_code); + check_code_is_stored(&new_code); + } + + run_to_block(expected_at + 1, None); + + // the candidate is in the context of `expected_at`, and triggers + // the upgrade. + { + Paras::note_new_head(para_id, Default::default(), expected_at); + + assert_eq!(Paras::past_code_meta(¶_id).most_recent_change(), Some(expected_at)); + assert_eq!( + PastCodeHash::::get(&(para_id, expected_at)), + Some(original_code.hash()), + ); + assert!(FutureCodeUpgrades::::get(¶_id).is_none()); + assert!(FutureCodeHash::::get(¶_id).is_none()); + assert!(UpgradeGoAheadSignal::::get(¶_id).is_none()); + assert_eq!(Paras::current_code(¶_id), Some(new_code.clone())); + assert_eq!( + UpgradeRestrictionSignal::::get(¶_id), + Some(UpgradeRestriction::Present), + ); + assert_eq!(UpgradeCooldowns::::get(), vec![(para_id, next_possible_upgrade_at)]); + check_code_is_stored(&original_code); + check_code_is_stored(&new_code); + } + + run_to_block(next_possible_upgrade_at + 1, None); + + { + assert!(UpgradeRestrictionSignal::::get(¶_id).is_none()); + assert!(UpgradeCooldowns::::get().is_empty()); + } + }); +} + #[test] fn code_upgrade_applied_after_delay_even_when_late() { let code_retention_period = 10; @@ -546,7 +673,13 @@ fn code_upgrade_applied_after_delay_even_when_late() { // this parablock is in the context of block 1. let expected_at = 1 + validation_upgrade_delay; let next_possible_upgrade_at = 1 + validation_upgrade_cooldown; - Paras::schedule_code_upgrade(para_id, new_code.clone(), 1, &Configuration::config()); + Paras::schedule_code_upgrade( + para_id, + new_code.clone(), + 1, + &Configuration::config(), + SetGoAhead::Yes, + ); // Include votes for super-majority. submit_super_majority_pvf_votes(&new_code, EXPECTED_SESSION, true); @@ -624,7 +757,13 @@ fn submit_code_change_when_not_allowed_is_err() { const EXPECTED_SESSION: SessionIndex = 1; run_to_block(1, Some(vec![1])); - Paras::schedule_code_upgrade(para_id, new_code.clone(), 1, &Configuration::config()); + Paras::schedule_code_upgrade( + para_id, + new_code.clone(), + 1, + &Configuration::config(), + SetGoAhead::Yes, + ); // Include votes for super-majority. submit_super_majority_pvf_votes(&new_code, EXPECTED_SESSION, true); @@ -636,7 +775,13 @@ fn submit_code_change_when_not_allowed_is_err() { // ignore it. Note that this is only true from perspective of this module. 
run_to_block(2, None); assert!(!Paras::can_upgrade_validation_code(para_id)); - Paras::schedule_code_upgrade(para_id, newer_code.clone(), 2, &Configuration::config()); + Paras::schedule_code_upgrade( + para_id, + newer_code.clone(), + 2, + &Configuration::config(), + SetGoAhead::Yes, + ); assert_eq!( FutureCodeUpgrades::::get(¶_id), Some(1 + validation_upgrade_delay), /* did not change since the same assertion from @@ -694,7 +839,13 @@ fn upgrade_restriction_elapsed_doesnt_mean_can_upgrade() { const EXPECTED_SESSION: SessionIndex = 1; run_to_block(1, Some(vec![1])); - Paras::schedule_code_upgrade(para_id, new_code.clone(), 0, &Configuration::config()); + Paras::schedule_code_upgrade( + para_id, + new_code.clone(), + 0, + &Configuration::config(), + SetGoAhead::Yes, + ); // Include votes for super-majority. submit_super_majority_pvf_votes(&new_code, EXPECTED_SESSION, true); @@ -713,7 +864,13 @@ fn upgrade_restriction_elapsed_doesnt_mean_can_upgrade() { assert!(!Paras::can_upgrade_validation_code(para_id)); // And scheduling another upgrade does not do anything. `expected_at` is still the same. - Paras::schedule_code_upgrade(para_id, newer_code.clone(), 30, &Configuration::config()); + Paras::schedule_code_upgrade( + para_id, + newer_code.clone(), + 30, + &Configuration::config(), + SetGoAhead::Yes, + ); assert_eq!(FutureCodeUpgrades::::get(¶_id), Some(0 + validation_upgrade_delay)); }); } @@ -765,7 +922,13 @@ fn full_parachain_cleanup_storage() { let expected_at = { // this parablock is in the context of block 1. let expected_at = 1 + validation_upgrade_delay; - Paras::schedule_code_upgrade(para_id, new_code.clone(), 1, &Configuration::config()); + Paras::schedule_code_upgrade( + para_id, + new_code.clone(), + 1, + &Configuration::config(), + SetGoAhead::Yes, + ); // Include votes for super-majority. submit_super_majority_pvf_votes(&new_code, EXPECTED_SESSION, true); @@ -860,6 +1023,7 @@ fn cannot_offboard_ongoing_pvf_check() { new_code.clone(), RELAY_PARENT, &Configuration::config(), + SetGoAhead::Yes, ); assert!(!Paras::pvfs_require_precheck().is_empty()); @@ -1012,7 +1176,13 @@ fn code_hash_at_returns_up_to_end_of_code_retention_period() { let para_id = ParaId::from(0); let old_code: ValidationCode = vec![1, 2, 3].into(); let new_code: ValidationCode = vec![4, 5, 6].into(); - Paras::schedule_code_upgrade(para_id, new_code.clone(), 0, &Configuration::config()); + Paras::schedule_code_upgrade( + para_id, + new_code.clone(), + 0, + &Configuration::config(), + SetGoAhead::Yes, + ); // Include votes for super-majority. submit_super_majority_pvf_votes(&new_code, EXPECTED_SESSION, true); @@ -1120,6 +1290,7 @@ fn pvf_check_coalescing_onboarding_and_upgrade() { validation_code.clone(), RELAY_PARENT, &Configuration::config(), + SetGoAhead::Yes, ); assert!(!Paras::pvfs_require_precheck().is_empty()); @@ -1224,7 +1395,13 @@ fn pvf_check_upgrade_reject() { // Expected current session index. const EXPECTED_SESSION: SessionIndex = 1; - Paras::schedule_code_upgrade(a, new_code.clone(), RELAY_PARENT, &Configuration::config()); + Paras::schedule_code_upgrade( + a, + new_code.clone(), + RELAY_PARENT, + &Configuration::config(), + SetGoAhead::Yes, + ); check_code_is_stored(&new_code); // 1/3 of validators vote against `new_code`. PVF should not be rejected yet. @@ -1404,7 +1581,13 @@ fn include_pvf_check_statement_refunds_weight() { // Expected current session index. 
const EXPECTED_SESSION: SessionIndex = 1; - Paras::schedule_code_upgrade(a, new_code.clone(), RELAY_PARENT, &Configuration::config()); + Paras::schedule_code_upgrade( + a, + new_code.clone(), + RELAY_PARENT, + &Configuration::config(), + SetGoAhead::Yes, + ); let mut stmts = IntoIterator::into_iter([0, 1, 2, 3]) .map(|i| { @@ -1499,7 +1682,13 @@ fn poke_unused_validation_code_doesnt_remove_code_with_users() { // Then we add a user to the code, say by upgrading. run_to_block(2, None); - Paras::schedule_code_upgrade(para_id, validation_code.clone(), 1, &Configuration::config()); + Paras::schedule_code_upgrade( + para_id, + validation_code.clone(), + 1, + &Configuration::config(), + SetGoAhead::Yes, + ); Paras::note_new_head(para_id, HeadData::default(), 1); // Finally we poke the code, which should not remove it from the storage. @@ -1564,7 +1753,13 @@ fn add_trusted_validation_code_insta_approval() { // Then some parachain upgrades it's code with the relay-parent 1. run_to_block(2, None); - Paras::schedule_code_upgrade(para_id, validation_code.clone(), 1, &Configuration::config()); + Paras::schedule_code_upgrade( + para_id, + validation_code.clone(), + 1, + &Configuration::config(), + SetGoAhead::Yes, + ); Paras::note_new_head(para_id, HeadData::default(), 1); // Verify that the code upgrade has `expected_at` set to `26`. @@ -1600,7 +1795,13 @@ fn add_trusted_validation_code_enacts_existing_pvf_vote() { new_test_ext(genesis_config).execute_with(|| { // First, some parachain upgrades it's code with the relay-parent 1. run_to_block(2, None); - Paras::schedule_code_upgrade(para_id, validation_code.clone(), 1, &Configuration::config()); + Paras::schedule_code_upgrade( + para_id, + validation_code.clone(), + 1, + &Configuration::config(), + SetGoAhead::Yes, + ); Paras::note_new_head(para_id, HeadData::default(), 1); // No upgrade should be scheduled at this point. 
PVF pre-checking vote should run for diff --git a/polkadot/runtime/parachains/src/paras_inherent/tests.rs b/polkadot/runtime/parachains/src/paras_inherent/tests.rs index 7c70fcea1943415e9882bacb75e9dfc39b23b90d..4fc60792e34683d223873a6e0ee39d70d380709e 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/tests.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/tests.rs @@ -1205,9 +1205,17 @@ mod sanitizers { } } - #[test] - fn candidates() { - new_test_ext(MockGenesisConfig::default()).execute_with(|| { + mod candidates { + use super::*; + + // Backed candidates and scheduled parachains used for `sanitize_backed_candidates` testing + struct TestData { + backed_candidates: Vec, + scheduled_paras: BTreeMap, + } + + // Generate test data for the candidates test + fn get_test_data() -> TestData { const RELAY_PARENT_NUM: u32 = 3; let header = default_header(); @@ -1233,9 +1241,6 @@ mod sanitizers { .unwrap(); } - let has_concluded_invalid = - |_idx: usize, _backed_candidate: &BackedCandidate| -> bool { false }; - let scheduled = (0_usize..2) .into_iter() .map(|idx| (ParaId::from(1_u32 + idx as u32), CoreIndex::from(idx as u32))) @@ -1278,29 +1283,54 @@ mod sanitizers { }) .collect::>(); - // happy path - assert_eq!( - sanitize_backed_candidates::( - backed_candidates.clone(), - has_concluded_invalid, - &scheduled - ), - backed_candidates - ); + TestData { backed_candidates, scheduled_paras: scheduled } + } - // nothing is scheduled, so no paraids match, thus all backed candidates are skipped - { + #[test] + fn happy_path() { + new_test_ext(MockGenesisConfig::default()).execute_with(|| { + let TestData { backed_candidates, scheduled_paras: scheduled } = get_test_data(); + + let has_concluded_invalid = + |_idx: usize, _backed_candidate: &BackedCandidate| -> bool { false }; + + assert_eq!( + sanitize_backed_candidates::( + backed_candidates.clone(), + has_concluded_invalid, + &scheduled + ), + backed_candidates + ); + + {} + }); + } + + // nothing is scheduled, so no paraids match, thus all backed candidates are skipped + #[test] + fn nothing_scheduled() { + new_test_ext(MockGenesisConfig::default()).execute_with(|| { + let TestData { backed_candidates, scheduled_paras: _ } = get_test_data(); let scheduled = &BTreeMap::new(); + let has_concluded_invalid = + |_idx: usize, _backed_candidate: &BackedCandidate| -> bool { false }; + assert!(sanitize_backed_candidates::( backed_candidates.clone(), has_concluded_invalid, &scheduled ) .is_empty()); - } + }); + } + + // candidates that have concluded as invalid are filtered out + #[test] + fn invalid_are_filtered_out() { + new_test_ext(MockGenesisConfig::default()).execute_with(|| { + let TestData { backed_candidates, scheduled_paras: scheduled } = get_test_data(); - // candidates that have concluded as invalid are filtered out - { // mark every second one as concluded invalid let set = { let mut set = std::collections::HashSet::new(); @@ -1322,7 +1352,7 @@ mod sanitizers { .len(), backed_candidates.len() / 2 ); - } - }); + }); + } } } diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs b/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs index 35d92f71084fe64f1890f29d1ee840606e944beb..4d0bbc6a8960fc1c1d3e70abff2e0175e7898dac 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs @@ -370,15 +370,7 @@ pub fn get_session_disputes( pub fn session_executor_params( session_index: SessionIndex, ) -> Option { - // This is to bootstrap the storage working 
around the runtime migration issue: - // https://github.com/paritytech/substrate/issues/9997 - // After the bootstrap is complete (no less than 7 session passed with the runtime) - // this code should be replaced with a pure - // >::session_executor_params(session_index) call. - match >::session_executor_params(session_index) { - Some(ep) => Some(ep), - None => Some(ExecutorParams::default()), - } + >::session_executor_params(session_index) } /// Implementation of `unapplied_slashes` runtime API diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs index d01b543630c31c80747d2d1e80480b931ae86886..200fd57915f9b8ac03fd8241e6b2e69d4194bad0 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs @@ -15,3 +15,35 @@ // along with Polkadot. If not, see . //! Put implementations of functions from staging APIs here. + +use crate::{configuration, initializer, shared}; +use primitives::{vstaging::NodeFeatures, ValidatorIndex}; +use sp_std::{collections::btree_map::BTreeMap, prelude::Vec}; + +/// Implementation for `DisabledValidators` +// CAVEAT: this should only be called on the node side +// as it might produce incorrect results on session boundaries +pub fn disabled_validators() -> Vec +where + T: pallet_session::Config + shared::Config, +{ + let shuffled_indices = >::active_validator_indices(); + // mapping from raw validator index to `ValidatorIndex` + // this computation is the same within a session, but should be cheap + let reverse_index = shuffled_indices + .iter() + .enumerate() + .map(|(i, v)| (v.0, ValidatorIndex(i as u32))) + .collect::>(); + + // we might have disabled validators who are not parachain validators + >::disabled_validators() + .iter() + .filter_map(|v| reverse_index.get(v).cloned()) + .collect() +} + +/// Returns the current state of the node features. +pub fn node_features() -> NodeFeatures { + >::config().node_features +} diff --git a/polkadot/runtime/parachains/src/scheduler.rs b/polkadot/runtime/parachains/src/scheduler.rs index 60b2a9254600e56b40ca2027db06a98a7711f246..b81b68b5745ee57ce0736426abda5081aeb619f2 100644 --- a/polkadot/runtime/parachains/src/scheduler.rs +++ b/polkadot/runtime/parachains/src/scheduler.rs @@ -352,7 +352,6 @@ impl Pallet { None }, Ok((pos_in_claimqueue, pe)) => { - // is this correct? availability_cores[core_idx.0 as usize] = CoreOccupied::Paras(pe); Some((*core_idx, pos_in_claimqueue)) @@ -605,14 +604,10 @@ impl Pallet { /// Moves all elements in the claimqueue forward. fn move_claimqueue_forward() { let mut cq = ClaimQueue::::get(); - for (_, core_queue) in cq.iter_mut() { + for core_queue in cq.values_mut() { // First pop the finished claims from the front. - match core_queue.front() { - None => {}, - Some(None) => { - core_queue.pop_front(); - }, - Some(_) => {}, + if let Some(None) = core_queue.front() { + core_queue.pop_front(); } } @@ -628,10 +623,11 @@ impl Pallet { // This can only happen on new sessions at which we move all assignments back to the // provider. Hence, there's nothing we need to do here. - if ValidatorGroups::::get().is_empty() { + if ValidatorGroups::::decode_len().map_or(true, |l| l == 0) { return } - let n_lookahead = Self::claimqueue_lookahead(); + // If there exists a core, ensure we schedule at least one job onto it. 
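// A standalone illustration (plain `u32`s and `std` collections instead of the pallet types)
// of the reverse-index mapping built by `disabled_validators` above: `pallet_session` reports
// raw session validator indices, while the node wants positions in the shuffled active set.
fn to_para_indices(shuffled_active: &[u32], disabled_raw: &[u32]) -> Vec<u32> {
    use std::collections::BTreeMap;
    // Position within the active set, keyed by the raw session index stored at that position.
    let reverse: BTreeMap<u32, u32> = shuffled_active
        .iter()
        .enumerate()
        .map(|(pos, raw)| (*raw, pos as u32))
        .collect();
    // Disabled validators that are not parachain validators are dropped, as in the hunk above.
    disabled_raw.iter().filter_map(|raw| reverse.get(raw).copied()).collect()
}

// With a shuffled active set of raw indices `[3, 0, 2]` and a raw disabled list `[2, 5]`, the
// result is `[2]`: raw index 2 sits at position 2 here, and raw index 5 is not in the set.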
+ let n_lookahead = Self::claimqueue_lookahead().max(1); let n_session_cores = T::AssignmentProvider::session_core_count(); let cq = ClaimQueue::::get(); let ttl = >::config().on_demand_ttl; @@ -686,8 +682,7 @@ impl Pallet { fn add_to_claimqueue(core_idx: CoreIndex, pe: ParasEntry>) { ClaimQueue::::mutate(|la| { - let la_deque = la.entry(core_idx).or_insert_with(|| VecDeque::new()); - la_deque.push_back(Some(pe)); + la.entry(core_idx).or_default().push_back(Some(pe)); }); } diff --git a/polkadot/runtime/parachains/src/scheduler/migration.rs b/polkadot/runtime/parachains/src/scheduler/migration.rs index accff7016ed1e37e585b8433b5cdfd37378e4fe3..bb9a647e955ca7e6bf7b7e7a78abd41e084550d0 100644 --- a/polkadot/runtime/parachains/src/scheduler/migration.rs +++ b/polkadot/runtime/parachains/src/scheduler/migration.rs @@ -18,42 +18,52 @@ use super::*; use frame_support::{ - pallet_prelude::ValueQuery, storage_alias, traits::OnRuntimeUpgrade, weights::Weight, + migrations::VersionedMigration, pallet_prelude::ValueQuery, storage_alias, + traits::OnRuntimeUpgrade, weights::Weight, }; mod v0 { use super::*; - use primitives::CollatorId; + use primitives::{CollatorId, Id}; + #[storage_alias] pub(super) type Scheduled = StorageValue, Vec, ValueQuery>; - #[derive(Encode, Decode)] - pub struct QueuedParathread { - claim: primitives::ParathreadEntry, - core_offset: u32, - } + #[derive(Clone, Encode, Decode)] + #[cfg_attr(feature = "std", derive(PartialEq))] + pub struct ParathreadClaim(pub Id, pub CollatorId); - #[derive(Encode, Decode, Default)] - pub struct ParathreadClaimQueue { - queue: Vec, - next_core_offset: u32, + #[derive(Clone, Encode, Decode)] + #[cfg_attr(feature = "std", derive(PartialEq))] + pub struct ParathreadEntry { + /// The claim. + pub claim: ParathreadClaim, + /// Number of retries. + pub retries: u32, } - // Only here to facilitate the migration. - impl ParathreadClaimQueue { - pub fn len(self) -> usize { - self.queue.len() - } + /// What is occupying a specific availability core. + #[derive(Clone, Encode, Decode)] + #[cfg_attr(feature = "std", derive(PartialEq))] + pub enum CoreOccupied { + /// A parathread. + Parathread(ParathreadEntry), + /// A parachain. + Parachain, } + /// The actual type isn't important, as we only delete the key in the state. #[storage_alias] - pub(super) type ParathreadQueue = - StorageValue, ParathreadClaimQueue, ValueQuery>; + pub(crate) type AvailabilityCores = + StorageValue, Vec>, ValueQuery>; + /// The actual type isn't important, as we only delete the key in the state. #[storage_alias] - pub(super) type ParathreadClaimIndex = - StorageValue, Vec, ValueQuery>; + pub(super) type ParathreadQueue = StorageValue, (), ValueQuery>; + + #[storage_alias] + pub(super) type ParathreadClaimIndex = StorageValue, (), ValueQuery>; /// The assignment type. 
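// A minimal sketch of the `#[storage_alias]` technique used by the `v0` module above; the
// pallet, its `Config`, and the storage name here are hypothetical. The old items no longer
// exist as fields of the pallet, so an alias gives the migration typed access, and a unit
// value type is enough when the key is only going to be deleted.
use frame_support::{pallet_prelude::*, storage_alias};

#[storage_alias]
type RetiredQueue<T: Config> = StorageValue<Pallet<T>, (), ValueQuery>;

fn clean_up<T: Config>() {
    // Removes the key from state regardless of what the old encoded value looked like.
    RetiredQueue::<T>::kill();
}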
#[derive(Clone, Encode, Decode, TypeInfo, RuntimeDebug)] @@ -83,53 +93,60 @@ mod v0 { pub mod v1 { use super::*; use crate::scheduler; - use frame_support::traits::StorageVersion; - pub struct MigrateToV1(sp_std::marker::PhantomData); - impl OnRuntimeUpgrade for MigrateToV1 { + #[allow(deprecated)] + pub type MigrateToV1 = VersionedMigration< + 0, + 1, + UncheckedMigrateToV1, + Pallet, + ::DbWeight, + >; + + #[deprecated(note = "Use MigrateToV1 instead")] + pub struct UncheckedMigrateToV1(sp_std::marker::PhantomData); + #[allow(deprecated)] + impl OnRuntimeUpgrade for UncheckedMigrateToV1 { fn on_runtime_upgrade() -> Weight { - if StorageVersion::get::>() == 0 { - let weight_consumed = migrate_to_v1::(); + let weight_consumed = migrate_to_v1::(); - log::info!(target: scheduler::LOG_TARGET, "Migrating para scheduler storage to v1"); - StorageVersion::new(1).put::>(); + log::info!(target: scheduler::LOG_TARGET, "Migrating para scheduler storage to v1"); - weight_consumed - } else { - log::warn!(target: scheduler::LOG_TARGET, "Para scheduler v1 migration should be removed."); - T::DbWeight::get().reads(1) - } + weight_consumed } #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, sp_runtime::DispatchError> { - log::trace!( + let n: u32 = v0::Scheduled::::get().len() as u32 + + v0::AvailabilityCores::::get().iter().filter(|c| c.is_some()).count() as u32; + + log::info!( target: crate::scheduler::LOG_TARGET, - "Scheduled before migration: {}", - v0::Scheduled::::get().len() + "Number of scheduled and waiting for availability before: {n}", ); - let bytes = u32::to_be_bytes(v0::Scheduled::::get().len() as u32); - - Ok(bytes.to_vec()) + Ok(n.encode()) } #[cfg(feature = "try-runtime")] fn post_upgrade(state: Vec) -> Result<(), sp_runtime::DispatchError> { - log::trace!(target: crate::scheduler::LOG_TARGET, "Running post_upgrade()"); - ensure!( - StorageVersion::get::>() >= 1, - "Storage version should be at least `1` after the migration" - ); + log::info!(target: crate::scheduler::LOG_TARGET, "Running post_upgrade()"); + ensure!( - v0::Scheduled::::get().len() == 0, + v0::Scheduled::::get().is_empty(), "Scheduled should be empty after the migration" ); - let sched_len = u32::from_be_bytes(state.try_into().unwrap()); + let expected_len = u32::decode(&mut &state[..]).unwrap(); + let availability_cores_waiting = super::AvailabilityCores::::get() + .iter() + .filter(|c| !matches!(c, CoreOccupied::Free)) + .count(); + ensure!( - Pallet::::claimqueue_len() as u32 == sched_len, - "Scheduled completely moved to ClaimQueue after migration" + Pallet::::claimqueue_len() as u32 + availability_cores_waiting as u32 == + expected_len, + "ClaimQueue and AvailabilityCores should have the correct length", ); Ok(()) @@ -140,11 +157,8 @@ pub mod v1 { pub fn migrate_to_v1() -> Weight { let mut weight: Weight = Weight::zero(); - let pq = v0::ParathreadQueue::::take(); - let pq_len = pq.len() as u64; - - let pci = v0::ParathreadClaimIndex::::take(); - let pci_len = pci.len() as u64; + v0::ParathreadQueue::::kill(); + v0::ParathreadClaimIndex::::kill(); let now = >::block_number(); let scheduled = v0::Scheduled::::take(); @@ -156,10 +170,35 @@ pub fn migrate_to_v1() -> Weight { Pallet::::add_to_claimqueue(core_idx, pe); } + let parachains = paras::Pallet::::parachains(); + let availability_cores = v0::AvailabilityCores::::take(); + let mut new_availability_cores = Vec::new(); + + for (core_index, core) in availability_cores.into_iter().enumerate() { + let new_core = if let Some(core) = core { + match core { + 
v0::CoreOccupied::Parachain => CoreOccupied::Paras(ParasEntry::new( + Assignment::new(parachains[core_index]), + now, + )), + v0::CoreOccupied::Parathread(entry) => + CoreOccupied::Paras(ParasEntry::new(Assignment::new(entry.claim.0), now)), + } + } else { + CoreOccupied::Free + }; + + new_availability_cores.push(new_core); + } + + super::AvailabilityCores::::set(new_availability_cores); + // 2x as once for Scheduled and once for Claimqueue weight = weight.saturating_add(T::DbWeight::get().reads_writes(2 * sched_len, 2 * sched_len)); - weight = weight.saturating_add(T::DbWeight::get().reads_writes(pq_len, pq_len)); - weight = weight.saturating_add(T::DbWeight::get().reads_writes(pci_len, pci_len)); + // reading parachains + availability_cores, writing AvailabilityCores + weight = weight.saturating_add(T::DbWeight::get().reads_writes(2, 1)); + // 2x kill + weight = weight.saturating_add(T::DbWeight::get().writes(2)); weight } diff --git a/polkadot/runtime/parachains/src/session_info.rs b/polkadot/runtime/parachains/src/session_info.rs index 0043851be0f22ef50cabb1b5f9f9fd51e9b5e188..9e1b3d05842fd7b35f87de5c297958f5f3f36bf3 100644 --- a/polkadot/runtime/parachains/src/session_info.rs +++ b/polkadot/runtime/parachains/src/session_info.rs @@ -17,8 +17,9 @@ //! The session info pallet provides information about validator sets //! from prior sessions needed for approvals and disputes. //! -//! See . - +//! See the documentation on [session info][session-info-page] in the implementers' guide. +//! +//! [session-info-page]: https://paritytech.github.io/polkadot-sdk/book/runtime/session_info.html use crate::{ configuration, paras, scheduler, shared, util::{take_active_subset, take_active_subset_and_inactive}, diff --git a/polkadot/runtime/parachains/src/ump_tests.rs b/polkadot/runtime/parachains/src/ump_tests.rs index e96b5e9689272a40d7977a43b5d3ed961a3926f1..def608882050ab57a4a95fe8c5b14a4852fbebb4 100644 --- a/polkadot/runtime/parachains/src/ump_tests.rs +++ b/polkadot/runtime/parachains/src/ump_tests.rs @@ -386,7 +386,7 @@ fn relay_dispatch_queue_size_is_updated() { MessageQueue::service_queues(Weight::from_all(u64::MAX)); let fp = MessageQueue::footprint(AggregateMessageOrigin::Ump(para)); - let (para_queue_count, para_queue_size) = (fp.count, fp.size); + let (para_queue_count, para_queue_size) = (fp.storage.count, fp.storage.size); assert_eq!(para_queue_count, 1, "count wrong for para: {}", p); assert_eq!(para_queue_size, 8, "size wrong for para: {}", p); } @@ -400,7 +400,7 @@ fn relay_dispatch_queue_size_is_updated() { assert_queue_remaining(p.into(), cfg.max_upward_queue_count, cfg.max_upward_queue_size); let fp = MessageQueue::footprint(AggregateMessageOrigin::Ump(para)); - let (para_queue_count, para_queue_size) = (fp.count, fp.size); + let (para_queue_count, para_queue_size) = (fp.storage.count, fp.storage.size); assert_eq!(para_queue_count, 0, "count wrong for para: {}", p); assert_eq!(para_queue_size, 0, "size wrong for para: {}", p); } diff --git a/polkadot/runtime/rococo/Cargo.toml b/polkadot/runtime/rococo/Cargo.toml index 0b8a8624bb673e7242f3b78066b7fbcaecc60c93..9693d351cf407afb326c4fa7583c13224c8364eb 100644 --- a/polkadot/runtime/rococo/Cargo.toml +++ b/polkadot/runtime/rococo/Cargo.toml @@ -2,13 +2,14 @@ name = "rococo-runtime" build = "build.rs" version = "1.0.0" +description = "Rococo testnet Relay Chain runtime." 
authors.workspace = true edition.workspace = true license.workspace = true [dependencies] parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } serde = { version = "1.0.188", default-features = false } serde_derive = { version = "1.0.117", optional = true } @@ -85,6 +86,7 @@ pallet-vesting = { path = "../../../substrate/frame/vesting", default-features = pallet-whitelist = { path = "../../../substrate/frame/whitelist", default-features = false } pallet-xcm = { path = "../../xcm/pallet-xcm", default-features = false } pallet-xcm-benchmarks = { path = "../../xcm/pallet-xcm-benchmarks", default-features = false, optional = true } +pallet-root-testing = { path = "../../../substrate/frame/root-testing", default-features = false } frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } frame-try-runtime = { path = "../../../substrate/frame/try-runtime", default-features = false, optional = true } @@ -106,7 +108,7 @@ keyring = { package = "sp-keyring", path = "../../../substrate/primitives/keyrin remote-externalities = { package = "frame-remote-externalities" , path = "../../../substrate/utils/frame/remote-externalities" } sp-trie = { path = "../../../substrate/primitives/trie" } separator = "0.4.1" -serde_json = "1.0.107" +serde_json = "1.0.108" sp-tracing = { path = "../../../substrate/primitives/tracing", default-features = false } tokio = { version = "1.24.2", features = ["macros"] } @@ -160,6 +162,7 @@ std = [ "pallet-ranked-collective/std", "pallet-recovery/std", "pallet-referenda/std", + "pallet-root-testing/std", "pallet-scheduler/std", "pallet-session/std", "pallet-society/std", @@ -289,6 +292,7 @@ try-runtime = [ "pallet-ranked-collective/try-runtime", "pallet-recovery/try-runtime", "pallet-referenda/try-runtime", + "pallet-root-testing/try-runtime", "pallet-scheduler/try-runtime", "pallet-session/try-runtime", "pallet-society/try-runtime", @@ -309,11 +313,11 @@ try-runtime = [ ] # Set timing constants (e.g. session period) to faster versions to speed up testing. -fast-runtime = [] +fast-runtime = [ "rococo-runtime-constants/fast-runtime" ] runtime-metrics = [ "runtime-parachains/runtime-metrics", "sp-io/with-tracing" ] -# A feature that should be enabled when the runtime should be build for on-chain +# A feature that should be enabled when the runtime should be built for on-chain # deployment. This will disable stuff that shouldn't be part of the on-chain wasm # to make it smaller like logging for example. 
on-chain-release-build = [ "sp-api/disable-logging" ] diff --git a/polkadot/runtime/rococo/build.rs b/polkadot/runtime/rococo/build.rs index ed32d33105b76c45bac68360e92646019e578b2e..0b7ee77b0d0d3b984a168f8cefe02a9e00e8f86c 100644 --- a/polkadot/runtime/rococo/build.rs +++ b/polkadot/runtime/rococo/build.rs @@ -16,16 +16,19 @@ #[cfg(feature = "std")] fn main() { - // note: needs to be synced with rococo-runtime-constants::time hard-coded string literal - const ROCOCO_EPOCH_DURATION_ENV: &str = "ROCOCO_EPOCH_DURATION"; - substrate_wasm_builder::WasmBuilder::new() .with_current_project() .import_memory() .export_heap_base() .build(); - println!("cargo:rerun-if-env-changed={}", ROCOCO_EPOCH_DURATION_ENV); + substrate_wasm_builder::WasmBuilder::new() + .with_current_project() + .set_file_name("fast_runtime_binary.rs") + .enable_feature("fast-runtime") + .import_memory() + .export_heap_base() + .build(); } #[cfg(not(feature = "std"))] diff --git a/polkadot/runtime/rococo/constants/Cargo.toml b/polkadot/runtime/rococo/constants/Cargo.toml index b1eb3f48f65518079b8fe534ad23e8bc40e410c1..8ff6d57ea5b5f1e399928ad2f3c956e3d984eab0 100644 --- a/polkadot/runtime/rococo/constants/Cargo.toml +++ b/polkadot/runtime/rococo/constants/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "rococo-runtime-constants" version = "1.0.0" +description = "Constants used throughout the Rococo network." authors.workspace = true edition.workspace = true license.workspace = true @@ -15,6 +16,8 @@ sp-runtime = { path = "../../../../substrate/primitives/runtime", default-featur sp-weights = { path = "../../../../substrate/primitives/weights", default-features = false } sp-core = { path = "../../../../substrate/primitives/core", default-features = false } +xcm = { package = "staging-xcm", path = "../../../xcm", default-features = false } + [features] default = [ "std" ] std = [ @@ -24,4 +27,8 @@ std = [ "sp-core/std", "sp-runtime/std", "sp-weights/std", + "xcm/std", ] + +# Set timing constants (e.g. session period) to faster versions to speed up testing. +fast-runtime = [] diff --git a/polkadot/runtime/rococo/constants/src/lib.rs b/polkadot/runtime/rococo/constants/src/lib.rs index 2200f7ddefe1f912f026b98f7232cf7273891d18..2f641d60fc8b098274f39a5e72ef2e42ede50c27 100644 --- a/polkadot/runtime/rococo/constants/src/lib.rs +++ b/polkadot/runtime/rococo/constants/src/lib.rs @@ -37,14 +37,15 @@ pub mod currency { /// Time and blocks. pub mod time { + use runtime_common::prod_or_fast; + use primitives::{BlockNumber, Moment}; pub const MILLISECS_PER_BLOCK: Moment = 6000; pub const SLOT_DURATION: Moment = MILLISECS_PER_BLOCK; frame_support::parameter_types! { - pub storage EpochDurationInBlocks: BlockNumber = option_env!("ROCOCO_EPOCH_DURATION") - .map(|s| s.parse().expect("`ROCOCO_EPOCH_DURATION` is not a valid `BlockNumber`")) - .unwrap_or(1 * MINUTES); + pub EpochDurationInBlocks: BlockNumber = + prod_or_fast!(1 * HOURS, 1 * MINUTES, "ROCOCO_EPOCH_DURATION"); } // These time units are defined in number of blocks. @@ -100,6 +101,29 @@ pub mod fee { } } +/// System Parachains. +pub mod system_parachain { + use xcm::latest::prelude::*; + + /// Network's Asset Hub parachain ID. + pub const ASSET_HUB_ID: u32 = 1000; + /// Contracts parachain ID. + pub const CONTRACTS_ID: u32 = 1002; + /// Encointer parachain ID. + pub const ENCOINTER_ID: u32 = 1003; + /// BridgeHub parachain ID. + pub const BRIDGE_HUB_ID: u32 = 1013; + + frame_support::match_types! 
{ + pub type SystemParachains: impl Contains = { + MultiLocation { parents: 0, interior: X1(Parachain(ASSET_HUB_ID | CONTRACTS_ID | ENCOINTER_ID | BRIDGE_HUB_ID)) } + }; + } +} + +/// Rococo Treasury pallet instance. +pub const TREASURY_PALLET_ID: u8 = 18; + #[cfg(test)] mod tests { use super::{ diff --git a/polkadot/runtime/rococo/src/impls.rs b/polkadot/runtime/rococo/src/impls.rs new file mode 100644 index 0000000000000000000000000000000000000000..71b1091eeb6c199fbbae5ed047e633a4c1ca596f --- /dev/null +++ b/polkadot/runtime/rococo/src/impls.rs @@ -0,0 +1,158 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use crate::xcm_config; +use frame_support::pallet_prelude::DispatchResult; +use frame_system::RawOrigin; +use parity_scale_codec::{Decode, Encode}; +use primitives::Balance; +use rococo_runtime_constants::currency::*; +use runtime_common::identity_migrator::{OnReapIdentity, WeightInfo}; +use sp_std::{marker::PhantomData, prelude::*}; +use xcm::{latest::prelude::*, VersionedMultiLocation, VersionedXcm}; +use xcm_executor::traits::TransactAsset; + +/// A type containing the encoding of the People Chain pallets in its runtime. Used to construct any +/// remote calls. The codec index must correspond to the index of `IdentityMigrator` in the +/// `construct_runtime` of the remote chain. +#[derive(Encode, Decode)] +enum PeopleRuntimePallets { + #[codec(index = 248)] + IdentityMigrator(IdentityMigratorCalls), +} + +/// Call encoding for the calls needed from the Identity Migrator pallet. +#[derive(Encode, Decode)] +enum IdentityMigratorCalls { + #[codec(index = 1)] + PokeDeposit(AccountId), +} + +/// Type that implements `OnReapIdentity` that will send the deposit needed to store the same +/// information on a parachain, sends the deposit there, and then updates it. +pub struct ToParachainIdentityReaper(PhantomData<(Runtime, AccountId)>); +impl ToParachainIdentityReaper { + /// Calculate the balance needed on the remote chain based on the `IdentityInfo` and `Subs` on + /// this chain. The total includes: + /// + /// - Identity basic deposit + /// - `IdentityInfo` byte deposit + /// - Sub accounts deposit + /// - 2x existential deposit (1 for account existence, 1 such that the user can transact) + fn calculate_remote_deposit(bytes: u32, subs: u32) -> Balance { + // Remote deposit constants. 
Parachain uses `deposit / 100` + // Source: + // https://github.com/paritytech/polkadot-sdk/blob/a146918/cumulus/parachains/common/src/rococo.rs#L29 + // + // Parachain Deposit Configuration: + // + // pub const BasicDeposit: Balance = deposit(1, 17); + // pub const ByteDeposit: Balance = deposit(0, 1); + // pub const SubAccountDeposit: Balance = deposit(1, 53); + // pub const EXISTENTIAL_DEPOSIT: Balance = constants::currency::EXISTENTIAL_DEPOSIT / 10; + let para_basic_deposit = deposit(1, 17) / 100; + let para_byte_deposit = deposit(0, 1) / 100; + let para_sub_account_deposit = deposit(1, 53) / 100; + let para_existential_deposit = EXISTENTIAL_DEPOSIT / 10; + + // pallet deposits + let id_deposit = + para_basic_deposit.saturating_add(para_byte_deposit.saturating_mul(bytes as Balance)); + let subs_deposit = para_sub_account_deposit.saturating_mul(subs as Balance); + + id_deposit + .saturating_add(subs_deposit) + .saturating_add(para_existential_deposit.saturating_mul(2)) + } +} + +impl OnReapIdentity for ToParachainIdentityReaper +where + Runtime: frame_system::Config + pallet_xcm::Config, + AccountId: Into<[u8; 32]> + Clone + Encode, +{ + fn on_reap_identity(who: &AccountId, fields: u32, subs: u32) -> DispatchResult { + use crate::{ + impls::IdentityMigratorCalls::PokeDeposit, + weights::runtime_common_identity_migrator::WeightInfo as MigratorWeights, + }; + + let total_to_send = Self::calculate_remote_deposit(fields, subs); + + // define asset / destination from relay perspective + let roc = MultiAsset { id: Concrete(Here.into_location()), fun: Fungible(total_to_send) }; + // People Chain: ParaId 1004 + let destination: MultiLocation = MultiLocation::new(0, Parachain(1004)); + + // Do `check_out` accounting since the XCM Executor's `InitiateTeleport` doesn't support + // unpaid teleports. + + // check out + xcm_config::LocalAssetTransactor::can_check_out( + &destination, + &roc, + // not used in AssetTransactor + &XcmContext { origin: None, message_id: [0; 32], topic: None }, + ) + .map_err(|_| pallet_xcm::Error::::CannotCheckOutTeleport)?; + xcm_config::LocalAssetTransactor::check_out( + &destination, + &roc, + // not used in AssetTransactor + &XcmContext { origin: None, message_id: [0; 32], topic: None }, + ); + + // reanchor + let roc_reanchored: MultiAssets = vec![MultiAsset { + id: Concrete(MultiLocation::new(1, Here)), + fun: Fungible(total_to_send), + }] + .into(); + + let poke = PeopleRuntimePallets::::IdentityMigrator(PokeDeposit(who.clone())); + let remote_weight_limit = MigratorWeights::::poke_deposit().saturating_mul(2); + + // Actual program to execute on People Chain. + let program: Xcm<()> = Xcm(vec![ + // Unpaid as this is constructed by the system, once per user. The user shouldn't have + // their balance reduced by teleport fees for the favor of migrating. + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + // Receive the asset into holding. + ReceiveTeleportedAsset(roc_reanchored), + // Deposit into the user's account. + DepositAsset { + assets: Wild(AllCounted(1)), + beneficiary: Junction::AccountId32 { network: None, id: who.clone().into() } + .into_location() + .into(), + }, + // Poke the deposit to reserve the appropriate amount on the parachain. 
+ Transact { + origin_kind: OriginKind::Superuser, + require_weight_at_most: remote_weight_limit, + call: poke.encode().into(), + }, + ]); + + // send + let _ = >::send( + RawOrigin::Root.into(), + Box::new(VersionedMultiLocation::V3(destination)), + Box::new(VersionedXcm::V3(program)), + )?; + Ok(()) + } +} diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index fd3e656f695733b6b9f0a5cb2d1fa2f664fda3e0..675e0a20b2b0c746450867e764078948e3655541 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -23,14 +23,15 @@ use pallet_nis::WithMaximumOf; use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use primitives::{ - slashing, AccountId, AccountIndex, Balance, BlockNumber, CandidateEvent, CandidateHash, - CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, - Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, Moment, Nonce, - OccupiedCoreAssumption, PersistedValidationData, ScrapedOnChainVotes, SessionInfo, Signature, - ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, PARACHAIN_KEY_TYPE_ID, + slashing, vstaging::NodeFeatures, AccountId, AccountIndex, Balance, BlockNumber, + CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, + ExecutorParams, GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, + InboundHrmpMessage, Moment, Nonce, OccupiedCoreAssumption, PersistedValidationData, + ScrapedOnChainVotes, SessionInfo, Signature, ValidationCode, ValidationCodeHash, ValidatorId, + ValidatorIndex, PARACHAIN_KEY_TYPE_ID, }; use runtime_common::{ - assigned_slots, auctions, claims, crowdloan, impl_runtime_weights, + assigned_slots, auctions, claims, crowdloan, identity_migrator, impl_runtime_weights, impls::{ LocatableAssetConverter, ToAuthor, VersionedLocatableAsset, VersionedMultiLocationConverter, }, @@ -49,7 +50,9 @@ use runtime_parachains::{ inclusion::{AggregateMessageOrigin, UmpQueueId}, initializer as parachains_initializer, origin as parachains_origin, paras as parachains_paras, paras_inherent as parachains_paras_inherent, - runtime_api_impl::v7 as parachains_runtime_api_impl, + runtime_api_impl::{ + v7 as parachains_runtime_api_impl, vstaging as parachains_staging_runtime_api_impl, + }, scheduler as parachains_scheduler, session_info as parachains_session_info, shared as parachains_shared, }; @@ -74,6 +77,7 @@ use frame_support::{ }; use frame_system::EnsureRoot; use pallet_grandpa::{fg_primitives, AuthorityId as GrandpaId}; +use pallet_identity::legacy::IdentityInfo; use pallet_im_online::sr25519::AuthorityId as ImOnlineId; use pallet_session::historical as session_historical; use pallet_transaction_payment::{CurrencyAdapter, FeeDetails, RuntimeDispatchInfo}; @@ -110,6 +114,10 @@ mod weights; // XCM configurations. pub mod xcm_config; +// Implemented types. +mod impls; +use impls::ToParachainIdentityReaper; + // Governance and configurations. pub mod governance; use governance::{ @@ -117,6 +125,9 @@ use governance::{ TreasurySpender, }; +#[cfg(test)] +mod tests; + mod validator_manager; impl_runtime_weights!(rococo_runtime_constants); @@ -125,6 +136,14 @@ impl_runtime_weights!(rococo_runtime_constants); #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); +/// Provides the `WASM_BINARY` build with `fast-runtime` feature enabled. +/// +/// This is for example useful for local test chains. 
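// A small, hypothetical usage sketch for the second wasm blob added in `build.rs` above and
// exposed through the `fast_runtime_binary` module that follows. It assumes the generated
// file provides the usual `WASM_BINARY: Option<&'static [u8]>` constant, as
// `substrate-wasm-builder` output normally does.
fn fast_rococo_wasm() -> &'static [u8] {
    rococo_runtime::fast_runtime_binary::WASM_BINARY
        .expect("fast-runtime wasm was not built; ensure wasm building is not skipped")
}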
+#[cfg(feature = "std")] +pub mod fast_runtime_binary { + include!(concat!(env!("OUT_DIR"), "/fast_runtime_binary.rs")); +} + /// Runtime version (Rococo). #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { @@ -155,8 +174,8 @@ pub fn native_version() -> NativeVersion { /// locking the state of the pallet and preventing further updates to identities and sub-identities. /// The locked state will be the genesis state of a new system chain and then removed from the Relay /// Chain. -pub struct IdentityCalls; -impl Contains for IdentityCalls { +pub struct IsIdentityCall; +impl Contains for IsIdentityCall { fn contains(c: &RuntimeCall) -> bool { matches!(c, RuntimeCall::Identity(_)) } @@ -168,7 +187,7 @@ parameter_types! { } impl frame_system::Config for Runtime { - type BaseCallFilter = EverythingBut; + type BaseCallFilter = EverythingBut; type BlockWeights = BlockWeights; type BlockLength = BlockLength; type DbWeight = RocksDbWeight; @@ -303,7 +322,8 @@ impl pallet_balances::Config for Runtime { type FreezeIdentifier = (); type MaxFreezes = ConstU32<1>; type RuntimeHoldReason = RuntimeHoldReason; - type MaxHolds = ConstU32<1>; + type RuntimeFreezeReason = RuntimeFreezeReason; + type MaxHolds = ConstU32<2>; } parameter_types! { @@ -600,7 +620,7 @@ impl claims::Config for Runtime { parameter_types! { // Minimum 100 bytes/ROC deposited (1 CENT/byte) pub const BasicDeposit: Balance = 1000 * CENTS; // 258 bytes on-chain - pub const FieldDeposit: Balance = 250 * CENTS; // 66 bytes on-chain + pub const ByteDeposit: Balance = deposit(0, 1); pub const SubAccountDeposit: Balance = 200 * CENTS; // 53 bytes on-chain pub const MaxSubAccounts: u32 = 100; pub const MaxAdditionalFields: u32 = 100; @@ -611,10 +631,10 @@ impl pallet_identity::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Currency = Balances; type BasicDeposit = BasicDeposit; - type FieldDeposit = FieldDeposit; + type ByteDeposit = ByteDeposit; type SubAccountDeposit = SubAccountDeposit; type MaxSubAccounts = MaxSubAccounts; - type MaxAdditionalFields = MaxAdditionalFields; + type IdentityInformation = IdentityInfo; type MaxRegistrars = MaxRegistrars; type Slashed = Treasury; type ForceOrigin = EitherOf, GeneralAdmin>; @@ -1074,6 +1094,14 @@ impl auctions::Config for Runtime { type WeightInfo = weights::runtime_common_auctions::WeightInfo; } +impl identity_migrator::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + // To be changed to `EnsureSigned` once there is a People Chain to migrate to. + type Reaper = EnsureRoot; + type ReapIdentityHandler = ToParachainIdentityReaper; + type WeightInfo = weights::runtime_common_identity_migrator::WeightInfo; +} + type NisCounterpartInstance = pallet_balances::Instance2; impl pallet_balances::Config for Runtime { type Balance = Balance; @@ -1090,9 +1118,10 @@ impl pallet_balances::Config for Runtime { type ReserveIdentifier = [u8; 8]; type WeightInfo = weights::pallet_balances_nis_counterpart_balances::WeightInfo; type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); - type MaxHolds = ConstU32<0>; - type MaxFreezes = ConstU32<0>; + type MaxHolds = ConstU32<2>; + type MaxFreezes = ConstU32<1>; } parameter_types! 
{ @@ -1232,6 +1261,10 @@ impl pallet_sudo::Config for Runtime { type WeightInfo = weights::pallet_sudo::WeightInfo; } +impl pallet_root_testing::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} + impl pallet_asset_rate::Config for Runtime { type WeightInfo = weights::pallet_asset_rate::WeightInfo; type RuntimeEvent = RuntimeEvent; @@ -1330,7 +1363,7 @@ construct_runtime! { // NIS pallet. Nis: pallet_nis::{Pallet, Call, Storage, Event, HoldReason} = 38, -// pub type NisCounterpartInstance = pallet_balances::Instance2; + // pub type NisCounterpartInstance = pallet_balances::Instance2; NisCounterpartBalances: pallet_balances:: = 45, // Parachains pallets. Start indices at 50 to leave room. @@ -1361,6 +1394,9 @@ construct_runtime! { // Pallet for sending XCM. XcmPallet: pallet_xcm::{Pallet, Call, Storage, Event, Origin, Config} = 99, + // Pallet for migrating Identity to a parachain. To be removed post-migration. + IdentityMigrator: identity_migrator::{Pallet, Call, Event} = 248, + ParasSudoWrapper: paras_sudo_wrapper::{Pallet, Call} = 250, AssignedSlots: assigned_slots::{Pallet, Call, Storage, Event, Config} = 251, @@ -1370,6 +1406,9 @@ construct_runtime! { // State trie migration pallet, only temporary. StateTrieMigration: pallet_state_trie_migration = 254, + // Root testing pallet. + RootTesting: pallet_root_testing::{Pallet, Call, Storage, Event} = 249, + // Sudo. Sudo: pallet_sudo::{Pallet, Call, Storage, Event, Config} = 255, } @@ -1412,18 +1451,82 @@ pub type Migrations = migrations::Unreleased; pub mod migrations { use super::*; + use frame_support::traits::LockIdentifier; + use frame_system::pallet_prelude::BlockNumberFor; + + parameter_types! { + pub const DemocracyPalletName: &'static str = "Democracy"; + pub const CouncilPalletName: &'static str = "Council"; + pub const TechnicalCommitteePalletName: &'static str = "TechnicalCommittee"; + pub const PhragmenElectionPalletName: &'static str = "PhragmenElection"; + pub const TechnicalMembershipPalletName: &'static str = "TechnicalMembership"; + pub const TipsPalletName: &'static str = "Tips"; + pub const PhragmenElectionPalletId: LockIdentifier = *b"phrelect"; + } + + // Special Config for Gov V1 pallets, allowing us to run migrations for them without + // implementing their configs on [`Runtime`]. + pub struct UnlockConfig; + impl pallet_democracy::migrations::unlock_and_unreserve_all_funds::UnlockConfig for UnlockConfig { + type Currency = Balances; + type MaxVotes = ConstU32<100>; + type MaxDeposits = ConstU32<100>; + type AccountId = AccountId; + type BlockNumber = BlockNumberFor; + type DbWeight = ::DbWeight; + type PalletName = DemocracyPalletName; + } + impl pallet_elections_phragmen::migrations::unlock_and_unreserve_all_funds::UnlockConfig + for UnlockConfig + { + type Currency = Balances; + type MaxVotesPerVoter = ConstU32<16>; + type PalletId = PhragmenElectionPalletId; + type AccountId = AccountId; + type DbWeight = ::DbWeight; + type PalletName = PhragmenElectionPalletName; + } + impl pallet_tips::migrations::unreserve_deposits::UnlockConfig<()> for UnlockConfig { + type Currency = Balances; + type Hash = Hash; + type DataDepositPerByte = DataDepositPerByte; + type TipReportDepositBase = TipReportDepositBase; + type AccountId = AccountId; + type BlockNumber = BlockNumberFor; + type DbWeight = ::DbWeight; + type PalletName = TipsPalletName; + } + /// Unreleased migrations. 
Add new ones here: pub type Unreleased = ( - pallet_society::migrations::VersionCheckedMigrateToV2, + pallet_society::migrations::MigrateToV2, pallet_im_online::migration::v1::Migration, parachains_configuration::migration::v7::MigrateToV7, - assigned_slots::migration::v1::VersionCheckedMigrateToV1, + assigned_slots::migration::v1::MigrateToV1, parachains_scheduler::migration::v1::MigrateToV1, parachains_configuration::migration::v8::MigrateToV8, parachains_configuration::migration::v9::MigrateToV9, - paras_registrar::migration::VersionCheckedMigrateToV1, + paras_registrar::migration::MigrateToV1, pallet_referenda::migration::v1::MigrateV0ToV1, pallet_referenda::migration::v1::MigrateV0ToV1, + + // Unlock & unreserve Gov1 funds + + pallet_elections_phragmen::migrations::unlock_and_unreserve_all_funds::UnlockAndUnreserveAllFunds, + pallet_democracy::migrations::unlock_and_unreserve_all_funds::UnlockAndUnreserveAllFunds, + pallet_tips::migrations::unreserve_deposits::UnreserveDeposits, + + // Delete all Gov v1 pallet storage key/values. + + frame_support::migrations::RemovePallet::DbWeight>, + frame_support::migrations::RemovePallet::DbWeight>, + frame_support::migrations::RemovePallet::DbWeight>, + frame_support::migrations::RemovePallet::DbWeight>, + frame_support::migrations::RemovePallet::DbWeight>, + frame_support::migrations::RemovePallet::DbWeight>, + + pallet_grandpa::migrations::MigrateV4ToV5, + parachains_configuration::migration::v10::MigrateToV10, ); } @@ -1474,6 +1577,7 @@ mod benches { [runtime_common::auctions, Auctions] [runtime_common::crowdloan, Crowdloan] [runtime_common::claims, Claims] + [runtime_common::identity_migrator, IdentityMigrator] [runtime_common::slots, Slots] [runtime_common::paras_registrar, Registrar] [runtime_parachains::configuration, Configuration] @@ -1513,7 +1617,7 @@ mod benches { [pallet_asset_rate, AssetRate] [pallet_whitelist, Whitelist] // XCM - [pallet_xcm, XcmPallet] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] [pallet_xcm_benchmarks::fungible, pallet_xcm_benchmarks::fungible::Pallet::] [pallet_xcm_benchmarks::generic, pallet_xcm_benchmarks::generic::Pallet::] ); @@ -1585,7 +1689,7 @@ sp_api::impl_runtime_apis! { } } - #[api_version(7)] + #[api_version(9)] impl primitives::runtime_api::ParachainHost for Runtime { fn validators() -> Vec { parachains_runtime_api_impl::validators::() @@ -1728,6 +1832,14 @@ sp_api::impl_runtime_apis! { fn async_backing_params() -> primitives::AsyncBackingParams { parachains_runtime_api_impl::async_backing_params::() } + + fn disabled_validators() -> Vec { + parachains_staging_runtime_api_impl::disabled_validators::() + } + + fn node_features() -> NodeFeatures { + parachains_staging_runtime_api_impl::node_features::() + } } #[api_version(3)] @@ -1984,6 +2096,8 @@ sp_api::impl_runtime_apis! { use frame_system_benchmarking::Pallet as SystemBench; use frame_benchmarking::baseline::Pallet as Baseline; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; + let mut list = Vec::::new(); list_benchmarks!(list, extra); @@ -2001,17 +2115,60 @@ sp_api::impl_runtime_apis! 
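// The `RemovePallet` entries in the `Unreleased` tuple above take two type
// parameters: a `Get<&'static str>` holding the pallet's storage prefix (the
// `*PalletName` constants declared in this module) and the runtime's database
// weights. A minimal sketch of one such alias, assuming the surrounding
// `migrations` module is in scope:
type RemoveDemocracy = frame_support::migrations::RemovePallet<
    DemocracyPalletName,
    <Runtime as frame_system::Config>::DbWeight,
>;
// On `on_runtime_upgrade` this clears every key under the `Democracy` prefix,
// which is presumably why the removals are listed after the unlock/unreserve
// migrations that still need to read that storage.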
{ use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; use frame_system_benchmarking::Pallet as SystemBench; use frame_benchmarking::baseline::Pallet as Baseline; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; use sp_storage::TrackedStorageKey; use xcm::latest::prelude::*; use xcm_config::{ - LocalCheckAccount, LocationConverter, AssetHub, TokenLocation, XcmConfig, + AssetHub, LocalCheckAccount, LocationConverter, TokenLocation, XcmConfig, }; + parameter_types! { + pub ExistentialDepositMultiAsset: Option = Some(( + TokenLocation::get(), + ExistentialDeposit::get() + ).into()); + pub ToParachain: ParaId = rococo_runtime_constants::system_parachain::ASSET_HUB_ID.into(); + } + impl frame_system_benchmarking::Config for Runtime {} impl frame_benchmarking::baseline::Config for Runtime {} + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(crate::xcm_config::AssetHub::get()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported to/from AH. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Here.into()) + }, + crate::xcm_config::AssetHub::get(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay can reserve transfer native token to some random parachain. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Here.into()) + }, + Parachain(43211234).into(), + )) + } + } impl pallet_xcm_benchmarks::Config for Runtime { type XcmConfig = XcmConfig; type AccountIdConverter = LocationConverter; + type DeliveryHelper = runtime_common::xcm_sender::ToParachainDeliveryHelper< + XcmConfig, + ExistentialDepositMultiAsset, + xcm_config::PriceForChildParachainDelivery, + ToParachain, + (), + >; fn valid_destination() -> Result { Ok(AssetHub::get()) } @@ -2048,6 +2205,7 @@ sp_api::impl_runtime_apis! { } impl pallet_xcm_benchmarks::generic::Config for Runtime { + type TransactAsset = Balances; type RuntimeCall = RuntimeCall; fn worst_case_response() -> (u64, Response) { @@ -2120,62 +2278,6 @@ sp_api::impl_runtime_apis! 
{ } } -#[cfg(test)] -mod tests { - use std::collections::HashSet; - - use super::*; - use frame_support::traits::WhitelistedStorageKeys; - use sp_core::hexdisplay::HexDisplay; - - #[test] - fn check_whitelist() { - let whitelist: HashSet = AllPalletsWithSystem::whitelisted_storage_keys() - .iter() - .map(|e| HexDisplay::from(&e.key).to_string()) - .collect(); - - // Block number - assert!( - whitelist.contains("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac") - ); - // Total issuance - assert!( - whitelist.contains("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80") - ); - // Execution phase - assert!( - whitelist.contains("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a") - ); - // Event count - assert!( - whitelist.contains("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850") - ); - // System events - assert!( - whitelist.contains("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7") - ); - // XcmPallet VersionDiscoveryQueue - assert!( - whitelist.contains("1405f2411d0af5a7ff397e7c9dc68d194a222ba0333561192e474c59ed8e30e1") - ); - // XcmPallet SafeXcmVersion - assert!( - whitelist.contains("1405f2411d0af5a7ff397e7c9dc68d196323ae84c43568be0d1394d5d0d522c4") - ); - } -} - -#[cfg(test)] -mod encoding_tests { - use super::*; - - #[test] - fn nis_hold_reason_encoding_is_correct() { - assert_eq!(RuntimeHoldReason::Nis(pallet_nis::HoldReason::NftReceipt).encode(), [38, 0]); - } -} - #[cfg(all(test, feature = "try-runtime"))] mod remote_tests { use super::*; diff --git a/polkadot/runtime/rococo/src/tests.rs b/polkadot/runtime/rococo/src/tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..464a8c4f5454dce32ec43388061c83056f0a260c --- /dev/null +++ b/polkadot/runtime/rococo/src/tests.rs @@ -0,0 +1,63 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! 
Tests for the Rococo Runtime Configuration + +use crate::*; +use std::collections::HashSet; + +use frame_support::traits::WhitelistedStorageKeys; +use sp_core::hexdisplay::HexDisplay; + +#[test] +fn check_whitelist() { + let whitelist: HashSet = AllPalletsWithSystem::whitelisted_storage_keys() + .iter() + .map(|e| HexDisplay::from(&e.key).to_string()) + .collect(); + + // Block number + assert!(whitelist.contains("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac")); + // Total issuance + assert!(whitelist.contains("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80")); + // Execution phase + assert!(whitelist.contains("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a")); + // Event count + assert!(whitelist.contains("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850")); + // System events + assert!(whitelist.contains("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7")); + // XcmPallet VersionDiscoveryQueue + assert!(whitelist.contains("1405f2411d0af5a7ff397e7c9dc68d194a222ba0333561192e474c59ed8e30e1")); + // XcmPallet SafeXcmVersion + assert!(whitelist.contains("1405f2411d0af5a7ff397e7c9dc68d196323ae84c43568be0d1394d5d0d522c4")); +} + +#[test] +fn check_treasury_pallet_id() { + assert_eq!( + ::index() as u8, + rococo_runtime_constants::TREASURY_PALLET_ID + ); +} + +mod encoding_tests { + use super::*; + + #[test] + fn nis_hold_reason_encoding_is_correct() { + assert_eq!(RuntimeHoldReason::Nis(pallet_nis::HoldReason::NftReceipt).encode(), [38, 0]); + } +} diff --git a/polkadot/runtime/rococo/src/weights/mod.rs b/polkadot/runtime/rococo/src/weights/mod.rs index 9c563a67d98b721b08cb3d84c4f5562682749b1d..bd2079ce8277ef511f38cfde8251ddeb9087d446 100644 --- a/polkadot/runtime/rococo/src/weights/mod.rs +++ b/polkadot/runtime/rococo/src/weights/mod.rs @@ -46,6 +46,7 @@ pub mod runtime_common_assigned_slots; pub mod runtime_common_auctions; pub mod runtime_common_claims; pub mod runtime_common_crowdloan; +pub mod runtime_common_identity_migrator; pub mod runtime_common_paras_registrar; pub mod runtime_common_slots; pub mod runtime_parachains_assigner_on_demand; diff --git a/polkadot/runtime/rococo/src/weights/pallet_identity.rs b/polkadot/runtime/rococo/src/weights/pallet_identity.rs index e10c042dde6aa167e63b56244f7f5dcb21960915..e8c25269ac37a0fcec86d89a2584183fe654aaa9 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_identity.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_identity.rs @@ -65,8 +65,7 @@ impl pallet_identity::WeightInfo for WeightInfo { /// Storage: Identity IdentityOf (r:1 w:1) /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) /// The range of component `r` is `[1, 20]`. - /// The range of component `x` is `[0, 100]`. 
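// The hex constants asserted in `check_whitelist` (in the new `tests.rs`
// above) are ordinary Substrate storage-key prefixes, i.e.
// twox_128(pallet_prefix) ++ twox_128(storage_item_name). A small sketch of
// how such a key can be recomputed; only `sp_core`, which the test module
// already pulls in, is assumed:
fn well_known_key(pallet: &[u8], item: &[u8]) -> Vec<u8> {
    // 16-byte xxHash of the pallet prefix, followed by the hash of the item name.
    let mut key = sp_core::hashing::twox_128(pallet).to_vec();
    key.extend_from_slice(&sp_core::hashing::twox_128(item));
    key
}
// For example, `well_known_key(b"System", b"Number")` reproduces
// 26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac (the
// "Block number" entry checked by the test), and
// `well_known_key(b"Balances", b"TotalIssuance")` reproduces the
// "Total issuance" entry.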
- fn set_identity(r: u32, x: u32, ) -> Weight { + fn set_identity(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `442 + r * (5 ±0)` // Estimated: `11003` @@ -75,8 +74,6 @@ impl pallet_identity::WeightInfo for WeightInfo { .saturating_add(Weight::from_parts(0, 11003)) // Standard Error: 2_307 .saturating_add(Weight::from_parts(92_753, 0).saturating_mul(r.into())) - // Standard Error: 450 - .saturating_add(Weight::from_parts(449_529, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -130,8 +127,7 @@ impl pallet_identity::WeightInfo for WeightInfo { /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) /// The range of component `r` is `[1, 20]`. /// The range of component `s` is `[0, 100]`. - /// The range of component `x` is `[0, 100]`. - fn clear_identity(_r: u32, s: u32, x: u32, ) -> Weight { + fn clear_identity(_r: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `469 + r * (5 ±0) + s * (32 ±0) + x * (66 ±0)` // Estimated: `11003` @@ -140,8 +136,6 @@ impl pallet_identity::WeightInfo for WeightInfo { .saturating_add(Weight::from_parts(0, 11003)) // Standard Error: 1_353 .saturating_add(Weight::from_parts(1_074_019, 0).saturating_mul(s.into())) - // Standard Error: 1_353 - .saturating_add(Weight::from_parts(229_947, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -151,8 +145,7 @@ impl pallet_identity::WeightInfo for WeightInfo { /// Storage: Identity IdentityOf (r:1 w:1) /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) /// The range of component `r` is `[1, 20]`. - /// The range of component `x` is `[0, 100]`. - fn request_judgement(r: u32, x: u32, ) -> Weight { + fn request_judgement(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `367 + r * (57 ±0) + x * (66 ±0)` // Estimated: `11003` @@ -161,16 +154,14 @@ impl pallet_identity::WeightInfo for WeightInfo { .saturating_add(Weight::from_parts(0, 11003)) // Standard Error: 2_214 .saturating_add(Weight::from_parts(83_822, 0).saturating_mul(r.into())) - // Standard Error: 432 - .saturating_add(Weight::from_parts(458_801, 0).saturating_mul(x.into())) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: Identity IdentityOf (r:1 w:1) /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) /// The range of component `r` is `[1, 20]`. - /// The range of component `x` is `[0, 100]`. 
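// With the `x` (extra identity fields) component dropped, each function above
// reduces to a base part plus a per-registrar term. This tracks the earlier
// switch in this diff from `FieldDeposit`/`MaxAdditionalFields` to
// `ByteDeposit`/`IdentityInformation`: the number of additional fields is no
// longer an input to these extrinsics, so the benchmarks no longer vary over
// it. A self-contained sketch of how such a formula is evaluated; the
// constants are copied from `set_identity` above purely for illustration and
// `RocksDbWeight` stands in for `T::DbWeight`:
use frame_support::weights::{constants::RocksDbWeight, Weight};

fn set_identity_weight_sketch(r: u64) -> Weight {
    let db = RocksDbWeight::get();
    Weight::from_parts(0, 11_003)
        // per-registrar ref_time term from the benchmark regression
        .saturating_add(Weight::from_parts(92_753, 0).saturating_mul(r))
        // one read and one write of `Identity::IdentityOf`
        .saturating_add(db.reads(1))
        .saturating_add(db.writes(1))
}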
- fn cancel_request(r: u32, x: u32, ) -> Weight { + fn cancel_request(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `398 + x * (66 ±0)` // Estimated: `11003` @@ -179,8 +170,6 @@ impl pallet_identity::WeightInfo for WeightInfo { .saturating_add(Weight::from_parts(0, 11003)) // Standard Error: 2_528 .saturating_add(Weight::from_parts(85_593, 0).saturating_mul(r.into())) - // Standard Error: 493 - .saturating_add(Weight::from_parts(468_140, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -234,8 +223,7 @@ impl pallet_identity::WeightInfo for WeightInfo { /// Storage: Identity IdentityOf (r:1 w:1) /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) /// The range of component `r` is `[1, 19]`. - /// The range of component `x` is `[0, 100]`. - fn provide_judgement(r: u32, x: u32, ) -> Weight { + fn provide_judgement(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `445 + r * (57 ±0) + x * (66 ±0)` // Estimated: `11003` @@ -244,8 +232,6 @@ impl pallet_identity::WeightInfo for WeightInfo { .saturating_add(Weight::from_parts(0, 11003)) // Standard Error: 2_881 .saturating_add(Weight::from_parts(109_812, 0).saturating_mul(r.into())) - // Standard Error: 533 - .saturating_add(Weight::from_parts(733_244, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -259,8 +245,7 @@ impl pallet_identity::WeightInfo for WeightInfo { /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) /// The range of component `r` is `[1, 20]`. /// The range of component `s` is `[0, 100]`. - /// The range of component `x` is `[0, 100]`. - fn kill_identity(r: u32, s: u32, x: u32, ) -> Weight { + fn kill_identity(r: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `676 + r * (5 ±0) + s * (32 ±0) + x * (66 ±0)` // Estimated: `11003` @@ -271,8 +256,6 @@ impl pallet_identity::WeightInfo for WeightInfo { .saturating_add(Weight::from_parts(15_486, 0).saturating_mul(r.into())) // Standard Error: 1_275 .saturating_add(Weight::from_parts(1_085_117, 0).saturating_mul(s.into())) - // Standard Error: 1_275 - .saturating_add(Weight::from_parts(228_226, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) diff --git a/polkadot/runtime/rococo/src/weights/pallet_sudo.rs b/polkadot/runtime/rococo/src/weights/pallet_sudo.rs index 83215af5b8aecf5a79b084f24d0cfd8506f5b559..694174954fc78b1eeb4d93f78e6b26d5bab22b83 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_sudo.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_sudo.rs @@ -17,24 +17,25 @@ //! Autogenerated weights for `pallet_sudo` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot +// target/production/polkadot // benchmark // pallet -// --chain=rococo-dev // --steps=50 // --repeat=20 -// --pallet=pallet_sudo // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_sudo +// --chain=rococo-dev +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,38 +48,50 @@ use core::marker::PhantomData; /// Weight functions for `pallet_sudo`. pub struct WeightInfo(PhantomData); impl pallet_sudo::WeightInfo for WeightInfo { - /// Storage: Sudo Key (r:1 w:1) - /// Proof: Sudo Key (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) + /// Storage: `Sudo::Key` (r:1 w:1) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) fn set_key() -> Weight { // Proof Size summary in bytes: // Measured: `132` // Estimated: `1517` - // Minimum execution time: 13_047_000 picoseconds. - Weight::from_parts(13_325_000, 0) + // Minimum execution time: 8_432_000 picoseconds. + Weight::from_parts(8_757_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Sudo Key (r:1 w:0) - /// Proof: Sudo Key (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) + /// Storage: `Sudo::Key` (r:1 w:0) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) fn sudo() -> Weight { // Proof Size summary in bytes: // Measured: `132` // Estimated: `1517` - // Minimum execution time: 13_250_000 picoseconds. - Weight::from_parts(13_544_000, 0) + // Minimum execution time: 9_167_000 picoseconds. + Weight::from_parts(9_397_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) } - /// Storage: Sudo Key (r:1 w:0) - /// Proof: Sudo Key (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) + /// Storage: `Sudo::Key` (r:1 w:0) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) fn sudo_as() -> Weight { // Proof Size summary in bytes: // Measured: `132` // Estimated: `1517` - // Minimum execution time: 13_424_000 picoseconds. - Weight::from_parts(13_801_000, 0) + // Minimum execution time: 9_133_000 picoseconds. + Weight::from_parts(9_573_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) } + /// Storage: `Sudo::Key` (r:1 w:1) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + fn remove_key() -> Weight { + // Proof Size summary in bytes: + // Measured: `132` + // Estimated: `1517` + // Minimum execution time: 7_374_000 picoseconds. 
+ Weight::from_parts(7_702_000, 0) + .saturating_add(Weight::from_parts(0, 1517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/polkadot/runtime/rococo/src/weights/pallet_vesting.rs b/polkadot/runtime/rococo/src/weights/pallet_vesting.rs index fd820f1aea3241521e650b1e6af3af32236a465d..2596207d5837df6776177b71480c57af61ac02ce 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_vesting.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_vesting.rs @@ -187,16 +187,29 @@ impl pallet_vesting::WeightInfo for WeightInfo { .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[2, 28]`. + fn force_remove_vesting_schedule(l: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `555 + l * (25 ±0) + s * (36 ±0)` + // Estimated: `4764` + // Minimum execution time: 41_497_000 picoseconds. + Weight::from_parts(38_763_834, 4764) + // Standard Error: 2_030 + .saturating_add(Weight::from_parts(99_580, 0).saturating_mul(l.into())) + // Standard Error: 3_750 + .saturating_add(Weight::from_parts(132_188, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) + } fn not_unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `378 + l * (25 ±0) + s * (36 ±0)` diff --git a/polkadot/runtime/rococo/src/weights/pallet_xcm.rs b/polkadot/runtime/rococo/src/weights/pallet_xcm.rs index 43b4358b89038530ad66545e195aece5a4e0c874..aafded3f7319fd3717f8e9a04ca4af80bd34547a 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_xcm.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_xcm.rs @@ -17,10 +17,10 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! 
HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // target/production/polkadot @@ -29,14 +29,13 @@ // --steps=50 // --repeat=20 // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/var/lib/gitlab-runner/builds/zyw4fam_/0/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json // --pallet=pallet_xcm // --chain=rococo-dev -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -49,58 +48,56 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn send() -> Weight { // Proof Size summary in bytes: - // Measured: `565` - // Estimated: `4030` - // Minimum execution time: 37_039_000 picoseconds. - Weight::from_parts(37_605_000, 0) - .saturating_add(Weight::from_parts(0, 4030)) - .saturating_add(T::DbWeight::get().reads(7)) - .saturating_add(T::DbWeight::get().writes(3)) + // Measured: `142` + // Estimated: `3607` + // Minimum execution time: 27_328_000 picoseconds. + Weight::from_parts(27_976_000, 0) + .saturating_add(Weight::from_parts(0, 3607)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) } fn teleport_assets() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 21_646_000 picoseconds. - Weight::from_parts(22_119_000, 0) + // Minimum execution time: 16_280_000 picoseconds. + Weight::from_parts(16_904_000, 0) .saturating_add(Weight::from_parts(0, 0)) } fn reserve_transfer_assets() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 21_353_000 picoseconds. - Weight::from_parts(21_768_000, 0) + // Minimum execution time: 15_869_000 picoseconds. 
+ Weight::from_parts(16_264_000, 0) .saturating_add(Weight::from_parts(0, 0)) } fn execute() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_942_000 picoseconds. - Weight::from_parts(10_110_000, 0) + // Minimum execution time: 6_923_000 picoseconds. + Weight::from_parts(7_432_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: XcmPallet SupportedVersion (r:0 w:1) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::SupportedVersion` (r:0 w:1) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_951_000 picoseconds. - Weight::from_parts(10_182_000, 0) + // Minimum execution time: 7_333_000 picoseconds. + Weight::from_parts(7_566_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -108,171 +105,189 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_163_000 picoseconds. - Weight::from_parts(3_298_000, 0) + // Minimum execution time: 2_219_000 picoseconds. + Weight::from_parts(2_375_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: XcmPallet VersionNotifiers (r:1 w:1) - /// Proof Skipped: XcmPallet VersionNotifiers (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet QueryCounter (r:1 w:1) - /// Proof Skipped: XcmPallet QueryCounter (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet Queries (r:0 w:1) - /// Proof Skipped: XcmPallet Queries (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifiers` (r:1 w:1) + /// Proof: `XcmPallet::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::QueryCounter` (r:1 w:1) + /// Proof: `XcmPallet::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::Queries` 
(r:0 w:1) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_subscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `565` - // Estimated: `4030` - // Minimum execution time: 41_207_000 picoseconds. - Weight::from_parts(41_879_000, 0) - .saturating_add(Weight::from_parts(0, 4030)) - .saturating_add(T::DbWeight::get().reads(9)) - .saturating_add(T::DbWeight::get().writes(6)) + // Measured: `142` + // Estimated: `3607` + // Minimum execution time: 30_650_000 picoseconds. + Weight::from_parts(31_683_000, 0) + .saturating_add(Weight::from_parts(0, 3607)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(5)) } - /// Storage: XcmPallet VersionNotifiers (r:1 w:1) - /// Proof Skipped: XcmPallet VersionNotifiers (max_values: None, max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet Queries (r:0 w:1) - /// Proof Skipped: XcmPallet Queries (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifiers` (r:1 w:1) + /// Proof: `XcmPallet::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::Queries` (r:0 w:1) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_unsubscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `837` - // Estimated: `4302` - // Minimum execution time: 44_763_000 picoseconds. - Weight::from_parts(45_368_000, 0) - .saturating_add(Weight::from_parts(0, 4302)) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(5)) + // Measured: `322` + // Estimated: `3787` + // Minimum execution time: 37_666_000 picoseconds. 
+ Weight::from_parts(38_920_000, 0) + .saturating_add(Weight::from_parts(0, 3787)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: XcmPallet XcmExecutionSuspended (r:0 w:1) - /// Proof Skipped: XcmPallet XcmExecutionSuspended (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `XcmPallet::XcmExecutionSuspended` (r:0 w:1) + /// Proof: `XcmPallet::XcmExecutionSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn force_suspension() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_089_000 picoseconds. - Weight::from_parts(3_246_000, 0) + // Minimum execution time: 2_244_000 picoseconds. + Weight::from_parts(2_425_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: XcmPallet SupportedVersion (r:4 w:2) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::SupportedVersion` (r:4 w:2) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: - // Measured: `229` - // Estimated: `11119` - // Minimum execution time: 16_733_000 picoseconds. - Weight::from_parts(17_354_000, 0) - .saturating_add(Weight::from_parts(0, 11119)) + // Measured: `26` + // Estimated: `10916` + // Minimum execution time: 14_710_000 picoseconds. + Weight::from_parts(15_156_000, 0) + .saturating_add(Weight::from_parts(0, 10916)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: XcmPallet VersionNotifiers (r:4 w:2) - /// Proof Skipped: XcmPallet VersionNotifiers (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifiers` (r:4 w:2) + /// Proof: `XcmPallet::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: - // Measured: `233` - // Estimated: `11123` - // Minimum execution time: 16_959_000 picoseconds. - Weight::from_parts(17_306_000, 0) - .saturating_add(Weight::from_parts(0, 11123)) + // Measured: `30` + // Estimated: `10920` + // Minimum execution time: 14_630_000 picoseconds. + Weight::from_parts(15_290_000, 0) + .saturating_add(Weight::from_parts(0, 10920)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: XcmPallet VersionNotifyTargets (r:5 w:0) - /// Proof Skipped: XcmPallet VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:5 w:0) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `13608` - // Minimum execution time: 17_964_000 picoseconds. - Weight::from_parts(18_548_000, 0) - .saturating_add(Weight::from_parts(0, 13608)) + // Measured: `40` + // Estimated: `13405` + // Minimum execution time: 16_686_000 picoseconds. 
+ Weight::from_parts(17_332_000, 0) + .saturating_add(Weight::from_parts(0, 13405)) .saturating_add(T::DbWeight::get().reads(5)) } - /// Storage: XcmPallet VersionNotifyTargets (r:2 w:1) - /// Proof Skipped: XcmPallet VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:2 w:1) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_current_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `635` - // Estimated: `6575` - // Minimum execution time: 39_436_000 picoseconds. - Weight::from_parts(39_669_000, 0) - .saturating_add(Weight::from_parts(0, 6575)) - .saturating_add(T::DbWeight::get().reads(9)) - .saturating_add(T::DbWeight::get().writes(4)) + // Measured: `178` + // Estimated: `6118` + // Minimum execution time: 30_180_000 picoseconds. + Weight::from_parts(31_351_000, 0) + .saturating_add(Weight::from_parts(0, 6118)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: XcmPallet VersionNotifyTargets (r:3 w:0) - /// Proof Skipped: XcmPallet VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:3 w:0) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: - // Measured: `272` - // Estimated: `8687` - // Minimum execution time: 8_991_000 picoseconds. - Weight::from_parts(9_248_000, 0) - .saturating_add(Weight::from_parts(0, 8687)) + // Measured: `69` + // Estimated: `8484` + // Minimum execution time: 9_624_000 picoseconds. 
+ Weight::from_parts(10_029_000, 0) + .saturating_add(Weight::from_parts(0, 8484)) .saturating_add(T::DbWeight::get().reads(3)) } - /// Storage: XcmPallet VersionNotifyTargets (r:4 w:2) - /// Proof Skipped: XcmPallet VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:4 w:2) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `240` - // Estimated: `11130` - // Minimum execution time: 17_614_000 picoseconds. - Weight::from_parts(17_948_000, 0) - .saturating_add(Weight::from_parts(0, 11130)) + // Measured: `37` + // Estimated: `10927` + // Minimum execution time: 15_139_000 picoseconds. + Weight::from_parts(15_575_000, 0) + .saturating_add(Weight::from_parts(0, 10927)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: XcmPallet VersionNotifyTargets (r:4 w:2) - /// Proof Skipped: XcmPallet VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:4 w:2) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `639` - // Estimated: `11529` - // Minimum execution time: 45_531_000 picoseconds. - Weight::from_parts(46_533_000, 0) - .saturating_add(Weight::from_parts(0, 11529)) - .saturating_add(T::DbWeight::get().reads(11)) - .saturating_add(T::DbWeight::get().writes(5)) + // Measured: `182` + // Estimated: `11072` + // Minimum execution time: 37_871_000 picoseconds. 
+ Weight::from_parts(38_940_000, 0) + .saturating_add(Weight::from_parts(0, 11072)) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `XcmPallet::QueryCounter` (r:1 w:1) + /// Proof: `XcmPallet::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::Queries` (r:0 w:1) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn new_query() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `1485` + // Minimum execution time: 2_732_000 picoseconds. + Weight::from_parts(2_892_000, 0) + .saturating_add(Weight::from_parts(0, 1485)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `XcmPallet::Queries` (r:1 w:1) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn take_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `7576` + // Estimated: `11041` + // Minimum execution time: 23_813_000 picoseconds. + Weight::from_parts(24_201_000, 0) + .saturating_add(Weight::from_parts(0, 11041)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) } } diff --git a/polkadot/runtime/rococo/src/weights/runtime_common_identity_migrator.rs b/polkadot/runtime/rococo/src/weights/runtime_common_identity_migrator.rs new file mode 100644 index 0000000000000000000000000000000000000000..cec357453b67be400c0191ac7d5c12e6961a4bee --- /dev/null +++ b/polkadot/runtime/rococo/src/weights/runtime_common_identity_migrator.rs @@ -0,0 +1,97 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `runtime_common::identity_migrator` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-11-07, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `sbtb`, CPU: `13th Gen Intel(R) Core(TM) i7-1365U` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot +// benchmark +// pallet +// --chain=rococo-dev +// --steps=2 +// --repeat=1 +// --pallet=runtime_common::identity_migrator +// --extrinsic=* +// --output=./migrator-release.rs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `runtime_common::identity_migrator`. 
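// This new weight file is what the `identity_migrator::Config` impl earlier in
// the diff points at; the usual wiring passes the runtime as the generic
// parameter (sketch only):
//
// impl identity_migrator::Config for Runtime {
//     ...
//     type WeightInfo = weights::runtime_common_identity_migrator::WeightInfo<Runtime>;
// }
//
// Per the storage annotations below, `reap_identity` clears `IdentityOf`,
// `SubsOf` and the `SuperOf` entries of up to `s` sub-accounts, and enqueues a
// downward XCM message (the `Dmp::*` writes) for the `ToParachainIdentityReaper`
// set as `ReapIdentityHandler` earlier in the diff.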
+pub struct WeightInfo(PhantomData); +impl runtime_common::identity_migrator::WeightInfo for WeightInfo { + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Identity::SuperOf` (r:0 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// The range of component `r` is `[0, 20]`. + /// The range of component `s` is `[0, 100]`. + fn reap_identity(r: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `7292 + r * (8 ±0) + s * (32 ±0)` + // Estimated: `11003 + r * (8 ±0) + s * (33 ±0)` + // Minimum execution time: 163_756_000 picoseconds. + Weight::from_parts(158_982_500, 0) + .saturating_add(Weight::from_parts(0, 11003)) + // Standard Error: 1_143_629 + .saturating_add(Weight::from_parts(238_675, 0).saturating_mul(r.into())) + // Standard Error: 228_725 + .saturating_add(Weight::from_parts(1_529_645, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(5)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) + .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 33).saturating_mul(s.into())) + } + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + fn poke_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `7229` + // Estimated: `11003` + // Minimum execution time: 137_570_000 picoseconds. 
+ Weight::from_parts(137_570_000, 0) + .saturating_add(Weight::from_parts(0, 11003)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } +} diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_configuration.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_configuration.rs index 29f387657786afd1e07c207d733b7cfddfd4d83a..34541b83597e6284a401a171e703b200366114a0 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_parachains_configuration.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_configuration.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `runtime_parachains::configuration` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-08-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-fljshgub-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -31,11 +31,11 @@ // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json // --pallet=runtime_parachains::configuration // --chain=rococo-dev -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -56,11 +56,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_block_number() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 9_051_000 picoseconds. - Weight::from_parts(9_496_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_793_000 picoseconds. + Weight::from_parts(8_192_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -72,11 +72,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_u32() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 9_104_000 picoseconds. - Weight::from_parts(9_403_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_819_000 picoseconds. 
+ Weight::from_parts(8_004_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -88,11 +88,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_option_u32() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 9_112_000 picoseconds. - Weight::from_parts(9_495_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_760_000 picoseconds. + Weight::from_parts(8_174_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -114,11 +114,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_balance() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 9_011_000 picoseconds. - Weight::from_parts(9_460_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_814_000 picoseconds. + Weight::from_parts(8_098_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -130,11 +130,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_executor_params() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 9_940_000 picoseconds. - Weight::from_parts(10_288_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 10_028_000 picoseconds. + Weight::from_parts(10_386_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -146,11 +146,27 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_perbill() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 9_192_000 picoseconds. - Weight::from_parts(9_595_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_867_000 picoseconds. 
+ Weight::from_parts(8_191_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn set_node_feature() -> Weight { + // Proof Size summary in bytes: + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 10_158_000 picoseconds. + Weight::from_parts(10_430_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/polkadot/runtime/rococo/src/weights/xcm/mod.rs b/polkadot/runtime/rococo/src/weights/xcm/mod.rs index 1d613717dc52798ece0ac9c484b8f91b66f53803..cc485dfbaf7e4e29fe7fdd50960490bc9c05665c 100644 --- a/polkadot/runtime/rococo/src/weights/xcm/mod.rs +++ b/polkadot/runtime/rococo/src/weights/xcm/mod.rs @@ -91,7 +91,6 @@ impl XcmWeightInfo for RococoXcmWeight { assets.weigh_multi_assets(XcmBalancesWeight::::withdraw_asset()) } fn reserve_asset_deposited(assets: &MultiAssets) -> Weight { - // Rococo doesn't support ReserveAssetDeposited, so this benchmark has a default weight assets.weigh_multi_assets(XcmBalancesWeight::::reserve_asset_deposited()) } fn receive_teleported_asset(assets: &MultiAssets) -> Weight { diff --git a/polkadot/runtime/rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/polkadot/runtime/rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs index 59c49e4f8c821beb00a9493ad7b07928cfe6daf9..60c40429b1ac323e69bca1345c41c3c04c221f79 100644 --- a/polkadot/runtime/rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +++ b/polkadot/runtime/rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs @@ -17,10 +17,10 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::fungible` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-17, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-gghbxkbs-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-nbnwcyh-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 // Executed Command: // target/production/polkadot @@ -31,12 +31,12 @@ // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json // --pallet=pallet_xcm_benchmarks::fungible // --chain=rococo-dev -// --header=./file_header.txt -// --template=./xcm/pallet-xcm-benchmarks/template.hbs -// --output=./runtime/rococo/src/weights/xcm/ +// --header=./polkadot/file_header.txt +// --template=./polkadot/xcm/pallet-xcm-benchmarks/template.hbs +// --output=./polkadot/runtime/rococo/src/weights/xcm/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -55,8 +55,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `3593` - // Minimum execution time: 24_892_000 picoseconds. - Weight::from_parts(25_219_000, 3593) + // Minimum execution time: 23_189_000 picoseconds. + Weight::from_parts(23_896_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -66,8 +66,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `6196` - // Minimum execution time: 52_112_000 picoseconds. - Weight::from_parts(53_104_000, 6196) + // Minimum execution time: 50_299_000 picoseconds. + Weight::from_parts(50_962_000, 6196) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -83,10 +83,10 @@ impl WeightInfo { /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn transfer_reserve_asset() -> Weight { // Proof Size summary in bytes: - // Measured: `210` + // Measured: `243` // Estimated: `6196` - // Minimum execution time: 76_459_000 picoseconds. - Weight::from_parts(79_152_000, 6196) + // Minimum execution time: 71_748_000 picoseconds. + Weight::from_parts(74_072_000, 6196) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -96,8 +96,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_000_000_000_000 picoseconds. - Weight::from_parts(2_000_000_000_000, 0) + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) } /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -109,10 +109,10 @@ impl WeightInfo { /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn initiate_reserve_withdraw() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3574` - // Minimum execution time: 29_734_000 picoseconds. - Weight::from_parts(30_651_000, 3574) + // Measured: `142` + // Estimated: `3607` + // Minimum execution time: 27_806_000 picoseconds. + Weight::from_parts(28_594_000, 3607) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -122,8 +122,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3593` - // Minimum execution time: 23_028_000 picoseconds. - Weight::from_parts(23_687_000, 3593) + // Minimum execution time: 21_199_000 picoseconds. 
+ Weight::from_parts(21_857_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -133,8 +133,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 26_399_000 picoseconds. - Weight::from_parts(27_262_000, 3593) + // Minimum execution time: 23_578_000 picoseconds. + Weight::from_parts(24_060_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -150,10 +150,10 @@ impl WeightInfo { /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn deposit_reserve_asset() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3593` - // Minimum execution time: 52_015_000 picoseconds. - Weight::from_parts(53_498_000, 3593) + // Measured: `142` + // Estimated: `3607` + // Minimum execution time: 48_522_000 picoseconds. + Weight::from_parts(49_640_000, 3607) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -169,10 +169,10 @@ impl WeightInfo { /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn initiate_teleport() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3593` - // Minimum execution time: 53_833_000 picoseconds. - Weight::from_parts(55_688_000, 3593) + // Measured: `142` + // Estimated: `3607` + // Minimum execution time: 50_429_000 picoseconds. + Weight::from_parts(51_295_000, 3607) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) } diff --git a/polkadot/runtime/rococo/src/xcm_config.rs b/polkadot/runtime/rococo/src/xcm_config.rs index b84d2335a6990e7c4e92093c5cff68e4f1c266b3..c8f8f59dae9983f9b2f3dfe9248445710e7d1cb6 100644 --- a/polkadot/runtime/rococo/src/xcm_config.rs +++ b/polkadot/runtime/rococo/src/xcm_config.rs @@ -18,7 +18,7 @@ use super::{ parachains_origin, AccountId, AllPalletsWithSystem, Balances, Dmp, Fellows, ParaId, Runtime, - RuntimeCall, RuntimeEvent, RuntimeOrigin, TransactionByteFee, WeightToFee, XcmPallet, + RuntimeCall, RuntimeEvent, RuntimeOrigin, TransactionByteFee, Treasury, WeightToFee, XcmPallet, }; use crate::governance::StakingAdmin; @@ -29,7 +29,7 @@ use frame_support::{ weights::Weight, }; use frame_system::EnsureRoot; -use rococo_runtime_constants::currency::CENTS; +use rococo_runtime_constants::{currency::CENTS, system_parachain::*}; use runtime_common::{ xcm_sender::{ChildParachainRouter, ExponentialPrice}, ToAuthor, @@ -43,7 +43,8 @@ use xcm_builder::{ DescribeFamily, FixedWeightBounds, HashedDescription, IsChildSystemParachain, IsConcrete, MintLocation, OriginToPluralityVoice, SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, - WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, + WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, + XcmFeeToAccount, }; use xcm_executor::XcmExecutor; @@ -53,6 +54,7 @@ parameter_types! { pub UniversalLocation: InteriorMultiLocation = ThisNetwork::get().into(); pub CheckAccount: AccountId = XcmPallet::check_account(); pub LocalCheckAccount: (AccountId, MintLocation) = (CheckAccount::get(), MintLocation::Local); + pub TreasuryAccount: AccountId = Treasury::account_id(); } pub type LocationConverter = ( @@ -100,22 +102,22 @@ parameter_types! 
{ pub const BaseDeliveryFee: u128 = CENTS.saturating_mul(3); } +pub type PriceForChildParachainDelivery = + ExponentialPrice; + /// The XCM router. When we want to send an XCM message, we use this type. It amalgamates all of our /// individual routers. -pub type XcmRouter = WithUniqueTopic<( +pub type XcmRouter = WithUniqueTopic< // Only one router so far - use DMP to communicate with child parachains. - ChildParachainRouter< - Runtime, - XcmPallet, - ExponentialPrice, - >, -)>; + ChildParachainRouter, +>; parameter_types! { pub const Roc: MultiAssetFilter = Wild(AllOf { fun: WildFungible, id: Concrete(TokenLocation::get()) }); - pub const AssetHub: MultiLocation = Parachain(1000).into_location(); - pub const Contracts: MultiLocation = Parachain(1002).into_location(); - pub const Encointer: MultiLocation = Parachain(1003).into_location(); + pub const AssetHub: MultiLocation = Parachain(ASSET_HUB_ID).into_location(); + pub const Contracts: MultiLocation = Parachain(CONTRACTS_ID).into_location(); + pub const Encointer: MultiLocation = Parachain(ENCOINTER_ID).into_location(); + pub const BridgeHub: MultiLocation = Parachain(BRIDGE_HUB_ID).into_location(); pub const Tick: MultiLocation = Parachain(100).into_location(); pub const Trick: MultiLocation = Parachain(110).into_location(); pub const Track: MultiLocation = Parachain(120).into_location(); @@ -125,6 +127,7 @@ parameter_types! { pub const RocForAssetHub: (MultiAssetFilter, MultiLocation) = (Roc::get(), AssetHub::get()); pub const RocForContracts: (MultiAssetFilter, MultiLocation) = (Roc::get(), Contracts::get()); pub const RocForEncointer: (MultiAssetFilter, MultiLocation) = (Roc::get(), Encointer::get()); + pub const RocForBridgeHub: (MultiAssetFilter, MultiLocation) = (Roc::get(), BridgeHub::get()); pub const MaxInstructions: u32 = 100; pub const MaxAssetsIntoHolding: u32 = 64; } @@ -135,6 +138,7 @@ pub type TrustedTeleporters = ( xcm_builder::Case, xcm_builder::Case, xcm_builder::Case, + xcm_builder::Case, ); match_types! { @@ -151,7 +155,7 @@ pub type Barrier = TrailingSetTopicAsId<( AllowKnownQueryResponses, WithComputedOrigin< ( - // If the message is one that immediately attemps to pay for execution, then allow it. + // If the message is one that immediately attempts to pay for execution, then allow it. AllowTopLevelPaidExecutionFrom, // Messages coming from system parachains need not pay for execution. AllowExplicitUnpaidExecutionFrom>, @@ -188,7 +192,10 @@ impl xcm_executor::Config for XcmConfig { type SubscriptionService = XcmPallet; type PalletInstancesInfo = AllPalletsWithSystem; type MaxAssetsIntoHolding = MaxAssetsIntoHolding; - type FeeManager = (); + type FeeManager = XcmFeeManagerFromComponents< + SystemParachains, + XcmFeeToAccount, + >; type MessageExporter = (); type UniversalAliases = Nothing; type CallDispatcher = RuntimeCall; @@ -204,11 +211,6 @@ parameter_types! { pub const FellowsBodyId: BodyId = BodyId::Technical; } -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parachain(1000).into()); -} - /// Type to convert an `Origin` type value into a `MultiLocation` value which represents an interior /// location of this chain. 
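Illustrative only: a minimal sketch of how a price-for-delivery type along the lines of the `PriceForChildParachainDelivery = ExponentialPrice<FeeAssetId, BaseDeliveryFee, TransactionByteFee, Dmp>` alias introduced above could be evaluated. The shape assumed here is factor * (base_fee + encoded_len * byte_fee); the constant values and the `delivery_fee_factor` helper are hypothetical placeholders, not values taken from the runtime.

// Hedged sketch of exponential delivery pricing; all numbers are placeholders.
fn delivery_fee_factor() -> u128 {
    // Hypothetical congestion multiplier in parts-per-billion. A real
    // implementation would derive this from queue pressure (e.g. via Dmp).
    1_000_000_000 // 1.0x
}

/// price = factor * (base_fee + encoded_len * byte_fee), saturating.
fn price_for_delivery(base_fee: u128, byte_fee: u128, encoded_len: u128) -> u128 {
    let linear = base_fee.saturating_add(byte_fee.saturating_mul(encoded_len));
    linear.saturating_mul(delivery_fee_factor()) / 1_000_000_000
}

fn main() {
    // Assumed example values: a 3 * CENTS base fee, 10 units per byte, 128-byte message.
    let cents: u128 = 1_000_000_000; // hypothetical CENTS value
    let fee = price_for_delivery(3 * cents, 10, 128);
    println!("delivery fee: {fee}");
}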
pub type LocalOriginToLocation = ( @@ -262,7 +264,5 @@ impl pallet_xcm::Config for Runtime { type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); type WeightInfo = crate::weights::pallet_xcm::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; } diff --git a/polkadot/runtime/test-runtime/Cargo.toml b/polkadot/runtime/test-runtime/Cargo.toml index 2e9c773a3f8cf17da29e6e52ea1efb15da147d85..29debad7b53b6a3875bbb92bd841cf32ae54b315 100644 --- a/polkadot/runtime/test-runtime/Cargo.toml +++ b/polkadot/runtime/test-runtime/Cargo.toml @@ -12,7 +12,7 @@ bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } rustc-hex = { version = "2.1.0", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.188", default-features = false } serde_derive = { version = "1.0.117", optional = true } smallvec = "1.8.0" @@ -71,7 +71,7 @@ hex-literal = "0.4.1" tiny-keccak = { version = "2.0.2", features = ["keccak"] } keyring = { package = "sp-keyring", path = "../../../substrate/primitives/keyring" } sp-trie = { path = "../../../substrate/primitives/trie" } -serde_json = "1.0.107" +serde_json = "1.0.108" [build-dependencies] substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder" } diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index 99fd2198400bf821f406345ac5daabe13d4b009a..596e65eca0680b3ee1b987e0e1f387b501762fb7 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -192,7 +192,7 @@ impl pallet_babe::Config for Runtime { type WeightInfo = (); type MaxAuthorities = MaxAuthorities; - type MaxNominators = MaxNominatorRewardedPerValidator; + type MaxNominators = MaxNominators; type KeyOwnerProof = >::Proof; @@ -229,6 +229,7 @@ impl pallet_balances::Config for Runtime { type ReserveIdentifier = [u8; 8]; type WeightInfo = (); type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); type MaxHolds = ConstU32<0>; type MaxFreezes = ConstU32<0>; @@ -317,7 +318,8 @@ parameter_types! { // 27 eras in which slashes can be cancelled (a bit less than 7 days). 
pub storage SlashDeferDuration: sp_staking::EraIndex = 27; pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; - pub storage MaxNominatorRewardedPerValidator: u32 = 64; + pub const MaxExposurePageSize: u32 = 64; + pub const MaxNominators: u32 = 256; pub storage OffendingValidatorsThreshold: Perbill = Perbill::from_percent(17); pub const MaxAuthorities: u32 = 100_000; pub const OnChainMaxWinners: u32 = u32::MAX; @@ -353,7 +355,7 @@ impl pallet_staking::Config for Runtime { type AdminOrigin = frame_system::EnsureNever<()>; type SessionInterface = Self; type EraPayout = pallet_staking::ConvertCurve; - type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; + type MaxExposurePageSize = MaxExposurePageSize; type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type NextNewSession = Session; type ElectionProvider = onchain::OnChainExecution; @@ -379,7 +381,7 @@ impl pallet_grandpa::Config for Runtime { type WeightInfo = (); type MaxAuthorities = MaxAuthorities; - type MaxNominators = MaxNominatorRewardedPerValidator; + type MaxNominators = MaxNominators; type MaxSetIdSessionEntries = MaxSetIdSessionEntries; type KeyOwnerProof = sp_core::Void; diff --git a/polkadot/runtime/test-runtime/src/xcm_config.rs b/polkadot/runtime/test-runtime/src/xcm_config.rs index 2113bbae66adaebfc6014d93999c758be759f7db..ae4faecf70013d8b093e87e71a7acbdde3ee577d 100644 --- a/polkadot/runtime/test-runtime/src/xcm_config.rs +++ b/polkadot/runtime/test-runtime/src/xcm_config.rs @@ -60,7 +60,11 @@ pub type Barrier = AllowUnpaidExecutionFrom; pub struct DummyAssetTransactor; impl TransactAsset for DummyAssetTransactor { - fn deposit_asset(_what: &MultiAsset, _who: &MultiLocation, _context: &XcmContext) -> XcmResult { + fn deposit_asset( + _what: &MultiAsset, + _who: &MultiLocation, + _context: Option<&XcmContext>, + ) -> XcmResult { Ok(()) } @@ -123,11 +127,6 @@ impl xcm_executor::Config for XcmConfig { type Aliasers = Nothing; } -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(xcm::latest::Junctions::Here.into()); -} - impl pallet_xcm::Config for crate::Runtime { // The config types here are entirely configurable, since the only one that is sorely needed // is `XcmExecutor`, which will be used in unit tests located in xcm-executor. @@ -153,7 +152,5 @@ impl pallet_xcm::Config for crate::Runtime { type MaxRemoteLockConsumers = frame_support::traits::ConstU32<0>; type RemoteLockConsumerIdentifier = (); type WeightInfo = pallet_xcm::TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; } diff --git a/polkadot/runtime/westend/Cargo.toml b/polkadot/runtime/westend/Cargo.toml index 58a6cc21eecdb4da8a3c3b28b75aadd8dac51de0..eaebf01e3a7647ef277d20c0dc749323a1591af6 100644 --- a/polkadot/runtime/westend/Cargo.toml +++ b/polkadot/runtime/westend/Cargo.toml @@ -2,6 +2,7 @@ name = "westend-runtime" build = "build.rs" version = "1.0.0" +description = "Westend testnet Relay Chain runtime." 
authors.workspace = true edition.workspace = true license.workspace = true @@ -9,7 +10,7 @@ license.workspace = true [dependencies] bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } rustc-hex = { version = "2.1.0", default-features = false } serde = { version = "1.0.188", default-features = false } @@ -91,6 +92,7 @@ pallet-vesting = { path = "../../../substrate/frame/vesting", default-features = pallet-whitelist = { path = "../../../substrate/frame/whitelist", default-features = false } pallet-xcm = { path = "../../xcm/pallet-xcm", default-features = false } pallet-xcm-benchmarks = { path = "../../xcm/pallet-xcm-benchmarks", default-features = false, optional = true } +pallet-root-testing = { path = "../../../substrate/frame/root-testing", default-features = false } frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } frame-try-runtime = { path = "../../../substrate/frame/try-runtime", default-features = false, optional = true } @@ -114,7 +116,7 @@ xcm-builder = { package = "staging-xcm-builder", path = "../../xcm/xcm-builder", hex-literal = "0.4.1" tiny-keccak = { version = "2.0.2", features = ["keccak"] } keyring = { package = "sp-keyring", path = "../../../substrate/primitives/keyring" } -serde_json = "1.0.107" +serde_json = "1.0.108" remote-externalities = { package = "frame-remote-externalities" , path = "../../../substrate/utils/frame/remote-externalities" } tokio = { version = "1.24.2", features = ["macros"] } sp-tracing = { path = "../../../substrate/primitives/tracing", default-features = false } @@ -176,6 +178,7 @@ std = [ "pallet-proxy/std", "pallet-recovery/std", "pallet-referenda/std", + "pallet-root-testing/std", "pallet-scheduler/std", "pallet-session-benchmarking?/std", "pallet-session/std", @@ -314,6 +317,7 @@ try-runtime = [ "pallet-proxy/try-runtime", "pallet-recovery/try-runtime", "pallet-referenda/try-runtime", + "pallet-root-testing/try-runtime", "pallet-scheduler/try-runtime", "pallet-session/try-runtime", "pallet-society/try-runtime", @@ -337,7 +341,7 @@ fast-runtime = [] runtime-metrics = [ "runtime-parachains/runtime-metrics", "sp-io/with-tracing" ] -# A feature that should be enabled when the runtime should be build for on-chain +# A feature that should be enabled when the runtime should be built for on-chain # deployment. This will disable stuff that shouldn't be part of the on-chain wasm # to make it smaller like logging for example. on-chain-release-build = [ "sp-api/disable-logging" ] diff --git a/polkadot/runtime/westend/constants/Cargo.toml b/polkadot/runtime/westend/constants/Cargo.toml index 779b2cb3110a5f776a0f1cd81b342eca1a14121f..2243210975b1df6bc0193fc60b9003e2199a5121 100644 --- a/polkadot/runtime/westend/constants/Cargo.toml +++ b/polkadot/runtime/westend/constants/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "westend-runtime-constants" version = "1.0.0" +description = "Constants used throughout the Westend network." 
authors.workspace = true edition.workspace = true license.workspace = true @@ -15,6 +16,8 @@ sp-runtime = { path = "../../../../substrate/primitives/runtime", default-featur sp-weights = { path = "../../../../substrate/primitives/weights", default-features = false } sp-core = { path = "../../../../substrate/primitives/core", default-features = false } +xcm = { package = "staging-xcm", path = "../../../xcm", default-features = false } + [features] default = [ "std" ] std = [ @@ -24,4 +27,5 @@ std = [ "sp-core/std", "sp-runtime/std", "sp-weights/std", + "xcm/std", ] diff --git a/polkadot/runtime/westend/constants/src/lib.rs b/polkadot/runtime/westend/constants/src/lib.rs index 0dd64d092c34512dc963499ab418201d253fa167..a06b3ba602a365c0c9d2fe006ae27ffc7003ae97 100644 --- a/polkadot/runtime/westend/constants/src/lib.rs +++ b/polkadot/runtime/westend/constants/src/lib.rs @@ -96,6 +96,27 @@ pub mod fee { } } +/// System Parachains. +pub mod system_parachain { + use xcm::latest::prelude::*; + + /// Network's Asset Hub parachain ID. + pub const ASSET_HUB_ID: u32 = 1000; + /// Collectives parachain ID. + pub const COLLECTIVES_ID: u32 = 1001; + /// BridgeHub parachain ID. + pub const BRIDGE_HUB_ID: u32 = 1002; + + frame_support::match_types! { + pub type SystemParachains: impl Contains = { + MultiLocation { parents: 0, interior: X1(Parachain(ASSET_HUB_ID | COLLECTIVES_ID | BRIDGE_HUB_ID ))} + }; + } +} + +/// Westend Treasury pallet instance. +pub const TREASURY_PALLET_ID: u8 = 37; + /// XCM protocol related constants. pub mod xcm { /// Pluralistic bodies existing within the consensus. @@ -108,14 +129,6 @@ pub mod xcm { } } -/// System Parachains. -pub mod system_parachain { - /// Statemint parachain ID. - pub const ASSET_HUB_ID: u32 = 1000; - /// Collectives parachain ID. - pub const COLLECTIVES_ID: u32 = 1001; -} - #[cfg(test)] mod tests { use super::{ diff --git a/polkadot/runtime/westend/src/impls.rs b/polkadot/runtime/westend/src/impls.rs new file mode 100644 index 0000000000000000000000000000000000000000..80105594965468fb225755630009d80dacf73acd --- /dev/null +++ b/polkadot/runtime/westend/src/impls.rs @@ -0,0 +1,158 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use crate::xcm_config; +use frame_support::pallet_prelude::DispatchResult; +use frame_system::RawOrigin; +use parity_scale_codec::{Decode, Encode}; +use primitives::Balance; +use runtime_common::identity_migrator::{OnReapIdentity, WeightInfo}; +use sp_std::{marker::PhantomData, prelude::*}; +use westend_runtime_constants::currency::*; +use xcm::{latest::prelude::*, VersionedMultiLocation, VersionedXcm}; +use xcm_executor::traits::TransactAsset; + +/// A type containing the encoding of the People Chain pallets in its runtime. Used to construct any +/// remote calls. The codec index must correspond to the index of `IdentityMigrator` in the +/// `construct_runtime` of the remote chain. 
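A minimal sketch of why the `#[codec(index = ...)]` attributes on the `PeopleRuntimePallets` and `IdentityMigratorCalls` enums (introduced in `impls.rs` below) must match the remote chain's `construct_runtime` layout: SCALE encodes an enum variant as its index byte followed by the variant's fields, so the first two bytes of the encoded call are the pallet index and the call index. The types and indices here are simplified stand-ins (`AccountId` reduced to `[u8; 32]`), assuming `parity-scale-codec` with the `derive` feature.

use parity_scale_codec::Encode;

// Hypothetical stand-ins for the remote runtime's pallet/call layout.
#[derive(Encode)]
enum RemoteRuntimeCall {
    #[codec(index = 248)] // must equal the pallet's index in the remote `construct_runtime`
    IdentityMigrator(MigratorCall),
}

#[derive(Encode)]
enum MigratorCall {
    #[codec(index = 1)] // must equal the call's index inside the pallet
    PokeDeposit([u8; 32]),
}

fn main() {
    let who = [7u8; 32];
    let call = RemoteRuntimeCall::IdentityMigrator(MigratorCall::PokeDeposit(who));
    let encoded = call.encode();
    // First byte = pallet index (248), second byte = call index (1), then the argument bytes.
    assert_eq!(&encoded[..2], &[248, 1]);
    assert_eq!(encoded.len(), 2 + 32);
}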
+#[derive(Encode, Decode)] +enum PeopleRuntimePallets { + #[codec(index = 248)] + IdentityMigrator(IdentityMigratorCalls), +} + +/// Call encoding for the calls needed from the Identity Migrator pallet. +#[derive(Encode, Decode)] +enum IdentityMigratorCalls { + #[codec(index = 1)] + PokeDeposit(AccountId), +} + +/// Type that implements `OnReapIdentity` that will send the deposit needed to store the same +/// information on a parachain, sends the deposit there, and then updates it. +pub struct ToParachainIdentityReaper(PhantomData<(Runtime, AccountId)>); +impl ToParachainIdentityReaper { + /// Calculate the balance needed on the remote chain based on the `IdentityInfo` and `Subs` on + /// this chain. The total includes: + /// + /// - Identity basic deposit + /// - `IdentityInfo` byte deposit + /// - Sub accounts deposit + /// - 2x existential deposit (1 for account existence, 1 such that the user can transact) + fn calculate_remote_deposit(bytes: u32, subs: u32) -> Balance { + // Remote deposit constants. Parachain uses `deposit / 100` + // Source: + // https://github.com/paritytech/polkadot-sdk/blob/a146918/cumulus/parachains/common/src/westend.rs#L28 + // + // Parachain Deposit Configuration: + // + // pub const BasicDeposit: Balance = deposit(1, 17); + // pub const ByteDeposit: Balance = deposit(0, 1); + // pub const SubAccountDeposit: Balance = deposit(1, 53); + // pub const EXISTENTIAL_DEPOSIT: Balance = constants::currency::EXISTENTIAL_DEPOSIT / 10; + let para_basic_deposit = deposit(1, 17) / 100; + let para_byte_deposit = deposit(0, 1) / 100; + let para_sub_account_deposit = deposit(1, 53) / 100; + let para_existential_deposit = EXISTENTIAL_DEPOSIT / 10; + + // pallet deposits + let id_deposit = + para_basic_deposit.saturating_add(para_byte_deposit.saturating_mul(bytes as Balance)); + let subs_deposit = para_sub_account_deposit.saturating_mul(subs as Balance); + + id_deposit + .saturating_add(subs_deposit) + .saturating_add(para_existential_deposit.saturating_mul(2)) + } +} + +impl OnReapIdentity for ToParachainIdentityReaper +where + Runtime: frame_system::Config + pallet_xcm::Config, + AccountId: Into<[u8; 32]> + Clone + Encode, +{ + fn on_reap_identity(who: &AccountId, fields: u32, subs: u32) -> DispatchResult { + use crate::{ + impls::IdentityMigratorCalls::PokeDeposit, + weights::runtime_common_identity_migrator::WeightInfo as MigratorWeights, + }; + + let total_to_send = Self::calculate_remote_deposit(fields, subs); + + // define asset / destination from relay perspective + let wnd = MultiAsset { id: Concrete(Here.into_location()), fun: Fungible(total_to_send) }; + // People Chain: ParaId 1004 + let destination: MultiLocation = MultiLocation::new(0, Parachain(1004)); + + // Do `check_out` accounting since the XCM Executor's `InitiateTeleport` doesn't support + // unpaid teleports. 
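To make the arithmetic documented for `calculate_remote_deposit` above concrete, here is a small self-contained sketch of the same formula: identity deposit plus sub-account deposit plus twice the parachain existential deposit, with parachain deposits taken as the relay deposits divided by 100 and the parachain ED as the relay ED divided by 10. The `deposit` helper and the constant values are hypothetical; the real ones come from the runtime's currency constants.

type Balance = u128;

// Hypothetical relay-side deposit function; the real one lives in the runtime constants.
fn deposit(items: u32, bytes: u32) -> Balance {
    items as Balance * 20_000_000_000 + bytes as Balance * 1_000_000
}

const EXISTENTIAL_DEPOSIT: Balance = 10_000_000_000; // assumed relay ED

/// Mirrors the shape of `calculate_remote_deposit` for a given identity byte length
/// and number of sub-accounts.
fn calculate_remote_deposit(bytes: u32, subs: u32) -> Balance {
    let para_basic_deposit = deposit(1, 17) / 100;
    let para_byte_deposit = deposit(0, 1) / 100;
    let para_sub_account_deposit = deposit(1, 53) / 100;
    let para_existential_deposit = EXISTENTIAL_DEPOSIT / 10;

    let id_deposit =
        para_basic_deposit.saturating_add(para_byte_deposit.saturating_mul(bytes as Balance));
    let subs_deposit = para_sub_account_deposit.saturating_mul(subs as Balance);

    id_deposit
        .saturating_add(subs_deposit)
        .saturating_add(para_existential_deposit.saturating_mul(2))
}

fn main() {
    // E.g. a 100-byte identity with 2 sub-accounts.
    println!("remote deposit: {}", calculate_remote_deposit(100, 2));
}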
+ + // check out + xcm_config::LocalAssetTransactor::can_check_out( + &destination, + &wnd, + // not used in AssetTransactor + &XcmContext { origin: None, message_id: [0; 32], topic: None }, + ) + .map_err(|_| pallet_xcm::Error::::CannotCheckOutTeleport)?; + xcm_config::LocalAssetTransactor::check_out( + &destination, + &wnd, + // not used in AssetTransactor + &XcmContext { origin: None, message_id: [0; 32], topic: None }, + ); + + // reanchor + let wnd_reanchored: MultiAssets = vec![MultiAsset { + id: Concrete(MultiLocation::new(1, Here)), + fun: Fungible(total_to_send), + }] + .into(); + + let poke = PeopleRuntimePallets::::IdentityMigrator(PokeDeposit(who.clone())); + let remote_weight_limit = MigratorWeights::::poke_deposit().saturating_mul(2); + + // Actual program to execute on People Chain. + let program: Xcm<()> = Xcm(vec![ + // Unpaid as this is constructed by the system, once per user. The user shouldn't have + // their balance reduced by teleport fees for the favor of migrating. + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + // Receive the asset into holding. + ReceiveTeleportedAsset(wnd_reanchored), + // Deposit into the user's account. + DepositAsset { + assets: Wild(AllCounted(1)), + beneficiary: Junction::AccountId32 { network: None, id: who.clone().into() } + .into_location() + .into(), + }, + // Poke the deposit to reserve the appropriate amount on the parachain. + Transact { + origin_kind: OriginKind::Superuser, + require_weight_at_most: remote_weight_limit, + call: poke.encode().into(), + }, + ]); + + // send + let _ = >::send( + RawOrigin::Root.into(), + Box::new(VersionedMultiLocation::V3(destination)), + Box::new(VersionedXcm::V3(program)), + )?; + Ok(()) + } +} diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index d61acf36b4cb802905262fcea1904f5f47eae9f0..29183fdfe00bc008372fc426b77f4bc1d672f53b 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -40,22 +40,23 @@ use frame_support::{ }; use frame_system::EnsureRoot; use pallet_grandpa::{fg_primitives, AuthorityId as GrandpaId}; +use pallet_identity::legacy::IdentityInfo; use pallet_im_online::sr25519::AuthorityId as ImOnlineId; use pallet_session::historical as session_historical; use pallet_transaction_payment::{CurrencyAdapter, FeeDetails, RuntimeDispatchInfo}; use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use primitives::{ - slashing, AccountId, AccountIndex, Balance, BlockNumber, CandidateEvent, CandidateHash, - CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, - Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, Moment, Nonce, - OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, - SessionInfo, Signature, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, - ValidatorSignature, PARACHAIN_KEY_TYPE_ID, + slashing, vstaging::NodeFeatures, AccountId, AccountIndex, Balance, BlockNumber, + CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, + ExecutorParams, GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, + InboundHrmpMessage, Moment, Nonce, OccupiedCoreAssumption, PersistedValidationData, + PvfCheckStatement, ScrapedOnChainVotes, SessionInfo, Signature, ValidationCode, + ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, PARACHAIN_KEY_TYPE_ID, }; use runtime_common::{ assigned_slots, auctions, crowdloan, elections::OnChainAccuracy, - 
impl_runtime_weights, + identity_migrator, impl_runtime_weights, impls::{ LocatableAssetConverter, ToAuthor, VersionedLocatableAsset, VersionedMultiLocationConverter, }, @@ -70,7 +71,9 @@ use runtime_parachains::{ inclusion::{AggregateMessageOrigin, UmpQueueId}, initializer as parachains_initializer, origin as parachains_origin, paras as parachains_paras, paras_inherent as parachains_paras_inherent, reward_points as parachains_reward_points, - runtime_api_impl::v7 as parachains_runtime_api_impl, + runtime_api_impl::{ + v7 as parachains_runtime_api_impl, vstaging as parachains_staging_runtime_api_impl, + }, scheduler as parachains_scheduler, session_info as parachains_session_info, shared as parachains_shared, }; @@ -116,6 +119,10 @@ mod bag_thresholds; mod weights; pub mod xcm_config; +// Implemented types. +mod impls; +use impls::ToParachainIdentityReaper; + // Governance and configurations. pub mod governance; use governance::{ @@ -162,8 +169,8 @@ pub fn native_version() -> NativeVersion { /// locking the state of the pallet and preventing further updates to identities and sub-identities. /// The locked state will be the genesis state of a new system chain and then removed from the Relay /// Chain. -pub struct IdentityCalls; -impl Contains for IdentityCalls { +pub struct IsIdentityCall; +impl Contains for IsIdentityCall { fn contains(c: &RuntimeCall) -> bool { matches!(c, RuntimeCall::Identity(_)) } @@ -175,7 +182,7 @@ parameter_types! { } impl frame_system::Config for Runtime { - type BaseCallFilter = EverythingBut; + type BaseCallFilter = EverythingBut; type BlockWeights = BlockWeights; type BlockLength = BlockLength; type RuntimeOrigin = RuntimeOrigin; @@ -263,7 +270,7 @@ impl pallet_babe::Config for Runtime { type WeightInfo = (); type MaxAuthorities = MaxAuthorities; - type MaxNominators = MaxNominatorRewardedPerValidator; + type MaxNominators = MaxNominators; type KeyOwnerProof = >::Proof; @@ -301,6 +308,7 @@ impl pallet_balances::Config for Runtime { type ReserveIdentifier = [u8; 8]; type WeightInfo = weights::pallet_balances::WeightInfo; type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = RuntimeFreezeReason; type MaxFreezes = ConstU32<1>; type MaxHolds = ConstU32<1>; @@ -313,7 +321,7 @@ parameter_types! { impl pallet_beefy::Config for Runtime { type BeefyId = BeefyId; type MaxAuthorities = MaxAuthorities; - type MaxNominators = MaxNominatorRewardedPerValidator; + type MaxNominators = MaxNominators; type MaxSetIdSessionEntries = BeefySetIdSessionEntries; type OnNewValidatorSet = BeefyMmrLeaf; type WeightInfo = (); @@ -417,17 +425,6 @@ parameter_types! { pub const Offset: BlockNumber = 0; } -impl_opaque_keys! { - pub struct OldSessionKeys { - pub grandpa: Grandpa, - pub babe: Babe, - pub im_online: ImOnline, - pub para_validator: Initializer, - pub para_assignment: ParaSessionInfo, - pub authority_discovery: AuthorityDiscovery, - } -} - impl_opaque_keys! { pub struct SessionKeys { pub grandpa: Grandpa, @@ -440,32 +437,6 @@ impl_opaque_keys! 
{ } } -// remove this when removing `OldSessionKeys` -fn transform_session_keys(v: AccountId, old: OldSessionKeys) -> SessionKeys { - SessionKeys { - grandpa: old.grandpa, - babe: old.babe, - im_online: old.im_online, - para_validator: old.para_validator, - para_assignment: old.para_assignment, - authority_discovery: old.authority_discovery, - beefy: { - // From Session::upgrade_keys(): - // - // Care should be taken that the raw versions of the - // added keys are unique for every `ValidatorId, KeyTypeId` combination. - // This is an invariant that the session pallet typically maintains internally. - // - // So, produce a dummy value that's unique for the `ValidatorId, KeyTypeId` combination. - let mut id: BeefyId = sp_application_crypto::ecdsa::Public::from_raw([0u8; 33]).into(); - let id_raw: &mut [u8] = id.as_mut(); - id_raw[1..33].copy_from_slice(v.as_ref()); - id_raw[0..4].copy_from_slice(b"beef"); - id - }, - } -} - impl pallet_session::Config for Runtime { type RuntimeEvent = RuntimeEvent; type ValidatorId = AccountId; @@ -652,7 +623,11 @@ parameter_types! { // 1 era in which slashes can be cancelled (6 hours). pub const SlashDeferDuration: sp_staking::EraIndex = 1; pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; - pub const MaxNominatorRewardedPerValidator: u32 = 64; + pub const MaxExposurePageSize: u32 = 64; + // Note: this is not really correct as Max Nominators is (MaxExposurePageSize * page_count) but + // this is an unbounded number. We just set it to a reasonably high value, 1 full page + // of nominators. + pub const MaxNominators: u32 = 64; pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(17); pub const MaxNominations: u32 = ::LIMIT as u32; } @@ -672,7 +647,7 @@ impl pallet_staking::Config for Runtime { type AdminOrigin = EnsureRoot; type SessionInterface = Self; type EraPayout = pallet_staking::ConvertCurve; - type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; + type MaxExposurePageSize = MaxExposurePageSize; type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type NextNewSession = Session; type ElectionProvider = ElectionProviderMultiPhase; @@ -695,8 +670,6 @@ impl pallet_fast_unstake::Config for Runtime { type ControlOrigin = EnsureRoot; type Staking = Staking; type MaxErasToCheckPerBlock = ConstU32<1>; - #[cfg(feature = "runtime-benchmarks")] - type MaxBackersPerValidator = MaxNominatorRewardedPerValidator; type WeightInfo = weights::pallet_fast_unstake::WeightInfo; } @@ -795,7 +768,7 @@ impl pallet_grandpa::Config for Runtime { type WeightInfo = (); type MaxAuthorities = MaxAuthorities; - type MaxNominators = MaxNominatorRewardedPerValidator; + type MaxNominators = MaxNominators; type MaxSetIdSessionEntries = MaxSetIdSessionEntries; type KeyOwnerProof = >::Proof; @@ -868,7 +841,7 @@ where parameter_types! 
{ // Minimum 100 bytes/KSM deposited (1 CENT/byte) pub const BasicDeposit: Balance = 1000 * CENTS; // 258 bytes on-chain - pub const FieldDeposit: Balance = 250 * CENTS; // 66 bytes on-chain + pub const ByteDeposit: Balance = deposit(0, 1); pub const SubAccountDeposit: Balance = 200 * CENTS; // 53 bytes on-chain pub const MaxSubAccounts: u32 = 100; pub const MaxAdditionalFields: u32 = 100; @@ -880,10 +853,10 @@ impl pallet_identity::Config for Runtime { type Currency = Balances; type Slashed = (); type BasicDeposit = BasicDeposit; - type FieldDeposit = FieldDeposit; + type ByteDeposit = ByteDeposit; type SubAccountDeposit = SubAccountDeposit; type MaxSubAccounts = MaxSubAccounts; - type MaxAdditionalFields = MaxAdditionalFields; + type IdentityInformation = IdentityInfo; type MaxRegistrars = MaxRegistrars; type ForceOrigin = EitherOf, GeneralAdmin>; type RegistrarOrigin = EitherOf, GeneralAdmin>; @@ -1333,6 +1306,14 @@ impl auctions::Config for Runtime { type WeightInfo = weights::runtime_common_auctions::WeightInfo; } +impl identity_migrator::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + // To be changed to `EnsureSigned` once there is a People Chain to migrate to. + type Reaper = EnsureRoot; + type ReapIdentityHandler = ToParachainIdentityReaper; + type WeightInfo = weights::runtime_common_identity_migrator::WeightInfo; +} + parameter_types! { pub const PoolsPalletId: PalletId = PalletId(*b"py/nopls"); pub const MaxPointsToBalance: u8 = 10; @@ -1355,6 +1336,10 @@ impl pallet_nomination_pools::Config for Runtime { type MaxPointsToBalance = MaxPointsToBalance; } +impl pallet_root_testing::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} + parameter_types! { // The deposit configuration for the singed migration. Specially if you want to allow any signed account to do the migration (see `SignedFilter`, these deposits should be high) pub const MigrationSignedDepositPerItem: Balance = 1 * CENTS; @@ -1489,6 +1474,12 @@ construct_runtime! { // Asset rate. AssetRate: pallet_asset_rate::{Pallet, Call, Storage, Event} = 101, + + // Root testing pallet. + RootTesting: pallet_root_testing::{Pallet, Call, Storage, Event} = 102, + + // Pallet for migrating Identity to a parachain. To be removed post-migration. + IdentityMigrator: identity_migrator::{Pallet, Call, Event} = 248, } } @@ -1532,29 +1523,21 @@ pub type Migrations = migrations::Unreleased; pub mod migrations { use super::*; - /// Upgrade Session keys to include BEEFY key. - /// When this is removed, should also remove `OldSessionKeys`. - pub struct UpgradeSessionKeys; - impl frame_support::traits::OnRuntimeUpgrade for UpgradeSessionKeys { - fn on_runtime_upgrade() -> Weight { - Session::upgrade_keys::(transform_session_keys); - Perbill::from_percent(50) * BlockWeights::get().max_block - } - } - /// Unreleased migrations. 
Add new ones here: pub type Unreleased = ( pallet_im_online::migration::v1::Migration, parachains_configuration::migration::v7::MigrateToV7, - assigned_slots::migration::v1::VersionCheckedMigrateToV1, + pallet_staking::migrations::v14::MigrateToV14, + assigned_slots::migration::v1::MigrateToV1, parachains_scheduler::migration::v1::MigrateToV1, parachains_configuration::migration::v8::MigrateToV8, - UpgradeSessionKeys, parachains_configuration::migration::v9::MigrateToV9, - paras_registrar::migration::VersionCheckedMigrateToV1, - pallet_nomination_pools::migration::versioned_migrations::V5toV6, + paras_registrar::migration::MigrateToV1, + pallet_nomination_pools::migration::versioned::V5toV6, pallet_referenda::migration::v1::MigrateV0ToV1, - pallet_nomination_pools::migration::versioned_migrations::V6ToV7, + pallet_nomination_pools::migration::versioned::V6ToV7, + pallet_grandpa::migrations::MigrateV4ToV5, + parachains_configuration::migration::v10::MigrateToV10, ); } @@ -1582,6 +1565,7 @@ mod benches { [runtime_common::assigned_slots, AssignedSlots] [runtime_common::auctions, Auctions] [runtime_common::crowdloan, Crowdloan] + [runtime_common::identity_migrator, IdentityMigrator] [runtime_common::paras_registrar, Registrar] [runtime_common::slots, Slots] [runtime_parachains::configuration, Configuration] @@ -1622,7 +1606,7 @@ mod benches { [pallet_whitelist, Whitelist] [pallet_asset_rate, AssetRate] // XCM - [pallet_xcm, XcmPallet] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -1695,7 +1679,7 @@ sp_api::impl_runtime_apis! { } } - #[api_version(7)] + #[api_version(9)] impl primitives::runtime_api::ParachainHost for Runtime { fn validators() -> Vec { parachains_runtime_api_impl::validators::() @@ -1838,6 +1822,14 @@ sp_api::impl_runtime_apis! { fn async_backing_params() -> primitives::AsyncBackingParams { parachains_runtime_api_impl::async_backing_params::() } + + fn disabled_validators() -> Vec { + parachains_staging_runtime_api_impl::disabled_validators::() + } + + fn node_features() -> NodeFeatures { + parachains_staging_runtime_api_impl::node_features::() + } } impl beefy_primitives::BeefyApi for Runtime { @@ -2095,10 +2087,14 @@ sp_api::impl_runtime_apis! { } } - impl pallet_staking_runtime_api::StakingApi for Runtime { + impl pallet_staking_runtime_api::StakingApi for Runtime { fn nominations_quota(balance: Balance) -> u32 { Staking::api_nominations_quota(balance) } + + fn eras_stakers_page_count(era: sp_staking::EraIndex, account: AccountId) -> sp_staking::Page { + Staking::api_eras_stakers_page_count(era, account) + } } #[cfg(feature = "try-runtime")] @@ -2133,6 +2129,7 @@ sp_api::impl_runtime_apis! { use pallet_session_benchmarking::Pallet as SessionBench; use pallet_offences_benchmarking::Pallet as OffencesBench; use pallet_election_provider_support_benchmarking::Pallet as ElectionProviderBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; use frame_system_benchmarking::Pallet as SystemBench; use pallet_nomination_pools_benchmarking::Pallet as NominationPoolsBench; @@ -2160,12 +2157,37 @@ sp_api::impl_runtime_apis! 
{ use pallet_session_benchmarking::Pallet as SessionBench; use pallet_offences_benchmarking::Pallet as OffencesBench; use pallet_election_provider_support_benchmarking::Pallet as ElectionProviderBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; use frame_system_benchmarking::Pallet as SystemBench; use pallet_nomination_pools_benchmarking::Pallet as NominationPoolsBench; impl pallet_session_benchmarking::Config for Runtime {} impl pallet_offences_benchmarking::Config for Runtime {} impl pallet_election_provider_support_benchmarking::Config for Runtime {} + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(crate::xcm_config::AssetHub::get()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported to/from AH. + Some(( + MultiAsset { fun: Fungible(EXISTENTIAL_DEPOSIT), id: Concrete(Here.into()) }, + crate::xcm_config::AssetHub::get(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay can reserve transfer native token to some random parachain. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Here.into()) + }, + crate::Junction::Parachain(43211234).into(), + )) + } + } impl frame_system_benchmarking::Config for Runtime {} impl pallet_nomination_pools_benchmarking::Config for Runtime {} impl runtime_parachains::disputes::slashing::benchmarking::Config for Runtime {} @@ -2176,9 +2198,24 @@ sp_api::impl_runtime_apis! { }; use xcm_config::{AssetHub, TokenLocation}; + parameter_types! { + pub ExistentialDepositMultiAsset: Option = Some(( + TokenLocation::get(), + ExistentialDeposit::get() + ).into()); + pub ToParachain: ParaId = westend_runtime_constants::system_parachain::ASSET_HUB_ID.into(); + } + impl pallet_xcm_benchmarks::Config for Runtime { type XcmConfig = xcm_config::XcmConfig; type AccountIdConverter = xcm_config::LocationConverter; + type DeliveryHelper = runtime_common::xcm_sender::ToParachainDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositMultiAsset, + xcm_config::PriceForChildParachainDelivery, + ToParachain, + (), + >; fn valid_destination() -> Result { Ok(AssetHub::get()) } @@ -2215,6 +2252,7 @@ sp_api::impl_runtime_apis! 
{ } impl pallet_xcm_benchmarks::generic::Config for Runtime { + type TransactAsset = Balances; type RuntimeCall = RuntimeCall; fn worst_case_response() -> (u64, Response) { diff --git a/polkadot/runtime/westend/src/tests.rs b/polkadot/runtime/westend/src/tests.rs index 78062662fee04cbc9dfd0d035407c2cc5449dd60..9f99631605903c805ac42089453e0fb8e5aa0485 100644 --- a/polkadot/runtime/westend/src/tests.rs +++ b/polkadot/runtime/westend/src/tests.rs @@ -91,3 +91,11 @@ fn check_whitelist() { // XcmPallet SafeXcmVersion assert!(whitelist.contains("1405f2411d0af5a7ff397e7c9dc68d196323ae84c43568be0d1394d5d0d522c4")); } + +#[test] +fn check_treasury_pallet_id() { + assert_eq!( + ::index() as u8, + westend_runtime_constants::TREASURY_PALLET_ID + ); +} diff --git a/polkadot/runtime/westend/src/weights/mod.rs b/polkadot/runtime/westend/src/weights/mod.rs index 9ae6798d70b6e234c7ffbbfd99f2f97c56fb30a4..3841579088a91d9b6c07efe1686409d1bf35b959 100644 --- a/polkadot/runtime/westend/src/weights/mod.rs +++ b/polkadot/runtime/westend/src/weights/mod.rs @@ -46,6 +46,7 @@ pub mod pallet_xcm; pub mod runtime_common_assigned_slots; pub mod runtime_common_auctions; pub mod runtime_common_crowdloan; +pub mod runtime_common_identity_migrator; pub mod runtime_common_paras_registrar; pub mod runtime_common_slots; pub mod runtime_parachains_configuration; diff --git a/polkadot/runtime/westend/src/weights/pallet_identity.rs b/polkadot/runtime/westend/src/weights/pallet_identity.rs index 8c11482ebdc13693d59cd299189b2b18753aeb06..dea631b9316bc6160ee1d98e794aa865625bf2ed 100644 --- a/polkadot/runtime/westend/src/weights/pallet_identity.rs +++ b/polkadot/runtime/westend/src/weights/pallet_identity.rs @@ -68,8 +68,7 @@ impl pallet_identity::WeightInfo for WeightInfo { /// Storage: Identity IdentityOf (r:1 w:1) /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) /// The range of component `r` is `[1, 20]`. - /// The range of component `x` is `[0, 100]`. - fn set_identity(r: u32, x: u32, ) -> Weight { + fn set_identity(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `442 + r * (5 ±0)` // Estimated: `11003` @@ -78,8 +77,6 @@ impl pallet_identity::WeightInfo for WeightInfo { .saturating_add(Weight::from_parts(0, 11003)) // Standard Error: 7_269 .saturating_add(Weight::from_parts(250_439, 0).saturating_mul(r.into())) - // Standard Error: 1_418 - .saturating_add(Weight::from_parts(483_981, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -133,8 +130,7 @@ impl pallet_identity::WeightInfo for WeightInfo { /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) /// The range of component `r` is `[1, 20]`. /// The range of component `s` is `[0, 100]`. - /// The range of component `x` is `[0, 100]`. 
- fn clear_identity(r: u32, s: u32, x: u32, ) -> Weight { + fn clear_identity(r: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `469 + r * (5 ±0) + s * (32 ±0) + x * (66 ±0)` // Estimated: `11003` @@ -145,8 +141,6 @@ impl pallet_identity::WeightInfo for WeightInfo { .saturating_add(Weight::from_parts(475_120, 0).saturating_mul(r.into())) // Standard Error: 4_092 .saturating_add(Weight::from_parts(1_348_869, 0).saturating_mul(s.into())) - // Standard Error: 4_092 - .saturating_add(Weight::from_parts(314_033, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -156,8 +150,7 @@ impl pallet_identity::WeightInfo for WeightInfo { /// Storage: Identity IdentityOf (r:1 w:1) /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) /// The range of component `r` is `[1, 20]`. - /// The range of component `x` is `[0, 100]`. - fn request_judgement(r: u32, x: u32, ) -> Weight { + fn request_judgement(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `367 + r * (57 ±0) + x * (66 ±0)` // Estimated: `11003` @@ -166,16 +159,13 @@ impl pallet_identity::WeightInfo for WeightInfo { .saturating_add(Weight::from_parts(0, 11003)) // Standard Error: 7_973 .saturating_add(Weight::from_parts(124_283, 0).saturating_mul(r.into())) - // Standard Error: 1_555 - .saturating_add(Weight::from_parts(512_825, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: Identity IdentityOf (r:1 w:1) /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) /// The range of component `r` is `[1, 20]`. - /// The range of component `x` is `[0, 100]`. - fn cancel_request(r: u32, x: u32, ) -> Weight { + fn cancel_request(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `398 + x * (66 ±0)` // Estimated: `11003` @@ -184,8 +174,6 @@ impl pallet_identity::WeightInfo for WeightInfo { .saturating_add(Weight::from_parts(0, 11003)) // Standard Error: 5_154 .saturating_add(Weight::from_parts(147_560, 0).saturating_mul(r.into())) - // Standard Error: 1_005 - .saturating_add(Weight::from_parts(490_754, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -239,8 +227,7 @@ impl pallet_identity::WeightInfo for WeightInfo { /// Storage: Identity IdentityOf (r:1 w:1) /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) /// The range of component `r` is `[1, 19]`. - /// The range of component `x` is `[0, 100]`. 
- fn provide_judgement(r: u32, x: u32, ) -> Weight { + fn provide_judgement(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `445 + r * (57 ±0) + x * (66 ±0)` // Estimated: `11003` @@ -249,8 +236,6 @@ impl pallet_identity::WeightInfo for WeightInfo { .saturating_add(Weight::from_parts(0, 11003)) // Standard Error: 10_027 .saturating_add(Weight::from_parts(154_816, 0).saturating_mul(r.into())) - // Standard Error: 1_855 - .saturating_add(Weight::from_parts(803_084, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -264,8 +249,7 @@ impl pallet_identity::WeightInfo for WeightInfo { /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) /// The range of component `r` is `[1, 20]`. /// The range of component `s` is `[0, 100]`. - /// The range of component `x` is `[0, 100]`. - fn kill_identity(r: u32, s: u32, x: u32, ) -> Weight { + fn kill_identity(r: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `676 + r * (5 ±0) + s * (32 ±0) + x * (66 ±0)` // Estimated: `11003` @@ -276,8 +260,6 @@ impl pallet_identity::WeightInfo for WeightInfo { .saturating_add(Weight::from_parts(666_376, 0).saturating_mul(r.into())) // Standard Error: 4_433 .saturating_add(Weight::from_parts(1_396_065, 0).saturating_mul(s.into())) - // Standard Error: 4_433 - .saturating_add(Weight::from_parts(300_762, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) diff --git a/polkadot/runtime/westend/src/weights/pallet_staking.rs b/polkadot/runtime/westend/src/weights/pallet_staking.rs index 099693d26b505817a390ddb7757a7ea38b75f812..87b603621e8d418c2fa309b208959e8a1e744e56 100644 --- a/polkadot/runtime/westend/src/weights/pallet_staking.rs +++ b/polkadot/runtime/westend/src/weights/pallet_staking.rs @@ -17,27 +17,25 @@ //! Autogenerated weights for `pallet_staking` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner--ss9ysm1-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot +// target/production/polkadot // benchmark // pallet -// --chain=westend-dev // --steps=50 // --repeat=20 -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --pallet=pallet_staking // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/westend/src/weights/ +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_staking +// --chain=westend-dev +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,746 +48,729 @@ use core::marker::PhantomData; /// Weight functions for `pallet_staking`. 
pub struct WeightInfo(PhantomData); impl pallet_staking::WeightInfo for WeightInfo { - /// Storage: Staking Bonded (r:1 w:1) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:1 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: Staking Payee (r:0 w:1) - /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:0 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn bond() -> Weight { // Proof Size summary in bytes: - // Measured: `1014` + // Measured: `894` // Estimated: `4764` - // Minimum execution time: 51_108_000 picoseconds. - Weight::from_parts(52_521_000, 0) + // Minimum execution time: 38_052_000 picoseconds. 
+ Weight::from_parts(39_303_000, 0) .saturating_add(Weight::from_parts(0, 4764)) - .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: Staking Bonded (r:1 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:1 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:3 w:3) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:2 w:2) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:3 w:3) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:2 w:2) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) fn bond_extra() -> Weight { // Proof Size summary in bytes: - // Measured: `1959` + // Measured: `1921` // Estimated: `8877` - // Minimum execution time: 96_564_000 picoseconds. - Weight::from_parts(100_133_000, 0) + // Minimum execution time: 81_690_000 picoseconds. 
+ Weight::from_parts(83_889_000, 0) .saturating_add(Weight::from_parts(0, 8877)) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(7)) } - /// Storage: Staking Ledger (r:1 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:0) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking MinNominatorBond (r:1 w:0) - /// Proof: Staking MinNominatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:3 w:3) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:1 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:2 w:2) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:1 w:0) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:3 w:3) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:2 w:2) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) fn unbond() -> Weight { // Proof Size summary in bytes: - // Measured: `2166` + // Measured: `2128` // Estimated: `8877` - // Minimum execution time: 97_705_000 picoseconds. - Weight::from_parts(102_055_000, 0) + // Minimum execution time: 84_409_000 picoseconds. 
+ Weight::from_parts(87_330_000, 0) .saturating_add(Weight::from_parts(0, 8877)) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(7)) } - /// Storage: Staking Ledger (r:1 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `981` + // Measured: `1075` // Estimated: `4764` - // Minimum execution time: 45_257_000 picoseconds. - Weight::from_parts(47_309_508, 0) + // Minimum execution time: 39_770_000 picoseconds. 
+ Weight::from_parts(40_828_632, 0) .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 2_343 - .saturating_add(Weight::from_parts(61_484, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(4)) + // Standard Error: 824 + .saturating_add(Weight::from_parts(51_107, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Staking Ledger (r:1 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:1 w:1) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking SlashingSpans (r:1 w:1) - /// Proof Skipped: Staking SlashingSpans (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking Validators (r:1 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:1) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking CounterForNominators (r:1 w:1) - /// Proof: Staking CounterForNominators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:2 w:2) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: Staking Payee (r:0 w:1) - /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Staking SpanSlash (r:0 w:100) - /// Proof: Staking SpanSlash (max_values: None, max_size: Some(76), added: 2551, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::SlashingSpans` (r:1 w:1) + /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, 
`max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Staking::SpanSlash` (r:0 w:100) + /// Proof: `Staking::SpanSlash` (`max_values`: None, `max_size`: Some(76), added: 2551, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_kill(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2221 + s * (4 ±0)` + // Measured: `2127 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 94_800_000 picoseconds. - Weight::from_parts(101_763_223, 0) + // Minimum execution time: 82_500_000 picoseconds. + Weight::from_parts(90_099_121, 0) .saturating_add(Weight::from_parts(0, 6248)) - // Standard Error: 6_481 - .saturating_add(Weight::from_parts(1_450_372, 0).saturating_mul(s.into())) + // Standard Error: 3_280 + .saturating_add(Weight::from_parts(1_273_212, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(13)) .saturating_add(T::DbWeight::get().writes(11)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) } - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking MinValidatorBond (r:1 w:0) - /// Proof: Staking MinValidatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Staking MinCommission (r:1 w:0) - /// Proof: Staking MinCommission (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:1 w:1) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking MaxValidatorsCount (r:1 w:0) - /// Proof: Staking MaxValidatorsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:0) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:1 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:1 w:1) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// 
Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking CounterForValidators (r:1 w:1) - /// Proof: Staking CounterForValidators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinValidatorBond` (r:1 w:0) + /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinCommission` (r:1 w:0) + /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:1) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxValidatorsCount` (r:1 w:0) + /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:1 w:1) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForValidators` (r:1 w:1) + /// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn validate() -> Weight { // Proof Size summary in bytes: - // Measured: `1343` + // Measured: `1301` // Estimated: `4556` - // Minimum execution time: 57_763_000 picoseconds. - Weight::from_parts(59_394_000, 0) + // Minimum execution time: 48_236_000 picoseconds. + Weight::from_parts(49_518_000, 0) .saturating_add(Weight::from_parts(0, 4556)) .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(5)) } - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:128 w:128) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:128 w:128) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) /// The range of component `k` is `[1, 128]`. fn kick(k: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1248 + k * (569 ±0)` + // Measured: `1243 + k * (569 ±0)` // Estimated: `4556 + k * (3033 ±0)` - // Minimum execution time: 35_501_000 picoseconds. - Weight::from_parts(32_260_525, 0) + // Minimum execution time: 28_280_000 picoseconds. 
+ Weight::from_parts(29_182_740, 0) .saturating_add(Weight::from_parts(0, 4556)) - // Standard Error: 34_554 - .saturating_add(Weight::from_parts(10_625_386, 0).saturating_mul(k.into())) + // Standard Error: 6_102 + .saturating_add(Weight::from_parts(6_412_107, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) .saturating_add(Weight::from_parts(0, 3033).saturating_mul(k.into())) } - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking MinNominatorBond (r:1 w:0) - /// Proof: Staking MinNominatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:1) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking MaxNominatorsCount (r:1 w:0) - /// Proof: Staking MaxNominatorsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:17 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:1 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:2 w:2) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking CounterForNominators (r:1 w:1) - /// Proof: Staking CounterForNominators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:1 w:0) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxNominatorsCount` (r:1 w:0) + /// Proof: `Staking::MaxNominatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:17 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, 
mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// The range of component `n` is `[1, 16]`. fn nominate(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1839 + n * (102 ±0)` + // Measured: `1797 + n * (102 ±0)` // Estimated: `6248 + n * (2520 ±0)` - // Minimum execution time: 67_970_000 picoseconds. - Weight::from_parts(65_110_939, 0) + // Minimum execution time: 59_846_000 picoseconds. + Weight::from_parts(58_029_857, 0) .saturating_add(Weight::from_parts(0, 6248)) - // Standard Error: 32_193 - .saturating_add(Weight::from_parts(4_688_614, 0).saturating_mul(n.into())) + // Standard Error: 15_967 + .saturating_add(Weight::from_parts(3_898_764, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(6)) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(n.into())) } - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:1 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:1) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking CounterForNominators (r:1 w:1) - /// Proof: Staking CounterForNominators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:2 w:2) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: 
`VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn chill() -> Weight { // Proof Size summary in bytes: - // Measured: `1675` + // Measured: `1581` // Estimated: `6248` - // Minimum execution time: 59_515_000 picoseconds. - Weight::from_parts(62_462_000, 0) + // Minimum execution time: 51_223_000 picoseconds. + Weight::from_parts(52_310_000, 0) .saturating_add(Weight::from_parts(0, 6248)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(6)) } - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking Payee (r:0 w:1) - /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn set_payee() -> Weight { // Proof Size summary in bytes: - // Measured: `771` + // Measured: `865` // Estimated: `4556` - // Minimum execution time: 13_943_000 picoseconds. - Weight::from_parts(14_384_000, 0) + // Minimum execution time: 15_762_000 picoseconds. + Weight::from_parts(16_381_000, 0) .saturating_add(Weight::from_parts(0, 4556)) - .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Staking Bonded (r:1 w:1) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:2 w:2) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - fn set_controller() -> Weight { + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:1 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + fn update_payee() -> Weight { // Proof Size summary in bytes: - // Measured: `870` - // Estimated: `8122` - // Minimum execution time: 21_212_000 picoseconds. - Weight::from_parts(22_061_000, 0) - .saturating_add(Weight::from_parts(0, 8122)) + // Measured: `932` + // Estimated: `4556` + // Minimum execution time: 21_904_000 picoseconds. 
+ Weight::from_parts(22_373_000, 0) + .saturating_add(Weight::from_parts(0, 4556)) .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:2) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + fn set_controller() -> Weight { + // Proof Size summary in bytes: + // Measured: `865` + // Estimated: `4556` + // Minimum execution time: 18_869_000 picoseconds. + Weight::from_parts(19_422_000, 0) + .saturating_add(Weight::from_parts(0, 4556)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Staking ValidatorCount (r:0 w:1) - /// Proof: Staking ValidatorCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Staking::ValidatorCount` (r:0 w:1) + /// Proof: `Staking::ValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn set_validator_count() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_977_000 picoseconds. - Weight::from_parts(3_217_000, 0) + // Minimum execution time: 2_205_000 picoseconds. + Weight::from_parts(2_320_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Staking ForceEra (r:0 w:1) - /// Proof: Staking ForceEra (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) + /// Storage: `Staking::ForceEra` (r:0 w:1) + /// Proof: `Staking::ForceEra` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) fn force_no_eras() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_152_000 picoseconds. - Weight::from_parts(9_949_000, 0) + // Minimum execution time: 7_179_000 picoseconds. + Weight::from_parts(7_843_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Staking ForceEra (r:0 w:1) - /// Proof: Staking ForceEra (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) + /// Storage: `Staking::ForceEra` (r:0 w:1) + /// Proof: `Staking::ForceEra` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) fn force_new_era() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_509_000 picoseconds. - Weight::from_parts(9_838_000, 0) + // Minimum execution time: 7_206_000 picoseconds. + Weight::from_parts(7_829_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Staking ForceEra (r:0 w:1) - /// Proof: Staking ForceEra (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) + /// Storage: `Staking::ForceEra` (r:0 w:1) + /// Proof: `Staking::ForceEra` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) fn force_new_era_always() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_480_000 picoseconds. - Weight::from_parts(9_755_000, 0) + // Minimum execution time: 7_414_000 picoseconds. 
+ Weight::from_parts(7_770_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Staking Invulnerables (r:0 w:1) - /// Proof Skipped: Staking Invulnerables (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Staking::Invulnerables` (r:0 w:1) + /// Proof: `Staking::Invulnerables` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `v` is `[0, 1000]`. fn set_invulnerables(v: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_140_000 picoseconds. - Weight::from_parts(3_438_665, 0) + // Minimum execution time: 2_256_000 picoseconds. + Weight::from_parts(2_645_840, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 93 - .saturating_add(Weight::from_parts(15_688, 0).saturating_mul(v.into())) + // Standard Error: 37 + .saturating_add(Weight::from_parts(10_207, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Staking Bonded (r:1 w:1) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking SlashingSpans (r:1 w:1) - /// Proof Skipped: Staking SlashingSpans (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking Validators (r:1 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:1) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking CounterForNominators (r:1 w:1) - /// Proof: Staking CounterForNominators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:2 w:2) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:0 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking Payee (r:0 w:1) - /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Staking SpanSlash (r:0 w:100) - /// Proof: Staking SpanSlash (max_values: None, max_size: Some(76), added: 2551, mode: MaxEncodedLen) + /// Storage: `Staking::SlashingSpans` (r:1 w:1) + /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: 
`MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Staking::SpanSlash` (r:0 w:100) + /// Proof: `Staking::SpanSlash` (`max_values`: None, `max_size`: Some(76), added: 2551, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn force_unstake(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1947 + s * (4 ±0)` + // Measured: `2127 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 86_729_000 picoseconds. - Weight::from_parts(93_633_668, 0) + // Minimum execution time: 81_032_000 picoseconds. + Weight::from_parts(88_297_596, 0) .saturating_add(Weight::from_parts(0, 6248)) - // Standard Error: 6_522 - .saturating_add(Weight::from_parts(1_421_038, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(12)) + // Standard Error: 3_070 + .saturating_add(Weight::from_parts(1_207_207, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(13)) .saturating_add(T::DbWeight::get().writes(12)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) } - /// Storage: Staking UnappliedSlashes (r:1 w:1) - /// Proof Skipped: Staking UnappliedSlashes (max_values: None, max_size: None, mode: Measured) + /// Storage: `Staking::UnappliedSlashes` (r:1 w:1) + /// Proof: `Staking::UnappliedSlashes` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `s` is `[1, 1000]`. fn cancel_deferred_slash(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `66606` - // Estimated: `70071` - // Minimum execution time: 135_155_000 picoseconds. 
- Weight::from_parts(960_317_735, 0) - .saturating_add(Weight::from_parts(0, 70071)) - // Standard Error: 59_264 - .saturating_add(Weight::from_parts(4_884_888, 0).saturating_mul(s.into())) + // Measured: `66639` + // Estimated: `70104` + // Minimum execution time: 131_456_000 picoseconds. + Weight::from_parts(935_254_517, 0) + .saturating_add(Weight::from_parts(0, 70104)) + // Standard Error: 57_806 + .saturating_add(Weight::from_parts(4_823_189, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ErasValidatorReward (r:1 w:0) - /// Proof: Staking ErasValidatorReward (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:65 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:1 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking ErasStakersClipped (r:1 w:0) - /// Proof Skipped: Staking ErasStakersClipped (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking ErasRewardPoints (r:1 w:0) - /// Proof Skipped: Staking ErasRewardPoints (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking ErasValidatorPrefs (r:1 w:0) - /// Proof: Staking ErasValidatorPrefs (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Staking Payee (r:65 w:0) - /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: System Account (r:65 w:65) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// The range of component `n` is `[0, 64]`. - fn payout_stakers_dead_controller(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `5773 + n * (151 ±0)` - // Estimated: `8579 + n * (2603 ±0)` - // Minimum execution time: 92_788_000 picoseconds. 
- Weight::from_parts(129_527_249, 0) - .saturating_add(Weight::from_parts(0, 8579)) - // Standard Error: 73_346 - .saturating_add(Weight::from_parts(33_413_624, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(9)) - .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes(2)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2603).saturating_mul(n.into())) - } - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ErasValidatorReward (r:1 w:0) - /// Proof: Staking ErasValidatorReward (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:65 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:65 w:65) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking ErasStakersClipped (r:1 w:0) - /// Proof Skipped: Staking ErasStakersClipped (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking ErasRewardPoints (r:1 w:0) - /// Proof Skipped: Staking ErasRewardPoints (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking ErasValidatorPrefs (r:1 w:0) - /// Proof: Staking ErasValidatorPrefs (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Staking Payee (r:65 w:0) - /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: System Account (r:65 w:65) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:65 w:65) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:65 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Staking::Bonded` (r:65 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:65 w:65) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStakersClipped` (r:1 w:0) + /// Proof: `Staking::ErasStakersClipped` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ErasStakersOverview` (r:1 w:0) + /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) + /// Storage: `Staking::ClaimedRewards` (r:1 w:1) + /// Proof: `Staking::ClaimedRewards` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasValidatorReward` (r:1 w:0) + /// Proof: `Staking::ErasValidatorReward` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:65 w:65) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:65 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, 
`max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:65 w:65) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStakersPaged` (r:1 w:0) + /// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ErasRewardPoints` (r:1 w:0) + /// Proof: `Staking::ErasRewardPoints` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ErasValidatorPrefs` (r:1 w:0) + /// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:65 w:0) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 64]`. fn payout_stakers_alive_staked(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `8056 + n * (396 ±0)` - // Estimated: `10634 + n * (3774 ±0)` - // Minimum execution time: 118_795_000 picoseconds. - Weight::from_parts(181_663_036, 0) - .saturating_add(Weight::from_parts(0, 10634)) - // Standard Error: 132_894 - .saturating_add(Weight::from_parts(51_369_596, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(11)) + // Measured: `8249 + n * (396 ±0)` + // Estimated: `10779 + n * (3774 ±0)` + // Minimum execution time: 129_233_000 picoseconds. + Weight::from_parts(165_096_042, 0) + .saturating_add(Weight::from_parts(0, 10779)) + // Standard Error: 29_598 + .saturating_add(Weight::from_parts(40_716_425, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(14)) .saturating_add(T::DbWeight::get().reads((6_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 3774).saturating_mul(n.into())) } - /// Storage: Staking Ledger (r:1 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:3 w:3) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:1 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:2 w:2) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: 
`VoterList::ListNodes` (r:3 w:3) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:2 w:2) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) /// The range of component `l` is `[1, 32]`. fn rebond(l: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1960 + l * (5 ±0)` + // Measured: `1922 + l * (5 ±0)` // Estimated: `8877` - // Minimum execution time: 88_870_000 picoseconds. - Weight::from_parts(92_783_195, 0) + // Minimum execution time: 77_223_000 picoseconds. + Weight::from_parts(80_026_259, 0) .saturating_add(Weight::from_parts(0, 8877)) - // Standard Error: 7_412 - .saturating_add(Weight::from_parts(49_785, 0).saturating_mul(l.into())) + // Standard Error: 4_493 + .saturating_add(Weight::from_parts(52_909, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(7)) } - /// Storage: Staking Bonded (r:1 w:1) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:1 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking SlashingSpans (r:1 w:1) - /// Proof Skipped: Staking SlashingSpans (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking Validators (r:1 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:1) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking CounterForNominators (r:1 w:1) - /// Proof: Staking CounterForNominators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:2 w:2) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: Staking Payee (r:0 w:1) - /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Staking SpanSlash (r:0 w:100) - /// Proof: Staking SpanSlash (max_values: None, max_size: Some(76), added: 2551, mode: MaxEncodedLen) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::SlashingSpans` (r:1 w:1) + /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 
3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Staking::SpanSlash` (r:0 w:100) + /// Proof: `Staking::SpanSlash` (`max_values`: None, `max_size`: Some(76), added: 2551, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 100]`. fn reap_stash(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2221 + s * (4 ±0)` + // Measured: `2127 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 102_112_000 picoseconds. - Weight::from_parts(103_755_459, 0) + // Minimum execution time: 89_871_000 picoseconds. 
+ Weight::from_parts(92_313_331, 0) .saturating_add(Weight::from_parts(0, 6248)) - // Standard Error: 6_107 - .saturating_add(Weight::from_parts(1_436_139, 0).saturating_mul(s.into())) + // Standard Error: 3_321 + .saturating_add(Weight::from_parts(1_243_347, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(11)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) } - /// Storage: VoterList CounterForListNodes (r:1 w:0) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:178 w:0) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:110 w:0) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:110 w:0) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:11 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:110 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:110 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: System BlockWeight (r:1 w:1) - /// Proof: System BlockWeight (max_values: Some(1), max_size: Some(48), added: 543, mode: MaxEncodedLen) - /// Storage: Staking CounterForValidators (r:1 w:0) - /// Proof: Staking CounterForValidators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ValidatorCount (r:1 w:0) - /// Proof: Staking ValidatorCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking MinimumValidatorCount (r:1 w:0) - /// Proof: Staking MinimumValidatorCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking CurrentEra (r:1 w:1) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ErasStakersClipped (r:0 w:10) - /// Proof Skipped: Staking ErasStakersClipped (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking ErasValidatorPrefs (r:0 w:10) - /// Proof: Staking ErasValidatorPrefs (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Staking ErasStakers (r:0 w:10) - /// Proof Skipped: Staking ErasStakers (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking ErasTotalStake (r:0 w:1) - /// Proof: Staking ErasTotalStake (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen) - /// Storage: Staking ErasStartSessionIndex (r:0 w:1) - /// Proof: Staking ErasStartSessionIndex (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: Staking MinimumActiveStake (r:0 w:1) - /// Proof: Staking MinimumActiveStake (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:0) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: 
`VoterList::ListBags` (r:178 w:0) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:110 w:0) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:110 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:110 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:110 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:11 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForValidators` (r:1 w:0) + /// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ValidatorCount` (r:1 w:0) + /// Proof: `Staking::ValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinimumValidatorCount` (r:1 w:0) + /// Proof: `Staking::MinimumValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:1) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasValidatorPrefs` (r:0 w:10) + /// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStakersPaged` (r:0 w:20) + /// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ErasStakersOverview` (r:0 w:10) + /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasTotalStake` (r:0 w:1) + /// Proof: `Staking::ErasTotalStake` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStartSessionIndex` (r:0 w:1) + /// Proof: `Staking::ErasStartSessionIndex` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinimumActiveStake` (r:0 w:1) + /// Proof: `Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// The range of component `v` is `[1, 10]`. /// The range of component `n` is `[0, 100]`. fn new_era(v: u32, n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + n * (716 ±0) + v * (3594 ±0)` // Estimated: `456136 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 547_465_000 picoseconds. - Weight::from_parts(557_541_000, 0) + // Minimum execution time: 518_819_000 picoseconds. 
+ Weight::from_parts(522_108_000, 0) .saturating_add(Weight::from_parts(0, 456136)) - // Standard Error: 2_380_806 - .saturating_add(Weight::from_parts(78_379_807, 0).saturating_mul(v.into())) - // Standard Error: 237_234 - .saturating_add(Weight::from_parts(22_772_283, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(185)) + // Standard Error: 1_987_848 + .saturating_add(Weight::from_parts(64_855_377, 0).saturating_mul(v.into())) + // Standard Error: 198_078 + .saturating_add(Weight::from_parts(18_343_485, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(184)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes(5)) + .saturating_add(T::DbWeight::get().writes(8)) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(v.into()))) .saturating_add(Weight::from_parts(0, 3566).saturating_mul(n.into())) .saturating_add(Weight::from_parts(0, 3566).saturating_mul(v.into())) } - /// Storage: VoterList CounterForListNodes (r:1 w:0) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:178 w:0) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:2000 w:0) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:2000 w:0) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:1000 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:2000 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:2000 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: System BlockWeight (r:1 w:1) - /// Proof: System BlockWeight (max_values: Some(1), max_size: Some(48), added: 543, mode: MaxEncodedLen) - /// Storage: Staking MinimumActiveStake (r:0 w:1) - /// Proof: Staking MinimumActiveStake (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:0) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:178 w:0) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2000 w:0) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:2000 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:2000 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:2000 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1000 w:0) + /// Proof: `Staking::Validators` 
(`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinimumActiveStake` (r:0 w:1) + /// Proof: `Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// The range of component `v` is `[500, 1000]`. /// The range of component `n` is `[500, 1000]`. fn get_npos_voters(v: u32, n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `3151 + n * (907 ±0) + v * (391 ±0)` + // Measured: `3108 + n * (907 ±0) + v * (391 ±0)` // Estimated: `456136 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 39_710_080_000 picoseconds. - Weight::from_parts(42_191_823_000, 0) + // Minimum execution time: 34_976_277_000 picoseconds. + Weight::from_parts(35_245_501_000, 0) .saturating_add(Weight::from_parts(0, 456136)) - // Standard Error: 506_609 - .saturating_add(Weight::from_parts(7_688_462, 0).saturating_mul(v.into())) - // Standard Error: 506_609 - .saturating_add(Weight::from_parts(6_303_908, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(180)) + // Standard Error: 386_461 + .saturating_add(Weight::from_parts(5_145_210, 0).saturating_mul(v.into())) + // Standard Error: 386_461 + .saturating_add(Weight::from_parts(3_762_623, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(179)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(T::DbWeight::get().writes(1)) .saturating_add(Weight::from_parts(0, 3566).saturating_mul(n.into())) .saturating_add(Weight::from_parts(0, 3566).saturating_mul(v.into())) } - /// Storage: Staking CounterForValidators (r:1 w:0) - /// Proof: Staking CounterForValidators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:1001 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: System BlockWeight (r:1 w:1) - /// Proof: System BlockWeight (max_values: Some(1), max_size: Some(48), added: 543, mode: MaxEncodedLen) + /// Storage: `Staking::CounterForValidators` (r:1 w:0) + /// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1001 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// The range of component `v` is `[500, 1000]`. fn get_npos_targets(v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `951 + v * (50 ±0)` + // Measured: `946 + v * (50 ±0)` // Estimated: `3510 + v * (2520 ±0)` - // Minimum execution time: 2_603_304_000 picoseconds. - Weight::from_parts(481_860_383, 0) + // Minimum execution time: 2_577_411_000 picoseconds. 
+ Weight::from_parts(86_073_486, 0) .saturating_add(Weight::from_parts(0, 3510)) - // Standard Error: 55_189 - .saturating_add(Weight::from_parts(4_786_173, 0).saturating_mul(v.into())) - .saturating_add(T::DbWeight::get().reads(3)) + // Standard Error: 8_363 + .saturating_add(Weight::from_parts(5_074_828, 0).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into()))) - .saturating_add(T::DbWeight::get().writes(1)) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(v.into())) } - /// Storage: Staking MinCommission (r:0 w:1) - /// Proof: Staking MinCommission (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking MinValidatorBond (r:0 w:1) - /// Proof: Staking MinValidatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Staking MaxValidatorsCount (r:0 w:1) - /// Proof: Staking MaxValidatorsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ChillThreshold (r:0 w:1) - /// Proof: Staking ChillThreshold (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) - /// Storage: Staking MaxNominatorsCount (r:0 w:1) - /// Proof: Staking MaxNominatorsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking MinNominatorBond (r:0 w:1) - /// Proof: Staking MinNominatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: `Staking::MinCommission` (r:0 w:1) + /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinValidatorBond` (r:0 w:1) + /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxValidatorsCount` (r:0 w:1) + /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ChillThreshold` (r:0 w:1) + /// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxNominatorsCount` (r:0 w:1) + /// Proof: `Staking::MaxNominatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:0 w:1) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn set_staking_configs_all_set() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_453_000 picoseconds. - Weight::from_parts(6_857_000, 0) + // Minimum execution time: 3_539_000 picoseconds. 
+ Weight::from_parts(3_903_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(6)) } - /// Storage: Staking MinCommission (r:0 w:1) - /// Proof: Staking MinCommission (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking MinValidatorBond (r:0 w:1) - /// Proof: Staking MinValidatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Staking MaxValidatorsCount (r:0 w:1) - /// Proof: Staking MaxValidatorsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ChillThreshold (r:0 w:1) - /// Proof: Staking ChillThreshold (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) - /// Storage: Staking MaxNominatorsCount (r:0 w:1) - /// Proof: Staking MaxNominatorsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking MinNominatorBond (r:0 w:1) - /// Proof: Staking MinNominatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: `Staking::MinCommission` (r:0 w:1) + /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinValidatorBond` (r:0 w:1) + /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxValidatorsCount` (r:0 w:1) + /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ChillThreshold` (r:0 w:1) + /// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxNominatorsCount` (r:0 w:1) + /// Proof: `Staking::MaxNominatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:0 w:1) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn set_staking_configs_all_remove() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_037_000 picoseconds. - Weight::from_parts(6_303_000, 0) + // Minimum execution time: 3_244_000 picoseconds. 
+ Weight::from_parts(3_450_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(6)) } - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:1) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking ChillThreshold (r:1 w:0) - /// Proof: Staking ChillThreshold (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) - /// Storage: Staking MaxNominatorsCount (r:1 w:0) - /// Proof: Staking MaxNominatorsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking CounterForNominators (r:1 w:1) - /// Proof: Staking CounterForNominators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking MinNominatorBond (r:1 w:0) - /// Proof: Staking MinNominatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:1 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:2 w:2) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::ChillThreshold` (r:1 w:0) + /// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxNominatorsCount` (r:1 w:0) + /// Proof: `Staking::MaxNominatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:1 w:0) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn chill_other() -> Weight { // Proof Size summary in bytes: - // Measured: `1798` + // Measured: `1704` // Estimated: `6248` - // Minimum execution time: 72_578_000 
picoseconds. - Weight::from_parts(74_232_000, 0) + // Minimum execution time: 62_606_000 picoseconds. + Weight::from_parts(64_678_000, 0) .saturating_add(Weight::from_parts(0, 6248)) .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(6)) } - /// Storage: Staking MinCommission (r:1 w:0) - /// Proof: Staking MinCommission (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:1 w:1) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) + /// Storage: `Staking::MinCommission` (r:1 w:0) + /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:1) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) fn force_apply_min_commission() -> Weight { // Proof Size summary in bytes: - // Measured: `661` + // Measured: `658` // Estimated: `3510` - // Minimum execution time: 13_066_000 picoseconds. - Weight::from_parts(13_421_000, 0) + // Minimum execution time: 11_490_000 picoseconds. + Weight::from_parts(11_867_000, 0) .saturating_add(Weight::from_parts(0, 3510)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Staking MinCommission (r:0 w:1) - /// Proof: Staking MinCommission (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Staking::MinCommission` (r:0 w:1) + /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn set_min_commission() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_057_000 picoseconds. - Weight::from_parts(3_488_000, 0) + // Minimum execution time: 2_125_000 picoseconds. + Weight::from_parts(2_337_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/polkadot/runtime/westend/src/weights/pallet_sudo.rs b/polkadot/runtime/westend/src/weights/pallet_sudo.rs index 8a220173ee57a45dd122c1aad020151c5bb5f1e6..e9ab3ad37a4cca6483dc7fdeb98562dd6f1937e6 100644 --- a/polkadot/runtime/westend/src/weights/pallet_sudo.rs +++ b/polkadot/runtime/westend/src/weights/pallet_sudo.rs @@ -17,27 +17,25 @@ //! Autogenerated weights for `pallet_sudo` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner--ss9ysm1-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024
// Executed Command:
-// ./target/production/polkadot
+// target/production/polkadot
// benchmark
// pallet
-// --chain=westend-dev
// --steps=50
// --repeat=20
-// --no-storage-info
-// --no-median-slopes
-// --no-min-squares
-// --pallet=pallet_sudo
// --extrinsic=*
-// --execution=wasm
// --wasm-execution=compiled
-// --header=./file_header.txt
-// --output=./runtime/westend/src/weights/
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=pallet_sudo
+// --chain=westend-dev
+// --header=./polkadot/file_header.txt
+// --output=./polkadot/runtime/westend/src/weights/
#![cfg_attr(rustfmt, rustfmt_skip)]
#![allow(unused_parens)]
@@ -50,38 +48,50 @@ use core::marker::PhantomData;
/// Weight functions for `pallet_sudo`.
pub struct WeightInfo<T>(PhantomData<T>);
impl<T: frame_system::Config> pallet_sudo::WeightInfo for WeightInfo<T> {
- /// Storage: Sudo Key (r:1 w:1)
- /// Proof: Sudo Key (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen)
+ /// Storage: `Sudo::Key` (r:1 w:1)
+ /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
fn set_key() -> Weight {
// Proof Size summary in bytes:
// Measured: `132`
// Estimated: `1517`
- // Minimum execution time: 12_360_000 picoseconds.
- Weight::from_parts(12_803_000, 0)
+ // Minimum execution time: 8_750_000 picoseconds.
+ Weight::from_parts(9_102_000, 0)
.saturating_add(Weight::from_parts(0, 1517))
.saturating_add(T::DbWeight::get().reads(1))
.saturating_add(T::DbWeight::get().writes(1))
}
- /// Storage: Sudo Key (r:1 w:0)
- /// Proof: Sudo Key (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen)
+ /// Storage: `Sudo::Key` (r:1 w:0)
+ /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
fn sudo() -> Weight {
// Proof Size summary in bytes:
// Measured: `132`
// Estimated: `1517`
- // Minimum execution time: 12_158_000 picoseconds.
- Weight::from_parts(12_506_000, 0)
+ // Minimum execution time: 9_607_000 picoseconds.
+ Weight::from_parts(10_139_000, 0)
.saturating_add(Weight::from_parts(0, 1517))
.saturating_add(T::DbWeight::get().reads(1))
}
- /// Storage: Sudo Key (r:1 w:0)
- /// Proof: Sudo Key (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen)
+ /// Storage: `Sudo::Key` (r:1 w:0)
+ /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
fn sudo_as() -> Weight {
// Proof Size summary in bytes:
// Measured: `132`
// Estimated: `1517`
- // Minimum execution time: 12_286_000 picoseconds.
- Weight::from_parts(12_664_000, 0)
+ // Minimum execution time: 9_886_000 picoseconds.
+ Weight::from_parts(10_175_000, 0)
.saturating_add(Weight::from_parts(0, 1517))
.saturating_add(T::DbWeight::get().reads(1))
}
+ /// Storage: `Sudo::Key` (r:1 w:1)
+ /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
+ fn remove_key() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `132`
+ // Estimated: `1517`
+ // Minimum execution time: 7_843_000 picoseconds.
+ Weight::from_parts(8_152_000, 0) + .saturating_add(Weight::from_parts(0, 1517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/polkadot/runtime/westend/src/weights/pallet_vesting.rs b/polkadot/runtime/westend/src/weights/pallet_vesting.rs index 4ccd524ffc28de68a8a19f5e7e78d495f2fa00bd..dfd87d1c550eb7e75d77300df72160fb7cefb569 100644 --- a/polkadot/runtime/westend/src/weights/pallet_vesting.rs +++ b/polkadot/runtime/westend/src/weights/pallet_vesting.rs @@ -238,4 +238,28 @@ impl pallet_vesting::WeightInfo for WeightInfo { .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } + + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `l` is `[0, 49]`. + /// The range of component `s` is `[2, 28]`. + fn force_remove_vesting_schedule(l: u32, s: u32) -> Weight { + // Proof Size summary in bytes: + // Measured: `555 + l * (25 ±0) + s * (36 ±0)` + // Estimated: `4764` + // Minimum execution time: 41_497_000 picoseconds. + Weight::from_parts(38_763_834, 4764) + // Standard Error: 2_030 + .saturating_add(Weight::from_parts(99_580, 0).saturating_mul(l.into())) + // Standard Error: 3_750 + .saturating_add(Weight::from_parts(132_188, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) + } } diff --git a/polkadot/runtime/westend/src/weights/pallet_xcm.rs b/polkadot/runtime/westend/src/weights/pallet_xcm.rs index 7f2a1de44e93909a718c83e546024572ac7347dc..cca4bdbd91e309f29d5d09c730c1a95c4e34138d 100644 --- a/polkadot/runtime/westend/src/weights/pallet_xcm.rs +++ b/polkadot/runtime/westend/src/weights/pallet_xcm.rs @@ -17,15 +17,16 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner--ss9ysm1-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=westend-dev // --steps=50 // --repeat=20 // --no-storage-info @@ -35,12 +36,8 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/var/lib/gitlab-runner/builds/zyw4fam_/0/parity/mirrors/polkadot/.git/.artifacts/bench.json -// --pallet=pallet_xcm -// --chain=westend-dev -// --header=./file_header.txt -// --output=./runtime/westend/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -53,44 +50,42 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { - /// Storage: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Proof Skipped: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn send() -> Weight { // Proof Size summary in bytes: - // Measured: `169` - // Estimated: `3634` - // Minimum execution time: 33_628_000 picoseconds. - Weight::from_parts(34_633_000, 0) - .saturating_add(Weight::from_parts(0, 3634)) - .saturating_add(T::DbWeight::get().reads(7)) - .saturating_add(T::DbWeight::get().writes(4)) + // Measured: `109` + // Estimated: `3574` + // Minimum execution time: 28_098_000 picoseconds. + Weight::from_parts(28_887_000, 0) + .saturating_add(Weight::from_parts(0, 3574)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) } fn teleport_assets() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 21_535_000 picoseconds. - Weight::from_parts(21_936_000, 0) + // Minimum execution time: 17_609_000 picoseconds. + Weight::from_parts(18_000_000, 0) .saturating_add(Weight::from_parts(0, 0)) } fn reserve_transfer_assets() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 21_576_000 picoseconds. - Weight::from_parts(21_942_000, 0) + // Minimum execution time: 17_007_000 picoseconds. 
+ Weight::from_parts(17_471_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: Benchmark Override (r:0 w:0) - /// Proof Skipped: Benchmark Override (max_values: None, max_size: None, mode: Measured) + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) fn execute() -> Weight { // Proof Size summary in bytes: // Measured: `0` @@ -99,14 +94,14 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: XcmPallet SupportedVersion (r:0 w:1) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::SupportedVersion` (r:0 w:1) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_764_000 picoseconds. - Weight::from_parts(9_927_000, 0) + // Minimum execution time: 7_444_000 picoseconds. + Weight::from_parts(7_671_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -114,171 +109,189 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_824_000 picoseconds. - Weight::from_parts(2_935_000, 0) + // Minimum execution time: 2_126_000 picoseconds. + Weight::from_parts(2_253_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: XcmPallet VersionNotifiers (r:1 w:1) - /// Proof Skipped: XcmPallet VersionNotifiers (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet QueryCounter (r:1 w:1) - /// Proof Skipped: XcmPallet QueryCounter (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Proof Skipped: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet Queries (r:0 w:1) - /// Proof Skipped: XcmPallet Queries (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifiers` (r:1 w:1) + /// Proof: `XcmPallet::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::QueryCounter` (r:1 w:1) + /// Proof: `XcmPallet::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: 
`Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::Queries` (r:0 w:1) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_subscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `169` - // Estimated: `3634` - // Minimum execution time: 38_436_000 picoseconds. - Weight::from_parts(39_300_000, 0) - .saturating_add(Weight::from_parts(0, 3634)) - .saturating_add(T::DbWeight::get().reads(9)) - .saturating_add(T::DbWeight::get().writes(7)) + // Measured: `109` + // Estimated: `3574` + // Minimum execution time: 31_318_000 picoseconds. + Weight::from_parts(32_413_000, 0) + .saturating_add(Weight::from_parts(0, 3574)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(5)) } - /// Storage: XcmPallet VersionNotifiers (r:1 w:1) - /// Proof Skipped: XcmPallet VersionNotifiers (max_values: None, max_size: None, mode: Measured) - /// Storage: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Proof Skipped: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet Queries (r:0 w:1) - /// Proof Skipped: XcmPallet Queries (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifiers` (r:1 w:1) + /// Proof: `XcmPallet::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::Queries` (r:0 w:1) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_unsubscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `361` - // Estimated: `3826` - // Minimum execution time: 41_600_000 picoseconds. - Weight::from_parts(42_703_000, 0) - .saturating_add(Weight::from_parts(0, 3826)) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(6)) + // Measured: `289` + // Estimated: `3754` + // Minimum execution time: 35_282_000 picoseconds. 
+ Weight::from_parts(35_969_000, 0) + .saturating_add(Weight::from_parts(0, 3754)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: XcmPallet XcmExecutionSuspended (r:0 w:1) - /// Proof Skipped: XcmPallet XcmExecutionSuspended (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `XcmPallet::XcmExecutionSuspended` (r:0 w:1) + /// Proof: `XcmPallet::XcmExecutionSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn force_suspension() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_792_000 picoseconds. - Weight::from_parts(2_958_000, 0) + // Minimum execution time: 2_247_000 picoseconds. + Weight::from_parts(2_381_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: XcmPallet SupportedVersion (r:4 w:2) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::SupportedVersion` (r:4 w:2) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: - // Measured: `229` - // Estimated: `11119` - // Minimum execution time: 17_640_000 picoseconds. - Weight::from_parts(18_011_000, 0) - .saturating_add(Weight::from_parts(0, 11119)) + // Measured: `26` + // Estimated: `10916` + // Minimum execution time: 14_512_000 picoseconds. + Weight::from_parts(15_042_000, 0) + .saturating_add(Weight::from_parts(0, 10916)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: XcmPallet VersionNotifiers (r:4 w:2) - /// Proof Skipped: XcmPallet VersionNotifiers (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifiers` (r:4 w:2) + /// Proof: `XcmPallet::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: - // Measured: `233` - // Estimated: `11123` - // Minimum execution time: 17_325_000 picoseconds. - Weight::from_parts(17_896_000, 0) - .saturating_add(Weight::from_parts(0, 11123)) + // Measured: `30` + // Estimated: `10920` + // Minimum execution time: 14_659_000 picoseconds. + Weight::from_parts(15_164_000, 0) + .saturating_add(Weight::from_parts(0, 10920)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: XcmPallet VersionNotifyTargets (r:5 w:0) - /// Proof Skipped: XcmPallet VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:5 w:0) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `13608` - // Minimum execution time: 19_295_000 picoseconds. - Weight::from_parts(19_840_000, 0) - .saturating_add(Weight::from_parts(0, 13608)) + // Measured: `40` + // Estimated: `13405` + // Minimum execution time: 16_261_000 picoseconds. 
+ Weight::from_parts(16_986_000, 0) + .saturating_add(Weight::from_parts(0, 13405)) .saturating_add(T::DbWeight::get().reads(5)) } - /// Storage: XcmPallet VersionNotifyTargets (r:2 w:1) - /// Proof Skipped: XcmPallet VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) - /// Storage: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Proof Skipped: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:2 w:1) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_current_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `239` - // Estimated: `6179` - // Minimum execution time: 35_819_000 picoseconds. - Weight::from_parts(36_708_000, 0) - .saturating_add(Weight::from_parts(0, 6179)) - .saturating_add(T::DbWeight::get().reads(9)) - .saturating_add(T::DbWeight::get().writes(5)) + // Measured: `145` + // Estimated: `6085` + // Minimum execution time: 30_539_000 picoseconds. + Weight::from_parts(31_117_000, 0) + .saturating_add(Weight::from_parts(0, 6085)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: XcmPallet VersionNotifyTargets (r:3 w:0) - /// Proof Skipped: XcmPallet VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:3 w:0) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: - // Measured: `272` - // Estimated: `8687` - // Minimum execution time: 9_572_000 picoseconds. - Weight::from_parts(9_907_000, 0) - .saturating_add(Weight::from_parts(0, 8687)) + // Measured: `69` + // Estimated: `8484` + // Minimum execution time: 9_463_000 picoseconds. 
+ Weight::from_parts(9_728_000, 0) + .saturating_add(Weight::from_parts(0, 8484)) .saturating_add(T::DbWeight::get().reads(3)) } - /// Storage: XcmPallet VersionNotifyTargets (r:4 w:2) - /// Proof Skipped: XcmPallet VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:4 w:2) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `240` - // Estimated: `11130` - // Minimum execution time: 17_376_000 picoseconds. - Weight::from_parts(17_870_000, 0) - .saturating_add(Weight::from_parts(0, 11130)) + // Measured: `37` + // Estimated: `10927` + // Minimum execution time: 15_169_000 picoseconds. + Weight::from_parts(15_694_000, 0) + .saturating_add(Weight::from_parts(0, 10927)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: XcmPallet VersionNotifyTargets (r:4 w:2) - /// Proof Skipped: XcmPallet VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) - /// Storage: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Proof Skipped: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:4 w:2) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `11133` - // Minimum execution time: 43_468_000 picoseconds. - Weight::from_parts(44_327_000, 0) - .saturating_add(Weight::from_parts(0, 11133)) - .saturating_add(T::DbWeight::get().reads(11)) - .saturating_add(T::DbWeight::get().writes(6)) + // Measured: `149` + // Estimated: `11039` + // Minimum execution time: 37_549_000 picoseconds. 
+ Weight::from_parts(38_203_000, 0) + .saturating_add(Weight::from_parts(0, 11039)) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `XcmPallet::QueryCounter` (r:1 w:1) + /// Proof: `XcmPallet::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::Queries` (r:0 w:1) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn new_query() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `1485` + // Minimum execution time: 2_947_000 picoseconds. + Weight::from_parts(3_117_000, 0) + .saturating_add(Weight::from_parts(0, 1485)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `XcmPallet::Queries` (r:1 w:1) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn take_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `7576` + // Estimated: `11041` + // Minimum execution time: 24_595_000 picoseconds. + Weight::from_parts(24_907_000, 0) + .saturating_add(Weight::from_parts(0, 11041)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) } } diff --git a/polkadot/runtime/westend/src/weights/runtime_common_identity_migrator.rs b/polkadot/runtime/westend/src/weights/runtime_common_identity_migrator.rs new file mode 100644 index 0000000000000000000000000000000000000000..cec357453b67be400c0191ac7d5c12e6961a4bee --- /dev/null +++ b/polkadot/runtime/westend/src/weights/runtime_common_identity_migrator.rs @@ -0,0 +1,97 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `runtime_common::identity_migrator` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-11-07, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `sbtb`, CPU: `13th Gen Intel(R) Core(TM) i7-1365U` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot +// benchmark +// pallet +// --chain=rococo-dev +// --steps=2 +// --repeat=1 +// --pallet=runtime_common::identity_migrator +// --extrinsic=* +// --output=./migrator-release.rs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `runtime_common::identity_migrator`. 
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> runtime_common::identity_migrator::WeightInfo for WeightInfo<T> {
+ /// Storage: `Identity::IdentityOf` (r:1 w:1)
+ /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`)
+ /// Storage: `Identity::SubsOf` (r:1 w:1)
+ /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`)
+ /// Storage: `System::Account` (r:2 w:2)
+ /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+ /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0)
+ /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0)
+ /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1)
+ /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1)
+ /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Identity::SuperOf` (r:0 w:100)
+ /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`)
+ /// The range of component `r` is `[0, 20]`.
+ /// The range of component `s` is `[0, 100]`.
+ fn reap_identity(r: u32, s: u32, ) -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `7292 + r * (8 ±0) + s * (32 ±0)`
+ // Estimated: `11003 + r * (8 ±0) + s * (33 ±0)`
+ // Minimum execution time: 163_756_000 picoseconds.
+ Weight::from_parts(158_982_500, 0)
+ .saturating_add(Weight::from_parts(0, 11003))
+ // Standard Error: 1_143_629
+ .saturating_add(Weight::from_parts(238_675, 0).saturating_mul(r.into()))
+ // Standard Error: 228_725
+ .saturating_add(Weight::from_parts(1_529_645, 0).saturating_mul(s.into()))
+ .saturating_add(T::DbWeight::get().reads(8))
+ .saturating_add(T::DbWeight::get().writes(5))
+ .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into())))
+ .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into()))
+ .saturating_add(Weight::from_parts(0, 33).saturating_mul(s.into()))
+ }
+ /// Storage: `Identity::IdentityOf` (r:1 w:1)
+ /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`)
+ /// Storage: `System::Account` (r:1 w:1)
+ /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+ /// Storage: `Identity::SubsOf` (r:1 w:1)
+ /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`)
+ fn poke_deposit() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `7229`
+ // Estimated: `11003`
+ // Minimum execution time: 137_570_000 picoseconds.
+ Weight::from_parts(137_570_000, 0) + .saturating_add(Weight::from_parts(0, 11003)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } +} diff --git a/polkadot/runtime/westend/src/weights/runtime_parachains_configuration.rs b/polkadot/runtime/westend/src/weights/runtime_parachains_configuration.rs index 585dc9058f21ec29b15040470d4a4c663c18d303..3a4813b667c68ea6400c9ad58cea8fd1bfece5d0 100644 --- a/polkadot/runtime/westend/src/weights/runtime_parachains_configuration.rs +++ b/polkadot/runtime/westend/src/weights/runtime_parachains_configuration.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `runtime_parachains::configuration` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-08-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-fljshgub-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -31,11 +31,11 @@ // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json // --pallet=runtime_parachains::configuration // --chain=westend-dev -// --header=./file_header.txt -// --output=./runtime/westend/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -56,11 +56,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_block_number() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 9_616_000 picoseconds. - Weight::from_parts(9_961_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 8_065_000 picoseconds. + Weight::from_parts(8_389_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -72,11 +72,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_u32() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 9_587_000 picoseconds. - Weight::from_parts(9_964_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 8_038_000 picoseconds. 
+ Weight::from_parts(8_463_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -88,11 +88,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_option_u32() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 9_650_000 picoseconds. - Weight::from_parts(9_960_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_843_000 picoseconds. + Weight::from_parts(8_216_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -114,11 +114,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_balance() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 9_545_000 picoseconds. - Weight::from_parts(9_845_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_969_000 picoseconds. + Weight::from_parts(8_362_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -130,11 +130,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_executor_params() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 10_258_000 picoseconds. - Weight::from_parts(10_607_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 10_084_000 picoseconds. + Weight::from_parts(10_451_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -146,11 +146,27 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_perbill() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 9_502_000 picoseconds. - Weight::from_parts(9_902_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_948_000 picoseconds. 
+ Weight::from_parts(8_268_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn set_node_feature() -> Weight { + // Proof Size summary in bytes: + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 10_257_000 picoseconds. + Weight::from_parts(10_584_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/polkadot/runtime/westend/src/weights/xcm/mod.rs b/polkadot/runtime/westend/src/weights/xcm/mod.rs index c6fa6bb93eb6ab69b256597bbedd59210bfcbf67..d5b3d8257ba54cd665933d10a5518d73382327e1 100644 --- a/polkadot/runtime/westend/src/weights/xcm/mod.rs +++ b/polkadot/runtime/westend/src/weights/xcm/mod.rs @@ -94,7 +94,6 @@ impl XcmWeightInfo for WestendXcmWeight { assets.weigh_multi_assets(XcmBalancesWeight::::withdraw_asset()) } fn reserve_asset_deposited(assets: &MultiAssets) -> Weight { - // Westend doesn't support ReserveAssetDeposited, so this benchmark has a default weight assets.weigh_multi_assets(XcmBalancesWeight::::reserve_asset_deposited()) } fn receive_teleported_asset(assets: &MultiAssets) -> Weight { diff --git a/polkadot/runtime/westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/polkadot/runtime/westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs index b92749bfa15b314573052ed4472bc664cd39e3ae..9939f16aa29f84fb0df68f658fc8cef44c52eea3 100644 --- a/polkadot/runtime/westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +++ b/polkadot/runtime/westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs @@ -17,10 +17,10 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::fungible` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-17, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-10-03, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-gghbxkbs-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-nbnwcyh-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 1024 // Executed Command: // target/production/polkadot @@ -31,12 +31,12 @@ // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json // --pallet=pallet_xcm_benchmarks::fungible // --chain=westend-dev -// --header=./file_header.txt -// --template=./xcm/pallet-xcm-benchmarks/template.hbs -// --output=./runtime/westend/src/weights/xcm/ +// --header=./polkadot/file_header.txt +// --template=./polkadot/xcm/pallet-xcm-benchmarks/template.hbs +// --output=./polkadot/runtime/westend/src/weights/xcm/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -55,8 +55,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `3593` - // Minimum execution time: 24_887_000 picoseconds. - Weight::from_parts(25_361_000, 3593) + // Minimum execution time: 24_815_000 picoseconds. + Weight::from_parts(25_098_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -66,8 +66,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `6196` - // Minimum execution time: 52_408_000 picoseconds. - Weight::from_parts(53_387_000, 6196) + // Minimum execution time: 51_268_000 picoseconds. + Weight::from_parts(51_857_000, 6196) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -83,10 +83,10 @@ impl WeightInfo { /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn transfer_reserve_asset() -> Weight { // Proof Size summary in bytes: - // Measured: `177` + // Measured: `210` // Estimated: `6196` - // Minimum execution time: 74_753_000 picoseconds. - Weight::from_parts(76_838_000, 6196) + // Minimum execution time: 74_113_000 picoseconds. + Weight::from_parts(74_721_000, 6196) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -96,8 +96,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_000_000_000_000 picoseconds. - Weight::from_parts(2_000_000_000_000, 0) + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) } /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -109,10 +109,10 @@ impl WeightInfo { /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn initiate_reserve_withdraw() -> Weight { // Proof Size summary in bytes: - // Measured: `76` - // Estimated: `3541` - // Minimum execution time: 29_272_000 picoseconds. - Weight::from_parts(30_061_000, 3541) + // Measured: `109` + // Estimated: `3574` + // Minimum execution time: 28_919_000 picoseconds. + Weight::from_parts(29_703_000, 3574) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -122,8 +122,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3593` - // Minimum execution time: 23_112_000 picoseconds. - Weight::from_parts(23_705_000, 3593) + // Minimum execution time: 21_685_000 picoseconds. 
+ Weight::from_parts(22_528_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -133,8 +133,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 26_077_000 picoseconds. - Weight::from_parts(26_486_000, 3593) + // Minimum execution time: 25_192_000 picoseconds. + Weight::from_parts(25_445_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -150,10 +150,10 @@ impl WeightInfo { /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn deposit_reserve_asset() -> Weight { // Proof Size summary in bytes: - // Measured: `76` + // Measured: `109` // Estimated: `3593` - // Minimum execution time: 51_022_000 picoseconds. - Weight::from_parts(52_498_000, 3593) + // Minimum execution time: 49_349_000 picoseconds. + Weight::from_parts(50_476_000, 3593) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -169,10 +169,10 @@ impl WeightInfo { /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn initiate_teleport() -> Weight { // Proof Size summary in bytes: - // Measured: `76` + // Measured: `109` // Estimated: `3593` - // Minimum execution time: 53_062_000 picoseconds. - Weight::from_parts(54_300_000, 3593) + // Minimum execution time: 51_386_000 picoseconds. + Weight::from_parts(52_141_000, 3593) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) } diff --git a/polkadot/runtime/westend/src/xcm_config.rs b/polkadot/runtime/westend/src/xcm_config.rs index 66a2e2230ccd6652c3b9f5e8d54d8ca0dae7d122..9ab6470f6dae97cc4c436a0862c73ddafe6a8cb3 100644 --- a/polkadot/runtime/westend/src/xcm_config.rs +++ b/polkadot/runtime/westend/src/xcm_config.rs @@ -19,7 +19,7 @@ use super::{ parachains_origin, AccountId, AllPalletsWithSystem, Balances, Dmp, FellowshipAdmin, GeneralAdmin, ParaId, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, StakingAdmin, - TransactionByteFee, WeightToFee, XcmPallet, + TransactionByteFee, Treasury, WeightToFee, XcmPallet, }; use frame_support::{ @@ -44,6 +44,7 @@ use xcm_builder::{ DescribeFamily, HashedDescription, IsConcrete, MintLocation, OriginToPluralityVoice, SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, + XcmFeeManagerFromComponents, XcmFeeToAccount, }; use xcm_executor::XcmExecutor; @@ -53,6 +54,7 @@ parameter_types! { pub const UniversalLocation: InteriorMultiLocation = X1(GlobalConsensus(ThisNetwork::get())); pub CheckAccount: AccountId = XcmPallet::check_account(); pub LocalCheckAccount: (AccountId, MintLocation) = (CheckAccount::get(), MintLocation::Local); + pub TreasuryAccount: AccountId = Treasury::account_id(); /// The asset ID for the asset that we use to pay for message delivery fees. pub FeeAssetId: AssetId = Concrete(TokenLocation::get()); /// The base fee for the message delivery fees. @@ -95,29 +97,33 @@ type LocalOriginConverter = ( XcmPassthrough, ); +pub type PriceForChildParachainDelivery = + ExponentialPrice; + /// The XCM router. When we want to send an XCM message, we use this type. It amalgamates all of our /// individual routers. 
-pub type XcmRouter = WithUniqueTopic<( +pub type XcmRouter = WithUniqueTopic< // Only one router so far - use DMP to communicate with child parachains. - ChildParachainRouter< - Runtime, - XcmPallet, - ExponentialPrice<FeeAssetId, BaseDeliveryFee, TransactionByteFee, Dmp>, - >, -)>; + ChildParachainRouter<Runtime, XcmPallet, PriceForChildParachainDelivery>, +>; parameter_types! { - pub const Wnd: MultiAssetFilter = Wild(AllOf { fun: WildFungible, id: Concrete(TokenLocation::get()) }); pub const AssetHub: MultiLocation = Parachain(ASSET_HUB_ID).into_location(); - pub const WndForAssetHub: (MultiAssetFilter, MultiLocation) = (Wnd::get(), AssetHub::get()); pub const Collectives: MultiLocation = Parachain(COLLECTIVES_ID).into_location(); + pub const BridgeHub: MultiLocation = Parachain(BRIDGE_HUB_ID).into_location(); + pub const Wnd: MultiAssetFilter = Wild(AllOf { fun: WildFungible, id: Concrete(TokenLocation::get()) }); + pub const WndForAssetHub: (MultiAssetFilter, MultiLocation) = (Wnd::get(), AssetHub::get()); pub const WndForCollectives: (MultiAssetFilter, MultiLocation) = (Wnd::get(), Collectives::get()); + pub const WndForBridgeHub: (MultiAssetFilter, MultiLocation) = (Wnd::get(), BridgeHub::get()); pub const MaxInstructions: u32 = 100; pub const MaxAssetsIntoHolding: u32 = 64; } -pub type TrustedTeleporters = - (xcm_builder::Case<WndForAssetHub>, xcm_builder::Case<WndForCollectives>); +pub type TrustedTeleporters = ( + xcm_builder::Case<WndForAssetHub>, + xcm_builder::Case<WndForCollectives>, + xcm_builder::Case<WndForBridgeHub>, +); match_types! { pub type OnlyParachains: impl Contains<MultiLocation> = { @@ -137,7 +143,7 @@ pub type Barrier = TrailingSetTopicAsId<( AllowKnownQueryResponses<XcmPallet>, WithComputedOrigin< ( - // If the message is one that immediately attemps to pay for execution, then allow it. + // If the message is one that immediately attempts to pay for execution, then allow it. AllowTopLevelPaidExecutionFrom<Everything>, // Subscriptions for version tracking are OK. AllowSubscriptionsFrom<OnlyParachains>, @@ -174,7 +180,10 @@ impl xcm_executor::Config for XcmConfig { type SubscriptionService = XcmPallet; type PalletInstancesInfo = AllPalletsWithSystem; type MaxAssetsIntoHolding = MaxAssetsIntoHolding; - type FeeManager = (); + type FeeManager = XcmFeeManagerFromComponents< + SystemParachains, + XcmFeeToAccount<Self::AssetTransactor, AccountId, TreasuryAccount>, + >; type MessageExporter = (); type UniversalAliases = Nothing; type CallDispatcher = RuntimeCall; @@ -191,11 +200,6 @@ parameter_types! { pub const FellowshipAdminBodyId: BodyId = BodyId::Index(FELLOWSHIP_ADMIN_INDEX); } -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option<MultiLocation> = Some(Parachain(1000).into()); -} - /// Type to convert the `GeneralAdmin` origin to a Plurality `MultiLocation` value.
pub type GeneralAdminToPlurality = OriginToPluralityVoice; @@ -256,7 +260,5 @@ impl pallet_xcm::Config for Runtime { type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); type WeightInfo = crate::weights::pallet_xcm::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; } diff --git a/polkadot/scripts/list-syscalls/execute-worker-syscalls b/polkadot/scripts/list-syscalls/execute-worker-syscalls new file mode 100644 index 0000000000000000000000000000000000000000..349af783cf1a1340d9467187fde21e1829ec6248 --- /dev/null +++ b/polkadot/scripts/list-syscalls/execute-worker-syscalls @@ -0,0 +1,71 @@ +0 (read) +1 (write) +2 (open) +3 (close) +4 (stat) +5 (fstat) +7 (poll) +8 (lseek) +9 (mmap) +10 (mprotect) +11 (munmap) +12 (brk) +13 (rt_sigaction) +14 (rt_sigprocmask) +15 (rt_sigreturn) +16 (ioctl) +19 (readv) +20 (writev) +22 (pipe) +24 (sched_yield) +25 (mremap) +28 (madvise) +39 (getpid) +41 (socket) +42 (connect) +45 (recvfrom) +46 (sendmsg) +56 (clone) +57 (fork) +60 (exit) +61 (wait4) +62 (kill) +72 (fcntl) +79 (getcwd) +80 (chdir) +82 (rename) +83 (mkdir) +87 (unlink) +89 (readlink) +96 (gettimeofday) +97 (getrlimit) +98 (getrusage) +99 (sysinfo) +102 (getuid) +110 (getppid) +131 (sigaltstack) +140 (getpriority) +141 (setpriority) +144 (sched_setscheduler) +157 (prctl) +158 (arch_prctl) +165 (mount) +166 (umount2) +186 (gettid) +200 (tkill) +202 (futex) +204 (sched_getaffinity) +217 (getdents64) +218 (set_tid_address) +228 (clock_gettime) +230 (clock_nanosleep) +231 (exit_group) +257 (openat) +262 (newfstatat) +263 (unlinkat) +272 (unshare) +273 (set_robust_list) +293 (pipe2) +302 (prlimit64) +318 (getrandom) +319 (memfd_create) diff --git a/polkadot/scripts/list-syscalls/list-syscalls.rb b/polkadot/scripts/list-syscalls/list-syscalls.rb new file mode 100755 index 0000000000000000000000000000000000000000..9cef6f74b2ebe5588fca908f3787791a956ba50e --- /dev/null +++ b/polkadot/scripts/list-syscalls/list-syscalls.rb @@ -0,0 +1,608 @@ +#!/usr/bin/ruby + +# A script to statically list syscalls used by a given binary. +# +# Syntax: list-syscalls.rb [--only-used-syscalls] +# +# NOTE: For accurate results, build the binary with musl and LTO enabled. 
+# Example: ./polkadot/scripts/list-syscalls/list-syscalls.rb target/x86_64-unknown-linux-musl/production/polkadot-prepare-worker --only-used-syscalls +# +# Author: @koute +# Source: https://gist.github.com/koute/166f82bfee5e27324077891008fca6eb + +require 'shellwords' +require 'set' + +SYNTAX_STRING = 'Syntax: list-syscalls.rb [--only-used-syscalls]'.freeze + +# Generated from `libc` using the following regex: +# 'pub const SYS_([a-z0-9_]+): ::c_long = (\d+);' +# ' \2 => "\1",' +SYSCALLS = { + 0 => 'read', + 1 => 'write', + 2 => 'open', + 3 => 'close', + 4 => 'stat', + 5 => 'fstat', + 6 => 'lstat', + 7 => 'poll', + 8 => 'lseek', + 9 => 'mmap', + 10 => 'mprotect', + 11 => 'munmap', + 12 => 'brk', + 13 => 'rt_sigaction', + 14 => 'rt_sigprocmask', + 15 => 'rt_sigreturn', + 16 => 'ioctl', + 17 => 'pread64', + 18 => 'pwrite64', + 19 => 'readv', + 20 => 'writev', + 21 => 'access', + 22 => 'pipe', + 23 => 'select', + 24 => 'sched_yield', + 25 => 'mremap', + 26 => 'msync', + 27 => 'mincore', + 28 => 'madvise', + 29 => 'shmget', + 30 => 'shmat', + 31 => 'shmctl', + 32 => 'dup', + 33 => 'dup2', + 34 => 'pause', + 35 => 'nanosleep', + 36 => 'getitimer', + 37 => 'alarm', + 38 => 'setitimer', + 39 => 'getpid', + 40 => 'sendfile', + 41 => 'socket', + 42 => 'connect', + 43 => 'accept', + 44 => 'sendto', + 45 => 'recvfrom', + 46 => 'sendmsg', + 47 => 'recvmsg', + 48 => 'shutdown', + 49 => 'bind', + 50 => 'listen', + 51 => 'getsockname', + 52 => 'getpeername', + 53 => 'socketpair', + 54 => 'setsockopt', + 55 => 'getsockopt', + 56 => 'clone', + 57 => 'fork', + 58 => 'vfork', + 59 => 'execve', + 60 => 'exit', + 61 => 'wait4', + 62 => 'kill', + 63 => 'uname', + 64 => 'semget', + 65 => 'semop', + 66 => 'semctl', + 67 => 'shmdt', + 68 => 'msgget', + 69 => 'msgsnd', + 70 => 'msgrcv', + 71 => 'msgctl', + 72 => 'fcntl', + 73 => 'flock', + 74 => 'fsync', + 75 => 'fdatasync', + 76 => 'truncate', + 77 => 'ftruncate', + 78 => 'getdents', + 79 => 'getcwd', + 80 => 'chdir', + 81 => 'fchdir', + 82 => 'rename', + 83 => 'mkdir', + 84 => 'rmdir', + 85 => 'creat', + 86 => 'link', + 87 => 'unlink', + 88 => 'symlink', + 89 => 'readlink', + 90 => 'chmod', + 91 => 'fchmod', + 92 => 'chown', + 93 => 'fchown', + 94 => 'lchown', + 95 => 'umask', + 96 => 'gettimeofday', + 97 => 'getrlimit', + 98 => 'getrusage', + 99 => 'sysinfo', + 100 => 'times', + 101 => 'ptrace', + 102 => 'getuid', + 103 => 'syslog', + 104 => 'getgid', + 105 => 'setuid', + 106 => 'setgid', + 107 => 'geteuid', + 108 => 'getegid', + 109 => 'setpgid', + 110 => 'getppid', + 111 => 'getpgrp', + 112 => 'setsid', + 113 => 'setreuid', + 114 => 'setregid', + 115 => 'getgroups', + 116 => 'setgroups', + 117 => 'setresuid', + 118 => 'getresuid', + 119 => 'setresgid', + 120 => 'getresgid', + 121 => 'getpgid', + 122 => 'setfsuid', + 123 => 'setfsgid', + 124 => 'getsid', + 125 => 'capget', + 126 => 'capset', + 127 => 'rt_sigpending', + 128 => 'rt_sigtimedwait', + 129 => 'rt_sigqueueinfo', + 130 => 'rt_sigsuspend', + 131 => 'sigaltstack', + 132 => 'utime', + 133 => 'mknod', + 134 => 'uselib', + 135 => 'personality', + 136 => 'ustat', + 137 => 'statfs', + 138 => 'fstatfs', + 139 => 'sysfs', + 140 => 'getpriority', + 141 => 'setpriority', + 142 => 'sched_setparam', + 143 => 'sched_getparam', + 144 => 'sched_setscheduler', + 145 => 'sched_getscheduler', + 146 => 'sched_get_priority_max', + 147 => 'sched_get_priority_min', + 148 => 'sched_rr_get_interval', + 149 => 'mlock', + 150 => 'munlock', + 151 => 'mlockall', + 152 => 'munlockall', + 153 => 'vhangup', + 154 => 'modify_ldt', + 
155 => 'pivot_root', + 156 => '_sysctl', + 157 => 'prctl', + 158 => 'arch_prctl', + 159 => 'adjtimex', + 160 => 'setrlimit', + 161 => 'chroot', + 162 => 'sync', + 163 => 'acct', + 164 => 'settimeofday', + 165 => 'mount', + 166 => 'umount2', + 167 => 'swapon', + 168 => 'swapoff', + 169 => 'reboot', + 170 => 'sethostname', + 171 => 'setdomainname', + 172 => 'iopl', + 173 => 'ioperm', + 174 => 'create_module', + 175 => 'init_module', + 176 => 'delete_module', + 177 => 'get_kernel_syms', + 178 => 'query_module', + 179 => 'quotactl', + 180 => 'nfsservctl', + 181 => 'getpmsg', + 182 => 'putpmsg', + 183 => 'afs_syscall', + 184 => 'tuxcall', + 185 => 'security', + 186 => 'gettid', + 187 => 'readahead', + 188 => 'setxattr', + 189 => 'lsetxattr', + 190 => 'fsetxattr', + 191 => 'getxattr', + 192 => 'lgetxattr', + 193 => 'fgetxattr', + 194 => 'listxattr', + 195 => 'llistxattr', + 196 => 'flistxattr', + 197 => 'removexattr', + 198 => 'lremovexattr', + 199 => 'fremovexattr', + 200 => 'tkill', + 201 => 'time', + 202 => 'futex', + 203 => 'sched_setaffinity', + 204 => 'sched_getaffinity', + 205 => 'set_thread_area', + 206 => 'io_setup', + 207 => 'io_destroy', + 208 => 'io_getevents', + 209 => 'io_submit', + 210 => 'io_cancel', + 211 => 'get_thread_area', + 212 => 'lookup_dcookie', + 213 => 'epoll_create', + 214 => 'epoll_ctl_old', + 215 => 'epoll_wait_old', + 216 => 'remap_file_pages', + 217 => 'getdents64', + 218 => 'set_tid_address', + 219 => 'restart_syscall', + 220 => 'semtimedop', + 221 => 'fadvise64', + 222 => 'timer_create', + 223 => 'timer_settime', + 224 => 'timer_gettime', + 225 => 'timer_getoverrun', + 226 => 'timer_delete', + 227 => 'clock_settime', + 228 => 'clock_gettime', + 229 => 'clock_getres', + 230 => 'clock_nanosleep', + 231 => 'exit_group', + 232 => 'epoll_wait', + 233 => 'epoll_ctl', + 234 => 'tgkill', + 235 => 'utimes', + 236 => 'vserver', + 237 => 'mbind', + 238 => 'set_mempolicy', + 239 => 'get_mempolicy', + 240 => 'mq_open', + 241 => 'mq_unlink', + 242 => 'mq_timedsend', + 243 => 'mq_timedreceive', + 244 => 'mq_notify', + 245 => 'mq_getsetattr', + 246 => 'kexec_load', + 247 => 'waitid', + 248 => 'add_key', + 249 => 'request_key', + 250 => 'keyctl', + 251 => 'ioprio_set', + 252 => 'ioprio_get', + 253 => 'inotify_init', + 254 => 'inotify_add_watch', + 255 => 'inotify_rm_watch', + 256 => 'migrate_pages', + 257 => 'openat', + 258 => 'mkdirat', + 259 => 'mknodat', + 260 => 'fchownat', + 261 => 'futimesat', + 262 => 'newfstatat', + 263 => 'unlinkat', + 264 => 'renameat', + 265 => 'linkat', + 266 => 'symlinkat', + 267 => 'readlinkat', + 268 => 'fchmodat', + 269 => 'faccessat', + 270 => 'pselect6', + 271 => 'ppoll', + 272 => 'unshare', + 273 => 'set_robust_list', + 274 => 'get_robust_list', + 275 => 'splice', + 276 => 'tee', + 277 => 'sync_file_range', + 278 => 'vmsplice', + 279 => 'move_pages', + 280 => 'utimensat', + 281 => 'epoll_pwait', + 282 => 'signalfd', + 283 => 'timerfd_create', + 284 => 'eventfd', + 285 => 'fallocate', + 286 => 'timerfd_settime', + 287 => 'timerfd_gettime', + 288 => 'accept4', + 289 => 'signalfd4', + 290 => 'eventfd2', + 291 => 'epoll_create1', + 292 => 'dup3', + 293 => 'pipe2', + 294 => 'inotify_init1', + 295 => 'preadv', + 296 => 'pwritev', + 297 => 'rt_tgsigqueueinfo', + 298 => 'perf_event_open', + 299 => 'recvmmsg', + 300 => 'fanotify_init', + 301 => 'fanotify_mark', + 302 => 'prlimit64', + 303 => 'name_to_handle_at', + 304 => 'open_by_handle_at', + 305 => 'clock_adjtime', + 306 => 'syncfs', + 307 => 'sendmmsg', + 308 => 'setns', + 309 => 'getcpu', + 310 => 
'process_vm_readv', + 311 => 'process_vm_writev', + 312 => 'kcmp', + 313 => 'finit_module', + 314 => 'sched_setattr', + 315 => 'sched_getattr', + 316 => 'renameat2', + 317 => 'seccomp', + 318 => 'getrandom', + 319 => 'memfd_create', + 320 => 'kexec_file_load', + 321 => 'bpf', + 322 => 'execveat', + 323 => 'userfaultfd', + 324 => 'membarrier', + 325 => 'mlock2', + 326 => 'copy_file_range', + 327 => 'preadv2', + 328 => 'pwritev2', + 329 => 'pkey_mprotect', + 330 => 'pkey_alloc', + 331 => 'pkey_free', + 332 => 'statx', + 334 => 'rseq', + 424 => 'pidfd_send_signal', + 425 => 'io_uring_setup', + 426 => 'io_uring_enter', + 427 => 'io_uring_register', + 428 => 'open_tree', + 429 => 'move_mount', + 430 => 'fsopen', + 431 => 'fsconfig', + 432 => 'fsmount', + 433 => 'fspick', + 434 => 'pidfd_open', + 435 => 'clone3', + 436 => 'close_range', + 437 => 'openat2', + 438 => 'pidfd_getfd', + 439 => 'faccessat2', + 440 => 'process_madvise', + 441 => 'epoll_pwait2', + 442 => 'mount_setattr', + 443 => 'quotactl_fd', + 444 => 'landlock_create_ruleset', + 445 => 'landlock_add_rule', + 446 => 'landlock_restrict_self', + 447 => 'memfd_secret', + 448 => 'process_mrelease', + 449 => 'futex_waitv', + 450 => 'set_mempolicy_home_node' +}.map { |num, name| [num, "#{num} (#{name})"] }.to_h + +REGS_R64 = %w[ + rax + rbx + rcx + rdx + rsi + rdi + rsp + rbp + r8 + r9 + r10 + r11 + r12 + r13 + r14 + r15 +] + +REGS_R32 = %w[ + eax + ebx + ecx + edx + esi + edi + esp + ebp + r8d + r9d + r10d + r11d + r12d + r13d + r14d + r15d +] + +REGS_R16 = %w[ + ax + bx + cx + dx + si + di + sp + bp + r8w + r9w + r10w + r11w + r12w + r13w + r14w + r15w +] + +REGS_R8 = %w[ + al + bl + cl + dl + sil + dil + spl + bpl + r8b + r9b + r10b + r11b + r12b + r13b + r14b + r15b +] + +REG_MAP = (REGS_R64.map { |r| [r, r] } + REGS_R32.zip(REGS_R64) + REGS_R16.zip(REGS_R64) + REGS_R8.zip(REGS_R64)).to_h +REGS_R = (REGS_R64 + REGS_R32 + REGS_R16 + REGS_R8).join('|') + +if ARGV.empty? + warn SYNTAX_STRING + exit 1 +end + +file_path = ARGV[0] +raise "no such file: #{file_path}" unless File.exist? file_path + +only_used_syscalls = false +ARGV[1..].each do |arg| + if arg == '--only-used-syscalls' + only_used_syscalls = true + else + warn "invalid argument '#{arg}':\n#{SYNTAX_STRING}" + exit 1 + end +end + +puts 'Running objdump...' unless only_used_syscalls +dump = `objdump -wd -j .text -M intel #{file_path.shellescape}` +raise 'objdump failed' unless $?.exitstatus == 0 + +puts 'Parsing objdump output...' unless only_used_syscalls +current_fn = nil +code_for_fn = {} +fns_with_syscall = Set.new +fns_with_indirect_syscall = Set.new +dump.split("\n").each do |line| + if line =~ /\A[0-9a-f]+ <(.+?)>:/ + current_fn = Regexp.last_match(1) + next + end + + next unless current_fn + next if %w[syscall __syscall_cp_c].include?(current_fn) # These are for indirect syscalls. 
+ + code = line.strip.split("\t")[2] + next if [nil, ''].include?(code) + + code_for_fn[current_fn] ||= [] + code_for_fn[current_fn] << code.gsub(/[\t ]+/, ' ') + + fns_with_syscall.add(current_fn) if code == 'syscall' + fns_with_indirect_syscall.add(current_fn) if code =~ /<(syscall|__syscall_cp)>/ +end + +unless only_used_syscalls + puts "Found #{fns_with_syscall.length} functions doing direct syscalls" + puts "Found #{fns_with_indirect_syscall.length} functions doing indirect syscalls" +end + +syscalls_for_fn = {} +not_found_count = 0 +(fns_with_syscall + fns_with_indirect_syscall).each do |fn_name| + syscalls_for_fn[fn_name] ||= [] + + if fn_name =~ /_ZN11parking_lot9raw_mutex8RawMutex9lock_slow.+/ + # Hardcode 'SYS_futex' as this function produces a really messy assembly. + syscalls_for_fn[fn_name] << 202 + next + end + + code = code_for_fn[fn_name] + + found = false + regs = {} + code.each do |inst| + if inst =~ /mov (#{REGS_R}),(.+)/ + reg = Regexp.last_match(1) + value = Regexp.last_match(2) + regs[REG_MAP[reg]] = if value =~ /#{REGS_R}/ + regs[REG_MAP[value]] + elsif value =~ /0x([0-9a-f]+)/ + Regexp.last_match(1).to_i(16) + end + elsif inst =~ /xor (#{REGS_R}),(#{REGS_R})/ + reg_1 = Regexp.last_match(1) + reg_2 = Regexp.last_match(1) + regs[REG_MAP[reg_1]] = 0 if reg_1 == reg_2 + elsif inst =~ /lea (#{REGS_R}),(.+)/ + reg = Regexp.last_match(1) + value = Regexp.last_match(2) + regs[REG_MAP[reg]] = ('syscall' if value.strip =~ /\[rip\+0x[a-z0-9]+\]\s*#\s*[0-9a-f]+\s*/) + elsif inst =~ /(call|jmp) (#{REGS_R})/ + reg = Regexp.last_match(2) + if regs[REG_MAP[reg]] == 'syscall' + if !regs['rdi'].nil? + syscalls_for_fn[fn_name] << regs['rdi'] + found = true + else + found = false + end + end + elsif inst =~ /(call|jmp) [0-9a-f]+ <(syscall|__syscall_cp)>/ + if !regs['rdi'].nil? + syscalls_for_fn[fn_name] << regs['rdi'] + found = true + else + found = false + end + elsif inst == 'syscall' + if !regs['rax'].nil? 
+ syscalls_for_fn[fn_name] << regs['rax'] + found = true + else + found = false + end + end + end + + next if found + + puts "WARN: Function triggers a syscall but couldn't figure out which one: #{fn_name}" + puts ' ' + code.join("\n ") + puts + not_found_count += 1 +end + +puts "WARN: Failed to figure out syscall for #{not_found_count} function(s)" if not_found_count > 0 + +fns_for_syscall = {} +syscalls_for_fn.each do |fn_name, syscalls| + syscalls.each do |syscall| + fns_for_syscall[syscall] ||= [] + fns_for_syscall[syscall] << fn_name + end +end + +if only_used_syscalls + puts syscalls_for_fn.values.flatten.sort.uniq.map { |sc| SYSCALLS[sc] || sc }.join("\n") +else + puts 'Functions per syscall:' + fns_for_syscall.sort_by { |sc, _| sc }.each do |syscall, fn_names| + fn_names = fn_names.sort.uniq + + puts " #{SYSCALLS[syscall] || syscall} [#{fn_names.length} functions]" + fn_names.each do |fn_name| + puts " #{fn_name}" + end + end + + puts + puts 'Used syscalls:' + puts ' ' + syscalls_for_fn.values.flatten.sort.uniq.map { |sc| SYSCALLS[sc] || sc }.join("\n ") +end diff --git a/polkadot/scripts/list-syscalls/prepare-worker-syscalls b/polkadot/scripts/list-syscalls/prepare-worker-syscalls new file mode 100644 index 0000000000000000000000000000000000000000..05281b61591a7f9e45efa7d7bb6514fbe118f130 --- /dev/null +++ b/polkadot/scripts/list-syscalls/prepare-worker-syscalls @@ -0,0 +1,72 @@ +0 (read) +1 (write) +2 (open) +3 (close) +4 (stat) +5 (fstat) +7 (poll) +8 (lseek) +9 (mmap) +10 (mprotect) +11 (munmap) +12 (brk) +13 (rt_sigaction) +14 (rt_sigprocmask) +15 (rt_sigreturn) +16 (ioctl) +19 (readv) +20 (writev) +22 (pipe) +24 (sched_yield) +25 (mremap) +28 (madvise) +39 (getpid) +41 (socket) +42 (connect) +45 (recvfrom) +46 (sendmsg) +56 (clone) +57 (fork) +60 (exit) +61 (wait4) +62 (kill) +72 (fcntl) +79 (getcwd) +80 (chdir) +82 (rename) +83 (mkdir) +87 (unlink) +89 (readlink) +96 (gettimeofday) +97 (getrlimit) +98 (getrusage) +99 (sysinfo) +102 (getuid) +110 (getppid) +131 (sigaltstack) +140 (getpriority) +141 (setpriority) +144 (sched_setscheduler) +157 (prctl) +158 (arch_prctl) +165 (mount) +166 (umount2) +186 (gettid) +200 (tkill) +202 (futex) +203 (sched_setaffinity) +204 (sched_getaffinity) +217 (getdents64) +218 (set_tid_address) +228 (clock_gettime) +230 (clock_nanosleep) +231 (exit_group) +257 (openat) +262 (newfstatat) +263 (unlinkat) +272 (unshare) +273 (set_robust_list) +293 (pipe2) +302 (prlimit64) +309 (getcpu) +318 (getrandom) diff --git a/polkadot/src/bin/execute-worker.rs b/polkadot/src/bin/execute-worker.rs index 1deb365809860502e8e298802814a185479c09c6..c39a5a3c89de7e5d1bec3b49e275526786879f87 100644 --- a/polkadot/src/bin/execute-worker.rs +++ b/polkadot/src/bin/execute-worker.rs @@ -20,4 +20,5 @@ polkadot_node_core_pvf_common::decl_worker_main!( "execute-worker", polkadot_node_core_pvf_execute_worker::worker_entrypoint, polkadot_cli::NODE_VERSION, + env!("SUBSTRATE_CLI_COMMIT_HASH"), ); diff --git a/polkadot/src/bin/prepare-worker.rs b/polkadot/src/bin/prepare-worker.rs index d731f8a30d065f0c9abdcc438df411be7d16417e..3b42991f18baa042fb605a9859800c7da4634929 100644 --- a/polkadot/src/bin/prepare-worker.rs +++ b/polkadot/src/bin/prepare-worker.rs @@ -20,4 +20,5 @@ polkadot_node_core_pvf_common::decl_worker_main!( "prepare-worker", polkadot_node_core_pvf_prepare_worker::worker_entrypoint, polkadot_cli::NODE_VERSION, + env!("SUBSTRATE_CLI_COMMIT_HASH"), ); diff --git a/polkadot/src/main.rs b/polkadot/src/main.rs index 
5986d8cea7bb5029ed34bb953f6c02aac009d1e9..1a96bf8fb00f6943ec64073be1e29365ef4a4e4e 100644 --- a/polkadot/src/main.rs +++ b/polkadot/src/main.rs @@ -24,7 +24,7 @@ use color_eyre::eyre; /// `memory_stats::MemoryAllocationTracker`. #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] #[global_allocator] -pub static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; +static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; fn main() -> eyre::Result<()> { color_eyre::install()?; diff --git a/polkadot/statement-table/Cargo.toml b/polkadot/statement-table/Cargo.toml index 91ca2015af3a1d766f37cf7e0ae33eb282640a53..d2518591d26c8062b87f4b51ab20a5400012c544 100644 --- a/polkadot/statement-table/Cargo.toml +++ b/polkadot/statement-table/Cargo.toml @@ -4,6 +4,7 @@ version = "1.0.0" authors.workspace = true edition.workspace = true license.workspace = true +description = "Stores messages other authorities issue about candidates in Polkadot." [dependencies] parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } diff --git a/polkadot/utils/generate-bags/Cargo.toml b/polkadot/utils/generate-bags/Cargo.toml index 95ca57ea728e16837843207e04d97f6b472979be..ed29001aa4f47e98522e45b2bb435f1c34097081 100644 --- a/polkadot/utils/generate-bags/Cargo.toml +++ b/polkadot/utils/generate-bags/Cargo.toml @@ -4,6 +4,7 @@ version = "1.0.0" authors.workspace = true edition.workspace = true license.workspace = true +description = "CLI to generate voter bags for Polkadot runtimes" [dependencies] clap = { version = "4.4.6", features = ["derive"] } diff --git a/polkadot/xcm/Cargo.toml b/polkadot/xcm/Cargo.toml index f04f31c3ec103da9d08dd9c84cc3668eb46afb70..60c27f7fcfc34cd54d69b16007e5d535d5911b50 100644 --- a/polkadot/xcm/Cargo.toml +++ b/polkadot/xcm/Cargo.toml @@ -12,7 +12,7 @@ derivative = { version = "2.2.0", default-features = false, features = [ "use_co impl-trait-for-tuples = "0.2.2" log = { version = "0.4.17", default-features = false } parity-scale-codec = { version = "3.6.1", default-features = false, features = [ "derive", "max-encoded-len" ] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive", "serde"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } sp-weights = { path = "../../substrate/primitives/weights", default-features = false, features = ["serde"] } serde = { version = "1.0.188", default-features = false, features = ["alloc", "derive"] } xcm-procedural = { path = "procedural" } @@ -29,6 +29,7 @@ wasm-api = [] std = [ "bounded-collections/std", "environmental/std", + "log/std", "parity-scale-codec/std", "scale-info/std", "serde/std", diff --git a/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml b/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml index 35b0b7dc553e5fa58e9457ab89bf9416d418bfe8..5be0bbe4ae5ad407aa465191402e4c570f40bc71 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml +++ b/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml @@ -4,13 +4,14 @@ authors.workspace = true edition.workspace = true license.workspace = true version = "1.0.0" +description = "Benchmarks for the XCM pallet" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-support = { path = 
"../../../substrate/frame/support", default-features = false} frame-system = { path = "../../../substrate/frame/system", default-features = false} sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false} @@ -41,6 +42,8 @@ std = [ "frame-benchmarking/std", "frame-support/std", "frame-system/std", + "log/std", + "scale-info/std", "sp-io/std", "sp-runtime/std", "sp-std/std", diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/benchmarking.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/benchmarking.rs index 504b795403991dc3229f005027baa8fc97790617..d32eb8d4a52f7c98473f4a427b7976410701183b 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/benchmarking.rs @@ -15,16 +15,17 @@ // along with Polkadot. If not, see . use super::*; -use crate::{account_and_location, new_executor, AssetTransactorOf, XcmCallOf}; +use crate::{account_and_location, new_executor, AssetTransactorOf, EnsureDelivery, XcmCallOf}; use frame_benchmarking::{benchmarks_instance_pallet, BenchmarkError, BenchmarkResult}; use frame_support::{ pallet_prelude::Get, traits::fungible::{Inspect, Mutate}, + weights::Weight, }; use sp_runtime::traits::{Bounded, Zero}; use sp_std::{prelude::*, vec}; -use xcm::latest::prelude::*; -use xcm_executor::traits::{ConvertLocation, TransactAsset}; +use xcm::latest::{prelude::*, MAX_ITEMS_IN_MULTIASSETS}; +use xcm_executor::traits::{ConvertLocation, FeeReason, TransactAsset}; benchmarks_instance_pallet! { where_clause { where @@ -44,15 +45,7 @@ benchmarks_instance_pallet! { let worst_case_holding = T::worst_case_holding(0); let asset = T::get_multi_asset(); - >::deposit_asset( - &asset, - &sender_location, - &XcmContext { - origin: Some(sender_location.clone()), - message_id: [0; 32], - topic: None, - }, - ).unwrap(); + >::deposit_asset(&asset, &sender_location, None).unwrap(); // check the assets of origin. assert!(!T::TransactAsset::balance(&sender_account).is_zero()); @@ -77,15 +70,7 @@ benchmarks_instance_pallet! { let dest_location = T::valid_destination()?; let dest_account = T::AccountIdConverter::convert_location(&dest_location).unwrap(); - >::deposit_asset( - &asset, - &sender_location, - &XcmContext { - origin: Some(sender_location.clone()), - message_id: [0; 32], - topic: None, - }, - ).unwrap(); + >::deposit_asset(&asset, &sender_location, None).unwrap(); assert!(T::TransactAsset::balance(&dest_account).is_zero()); let mut executor = new_executor::(sender_location); @@ -103,20 +88,27 @@ benchmarks_instance_pallet! 
{ let dest_location = T::valid_destination()?; let dest_account = T::AccountIdConverter::convert_location(&dest_location).unwrap(); - let asset = T::get_multi_asset(); - >::deposit_asset( - &asset, + let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( &sender_location, - &XcmContext { - origin: Some(sender_location.clone()), - message_id: [0; 32], - topic: None, - }, - ).unwrap(); + &dest_location, + FeeReason::TransferReserveAsset + ); + let sender_account_balance_before = T::TransactAsset::balance(&sender_account); + + let asset = T::get_multi_asset(); + >::deposit_asset(&asset, &sender_location, None).unwrap(); + assert!(T::TransactAsset::balance(&sender_account) > sender_account_balance_before); let assets: MultiAssets = vec![ asset ].into(); assert!(T::TransactAsset::balance(&dest_account).is_zero()); let mut executor = new_executor::(sender_location); + if let Some(expected_fees_mode) = expected_fees_mode { + executor.set_fees_mode(expected_fees_mode); + } + if let Some(expected_assets_in_holding) = expected_assets_in_holding { + executor.set_holding(expected_assets_in_holding.into()); + } + let instruction = Instruction::TransferReserveAsset { assets, dest: dest_location, @@ -126,7 +118,7 @@ benchmarks_instance_pallet! { }: { executor.bench_process(xcm)?; } verify { - assert!(T::TransactAsset::balance(&sender_account).is_zero()); + assert!(T::TransactAsset::balance(&sender_account) <= sender_account_balance_before); assert!(!T::TransactAsset::balance(&dest_account).is_zero()); // TODO: Check sender queue is not empty. #4426 } @@ -134,7 +126,7 @@ benchmarks_instance_pallet! { reserve_asset_deposited { let (trusted_reserve, transferable_reserve_asset) = T::TrustedReserve::get() .ok_or(BenchmarkError::Override( - BenchmarkResult::from_weight(T::BlockWeights::get().max_block) + BenchmarkResult::from_weight(Weight::MAX) ))?; let assets: MultiAssets = vec![ transferable_reserve_asset ].into(); @@ -149,16 +141,33 @@ benchmarks_instance_pallet! { } initiate_reserve_withdraw { + let (sender_account, sender_location) = account_and_location::(1); let holding = T::worst_case_holding(1); - let assets_filter = MultiAssetFilter::Definite(holding.clone()); + let assets_filter = MultiAssetFilter::Definite(holding.clone().into_inner().into_iter().take(MAX_ITEMS_IN_MULTIASSETS).collect::>().into()); let reserve = T::valid_destination().map_err(|_| BenchmarkError::Skip)?; - let mut executor = new_executor::(Default::default()); + + let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( + &sender_location, + &reserve, + FeeReason::InitiateReserveWithdraw, + ); + let sender_account_balance_before = T::TransactAsset::balance(&sender_account); + + let mut executor = new_executor::(sender_location); executor.set_holding(holding.into()); + if let Some(expected_fees_mode) = expected_fees_mode { + executor.set_fees_mode(expected_fees_mode); + } + if let Some(expected_assets_in_holding) = expected_assets_in_holding { + executor.set_holding(expected_assets_in_holding.into()); + } let instruction = Instruction::InitiateReserveWithdraw { assets: assets_filter, reserve, xcm: Xcm(vec![]) }; let xcm = Xcm(vec![instruction]); }: { executor.bench_process(xcm)?; } verify { + // Check we charged the delivery fees + assert!(T::TransactAsset::balance(&sender_account) <= sender_account_balance_before); // The execute completing successfully is as good as we can check. 
// TODO: Potentially add new trait to XcmSender to detect a queued outgoing message. #4426 } @@ -187,7 +196,7 @@ benchmarks_instance_pallet! { }: { executor.bench_process(xcm).map_err(|_| { BenchmarkError::Override( - BenchmarkResult::from_weight(T::BlockWeights::get().max_block) + BenchmarkResult::from_weight(Weight::MAX) ) })?; } verify { diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs index f5759afc06464141fe1823d10ff662ca3cac64f0..9adc706fc18ae9ecd1540a820f79085573e52d1b 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs @@ -19,7 +19,7 @@ use crate::{fungible as xcm_balances_benchmark, mock::*}; use frame_benchmarking::BenchmarkError; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU32, Everything, Nothing}, weights::Weight, }; @@ -75,20 +75,10 @@ parameter_types! { pub const ExistentialDeposit: u64 = 7; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig as pallet_balances::DefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); type ReserveIdentifier = [u8; 8]; - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type RuntimeHoldReason = RuntimeHoldReason; - type FreezeIdentifier = (); - type MaxHolds = ConstU32<0>; - type MaxFreezes = ConstU32<0>; } parameter_types! { @@ -157,6 +147,7 @@ impl xcm_executor::Config for XcmConfig { impl crate::Config for Test { type XcmConfig = XcmConfig; type AccountIdConverter = AccountIdConverter; + type DeliveryHelper = (); fn valid_destination() -> Result { let valid_destination: MultiLocation = X1(AccountId32 { network: None, id: [0u8; 32] }).into(); diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs index 4583ecdba89fc395997fe50b0b86f437297011f9..f1c48ba9b83040af5d14086924b35e1acb7acd1e 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs @@ -15,27 +15,45 @@ // along with Polkadot. If not, see . use super::*; -use crate::{new_executor, XcmCallOf}; +use crate::{account_and_location, new_executor, EnsureDelivery, XcmCallOf}; use codec::Encode; use frame_benchmarking::{benchmarks, BenchmarkError}; -use frame_support::dispatch::GetDispatchInfo; +use frame_support::{dispatch::GetDispatchInfo, traits::fungible::Inspect}; use sp_std::vec; use xcm::{ latest::{prelude::*, MaxDispatchErrorLen, MaybeErrorCode, Weight}, DoubleEncoded, }; -use xcm_executor::{ExecutorError, FeesMode}; +use xcm_executor::{ + traits::{ConvertLocation, FeeReason}, + ExecutorError, FeesMode, +}; benchmarks! 
{ report_holding { + let (sender_account, sender_location) = account_and_location::(1); let holding = T::worst_case_holding(0); + let destination = T::valid_destination().map_err(|_| BenchmarkError::Skip)?; - let mut executor = new_executor::(Default::default()); + let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( + &sender_location, + &destination, + FeeReason::Report, + ); + let sender_account_balance_before = T::TransactAsset::balance(&sender_account); + + let mut executor = new_executor::(sender_location); executor.set_holding(holding.clone().into()); + if let Some(expected_fees_mode) = expected_fees_mode { + executor.set_fees_mode(expected_fees_mode); + } + if let Some(expected_assets_in_holding) = expected_assets_in_holding { + executor.set_holding(expected_assets_in_holding.into()); + } let instruction = Instruction::>::ReportHolding { response_info: QueryResponseInfo { - destination: T::valid_destination()?, + destination, query_id: Default::default(), max_weight: Weight::MAX, }, @@ -44,11 +62,11 @@ benchmarks! { }; let xcm = Xcm(vec![instruction]); - } : { executor.bench_process(xcm)?; } verify { - // The completion of execution above is enough to validate this is completed. + // Check we charged the delivery fees + assert!(T::TransactAsset::balance(&sender_account) <= sender_account_balance_before); } // This benchmark does not use any additional orders or instructions. This should be managed @@ -182,11 +200,26 @@ benchmarks! { } report_error { - let mut executor = new_executor::(Default::default()); - executor.set_error(Some((0u32, XcmError::Unimplemented))); + let (sender_account, sender_location) = account_and_location::(1); let query_id = Default::default(); - let destination = T::valid_destination().map_err(|_| BenchmarkError::Skip)?; let max_weight = Default::default(); + let destination = T::valid_destination().map_err(|_| BenchmarkError::Skip)?; + + let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( + &sender_location, + &destination, + FeeReason::Report, + ); + let sender_account_balance_before = T::TransactAsset::balance(&sender_account); + + let mut executor = new_executor::(sender_location); + if let Some(expected_fees_mode) = expected_fees_mode { + executor.set_fees_mode(expected_fees_mode); + } + if let Some(expected_assets_in_holding) = expected_assets_in_holding { + executor.set_holding(expected_assets_in_holding.into()); + } + executor.set_error(Some((0u32, XcmError::Unimplemented))); let instruction = Instruction::ReportError(QueryResponseInfo { query_id, destination, max_weight @@ -195,7 +228,8 @@ benchmarks! { }: { executor.bench_process(xcm)?; } verify { - // the execution succeeding is all we need to verify this xcm was successful + // Check we charged the delivery fees + assert!(T::TransactAsset::balance(&sender_account) <= sender_account_balance_before); } claim_asset { @@ -360,31 +394,48 @@ benchmarks! 
{ } query_pallet { + let (sender_account, sender_location) = account_and_location::(1); let query_id = Default::default(); let destination = T::valid_destination().map_err(|_| BenchmarkError::Skip)?; let max_weight = Default::default(); - let mut executor = new_executor::(Default::default()); + let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( + &sender_location, + &destination, + FeeReason::QueryPallet, + ); + let sender_account_balance_before = T::TransactAsset::balance(&sender_account); + let mut executor = new_executor::(sender_location); + if let Some(expected_fees_mode) = expected_fees_mode { + executor.set_fees_mode(expected_fees_mode); + } + if let Some(expected_assets_in_holding) = expected_assets_in_holding { + executor.set_holding(expected_assets_in_holding.into()); + } + + let valid_pallet = T::valid_pallet(); let instruction = Instruction::QueryPallet { - module_name: b"frame_system".to_vec(), + module_name: valid_pallet.module_name.as_bytes().to_vec(), response_info: QueryResponseInfo { destination, query_id, max_weight }, }; let xcm = Xcm(vec![instruction]); }: { executor.bench_process(xcm)?; } verify { + // Check we charged the delivery fees + assert!(T::TransactAsset::balance(&sender_account) <= sender_account_balance_before); // TODO: Potentially add new trait to XcmSender to detect a queued outgoing message. #4426 } expect_pallet { let mut executor = new_executor::(Default::default()); - + let valid_pallet = T::valid_pallet(); let instruction = Instruction::ExpectPallet { - index: 0, - name: b"System".to_vec(), - module_name: b"frame_system".to_vec(), - crate_major: 4, - min_crate_minor: 0, + index: valid_pallet.index as u32, + name: valid_pallet.name.as_bytes().to_vec(), + module_name: valid_pallet.module_name.as_bytes().to_vec(), + crate_major: valid_pallet.crate_version.major.into(), + min_crate_minor: valid_pallet.crate_version.minor.into(), }; let xcm = Xcm(vec![instruction]); }: { @@ -394,11 +445,25 @@ benchmarks! { } report_transact_status { + let (sender_account, sender_location) = account_and_location::(1); let query_id = Default::default(); let destination = T::valid_destination().map_err(|_| BenchmarkError::Skip)?; let max_weight = Default::default(); - let mut executor = new_executor::(Default::default()); + let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( + &sender_location, + &destination, + FeeReason::Report, + ); + let sender_account_balance_before = T::TransactAsset::balance(&sender_account); + + let mut executor = new_executor::(sender_location); + if let Some(expected_fees_mode) = expected_fees_mode { + executor.set_fees_mode(expected_fees_mode); + } + if let Some(expected_assets_in_holding) = expected_assets_in_holding { + executor.set_holding(expected_assets_in_holding.into()); + } executor.set_transact_status(b"MyError".to_vec().into()); let instruction = Instruction::ReportTransactStatus(QueryResponseInfo { @@ -410,6 +475,8 @@ benchmarks! { }: { executor.bench_process(xcm)?; } verify { + // Check we charged the delivery fees + assert!(T::TransactAsset::balance(&sender_account) <= sender_account_balance_before); // TODO: Potentially add new trait to XcmSender to detect a queued outgoing message. #4426 } @@ -491,14 +558,30 @@ benchmarks! { let inner_xcm = Xcm(vec![ClearOrigin; x as usize]); // Get `origin`, `network` and `destination` from configured runtime. 
let (origin, network, destination) = T::export_message_origin_and_destination()?; + + let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( + &origin, + &destination.into(), + FeeReason::Export { network, destination }, + ); + let sender_account = T::AccountIdConverter::convert_location(&origin).unwrap(); + let sender_account_balance_before = T::TransactAsset::balance(&sender_account); + let mut executor = new_executor::(origin); + if let Some(expected_fees_mode) = expected_fees_mode { + executor.set_fees_mode(expected_fees_mode); + } + if let Some(expected_assets_in_holding) = expected_assets_in_holding { + executor.set_holding(expected_assets_in_holding.into()); + } let xcm = Xcm(vec![ExportMessage { network, destination, xcm: inner_xcm, }]); }: { executor.bench_process(xcm)?; } verify { - // The execute completing successfully is as good as we can check. + // Check we charged the delivery fees + assert!(T::TransactAsset::balance(&sender_account) <= sender_account_balance_before); // TODO: Potentially add new trait to XcmSender to detect a queued outgoing message. #4426 } @@ -517,14 +600,30 @@ benchmarks! { lock_asset { let (unlocker, owner, asset) = T::unlockable_asset()?; + let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( + &owner, + &unlocker, + FeeReason::LockAsset, + ); + let sender_account = T::AccountIdConverter::convert_location(&owner).unwrap(); + let sender_account_balance_before = T::TransactAsset::balance(&sender_account); + let mut executor = new_executor::(owner); executor.set_holding(asset.clone().into()); + if let Some(expected_fees_mode) = expected_fees_mode { + executor.set_fees_mode(expected_fees_mode); + } + if let Some(expected_assets_in_holding) = expected_assets_in_holding { + executor.set_holding(expected_assets_in_holding.into()); + } let instruction = Instruction::LockAsset { asset, unlocker }; let xcm = Xcm(vec![instruction]); }: { executor.bench_process(xcm)?; } verify { + // Check delivery fees + assert!(T::TransactAsset::balance(&sender_account) <= sender_account_balance_before); // TODO: Potentially add new trait to XcmSender to detect a queued outgoing message. #4426 } @@ -595,13 +694,29 @@ benchmarks! { .enact() .map_err(|_| BenchmarkError::Skip)?; + let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( + &owner, + &locker, + FeeReason::RequestUnlock, + ); + let sender_account = T::AccountIdConverter::convert_location(&owner).unwrap(); + let sender_account_balance_before = T::TransactAsset::balance(&sender_account); + // ... then request for an unlock with the RequestUnlock instruction. let mut executor = new_executor::(owner); + if let Some(expected_fees_mode) = expected_fees_mode { + executor.set_fees_mode(expected_fees_mode); + } + if let Some(expected_assets_in_holding) = expected_assets_in_holding { + executor.set_holding(expected_assets_in_holding.into()); + } let instruction = Instruction::RequestUnlock { asset, locker }; let xcm = Xcm(vec![instruction]); }: { executor.bench_process(xcm)?; } verify { + // Check we charged the delivery fees + assert!(T::TransactAsset::balance(&sender_account) <= sender_account_balance_before); // TODO: Potentially add new trait to XcmSender to detect a queued outgoing message. 
#4426 } diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs index 1b244f316de9f4dd2979c859c8f818739d7ee6b1..710ff0d801927854a35a9de27a7e51a6803b809f 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs @@ -19,7 +19,7 @@ use crate::{generic, mock::*, *}; use codec::Decode; use frame_support::{ - match_types, parameter_types, + derive_impl, match_types, parameter_types, traits::{Everything, OriginTrait}, weights::Weight, }; @@ -40,6 +40,7 @@ frame_support::construct_runtime!( pub enum Test { System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, XcmGenericBenchmarks: generic::{Pallet}, } ); @@ -79,7 +80,11 @@ impl frame_system::Config for Test { /// The benchmarks in this pallet should never need an asset transactor to begin with. pub struct NoAssetTransactor; impl xcm_executor::traits::TransactAsset for NoAssetTransactor { - fn deposit_asset(_: &MultiAsset, _: &MultiLocation, _: &XcmContext) -> Result<(), XcmError> { + fn deposit_asset( + _: &MultiAsset, + _: &MultiLocation, + _: Option<&XcmContext>, + ) -> Result<(), XcmError> { unreachable!(); } @@ -133,9 +138,20 @@ impl xcm_executor::Config for XcmConfig { type Aliasers = Aliasers; } +parameter_types! { + pub const ExistentialDeposit: u64 = 7; +} + +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig as pallet_balances::DefaultConfig)] +impl pallet_balances::Config for Test { + type ReserveIdentifier = [u8; 8]; + type AccountStore = System; +} + impl crate::Config for Test { type XcmConfig = XcmConfig; type AccountIdConverter = AccountIdConverter; + type DeliveryHelper = (); fn valid_destination() -> Result { let valid_destination: MultiLocation = Junction::AccountId32 { network: None, id: [0u8; 32] }.into(); @@ -151,6 +167,7 @@ impl crate::Config for Test { } impl generic::Config for Test { + type TransactAsset = Balances; type RuntimeCall = RuntimeCall; fn worst_case_response() -> (u64, Response) { @@ -183,7 +200,7 @@ impl generic::Config for Test { fn unlockable_asset() -> Result<(MultiLocation, MultiLocation, MultiAsset), BenchmarkError> { let assets: MultiAsset = (Concrete(Here.into()), 100).into(); - Ok((Default::default(), Default::default(), assets)) + Ok((Default::default(), account_id_junction::(1).into(), assets)) } fn export_message_origin_and_destination( diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mod.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mod.rs index f207c238a39f14e5d1b6606b1d91dda12b096237..11f7bba19a9873ec5d21eebec237097958bc461e 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mod.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mod.rs @@ -38,6 +38,11 @@ pub mod pallet { + From> + Encode; + /// The type of `fungible` that is being used under the hood. + /// + /// This is useful for testing and checking. + type TransactAsset: frame_support::traits::fungible::Mutate; + /// The response which causes the most runtime weight. fn worst_case_response() -> (u64, Response); @@ -86,6 +91,18 @@ pub mod pallet { /// /// If set to `Err`, benchmarks which rely on a universal alias will be skipped. fn alias_origin() -> Result<(MultiLocation, MultiLocation), BenchmarkError>; + + /// Returns a valid pallet info for `ExpectPallet` or `QueryPallet` benchmark. + /// + /// By default returns `frame_system::Pallet` info with expected pallet index `0`. 
+ fn valid_pallet() -> frame_support::traits::PalletInfoData { + frame_support::traits::PalletInfoData { + index: as frame_support::traits::PalletInfoAccess>::index(), + name: as frame_support::traits::PalletInfoAccess>::name(), + module_name: as frame_support::traits::PalletInfoAccess>::module_name(), + crate_version: as frame_support::traits::PalletInfoAccess>::crate_version(), + } + } } #[pallet::pallet] diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/lib.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/lib.rs index c6a96343595345b041d4d7f7fc26dc4643a0a40a..3bf4aea1b25e5ca6680fd707b02493cb393087ee 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/lib.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/lib.rs @@ -22,7 +22,10 @@ use codec::Encode; use frame_benchmarking::{account, BenchmarkError}; use sp_std::prelude::*; use xcm::latest::prelude::*; -use xcm_executor::{traits::ConvertLocation, Config as XcmConfig}; +use xcm_executor::{ + traits::{ConvertLocation, FeeReason}, + Config as XcmConfig, FeesMode, +}; pub mod fungible; pub mod generic; @@ -41,6 +44,9 @@ pub trait Config: frame_system::Config { /// A converter between a multi-location to a sovereign account. type AccountIdConverter: ConvertLocation; + /// Helper that ensures successful delivery for XCM instructions which need `SendXcm`. + type DeliveryHelper: EnsureDelivery; + /// Does any necessary setup to create a valid destination for XCM messages. /// Returns that destination's multi-location to be used in benchmarks. fn valid_destination() -> Result; @@ -108,3 +114,29 @@ pub fn account_and_location(index: u32) -> (T::AccountId, MultiLocati (account, location) } + +/// Trait for a type which ensures all requirements for successful delivery with XCM transport +/// layers. +pub trait EnsureDelivery { + /// Prepare all requirements for successful `XcmSender: SendXcm` passing (accounts, balances, + /// channels ...). Returns: + /// - possible `FeesMode` which is expected to be set to executor + /// - possible `MultiAssets` which are expected to be subsume to the Holding Register + fn ensure_successful_delivery( + origin_ref: &MultiLocation, + dest: &MultiLocation, + fee_reason: FeeReason, + ) -> (Option, Option); +} + +/// `()` implementation does nothing which means no special requirements for environment. +impl EnsureDelivery for () { + fn ensure_successful_delivery( + _origin_ref: &MultiLocation, + _dest: &MultiLocation, + _fee_reason: FeeReason, + ) -> (Option, Option) { + // doing nothing + (None, None) + } +} diff --git a/polkadot/xcm/pallet-xcm/Cargo.toml b/polkadot/xcm/pallet-xcm/Cargo.toml index 4946a8d11ce96aed88e5864a57e79875d62555f0..cc5d7d97c45184672a703ae36430f81d6fcb74f8 100644 --- a/polkadot/xcm/pallet-xcm/Cargo.toml +++ b/polkadot/xcm/pallet-xcm/Cargo.toml @@ -1,19 +1,18 @@ [package] name = "pallet-xcm" +version = "1.0.0" +description = "A pallet for handling XCM programs." 
authors.workspace = true edition.workspace = true license.workspace = true -version = "1.0.0" - [dependencies] bounded-collections = { version = "0.1.8", default-features = false } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.188", optional = true, features = ["derive"] } log = { version = "0.4.17", default-features = false } -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } frame-support = { path = "../../../substrate/frame/support", default-features = false} frame-system = { path = "../../../substrate/frame/system", default-features = false} sp-core = { path = "../../../substrate/primitives/core", default-features = false} @@ -23,12 +22,16 @@ sp-std = { path = "../../../substrate/primitives/std", default-features = false} xcm = { package = "staging-xcm", path = "..", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../xcm-executor", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../xcm-builder", default-features = false } + +# marked optional, used in benchmarking +frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } +pallet-balances = { path = "../../../substrate/frame/balances", default-features = false, optional = true } [dev-dependencies] -pallet-balances = { path = "../../../substrate/frame/balances" } +pallet-assets = { path = "../../../substrate/frame/assets" } polkadot-runtime-parachains = { path = "../../runtime/parachains" } polkadot-parachain-primitives = { path = "../../parachain" } -xcm-builder = { package = "staging-xcm-builder", path = "../xcm-builder" } [features] default = [ "std" ] @@ -38,12 +41,15 @@ std = [ "frame-benchmarking?/std", "frame-support/std", "frame-system/std", + "log/std", + "pallet-balances/std", "scale-info/std", "serde", "sp-core/std", "sp-io/std", "sp-runtime/std", "sp-std/std", + "xcm-builder/std", "xcm-executor/std", "xcm/std", ] @@ -51,6 +57,7 @@ runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", + "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", "polkadot-runtime-parachains/runtime-benchmarks", @@ -61,6 +68,7 @@ runtime-benchmarks = [ try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", + "pallet-assets/try-runtime", "pallet-balances/try-runtime", "polkadot-runtime-parachains/try-runtime", "sp-runtime/try-runtime", diff --git a/polkadot/xcm/pallet-xcm/src/benchmarking.rs b/polkadot/xcm/pallet-xcm/src/benchmarking.rs index aca4bd1fb3fa92b812bc11d9957286310047a98b..3aca24791fc2ec5d5a69e0df9431c0496dbf6595 100644 --- a/polkadot/xcm/pallet-xcm/src/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm/src/benchmarking.rs @@ -16,15 +16,56 @@ use super::*; use bounded_collections::{ConstU32, WeakBoundedVec}; -use frame_benchmarking::{benchmarks, BenchmarkError, BenchmarkResult}; -use frame_support::weights::Weight; +use frame_benchmarking::{benchmarks, whitelisted_caller, BenchmarkError, BenchmarkResult}; +use frame_support::{traits::Currency, weights::Weight}; use frame_system::RawOrigin; use sp_std::prelude::*; use 
xcm::{latest::prelude::*, v2}; type RuntimeOrigin = ::RuntimeOrigin; +// existential deposit multiplier +const ED_MULTIPLIER: u32 = 100; + +/// Pallet we're benchmarking here. +pub struct Pallet(crate::Pallet); + +/// Trait that must be implemented by runtime to be able to benchmark pallet properly. +pub trait Config: crate::Config { + /// A `MultiLocation` that can be reached via `XcmRouter`. Used only in benchmarks. + /// + /// If `None`, the benchmarks that depend on a reachable destination will be skipped. + fn reachable_dest() -> Option { + None + } + + /// A `(MultiAsset, MultiLocation)` pair representing asset and the destination it can be + /// teleported to. Used only in benchmarks. + /// + /// Implementation should also make sure `dest` is reachable/connected. + /// + /// If `None`, the benchmarks that depend on this will be skipped. + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + None + } + + /// A `(MultiAsset, MultiLocation)` pair representing asset and the destination it can be + /// reserve-transferred to. Used only in benchmarks. + /// + /// Implementation should also make sure `dest` is reachable/connected. + /// + /// If `None`, the benchmarks that depend on this will be skipped. + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + None + } +} + benchmarks! { + where_clause { + where + T: pallet_balances::Config, + ::Balance: From + Into, + } send { let send_origin = T::SendXcmOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; @@ -32,7 +73,7 @@ benchmarks! { return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX))) } let msg = Xcm(vec![ClearOrigin]); - let versioned_dest: VersionedMultiLocation = T::ReachableDest::get().ok_or( + let versioned_dest: VersionedMultiLocation = T::reachable_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), )? .into(); @@ -40,44 +81,82 @@ benchmarks! 
{ }: _>(send_origin, Box::new(versioned_dest), Box::new(versioned_msg)) teleport_assets { - let asset: MultiAsset = (Here, 10).into(); - let send_origin = - T::ExecuteXcmOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - let origin_location = T::ExecuteXcmOrigin::try_origin(send_origin.clone()) + let (asset, destination) = T::teleportable_asset_and_dest().ok_or( + BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), + )?; + + let transferred_amount = match &asset.fun { + Fungible(amount) => *amount, + _ => return Err(BenchmarkError::Stop("Benchmark asset not fungible")), + }.into(); + let assets: MultiAssets = asset.into(); + + let existential_deposit = T::ExistentialDeposit::get(); + let caller = whitelisted_caller(); + + // Give some multiple of the existential deposit + let balance = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); + assert!(balance >= transferred_amount); + let _ = as Currency<_>>::make_free_balance_be(&caller, balance); + // verify initial balance + assert_eq!(pallet_balances::Pallet::::free_balance(&caller), balance); + + let send_origin = RawOrigin::Signed(caller.clone()); + let origin_location = T::ExecuteXcmOrigin::try_origin(send_origin.clone().into()) .map_err(|_| BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; - if !T::XcmTeleportFilter::contains(&(origin_location, vec![asset.clone()])) { + if !T::XcmTeleportFilter::contains(&(origin_location, assets.clone().into_inner())) { return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX))) } let recipient = [0u8; 32]; - let versioned_dest: VersionedMultiLocation = T::ReachableDest::get().ok_or( - BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), - )? - .into(); + let versioned_dest: VersionedMultiLocation = destination.into(); let versioned_beneficiary: VersionedMultiLocation = AccountId32 { network: None, id: recipient.into() }.into(); - let versioned_assets: VersionedMultiAssets = asset.into(); - }: _>(send_origin, Box::new(versioned_dest), Box::new(versioned_beneficiary), Box::new(versioned_assets), 0) + let versioned_assets: VersionedMultiAssets = assets.into(); + }: _>(send_origin.into(), Box::new(versioned_dest), Box::new(versioned_beneficiary), Box::new(versioned_assets), 0) + verify { + // verify balance after transfer, decreased by transferred amount (+ maybe XCM delivery fees) + assert!(pallet_balances::Pallet::::free_balance(&caller) <= balance - transferred_amount); + } reserve_transfer_assets { - let asset: MultiAsset = (Here, 10).into(); - let send_origin = - T::ExecuteXcmOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - let origin_location = T::ExecuteXcmOrigin::try_origin(send_origin.clone()) + let (asset, destination) = T::reserve_transferable_asset_and_dest().ok_or( + BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), + )?; + + let transferred_amount = match &asset.fun { + Fungible(amount) => *amount, + _ => return Err(BenchmarkError::Stop("Benchmark asset not fungible")), + }.into(); + let assets: MultiAssets = asset.into(); + + let existential_deposit = T::ExistentialDeposit::get(); + let caller = whitelisted_caller(); + + // Give some multiple of the existential deposit + let balance = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); + assert!(balance >= transferred_amount); + let _ = as Currency<_>>::make_free_balance_be(&caller, balance); + // verify initial balance + assert_eq!(pallet_balances::Pallet::::free_balance(&caller), 
balance); + + let send_origin = RawOrigin::Signed(caller.clone()); + let origin_location = T::ExecuteXcmOrigin::try_origin(send_origin.clone().into()) .map_err(|_| BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; - if !T::XcmReserveTransferFilter::contains(&(origin_location, vec![asset.clone()])) { + if !T::XcmReserveTransferFilter::contains(&(origin_location, assets.clone().into_inner())) { return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX))) } let recipient = [0u8; 32]; - let versioned_dest: VersionedMultiLocation = T::ReachableDest::get().ok_or( - BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), - )? - .into(); + let versioned_dest: VersionedMultiLocation = destination.into(); let versioned_beneficiary: VersionedMultiLocation = AccountId32 { network: None, id: recipient.into() }.into(); - let versioned_assets: VersionedMultiAssets = asset.into(); - }: _>(send_origin, Box::new(versioned_dest), Box::new(versioned_beneficiary), Box::new(versioned_assets), 0) + let versioned_assets: VersionedMultiAssets = assets.into(); + }: _>(send_origin.into(), Box::new(versioned_dest), Box::new(versioned_beneficiary), Box::new(versioned_assets), 0) + verify { + // verify balance after transfer, decreased by transferred amount (+ maybe XCM delivery fees) + assert!(pallet_balances::Pallet::::free_balance(&caller) <= balance - transferred_amount); + } execute { let execute_origin = @@ -92,7 +171,7 @@ benchmarks! { }: _>(execute_origin, Box::new(versioned_msg), Weight::zero()) force_xcm_version { - let loc = T::ReachableDest::get().ok_or( + let loc = T::reachable_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), )?; let xcm_version = 2; @@ -101,18 +180,18 @@ benchmarks! { force_default_xcm_version {}: _(RawOrigin::Root, Some(2)) force_subscribe_version_notify { - let versioned_loc: VersionedMultiLocation = T::ReachableDest::get().ok_or( + let versioned_loc: VersionedMultiLocation = T::reachable_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), )? .into(); }: _(RawOrigin::Root, Box::new(versioned_loc)) force_unsubscribe_version_notify { - let loc = T::ReachableDest::get().ok_or( + let loc = T::reachable_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), )?; let versioned_loc: VersionedMultiLocation = loc.into(); - let _ = Pallet::::request_version_notify(loc); + let _ = crate::Pallet::::request_version_notify(loc); }: _(RawOrigin::Root, Box::new(versioned_loc)) force_suspension {}: _(RawOrigin::Root, true) @@ -122,7 +201,7 @@ benchmarks! { let loc = VersionedMultiLocation::from(MultiLocation::from(Parent)); SupportedVersion::::insert(old_version, loc, old_version); }: { - Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateSupportedVersion, Weight::zero()); + crate::Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateSupportedVersion, Weight::zero()); } migrate_version_notifiers { @@ -130,22 +209,22 @@ benchmarks! 
{ let loc = VersionedMultiLocation::from(MultiLocation::from(Parent)); VersionNotifiers::::insert(old_version, loc, 0); }: { - Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateVersionNotifiers, Weight::zero()); + crate::Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateVersionNotifiers, Weight::zero()); } already_notified_target { - let loc = T::ReachableDest::get().ok_or( + let loc = T::reachable_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(T::DbWeight::get().reads(1))), )?; let loc = VersionedMultiLocation::from(loc); let current_version = T::AdvertisedXcmVersion::get(); VersionNotifyTargets::::insert(current_version, loc, (0, Weight::zero(), current_version)); }: { - Pallet::::check_xcm_version_change(VersionMigrationStage::NotifyCurrentTargets(None), Weight::zero()); + crate::Pallet::::check_xcm_version_change(VersionMigrationStage::NotifyCurrentTargets(None), Weight::zero()); } notify_current_targets { - let loc = T::ReachableDest::get().ok_or( + let loc = T::reachable_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(T::DbWeight::get().reads_writes(1, 3))), )?; let loc = VersionedMultiLocation::from(loc); @@ -153,7 +232,7 @@ benchmarks! { let old_version = current_version - 1; VersionNotifyTargets::::insert(current_version, loc, (0, Weight::zero(), old_version)); }: { - Pallet::::check_xcm_version_change(VersionMigrationStage::NotifyCurrentTargets(None), Weight::zero()); + crate::Pallet::::check_xcm_version_change(VersionMigrationStage::NotifyCurrentTargets(None), Weight::zero()); } notify_target_migration_fail { @@ -167,7 +246,7 @@ benchmarks! { let current_version = T::AdvertisedXcmVersion::get(); VersionNotifyTargets::::insert(current_version, bad_loc, (0, Weight::zero(), current_version)); }: { - Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero()); + crate::Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero()); } migrate_version_notify_targets { @@ -176,18 +255,45 @@ benchmarks! 
{ let loc = VersionedMultiLocation::from(MultiLocation::from(Parent)); VersionNotifyTargets::::insert(old_version, loc, (0, Weight::zero(), current_version)); }: { - Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero()); + crate::Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero()); } migrate_and_notify_old_targets { - let loc = T::ReachableDest::get().ok_or( + let loc = T::reachable_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(T::DbWeight::get().reads_writes(1, 3))), )?; let loc = VersionedMultiLocation::from(loc); let old_version = T::AdvertisedXcmVersion::get() - 1; VersionNotifyTargets::::insert(old_version, loc, (0, Weight::zero(), old_version)); }: { - Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero()); + crate::Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero()); + } + + new_query { + let responder = MultiLocation::from(Parent); + let timeout = 1u32.into(); + let match_querier = MultiLocation::from(Here); + }: { + crate::Pallet::::new_query(responder, timeout, match_querier); + } + + take_response { + let responder = MultiLocation::from(Parent); + let timeout = 1u32.into(); + let match_querier = MultiLocation::from(Here); + let query_id = crate::Pallet::::new_query(responder, timeout, match_querier); + let infos = (0 .. xcm::v3::MaxPalletsInfo::get()).map(|_| PalletInfo::new( + u32::MAX, + (0..xcm::v3::MaxPalletNameLen::get()).map(|_| 97u8).collect::>().try_into().unwrap(), + (0..xcm::v3::MaxPalletNameLen::get()).map(|_| 97u8).collect::>().try_into().unwrap(), + u32::MAX, + u32::MAX, + u32::MAX, + ).unwrap()).collect::>(); + crate::Pallet::::expect_response(query_id, Response::PalletsInfo(infos.try_into().unwrap())); + + }: { + as QueryHandler>::take_response(query_id); } impl_benchmark_test_suite!( diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs index 321bb294b88d89187924a88a98302c33dd860fb9..38ea7555fc32783135eb50b852dd13088b2bee43 100644 --- a/polkadot/xcm/pallet-xcm/src/lib.rs +++ b/polkadot/xcm/pallet-xcm/src/lib.rs @@ -19,7 +19,7 @@ #![cfg_attr(not(feature = "std"), no_std)] #[cfg(feature = "runtime-benchmarks")] -mod benchmarking; +pub mod benchmarking; #[cfg(test)] mod mock; #[cfg(test)] @@ -28,9 +28,17 @@ mod tests; pub mod migration; use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; -use frame_support::traits::{ - Contains, ContainsPair, Currency, Defensive, EnsureOrigin, Get, LockableCurrency, OriginTrait, +use frame_support::{ + dispatch::GetDispatchInfo, + pallet_prelude::*, + traits::{ + Contains, ContainsPair, Currency, Defensive, EnsureOrigin, Get, LockableCurrency, + OriginTrait, WithdrawReasons, + }, + PalletId, }; +use frame_system::pallet_prelude::{BlockNumberFor, *}; +pub use pallet::*; use scale_info::TypeInfo; use sp_runtime::{ traits::{ @@ -41,17 +49,15 @@ use sp_runtime::{ }; use sp_std::{boxed::Box, marker::PhantomData, prelude::*, result::Result, vec}; use xcm::{latest::QueryResponseInfo, prelude::*}; -use xcm_executor::traits::{ConvertOrigin, Properties}; - -use frame_support::{ - dispatch::GetDispatchInfo, pallet_prelude::*, traits::WithdrawReasons, PalletId, +use xcm_builder::{ + ExecuteController, ExecuteControllerWeightInfo, QueryController, QueryControllerWeightInfo, + SendController, SendControllerWeightInfo, }; -use frame_system::pallet_prelude::*; -pub use pallet::*; use xcm_executor::{ 
traits::{ - CheckSuspension, ClaimAssets, ConvertLocation, DropAssets, MatchesFungible, OnResponse, - QueryHandler, QueryResponseStatus, VersionChangeNotifier, WeightBounds, + AssetTransferError, CheckSuspension, ClaimAssets, ConvertLocation, ConvertOrigin, + DropAssets, MatchesFungible, OnResponse, Properties, QueryHandler, QueryResponseStatus, + TransactAsset, TransferType, VersionChangeNotifier, WeightBounds, XcmAssetTransfers, }, Assets, }; @@ -73,6 +79,8 @@ pub trait WeightInfo { fn notify_target_migration_fail() -> Weight; fn migrate_version_notify_targets() -> Weight; fn migrate_and_notify_old_targets() -> Weight; + fn new_query() -> Weight; + fn take_response() -> Weight; } /// fallback implementation @@ -141,6 +149,14 @@ impl WeightInfo for TestWeightInfo { fn migrate_and_notify_old_targets() -> Weight { Weight::from_parts(100_000_000, 0) } + + fn new_query() -> Weight { + Weight::from_parts(100_000_000, 0) + } + + fn take_response() -> Weight { + Weight::from_parts(100_000_000, 0) + } } #[frame_support::pallet] @@ -206,7 +222,7 @@ pub mod pallet { type XcmExecuteFilter: Contains<(MultiLocation, Xcm<::RuntimeCall>)>; /// Something to execute an XCM message. - type XcmExecutor: ExecuteXcm<::RuntimeCall>; + type XcmExecutor: ExecuteXcm<::RuntimeCall> + XcmAssetTransfers; /// Our XCM filter which messages to be teleported using the dedicated extrinsic must pass. type XcmTeleportFilter: Contains<(MultiLocation, Vec)>; @@ -259,12 +275,93 @@ pub mod pallet { /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; + } - /// A `MultiLocation` that can be reached via `XcmRouter`. Used only in benchmarks. - /// - /// If `None`, the benchmarks that depend on a reachable destination will be skipped. - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest: Get>; + impl ExecuteControllerWeightInfo for Pallet { + fn execute() -> Weight { + T::WeightInfo::execute() + } + } + + impl ExecuteController, ::RuntimeCall> for Pallet { + type WeightInfo = Self; + fn execute( + origin: OriginFor, + message: Box::RuntimeCall>>, + max_weight: Weight, + ) -> Result { + let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?; + let hash = message.using_encoded(sp_io::hashing::blake2_256); + let message = (*message).try_into().map_err(|()| Error::::BadVersion)?; + let value = (origin_location, message); + ensure!(T::XcmExecuteFilter::contains(&value), Error::::Filtered); + let (origin_location, message) = value; + let outcome = T::XcmExecutor::execute_xcm_in_credit( + origin_location, + message, + hash, + max_weight, + max_weight, + ); + Self::deposit_event(Event::Attempted { outcome: outcome.clone() }); + Ok(outcome) + } + } + + impl SendControllerWeightInfo for Pallet { + fn send() -> Weight { + T::WeightInfo::send() + } + } + + impl SendController> for Pallet { + type WeightInfo = Self; + fn send( + origin: OriginFor, + dest: Box, + message: Box>, + ) -> Result { + let origin_location = T::SendXcmOrigin::ensure_origin(origin)?; + let interior: Junctions = + origin_location.try_into().map_err(|_| Error::::InvalidOrigin)?; + let dest = MultiLocation::try_from(*dest).map_err(|()| Error::::BadVersion)?; + let message: Xcm<()> = (*message).try_into().map_err(|()| Error::::BadVersion)?; + + let message_id = + Self::send_xcm(interior, dest, message.clone()).map_err(Error::::from)?; + let e = Event::Sent { origin: origin_location, destination: dest, message, message_id }; + Self::deposit_event(e); + Ok(message_id) + } + } + + impl QueryControllerWeightInfo for Pallet { + 
fn query() -> Weight { + T::WeightInfo::new_query() + } + fn take_response() -> Weight { + T::WeightInfo::take_response() + } + } + + impl QueryController, BlockNumberFor> for Pallet { + type WeightInfo = Self; + + fn query( + origin: OriginFor, + timeout: BlockNumberFor, + match_querier: VersionedMultiLocation, + ) -> Result { + let responder = ::ExecuteXcmOrigin::ensure_origin(origin)?; + let query_id = ::new_query( + responder, + timeout, + MultiLocation::try_from(match_querier) + .map_err(|_| Into::::into(Error::::BadVersion))?, + ); + + Ok(query_id) + } } #[pallet::event] @@ -428,8 +525,8 @@ pub mod pallet { NoSubscription, /// The location is invalid since it already has a subscription from us. AlreadySubscribed, - /// Invalid asset for the operation. - InvalidAsset, + /// Could not check-out the assets for teleportation to the destination chain. + CannotCheckOutTeleport, /// The owner does not own (all) of the asset that they wish to do the operation on. LowBalance, /// The asset owner has too many locks on the asset. @@ -442,6 +539,16 @@ pub mod pallet { LockNotFound, /// The unlock operation cannot succeed because there are still consumers of the lock. InUse, + /// Invalid non-concrete asset. + InvalidAssetNotConcrete, + /// Invalid asset, reserve chain could not be determined for it. + InvalidAssetUnknownReserve, + /// Invalid asset, do not support remote asset reserves with different fees reserves. + InvalidAssetUnsupportedReserve, + /// Too many assets with different reserve locations have been attempted for transfer. + TooManyReserves, + /// Local XCM execution of asset transfer incomplete. + LocalExecutionIncomplete, } impl From for Error { @@ -454,6 +561,15 @@ pub mod pallet { } } + impl From for Error { + fn from(e: AssetTransferError) -> Self { + match e { + AssetTransferError::NotConcrete => Error::::InvalidAssetNotConcrete, + AssetTransferError::UnknownReserve => Error::::InvalidAssetUnknownReserve, + } + } + } + /// The status of a query. 
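// Editor's note: illustrative sketch only, not part of this diff. It shows how the query
// plumbing exposed through the `QueryHandler`/`QueryController` implementations above can be
// exercised, using the mock `Test` runtime and the `new_test_ext_with_balances`/`XcmPallet`
// helpers introduced later in this PR; the timeout value is arbitrary.
#[test]
fn example_query_roundtrip() {
    use xcm_executor::traits::{QueryHandler, QueryResponseStatus};
    new_test_ext_with_balances(vec![]).execute_with(|| {
        // Register interest in a response from the parent chain, valid for 100 blocks,
        // expecting the eventual querier to match `Here`.
        let query_id = <XcmPallet as QueryHandler>::new_query(Parent, 100u32.into(), Here);
        // No response has arrived yet, so taking it reports `Pending` rather than `Ready`.
        assert!(matches!(
            <XcmPallet as QueryHandler>::take_response(query_id),
            QueryResponseStatus::Pending { .. }
        ));
    });
}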
#[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] pub enum QueryStatus { @@ -771,16 +887,7 @@ pub mod pallet { dest: Box, message: Box>, ) -> DispatchResult { - let origin_location = T::SendXcmOrigin::ensure_origin(origin)?; - let interior: Junctions = - origin_location.try_into().map_err(|_| Error::::InvalidOrigin)?; - let dest = MultiLocation::try_from(*dest).map_err(|()| Error::::BadVersion)?; - let message: Xcm<()> = (*message).try_into().map_err(|()| Error::::BadVersion)?; - - let message_id = - Self::send_xcm(interior, dest, message.clone()).map_err(Error::::from)?; - let e = Event::Sent { origin: origin_location, destination: dest, message, message_id }; - Self::deposit_event(e); + >::send(origin, dest, message)?; Ok(()) } @@ -813,11 +920,7 @@ pub mod pallet { let mut message = Xcm(vec![ WithdrawAsset(assets), SetFeesMode { jit_withdraw: true }, - InitiateTeleport { - assets: Wild(AllCounted(count)), - dest, - xcm: Xcm(vec![]), - }, + InitiateTeleport { assets: Wild(AllCounted(count)), dest, xcm: Xcm(vec![]) }, ]); T::Weigher::weight(&mut message).map_or(Weight::MAX, |w| T::WeightInfo::teleport_assets().saturating_add(w)) } @@ -860,6 +963,8 @@ pub mod pallet { match (maybe_assets, maybe_dest) { (Ok(assets), Ok(dest)) => { use sp_std::vec; + // heaviest version of locally executed XCM program: equivalent in weight to + // transfer assets to SA, reanchor them, extend XCM program, and send onward XCM let mut message = Xcm(vec![ SetFeesMode { jit_withdraw: true }, TransferReserveAsset { assets, dest, xcm: Xcm(vec![]) } @@ -896,7 +1001,7 @@ pub mod pallet { /// execution attempt will be made. /// /// NOTE: A successful return to this does *not* imply that the `msg` was executed - /// successfully to completion; only that *some* of it was executed. + /// successfully to completion; only that it was attempted. 
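// Editor's note: illustrative sketch only, not part of this diff. Because `Pallet<T>` now
// implements the `xcm_builder` controller traits, other code can be written against those
// abstractions instead of depending on pallet-xcm directly; the `send`/`execute` extrinsics
// here simply delegate to the same impls. The function name below is hypothetical, and the
// return type assumes the controller surfaces the XCM message id on success.
fn forward_via_controller<Sender, Origin>(
    origin: Origin,
    dest: VersionedMultiLocation,
    message: VersionedXcm<()>,
) -> Result<XcmHash, sp_runtime::DispatchError>
where
    Sender: xcm_builder::SendController<Origin>,
{
    // Origin checking, fee charging and routing are all handled by the controller
    // implementation, e.g. `pallet_xcm::Pallet<Runtime>`.
    Sender::send(origin, Box::new(dest), Box::new(message))
}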
#[pallet::call_index(3)] #[pallet::weight(max_weight.saturating_add(T::WeightInfo::execute()))] pub fn execute( @@ -904,23 +1009,8 @@ pub mod pallet { message: Box::RuntimeCall>>, max_weight: Weight, ) -> DispatchResultWithPostInfo { - let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?; - let hash = message.using_encoded(sp_io::hashing::blake2_256); - let message = (*message).try_into().map_err(|()| Error::::BadVersion)?; - let value = (origin_location, message); - ensure!(T::XcmExecuteFilter::contains(&value), Error::::Filtered); - let (origin_location, message) = value; - let outcome = T::XcmExecutor::execute_xcm_in_credit( - origin_location, - message, - hash, - max_weight, - max_weight, - ); - let result = - Ok(Some(outcome.weight_used().saturating_add(T::WeightInfo::execute())).into()); - Self::deposit_event(Event::Attempted { outcome }); - result + let outcome = >::execute(origin, message, max_weight)?; + Ok(Some(outcome.weight_used().saturating_add(T::WeightInfo::execute())).into()) } /// Extoll that a particular destination can be communicated with through a particular @@ -1035,6 +1125,8 @@ pub mod pallet { match (maybe_assets, maybe_dest) { (Ok(assets), Ok(dest)) => { use sp_std::vec; + // heaviest version of locally executed XCM program: equivalent in weight to + // transfer assets to SA, reanchor them, extend XCM program, and send onward XCM let mut message = Xcm(vec![ SetFeesMode { jit_withdraw: true }, TransferReserveAsset { assets, dest, xcm: Xcm(vec![]) } @@ -1145,7 +1237,7 @@ impl QueryHandler for Pallet { timeout: BlockNumberFor, match_querier: impl Into, ) -> Self::QueryId { - Self::do_new_query(responder, None, timeout, match_querier).into() + Self::do_new_query(responder, None, timeout, match_querier) } /// To check the status of the query, use `fn query()` passing the resultant `QueryId` @@ -1194,6 +1286,33 @@ impl QueryHandler for Pallet { } impl Pallet { + /// Validate `assets` to be reserve-transferred and return their reserve location. + fn validate_assets_and_find_reserve( + assets: &[MultiAsset], + dest: &MultiLocation, + ) -> Result> { + let mut reserve = None; + for asset in assets.iter() { + if let Fungible(x) = asset.fun { + // If fungible asset, ensure non-zero amount. + ensure!(!x.is_zero(), Error::::Empty); + } + let transfer_type = + T::XcmExecutor::determine_for(&asset, dest).map_err(Error::::from)?; + // Ensure asset is not teleportable to `dest`. 
+ ensure!(transfer_type != TransferType::Teleport, Error::::Filtered); + if let Some(reserve) = reserve.as_ref() { + // Ensure transfer for multiple assets uses same reserve location (only fee may have + // different reserve location) + ensure!(reserve == &transfer_type, Error::::TooManyReserves); + } else { + // asset reserve identified + reserve = Some(transfer_type); + } + } + reserve.ok_or(Error::::Empty) + } + fn do_reserve_transfer_assets( origin: OriginFor, dest: Box, @@ -1207,35 +1326,75 @@ impl Pallet { let beneficiary: MultiLocation = (*beneficiary).try_into().map_err(|()| Error::::BadVersion)?; let assets: MultiAssets = (*assets).try_into().map_err(|()| Error::::BadVersion)?; + log::trace!( + target: "xcm::pallet_xcm::do_reserve_transfer_assets", + "origin {:?}, dest {:?}, beneficiary {:?}, assets {:?}, fee-idx {:?}", + origin_location, dest, beneficiary, assets, fee_asset_item, + ); ensure!(assets.len() <= MAX_ASSETS_FOR_TRANSFER, Error::::TooManyAssets); let value = (origin_location, assets.into_inner()); ensure!(T::XcmReserveTransferFilter::contains(&value), Error::::Filtered); - let (origin_location, assets) = value; - let context = T::UniversalLocation::get(); - let fees = assets - .get(fee_asset_item as usize) - .ok_or(Error::::Empty)? - .clone() - .reanchored(&dest, context) - .map_err(|_| Error::::CannotReanchor)?; - let max_assets = assets.len() as u32; - let assets: MultiAssets = assets.into(); - let xcm = Xcm(vec![ - BuyExecution { fees, weight_limit }, - DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }, - ]); - let mut message = Xcm(vec![ - SetFeesMode { jit_withdraw: true }, - TransferReserveAsset { assets, dest, xcm }, - ]); - let weight = - T::Weigher::weight(&mut message).map_err(|()| Error::::UnweighableMessage)?; - let hash = message.using_encoded(sp_io::hashing::blake2_256); - let outcome = - T::XcmExecutor::execute_xcm_in_credit(origin_location, message, hash, weight, weight); - Self::deposit_event(Event::Attempted { outcome }); - Ok(()) + let (origin_location, mut assets) = value; + + if fee_asset_item as usize >= assets.len() { + return Err(Error::::Empty.into()) + } + let fees = assets.swap_remove(fee_asset_item as usize); + let fees_transfer_type = + T::XcmExecutor::determine_for(&fees, &dest).map_err(Error::::from)?; + let assets_transfer_type = if assets.is_empty() { + // Single asset to transfer (one used for fees where transfer type is determined above). + ensure!(fees_transfer_type != TransferType::Teleport, Error::::Filtered); + fees_transfer_type + } else { + // Find reserve for non-fee assets. + Self::validate_assets_and_find_reserve(&assets, &dest)? + }; + + // local and remote XCM programs to potentially handle fees separately + let separate_fees_instructions: Option<(Xcm<::RuntimeCall>, Xcm<()>)>; + if fees_transfer_type == assets_transfer_type { + // Same reserve location (fees not teleportable), we can batch together fees and assets + // in same reserve-based-transfer. + assets.push(fees.clone()); + // no need for custom fees instructions, fees are batched with assets + separate_fees_instructions = None; + } else { + // Disallow _remote reserves_ unless assets & fees have same remote reserve (covered by + // branch above). The reason for this is that we'd need to send XCMs to separate chains + // with no guarantee of delivery order on final destination; therefore we cannot + // guarantee to have fees in place on final destination chain to pay for assets + // transfer. 
+ ensure!( + !matches!(assets_transfer_type, TransferType::RemoteReserve(_)), + Error::::InvalidAssetUnsupportedReserve + ); + let fees = fees.clone(); + let weight_limit = weight_limit.clone(); + // build fees transfer instructions to be added to assets transfers XCM programs + separate_fees_instructions = Some(match fees_transfer_type { + TransferType::LocalReserve => + Self::local_reserve_fees_instructions(dest, fees, weight_limit)?, + TransferType::DestinationReserve => + Self::destination_reserve_fees_instructions(dest, fees, weight_limit)?, + TransferType::Teleport => + Self::teleport_fees_instructions(origin_location, dest, fees, weight_limit)?, + TransferType::RemoteReserve(_) => + return Err(Error::::InvalidAssetUnsupportedReserve.into()), + }); + }; + + Self::build_and_execute_xcm_transfer_type( + origin_location, + dest, + beneficiary, + assets, + assets_transfer_type, + fees, + separate_fees_instructions, + weight_limit, + ) } fn do_teleport_assets( @@ -1256,31 +1415,388 @@ impl Pallet { let value = (origin_location, assets.into_inner()); ensure!(T::XcmTeleportFilter::contains(&value), Error::::Filtered); let (origin_location, assets) = value; + for asset in assets.iter() { + let transfer_type = + T::XcmExecutor::determine_for(asset, &dest).map_err(Error::::from)?; + ensure!(matches!(transfer_type, TransferType::Teleport), Error::::Filtered); + } + let fees = assets.get(fee_asset_item as usize).ok_or(Error::::Empty)?.clone(); + + Self::build_and_execute_xcm_transfer_type( + origin_location, + dest, + beneficiary, + assets, + TransferType::Teleport, + fees, + None, + weight_limit, + ) + } + + fn build_and_execute_xcm_transfer_type( + origin: MultiLocation, + dest: MultiLocation, + beneficiary: MultiLocation, + assets: Vec, + transfer_type: TransferType, + fees: MultiAsset, + separate_fees_instructions: Option<(Xcm<::RuntimeCall>, Xcm<()>)>, + weight_limit: WeightLimit, + ) -> DispatchResult { + log::trace!( + target: "xcm::pallet_xcm::build_and_execute_xcm_transfer_type", + "origin {:?}, dest {:?}, beneficiary {:?}, assets {:?}, transfer_type {:?}, \ + fees {:?}, fees_xcm: {:?}, weight_limit: {:?}", + origin, dest, beneficiary, assets, transfer_type, fees, separate_fees_instructions, weight_limit, + ); + let (mut local_xcm, remote_xcm) = match transfer_type { + TransferType::LocalReserve => { + let (local, remote) = Self::local_reserve_transfer_programs( + dest, + beneficiary, + assets, + fees, + separate_fees_instructions, + weight_limit, + )?; + (local, Some(remote)) + }, + TransferType::DestinationReserve => { + let (local, remote) = Self::destination_reserve_transfer_programs( + dest, + beneficiary, + assets, + fees, + separate_fees_instructions, + weight_limit, + )?; + (local, Some(remote)) + }, + TransferType::RemoteReserve(reserve) => ( + Self::remote_reserve_transfer_program( + reserve, + dest, + beneficiary, + assets, + fees, + weight_limit, + )?, + None, + ), + TransferType::Teleport => ( + Self::teleport_assets_program(dest, beneficiary, assets, fees, weight_limit)?, + None, + ), + }; + let weight = + T::Weigher::weight(&mut local_xcm).map_err(|()| Error::::UnweighableMessage)?; + let hash = local_xcm.using_encoded(sp_io::hashing::blake2_256); + let outcome = + T::XcmExecutor::execute_xcm_in_credit(origin, local_xcm, hash, weight, weight); + Self::deposit_event(Event::Attempted { outcome: outcome.clone() }); + if let Some(remote_xcm) = remote_xcm { + outcome.ensure_complete().map_err(|_| Error::::LocalExecutionIncomplete)?; + + let (ticket, price) = 
validate_send::(dest, remote_xcm.clone()) + .map_err(Error::::from)?; + if origin != Here.into_location() { + Self::charge_fees(origin, price).map_err(|_| Error::::FeesNotMet)?; + } + let message_id = T::XcmRouter::deliver(ticket).map_err(Error::::from)?; + + let e = Event::Sent { origin, destination: dest, message: remote_xcm, message_id }; + Self::deposit_event(e); + } + Ok(()) + } + + fn local_reserve_fees_instructions( + dest: MultiLocation, + fees: MultiAsset, + weight_limit: WeightLimit, + ) -> Result<(Xcm<::RuntimeCall>, Xcm<()>), Error> { let context = T::UniversalLocation::get(); - let fees = assets - .get(fee_asset_item as usize) - .ok_or(Error::::Empty)? + let reanchored_fees = fees .clone() .reanchored(&dest, context) .map_err(|_| Error::::CannotReanchor)?; - let max_assets = assets.len() as u32; + + let local_execute_xcm = Xcm(vec![ + // move `fees` to `dest`s local sovereign account + TransferAsset { assets: fees.into(), beneficiary: dest }, + ]); + let xcm_on_dest = Xcm(vec![ + // let (dest) chain know `fees` are in its SA on reserve + ReserveAssetDeposited(reanchored_fees.clone().into()), + // buy exec using `fees` in holding deposited in above instruction + BuyExecution { fees: reanchored_fees, weight_limit }, + ]); + Ok((local_execute_xcm, xcm_on_dest)) + } + + fn local_reserve_transfer_programs( + dest: MultiLocation, + beneficiary: MultiLocation, + assets: Vec, + fees: MultiAsset, + separate_fees_instructions: Option<(Xcm<::RuntimeCall>, Xcm<()>)>, + weight_limit: WeightLimit, + ) -> Result<(Xcm<::RuntimeCall>, Xcm<()>), Error> { + // max assets is `assets` (+ potentially separately handled fee) + let max_assets = + assets.len() as u32 + separate_fees_instructions.as_ref().map(|_| 1).unwrap_or(0); let assets: MultiAssets = assets.into(); - let xcm = Xcm(vec![ + let context = T::UniversalLocation::get(); + let mut reanchored_assets = assets.clone(); + reanchored_assets + .reanchor(&dest, context) + .map_err(|_| Error::::CannotReanchor)?; + + // fees are either handled through dedicated instructions, or batched together with assets + let fees_already_handled = separate_fees_instructions.is_some(); + let (fees_local_xcm, fees_remote_xcm) = separate_fees_instructions + .map(|(local, remote)| (local.into_inner(), remote.into_inner())) + .unwrap_or_default(); + + // start off with any necessary local fees specific instructions + let mut local_execute_xcm = fees_local_xcm; + // move `assets` to `dest`s local sovereign account + local_execute_xcm.push(TransferAsset { assets, beneficiary: dest }); + + // on destination chain, start off with custom fee instructions + let mut xcm_on_dest = fees_remote_xcm; + // continue with rest of assets + xcm_on_dest.extend_from_slice(&[ + // let (dest) chain know assets are in its SA on reserve + ReserveAssetDeposited(reanchored_assets), + // following instructions are not exec'ed on behalf of origin chain anymore + ClearOrigin, + ]); + if !fees_already_handled { + // no custom fees instructions, they are batched together with `assets` transfer; + // BuyExecution happens after receiving all `assets` + let reanchored_fees = + fees.reanchored(&dest, context).map_err(|_| Error::::CannotReanchor)?; + // buy execution using `fees` batched together with above `reanchored_assets` + xcm_on_dest.push(BuyExecution { fees: reanchored_fees, weight_limit }); + } + // deposit all remaining assets in holding to `beneficiary` location + xcm_on_dest.push(DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }); + + Ok((Xcm(local_execute_xcm), 
Xcm(xcm_on_dest))) + } + + fn destination_reserve_fees_instructions( + dest: MultiLocation, + fees: MultiAsset, + weight_limit: WeightLimit, + ) -> Result<(Xcm<::RuntimeCall>, Xcm<()>), Error> { + let context = T::UniversalLocation::get(); + let reanchored_fees = fees + .clone() + .reanchored(&dest, context) + .map_err(|_| Error::::CannotReanchor)?; + let fees: MultiAssets = fees.into(); + + let local_execute_xcm = Xcm(vec![ + // withdraw reserve-based fees (derivatives) + WithdrawAsset(fees.clone()), + // burn derivatives + BurnAsset(fees), + ]); + let xcm_on_dest = Xcm(vec![ + // withdraw `fees` from origin chain's sovereign account + WithdrawAsset(reanchored_fees.clone().into()), + // buy exec using `fees` in holding withdrawn in above instruction + BuyExecution { fees: reanchored_fees, weight_limit }, + ]); + Ok((local_execute_xcm, xcm_on_dest)) + } + + fn destination_reserve_transfer_programs( + dest: MultiLocation, + beneficiary: MultiLocation, + assets: Vec, + fees: MultiAsset, + separate_fees_instructions: Option<(Xcm<::RuntimeCall>, Xcm<()>)>, + weight_limit: WeightLimit, + ) -> Result<(Xcm<::RuntimeCall>, Xcm<()>), Error> { + // max assets is `assets` (+ potentially separately handled fee) + let max_assets = + assets.len() as u32 + separate_fees_instructions.as_ref().map(|_| 1).unwrap_or(0); + let assets: MultiAssets = assets.into(); + let context = T::UniversalLocation::get(); + let mut reanchored_assets = assets.clone(); + reanchored_assets + .reanchor(&dest, context) + .map_err(|_| Error::::CannotReanchor)?; + + // fees are either handled through dedicated instructions, or batched together with assets + let fees_already_handled = separate_fees_instructions.is_some(); + let (fees_local_xcm, fees_remote_xcm) = separate_fees_instructions + .map(|(local, remote)| (local.into_inner(), remote.into_inner())) + .unwrap_or_default(); + + // start off with any necessary local fees specific instructions + let mut local_execute_xcm = fees_local_xcm; + // continue with rest of assets + local_execute_xcm.extend_from_slice(&[ + // withdraw reserve-based assets + WithdrawAsset(assets.clone()), + // burn reserve-based assets + BurnAsset(assets), + ]); + + // on destination chain, start off with custom fee instructions + let mut xcm_on_dest = fees_remote_xcm; + // continue with rest of assets + xcm_on_dest.extend_from_slice(&[ + // withdraw `assets` from origin chain's sovereign account + WithdrawAsset(reanchored_assets), + // following instructions are not exec'ed on behalf of origin chain anymore + ClearOrigin, + ]); + if !fees_already_handled { + // no custom fees instructions, they are batched together with `assets` transfer; + // BuyExecution happens after receiving all `assets` + let reanchored_fees = + fees.reanchored(&dest, context).map_err(|_| Error::::CannotReanchor)?; + // buy execution using `fees` batched together with above `reanchored_assets` + xcm_on_dest.push(BuyExecution { fees: reanchored_fees, weight_limit }); + } + // deposit all remaining assets in holding to `beneficiary` location + xcm_on_dest.push(DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }); + + Ok((Xcm(local_execute_xcm), Xcm(xcm_on_dest))) + } + + // function assumes fees and assets have the same remote reserve + fn remote_reserve_transfer_program( + reserve: MultiLocation, + dest: MultiLocation, + beneficiary: MultiLocation, + assets: Vec, + fees: MultiAsset, + weight_limit: WeightLimit, + ) -> Result::RuntimeCall>, Error> { + let max_assets = assets.len() as u32; + let context = 
T::UniversalLocation::get(); + // we spend up to half of fees for execution on reserve and other half for execution on + // destination + let (fees_half_1, fees_half_2) = Self::halve_fees(fees)?; + // identifies fee item as seen by `reserve` - to be used at reserve chain + let reserve_fees = fees_half_1 + .reanchored(&reserve, context) + .map_err(|_| Error::::CannotReanchor)?; + // identifies fee item as seen by `dest` - to be used at destination chain + let dest_fees = + fees_half_2.reanchored(&dest, context).map_err(|_| Error::::CannotReanchor)?; + // identifies `dest` as seen by `reserve` + let dest = dest.reanchored(&reserve, context).map_err(|_| Error::::CannotReanchor)?; + // xcm to be executed at dest + let xcm_on_dest = Xcm(vec![ + BuyExecution { fees: dest_fees, weight_limit: weight_limit.clone() }, + DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }, + ]); + // xcm to be executed on reserve + let xcm_on_reserve = Xcm(vec![ + BuyExecution { fees: reserve_fees, weight_limit }, + DepositReserveAsset { assets: Wild(AllCounted(max_assets)), dest, xcm: xcm_on_dest }, + ]); + Ok(Xcm(vec![ + WithdrawAsset(assets.into()), + InitiateReserveWithdraw { + assets: Wild(AllCounted(max_assets)), + reserve, + xcm: xcm_on_reserve, + }, + ])) + } + + fn teleport_fees_instructions( + origin: MultiLocation, + dest: MultiLocation, + fees: MultiAsset, + weight_limit: WeightLimit, + ) -> Result<(Xcm<::RuntimeCall>, Xcm<()>), Error> { + let value = (origin, vec![fees.clone()]); + ensure!(T::XcmTeleportFilter::contains(&value), Error::::Filtered); + + let context = T::UniversalLocation::get(); + let reanchored_fees = fees + .clone() + .reanchored(&dest, context) + .map_err(|_| Error::::CannotReanchor)?; + + // XcmContext irrelevant in teleports checks + let dummy_context = + XcmContext { origin: None, message_id: Default::default(), topic: None }; + // We should check that the asset can actually be teleported out (for this to + // be in error, there would need to be an accounting violation by ourselves, + // so it's unlikely, but we don't want to allow that kind of bug to leak into + // a trusted chain. 
+ ::AssetTransactor::can_check_out( + &dest, + &fees, + &dummy_context, + ) + .map_err(|_| Error::::CannotCheckOutTeleport)?; + ::AssetTransactor::check_out( + &dest, + &fees, + &dummy_context, + ); + + let fees: MultiAssets = fees.into(); + let local_execute_xcm = Xcm(vec![ + // withdraw fees + WithdrawAsset(fees.clone()), + // burn fees + BurnAsset(fees), + ]); + let xcm_on_dest = Xcm(vec![ + // (dest) chain receive teleported assets burned on origin chain + ReceiveTeleportedAsset(reanchored_fees.clone().into()), + // buy exec using `fees` in holding received in above instruction + BuyExecution { fees: reanchored_fees, weight_limit }, + ]); + Ok((local_execute_xcm, xcm_on_dest)) + } + + fn teleport_assets_program( + dest: MultiLocation, + beneficiary: MultiLocation, + assets: Vec, + mut fees: MultiAsset, + weight_limit: WeightLimit, + ) -> Result::RuntimeCall>, Error> { + let context = T::UniversalLocation::get(); + fees.reanchor(&dest, context).map_err(|_| Error::::CannotReanchor)?; + let max_assets = assets.len() as u32; + let xcm_on_dest = Xcm(vec![ BuyExecution { fees, weight_limit }, DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }, ]); - let mut message = Xcm(vec![ - WithdrawAsset(assets), + Ok(Xcm(vec![ + WithdrawAsset(assets.into()), SetFeesMode { jit_withdraw: true }, - InitiateTeleport { assets: Wild(AllCounted(max_assets)), dest, xcm }, - ]); - let weight = - T::Weigher::weight(&mut message).map_err(|()| Error::::UnweighableMessage)?; - let hash = message.using_encoded(sp_io::hashing::blake2_256); - let outcome = - T::XcmExecutor::execute_xcm_in_credit(origin_location, message, hash, weight, weight); - Self::deposit_event(Event::Attempted { outcome }); - Ok(()) + InitiateTeleport { assets: Wild(AllCounted(max_assets)), dest, xcm: xcm_on_dest }, + ])) + } + + /// Halve `fees` fungible amount. + pub(crate) fn halve_fees(fees: MultiAsset) -> Result<(MultiAsset, MultiAsset), Error> { + match fees.fun { + Fungible(amount) => { + let fee1 = amount.saturating_div(2); + let fee2 = amount.saturating_sub(fee1); + ensure!(fee1 > 0, Error::::FeesNotMet); + ensure!(fee2 > 0, Error::::FeesNotMet); + Ok((MultiAsset::from((fees.id, fee1)), MultiAsset::from((fees.id, fee2)))) + }, + NonFungible(_) => Err(Error::::FeesNotMet), + } } /// Will always make progress, and will do its best not to use much more than `weight_cutoff` diff --git a/polkadot/xcm/pallet-xcm/src/migration.rs b/polkadot/xcm/pallet-xcm/src/migration.rs index ba3cdb5c51edc82712b3bf00feedc8b670365547..2793afcc910484948f7d3a611c88b2ca59beb766 100644 --- a/polkadot/xcm/pallet-xcm/src/migration.rs +++ b/polkadot/xcm/pallet-xcm/src/migration.rs @@ -31,7 +31,7 @@ pub mod v1 { /// checking, the version checking is not complete as it will begin failing after the upgrade is /// enacted on-chain. /// - /// Use experimental [`VersionCheckedMigrateToV1`] instead. + /// Use experimental [`MigrateToV1`] instead. pub struct VersionUncheckedMigrateToV1(sp_std::marker::PhantomData); impl OnRuntimeUpgrade for VersionUncheckedMigrateToV1 { fn on_runtime_upgrade() -> Weight { @@ -65,7 +65,7 @@ pub mod v1 { /// /// Wrapped in [`frame_support::migrations::VersionedMigration`] so the pre/post checks don't /// begin failing after the upgrade is enacted on-chain. 
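// Editor's note: illustrative sketch only, not part of this diff. With the alias renamed to
// `MigrateToV1` just below, a runtime schedules the version-checked migration through its
// `Executive` migrations tuple, roughly as follows (`Runtime` and `Migrations` are
// hypothetical names):
pub type Migrations = (
    // Runs only while pallet-xcm's on-chain storage version is 0, then bumps it to 1;
    // the pre/post-upgrade checks are likewise skipped on any other version.
    pallet_xcm::migration::v1::MigrateToV1<Runtime>,
    // ... other runtime migrations ...
);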
- pub type VersionCheckedMigrateToV1 = frame_support::migrations::VersionedMigration< + pub type MigrateToV1 = frame_support::migrations::VersionedMigration< 0, 1, VersionUncheckedMigrateToV1, diff --git a/polkadot/xcm/pallet-xcm/src/mock.rs b/polkadot/xcm/pallet-xcm/src/mock.rs index b09bcb80ed671de34eaea0a512b90be7e2b92534..e744cefb162c3467f061ddd1c12a35a6f70cfdc1 100644 --- a/polkadot/xcm/pallet-xcm/src/mock.rs +++ b/polkadot/xcm/pallet-xcm/src/mock.rs @@ -16,8 +16,11 @@ use codec::Encode; use frame_support::{ - construct_runtime, parameter_types, - traits::{ConstU32, Everything, Nothing}, + construct_runtime, match_types, parameter_types, + traits::{ + AsEnsureOriginWithArg, ConstU128, ConstU32, Contains, Equals, Everything, EverythingBut, + Nothing, + }, weights::Weight, }; use frame_system::EnsureRoot; @@ -25,16 +28,22 @@ use polkadot_parachain_primitives::primitives::Id as ParaId; use polkadot_runtime_parachains::origin; use sp_core::H256; use sp_runtime::{traits::IdentityLookup, AccountId32, BuildStorage}; -pub use sp_std::{cell::RefCell, fmt::Debug, marker::PhantomData}; +pub use sp_std::{ + cell::RefCell, collections::btree_map::BTreeMap, fmt::Debug, marker::PhantomData, +}; use xcm::prelude::*; use xcm_builder::{ AccountId32Aliases, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, Case, ChildParachainAsNative, ChildParachainConvertsVia, - ChildSystemParachainAsSuperuser, CurrencyAdapter as XcmCurrencyAdapter, FixedRateOfFungible, - FixedWeightBounds, IsConcrete, SignedAccountId32AsNative, SignedToAccountId32, - SovereignSignedViaLocation, TakeWeightCredit, + ChildSystemParachainAsSuperuser, CurrencyAdapter as XcmCurrencyAdapter, DescribeAllTerminal, + FixedRateOfFungible, FixedWeightBounds, FungiblesAdapter, HashedDescription, IsConcrete, + MatchedConvertedConcreteId, NoChecking, SignedAccountId32AsNative, SignedToAccountId32, + SovereignSignedViaLocation, TakeWeightCredit, XcmFeeManagerFromComponents, XcmFeeToAccount, +}; +use xcm_executor::{ + traits::{Identity, JustTry}, + XcmExecutor, }; -use xcm_executor::XcmExecutor; use crate::{self as pallet_xcm, TestWeightInfo}; @@ -135,6 +144,7 @@ construct_runtime!( { System: frame_system::{Pallet, Call, Storage, Config, Event}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Assets: pallet_assets::{Pallet, Call, Storage, Config, Event}, ParasOrigin: origin::{Pallet, Origin}, XcmPallet: pallet_xcm::{Pallet, Call, Storage, Event, Origin, Config}, TestNotifier: pallet_test_notifier::{Pallet, Call, Event}, @@ -154,7 +164,7 @@ pub(crate) fn take_sent_xcm() -> Vec<(MultiLocation, Xcm<()>)> { r }) } -/// Sender that never returns error, always sends +/// Sender that never returns error. pub struct TestSendXcm; impl SendXcm for TestSendXcm { type Ticket = (MultiLocation, Xcm<()>); @@ -177,14 +187,46 @@ impl SendXcm for TestSendXcmErrX8 { type Ticket = (MultiLocation, Xcm<()>); fn validate( dest: &mut Option, - msg: &mut Option>, + _: &mut Option>, ) -> SendResult<(MultiLocation, Xcm<()>)> { - let (dest, msg) = (dest.take().unwrap(), msg.take().unwrap()); - if dest.len() == 8 { + if dest.as_ref().unwrap().len() == 8 { + dest.take(); Err(SendError::Transport("Destination location full")) } else { - Ok(((dest, msg), MultiAssets::new())) + Err(SendError::NotApplicable) + } + } + fn deliver(pair: (MultiLocation, Xcm<()>)) -> Result { + let hash = fake_message_hash(&pair.1); + SENT_XCM.with(|q| q.borrow_mut().push(pair)); + Ok(hash) + } +} + +parameter_types! 
{ + pub Para3000: u32 = 3000; + pub Para3000Location: MultiLocation = Parachain(Para3000::get()).into(); + pub Para3000PaymentAmount: u128 = 1; + pub Para3000PaymentMultiAssets: MultiAssets = MultiAssets::from(MultiAsset::from((Here, Para3000PaymentAmount::get()))); +} +/// Sender only sends to `Parachain(3000)` destination requiring payment. +pub struct TestPaidForPara3000SendXcm; +impl SendXcm for TestPaidForPara3000SendXcm { + type Ticket = (MultiLocation, Xcm<()>); + fn validate( + dest: &mut Option, + msg: &mut Option>, + ) -> SendResult<(MultiLocation, Xcm<()>)> { + if let Some(dest) = dest.as_ref() { + if !dest.eq(&Para3000Location::get()) { + return Err(SendError::NotApplicable) + } + } else { + return Err(SendError::NotApplicable) } + + let pair = (dest.take().unwrap(), msg.take().unwrap()); + Ok((pair, Para3000PaymentMultiAssets::get())) } fn deliver(pair: (MultiLocation, Xcm<()>)) -> Result { let hash = fake_message_hash(&pair.1); @@ -240,23 +282,155 @@ impl pallet_balances::Config for Test { type MaxReserves = MaxReserves; type ReserveIdentifier = [u8; 8]; type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); type MaxHolds = ConstU32<0>; type MaxFreezes = ConstU32<0>; } +#[cfg(feature = "runtime-benchmarks")] +/// Simple conversion of `u32` into an `AssetId` for use in benchmarking. +pub struct XcmBenchmarkHelper; +#[cfg(feature = "runtime-benchmarks")] +impl pallet_assets::BenchmarkHelper for XcmBenchmarkHelper { + fn create_asset_id_parameter(id: u32) -> MultiLocation { + MultiLocation { parents: 1, interior: X1(Parachain(id)) } + } +} + +impl pallet_assets::Config for Test { + type RuntimeEvent = RuntimeEvent; + type Balance = Balance; + type AssetId = MultiLocation; + type AssetIdParameter = MultiLocation; + type Currency = Balances; + type CreateOrigin = AsEnsureOriginWithArg>; + type ForceOrigin = EnsureRoot; + type AssetDeposit = ConstU128<1>; + type AssetAccountDeposit = ConstU128<10>; + type MetadataDepositBase = ConstU128<1>; + type MetadataDepositPerByte = ConstU128<1>; + type ApprovalDeposit = ConstU128<1>; + type StringLimit = ConstU32<50>; + type Freezer = (); + type WeightInfo = (); + type CallbackHandle = (); + type Extra = (); + type RemoveItemsLimit = ConstU32<5>; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = XcmBenchmarkHelper; +} + +// This child parachain is a system parachain trusted to teleport native token. +pub const SOME_SYSTEM_PARA: u32 = 1001; + +// This child parachain acts as trusted reserve for its assets in tests. +// USDT allowed to teleport to/from here. +pub const FOREIGN_ASSET_RESERVE_PARA_ID: u32 = 2001; +// Inner junction of reserve asset on `FOREIGN_ASSET_RESERVE_PARA_ID`. +pub const FOREIGN_ASSET_INNER_JUNCTION: Junction = GeneralIndex(1234567); + +// This child parachain acts as trusted reserve for say.. USDC that can be used for fees. +pub const USDC_RESERVE_PARA_ID: u32 = 2002; +// Inner junction of reserve asset on `USDC_RESERVE_PARA_ID`. +pub const USDC_INNER_JUNCTION: Junction = PalletInstance(42); + +// This child parachain is a trusted teleporter for say.. USDT (T from Teleport :)). +// We'll use USDT in tests that teleport fees. +pub const USDT_PARA_ID: u32 = 2003; + +// This child parachain is not configured as trusted reserve or teleport location for any assets. +pub const OTHER_PARA_ID: u32 = 2009; + +// This child parachain is used for filtered/disallowed assets. +pub const FILTERED_PARA_ID: u32 = 2010; + parameter_types! 
{ pub const RelayLocation: MultiLocation = Here.into_location(); + pub const NativeAsset: MultiAsset = MultiAsset { + fun: Fungible(10), + id: Concrete(Here.into_location()), + }; + pub const SystemParachainLocation: MultiLocation = MultiLocation { + parents: 0, + interior: X1(Parachain(SOME_SYSTEM_PARA)) + }; + pub const ForeignReserveLocation: MultiLocation = MultiLocation { + parents: 0, + interior: X1(Parachain(FOREIGN_ASSET_RESERVE_PARA_ID)) + }; + pub const ForeignAsset: MultiAsset = MultiAsset { + fun: Fungible(10), + id: Concrete(MultiLocation { + parents: 0, + interior: X2(Parachain(FOREIGN_ASSET_RESERVE_PARA_ID), FOREIGN_ASSET_INNER_JUNCTION), + }), + }; + pub const UsdcReserveLocation: MultiLocation = MultiLocation { + parents: 0, + interior: X1(Parachain(USDC_RESERVE_PARA_ID)) + }; + pub const Usdc: MultiAsset = MultiAsset { + fun: Fungible(10), + id: Concrete(MultiLocation { + parents: 0, + interior: X2(Parachain(USDC_RESERVE_PARA_ID), USDC_INNER_JUNCTION), + }), + }; + pub const UsdtTeleportLocation: MultiLocation = MultiLocation { + parents: 0, + interior: X1(Parachain(USDT_PARA_ID)) + }; + pub const Usdt: MultiAsset = MultiAsset { + fun: Fungible(10), + id: Concrete(MultiLocation { + parents: 0, + interior: X1(Parachain(USDT_PARA_ID)), + }), + }; + pub const FilteredTeleportLocation: MultiLocation = MultiLocation { + parents: 0, + interior: X1(Parachain(FILTERED_PARA_ID)) + }; + pub const FilteredTeleportAsset: MultiAsset = MultiAsset { + fun: Fungible(10), + id: Concrete(MultiLocation { + parents: 0, + interior: X1(Parachain(FILTERED_PARA_ID)), + }), + }; pub const AnyNetwork: Option = None; pub UniversalLocation: InteriorMultiLocation = Here; pub UnitWeightCost: u64 = 1_000; + pub CheckingAccount: AccountId = XcmPallet::check_account(); } -pub type SovereignAccountOf = - (ChildParachainConvertsVia, AccountId32Aliases); +pub type SovereignAccountOf = ( + ChildParachainConvertsVia, + AccountId32Aliases, + HashedDescription, +); -pub type LocalAssetTransactor = - XcmCurrencyAdapter, SovereignAccountOf, AccountId, ()>; +pub type ForeignAssetsConvertedConcreteId = MatchedConvertedConcreteId< + MultiLocation, + Balance, + // Excludes relay/parent chain currency + EverythingBut<(Equals,)>, + Identity, + JustTry, +>; + +pub type AssetTransactors = ( + XcmCurrencyAdapter, SovereignAccountOf, AccountId, ()>, + FungiblesAdapter< + Assets, + ForeignAssetsConvertedConcreteId, + SovereignAccountOf, + AccountId, + NoChecking, + CheckingAccount, + >, +); type LocalOriginConverter = ( SovereignSignedViaLocation, @@ -268,9 +442,23 @@ type LocalOriginConverter = ( parameter_types! 
{ pub const BaseXcmWeight: Weight = Weight::from_parts(1_000, 1_000); pub CurrencyPerSecondPerByte: (AssetId, u128, u128) = (Concrete(RelayLocation::get()), 1, 1); - pub TrustedAssets: (MultiAssetFilter, MultiLocation) = (All.into(), Here.into()); + pub TrustedLocal: (MultiAssetFilter, MultiLocation) = (All.into(), Here.into()); + pub TrustedSystemPara: (MultiAssetFilter, MultiLocation) = (NativeAsset::get().into(), SystemParachainLocation::get()); + pub TrustedUsdt: (MultiAssetFilter, MultiLocation) = (Usdt::get().into(), UsdtTeleportLocation::get()); + pub TrustedFilteredTeleport: (MultiAssetFilter, MultiLocation) = (FilteredTeleportAsset::get().into(), FilteredTeleportLocation::get()); + pub TeleportUsdtToForeign: (MultiAssetFilter, MultiLocation) = (Usdt::get().into(), ForeignReserveLocation::get()); + pub TrustedForeign: (MultiAssetFilter, MultiLocation) = (ForeignAsset::get().into(), ForeignReserveLocation::get()); + pub TrustedUsdc: (MultiAssetFilter, MultiLocation) = (Usdc::get().into(), UsdcReserveLocation::get()); pub const MaxInstructions: u32 = 100; pub const MaxAssetsIntoHolding: u32 = 64; + pub XcmFeesTargetAccount: AccountId = AccountId::new([167u8; 32]); +} + +pub const XCM_FEES_NOT_WAIVED_USER_ACCOUNT: [u8; 32] = [37u8; 32]; +match_types! { + pub type XcmFeesNotWaivedLocations: impl Contains = { + MultiLocation { parents: 0, interior: X1(Junction::AccountId32 {network: None, id: XCM_FEES_NOT_WAIVED_USER_ACCOUNT})} + }; } pub type Barrier = ( @@ -280,14 +468,22 @@ pub type Barrier = ( AllowSubscriptionsFrom, ); +pub type XcmRouter = (TestPaidForPara3000SendXcm, TestSendXcmErrX8, TestSendXcm); + pub struct XcmConfig; impl xcm_executor::Config for XcmConfig { type RuntimeCall = RuntimeCall; - type XcmSender = TestSendXcm; - type AssetTransactor = LocalAssetTransactor; + type XcmSender = XcmRouter; + type AssetTransactor = AssetTransactors; type OriginConverter = LocalOriginConverter; - type IsReserve = (); - type IsTeleporter = Case; + type IsReserve = (Case, Case); + type IsTeleporter = ( + Case, + Case, + Case, + Case, + Case, + ); type UniversalLocation = UniversalLocation; type Barrier = Barrier; type Weigher = FixedWeightBounds; @@ -300,7 +496,10 @@ impl xcm_executor::Config for XcmConfig { type SubscriptionService = XcmPallet; type PalletInstancesInfo = AllPalletsWithSystem; type MaxAssetsIntoHolding = MaxAssetsIntoHolding; - type FeeManager = (); + type FeeManager = XcmFeeManagerFromComponents< + EverythingBut, + XcmFeeToAccount, + >; type MessageExporter = (); type UniversalAliases = Nothing; type CallDispatcher = RuntimeCall; @@ -314,19 +513,22 @@ parameter_types! { pub static AdvertisedXcmVersion: pallet_xcm::XcmVersion = 3; } -#[cfg(feature = "runtime-benchmarks")] -parameter_types! 
{ - pub ReachableDest: Option = Some(Parachain(1000).into()); +pub struct XcmTeleportFiltered; +impl Contains<(MultiLocation, Vec)> for XcmTeleportFiltered { + fn contains(t: &(MultiLocation, Vec)) -> bool { + let filtered = FilteredTeleportAsset::get(); + t.1.iter().any(|asset| asset == &filtered) + } } impl pallet_xcm::Config for Test { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; - type XcmRouter = (TestSendXcmErrX8, TestSendXcm); + type XcmRouter = XcmRouter; type ExecuteXcmOrigin = xcm_builder::EnsureXcmOrigin; type XcmExecuteFilter = Everything; type XcmExecutor = XcmExecutor; - type XcmTeleportFilter = Everything; + type XcmTeleportFilter = EverythingBut; type XcmReserveTransferFilter = Everything; type Weigher = FixedWeightBounds; type UniversalLocation = UniversalLocation; @@ -334,6 +536,7 @@ impl pallet_xcm::Config for Test { type RuntimeCall = RuntimeCall; const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100; type AdvertisedXcmVersion = AdvertisedXcmVersion; + type AdminOrigin = EnsureRoot; type TrustedLockers = (); type SovereignAccountOf = AccountId32Aliases<(), AccountId32>; type Currency = Balances; @@ -342,9 +545,6 @@ impl pallet_xcm::Config for Test { type MaxRemoteLockConsumers = frame_support::traits::ConstU32<0>; type RemoteLockConsumerIdentifier = (); type WeightInfo = TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; - type AdminOrigin = EnsureRoot; } impl origin::Config for Test {} @@ -355,6 +555,24 @@ impl pallet_test_notifier::Config for Test { type RuntimeCall = RuntimeCall; } +#[cfg(feature = "runtime-benchmarks")] +impl super::benchmarking::Config for Test { + fn reachable_dest() -> Option { + Some(Parachain(1000).into()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + Some((NativeAsset::get(), SystemParachainLocation::get())) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + Some(( + MultiAsset { fun: Fungible(10), id: Concrete(Here.into_location()) }, + Parachain(OTHER_PARA_ID).into(), + )) + } +} + pub(crate) fn last_event() -> RuntimeEvent { System::events().pop().expect("RuntimeEvent expected").event } @@ -370,10 +588,10 @@ pub(crate) fn buy_execution(fees: impl Into) -> Instruction { pub(crate) fn buy_limited_execution( fees: impl Into, - weight: Weight, + weight_limit: WeightLimit, ) -> Instruction { use xcm::latest::prelude::*; - BuyExecution { fees: fees.into(), weight_limit: Limited(weight) } + BuyExecution { fees: fees.into(), weight_limit } } pub(crate) fn new_test_ext_with_balances( diff --git a/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs b/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs new file mode 100644 index 0000000000000000000000000000000000000000..bf39e1ca28837c3e0dacfdc07233d47f520dfefc --- /dev/null +++ b/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs @@ -0,0 +1,1459 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +#![cfg(test)] + +use crate::{ + mock::*, + tests::{ALICE, BOB, FEE_AMOUNT, INITIAL_BALANCE, SEND_AMOUNT}, +}; +use frame_support::{ + assert_ok, + traits::{tokens::fungibles::Inspect, Currency}, + weights::Weight, +}; +use polkadot_parachain_primitives::primitives::Id as ParaId; +use sp_runtime::{traits::AccountIdConversion, DispatchError, ModuleError}; +use xcm::prelude::*; +use xcm_executor::traits::ConvertLocation; + +// Helper function to deduplicate testing different teleport types. +fn do_test_and_verify_teleport_assets( + expected_beneficiary: MultiLocation, + call: Call, + expected_weight_limit: WeightLimit, +) { + let balances = vec![ + (ALICE, INITIAL_BALANCE), + (ParaId::from(OTHER_PARA_ID).into_account_truncating(), INITIAL_BALANCE), + ]; + new_test_ext_with_balances(balances).execute_with(|| { + let weight = BaseXcmWeight::get() * 3; + assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); + // call extrinsic + call(); + assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE - SEND_AMOUNT); + assert_eq!( + sent_xcm(), + vec![( + RelayLocation::get().into(), + Xcm(vec![ + ReceiveTeleportedAsset((Here, SEND_AMOUNT).into()), + ClearOrigin, + buy_limited_execution((Here, SEND_AMOUNT), expected_weight_limit), + DepositAsset { + assets: AllCounted(1).into(), + beneficiary: expected_beneficiary + }, + ]), + )] + ); + let versioned_sent = VersionedXcm::from(sent_xcm().into_iter().next().unwrap().1); + let _check_v2_ok: xcm::v2::Xcm<()> = versioned_sent.try_into().unwrap(); + assert_eq!( + last_event(), + RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) + ); + }); +} + +/// Test `teleport_assets` +/// +/// Asserts that the sender's balance is decreased as a result of execution of +/// local effects. +#[test] +fn teleport_assets_works() { + let beneficiary: MultiLocation = AccountId32 { network: None, id: BOB.into() }.into(); + do_test_and_verify_teleport_assets( + beneficiary, + || { + assert_ok!(XcmPallet::teleport_assets( + RuntimeOrigin::signed(ALICE), + Box::new(RelayLocation::get().into()), + Box::new(beneficiary.into()), + Box::new((Here, SEND_AMOUNT).into()), + 0, + )); + }, + Unlimited, + ); +} + +/// Test `limited_teleport_assets` +/// +/// Asserts that the sender's balance is decreased as a result of execution of +/// local effects. 
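+/// Also verifies the provided weight limit is used for `BuyExecution` in the sent XCM program.
+/// A sketch of the flow asserted by the shared helper, in the same style as the diagrams below:
+///
+/// ```nocompile
+/// Here (source)                                RelayLocation (destination)
+/// |
+/// |  1. teleport `assets`
+/// |     \--> sends `ReceiveTeleportedAsset(assets), ClearOrigin, BuyExecution(assets), DepositAsset`
+/// \------------------------------------------>
+/// ```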
+#[test] +fn limited_teleport_assets_works() { + let beneficiary: MultiLocation = AccountId32 { network: None, id: BOB.into() }.into(); + let weight_limit = WeightLimit::Limited(Weight::from_parts(5000, 5000)); + let expected_weight_limit = weight_limit.clone(); + do_test_and_verify_teleport_assets( + beneficiary, + || { + assert_ok!(XcmPallet::limited_teleport_assets( + RuntimeOrigin::signed(ALICE), + Box::new(RelayLocation::get().into()), + Box::new(beneficiary.into()), + Box::new((Here, SEND_AMOUNT).into()), + 0, + weight_limit, + )); + }, + expected_weight_limit, + ); +} + +/// `limited_teleport_assets` should fail for filtered assets +#[test] +fn limited_teleport_filtered_assets_disallowed() { + let beneficiary: MultiLocation = AccountId32 { network: None, id: BOB.into() }.into(); + new_test_ext_with_balances(vec![(ALICE, INITIAL_BALANCE)]).execute_with(|| { + let result = XcmPallet::limited_teleport_assets( + RuntimeOrigin::signed(ALICE), + Box::new(FilteredTeleportLocation::get().into()), + Box::new(beneficiary.into()), + Box::new(FilteredTeleportAsset::get().into()), + 0, + Unlimited, + ); + assert_eq!( + result, + Err(DispatchError::Module(ModuleError { + index: 4, + error: [2, 0, 0, 0], + message: Some("Filtered") + })) + ); + }); +} + +/// Test `reserve_transfer_assets_with_paid_router_works` +/// +/// Asserts that the sender's balance is decreased and the beneficiary's balance +/// is increased. Verifies the correct message is sent and event is emitted. +/// Verifies that XCM router fees (`SendXcm::validate` -> `MultiAssets`) are withdrawn from correct +/// user account and deposited to a correct target account (`XcmFeesTargetAccount`). +#[test] +fn reserve_transfer_assets_with_paid_router_works() { + let user_account = AccountId::from(XCM_FEES_NOT_WAIVED_USER_ACCOUNT); + let paid_para_id = Para3000::get(); + let balances = vec![ + (user_account.clone(), INITIAL_BALANCE), + (ParaId::from(paid_para_id).into_account_truncating(), INITIAL_BALANCE), + (XcmFeesTargetAccount::get(), INITIAL_BALANCE), + ]; + new_test_ext_with_balances(balances).execute_with(|| { + let xcm_router_fee_amount = Para3000PaymentAmount::get(); + let weight = BaseXcmWeight::get(); + let dest: MultiLocation = + Junction::AccountId32 { network: None, id: user_account.clone().into() }.into(); + assert_eq!(Balances::total_balance(&user_account), INITIAL_BALANCE); + assert_ok!(XcmPallet::reserve_transfer_assets( + RuntimeOrigin::signed(user_account.clone()), + Box::new(Parachain(paid_para_id).into()), + Box::new(dest.into()), + Box::new((Here, SEND_AMOUNT).into()), + 0, + )); + + // XCM_FEES_NOT_WAIVED_USER_ACCOUNT spent amount + assert_eq!( + Balances::free_balance(user_account), + INITIAL_BALANCE - SEND_AMOUNT - xcm_router_fee_amount + ); + + // Destination account (parachain account) has amount + let para_acc: AccountId = ParaId::from(paid_para_id).into_account_truncating(); + assert_eq!(Balances::free_balance(para_acc), INITIAL_BALANCE + SEND_AMOUNT); + + // XcmFeesTargetAccount where should lend xcm_router_fee_amount + assert_eq!( + Balances::free_balance(XcmFeesTargetAccount::get()), + INITIAL_BALANCE + xcm_router_fee_amount + ); + + let dest_para: MultiLocation = Parachain(paid_para_id).into(); + assert_eq!( + sent_xcm(), + vec![( + dest_para, + Xcm(vec![ + ReserveAssetDeposited((Parent, SEND_AMOUNT).into()), + ClearOrigin, + buy_execution((Parent, SEND_AMOUNT)), + DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, + ]), + )] + ); + let mut last_events = last_events(5).into_iter(); + 
assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) + ); + // balances events + last_events.next().unwrap(); + last_events.next().unwrap(); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::FeesPaid { + paying: dest, + fees: Para3000PaymentMultiAssets::get(), + }) + ); + assert!(matches!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Sent { .. }) + )); + }); +} + +fn set_up_foreign_asset( + reserve_para_id: u32, + inner_junction: Option, + initial_amount: u128, + is_sufficient: bool, +) -> (MultiLocation, AccountId, MultiLocation) { + let reserve_location = + RelayLocation::get().pushed_with_interior(Parachain(reserve_para_id)).unwrap(); + let reserve_sovereign_account = + SovereignAccountOf::convert_location(&reserve_location).unwrap(); + + let foreign_asset_id_multilocation = if let Some(junction) = inner_junction { + reserve_location.pushed_with_interior(junction).unwrap() + } else { + reserve_location + }; + + // create sufficient (to be used as fees as well) foreign asset (0 total issuance) + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + foreign_asset_id_multilocation, + BOB, + is_sufficient, + 1 + )); + // this asset should have been teleported/reserve-transferred in, but for this test we just + // mint it locally. + assert_ok!(Assets::mint( + RuntimeOrigin::signed(BOB), + foreign_asset_id_multilocation, + ALICE, + initial_amount + )); + + (reserve_location, reserve_sovereign_account, foreign_asset_id_multilocation) +} + +// Helper function that provides correct `fee_index` after `sort()` done by +// `vec![MultiAsset, MultiAsset].into()`. +fn into_multiassets_checked( + fee_asset: MultiAsset, + transfer_asset: MultiAsset, +) -> (MultiAssets, usize, MultiAsset, MultiAsset) { + let assets: MultiAssets = vec![fee_asset.clone(), transfer_asset.clone()].into(); + let fee_index = if assets.get(0).unwrap().eq(&fee_asset) { 0 } else { 1 }; + (assets, fee_index, fee_asset, transfer_asset) +} + +/// Test `limited_reserve_transfer_assets` with local asset reserve and local fee reserve. +/// +/// Transferring native asset (local reserve) to some `OTHER_PARA_ID` (no teleport trust). +/// Using native asset for fees as well. +/// +/// ```nocompile +/// Here (source) OTHER_PARA_ID (destination) +/// | `assets` reserve +/// | `fees` reserve +/// | +/// | 1. 
execute `TransferReserveAsset(assets_and_fees_batched_together)` +/// | \--> sends `ReserveAssetDeposited(both), ClearOrigin, BuyExecution(fees), DepositAsset` +/// \------------------------------------------> +/// ``` +#[test] +fn limited_reserve_transfer_assets_with_local_asset_reserve_and_local_fee_reserve_works() { + let balances = vec![ + (ALICE, INITIAL_BALANCE), + (ParaId::from(OTHER_PARA_ID).into_account_truncating(), INITIAL_BALANCE), + ]; + + let beneficiary: MultiLocation = + Junction::AccountId32 { network: None, id: ALICE.into() }.into(); + let weight_limit = WeightLimit::Limited(Weight::from_parts(5000, 5000)); + let expected_weight_limit = weight_limit.clone(); + let expected_beneficiary = beneficiary; + let dest: MultiLocation = Parachain(OTHER_PARA_ID).into(); + + new_test_ext_with_balances(balances).execute_with(|| { + let weight = BaseXcmWeight::get(); + assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); + // call extrinsic + assert_ok!(XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new((Here, SEND_AMOUNT).into()), + 0, + weight_limit, + )); + // Alice spent amount + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE - SEND_AMOUNT); + // Destination account (parachain account) has amount + let para_acc: AccountId = ParaId::from(OTHER_PARA_ID).into_account_truncating(); + assert_eq!(Balances::free_balance(para_acc), INITIAL_BALANCE + SEND_AMOUNT); + assert_eq!( + sent_xcm(), + vec![( + dest, + Xcm(vec![ + ReserveAssetDeposited((Parent, SEND_AMOUNT).into()), + ClearOrigin, + buy_limited_execution((Parent, SEND_AMOUNT), expected_weight_limit), + DepositAsset { + assets: AllCounted(1).into(), + beneficiary: expected_beneficiary + }, + ]), + )] + ); + let mut last_events = last_events(3).into_iter(); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) + ); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::FeesPaid { + paying: expected_beneficiary, + fees: MultiAssets::new(), + }) + ); + assert!(matches!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Sent { .. }) + )); + }); +} + +/// Test `reserve_transfer_assets` with destination asset reserve and local fee reserve. +/// +/// Transferring foreign asset (`FOREIGN_ASSET_RESERVE_PARA_ID` reserve) to +/// `FOREIGN_ASSET_RESERVE_PARA_ID` (no teleport trust). +/// Using native asset (local reserve) for fees. +/// +/// ```nocompile +/// Here (source) FOREIGN_ASSET_RESERVE_PARA_ID (destination) +/// | `fees` reserve `assets` reserve +/// | +/// | 1. execute `TransferReserveAsset(fees)` +/// | \-> sends `ReserveAssetDeposited(fees), ClearOrigin, BuyExecution(fees), DepositAsset` +/// | 2. execute `InitiateReserveWithdraw(assets)` +/// | \--> sends `WithdrawAsset(assets), ClearOrigin, BuyExecution(fees), DepositAsset` +/// \------------------------------------------> +/// ``` +/// +/// Asserts that the sender's balance is decreased and the beneficiary's balance +/// is increased. Verifies the correct message is sent and event is emitted. 
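+/// In the sent program, `fees` are moved through a local-reserve transfer
+/// (`ReserveAssetDeposited`), while `assets` are withdrawn (`WithdrawAsset`) from the source's
+/// sovereign account on the reserve (destination) chain.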
+#[test] +fn reserve_transfer_assets_with_destination_asset_reserve_and_local_fee_reserve_works() { + let weight = BaseXcmWeight::get() * 3; + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = + Junction::AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // create non-sufficient foreign asset BLA (0 total issuance) + let foreign_initial_amount = 142; + let (reserve_location, reserve_sovereign_account, foreign_asset_id_multilocation) = + set_up_foreign_asset( + FOREIGN_ASSET_RESERVE_PARA_ID, + Some(FOREIGN_ASSET_INNER_JUNCTION), + foreign_initial_amount, + false, + ); + + // transfer destination is reserve location (no teleport trust) + let dest = reserve_location; + + let (assets, fee_index, fee_asset, xfer_asset) = into_multiassets_checked( + // native asset for fee - local reserve + (MultiLocation::here(), FEE_AMOUNT).into(), + // foreign asset to transfer - destination reserve + (foreign_asset_id_multilocation, SEND_AMOUNT).into(), + ); + + // reanchor according to test-case + let context = UniversalLocation::get(); + let expected_fee = fee_asset.reanchored(&dest, context).unwrap(); + let expected_asset = xfer_asset.reanchored(&dest, context).unwrap(); + + // balances checks before + assert_eq!(Assets::balance(foreign_asset_id_multilocation, ALICE), foreign_initial_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // do the transfer + assert_ok!(XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + )); + + let mut last_events = last_events(3).into_iter(); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) + ); + + // Alice spent (transferred) amount + assert_eq!( + Assets::balance(foreign_asset_id_multilocation, ALICE), + foreign_initial_amount - SEND_AMOUNT + ); + // Alice used native asset for fees + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE - FEE_AMOUNT); + // Destination account (parachain account) added native reserve used as fee to balances + assert_eq!(Balances::free_balance(reserve_sovereign_account.clone()), FEE_AMOUNT); + assert_eq!(Assets::balance(foreign_asset_id_multilocation, reserve_sovereign_account), 0); + // Verify total and active issuance of foreign BLA have decreased (burned on + // reserve-withdraw) + let expected_issuance = foreign_initial_amount - SEND_AMOUNT; + assert_eq!(Assets::total_issuance(foreign_asset_id_multilocation), expected_issuance); + assert_eq!(Assets::active_issuance(foreign_asset_id_multilocation), expected_issuance); + + // Verify sent XCM program + assert_eq!( + sent_xcm(), + vec![( + dest, + // `fees` are being sent through local-reserve transfer because fee reserve is + // local chain; `assets` are burned on source and withdrawn from SA here + Xcm(vec![ + ReserveAssetDeposited((Parent, FEE_AMOUNT).into()), + buy_limited_execution(expected_fee, Unlimited), + WithdrawAsset(expected_asset.into()), + ClearOrigin, + DepositAsset { assets: AllCounted(2).into(), beneficiary }, + ]) + )] + ); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::FeesPaid { + paying: beneficiary, + fees: MultiAssets::new(), + }) + ); + assert!(matches!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Sent { .. 
}) + )); + }); +} + +/// Test `reserve_transfer_assets` with remote asset reserve and local fee reserve. +/// +/// Transferring foreign asset (reserve on `FOREIGN_ASSET_RESERVE_PARA_ID`) to `OTHER_PARA_ID`. +/// Using native (local reserve) as fee should be disallowed. +#[test] +fn reserve_transfer_assets_with_remote_asset_reserve_and_local_fee_reserve_disallowed() { + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = + Junction::AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // create non-sufficient foreign asset BLA (0 total issuance) + let foreign_initial_amount = 142; + let (_, _, foreign_asset_id_multilocation) = set_up_foreign_asset( + FOREIGN_ASSET_RESERVE_PARA_ID, + Some(FOREIGN_ASSET_INNER_JUNCTION), + foreign_initial_amount, + false, + ); + + // transfer destination is OTHER_PARA_ID (foreign asset needs to go through its reserve + // chain) + let dest = RelayLocation::get().pushed_with_interior(Parachain(OTHER_PARA_ID)).unwrap(); + + let (assets, fee_index, _, _) = into_multiassets_checked( + // native asset for fee - local reserve + (MultiLocation::here(), FEE_AMOUNT).into(), + // foreign asset to transfer - remote reserve + (foreign_asset_id_multilocation, SEND_AMOUNT).into(), + ); + + // balances checks before + assert_eq!(Assets::balance(foreign_asset_id_multilocation, ALICE), foreign_initial_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // try the transfer + let result = XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + ); + assert_eq!( + result, + Err(DispatchError::Module(ModuleError { + index: 4, + error: [22, 0, 0, 0], + message: Some("InvalidAssetUnsupportedReserve") + })) + ); + + // Alice transferred nothing + assert_eq!(Assets::balance(foreign_asset_id_multilocation, ALICE), foreign_initial_amount); + // Alice spent native asset for fees + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + // Verify total and active issuance of foreign BLA asset have decreased (burned on + // reserve-withdraw) + let expected_issuance = foreign_initial_amount; + assert_eq!(Assets::total_issuance(foreign_asset_id_multilocation), expected_issuance); + assert_eq!(Assets::active_issuance(foreign_asset_id_multilocation), expected_issuance); + }); +} + +/// Test `reserve_transfer_assets` with local asset reserve and destination fee reserve. +/// +/// Transferring native asset (local reserve) to `USDC_RESERVE_PARA_ID` (no teleport trust). Using +/// foreign asset (`USDC_RESERVE_PARA_ID` reserve) for fees. +/// +/// ```nocompile +/// Here (source) USDC_RESERVE_PARA_ID (destination) +/// | `assets` reserve `fees` reserve +/// | +/// | 1. execute `InitiateReserveWithdraw(fees)` +/// | \--> sends `WithdrawAsset(fees), ClearOrigin, BuyExecution(fees), DepositAsset` +/// | 2. 
execute `TransferReserveAsset(assets)` +/// | \-> sends `ReserveAssetDeposited(assets), ClearOrigin, BuyExecution(fees), DepositAsset` +/// \------------------------------------------> +/// ``` +#[test] +fn reserve_transfer_assets_with_local_asset_reserve_and_destination_fee_reserve_works() { + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = + Junction::AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // create sufficient foreign asset USDC (0 total issuance) + let usdc_initial_local_amount = 142; + let (usdc_reserve_location, usdc_chain_sovereign_account, usdc_id_multilocation) = + set_up_foreign_asset( + USDC_RESERVE_PARA_ID, + Some(USDC_INNER_JUNCTION), + usdc_initial_local_amount, + true, + ); + + // native assets transfer to fee reserve location (no teleport trust) + let dest = usdc_reserve_location; + + let (assets, fee_index, fee_asset, xfer_asset) = into_multiassets_checked( + // usdc for fees (is sufficient on local chain too) - destination reserve + (usdc_id_multilocation, FEE_AMOUNT).into(), + // native asset to transfer (not used for fees) - local reserve + (MultiLocation::here(), SEND_AMOUNT).into(), + ); + + // reanchor according to test-case + let context = UniversalLocation::get(); + let expected_fee = fee_asset.reanchored(&dest, context).unwrap(); + let expected_asset = xfer_asset.reanchored(&dest, context).unwrap(); + + // balances checks before + assert_eq!(Assets::balance(usdc_id_multilocation, ALICE), usdc_initial_local_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // do the transfer + assert_ok!(XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + )); + let weight = BaseXcmWeight::get() * 3; + let mut last_events = last_events(3).into_iter(); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) + ); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::FeesPaid { + paying: beneficiary, + fees: MultiAssets::new(), + }) + ); + assert!(matches!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Sent { .. 
}) + )); + + // Alice spent (fees) amount + assert_eq!( + Assets::balance(usdc_id_multilocation, ALICE), + usdc_initial_local_amount - FEE_AMOUNT + ); + // Alice used native asset for transfer + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE - SEND_AMOUNT); + // Sovereign account of dest parachain holds `SEND_AMOUNT` native asset in local reserve + assert_eq!(Balances::free_balance(usdc_chain_sovereign_account.clone()), SEND_AMOUNT); + assert_eq!(Assets::balance(usdc_id_multilocation, usdc_chain_sovereign_account), 0); + // Verify total and active issuance of USDC have decreased (burned on reserve-withdraw) + let expected_issuance = usdc_initial_local_amount - FEE_AMOUNT; + assert_eq!(Assets::total_issuance(usdc_id_multilocation), expected_issuance); + assert_eq!(Assets::active_issuance(usdc_id_multilocation), expected_issuance); + + // Verify sent XCM program + assert_eq!( + sent_xcm(), + vec![( + dest, + Xcm(vec![ + // fees are being sent through destination-reserve transfer because fee reserve + // is destination chain + WithdrawAsset(expected_fee.clone().into()), + buy_limited_execution(expected_fee, Unlimited), + // transfer is through local-reserve transfer because `assets` (native asset) + // have local reserve + ReserveAssetDeposited(expected_asset.into()), + ClearOrigin, + DepositAsset { assets: AllCounted(2).into(), beneficiary }, + ]) + )] + ); + }); +} + +/// Test `reserve_transfer_assets` with destination asset reserve and destination fee reserve. +/// +/// ```nocompile +/// Here (source) FOREIGN_ASSET_RESERVE_PARA_ID (destination) +/// | `fees` reserve +/// | `assets` reserve +/// | +/// | 1. execute `InitiateReserveWithdraw(assets_and_fees_batched_together)` +/// | \--> sends `WithdrawAsset(batch), ClearOrigin, BuyExecution(fees), DepositAsset` +/// \------------------------------------------> +/// ``` +#[test] +fn reserve_transfer_assets_with_destination_asset_reserve_and_destination_fee_reserve_works() { + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = + Junction::AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // we'll send just this foreign asset back to its reserve location and use it for fees as + // well + let foreign_initial_amount = 142; + let (reserve_location, reserve_sovereign_account, foreign_asset_id_multilocation) = + set_up_foreign_asset( + FOREIGN_ASSET_RESERVE_PARA_ID, + Some(FOREIGN_ASSET_INNER_JUNCTION), + foreign_initial_amount, + true, + ); + + // transfer destination is reserve location + let dest = reserve_location; + let assets: MultiAssets = vec![(foreign_asset_id_multilocation, SEND_AMOUNT).into()].into(); + let fee_index = 0; + + // reanchor according to test-case + let mut expected_assets = assets.clone(); + expected_assets.reanchor(&dest, UniversalLocation::get()).unwrap(); + + // balances checks before + assert_eq!(Assets::balance(foreign_asset_id_multilocation, ALICE), foreign_initial_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // do the transfer + assert_ok!(XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index, + Unlimited, + )); + + let weight = BaseXcmWeight::get() * 2; + let mut last_events = last_events(3).into_iter(); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) + ); + assert_eq!( + 
last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::FeesPaid { + paying: beneficiary, + fees: MultiAssets::new(), + }) + ); + assert!(matches!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Sent { .. }) + )); + + // Alice spent (transferred) amount + assert_eq!( + Assets::balance(foreign_asset_id_multilocation, ALICE), + foreign_initial_amount - SEND_AMOUNT + ); + // Alice's native asset balance is untouched + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + // Reserve sovereign account has same balances + assert_eq!(Balances::free_balance(reserve_sovereign_account.clone()), 0); + assert_eq!(Assets::balance(foreign_asset_id_multilocation, reserve_sovereign_account), 0); + // Verify total and active issuance of foreign BLA have decreased (burned on + // reserve-withdraw) + let expected_issuance = foreign_initial_amount - SEND_AMOUNT; + assert_eq!(Assets::total_issuance(foreign_asset_id_multilocation), expected_issuance); + assert_eq!(Assets::active_issuance(foreign_asset_id_multilocation), expected_issuance); + + // Verify sent XCM program + assert_eq!( + sent_xcm(), + vec![( + Parachain(FOREIGN_ASSET_RESERVE_PARA_ID).into(), + Xcm(vec![ + WithdrawAsset(expected_assets.clone()), + ClearOrigin, + buy_limited_execution(expected_assets.get(0).unwrap().clone(), Unlimited), + DepositAsset { assets: AllCounted(1).into(), beneficiary }, + ]), + )] + ); + }); +} + +/// Test `reserve_transfer_assets` with remote asset reserve and destination fee reserve is +/// disallowed. +/// +/// Transferring foreign asset (reserve on `FOREIGN_ASSET_RESERVE_PARA_ID`) to +/// `USDC_RESERVE_PARA_ID`. Using USDC (destination reserve) as fee. +#[test] +fn reserve_transfer_assets_with_remote_asset_reserve_and_destination_fee_reserve_disallowed() { + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = + Junction::AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // create sufficient foreign asset USDC (0 total issuance) + let usdc_initial_local_amount = 42; + let (usdc_chain, _, usdc_id_multilocation) = set_up_foreign_asset( + USDC_RESERVE_PARA_ID, + Some(USDC_INNER_JUNCTION), + usdc_initial_local_amount, + true, + ); + + // create non-sufficient foreign asset BLA (0 total issuance) + let foreign_initial_amount = 142; + let (_, _, foreign_asset_id_multilocation) = set_up_foreign_asset( + FOREIGN_ASSET_RESERVE_PARA_ID, + Some(FOREIGN_ASSET_INNER_JUNCTION), + foreign_initial_amount, + false, + ); + + // transfer destination is USDC chain (foreign asset BLA needs to go through its separate + // reserve chain) + let dest = usdc_chain; + + let (assets, fee_index, _, _) = into_multiassets_checked( + // USDC for fees (is sufficient on local chain too) - destination reserve + (usdc_id_multilocation, FEE_AMOUNT).into(), + // foreign asset to transfer (not used for fees) - remote reserve + (foreign_asset_id_multilocation, SEND_AMOUNT).into(), + ); + + // balances checks before + assert_eq!(Assets::balance(usdc_id_multilocation, ALICE), usdc_initial_local_amount); + assert_eq!(Assets::balance(foreign_asset_id_multilocation, ALICE), foreign_initial_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // do the transfer + let result = XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + ); + assert_eq!( + result, + 
Err(DispatchError::Module(ModuleError { + index: 4, + error: [22, 0, 0, 0], + message: Some("InvalidAssetUnsupportedReserve") + })) + ); + + // Alice native asset untouched + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + assert_eq!(Assets::balance(usdc_id_multilocation, ALICE), usdc_initial_local_amount); + assert_eq!(Assets::balance(foreign_asset_id_multilocation, ALICE), foreign_initial_amount); + let expected_usdc_issuance = usdc_initial_local_amount; + assert_eq!(Assets::total_issuance(usdc_id_multilocation), expected_usdc_issuance); + assert_eq!(Assets::active_issuance(usdc_id_multilocation), expected_usdc_issuance); + let expected_bla_issuance = foreign_initial_amount; + assert_eq!(Assets::total_issuance(foreign_asset_id_multilocation), expected_bla_issuance); + assert_eq!(Assets::active_issuance(foreign_asset_id_multilocation), expected_bla_issuance); + }); +} + +/// Test `reserve_transfer_assets` with local asset reserve and remote fee reserve is disallowed. +/// +/// Transferring native asset (local reserve) to `OTHER_PARA_ID` (no teleport trust). Using foreign +/// asset (`USDC_RESERVE_PARA_ID` remote reserve) for fees. +#[test] +fn reserve_transfer_assets_with_local_asset_reserve_and_remote_fee_reserve_disallowed() { + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = + Junction::AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // create sufficient foreign asset USDC (0 total issuance) + let usdc_initial_local_amount = 142; + let (_, usdc_chain_sovereign_account, usdc_id_multilocation) = set_up_foreign_asset( + USDC_RESERVE_PARA_ID, + Some(USDC_INNER_JUNCTION), + usdc_initial_local_amount, + true, + ); + + // transfer destination is some other parachain != fee reserve location (no teleport trust) + let dest = RelayLocation::get().pushed_with_interior(Parachain(OTHER_PARA_ID)).unwrap(); + let dest_sovereign_account = SovereignAccountOf::convert_location(&dest).unwrap(); + + let (assets, fee_index, _, _) = into_multiassets_checked( + // USDC for fees (is sufficient on local chain too) - remote reserve + (usdc_id_multilocation, FEE_AMOUNT).into(), + // native asset to transfer (not used for fees) - local reserve + (MultiLocation::here(), SEND_AMOUNT).into(), + ); + + // balances checks before + assert_eq!(Assets::balance(usdc_id_multilocation, ALICE), usdc_initial_local_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // do the transfer + let result = XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + ); + assert_eq!( + result, + Err(DispatchError::Module(ModuleError { + index: 4, + error: [22, 0, 0, 0], + message: Some("InvalidAssetUnsupportedReserve") + })) + ); + assert_eq!(Assets::balance(usdc_id_multilocation, ALICE), usdc_initial_local_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + // Sovereign account of reserve parachain is unchanged + assert_eq!(Balances::free_balance(usdc_chain_sovereign_account.clone()), 0); + assert_eq!(Assets::balance(usdc_id_multilocation, usdc_chain_sovereign_account), 0); + assert_eq!(Balances::free_balance(dest_sovereign_account), 0); + let expected_usdc_issuance = usdc_initial_local_amount; + assert_eq!(Assets::total_issuance(usdc_id_multilocation), expected_usdc_issuance); + assert_eq!(Assets::active_issuance(usdc_id_multilocation), 
expected_usdc_issuance); + }); +} + +/// Test `reserve_transfer_assets` with destination asset reserve and remote fee reserve is +/// disallowed. +/// +/// Transferring native asset (local reserve) to `OTHER_PARA_ID` (no teleport trust). Using foreign +/// asset (`USDC_RESERVE_PARA_ID` remote reserve) for fees. +#[test] +fn reserve_transfer_assets_with_destination_asset_reserve_and_remote_fee_reserve_disallowed() { + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = + Junction::AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // create sufficient foreign asset USDC (0 total issuance) + let usdc_initial_local_amount = 42; + let (_, usdc_chain_sovereign_account, usdc_id_multilocation) = set_up_foreign_asset( + USDC_RESERVE_PARA_ID, + Some(USDC_INNER_JUNCTION), + usdc_initial_local_amount, + true, + ); + + // create non-sufficient foreign asset BLA (0 total issuance) + let foreign_initial_amount = 142; + let (reserve_location, foreign_sovereign_account, foreign_asset_id_multilocation) = + set_up_foreign_asset( + FOREIGN_ASSET_RESERVE_PARA_ID, + Some(FOREIGN_ASSET_INNER_JUNCTION), + foreign_initial_amount, + false, + ); + + // transfer destination is asset reserve location + let dest = reserve_location; + let dest_sovereign_account = foreign_sovereign_account; + + let (assets, fee_index, _, _) = into_multiassets_checked( + // USDC for fees (is sufficient on local chain too) - remote reserve + (usdc_id_multilocation, FEE_AMOUNT).into(), + // foreign asset to transfer (not used for fees) - destination reserve + (foreign_asset_id_multilocation, SEND_AMOUNT).into(), + ); + + // balances checks before + assert_eq!(Assets::balance(usdc_id_multilocation, ALICE), usdc_initial_local_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // do the transfer + let result = XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + ); + assert_eq!( + result, + Err(DispatchError::Module(ModuleError { + index: 4, + error: [22, 0, 0, 0], + message: Some("InvalidAssetUnsupportedReserve") + })) + ); + // Alice native asset untouched + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + assert_eq!(Assets::balance(usdc_id_multilocation, ALICE), usdc_initial_local_amount); + assert_eq!(Assets::balance(foreign_asset_id_multilocation, ALICE), foreign_initial_amount); + assert_eq!(Balances::free_balance(usdc_chain_sovereign_account.clone()), 0); + assert_eq!(Assets::balance(usdc_id_multilocation, usdc_chain_sovereign_account), 0); + assert_eq!(Balances::free_balance(dest_sovereign_account.clone()), 0); + assert_eq!(Assets::balance(foreign_asset_id_multilocation, dest_sovereign_account), 0); + let expected_usdc_issuance = usdc_initial_local_amount; + assert_eq!(Assets::total_issuance(usdc_id_multilocation), expected_usdc_issuance); + assert_eq!(Assets::active_issuance(usdc_id_multilocation), expected_usdc_issuance); + let expected_bla_issuance = foreign_initial_amount; + assert_eq!(Assets::total_issuance(foreign_asset_id_multilocation), expected_bla_issuance); + assert_eq!(Assets::active_issuance(foreign_asset_id_multilocation), expected_bla_issuance); + }); +} + +/// Test `reserve_transfer_assets` with remote asset reserve and (same) remote fee reserve. +/// +/// Transferring native asset (local reserve) to `OTHER_PARA_ID` (no teleport trust). 
Using foreign +/// asset (`USDC_RESERVE_PARA_ID` remote reserve) for fees. +/// +/// ```nocompile +/// | chain `A` | chain `C` | chain `B` +/// | Here (source) | USDC_RESERVE_PARA_ID | OTHER_PARA_ID (destination) +/// | | `fees` reserve | +/// | | `assets` reserve | +/// | +/// | 1. `A` executes `InitiateReserveWithdraw(both)` dest `C` +/// | -----------------> `C` executes `DepositReserveAsset(both)` dest `B` +/// | --------------------------> `DepositAsset(both)` +/// ``` +#[test] +fn reserve_transfer_assets_with_remote_asset_reserve_and_remote_fee_reserve_works() { + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = + Junction::AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // create sufficient foreign asset USDC (0 total issuance) + let usdc_initial_local_amount = 142; + let (usdc_chain, usdc_chain_sovereign_account, usdc_id_multilocation) = + set_up_foreign_asset( + USDC_RESERVE_PARA_ID, + Some(USDC_INNER_JUNCTION), + usdc_initial_local_amount, + true, + ); + + // transfer destination is some other parachain + let dest = RelayLocation::get().pushed_with_interior(Parachain(OTHER_PARA_ID)).unwrap(); + + let assets: MultiAssets = vec![(usdc_id_multilocation, SEND_AMOUNT).into()].into(); + let fee_index = 0; + + // reanchor according to test-case + let context = UniversalLocation::get(); + let expected_dest_on_reserve = dest.reanchored(&usdc_chain, context).unwrap(); + let fees = assets.get(fee_index).unwrap().clone(); + let (fees_half_1, fees_half_2) = XcmPallet::halve_fees(fees).unwrap(); + let mut expected_assets_on_reserve = assets.clone(); + expected_assets_on_reserve.reanchor(&usdc_chain, context).unwrap(); + let expected_fee_on_reserve = fees_half_1.reanchored(&usdc_chain, context).unwrap(); + let expected_fee_on_dest = fees_half_2.reanchored(&dest, context).unwrap(); + + // balances checks before + assert_eq!(Assets::balance(usdc_id_multilocation, ALICE), usdc_initial_local_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // do the transfer + assert_ok!(XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + )); + assert!(matches!( + last_event(), + RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(_) }) + )); + + // Alice spent (transferred) amount + assert_eq!( + Assets::balance(usdc_id_multilocation, ALICE), + usdc_initial_local_amount - SEND_AMOUNT + ); + // Alice's native asset balance is untouched + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + // Destination account (parachain account) has expected (same) balances + assert_eq!(Balances::free_balance(usdc_chain_sovereign_account.clone()), 0); + assert_eq!(Assets::balance(usdc_id_multilocation, usdc_chain_sovereign_account), 0); + // Verify total and active issuance of USDC have decreased (burned on reserve-withdraw) + let expected_usdc_issuance = usdc_initial_local_amount - SEND_AMOUNT; + assert_eq!(Assets::total_issuance(usdc_id_multilocation), expected_usdc_issuance); + assert_eq!(Assets::active_issuance(usdc_id_multilocation), expected_usdc_issuance); + + // Verify sent XCM program + assert_eq!( + sent_xcm(), + vec![( + // first message sent to reserve chain + usdc_chain, + Xcm(vec![ + WithdrawAsset(expected_assets_on_reserve), + ClearOrigin, + BuyExecution { fees: expected_fee_on_reserve, weight_limit: Unlimited }, + 
DepositReserveAsset { + assets: Wild(AllCounted(1)), + // final destination is `dest` as seen by `reserve` + dest: expected_dest_on_reserve, + // message sent onward to `dest` + xcm: Xcm(vec![ + buy_limited_execution(expected_fee_on_dest, Unlimited), + DepositAsset { assets: AllCounted(1).into(), beneficiary } + ]) + } + ]) + )], + ); + }); +} + +/// Test `reserve_transfer_assets` with local asset reserve and teleported fee. +/// +/// Transferring native asset (local reserve) to `USDT_PARA_ID`. Using teleport-trusted USDT for +/// fees. +/// +/// ```nocompile +/// Here (source) USDT_PARA_ID (destination) +/// | `assets` reserve `fees` teleport-trust +/// | +/// | 1. execute `InitiateTeleport(fees)` +/// | \--> sends `ReceiveTeleportedAsset(fees), .., DepositAsset(fees)` +/// | 2. execute `TransferReserveAsset(assets)` +/// | \-> sends `ReserveAssetDeposited(assets), ClearOrigin, BuyExecution(fees), DepositAsset` +/// \------------------------------------------> +/// ``` +#[test] +fn reserve_transfer_assets_with_local_asset_reserve_and_teleported_fee_works() { + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = + Junction::AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // create sufficient foreign asset USDT (0 total issuance) + let usdt_initial_local_amount = 42; + let (usdt_chain, usdt_chain_sovereign_account, usdt_id_multilocation) = + set_up_foreign_asset(USDT_PARA_ID, None, usdt_initial_local_amount, true); + + // native assets transfer destination is USDT chain (teleport trust only for USDT) + let dest = usdt_chain; + + let (assets, fee_index, fee_asset, xfer_asset) = into_multiassets_checked( + // USDT for fees (is sufficient on local chain too) - teleported + (usdt_id_multilocation, FEE_AMOUNT).into(), + // native asset to transfer (not used for fees) - local reserve + (MultiLocation::here(), SEND_AMOUNT).into(), + ); + + // reanchor according to test-case + let context = UniversalLocation::get(); + let expected_fee = fee_asset.reanchored(&dest, context).unwrap(); + let expected_asset = xfer_asset.reanchored(&dest, context).unwrap(); + + // balances checks before + assert_eq!(Assets::balance(usdt_id_multilocation, ALICE), usdt_initial_local_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // do the transfer + assert_ok!(XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + )); + let weight = BaseXcmWeight::get() * 3; + let mut last_events = last_events(3).into_iter(); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) + ); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::FeesPaid { + paying: beneficiary, + fees: MultiAssets::new(), + }) + ); + assert!(matches!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Sent { .. 
}) + )); + // Alice spent (fees) amount + assert_eq!( + Assets::balance(usdt_id_multilocation, ALICE), + usdt_initial_local_amount - FEE_AMOUNT + ); + // Alice used native asset for transfer + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE - SEND_AMOUNT); + // Sovereign account of dest parachain holds `SEND_AMOUNT` native asset in local reserve + assert_eq!(Balances::free_balance(usdt_chain_sovereign_account.clone()), SEND_AMOUNT); + assert_eq!(Assets::balance(usdt_id_multilocation, usdt_chain_sovereign_account), 0); + // Verify total and active issuance have decreased (teleported) + let expected_usdt_issuance = usdt_initial_local_amount - FEE_AMOUNT; + assert_eq!(Assets::total_issuance(usdt_id_multilocation), expected_usdt_issuance); + assert_eq!(Assets::active_issuance(usdt_id_multilocation), expected_usdt_issuance); + + // Verify sent XCM program + assert_eq!( + sent_xcm(), + vec![( + dest, + Xcm(vec![ + // fees are teleported to destination chain + ReceiveTeleportedAsset(expected_fee.clone().into()), + buy_limited_execution(expected_fee, Unlimited), + // transfer is through local-reserve transfer because `assets` (native + // asset) have local reserve + ReserveAssetDeposited(expected_asset.into()), + ClearOrigin, + DepositAsset { assets: AllCounted(2).into(), beneficiary }, + ]) + )] + ); + }); +} + +/// Test `reserve_transfer_assets` with destination asset reserve and teleported fee. +/// +/// Transferring foreign asset (destination reserve) to `FOREIGN_ASSET_RESERVE_PARA_ID`. Using +/// teleport-trusted USDT for fees. +/// +/// ```nocompile +/// Here (source) FOREIGN_ASSET_RESERVE_PARA_ID (destination) +/// | `fees` (USDT) teleport-trust +/// | `assets` reserve +/// | +/// | 1. execute `InitiateTeleport(fees)` +/// | \--> sends `ReceiveTeleportedAsset(fees), .., DepositAsset(fees)` +/// | 2. 
execute `InitiateReserveWithdraw(assets)` +/// | \--> sends `WithdrawAsset(asset), ClearOrigin, BuyExecution(fees), DepositAsset` +/// \------------------------------------------> +/// ``` +#[test] +fn reserve_transfer_assets_with_destination_asset_reserve_and_teleported_fee_works() { + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = + Junction::AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // create sufficient foreign asset USDT (0 total issuance) + let usdt_initial_local_amount = 42; + let (_, usdt_chain_sovereign_account, usdt_id_multilocation) = + set_up_foreign_asset(USDT_PARA_ID, None, usdt_initial_local_amount, true); + + // create non-sufficient foreign asset BLA (0 total issuance) + let foreign_initial_amount = 142; + let (reserve_location, foreign_sovereign_account, foreign_asset_id_multilocation) = + set_up_foreign_asset( + FOREIGN_ASSET_RESERVE_PARA_ID, + Some(FOREIGN_ASSET_INNER_JUNCTION), + foreign_initial_amount, + false, + ); + + // transfer destination is asset reserve location + let dest = reserve_location; + let dest_sovereign_account = foreign_sovereign_account; + + let (assets, fee_index, fee_asset, xfer_asset) = into_multiassets_checked( + // USDT for fees (is sufficient on local chain too) - teleported + (usdt_id_multilocation, FEE_AMOUNT).into(), + // foreign asset to transfer (not used for fees) - destination reserve + (foreign_asset_id_multilocation, SEND_AMOUNT).into(), + ); + + // reanchor according to test-case + let context = UniversalLocation::get(); + let expected_fee = fee_asset.reanchored(&dest, context).unwrap(); + let expected_asset = xfer_asset.reanchored(&dest, context).unwrap(); + + // balances checks before + assert_eq!(Assets::balance(usdt_id_multilocation, ALICE), usdt_initial_local_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // do the transfer + assert_ok!(XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + )); + let weight = BaseXcmWeight::get() * 4; + let mut last_events = last_events(3).into_iter(); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) + ); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::FeesPaid { + paying: beneficiary, + fees: MultiAssets::new(), + }) + ); + assert!(matches!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Sent { .. 
}) + )); + // Alice native asset untouched + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + // Alice spent USDT for fees + assert_eq!( + Assets::balance(usdt_id_multilocation, ALICE), + usdt_initial_local_amount - FEE_AMOUNT + ); + // Alice transferred BLA + assert_eq!( + Assets::balance(foreign_asset_id_multilocation, ALICE), + foreign_initial_amount - SEND_AMOUNT + ); + // Verify balances of USDT reserve parachain + assert_eq!(Balances::free_balance(usdt_chain_sovereign_account.clone()), 0); + assert_eq!(Assets::balance(usdt_id_multilocation, usdt_chain_sovereign_account), 0); + // Verify balances of transferred-asset reserve parachain + assert_eq!(Balances::free_balance(dest_sovereign_account.clone()), 0); + assert_eq!(Assets::balance(foreign_asset_id_multilocation, dest_sovereign_account), 0); + // Verify total and active issuance of USDT have decreased (teleported) + let expected_usdt_issuance = usdt_initial_local_amount - FEE_AMOUNT; + assert_eq!(Assets::total_issuance(usdt_id_multilocation), expected_usdt_issuance); + assert_eq!(Assets::active_issuance(usdt_id_multilocation), expected_usdt_issuance); + // Verify total and active issuance of foreign BLA asset have decreased (burned on + // reserve-withdraw) + let expected_bla_issuance = foreign_initial_amount - SEND_AMOUNT; + assert_eq!(Assets::total_issuance(foreign_asset_id_multilocation), expected_bla_issuance); + assert_eq!(Assets::active_issuance(foreign_asset_id_multilocation), expected_bla_issuance); + + // Verify sent XCM program + assert_eq!( + sent_xcm(), + vec![( + dest, + Xcm(vec![ + // fees are teleported to destination chain + ReceiveTeleportedAsset(expected_fee.clone().into()), + buy_limited_execution(expected_fee, Unlimited), + // assets are withdrawn from origin's local SA + WithdrawAsset(expected_asset.into()), + ClearOrigin, + DepositAsset { assets: AllCounted(2).into(), beneficiary }, + ]) + )] + ); + }); +} + +/// Test `reserve_transfer_assets` with remote asset reserve and teleported fee is disallowed. +/// +/// Transferring foreign asset (reserve on `FOREIGN_ASSET_RESERVE_PARA_ID`) to `USDT_PARA_ID`. +/// Using teleport-trusted USDT for fees. 
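+/// The call is expected to fail with `InvalidAssetUnsupportedReserve`, leaving all balances and
+/// asset issuance unchanged.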
+#[test] +fn reserve_transfer_assets_with_remote_asset_reserve_and_teleported_fee_disallowed() { + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = + Junction::AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // create sufficient foreign asset USDT (0 total issuance) + let usdt_initial_local_amount = 42; + let (usdt_chain, usdt_chain_sovereign_account, usdt_id_multilocation) = + set_up_foreign_asset(USDT_PARA_ID, None, usdt_initial_local_amount, true); + + // create non-sufficient foreign asset BLA (0 total issuance) + let foreign_initial_amount = 142; + let (_, reserve_sovereign_account, foreign_asset_id_multilocation) = set_up_foreign_asset( + FOREIGN_ASSET_RESERVE_PARA_ID, + Some(FOREIGN_ASSET_INNER_JUNCTION), + foreign_initial_amount, + false, + ); + + // transfer destination is USDT chain (foreign asset needs to go through its reserve chain) + let dest = usdt_chain; + + let (assets, fee_index, _, _) = into_multiassets_checked( + // USDT for fees (is sufficient on local chain too) - teleported + (usdt_id_multilocation, FEE_AMOUNT).into(), + // foreign asset to transfer (not used for fees) - remote reserve + (foreign_asset_id_multilocation, SEND_AMOUNT).into(), + ); + + // balances checks before + assert_eq!(Assets::balance(usdt_id_multilocation, ALICE), usdt_initial_local_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // do the transfer + let result = XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + ); + assert_eq!( + result, + Err(DispatchError::Module(ModuleError { + index: 4, + error: [22, 0, 0, 0], + message: Some("InvalidAssetUnsupportedReserve") + })) + ); + // Alice native asset untouched + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + assert_eq!(Assets::balance(usdt_id_multilocation, ALICE), usdt_initial_local_amount); + assert_eq!(Assets::balance(foreign_asset_id_multilocation, ALICE), foreign_initial_amount); + assert_eq!(Balances::free_balance(usdt_chain_sovereign_account.clone()), 0); + assert_eq!(Assets::balance(usdt_id_multilocation, usdt_chain_sovereign_account), 0); + assert_eq!(Balances::free_balance(reserve_sovereign_account.clone()), 0); + assert_eq!(Assets::balance(foreign_asset_id_multilocation, reserve_sovereign_account), 0); + let expected_usdt_issuance = usdt_initial_local_amount; + assert_eq!(Assets::total_issuance(usdt_id_multilocation), expected_usdt_issuance); + assert_eq!(Assets::active_issuance(usdt_id_multilocation), expected_usdt_issuance); + let expected_bla_issuance = foreign_initial_amount; + assert_eq!(Assets::total_issuance(foreign_asset_id_multilocation), expected_bla_issuance); + assert_eq!(Assets::active_issuance(foreign_asset_id_multilocation), expected_bla_issuance); + }); +} + +/// Test `reserve_transfer_assets` single asset which is teleportable - should fail. +/// +/// Attempting to reserve-transfer teleport-trusted USDT to `USDT_PARA_ID` should fail. 
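+/// The call is expected to fail with `Filtered`, leaving balances and USDT issuance unchanged.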
+#[test] +fn reserve_transfer_assets_with_teleportable_asset_fails() { + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = + Junction::AccountId32 { network: None, id: ALICE.into() }.into(); + + new_test_ext_with_balances(balances).execute_with(|| { + // create sufficient foreign asset USDT (0 total issuance) + let usdt_initial_local_amount = 42; + let (usdt_chain, usdt_chain_sovereign_account, usdt_id_multilocation) = + set_up_foreign_asset(USDT_PARA_ID, None, usdt_initial_local_amount, true); + + // transfer destination is USDT chain (foreign asset needs to go through its reserve chain) + let dest = usdt_chain; + let assets: MultiAssets = vec![(usdt_id_multilocation, FEE_AMOUNT).into()].into(); + let fee_index = 0; + + // balances checks before + assert_eq!(Assets::balance(usdt_id_multilocation, ALICE), usdt_initial_local_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // do the transfer + let res = XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + ); + assert_eq!( + res, + Err(DispatchError::Module(ModuleError { + index: 4, + error: [2, 0, 0, 0], + message: Some("Filtered") + })) + ); + // Alice native asset is still same + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + // Alice USDT balance is still same + assert_eq!(Assets::balance(usdt_id_multilocation, ALICE), usdt_initial_local_amount); + // No USDT moved to sovereign account of reserve parachain + assert_eq!(Assets::balance(usdt_id_multilocation, usdt_chain_sovereign_account), 0); + // Verify total and active issuance of USDT are still the same + assert_eq!(Assets::total_issuance(usdt_id_multilocation), usdt_initial_local_amount); + assert_eq!(Assets::active_issuance(usdt_id_multilocation), usdt_initial_local_amount); + }); +} + +/// Test `reserve_transfer_assets` with teleportable fee that is filtered - should fail. +#[test] +fn reserve_transfer_assets_with_filtered_teleported_fee_disallowed() { + let beneficiary: MultiLocation = AccountId32 { network: None, id: BOB.into() }.into(); + new_test_ext_with_balances(vec![(ALICE, INITIAL_BALANCE)]).execute_with(|| { + let (assets, fee_index, _, _) = into_multiassets_checked( + // FilteredTeleportAsset for fees - teleportable but filtered + FilteredTeleportAsset::get().into(), + // native asset to transfer (not used for fees) - local reserve + (MultiLocation::here(), SEND_AMOUNT).into(), + ); + let result = XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(FilteredTeleportLocation::get().into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + ); + assert_eq!( + result, + Err(DispatchError::Module(ModuleError { + index: 4, + error: [2, 0, 0, 0], + message: Some("Filtered") + })) + ); + }); +} diff --git a/polkadot/xcm/pallet-xcm/src/tests.rs b/polkadot/xcm/pallet-xcm/src/tests/mod.rs similarity index 73% rename from polkadot/xcm/pallet-xcm/src/tests.rs rename to polkadot/xcm/pallet-xcm/src/tests/mod.rs index 5d8aee8d665f4d2a98af0aeef3622219d3a87634..72814e507f2a0106054ec882e8e761bb5074be7a 100644 --- a/polkadot/xcm/pallet-xcm/src/tests.rs +++ b/polkadot/xcm/pallet-xcm/src/tests/mod.rs @@ -14,6 +14,10 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . 
+#![cfg(test)] + +mod assets_transfer; + use crate::{ mock::*, AssetTraps, CurrentMigration, Error, LatestVersionedMultiLocation, Queries, QueryStatus, VersionDiscoveryQueue, VersionMigrationStage, VersionNotifiers, @@ -35,15 +39,15 @@ use xcm_executor::{ const ALICE: AccountId = AccountId::new([0u8; 32]); const BOB: AccountId = AccountId::new([1u8; 32]); -const PARA_ID: u32 = 2000; const INITIAL_BALANCE: u128 = 100; const SEND_AMOUNT: u128 = 10; +const FEE_AMOUNT: u128 = 2; #[test] fn report_outcome_notify_works() { let balances = vec![ (ALICE, INITIAL_BALANCE), - (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), + (ParaId::from(OTHER_PARA_ID).into_account_truncating(), INITIAL_BALANCE), ]; let sender: MultiLocation = AccountId32 { network: None, id: ALICE.into() }.into(); let mut message = @@ -56,7 +60,7 @@ fn report_outcome_notify_works() { new_test_ext_with_balances(balances).execute_with(|| { XcmPallet::report_outcome_notify( &mut message, - Parachain(PARA_ID).into_location(), + Parachain(OTHER_PARA_ID).into_location(), notify, 100, ) @@ -74,8 +78,8 @@ fn report_outcome_notify_works() { ); let querier: MultiLocation = Here.into(); let status = QueryStatus::Pending { - responder: MultiLocation::from(Parachain(PARA_ID)).into(), - maybe_notify: Some((4, 2)), + responder: MultiLocation::from(Parachain(OTHER_PARA_ID)).into(), + maybe_notify: Some((5, 2)), timeout: 100, maybe_match_querier: Some(querier.into()), }; @@ -89,7 +93,7 @@ fn report_outcome_notify_works() { }]); let hash = fake_message_hash(&message); let r = XcmExecutor::::execute_xcm( - Parachain(PARA_ID), + Parachain(OTHER_PARA_ID), message, hash, Weight::from_parts(1_000_000_000, 1_000_000_000), @@ -99,13 +103,13 @@ fn report_outcome_notify_works() { last_events(2), vec![ RuntimeEvent::TestNotifier(pallet_test_notifier::Event::ResponseReceived( - Parachain(PARA_ID).into(), + Parachain(OTHER_PARA_ID).into(), 0, Response::ExecutionResult(None), )), RuntimeEvent::XcmPallet(crate::Event::Notified { query_id: 0, - pallet_index: 4, + pallet_index: 5, call_index: 2 }), ] @@ -118,13 +122,14 @@ fn report_outcome_notify_works() { fn report_outcome_works() { let balances = vec![ (ALICE, INITIAL_BALANCE), - (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), + (ParaId::from(OTHER_PARA_ID).into_account_truncating(), INITIAL_BALANCE), ]; let sender: MultiLocation = AccountId32 { network: None, id: ALICE.into() }.into(); let mut message = Xcm(vec![TransferAsset { assets: (Here, SEND_AMOUNT).into(), beneficiary: sender }]); new_test_ext_with_balances(balances).execute_with(|| { - XcmPallet::report_outcome(&mut message, Parachain(PARA_ID).into_location(), 100).unwrap(); + XcmPallet::report_outcome(&mut message, Parachain(OTHER_PARA_ID).into_location(), 100) + .unwrap(); assert_eq!( message, Xcm(vec![ @@ -138,7 +143,7 @@ fn report_outcome_works() { ); let querier: MultiLocation = Here.into(); let status = QueryStatus::Pending { - responder: MultiLocation::from(Parachain(PARA_ID)).into(), + responder: MultiLocation::from(Parachain(OTHER_PARA_ID)).into(), maybe_notify: None, timeout: 100, maybe_match_querier: Some(querier.into()), @@ -153,7 +158,7 @@ fn report_outcome_works() { }]); let hash = fake_message_hash(&message); let r = XcmExecutor::::execute_xcm( - Parachain(PARA_ID), + Parachain(OTHER_PARA_ID), message, hash, Weight::from_parts(1_000_000_000, 1_000_000_000), @@ -177,7 +182,7 @@ fn report_outcome_works() { fn custom_querier_works() { let balances = vec![ (ALICE, INITIAL_BALANCE), - 
(ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), + (ParaId::from(OTHER_PARA_ID).into_account_truncating(), INITIAL_BALANCE), ]; new_test_ext_with_balances(balances).execute_with(|| { let querier: MultiLocation = @@ -281,7 +286,7 @@ fn custom_querier_works() { fn send_works() { let balances = vec![ (ALICE, INITIAL_BALANCE), - (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), + (ParaId::from(OTHER_PARA_ID).into_account_truncating(), INITIAL_BALANCE), ]; new_test_ext_with_balances(balances).execute_with(|| { let sender: MultiLocation = AccountId32 { network: None, id: ALICE.into() }.into(); @@ -325,7 +330,7 @@ fn send_works() { fn send_fails_when_xcm_router_blocks() { let balances = vec![ (ALICE, INITIAL_BALANCE), - (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), + (ParaId::from(OTHER_PARA_ID).into_account_truncating(), INITIAL_BALANCE), ]; new_test_ext_with_balances(balances).execute_with(|| { let sender: MultiLocation = @@ -346,276 +351,6 @@ fn send_fails_when_xcm_router_blocks() { }); } -/// Test `teleport_assets` -/// -/// Asserts that the sender's balance is decreased as a result of execution of -/// local effects. -#[test] -fn teleport_assets_works() { - let balances = vec![ - (ALICE, INITIAL_BALANCE), - (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), - ]; - new_test_ext_with_balances(balances).execute_with(|| { - let weight = BaseXcmWeight::get() * 3; - assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); - let dest: MultiLocation = AccountId32 { network: None, id: BOB.into() }.into(); - assert_ok!(XcmPallet::teleport_assets( - RuntimeOrigin::signed(ALICE), - Box::new(RelayLocation::get().into()), - Box::new(dest.into()), - Box::new((Here, SEND_AMOUNT).into()), - 0, - )); - assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE - SEND_AMOUNT); - assert_eq!( - sent_xcm(), - vec![( - RelayLocation::get().into(), - Xcm(vec![ - ReceiveTeleportedAsset((Here, SEND_AMOUNT).into()), - ClearOrigin, - buy_execution((Here, SEND_AMOUNT)), - DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, - ]), - )] - ); - let versioned_sent = VersionedXcm::from(sent_xcm().into_iter().next().unwrap().1); - let _check_v2_ok: xcm::v2::Xcm<()> = versioned_sent.try_into().unwrap(); - assert_eq!( - last_event(), - RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) - ); - }); -} - -/// Test `limited_teleport_assets` -/// -/// Asserts that the sender's balance is decreased as a result of execution of -/// local effects. 
-#[test] -fn limited_teleport_assets_works() { - let balances = vec![ - (ALICE, INITIAL_BALANCE), - (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), - ]; - new_test_ext_with_balances(balances).execute_with(|| { - let weight = BaseXcmWeight::get() * 3; - assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); - let dest: MultiLocation = AccountId32 { network: None, id: BOB.into() }.into(); - assert_ok!(XcmPallet::limited_teleport_assets( - RuntimeOrigin::signed(ALICE), - Box::new(RelayLocation::get().into()), - Box::new(dest.into()), - Box::new((Here, SEND_AMOUNT).into()), - 0, - WeightLimit::Limited(Weight::from_parts(5000, 5000)), - )); - assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE - SEND_AMOUNT); - assert_eq!( - sent_xcm(), - vec![( - RelayLocation::get().into(), - Xcm(vec![ - ReceiveTeleportedAsset((Here, SEND_AMOUNT).into()), - ClearOrigin, - buy_limited_execution((Here, SEND_AMOUNT), Weight::from_parts(5000, 5000)), - DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, - ]), - )] - ); - let versioned_sent = VersionedXcm::from(sent_xcm().into_iter().next().unwrap().1); - let _check_v2_ok: xcm::v2::Xcm<()> = versioned_sent.try_into().unwrap(); - assert_eq!( - last_event(), - RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) - ); - }); -} - -/// Test `limited_teleport_assets` with unlimited weight -/// -/// Asserts that the sender's balance is decreased as a result of execution of -/// local effects. -#[test] -fn unlimited_teleport_assets_works() { - let balances = vec![ - (ALICE, INITIAL_BALANCE), - (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), - ]; - new_test_ext_with_balances(balances).execute_with(|| { - let weight = BaseXcmWeight::get() * 3; - assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); - let dest: MultiLocation = AccountId32 { network: None, id: BOB.into() }.into(); - assert_ok!(XcmPallet::limited_teleport_assets( - RuntimeOrigin::signed(ALICE), - Box::new(RelayLocation::get().into()), - Box::new(dest.into()), - Box::new((Here, SEND_AMOUNT).into()), - 0, - WeightLimit::Unlimited, - )); - assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE - SEND_AMOUNT); - assert_eq!( - sent_xcm(), - vec![( - RelayLocation::get().into(), - Xcm(vec![ - ReceiveTeleportedAsset((Here, SEND_AMOUNT).into()), - ClearOrigin, - buy_execution((Here, SEND_AMOUNT)), - DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, - ]), - )] - ); - assert_eq!( - last_event(), - RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) - ); - }); -} - -/// Test `reserve_transfer_assets` -/// -/// Asserts that the sender's balance is decreased and the beneficiary's balance -/// is increased. Verifies the correct message is sent and event is emitted. 
-#[test] -fn reserve_transfer_assets_works() { - let balances = vec![ - (ALICE, INITIAL_BALANCE), - (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), - ]; - new_test_ext_with_balances(balances).execute_with(|| { - let weight = BaseXcmWeight::get() * 2; - let dest: MultiLocation = Junction::AccountId32 { network: None, id: ALICE.into() }.into(); - assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); - assert_ok!(XcmPallet::reserve_transfer_assets( - RuntimeOrigin::signed(ALICE), - Box::new(Parachain(PARA_ID).into()), - Box::new(dest.into()), - Box::new((Here, SEND_AMOUNT).into()), - 0, - )); - // Alice spent amount - assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE - SEND_AMOUNT); - // Destination account (parachain account) has amount - let para_acc: AccountId = ParaId::from(PARA_ID).into_account_truncating(); - assert_eq!(Balances::free_balance(para_acc), INITIAL_BALANCE + SEND_AMOUNT); - assert_eq!( - sent_xcm(), - vec![( - Parachain(PARA_ID).into(), - Xcm(vec![ - ReserveAssetDeposited((Parent, SEND_AMOUNT).into()), - ClearOrigin, - buy_execution((Parent, SEND_AMOUNT)), - DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, - ]), - )] - ); - let versioned_sent = VersionedXcm::from(sent_xcm().into_iter().next().unwrap().1); - let _check_v2_ok: xcm::v2::Xcm<()> = versioned_sent.try_into().unwrap(); - assert_eq!( - last_event(), - RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) - ); - }); -} - -/// Test `limited_reserve_transfer_assets` -/// -/// Asserts that the sender's balance is decreased and the beneficiary's balance -/// is increased. Verifies the correct message is sent and event is emitted. -#[test] -fn limited_reserve_transfer_assets_works() { - let balances = vec![ - (ALICE, INITIAL_BALANCE), - (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), - ]; - new_test_ext_with_balances(balances).execute_with(|| { - let weight = BaseXcmWeight::get() * 2; - let dest: MultiLocation = Junction::AccountId32 { network: None, id: ALICE.into() }.into(); - assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); - assert_ok!(XcmPallet::limited_reserve_transfer_assets( - RuntimeOrigin::signed(ALICE), - Box::new(Parachain(PARA_ID).into()), - Box::new(dest.into()), - Box::new((Here, SEND_AMOUNT).into()), - 0, - WeightLimit::Limited(Weight::from_parts(5000, 5000)), - )); - // Alice spent amount - assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE - SEND_AMOUNT); - // Destination account (parachain account) has amount - let para_acc: AccountId = ParaId::from(PARA_ID).into_account_truncating(); - assert_eq!(Balances::free_balance(para_acc), INITIAL_BALANCE + SEND_AMOUNT); - assert_eq!( - sent_xcm(), - vec![( - Parachain(PARA_ID).into(), - Xcm(vec![ - ReserveAssetDeposited((Parent, SEND_AMOUNT).into()), - ClearOrigin, - buy_limited_execution((Parent, SEND_AMOUNT), Weight::from_parts(5000, 5000)), - DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, - ]), - )] - ); - let versioned_sent = VersionedXcm::from(sent_xcm().into_iter().next().unwrap().1); - let _check_v2_ok: xcm::v2::Xcm<()> = versioned_sent.try_into().unwrap(); - assert_eq!( - last_event(), - RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) - ); - }); -} - -/// Test `limited_reserve_transfer_assets` with unlimited weight purchasing -/// -/// Asserts that the sender's balance is decreased and the beneficiary's balance -/// is increased. 
Verifies the correct message is sent and event is emitted. -#[test] -fn unlimited_reserve_transfer_assets_works() { - let balances = vec![ - (ALICE, INITIAL_BALANCE), - (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), - ]; - new_test_ext_with_balances(balances).execute_with(|| { - let weight = BaseXcmWeight::get() * 2; - let dest: MultiLocation = Junction::AccountId32 { network: None, id: ALICE.into() }.into(); - assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); - assert_ok!(XcmPallet::limited_reserve_transfer_assets( - RuntimeOrigin::signed(ALICE), - Box::new(Parachain(PARA_ID).into()), - Box::new(dest.into()), - Box::new((Here, SEND_AMOUNT).into()), - 0, - WeightLimit::Unlimited, - )); - // Alice spent amount - assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE - SEND_AMOUNT); - // Destination account (parachain account) has amount - let para_acc: AccountId = ParaId::from(PARA_ID).into_account_truncating(); - assert_eq!(Balances::free_balance(para_acc), INITIAL_BALANCE + SEND_AMOUNT); - assert_eq!( - sent_xcm(), - vec![( - Parachain(PARA_ID).into(), - Xcm(vec![ - ReserveAssetDeposited((Parent, SEND_AMOUNT).into()), - ClearOrigin, - buy_execution((Parent, SEND_AMOUNT)), - DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, - ]), - )] - ); - assert_eq!( - last_event(), - RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) - ); - }); -} - /// Test local execution of XCM /// /// Asserts that the sender's balance is decreased and the beneficiary's balance @@ -624,7 +359,7 @@ fn unlimited_reserve_transfer_assets_works() { fn execute_withdraw_to_deposit_works() { let balances = vec![ (ALICE, INITIAL_BALANCE), - (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), + (ParaId::from(OTHER_PARA_ID).into_account_truncating(), INITIAL_BALANCE), ]; new_test_ext_with_balances(balances).execute_with(|| { let weight = BaseXcmWeight::get() * 3; diff --git a/polkadot/xcm/procedural/Cargo.toml b/polkadot/xcm/procedural/Cargo.toml index 56df0d94f586002cdc0e5aee58a5a9dfc7470599..8ab27c91dae141ad06317428c4952261227871a7 100644 --- a/polkadot/xcm/procedural/Cargo.toml +++ b/polkadot/xcm/procedural/Cargo.toml @@ -1,9 +1,11 @@ [package] name = "xcm-procedural" +description = "Procedural macros for XCM" authors.workspace = true edition.workspace = true license.workspace = true version = "1.0.0" +publish = true [lib] proc-macro = true @@ -13,3 +15,7 @@ proc-macro2 = "1.0.56" quote = "1.0.28" syn = "2.0.38" Inflector = "0.11.4" + +[dev-dependencies] +trybuild = { version = "1.0.74", features = ["diff"] } +xcm = { package = "staging-xcm", path = ".." } diff --git a/polkadot/xcm/procedural/src/builder_pattern.rs b/polkadot/xcm/procedural/src/builder_pattern.rs new file mode 100644 index 0000000000000000000000000000000000000000..1cb795ea9b208eb2b1985c6ce48132855a83c5f8 --- /dev/null +++ b/polkadot/xcm/procedural/src/builder_pattern.rs @@ -0,0 +1,338 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Derive macro for creating XCMs with a builder pattern + +use inflector::Inflector; +use proc_macro2::TokenStream as TokenStream2; +use quote::{format_ident, quote}; +use syn::{ + Data, DataEnum, DeriveInput, Error, Expr, ExprLit, Fields, Ident, Lit, Meta, MetaNameValue, + Result, Variant, +}; + +pub fn derive(input: DeriveInput) -> Result { + let data_enum = match &input.data { + Data::Enum(data_enum) => data_enum, + _ => return Err(Error::new_spanned(&input, "Expected the `Instruction` enum")), + }; + let builder_raw_impl = generate_builder_raw_impl(&input.ident, data_enum); + let builder_impl = generate_builder_impl(&input.ident, data_enum)?; + let builder_unpaid_impl = generate_builder_unpaid_impl(&input.ident, data_enum)?; + let output = quote! { + /// A trait for types that track state inside the XcmBuilder + pub trait XcmBuilderState {} + + /// Access to all the instructions + pub enum AnythingGoes {} + /// You need to pay for execution + pub enum PaymentRequired {} + /// The holding register was loaded, now to buy execution + pub enum LoadedHolding {} + /// Need to explicitly state it won't pay for fees + pub enum ExplicitUnpaidRequired {} + + impl XcmBuilderState for AnythingGoes {} + impl XcmBuilderState for PaymentRequired {} + impl XcmBuilderState for LoadedHolding {} + impl XcmBuilderState for ExplicitUnpaidRequired {} + + /// Type used to build XCM programs + pub struct XcmBuilder { + pub(crate) instructions: Vec>, + pub state: core::marker::PhantomData, + } + + impl Xcm { + pub fn builder() -> XcmBuilder { + XcmBuilder:: { + instructions: Vec::new(), + state: core::marker::PhantomData, + } + } + pub fn builder_unpaid() -> XcmBuilder { + XcmBuilder:: { + instructions: Vec::new(), + state: core::marker::PhantomData, + } + } + pub fn builder_unsafe() -> XcmBuilder { + XcmBuilder:: { + instructions: Vec::new(), + state: core::marker::PhantomData, + } + } + } + #builder_impl + #builder_unpaid_impl + #builder_raw_impl + }; + Ok(output) +} + +fn generate_builder_raw_impl(name: &Ident, data_enum: &DataEnum) -> TokenStream2 { + let methods = data_enum.variants.iter().map(|variant| { + let variant_name = &variant.ident; + let method_name_string = &variant_name.to_string().to_snake_case(); + let method_name = syn::Ident::new(&method_name_string, variant_name.span()); + let docs = get_doc_comments(&variant); + let method = match &variant.fields { + Fields::Unit => { + quote! { + pub fn #method_name(mut self) -> Self { + self.instructions.push(#name::::#variant_name); + self + } + } + }, + Fields::Unnamed(fields) => { + let arg_names: Vec<_> = fields + .unnamed + .iter() + .enumerate() + .map(|(index, _)| format_ident!("arg{}", index)) + .collect(); + let arg_types: Vec<_> = fields.unnamed.iter().map(|field| &field.ty).collect(); + quote! { + pub fn #method_name(mut self, #(#arg_names: #arg_types),*) -> Self { + self.instructions.push(#name::::#variant_name(#(#arg_names),*)); + self + } + } + }, + Fields::Named(fields) => { + let arg_names: Vec<_> = fields.named.iter().map(|field| &field.ident).collect(); + let arg_types: Vec<_> = fields.named.iter().map(|field| &field.ty).collect(); + quote! { + pub fn #method_name(mut self, #(#arg_names: #arg_types),*) -> Self { + self.instructions.push(#name::::#variant_name { #(#arg_names),* }); + self + } + } + }, + }; + quote! 
{ + #(#docs)* + #method + } + }); + let output = quote! { + impl XcmBuilder { + #(#methods)* + + pub fn build(self) -> Xcm { + Xcm(self.instructions) + } + } + }; + output +} + +fn generate_builder_impl(name: &Ident, data_enum: &DataEnum) -> Result { + // We first require an instruction that load the holding register + let load_holding_variants = data_enum + .variants + .iter() + .map(|variant| { + let maybe_builder_attr = variant.attrs.iter().find(|attr| match attr.meta { + Meta::List(ref list) => { + return list.path.is_ident("builder"); + }, + _ => false, + }); + let builder_attr = match maybe_builder_attr { + Some(builder) => builder.clone(), + None => return Ok(None), /* It's not going to be an instruction that loads the + * holding register */ + }; + let Meta::List(ref list) = builder_attr.meta else { unreachable!("We checked before") }; + let inner_ident: Ident = syn::parse2(list.tokens.clone().into()).map_err(|_| { + Error::new_spanned(&builder_attr, "Expected `builder(loads_holding)`") + })?; + let ident_to_match: Ident = syn::parse_quote!(loads_holding); + if inner_ident == ident_to_match { + Ok(Some(variant)) + } else { + Err(Error::new_spanned(&builder_attr, "Expected `builder(loads_holding)`")) + } + }) + .collect::>>()?; + + let load_holding_methods = load_holding_variants + .into_iter() + .flatten() + .map(|variant| { + let variant_name = &variant.ident; + let method_name_string = &variant_name.to_string().to_snake_case(); + let method_name = syn::Ident::new(&method_name_string, variant_name.span()); + let docs = get_doc_comments(&variant); + let method = match &variant.fields { + Fields::Unnamed(fields) => { + let arg_names: Vec<_> = fields + .unnamed + .iter() + .enumerate() + .map(|(index, _)| format_ident!("arg{}", index)) + .collect(); + let arg_types: Vec<_> = fields.unnamed.iter().map(|field| &field.ty).collect(); + quote! { + #(#docs)* + pub fn #method_name(self, #(#arg_names: #arg_types),*) -> XcmBuilder { + let mut new_instructions = self.instructions; + new_instructions.push(#name::::#variant_name(#(#arg_names),*)); + XcmBuilder { + instructions: new_instructions, + state: core::marker::PhantomData, + } + } + } + }, + Fields::Named(fields) => { + let arg_names: Vec<_> = fields.named.iter().map(|field| &field.ident).collect(); + let arg_types: Vec<_> = fields.named.iter().map(|field| &field.ty).collect(); + quote! { + #(#docs)* + pub fn #method_name(self, #(#arg_names: #arg_types),*) -> XcmBuilder { + let mut new_instructions = self.instructions; + new_instructions.push(#name::::#variant_name { #(#arg_names),* }); + XcmBuilder { + instructions: new_instructions, + state: core::marker::PhantomData, + } + } + } + }, + _ => + return Err(Error::new_spanned( + &variant, + "Instructions that load the holding register should take operands", + )), + }; + Ok(method) + }) + .collect::, _>>()?; + + let first_impl = quote! 
{ + impl XcmBuilder { + #(#load_holding_methods)* + } + }; + + // Then we require fees to be paid + let buy_execution_method = data_enum + .variants + .iter() + .find(|variant| variant.ident.to_string() == "BuyExecution") + .map_or( + Err(Error::new_spanned(&data_enum.variants, "No BuyExecution instruction")), + |variant| { + let variant_name = &variant.ident; + let method_name_string = &variant_name.to_string().to_snake_case(); + let method_name = syn::Ident::new(&method_name_string, variant_name.span()); + let docs = get_doc_comments(&variant); + let fields = match &variant.fields { + Fields::Named(fields) => { + let arg_names: Vec<_> = + fields.named.iter().map(|field| &field.ident).collect(); + let arg_types: Vec<_> = + fields.named.iter().map(|field| &field.ty).collect(); + quote! { + #(#docs)* + pub fn #method_name(self, #(#arg_names: #arg_types),*) -> XcmBuilder { + let mut new_instructions = self.instructions; + new_instructions.push(#name::::#variant_name { #(#arg_names),* }); + XcmBuilder { + instructions: new_instructions, + state: core::marker::PhantomData, + } + } + } + }, + _ => + return Err(Error::new_spanned( + &variant, + "BuyExecution should have named fields", + )), + }; + Ok(fields) + }, + )?; + + let second_impl = quote! { + impl XcmBuilder { + #buy_execution_method + } + }; + + let output = quote! { + #first_impl + #second_impl + }; + + Ok(output) +} + +fn generate_builder_unpaid_impl(name: &Ident, data_enum: &DataEnum) -> Result { + let unpaid_execution_variant = data_enum + .variants + .iter() + .find(|variant| variant.ident.to_string() == "UnpaidExecution") + .ok_or(Error::new_spanned(&data_enum.variants, "No UnpaidExecution instruction"))?; + let unpaid_execution_ident = &unpaid_execution_variant.ident; + let unpaid_execution_method_name = Ident::new( + &unpaid_execution_ident.to_string().to_snake_case(), + unpaid_execution_ident.span(), + ); + let docs = get_doc_comments(&unpaid_execution_variant); + let fields = match &unpaid_execution_variant.fields { + Fields::Named(fields) => fields, + _ => + return Err(Error::new_spanned( + &unpaid_execution_variant, + "UnpaidExecution should have named fields", + )), + }; + let arg_names: Vec<_> = fields.named.iter().map(|field| &field.ident).collect(); + let arg_types: Vec<_> = fields.named.iter().map(|field| &field.ty).collect(); + Ok(quote! { + impl XcmBuilder { + #(#docs)* + pub fn #unpaid_execution_method_name(self, #(#arg_names: #arg_types),*) -> XcmBuilder { + let mut new_instructions = self.instructions; + new_instructions.push(#name::::#unpaid_execution_ident { #(#arg_names),* }); + XcmBuilder { + instructions: new_instructions, + state: core::marker::PhantomData, + } + } + } + }) +} + +fn get_doc_comments(variant: &Variant) -> Vec { + variant + .attrs + .iter() + .filter_map(|attr| match &attr.meta { + Meta::NameValue(MetaNameValue { + value: Expr::Lit(ExprLit { lit: Lit::Str(literal), .. }), + .. + }) if attr.path().is_ident("doc") => Some(literal.value()), + _ => None, + }) + .map(|doc| syn::parse_str::(&format!("/// {}", doc)).unwrap()) + .collect() +} diff --git a/polkadot/xcm/procedural/src/lib.rs b/polkadot/xcm/procedural/src/lib.rs index 2ebccadf50be805c2f66db0a703e0f09bf43861d..7600e817d0e662e42ef560c291de6ac192c7ca53 100644 --- a/polkadot/xcm/procedural/src/lib.rs +++ b/polkadot/xcm/procedural/src/lib.rs @@ -17,7 +17,9 @@ //! Procedural macros used in XCM. 
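As a quick orientation for the `builder_pattern` derive implemented above: the generated API is a typestate builder, so the compiler only exposes `build()` once the message is well-formed. A minimal usage sketch (assuming a downstream crate that imports this under the `xcm` name, as the tests below do; the state annotations in the comments are informal):

use xcm::latest::prelude::*;

fn paid_transfer_message() -> Xcm<()> {
    let fees: MultiAsset = (Here, 100u128).into();
    let beneficiary: MultiLocation = AccountId32 { id: [0u8; 32], network: None }.into();
    Xcm::builder()                               // XcmBuilder<_, PaymentRequired>
        .withdraw_asset(fees.clone().into())     // loads the holding register -> LoadedHolding
        .buy_execution(fees.clone(), Unlimited)  // fees are paid -> AnythingGoes
        .deposit_asset(fees.into(), beneficiary) // any instruction is allowed from here on
        .build()                                 // `build()` only exists in the AnythingGoes state
}

`Xcm::builder_unpaid()` and `Xcm::builder_unsafe()` follow the same pattern, starting in the `ExplicitUnpaidRequired` and `AnythingGoes` states respectively.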
use proc_macro::TokenStream; +use syn::{parse_macro_input, DeriveInput}; +mod builder_pattern; mod v2; mod v3; mod weight_info; @@ -47,3 +49,18 @@ pub fn impl_conversion_functions_for_junctions_v3(input: TokenStream) -> TokenSt .unwrap_or_else(syn::Error::into_compile_error) .into() } + +/// This is called on the `Instruction` enum, not on the `Xcm` struct, +/// and allows for the following syntax for building XCMs: +/// let message = Xcm::builder() +/// .withdraw_asset(assets) +/// .buy_execution(fees, weight_limit) +/// .deposit_asset(assets, beneficiary) +/// .build(); +#[proc_macro_derive(Builder, attributes(builder))] +pub fn derive_builder(input: TokenStream) -> TokenStream { + let input = parse_macro_input!(input as DeriveInput); + builder_pattern::derive(input) + .unwrap_or_else(syn::Error::into_compile_error) + .into() +} diff --git a/polkadot/xcm/procedural/tests/builder_pattern.rs b/polkadot/xcm/procedural/tests/builder_pattern.rs new file mode 100644 index 0000000000000000000000000000000000000000..eab9d67121f610a22166d9bd0d556f79e8770d1c --- /dev/null +++ b/polkadot/xcm/procedural/tests/builder_pattern.rs @@ -0,0 +1,81 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Test the methods generated by the Builder derive macro. +//! Tests directly on the actual Xcm struct and Instruction enum. + +use xcm::latest::prelude::*; + +#[test] +fn builder_pattern_works() { + let asset: MultiAsset = (Here, 100u128).into(); + let beneficiary: MultiLocation = AccountId32 { id: [0u8; 32], network: None }.into(); + let message: Xcm<()> = Xcm::builder() + .receive_teleported_asset(asset.clone().into()) + .buy_execution(asset.clone(), Unlimited) + .deposit_asset(asset.clone().into(), beneficiary) + .build(); + assert_eq!( + message, + Xcm(vec![ + ReceiveTeleportedAsset(asset.clone().into()), + BuyExecution { fees: asset.clone(), weight_limit: Unlimited }, + DepositAsset { assets: asset.into(), beneficiary }, + ]) + ); +} + +#[test] +fn default_builder_requires_buy_execution() { + let asset: MultiAsset = (Here, 100u128).into(); + let beneficiary: MultiLocation = AccountId32 { id: [0u8; 32], network: None }.into(); + // This is invalid, since it doesn't pay for fees. + // This is enforced at compile time: the `build()` method doesn't exist + // on the resulting type.
+ // let message: Xcm<()> = Xcm::builder() + // .withdraw_asset(asset.clone().into()) + // .deposit_asset(asset.into(), beneficiary) + // .build(); + + // To be able to do that, we need to use the explicitly unpaid variant + let message: Xcm<()> = Xcm::builder_unpaid() + .unpaid_execution(Unlimited, None) + .withdraw_asset(asset.clone().into()) + .deposit_asset(asset.clone().into(), beneficiary) + .build(); // This works + assert_eq!( + message, + Xcm(vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + WithdrawAsset(asset.clone().into()), + DepositAsset { assets: asset.clone().into(), beneficiary }, + ]) + ); + + // The other option doesn't have any limits whatsoever, so it should + // only be used when you really know what you're doing. + let message: Xcm<()> = Xcm::builder_unsafe() + .withdraw_asset(asset.clone().into()) + .deposit_asset(asset.clone().into(), beneficiary) + .build(); + assert_eq!( + message, + Xcm(vec![ + WithdrawAsset(asset.clone().into()), + DepositAsset { assets: asset.clone().into(), beneficiary }, + ]) + ); +} diff --git a/polkadot/xcm/procedural/tests/ui.rs b/polkadot/xcm/procedural/tests/ui.rs new file mode 100644 index 0000000000000000000000000000000000000000..a30f4d7dee5dc64fa601e1bf040538607bafc84e --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui.rs @@ -0,0 +1,32 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! UI tests for XCM procedural macros + +#[cfg(not(feature = "disable-ui-tests"))] +#[test] +fn ui() { + // Only run the ui tests when `RUN_UI_TESTS` is set. + if std::env::var("RUN_UI_TESTS").is_err() { + return; + } + + // As trybuild is using `cargo check`, we don't need the real WASM binaries. + std::env::set_var("SKIP_WASM_BUILD", "1"); + + let t = trybuild::TestCases::new(); + t.compile_fail("tests/ui/**/*.rs"); +} diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/badly_formatted_attribute.rs b/polkadot/xcm/procedural/tests/ui/builder_pattern/badly_formatted_attribute.rs new file mode 100644 index 0000000000000000000000000000000000000000..3a103f3ddc459dcb59a07a40931242aa1eab3c1c --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/badly_formatted_attribute.rs @@ -0,0 +1,32 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. 
If not, see . + +//! Test error when using a badly formatted attribute. + +use xcm_procedural::Builder; + +struct Xcm(pub Vec>); + +#[derive(Builder)] +enum Instruction { + #[builder(funds_holding = 2)] + WithdrawAsset(u128), + BuyExecution { fees: u128 }, + UnpaidExecution { weight_limit: (u32, u32) }, + Transact { call: Call }, +} + +fn main() {} diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/badly_formatted_attribute.stderr b/polkadot/xcm/procedural/tests/ui/builder_pattern/badly_formatted_attribute.stderr new file mode 100644 index 0000000000000000000000000000000000000000..978faf2e868d89ea47276bb5a7fed40c529e6336 --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/badly_formatted_attribute.stderr @@ -0,0 +1,5 @@ +error: Expected `builder(loads_holding)` + --> tests/ui/builder_pattern/badly_formatted_attribute.rs:25:5 + | +25 | #[builder(funds_holding = 2)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/buy_execution_named_fields.rs b/polkadot/xcm/procedural/tests/ui/builder_pattern/buy_execution_named_fields.rs new file mode 100644 index 0000000000000000000000000000000000000000..dc5c679a96e72b92c0095e246ef487132dad4f69 --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/buy_execution_named_fields.rs @@ -0,0 +1,30 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Test error when the `BuyExecution` instruction doesn't take named fields. + +use xcm_procedural::Builder; + +struct Xcm(pub Vec>); + +#[derive(Builder)] +enum Instruction { + BuyExecution(u128), + UnpaidExecution { weight_limit: (u32, u32) }, + Transact { call: Call }, +} + +fn main() {} diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/buy_execution_named_fields.stderr b/polkadot/xcm/procedural/tests/ui/builder_pattern/buy_execution_named_fields.stderr new file mode 100644 index 0000000000000000000000000000000000000000..dc8246770ba3e10ed0df45a714dd2d9eb337cc5e --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/buy_execution_named_fields.stderr @@ -0,0 +1,5 @@ +error: BuyExecution should have named fields + --> tests/ui/builder_pattern/buy_execution_named_fields.rs:25:5 + | +25 | BuyExecution(u128), + | ^^^^^^^^^^^^^^^^^^ diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/loads_holding_no_operands.rs b/polkadot/xcm/procedural/tests/ui/builder_pattern/loads_holding_no_operands.rs new file mode 100644 index 0000000000000000000000000000000000000000..070f0be6bacc995aa38a341fa9d242d395c4e045 --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/loads_holding_no_operands.rs @@ -0,0 +1,32 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. 
+ +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Test error when an instruction that loads the holding register doesn't take operands. + +use xcm_procedural::Builder; + +struct Xcm(pub Vec>); + +#[derive(Builder)] +enum Instruction { + #[builder(loads_holding)] + WithdrawAsset, + BuyExecution { fees: u128 }, + UnpaidExecution { weight_limit: (u32, u32) }, + Transact { call: Call }, +} + +fn main() {} diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/loads_holding_no_operands.stderr b/polkadot/xcm/procedural/tests/ui/builder_pattern/loads_holding_no_operands.stderr new file mode 100644 index 0000000000000000000000000000000000000000..0358a35ad3dd7bb48ddd51b69e4f395642d44edf --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/loads_holding_no_operands.stderr @@ -0,0 +1,6 @@ +error: Instructions that load the holding register should take operands + --> tests/ui/builder_pattern/loads_holding_no_operands.rs:25:5 + | +25 | / #[builder(loads_holding)] +26 | | WithdrawAsset, + | |_________________^ diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/no_buy_execution.rs b/polkadot/xcm/procedural/tests/ui/builder_pattern/no_buy_execution.rs new file mode 100644 index 0000000000000000000000000000000000000000..1ed8dd38cbad5b32bb9ce1a38470fb579b3cb5c2 --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/no_buy_execution.rs @@ -0,0 +1,29 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Test error when there's no `BuyExecution` instruction. 
+ +use xcm_procedural::Builder; + +struct Xcm(pub Vec>); + +#[derive(Builder)] +enum Instruction { + UnpaidExecution { weight_limit: (u32, u32) }, + Transact { call: Call }, +} + +fn main() {} diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/no_buy_execution.stderr b/polkadot/xcm/procedural/tests/ui/builder_pattern/no_buy_execution.stderr new file mode 100644 index 0000000000000000000000000000000000000000..d8798c8223f18e74e8ec6f409923da482547c9ec --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/no_buy_execution.stderr @@ -0,0 +1,6 @@ +error: No BuyExecution instruction + --> tests/ui/builder_pattern/no_buy_execution.rs:25:5 + | +25 | / UnpaidExecution { weight_limit: (u32, u32) }, +26 | | Transact { call: Call }, + | |____________________________^ diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/no_unpaid_execution.rs b/polkadot/xcm/procedural/tests/ui/builder_pattern/no_unpaid_execution.rs new file mode 100644 index 0000000000000000000000000000000000000000..d542102d2d35796736eb682377bc8015096186af --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/no_unpaid_execution.rs @@ -0,0 +1,29 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Test error when there's no `UnpaidExecution` instruction. + +use xcm_procedural::Builder; + +struct Xcm(pub Vec>); + +#[derive(Builder)] +enum Instruction { + BuyExecution { fees: u128 }, + Transact { call: Call }, +} + +fn main() {} diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/no_unpaid_execution.stderr b/polkadot/xcm/procedural/tests/ui/builder_pattern/no_unpaid_execution.stderr new file mode 100644 index 0000000000000000000000000000000000000000..c8c0748da7220dd49fdcc196a4fa3ab353564e8c --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/no_unpaid_execution.stderr @@ -0,0 +1,6 @@ +error: No UnpaidExecution instruction + --> tests/ui/builder_pattern/no_unpaid_execution.rs:25:5 + | +25 | / BuyExecution { fees: u128 }, +26 | | Transact { call: Call }, + | |____________________________^ diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/unexpected_attribute.rs b/polkadot/xcm/procedural/tests/ui/builder_pattern/unexpected_attribute.rs new file mode 100644 index 0000000000000000000000000000000000000000..5808ec571ce75f3d7e25ae8489137ac90ced5687 --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/unexpected_attribute.rs @@ -0,0 +1,32 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Test error when using wrong attribute. + +use xcm_procedural::Builder; + +struct Xcm(pub Vec>); + +#[derive(Builder)] +enum Instruction { + #[builder(funds_holding)] + WithdrawAsset(u128), + BuyExecution { fees: u128 }, + UnpaidExecution { weight_limit: (u32, u32) }, + Transact { call: Call }, +} + +fn main() {} diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/unexpected_attribute.stderr b/polkadot/xcm/procedural/tests/ui/builder_pattern/unexpected_attribute.stderr new file mode 100644 index 0000000000000000000000000000000000000000..1ff9d18513686293bc56f438c2bd7fa543820c84 --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/unexpected_attribute.stderr @@ -0,0 +1,5 @@ +error: Expected `builder(loads_holding)` + --> tests/ui/builder_pattern/unexpected_attribute.rs:25:5 + | +25 | #[builder(funds_holding)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/unpaid_execution_named_fields.rs b/polkadot/xcm/procedural/tests/ui/builder_pattern/unpaid_execution_named_fields.rs new file mode 100644 index 0000000000000000000000000000000000000000..bb98d603fd91567406063b72d6133ba87671b4c9 --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/unpaid_execution_named_fields.rs @@ -0,0 +1,30 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Test error when the `UnpaidExecution` instruction doesn't take named fields.
+ +use xcm_procedural::Builder; + +struct Xcm(pub Vec>); + +#[derive(Builder)] +enum Instruction { + BuyExecution { fees: u128 }, + UnpaidExecution(u32, u32), + Transact { call: Call }, +} + +fn main() {} diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/unpaid_execution_named_fields.stderr b/polkadot/xcm/procedural/tests/ui/builder_pattern/unpaid_execution_named_fields.stderr new file mode 100644 index 0000000000000000000000000000000000000000..0a3c0a40a33b0a5224f6b940c1ed18552a15fdf3 --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/unpaid_execution_named_fields.stderr @@ -0,0 +1,5 @@ +error: UnpaidExecution should have named fields + --> tests/ui/builder_pattern/unpaid_execution_named_fields.rs:26:5 + | +26 | UnpaidExecution(u32, u32), + | ^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/wrong_target.rs b/polkadot/xcm/procedural/tests/ui/builder_pattern/wrong_target.rs new file mode 100644 index 0000000000000000000000000000000000000000..e4bcda572ad74ccaafb0c144cb4606690db7639b --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/wrong_target.rs @@ -0,0 +1,25 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Test error when attaching the derive builder macro to something +//! other than the XCM `Instruction` enum. + +use xcm_procedural::Builder; + +#[derive(Builder)] +struct SomeStruct; + +fn main() {} diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/wrong_target.stderr b/polkadot/xcm/procedural/tests/ui/builder_pattern/wrong_target.stderr new file mode 100644 index 0000000000000000000000000000000000000000..007aa0b5ff3035761779a1220f364d980c30936b --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/wrong_target.stderr @@ -0,0 +1,5 @@ +error: Expected the `Instruction` enum + --> tests/ui/builder_pattern/wrong_target.rs:23:1 + | +23 | struct SomeStruct; + | ^^^^^^^^^^^^^^^^^^ diff --git a/polkadot/xcm/src/double_encoded.rs b/polkadot/xcm/src/double_encoded.rs index c4c1276fad8dd14167d9fa9c0df27671268d7c51..875b811da3f71c7f74762ec24acc05b8380e2110 100644 --- a/polkadot/xcm/src/double_encoded.rs +++ b/polkadot/xcm/src/double_encoded.rs @@ -24,6 +24,7 @@ use parity_scale_codec::{Decode, DecodeLimit, Encode}; #[codec(encode_bound())] #[codec(decode_bound())] #[scale_info(bounds(), skip_type_params(T))] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub struct DoubleEncoded { encoded: Vec, #[codec(skip)] @@ -69,6 +70,11 @@ impl DoubleEncoded { pub fn as_ref(&self) -> Option<&T> { self.decoded.as_ref() } + + /// Access the encoded data. 
+ pub fn into_encoded(self) -> Vec { + self.encoded + } } impl DoubleEncoded { diff --git a/polkadot/xcm/src/lib.rs b/polkadot/xcm/src/lib.rs index d3e2baf4f8aa570ffb3722870b3328f1f15c1c03..d804e4bf7351b3fffe5df2e4bfcba35d52b7704d 100644 --- a/polkadot/xcm/src/lib.rs +++ b/polkadot/xcm/src/lib.rs @@ -89,6 +89,7 @@ macro_rules! versioned_type { )] #[codec(encode_bound())] #[codec(decode_bound())] + #[scale_info(replace_segment("staging_xcm", "xcm"))] $(#[$attr])* pub enum $n { $(#[$index3])* @@ -150,6 +151,7 @@ macro_rules! versioned_type { )] #[codec(encode_bound())] #[codec(decode_bound())] + #[scale_info(replace_segment("staging_xcm", "xcm"))] $(#[$attr])* pub enum $n { $(#[$index2])* @@ -310,6 +312,7 @@ versioned_type! { #[codec(encode_bound())] #[codec(decode_bound())] #[scale_info(bounds(), skip_type_params(RuntimeCall))] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum VersionedXcm { #[codec(index = 2)] V2(v2::Xcm), diff --git a/polkadot/xcm/src/tests.rs b/polkadot/xcm/src/tests.rs index 10b14ffec992941a912cb944484411f09e21b984..a3a60f6477c798094e2d1f13327ff995fe1bdc6d 100644 --- a/polkadot/xcm/src/tests.rs +++ b/polkadot/xcm/src/tests.rs @@ -181,3 +181,12 @@ fn encode_decode_versioned_xcm_v3() { let decoded = VersionedXcm::decode(&mut &encoded[..]).unwrap(); assert_eq!(xcm, decoded); } + +// With the renaming of the crate to `staging-xcm` the naming in the metadata changed as well and +// this broke downstream users. This test ensures that the name in the metadata isn't changed. +#[test] +fn ensure_type_info_is_correct() { + let type_info = VersionedXcm::<()>::type_info(); + + assert_eq!(type_info.path.segments, vec!["xcm", "VersionedXcm"]); +} diff --git a/polkadot/xcm/src/v2/junction.rs b/polkadot/xcm/src/v2/junction.rs index 73a5029994624b1c12ef8b7d9bd393b98e8660cd..771931f4b566678829ebf07049842bab0232afc2 100644 --- a/polkadot/xcm/src/v2/junction.rs +++ b/polkadot/xcm/src/v2/junction.rs @@ -27,6 +27,7 @@ use scale_info::TypeInfo; /// Each item assumes a pre-existing location as its context and is defined in terms of it. #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, Debug, TypeInfo, MaxEncodedLen)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum Junction { /// An indexed parachain belonging to and operated by the context. /// diff --git a/polkadot/xcm/src/v2/mod.rs b/polkadot/xcm/src/v2/mod.rs index a81468cd4812d9a08baefad6bee85130d172788d..7f654ebfd9e9b242c920c7e88d02c2ce69714874 100644 --- a/polkadot/xcm/src/v2/mod.rs +++ b/polkadot/xcm/src/v2/mod.rs @@ -81,6 +81,7 @@ pub use traits::{Error, ExecuteXcm, GetWeight, Outcome, Result, SendError, SendR /// Basically just the XCM (more general) version of `ParachainDispatchOrigin`. #[derive(Copy, Clone, Eq, PartialEq, Encode, Decode, Debug, TypeInfo)] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum OriginKind { /// Origin should just be the native dispatch origin representation for the sender in the /// local runtime framework. For Cumulus/Frame chains this is the `Parachain` or `Relay` origin @@ -105,6 +106,7 @@ pub enum OriginKind { /// A global identifier of an account-bearing consensus system. #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, Debug, TypeInfo, MaxEncodedLen)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum NetworkId { /// Unidentified/any. 
Any, @@ -141,6 +143,7 @@ impl TryFrom for NetworkId { /// An identifier of a pluralistic body. #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, Debug, TypeInfo, MaxEncodedLen)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum BodyId { /// The only body in its context. Unit, @@ -195,6 +198,7 @@ impl From for BodyId { /// A part of a pluralistic body. #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, Debug, TypeInfo, MaxEncodedLen)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum BodyPart { /// The body's declaration, under whatever means it decides. Voice, @@ -262,6 +266,7 @@ pub type QueryId = u64; #[codec(encode_bound())] #[codec(decode_bound())] #[scale_info(bounds(), skip_type_params(RuntimeCall))] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub struct Xcm(pub Vec>); impl Xcm { @@ -357,6 +362,7 @@ pub mod prelude { /// Response data to a query. #[derive(Clone, Eq, PartialEq, Encode, Decode, Debug, TypeInfo)] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum Response { /// No response. Serves as a neutral default. Null, @@ -376,6 +382,7 @@ impl Default for Response { /// An optional weight limit. #[derive(Clone, Eq, PartialEq, Encode, Decode, Debug, TypeInfo)] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum WeightLimit { /// No weight limit imposed. Unlimited, @@ -428,6 +435,7 @@ pub type Weight = u64; #[codec(encode_bound())] #[codec(decode_bound())] #[scale_info(bounds(), skip_type_params(RuntimeCall))] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum Instruction { /// Withdraw asset(s) (`assets`) from the ownership of `origin` and place them into the Holding /// Register. diff --git a/polkadot/xcm/src/v2/multiasset.rs b/polkadot/xcm/src/v2/multiasset.rs index fdd7797a123049132452e692f8aa3dfe1829d1d5..5681e9ef8a447cfaf67c6e65c75adc1e1337120f 100644 --- a/polkadot/xcm/src/v2/multiasset.rs +++ b/polkadot/xcm/src/v2/multiasset.rs @@ -41,6 +41,7 @@ use scale_info::TypeInfo; /// A general identifier for an instance of a non-fungible asset class. #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, Debug, TypeInfo)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum AssetInstance { /// Undefined - used if the non-fungible asset class has only one instance. Undefined, @@ -119,6 +120,7 @@ impl TryFrom for AssetInstance { /// Classification of an asset being concrete or abstract. #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Encode, Decode, TypeInfo)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum AssetId { Concrete(MultiLocation), Abstract(Vec), @@ -185,6 +187,7 @@ impl AssetId { /// instance. 
#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Encode, Decode, TypeInfo)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum Fungibility { Fungible(#[codec(compact)] u128), NonFungible(AssetInstance), @@ -224,6 +227,7 @@ impl TryFrom for Fungibility { #[derive(Clone, Eq, PartialEq, Debug, Encode, Decode, TypeInfo)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub struct MultiAsset { pub id: AssetId, pub fun: Fungibility, @@ -309,6 +313,7 @@ impl TryFrom for MultiAsset { /// they must be sorted. #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Encode, TypeInfo)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub struct MultiAssets(Vec); impl Decode for MultiAssets { @@ -479,6 +484,7 @@ impl MultiAssets { /// Classification of whether an asset is fungible or not. #[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Encode, Decode, TypeInfo)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum WildFungibility { Fungible, NonFungible, @@ -498,6 +504,7 @@ impl TryFrom for WildFungibility { /// A wildcard representing a set of assets. #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Encode, Decode, TypeInfo)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum WildMultiAsset { /// All assets in the holding register, up to `usize` individual assets (different instances of /// non-fungibles could be separate assets). @@ -543,6 +550,7 @@ impl, B: Into> From<(A, B)> for WildMultiAsset /// in this implementation and will result in a decode error. #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Encode, Decode, TypeInfo)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum MultiAssetFilter { Definite(MultiAssets), Wild(WildMultiAsset), diff --git a/polkadot/xcm/src/v2/multilocation.rs b/polkadot/xcm/src/v2/multilocation.rs index 7d6bdece335f5659c9c7afa81dae2373da01c8c5..81b67eee9744bb58ff3a8303a5c7757b63642197 100644 --- a/polkadot/xcm/src/v2/multilocation.rs +++ b/polkadot/xcm/src/v2/multilocation.rs @@ -50,6 +50,7 @@ use scale_info::TypeInfo; /// The `MultiLocation` value of `Null` simply refers to the interpreting consensus system. #[derive(Clone, Decode, Encode, Eq, PartialEq, Ord, PartialOrd, Debug, TypeInfo, MaxEncodedLen)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub struct MultiLocation { /// The number of parent junctions at the beginning of this `MultiLocation`. pub parents: u8, @@ -465,6 +466,7 @@ const MAX_JUNCTIONS: usize = 8; /// instructions on constructing parent junctions. #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, Debug, TypeInfo, MaxEncodedLen)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum Junctions { /// The interpreting consensus system. 
Here, diff --git a/polkadot/xcm/src/v2/traits.rs b/polkadot/xcm/src/v2/traits.rs index 8173beb19d87c8150f5118082b9cf193482b5e4f..6453f91a1f1e2d43b5366fc60fd990f1869e2c85 100644 --- a/polkadot/xcm/src/v2/traits.rs +++ b/polkadot/xcm/src/v2/traits.rs @@ -29,6 +29,7 @@ pub trait GetWeight { } #[derive(Copy, Clone, Encode, Decode, Eq, PartialEq, Debug, TypeInfo)] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum Error { // Errors that happen due to instructions being executed. These alone are defined in the // XCM specification. @@ -165,6 +166,7 @@ pub type Result = result::Result<(), Error>; /// Outcome of an XCM execution. #[derive(Clone, Encode, Decode, Eq, PartialEq, Debug, TypeInfo)] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum Outcome { /// Execution completed successfully; given weight was used. Complete(Weight), @@ -246,6 +248,7 @@ impl ExecuteXcm for () { /// Error result value when attempting to send an XCM message. #[derive(Clone, Encode, Decode, Eq, PartialEq, Debug, scale_info::TypeInfo)] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum SendError { /// The message and destination combination was not recognized as being reachable. /// diff --git a/polkadot/xcm/src/v3/junction.rs b/polkadot/xcm/src/v3/junction.rs index b5dd5bc7c88fe5a24792e3bc2660809435ebae08..47429a8c36e9a76c29b495feb0e2b765e8c7a21a 100644 --- a/polkadot/xcm/src/v3/junction.rs +++ b/polkadot/xcm/src/v3/junction.rs @@ -49,6 +49,7 @@ use serde::{Deserialize, Serialize}; Serialize, Deserialize, )] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum NetworkId { /// Network specified by the first 32 bytes of its genesis block. ByGenesis([u8; 32]), @@ -116,6 +117,7 @@ impl TryFrom for NetworkId { Serialize, Deserialize, )] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum BodyId { /// The only body in its context. Unit, @@ -186,6 +188,7 @@ impl TryFrom for BodyId { Serialize, Deserialize, )] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum BodyPart { /// The body's declaration, under whatever means it decides. Voice, @@ -261,6 +264,7 @@ impl TryFrom for BodyPart { Serialize, Deserialize, )] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum Junction { /// An indexed parachain belonging to and operated by the context. /// diff --git a/polkadot/xcm/src/v3/junctions.rs b/polkadot/xcm/src/v3/junctions.rs index f7624c91c861259fa6f747e0d8e811a68ac56310..d1cbc2dbed42cddd4fae2eae398a120e2272679b 100644 --- a/polkadot/xcm/src/v3/junctions.rs +++ b/polkadot/xcm/src/v3/junctions.rs @@ -44,6 +44,7 @@ pub(crate) const MAX_JUNCTIONS: usize = 8; serde::Serialize, serde::Deserialize, )] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum Junctions { /// The interpreting consensus system. 
Here, diff --git a/polkadot/xcm/src/v3/mod.rs b/polkadot/xcm/src/v3/mod.rs index a428f1277a5fe479422b3dd7bf80d741b267dd20..bbdd504ceb0ff0415d73ed7891958a4cad7b3b67 100644 --- a/polkadot/xcm/src/v3/mod.rs +++ b/polkadot/xcm/src/v3/mod.rs @@ -45,7 +45,7 @@ pub use junction::{BodyId, BodyPart, Junction, NetworkId}; pub use junctions::Junctions; pub use multiasset::{ AssetId, AssetInstance, Fungibility, MultiAsset, MultiAssetFilter, MultiAssets, - WildFungibility, WildMultiAsset, + WildFungibility, WildMultiAsset, MAX_ITEMS_IN_MULTIASSETS, }; pub use multilocation::{ Ancestor, AncestorThen, InteriorMultiLocation, MultiLocation, Parent, ParentThen, @@ -68,9 +68,13 @@ pub type QueryId = u64; #[derivative(Clone(bound = ""), Eq(bound = ""), PartialEq(bound = ""), Debug(bound = ""))] #[codec(encode_bound())] #[scale_info(bounds(), skip_type_params(Call))] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub struct Xcm(pub Vec>); -const MAX_INSTRUCTIONS_TO_DECODE: u8 = 100; +/// The maximal number of instructions in an XCM before decoding fails. +/// +/// This is a deliberate limit - not a technical one. +pub const MAX_INSTRUCTIONS_TO_DECODE: u8 = 100; environmental::environmental!(instructions_count: u8); @@ -236,17 +240,18 @@ parameter_types! { } #[derive(Clone, Eq, PartialEq, Encode, Decode, Debug, TypeInfo, MaxEncodedLen)] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub struct PalletInfo { #[codec(compact)] - index: u32, - name: BoundedVec, - module_name: BoundedVec, + pub index: u32, + pub name: BoundedVec, + pub module_name: BoundedVec, #[codec(compact)] - major: u32, + pub major: u32, #[codec(compact)] - minor: u32, + pub minor: u32, #[codec(compact)] - patch: u32, + pub patch: u32, } impl PalletInfo { @@ -266,6 +271,7 @@ impl PalletInfo { } #[derive(Clone, Eq, PartialEq, Encode, Decode, Debug, TypeInfo, MaxEncodedLen)] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum MaybeErrorCode { Success, Error(BoundedVec), @@ -289,6 +295,7 @@ impl Default for MaybeErrorCode { /// Response data to a query. #[derive(Clone, Eq, PartialEq, Encode, Decode, Debug, TypeInfo, MaxEncodedLen)] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum Response { /// No response. Serves as a neutral default. Null, @@ -312,6 +319,7 @@ impl Default for Response { /// Information regarding the composition of a query response. #[derive(Clone, Eq, PartialEq, Encode, Decode, Debug, TypeInfo)] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub struct QueryResponseInfo { /// The destination to which the query response message should be send. pub destination: MultiLocation, @@ -324,6 +332,7 @@ pub struct QueryResponseInfo { /// An optional weight limit. #[derive(Clone, Eq, PartialEq, Encode, Decode, Debug, TypeInfo)] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum WeightLimit { /// No weight limit imposed. Unlimited, @@ -395,11 +404,19 @@ impl XcmContext { /// /// This is the inner XCM format and is version-sensitive. Messages are typically passed using the /// outer XCM format, known as `VersionedXcm`. 
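A side note on the newly public `MAX_INSTRUCTIONS_TO_DECODE` above: downstream crates can now assert against the limit directly. A minimal sketch of the behaviour it documents (the function name is illustrative; it assumes a crate that depends on this one under the `xcm` name and on `parity-scale-codec`):

use parity_scale_codec::{Decode, Encode};
use xcm::v3::{prelude::*, Xcm, MAX_INSTRUCTIONS_TO_DECODE};

fn decoding_enforces_the_instruction_limit() {
    // One instruction over the limit: encoding still succeeds, decoding is rejected.
    let too_long: Xcm<()> = Xcm(vec![ClearOrigin; MAX_INSTRUCTIONS_TO_DECODE as usize + 1]);
    assert!(Xcm::<()>::decode(&mut &too_long.encode()[..]).is_err());

    // Exactly at the limit, the message round-trips.
    let at_limit: Xcm<()> = Xcm(vec![ClearOrigin; MAX_INSTRUCTIONS_TO_DECODE as usize]);
    assert_eq!(Xcm::<()>::decode(&mut &at_limit.encode()[..]).unwrap(), at_limit);
}

Nested messages count toward the same limit: the decoder tracks a single `instructions_count` across the whole message via the `environmental!` declaration above.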
-#[derive(Derivative, Encode, Decode, TypeInfo, xcm_procedural::XcmWeightInfoTrait)] +#[derive( + Derivative, + Encode, + Decode, + TypeInfo, + xcm_procedural::XcmWeightInfoTrait, + xcm_procedural::Builder, +)] #[derivative(Clone(bound = ""), Eq(bound = ""), PartialEq(bound = ""), Debug(bound = ""))] #[codec(encode_bound())] #[codec(decode_bound())] #[scale_info(bounds(), skip_type_params(Call))] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum Instruction { /// Withdraw asset(s) (`assets`) from the ownership of `origin` and place them into the Holding /// Register. @@ -409,6 +426,7 @@ pub enum Instruction { /// Kind: *Command*. /// /// Errors: + #[builder(loads_holding)] WithdrawAsset(MultiAssets), /// Asset(s) (`assets`) have been received into the ownership of this system on the `origin` @@ -422,6 +440,7 @@ pub enum Instruction { /// Kind: *Trusted Indication*. /// /// Errors: + #[builder(loads_holding)] ReserveAssetDeposited(MultiAssets), /// Asset(s) (`assets`) have been destroyed on the `origin` system and equivalent assets should @@ -435,6 +454,7 @@ pub enum Instruction { /// Kind: *Trusted Indication*. /// /// Errors: + #[builder(loads_holding)] ReceiveTeleportedAsset(MultiAssets), /// Respond with information that the local system is expecting. @@ -759,6 +779,7 @@ pub enum Instruction { /// Kind: *Command* /// /// Errors: + #[builder(loads_holding)] ClaimAsset { assets: MultiAssets, ticket: MultiLocation }, /// Always throws an error of type `Trap`. @@ -932,7 +953,7 @@ pub enum Instruction { /// should be sent on arrival. /// - `xcm`: The message to be exported. /// - /// As an example, to export a message for execution on Statemine (parachain #1000 in the + /// As an example, to export a message for execution on Asset Hub (parachain #1000 in the /// Kusama network), you would call with `network: NetworkId::Kusama` and /// `destination: X1(Parachain(1000))`. Alternatively, to export a message for execution on /// Polkadot, you would call with `network: NetworkId:: Polkadot` and `destination: Here`. diff --git a/polkadot/xcm/src/v3/multiasset.rs b/polkadot/xcm/src/v3/multiasset.rs index 188555318c8c0ba16e129407845ffdb3b056e81d..454120a1a7b9c701423236004c36aea55a6425a7 100644 --- a/polkadot/xcm/src/v3/multiasset.rs +++ b/polkadot/xcm/src/v3/multiasset.rs @@ -47,6 +47,7 @@ use scale_info::TypeInfo; Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, Debug, TypeInfo, MaxEncodedLen, )] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum AssetInstance { /// Undefined - used if the non-fungible asset class has only one instance. Undefined, @@ -242,6 +243,7 @@ impl TryFrom for u128 { /// instance. #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Encode, TypeInfo, MaxEncodedLen)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum Fungibility { /// A fungible asset; we record a number of units, as a `u128` in the inner item. Fungible(#[codec(compact)] u128), @@ -311,6 +313,7 @@ impl TryFrom for Fungibility { Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Encode, Decode, TypeInfo, MaxEncodedLen, )] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum WildFungibility { /// The asset is fungible. 
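The extra `xcm_procedural::Builder` derive and the `#[builder(loads_holding)]` markers above enable a type-checked builder for XCM programs. The patch only shows the derive, so the method surface below is an assumption (snake_case methods named after the instruction variants, finished with `build()`); treat it as a sketch of the intended usage, not the definitive API:

use xcm::v3::{
    MultiAsset, MultiAssetFilter, MultiAssets, MultiLocation, WeightLimit, WildMultiAsset, Xcm,
};

// Hypothetical paid-execution program: the point of `#[builder(loads_holding)]` is that the
// builder should require one of those instructions (e.g. `WithdrawAsset`) before fees can be
// bought out of the Holding Register.
fn paid_program_sketch(assets: MultiAssets, fees: MultiAsset, beneficiary: MultiLocation) -> Xcm<()> {
    Xcm::<()>::builder()
        .withdraw_asset(assets)
        .buy_execution(fees, WeightLimit::Unlimited)
        .deposit_asset(MultiAssetFilter::Wild(WildMultiAsset::All), beneficiary)
        .build()
}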
Fungible, @@ -334,6 +337,7 @@ impl TryFrom for WildFungibility { Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Encode, Decode, TypeInfo, MaxEncodedLen, )] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum AssetId { /// A specific location identifying an asset. Concrete(MultiLocation), @@ -408,6 +412,7 @@ impl AssetId { /// Either an amount of a single fungible asset, or a single well-identified non-fungible asset. #[derive(Clone, Eq, PartialEq, Debug, Encode, Decode, TypeInfo, MaxEncodedLen)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub struct MultiAsset { /// The overall asset identity (aka *class*, in the case of a non-fungible). pub id: AssetId, @@ -505,6 +510,7 @@ impl TryFrom for MultiAsset { /// - The number of items should grow no larger than `MAX_ITEMS_IN_MULTIASSETS`. #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Encode, TypeInfo, Default)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub struct MultiAssets(Vec); /// Maximum number of items in a single `MultiAssets` value that can be decoded. @@ -683,12 +689,16 @@ impl MultiAssets { /// Mutate the location of the asset identifier if concrete, giving it the same location /// relative to a `target` context. The local context is provided as `context`. + /// + /// This will also re-sort the inner assets to preserve ordering guarantees. pub fn reanchor( &mut self, target: &MultiLocation, context: InteriorMultiLocation, ) -> Result<(), ()> { - self.0.iter_mut().try_for_each(|i| i.reanchor(target, context)) + self.0.iter_mut().try_for_each(|i| i.reanchor(target, context))?; + self.0.sort(); + Ok(()) } /// Return a reference to an item at a specific index or `None` if it doesn't exist. @@ -700,6 +710,7 @@ impl MultiAssets { /// A wildcard representing a set of assets. #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Encode, Decode, TypeInfo, MaxEncodedLen)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum WildMultiAsset { /// All assets in Holding. All, @@ -812,6 +823,7 @@ impl, B: Into> From<(A, B)> for WildMultiAsset /// `MultiAsset` collection, defined either by a number of `MultiAssets` or a single wildcard. #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Encode, Decode, TypeInfo, MaxEncodedLen)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum MultiAssetFilter { /// Specify the filter as being everything contained by the given `MultiAssets` inner. 
Definite(MultiAssets), @@ -977,6 +989,42 @@ mod tests { assert!(r.is_err()); } + #[test] + fn reanchor_preserves_sorting() { + use super::*; + use alloc::vec; + + let reanchor_context = X1(Parachain(2000)); + let dest = MultiLocation::new(1, Here); + + let asset_1: MultiAsset = + (MultiLocation::new(0, X2(PalletInstance(50), GeneralIndex(1))), 10).into(); + let mut asset_1_reanchored = asset_1.clone(); + assert!(asset_1_reanchored.reanchor(&dest, reanchor_context).is_ok()); + assert_eq!( + asset_1_reanchored, + (MultiLocation::new(0, X3(Parachain(2000), PalletInstance(50), GeneralIndex(1))), 10) + .into() + ); + + let asset_2: MultiAsset = (MultiLocation::new(1, Here), 10).into(); + let mut asset_2_reanchored = asset_2.clone(); + assert!(asset_2_reanchored.reanchor(&dest, reanchor_context).is_ok()); + assert_eq!(asset_2_reanchored, (MultiLocation::new(0, Here), 10).into()); + + let asset_3: MultiAsset = (MultiLocation::new(1, X1(Parachain(1000))), 10).into(); + let mut asset_3_reanchored = asset_3.clone(); + assert!(asset_3_reanchored.reanchor(&dest, reanchor_context).is_ok()); + assert_eq!(asset_3_reanchored, (MultiLocation::new(0, X1(Parachain(1000))), 10).into()); + + let mut assets: MultiAssets = + vec![asset_1.clone(), asset_2.clone(), asset_3.clone()].into(); + assert_eq!(assets.clone(), vec![asset_1.clone(), asset_2.clone(), asset_3.clone()].into()); + + assert!(assets.reanchor(&dest, reanchor_context).is_ok()); + assert_eq!(assets, vec![asset_2_reanchored, asset_3_reanchored, asset_1_reanchored].into()); + } + #[test] fn decoding_respects_limit() { use super::*; diff --git a/polkadot/xcm/src/v3/multilocation.rs b/polkadot/xcm/src/v3/multilocation.rs index 8a1575d9bc950620e3f411349cdca6ef9db5deef..89e2598444382c8bb4ec9a2ab62e19638e1441f7 100644 --- a/polkadot/xcm/src/v3/multilocation.rs +++ b/polkadot/xcm/src/v3/multilocation.rs @@ -444,6 +444,21 @@ impl MultiLocation { } } } + + /// Return the MultiLocation subsection identifying the chain that `self` points to. 
+ pub fn chain_location(&self) -> MultiLocation { + let mut clone = *self; + // start popping junctions until we reach chain identifier + while let Some(j) = clone.last() { + if matches!(j, Junction::Parachain(_) | Junction::GlobalConsensus(_)) { + // return chain subsection + return clone + } else { + (clone, _) = clone.split_last_interior(); + } + } + MultiLocation::new(clone.parents, Junctions::Here) + } } impl TryFrom for MultiLocation { @@ -674,6 +689,57 @@ mod tests { assert_eq!(iter.next_back(), None); } + #[test] + fn chain_location_works() { + // Relay-chain or parachain context pointing to local resource, + let relay_to_local = MultiLocation::new(0, (PalletInstance(42), GeneralIndex(42))); + assert_eq!(relay_to_local.chain_location(), MultiLocation::here()); + + // Relay-chain context pointing to child parachain, + let relay_to_child = + MultiLocation::new(0, (Parachain(42), PalletInstance(42), GeneralIndex(42))); + let expected = MultiLocation::new(0, Parachain(42)); + assert_eq!(relay_to_child.chain_location(), expected); + + // Relay-chain context pointing to different consensus relay, + let relay_to_remote_relay = + MultiLocation::new(1, (GlobalConsensus(Kusama), PalletInstance(42), GeneralIndex(42))); + let expected = MultiLocation::new(1, GlobalConsensus(Kusama)); + assert_eq!(relay_to_remote_relay.chain_location(), expected); + + // Relay-chain context pointing to different consensus parachain, + let relay_to_remote_para = MultiLocation::new( + 1, + (GlobalConsensus(Kusama), Parachain(42), PalletInstance(42), GeneralIndex(42)), + ); + let expected = MultiLocation::new(1, (GlobalConsensus(Kusama), Parachain(42))); + assert_eq!(relay_to_remote_para.chain_location(), expected); + + // Parachain context pointing to relay chain, + let para_to_relay = MultiLocation::new(1, (PalletInstance(42), GeneralIndex(42))); + assert_eq!(para_to_relay.chain_location(), MultiLocation::parent()); + + // Parachain context pointing to sibling parachain, + let para_to_sibling = + MultiLocation::new(1, (Parachain(42), PalletInstance(42), GeneralIndex(42))); + let expected = MultiLocation::new(1, Parachain(42)); + assert_eq!(para_to_sibling.chain_location(), expected); + + // Parachain context pointing to different consensus relay, + let para_to_remote_relay = + MultiLocation::new(2, (GlobalConsensus(Kusama), PalletInstance(42), GeneralIndex(42))); + let expected = MultiLocation::new(2, GlobalConsensus(Kusama)); + assert_eq!(para_to_remote_relay.chain_location(), expected); + + // Parachain context pointing to different consensus parachain, + let para_to_remote_para = MultiLocation::new( + 2, + (GlobalConsensus(Kusama), Parachain(42), PalletInstance(42), GeneralIndex(42)), + ); + let expected = MultiLocation::new(2, (GlobalConsensus(Kusama), Parachain(42))); + assert_eq!(para_to_remote_para.chain_location(), expected); + } + #[test] fn conversion_from_other_types_works() { use crate::v2; diff --git a/polkadot/xcm/src/v3/traits.rs b/polkadot/xcm/src/v3/traits.rs index a3cbeada91bd258545664b72b79d88ff7ca09f4c..1043d17b7106647e86f9290ff008e00f2ead4555 100644 --- a/polkadot/xcm/src/v3/traits.rs +++ b/polkadot/xcm/src/v3/traits.rs @@ -29,6 +29,7 @@ use super::*; /// format. Those trailing are merely part of the XCM implementation; there is no expectation that /// they will retain the same index over time. #[derive(Copy, Clone, Encode, Decode, Eq, PartialEq, Debug, TypeInfo)] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum Error { // Errors that happen due to instructions being executed. 
These alone are defined in the // XCM specification. @@ -262,6 +263,7 @@ impl From for Outcome { /// Outcome of an XCM execution. #[derive(Clone, Encode, Decode, Eq, PartialEq, Debug, TypeInfo)] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum Outcome { /// Execution completed successfully; given weight was used. Complete(Weight), @@ -410,6 +412,7 @@ impl ExecuteXcm for () { /// Error result value when attempting to send an XCM message. #[derive(Clone, Encode, Decode, Eq, PartialEq, Debug, scale_info::TypeInfo)] +#[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum SendError { /// The message and destination combination was not recognized as being reachable. /// diff --git a/polkadot/xcm/xcm-builder/Cargo.toml b/polkadot/xcm/xcm-builder/Cargo.toml index 1f0e1cf347758b01f7a06c65f75d6caf7944aa51..7d6c40eb8417e55c3bf04b8cb8c85520312386ee 100644 --- a/polkadot/xcm/xcm-builder/Cargo.toml +++ b/polkadot/xcm/xcm-builder/Cargo.toml @@ -9,7 +9,7 @@ version = "1.0.0" [dependencies] impl-trait-for-tuples = "0.2.1" parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } xcm = { package = "staging-xcm", path = "..", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../xcm-executor", default-features = false } sp-std = { path = "../../../substrate/primitives/std", default-features = false } diff --git a/polkadot/xcm/xcm-builder/src/asset_conversion.rs b/polkadot/xcm/xcm-builder/src/asset_conversion.rs index a246436a9d6d3ece32175f80b4c97ccc37c65530..5b76ed764a8128c80afcf7369e96628553a2b570 100644 --- a/polkadot/xcm/xcm-builder/src/asset_conversion.rs +++ b/polkadot/xcm/xcm-builder/src/asset_conversion.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Adapters to work with `frame_support::traits::tokens::fungibles` through XCM. +//! Adapters to work with [`frame_support::traits::fungibles`] through XCM. use frame_support::traits::{Contains, Get}; use sp_runtime::traits::MaybeEquivalence; diff --git a/polkadot/xcm/xcm-builder/src/barriers.rs b/polkadot/xcm/xcm-builder/src/barriers.rs index 0d04e0971bf7bbd4be0f32b088d5a518f4d58030..c2b62751c688f73a217b497c2f585c392ca72166 100644 --- a/polkadot/xcm/xcm-builder/src/barriers.rs +++ b/polkadot/xcm/xcm-builder/src/barriers.rs @@ -52,7 +52,7 @@ impl ShouldExecute for TakeWeightCredit { } } -const MAX_ASSETS_FOR_BUY_EXECUTION: usize = 1; +const MAX_ASSETS_FOR_BUY_EXECUTION: usize = 2; /// Allows execution from `origin` if it is contained in `T` (i.e. `T::Contains(origin)`) taking /// payments into account. @@ -81,10 +81,15 @@ impl> ShouldExecute for AllowTopLevelPaidExecutionFro instructions[..end] .matcher() .match_next_inst(|inst| match inst { - ReceiveTeleportedAsset(..) | ReserveAssetDeposited(..) => Ok(()), - WithdrawAsset(ref assets) if assets.len() <= MAX_ASSETS_FOR_BUY_EXECUTION => Ok(()), - ClaimAsset { ref assets, .. } if assets.len() <= MAX_ASSETS_FOR_BUY_EXECUTION => - Ok(()), + ReceiveTeleportedAsset(ref assets) | + ReserveAssetDeposited(ref assets) | + WithdrawAsset(ref assets) | + ClaimAsset { ref assets, .. } => + if assets.len() <= MAX_ASSETS_FOR_BUY_EXECUTION { + Ok(()) + } else { + Err(ProcessMessageError::BadFormat) + }, _ => Err(ProcessMessageError::BadFormat), })? 
.skip_inst_while(|inst| matches!(inst, ClearOrigin))? diff --git a/polkadot/xcm/xcm-builder/src/controller.rs b/polkadot/xcm/xcm-builder/src/controller.rs new file mode 100644 index 0000000000000000000000000000000000000000..931d812eaaf192c49ed2bfed3fbd123fdd89e691 --- /dev/null +++ b/polkadot/xcm/xcm-builder/src/controller.rs @@ -0,0 +1,187 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! A set of traits that define how a pallet interface with XCM. +//! Controller traits defined in this module are high-level traits that will rely on other traits +//! from `xcm-executor` to perform their tasks. + +use frame_support::pallet_prelude::DispatchError; +use sp_std::boxed::Box; +use xcm::prelude::*; +pub use xcm_executor::traits::QueryHandler; + +/// Umbrella trait for all Controller traits. +pub trait Controller: + ExecuteController + SendController + QueryController +{ +} + +impl Controller for T where + T: ExecuteController + + SendController + + QueryController +{ +} + +/// Weight functions needed for [`ExecuteController`]. +pub trait ExecuteControllerWeightInfo { + /// Weight for [`ExecuteController::execute`] + fn execute() -> Weight; +} + +/// Execute an XCM locally, for a given origin. +/// +/// An implementation of that trait will handle the low-level details of the execution, such as: +/// - Validating and Converting the origin to a MultiLocation. +/// - Handling versioning. +/// - Calling the internal executor, which implements [`ExecuteXcm`]. +pub trait ExecuteController { + /// Weight information for ExecuteController functions. + type WeightInfo: ExecuteControllerWeightInfo; + + /// Attempt to execute an XCM locally, and return the outcome. + /// + /// # Parameters + /// + /// - `origin`: the origin of the call. + /// - `message`: the XCM program to be executed. + /// - `max_weight`: the maximum weight that can be consumed by the execution. + fn execute( + origin: Origin, + message: Box>, + max_weight: Weight, + ) -> Result; +} + +/// Weight functions needed for [`SendController`]. +pub trait SendControllerWeightInfo { + /// Weight for [`SendController::send`] + fn send() -> Weight; +} + +/// Send an XCM from a given origin. +/// +/// An implementation of that trait will handle the low-level details of dispatching an XCM, such +/// as: +/// - Validating and Converting the origin to an interior location. +/// - Handling versioning. +/// - Calling the internal router, which implements [`SendXcm`]. +pub trait SendController { + /// Weight information for SendController functions. + type WeightInfo: SendControllerWeightInfo; + + /// Send an XCM to be executed by a remote location. + /// + /// # Parameters + /// + /// - `origin`: the origin of the call. + /// - `dest`: the destination of the message. + /// - `msg`: the XCM to be sent. 
+ fn send( + origin: Origin, + dest: Box, + message: Box>, + ) -> Result; +} + +/// Weight functions needed for [`QueryController`]. +pub trait QueryControllerWeightInfo { + /// Weight for [`QueryController::query`] + fn query() -> Weight; + + /// Weight for [`QueryHandler::take_response`] + fn take_response() -> Weight; +} + +/// Query a remote location, from a given origin. +/// +/// An implementation of that trait will handle the low-level details of querying a remote location, +/// such as: +/// - Validating and Converting the origin to an interior location. +/// - Handling versioning. +/// - Calling the [`QueryHandler`] to register the query. +pub trait QueryController: QueryHandler { + /// Weight information for QueryController functions. + type WeightInfo: QueryControllerWeightInfo; + + /// Query a remote location. + /// + /// # Parameters + /// + /// - `origin`: the origin of the call, used to determine the responder. + /// - `timeout`: the maximum block number that the query should be responded to. + /// - `match_querier`: the querier that the query should be responded to. + fn query( + origin: Origin, + timeout: Timeout, + match_querier: VersionedMultiLocation, + ) -> Result; +} + +impl ExecuteController for () { + type WeightInfo = (); + fn execute( + _origin: Origin, + _message: Box>, + _max_weight: Weight, + ) -> Result { + Ok(Outcome::Error(XcmError::Unimplemented)) + } +} + +impl ExecuteControllerWeightInfo for () { + fn execute() -> Weight { + Weight::zero() + } +} + +impl SendController for () { + type WeightInfo = (); + fn send( + _origin: Origin, + _dest: Box, + _message: Box>, + ) -> Result { + Ok(Default::default()) + } +} + +impl SendControllerWeightInfo for () { + fn send() -> Weight { + Weight::zero() + } +} + +impl QueryControllerWeightInfo for () { + fn query() -> Weight { + Weight::zero() + } + fn take_response() -> Weight { + Weight::zero() + } +} + +impl QueryController for () { + type WeightInfo = (); + + fn query( + _origin: Origin, + _timeout: Timeout, + _match_querier: VersionedMultiLocation, + ) -> Result { + Ok(Default::default()) + } +} diff --git a/polkadot/xcm/xcm-builder/src/currency_adapter.rs b/polkadot/xcm/xcm-builder/src/currency_adapter.rs index 23dc7958fe15426dbec058cdcd01dc1f9b34a126..8ecf1dee72db00695a0a48ca7b2f489ec6f8bf4c 100644 --- a/polkadot/xcm/xcm-builder/src/currency_adapter.rs +++ b/polkadot/xcm/xcm-builder/src/currency_adapter.rs @@ -191,7 +191,11 @@ impl< } } - fn deposit_asset(what: &MultiAsset, who: &MultiLocation, _context: &XcmContext) -> Result { + fn deposit_asset( + what: &MultiAsset, + who: &MultiLocation, + _context: Option<&XcmContext>, + ) -> Result { log::trace!(target: "xcm::currency_adapter", "deposit_asset what: {:?}, who: {:?}", what, who); // Check we handle this asset. let amount = Matcher::matches_fungible(&what).ok_or(Error::AssetNotHandled)?; diff --git a/polkadot/xcm/xcm-builder/src/fee_handling.rs b/polkadot/xcm/xcm-builder/src/fee_handling.rs new file mode 100644 index 0000000000000000000000000000000000000000..c158d5d862d7515aaf0358e717717f86d32a2a28 --- /dev/null +++ b/polkadot/xcm/xcm-builder/src/fee_handling.rs @@ -0,0 +1,121 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
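`ExecuteController`, `SendController` and `QueryController` let a pallet depend only on the capability it actually needs, with the `()` implementations above as no-op stand-ins for runtimes that do not wire XCM in. A sketch of a generic consumer; the helper itself and the `RuntimeCall = ()` simplification are illustrative assumptions, not part of the patch:

use frame_support::pallet_prelude::{DispatchError, Weight};
use sp_std::boxed::Box;
use xcm::prelude::*;
use xcm_builder::{ExecuteController, SendController};

/// Send `message` to `dest`, or execute it locally when `dest` is `Here`.
/// `Interface` could be `pallet_xcm::Pallet<Runtime>`, or `()` in a test runtime.
fn send_or_execute<Origin, Interface>(
    origin: Origin,
    dest: MultiLocation,
    message: Xcm<()>,
    max_weight: Weight,
) -> Result<(), DispatchError>
where
    Interface: ExecuteController<Origin, ()> + SendController<Origin>,
{
    if dest == MultiLocation::here() {
        let _outcome =
            Interface::execute(origin, Box::new(VersionedXcm::from(message)), max_weight)?;
    } else {
        let _hash = Interface::send(
            origin,
            Box::new(VersionedMultiLocation::from(dest)),
            Box::new(VersionedXcm::from(message)),
        )?;
    }
    Ok(())
}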
+ +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use core::marker::PhantomData; +use frame_support::traits::{Contains, Get}; +use xcm::prelude::*; +use xcm_executor::traits::{FeeManager, FeeReason, TransactAsset}; + +/// Handles the fees that are taken by certain XCM instructions. +pub trait HandleFee { + /// Do something with the fee which has been paid. Doing nothing here silently burns the + /// fees. + /// + /// Returns any part of the fee that wasn't consumed. + fn handle_fee(fee: MultiAssets, context: Option<&XcmContext>, reason: FeeReason) + -> MultiAssets; +} + +// Default `HandleFee` implementation that just burns the fee. +impl HandleFee for () { + fn handle_fee(_: MultiAssets, _: Option<&XcmContext>, _: FeeReason) -> MultiAssets { + MultiAssets::new() + } +} + +#[impl_trait_for_tuples::impl_for_tuples(1, 30)] +impl HandleFee for Tuple { + fn handle_fee( + fee: MultiAssets, + context: Option<&XcmContext>, + reason: FeeReason, + ) -> MultiAssets { + let mut unconsumed_fee = fee; + for_tuples!( #( + unconsumed_fee = Tuple::handle_fee(unconsumed_fee, context, reason); + if unconsumed_fee.is_none() { + return unconsumed_fee; + } + )* ); + + unconsumed_fee + } +} + +/// A `FeeManager` implementation that permits the specified `WaivedLocations` to not pay for fees +/// and that uses the provided `HandleFee` implementation otherwise. +pub struct XcmFeeManagerFromComponents( + PhantomData<(WaivedLocations, HandleFee)>, +); +impl, FeeHandler: HandleFee> FeeManager + for XcmFeeManagerFromComponents +{ + fn is_waived(origin: Option<&MultiLocation>, _: FeeReason) -> bool { + let Some(loc) = origin else { return false }; + WaivedLocations::contains(loc) + } + + fn handle_fee(fee: MultiAssets, context: Option<&XcmContext>, reason: FeeReason) { + FeeHandler::handle_fee(fee, context, reason); + } +} + +/// Try to deposit the given fee in the specified account. +/// Burns the fee in case of a failure. +pub fn deposit_or_burn_fee>( + fee: MultiAssets, + context: Option<&XcmContext>, + receiver: AccountId, +) { + let dest = AccountId32 { network: None, id: receiver.into() }.into(); + for asset in fee.into_inner() { + if let Err(e) = AssetTransactor::deposit_asset(&asset, &dest, context) { + log::trace!( + target: "xcm::fees", + "`AssetTransactor::deposit_asset` returned error: {:?}. Burning fee: {:?}. \ + They might be burned.", + e, asset, + ); + } + } +} + +/// A `HandleFee` implementation that simply deposits the fees into a specific on-chain +/// `ReceiverAccount`. +/// +/// It reuses the `AssetTransactor` configured on the XCM executor to deposit fee assets. If +/// the `AssetTransactor` returns an error while calling `deposit_asset`, then a warning will be +/// logged and the fee burned. 
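`HandleFee` implementations return the part of the fee they did not consume, which is what lets the tuple implementation above chain handlers: each element receives whatever the previous one left over. A sketch of a custom handler plus the usual wiring; `WaivedLocations`, `AssetTransactor`, `AccountId` and `TreasuryAccount` are placeholders for whatever the runtime already defines:

use xcm::prelude::*;
use xcm_builder::{HandleFee, XcmFeeManagerFromComponents, XcmFeeToAccount};
use xcm_executor::traits::FeeReason;

/// Illustrative handler: consumes nothing, only logs, and passes the whole fee on to the
/// next handler in the tuple.
pub struct LogFee;
impl HandleFee for LogFee {
    fn handle_fee(fee: MultiAssets, _context: Option<&XcmContext>, _reason: FeeReason) -> MultiAssets {
        log::trace!(target: "xcm::fees", "fee charged: {:?}", fee);
        fee
    }
}

// Waived locations pay nothing; for everyone else the fee is logged and then deposited
// into the treasury account via the executor's `AssetTransactor`.
pub type FeeManager = XcmFeeManagerFromComponents<
    WaivedLocations,
    (LogFee, XcmFeeToAccount<AssetTransactor, AccountId, TreasuryAccount>),
>;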
+pub struct XcmFeeToAccount( + PhantomData<(AssetTransactor, AccountId, ReceiverAccount)>, +); + +impl< + AssetTransactor: TransactAsset, + AccountId: Clone + Into<[u8; 32]>, + ReceiverAccount: Get, + > HandleFee for XcmFeeToAccount +{ + fn handle_fee( + fee: MultiAssets, + context: Option<&XcmContext>, + _reason: FeeReason, + ) -> MultiAssets { + deposit_or_burn_fee::(fee, context, ReceiverAccount::get()); + + MultiAssets::new() + } +} diff --git a/polkadot/xcm/xcm-builder/src/filter_asset_location.rs b/polkadot/xcm/xcm-builder/src/filter_asset_location.rs index 68dd86d9bfeb371e4a4baca92a0e20b31d32f3fc..df81f536f7b7133fb6dcbaa311e40e29860deb65 100644 --- a/polkadot/xcm/xcm-builder/src/filter_asset_location.rs +++ b/polkadot/xcm/xcm-builder/src/filter_asset_location.rs @@ -14,11 +14,12 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Various implementations of `ContainsPair`. +//! Various implementations of `ContainsPair` or +//! `Contains<(MultiLocation, Vec)>`. -use frame_support::traits::{ContainsPair, Get}; -use sp_std::marker::PhantomData; -use xcm::latest::{AssetId::Concrete, MultiAsset, MultiAssetFilter, MultiLocation}; +use frame_support::traits::{Contains, ContainsPair, Get}; +use sp_std::{marker::PhantomData, vec::Vec}; +use xcm::latest::{AssetId::Concrete, MultiAsset, MultiAssetFilter, MultiLocation, WildMultiAsset}; /// Accepts an asset iff it is a native asset. pub struct NativeAsset; @@ -40,3 +41,171 @@ impl> ContainsPair( + sp_std::marker::PhantomData<(Location, AssetFilters)>, +); +impl, AssetFilters: Get>> + Contains<(MultiLocation, Vec)> for LocationWithAssetFilters +{ + fn contains((location, assets): &(MultiLocation, Vec)) -> bool { + log::trace!(target: "xcm::contains", "LocationWithAssetFilters location: {:?}, assets: {:?}", location, assets); + + // `location` must match the `Location` filter. + if !Location::contains(location) { + return false + } + + // All `assets` must match at least one of the `AssetFilters`. + let filters = AssetFilters::get(); + assets.iter().all(|asset| { + for filter in &filters { + if filter.matches(asset) { + return true + } + } + false + }) + } +} + +/// Implementation of `Get>` which accepts every asset. +/// (For example, it can be used with `LocationWithAssetFilters`). +pub struct AllAssets; +impl Get> for AllAssets { + fn get() -> Vec { + sp_std::vec![MultiAssetFilter::Wild(WildMultiAsset::All)] + } +} + +#[cfg(test)] +mod tests { + use super::*; + use frame_support::traits::Equals; + use xcm::latest::prelude::*; + + #[test] + fn location_with_asset_filters_works() { + frame_support::parameter_types! 
{ + pub ParaA: MultiLocation = MultiLocation::new(1, X1(Parachain(1001))); + pub ParaB: MultiLocation = MultiLocation::new(1, X1(Parachain(1002))); + pub ParaC: MultiLocation = MultiLocation::new(1, X1(Parachain(1003))); + + pub AssetXLocation: MultiLocation = MultiLocation::new(1, X1(GeneralIndex(1111))); + pub AssetYLocation: MultiLocation = MultiLocation::new(1, X1(GeneralIndex(2222))); + pub AssetZLocation: MultiLocation = MultiLocation::new(1, X1(GeneralIndex(3333))); + + pub OnlyAssetXOrAssetY: sp_std::vec::Vec = sp_std::vec![ + Wild(AllOf { fun: WildFungible, id: Concrete(AssetXLocation::get()) }), + Wild(AllOf { fun: WildFungible, id: Concrete(AssetYLocation::get()) }), + ]; + pub OnlyAssetZ: sp_std::vec::Vec = sp_std::vec![ + Wild(AllOf { fun: WildFungible, id: Concrete(AssetZLocation::get()) }) + ]; + } + + let test_data: Vec<(MultiLocation, Vec, bool)> = vec![ + (ParaA::get(), vec![(AssetXLocation::get(), 1).into()], true), + (ParaA::get(), vec![(AssetYLocation::get(), 1).into()], true), + (ParaA::get(), vec![(AssetZLocation::get(), 1).into()], false), + ( + ParaA::get(), + vec![(AssetXLocation::get(), 1).into(), (AssetYLocation::get(), 1).into()], + true, + ), + ( + ParaA::get(), + vec![(AssetXLocation::get(), 1).into(), (AssetZLocation::get(), 1).into()], + false, + ), + ( + ParaA::get(), + vec![(AssetYLocation::get(), 1).into(), (AssetZLocation::get(), 1).into()], + false, + ), + ( + ParaA::get(), + vec![ + (AssetXLocation::get(), 1).into(), + (AssetYLocation::get(), 1).into(), + (AssetZLocation::get(), 1).into(), + ], + false, + ), + (ParaB::get(), vec![(AssetXLocation::get(), 1).into()], false), + (ParaB::get(), vec![(AssetYLocation::get(), 1).into()], false), + (ParaB::get(), vec![(AssetZLocation::get(), 1).into()], true), + ( + ParaB::get(), + vec![(AssetXLocation::get(), 1).into(), (AssetYLocation::get(), 1).into()], + false, + ), + ( + ParaB::get(), + vec![(AssetXLocation::get(), 1).into(), (AssetZLocation::get(), 1).into()], + false, + ), + ( + ParaB::get(), + vec![(AssetYLocation::get(), 1).into(), (AssetZLocation::get(), 1).into()], + false, + ), + ( + ParaB::get(), + vec![ + (AssetXLocation::get(), 1).into(), + (AssetYLocation::get(), 1).into(), + (AssetZLocation::get(), 1).into(), + ], + false, + ), + (ParaC::get(), vec![(AssetXLocation::get(), 1).into()], true), + (ParaC::get(), vec![(AssetYLocation::get(), 1).into()], true), + (ParaC::get(), vec![(AssetZLocation::get(), 1).into()], true), + ( + ParaC::get(), + vec![(AssetXLocation::get(), 1).into(), (AssetYLocation::get(), 1).into()], + true, + ), + ( + ParaC::get(), + vec![(AssetXLocation::get(), 1).into(), (AssetZLocation::get(), 1).into()], + true, + ), + ( + ParaC::get(), + vec![(AssetYLocation::get(), 1).into(), (AssetZLocation::get(), 1).into()], + true, + ), + ( + ParaC::get(), + vec![ + (AssetXLocation::get(), 1).into(), + (AssetYLocation::get(), 1).into(), + (AssetZLocation::get(), 1).into(), + ], + true, + ), + ]; + + type Filter = ( + // For ParaA accept only asset X and Y. + LocationWithAssetFilters, OnlyAssetXOrAssetY>, + // For ParaB accept only asset Z. + LocationWithAssetFilters, OnlyAssetZ>, + // For ParaC accept all assets. 
+ LocationWithAssetFilters, AllAssets>, + ); + + for (location, assets, expected_result) in test_data { + assert_eq!( + Filter::contains(&(location, assets.clone())), + expected_result, + "expected_result: {expected_result} not matched for (location, assets): ({:?}, {:?})!", location, assets, + ) + } + } +} diff --git a/polkadot/xcm/xcm-builder/src/fungibles_adapter.rs b/polkadot/xcm/xcm-builder/src/fungibles_adapter.rs index d7fded01e2db7a0114bada2bdce5a06249907efc..63ce608824eb46eb7f91ca5510cfe5af1594803d 100644 --- a/polkadot/xcm/xcm-builder/src/fungibles_adapter.rs +++ b/polkadot/xcm/xcm-builder/src/fungibles_adapter.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Adapters to work with `frame_support::traits::tokens::fungibles` through XCM. +//! Adapters to work with [`frame_support::traits::fungibles`] through XCM. use frame_support::traits::{ tokens::{ @@ -34,7 +34,7 @@ impl< Assets: fungibles::Mutate, Matcher: MatchesFungibles, AccountIdConverter: ConvertLocation, - AccountId: Clone, // can't get away without it since Currency is generic over it. + AccountId: Eq + Clone, /* can't get away without it since Currency is generic over it. */ > TransactAsset for FungiblesTransferAdapter { fn internal_transfer_asset( @@ -150,7 +150,7 @@ impl< Assets: fungibles::Mutate, Matcher: MatchesFungibles, AccountIdConverter: ConvertLocation, - AccountId: Clone, // can't get away without it since Currency is generic over it. + AccountId: Eq + Clone, /* can't get away without it since Currency is generic over it. */ CheckAsset: AssetChecking, CheckingAccount: Get, > @@ -185,7 +185,7 @@ impl< Assets: fungibles::Mutate, Matcher: MatchesFungibles, AccountIdConverter: ConvertLocation, - AccountId: Clone, // can't get away without it since Currency is generic over it. + AccountId: Eq + Clone, /* can't get away without it since Currency is generic over it. */ CheckAsset: AssetChecking, CheckingAccount: Get, > TransactAsset @@ -274,7 +274,11 @@ impl< } } - fn deposit_asset(what: &MultiAsset, who: &MultiLocation, _context: &XcmContext) -> XcmResult { + fn deposit_asset( + what: &MultiAsset, + who: &MultiLocation, + _context: Option<&XcmContext>, + ) -> XcmResult { log::trace!( target: "xcm::fungibles_adapter", "deposit_asset what: {:?}, who: {:?}", @@ -321,7 +325,7 @@ impl< Assets: fungibles::Mutate, Matcher: MatchesFungibles, AccountIdConverter: ConvertLocation, - AccountId: Clone, // can't get away without it since Currency is generic over it. + AccountId: Eq + Clone, /* can't get away without it since Currency is generic over it. 
*/ CheckAsset: AssetChecking, CheckingAccount: Get, > TransactAsset @@ -371,7 +375,11 @@ impl< >::check_out(dest, what, context) } - fn deposit_asset(what: &MultiAsset, who: &MultiLocation, context: &XcmContext) -> XcmResult { + fn deposit_asset( + what: &MultiAsset, + who: &MultiLocation, + context: Option<&XcmContext>, + ) -> XcmResult { FungiblesMutateAdapter::< Assets, Matcher, diff --git a/polkadot/xcm/xcm-builder/src/lib.rs b/polkadot/xcm/xcm-builder/src/lib.rs index 26f2aadadf3de61d74eb6258d2a697477733dc52..455f17a5348cd9b84acb7cd6fea9581babd903b2 100644 --- a/polkadot/xcm/xcm-builder/src/lib.rs +++ b/polkadot/xcm/xcm-builder/src/lib.rs @@ -33,9 +33,9 @@ pub use location_conversion::{ Account32Hash, AccountId32Aliases, AccountKey20Aliases, AliasesIntoAccountId32, ChildParachainConvertsVia, DescribeAccountId32Terminal, DescribeAccountIdTerminal, DescribeAccountKey20Terminal, DescribeAllTerminal, DescribeBodyTerminal, DescribeFamily, - DescribeLocation, DescribePalletTerminal, DescribeTerminus, GlobalConsensusConvertsFor, - GlobalConsensusParachainConvertsFor, HashedDescription, ParentIsPreset, - SiblingParachainConvertsVia, + DescribeLocation, DescribePalletTerminal, DescribeTerminus, DescribeTreasuryVoiceTerminal, + GlobalConsensusConvertsFor, GlobalConsensusParachainConvertsFor, HashedDescription, + LocalTreasuryVoiceConvertsVia, ParentIsPreset, SiblingParachainConvertsVia, }; mod origin_conversion; @@ -67,6 +67,11 @@ pub use process_xcm_message::ProcessXcmMessage; mod currency_adapter; pub use currency_adapter::CurrencyAdapter; +mod fee_handling; +pub use fee_handling::{ + deposit_or_burn_fee, HandleFee, XcmFeeManagerFromComponents, XcmFeeToAccount, +}; + mod fungibles_adapter; pub use fungibles_adapter::{ AssetChecking, DualMint, FungiblesAdapter, FungiblesMutateAdapter, FungiblesTransferAdapter, @@ -83,6 +88,9 @@ pub use weight::{ FixedRateOfFungible, FixedWeightBounds, TakeRevenue, UsingComponents, WeightInfoBounds, }; +mod matches_location; +pub use matches_location::{StartsWith, StartsWithExplicitGlobalConsensus}; + mod matches_token; pub use matches_token::{IsAbstract, IsConcrete}; @@ -90,7 +98,7 @@ mod matcher; pub use matcher::{CreateMatcher, MatchXcm, Matcher}; mod filter_asset_location; -pub use filter_asset_location::{Case, NativeAsset}; +pub use filter_asset_location::{AllAssets, Case, LocationWithAssetFilters, NativeAsset}; mod routing; pub use routing::{WithTopicSource, WithUniqueTopic}; @@ -99,7 +107,7 @@ mod universal_exports; pub use universal_exports::{ ensure_is_remote, BridgeBlobDispatcher, BridgeMessage, DispatchBlob, DispatchBlobError, ExporterFor, HaulBlob, HaulBlobError, HaulBlobExporter, NetworkExportTable, - SovereignPaidRemoteExporter, UnpaidLocalExporter, UnpaidRemoteExporter, + NetworkExportTableItem, SovereignPaidRemoteExporter, UnpaidLocalExporter, UnpaidRemoteExporter, }; mod origin_aliases; @@ -107,3 +115,9 @@ pub use origin_aliases::AliasForeignAccountId32; mod pay; pub use pay::{FixedLocation, LocatableAssetId, PayAccountId32OnChainOverXcm, PayOverXcm}; + +mod controller; +pub use controller::{ + Controller, ExecuteController, ExecuteControllerWeightInfo, QueryController, + QueryControllerWeightInfo, QueryHandler, SendController, SendControllerWeightInfo, +}; diff --git a/polkadot/xcm/xcm-builder/src/location_conversion.rs b/polkadot/xcm/xcm-builder/src/location_conversion.rs index 4a5fbbb123be167a60997e9dac19a33bd191b954..25d16f7eb8ccc7c6bc40e4454e77be012de172b4 100644 --- a/polkadot/xcm/xcm-builder/src/location_conversion.rs +++ 
b/polkadot/xcm/xcm-builder/src/location_conversion.rs @@ -84,6 +84,20 @@ impl DescribeLocation for DescribeAccountKey20Terminal { } } +/// Create a description of the remote treasury `location` if possible. No two locations should have +/// the same descriptor. +pub struct DescribeTreasuryVoiceTerminal; + +impl DescribeLocation for DescribeTreasuryVoiceTerminal { + fn describe_location(l: &MultiLocation) -> Option> { + match (l.parents, &l.interior) { + (0, X1(Plurality { id: BodyId::Treasury, part: BodyPart::Voice })) => + Some((b"Treasury", b"Voice").encode()), + _ => None, + } + } +} + pub type DescribeAccountIdTerminal = (DescribeAccountId32Terminal, DescribeAccountKey20Terminal); pub struct DescribeBodyTerminal; @@ -101,6 +115,7 @@ pub type DescribeAllTerminal = ( DescribePalletTerminal, DescribeAccountId32Terminal, DescribeAccountKey20Terminal, + DescribeTreasuryVoiceTerminal, DescribeBodyTerminal, ); @@ -328,6 +343,25 @@ impl>, AccountId: From<[u8; 32]> + Into<[u8; 32]> } } +/// Returns specified `TreasuryAccount` as `AccountId32` if passed `location` matches Treasury +/// plurality. +pub struct LocalTreasuryVoiceConvertsVia( + PhantomData<(TreasuryAccount, AccountId)>, +); +impl, AccountId: From<[u8; 32]> + Into<[u8; 32]> + Clone> + ConvertLocation for LocalTreasuryVoiceConvertsVia +{ + fn convert_location(location: &MultiLocation) -> Option { + match *location { + MultiLocation { + parents: 0, + interior: X1(Plurality { id: BodyId::Treasury, part: BodyPart::Voice }), + } => Some((TreasuryAccount::get().into() as [u8; 32]).into()), + _ => None, + } + } +} + /// Conversion implementation which converts from a `[u8; 32]`-based `AccountId` into a /// `MultiLocation` consisting solely of a `AccountId32` junction with a fixed value for its /// network (provided by `Network`) and the `AccountId`'s `[u8; 32]` datum for the `id`. 
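Taken together, the two converters above give a runtime a deterministic account for remote treasuries (via the hashed family description) and a fixed, well-known account for its own treasury plurality. A sketch of how they might be slotted into a location-to-account tuple; `TreasuryAccount` and the `AccountId32` choice are placeholders:

use frame_support::parameter_types;
use sp_runtime::AccountId32 as AccountId;
use xcm_builder::{
    DescribeFamily, DescribeTreasuryVoiceTerminal, HashedDescription, LocalTreasuryVoiceConvertsVia,
};

parameter_types! {
    // Placeholder: the account the local `Plurality { Treasury, Voice }` origin maps to.
    pub TreasuryAccount: AccountId = AccountId::new([42u8; 32]);
}

/// Converters are tried in order by the executor.
pub type TreasuryLocationToAccountId = (
    // Parent or sibling treasury plurality, described via its family and hashed into a
    // deterministic 32-byte account.
    HashedDescription<AccountId, DescribeFamily<DescribeTreasuryVoiceTerminal>>,
    // The local treasury plurality (`parents: 0`) mapped to the configured account.
    LocalTreasuryVoiceConvertsVia<TreasuryAccount, AccountId>,
);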
@@ -442,10 +476,13 @@ impl #[cfg(test)] mod tests { use super::*; - + use primitives::AccountId; pub type ForeignChainAliasAccount = HashedDescription; + pub type ForeignChainAliasTreasuryAccount = + HashedDescription>; + use frame_support::parameter_types; use xcm::latest::Junction; @@ -936,4 +973,70 @@ mod tests { }; assert!(ForeignChainAliasAccount::<[u8; 32]>::convert_location(&mul).is_none()); } + + #[test] + fn remote_account_convert_on_para_sending_from_remote_para_treasury() { + let relay_treasury_to_para_location = MultiLocation { + parents: 1, + interior: X1(Plurality { id: BodyId::Treasury, part: BodyPart::Voice }), + }; + let actual_description = ForeignChainAliasTreasuryAccount::<[u8; 32]>::convert_location( + &relay_treasury_to_para_location, + ) + .unwrap(); + + assert_eq!( + [ + 18, 84, 93, 74, 187, 212, 254, 71, 192, 127, 112, 51, 3, 42, 54, 24, 220, 185, 161, + 67, 205, 154, 108, 116, 108, 166, 226, 211, 29, 11, 244, 115 + ], + actual_description + ); + + let para_to_para_treasury_location = MultiLocation { + parents: 1, + interior: X2( + Parachain(1001), + Plurality { id: BodyId::Treasury, part: BodyPart::Voice }, + ), + }; + let actual_description = ForeignChainAliasTreasuryAccount::<[u8; 32]>::convert_location( + ¶_to_para_treasury_location, + ) + .unwrap(); + + assert_eq!( + [ + 202, 52, 249, 30, 7, 99, 135, 128, 153, 139, 176, 141, 138, 234, 163, 150, 7, 36, + 204, 92, 220, 137, 87, 57, 73, 91, 243, 189, 245, 200, 217, 204 + ], + actual_description + ); + } + + #[test] + fn local_account_convert_on_para_from_relay_treasury() { + let location = MultiLocation { + parents: 0, + interior: X1(Plurality { id: BodyId::Treasury, part: BodyPart::Voice }), + }; + + parameter_types! { + pub TreasuryAccountId: AccountId = AccountId::new([42u8; 32]); + } + + let actual_description = + LocalTreasuryVoiceConvertsVia::::convert_location( + &location, + ) + .unwrap(); + + assert_eq!( + [ + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42 + ], + actual_description + ); + } } diff --git a/polkadot/xcm/xcm-builder/src/matches_location.rs b/polkadot/xcm/xcm-builder/src/matches_location.rs new file mode 100644 index 0000000000000000000000000000000000000000..cfc71eafd0284b8c0c09695252fd49ac7e2a4b9c --- /dev/null +++ b/polkadot/xcm/xcm-builder/src/matches_location.rs @@ -0,0 +1,50 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Various implementations and utilities for matching and filtering `MultiLocation` and +//! `InteriorMultiLocation` types. + +use frame_support::traits::{Contains, Get}; +use xcm::latest::{InteriorMultiLocation, MultiLocation, NetworkId}; + +/// An implementation of `Contains` that checks for `MultiLocation` or +/// `InteriorMultiLocation` if starts with the provided type `T`. 
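The `matches_location` helpers introduced here are plain `Contains` filters, so they compose with the rest of the barrier and filter machinery. A small sketch of both matchers under an assumed bridged-Kusama prefix:

use frame_support::{parameter_types, traits::Contains};
use xcm::latest::prelude::*;
use xcm_builder::{StartsWith, StartsWithExplicitGlobalConsensus};

parameter_types! {
    // Placeholders: the bridged network and the prefix all of its locations share.
    pub BridgedNetwork: NetworkId = NetworkId::Kusama;
    pub BridgedPrefix: MultiLocation = MultiLocation::new(2, X1(GlobalConsensus(NetworkId::Kusama)));
}

fn sketch() {
    let sibling_under_bridge =
        MultiLocation::new(2, X2(GlobalConsensus(NetworkId::Kusama), Parachain(1000)));

    // Prefix match on the whole `MultiLocation` (parents and interior).
    assert!(<StartsWith<BridgedPrefix> as Contains<MultiLocation>>::contains(&sibling_under_bridge));

    // Match on the explicitly declared global consensus, regardless of what follows it.
    assert!(<StartsWithExplicitGlobalConsensus<BridgedNetwork> as Contains<MultiLocation>>::contains(
        &sibling_under_bridge
    ));
}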
+pub struct StartsWith(sp_std::marker::PhantomData); +impl> Contains for StartsWith { + fn contains(t: &MultiLocation) -> bool { + t.starts_with(&T::get()) + } +} +impl> Contains for StartsWith { + fn contains(t: &InteriorMultiLocation) -> bool { + t.starts_with(&T::get()) + } +} + +/// An implementation of `Contains` that checks for `MultiLocation` or +/// `InteriorMultiLocation` if starts with expected `GlobalConsensus(NetworkId)` provided as type +/// `T`. +pub struct StartsWithExplicitGlobalConsensus(sp_std::marker::PhantomData); +impl> Contains for StartsWithExplicitGlobalConsensus { + fn contains(location: &MultiLocation) -> bool { + matches!(location.interior.global_consensus(), Ok(requested_network) if requested_network.eq(&T::get())) + } +} +impl> Contains for StartsWithExplicitGlobalConsensus { + fn contains(location: &InteriorMultiLocation) -> bool { + matches!(location.global_consensus(), Ok(requested_network) if requested_network.eq(&T::get())) + } +} diff --git a/polkadot/xcm/xcm-builder/src/nonfungibles_adapter.rs b/polkadot/xcm/xcm-builder/src/nonfungibles_adapter.rs index 6cf5980df0e9e2227c8bda76a41493efcd248e32..357dc534a5f115fd2690c4e3cc5b52606e9e239b 100644 --- a/polkadot/xcm/xcm-builder/src/nonfungibles_adapter.rs +++ b/polkadot/xcm/xcm-builder/src/nonfungibles_adapter.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Adapters to work with `frame_support::traits::tokens::fungibles` through XCM. +//! Adapters to work with [`frame_support::traits::fungibles`] through XCM. use crate::{AssetChecking, MintLocation}; use frame_support::{ @@ -207,7 +207,11 @@ impl< } } - fn deposit_asset(what: &MultiAsset, who: &MultiLocation, context: &XcmContext) -> XcmResult { + fn deposit_asset( + what: &MultiAsset, + who: &MultiLocation, + context: Option<&XcmContext>, + ) -> XcmResult { log::trace!( target: LOG_TARGET, "deposit_asset what: {:?}, who: {:?}, context: {:?}", @@ -307,7 +311,11 @@ impl< >::check_out(dest, what, context) } - fn deposit_asset(what: &MultiAsset, who: &MultiLocation, context: &XcmContext) -> XcmResult { + fn deposit_asset( + what: &MultiAsset, + who: &MultiLocation, + context: Option<&XcmContext>, + ) -> XcmResult { NonFungiblesMutateAdapter::< Assets, Matcher, diff --git a/polkadot/xcm/xcm-builder/src/pay.rs b/polkadot/xcm/xcm-builder/src/pay.rs index 0f3a622f4ece1769a4314b507bcc4fd917b911f7..4c9b9a6088de87d006ee32c67a60d3da275e3154 100644 --- a/polkadot/xcm/xcm-builder/src/pay.rs +++ b/polkadot/xcm/xcm-builder/src/pay.rs @@ -110,11 +110,14 @@ impl< let message = Xcm(vec![ DescendOrigin(Interior::get()), UnpaidExecution { weight_limit: Unlimited, check_origin: None }, - SetAppendix(Xcm(vec![ReportError(QueryResponseInfo { - destination, - query_id, - max_weight: Weight::zero(), - })])), + SetAppendix(Xcm(vec![ + SetFeesMode { jit_withdraw: true }, + ReportError(QueryResponseInfo { + destination, + query_id, + max_weight: Weight::zero(), + }), + ])), TransferAsset { beneficiary, assets: vec![MultiAsset { id: asset_id, fun: Fungibility::Fungible(amount) }] diff --git a/polkadot/xcm/xcm-builder/src/process_xcm_message.rs b/polkadot/xcm/xcm-builder/src/process_xcm_message.rs index dba45a8310aca1cedac249a01eb0ded7435be98a..330ff40aac0fe4bccf3317171f5af8fa50dfb931 100644 --- a/polkadot/xcm/xcm-builder/src/process_xcm_message.rs +++ b/polkadot/xcm/xcm-builder/src/process_xcm_message.rs @@ -50,6 +50,7 @@ impl< let message = Xcm::::try_from(versioned_message) .map_err(|_| 
ProcessMessageError::Unsupported)?; let pre = XcmExecutor::prepare(message).map_err(|_| ProcessMessageError::Unsupported)?; + // The worst-case weight: let required = pre.weight_of(); ensure!(meter.can_consume(required), ProcessMessageError::Overweight(required)); diff --git a/polkadot/xcm/xcm-builder/src/routing.rs b/polkadot/xcm/xcm-builder/src/routing.rs index 39e9eab410bf14106b8817a8c2944cd0f5081f36..f4c18adddb3739af9526bdc5d95531668f7241c7 100644 --- a/polkadot/xcm/xcm-builder/src/routing.rs +++ b/polkadot/xcm/xcm-builder/src/routing.rs @@ -49,8 +49,7 @@ impl SendXcm for WithUniqueTopic { message.0.push(SetTopic(unique_id)); unique_id }; - let (ticket, assets) = Inner::validate(destination, &mut Some(message)) - .map_err(|_| SendError::NotApplicable)?; + let (ticket, assets) = Inner::validate(destination, &mut Some(message))?; Ok(((ticket, unique_id), assets)) } diff --git a/polkadot/xcm/xcm-builder/src/tests/bridging/local_para_para.rs b/polkadot/xcm/xcm-builder/src/tests/bridging/local_para_para.rs index 406843a0fe8ae69fe9b9754890c40e9138e45701..de08dbee953aa3265c584c87a65911ec015982b6 100644 --- a/polkadot/xcm/xcm-builder/src/tests/bridging/local_para_para.rs +++ b/polkadot/xcm/xcm-builder/src/tests/bridging/local_para_para.rs @@ -44,7 +44,7 @@ fn sending_to_bridged_chain_works() { maybe_with_topic(|| { let msg = Xcm(vec![Trap(1)]); let dest = (Parent, Parent, Remote::get(), Parachain(1)).into(); - assert_eq!(send_xcm::(dest, msg).unwrap().1, (Here, 100).into()); + assert_eq!(send_xcm::(dest, msg).unwrap().1, Price::get()); assert_eq!(TheBridge::service(), 1); assert_eq!( take_received_remote_messages(), @@ -78,7 +78,7 @@ fn sending_to_parachain_of_bridged_chain_works() { maybe_with_topic(|| { let msg = Xcm(vec![Trap(1)]); let dest = (Parent, Parent, Remote::get(), Parachain(1000)).into(); - assert_eq!(send_xcm::(dest, msg).unwrap().1, (Here, 100).into()); + assert_eq!(send_xcm::(dest, msg).unwrap().1, Price::get()); assert_eq!(TheBridge::service(), 1); let expected = vec![( (Parent, Parachain(1000)).into(), @@ -110,7 +110,7 @@ fn sending_to_relay_chain_of_bridged_chain_works() { maybe_with_topic(|| { let msg = Xcm(vec![Trap(1)]); let dest = (Parent, Parent, Remote::get()).into(); - assert_eq!(send_xcm::(dest, msg).unwrap().1, (Here, 100).into()); + assert_eq!(send_xcm::(dest, msg).unwrap().1, Price::get()); assert_eq!(TheBridge::service(), 1); let expected = vec![( Parent.into(), diff --git a/polkadot/xcm/xcm-builder/src/tests/bridging/local_relay_relay.rs b/polkadot/xcm/xcm-builder/src/tests/bridging/local_relay_relay.rs index 02c454bb2129184e14da7b1892de07b6aa6b7d73..8433b6e02129474403892f8dd2dbb10c82988078 100644 --- a/polkadot/xcm/xcm-builder/src/tests/bridging/local_relay_relay.rs +++ b/polkadot/xcm/xcm-builder/src/tests/bridging/local_relay_relay.rs @@ -41,7 +41,7 @@ fn sending_to_bridged_chain_works() { let msg = Xcm(vec![Trap(1)]); assert_eq!( send_xcm::((Parent, Remote::get()).into(), msg).unwrap().1, - (Here, 100).into() + Price::get() ); assert_eq!(TheBridge::service(), 1); let expected = vec![( @@ -68,7 +68,7 @@ fn sending_to_parachain_of_bridged_chain_works() { maybe_with_topic(|| { let msg = Xcm(vec![Trap(1)]); let dest = (Parent, Remote::get(), Parachain(1000)).into(); - assert_eq!(send_xcm::(dest, msg).unwrap().1, (Here, 100).into()); + assert_eq!(send_xcm::(dest, msg).unwrap().1, Price::get()); assert_eq!(TheBridge::service(), 1); let expected = vec![( Parachain(1000).into(), diff --git a/polkadot/xcm/xcm-builder/src/tests/bridging/paid_remote_relay_relay.rs 
b/polkadot/xcm/xcm-builder/src/tests/bridging/paid_remote_relay_relay.rs index 7593ea5f17c084ac7f31aaeb439e4dc7a19924c2..23d6eb99a90915ffa5b4c1af18c938e0735ba1ff 100644 --- a/polkadot/xcm/xcm-builder/src/tests/bridging/paid_remote_relay_relay.rs +++ b/polkadot/xcm/xcm-builder/src/tests/bridging/paid_remote_relay_relay.rs @@ -24,13 +24,20 @@ use super::*; parameter_types! { + // 100 to use the bridge (export) and 80 for the remote execution weight (4 instructions x (10 + + // 10) weight each). + pub SendOverBridgePrice: u128 = 180u128 + if UsingTopic::get() { 20 } else { 0 }; pub UniversalLocation: Junctions = X2(GlobalConsensus(Local::get()), Parachain(100)); pub RelayUniversalLocation: Junctions = X1(GlobalConsensus(Local::get())); pub RemoteUniversalLocation: Junctions = X1(GlobalConsensus(Remote::get())); - pub BridgeTable: Vec<(NetworkId, MultiLocation, Option)> - = vec![(Remote::get(), MultiLocation::parent(), Some((Parent, 200u128 + if UsingTopic::get() { 20 } else { 0 }).into()))]; - // ^^^ 100 to use the bridge (export) and 100 for the remote execution weight (5 instructions - // x (10 + 10) weight each). + pub BridgeTable: Vec = vec![ + NetworkExportTableItem::new( + Remote::get(), + None, + MultiLocation::parent(), + Some((Parent, SendOverBridgePrice::get()).into()) + ) + ]; } type TheBridge = TestBridge>; @@ -62,7 +69,7 @@ fn sending_to_bridged_chain_works() { clear_assets(Parachain(100)); add_asset(Parachain(100), (Here, 1000u128)); - let price = 200u128 + if UsingTopic::get() { 20 } else { 0 }; + let price = SendOverBridgePrice::get(); let msg = Xcm(vec![Trap(1)]); assert_eq!(send_xcm::(dest, msg).unwrap().1, (Parent, price).into()); @@ -80,7 +87,7 @@ fn sending_to_bridged_chain_works() { )]; assert_eq!(take_received_remote_messages(), expected); - // The export cost 50 ref time and 50 proof size weight units (and thus 100 units of + // The export cost 40 ref time and 40 proof size weight units (and thus 80 units of // balance). assert_eq!(asset_list(Parachain(100)), vec![(Here, 1000u128 - price).into()]); @@ -98,11 +105,10 @@ fn sending_to_bridged_chain_works() { destination: Here, xcm: xcm_with_topic([0; 32], vec![Trap(1)]), }, - RefundSurplus, DepositAsset { assets: Wild(All), beneficiary: Parachain(100).into() }, ], ), - outcome: Outcome::Complete(test_weight(5)), + outcome: Outcome::Complete(test_weight(4)), paid: true, }; assert_eq!(RoutingLog::take(), vec![entry]); @@ -137,7 +143,7 @@ fn sending_to_parachain_of_bridged_chain_works() { clear_assets(Parachain(100)); add_asset(Parachain(100), (Here, 1000u128)); - let price = 200u128 + if UsingTopic::get() { 20 } else { 0 }; + let price = SendOverBridgePrice::get(); let msg = Xcm(vec![Trap(1)]); assert_eq!(send_xcm::(dest, msg).unwrap().1, (Parent, price).into()); @@ -155,7 +161,7 @@ fn sending_to_parachain_of_bridged_chain_works() { )]; assert_eq!(take_received_remote_messages(), expected); - // The export cost 50 ref time and 50 proof size weight units (and thus 100 units of + // The export cost 40 ref time and 40 proof size weight units (and thus 80 units of // balance). 
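`NetworkExportTableItem` (used in the bridge table above and defined later in this patch in `universal_exports.rs`) adds an optional remote-location filter next to the network, bridge location and payment. A sketch of a table entry using that filter; the concrete locations and the 180-unit fee are placeholders:

use frame_support::parameter_types;
use xcm::latest::prelude::*;
use xcm_builder::{NetworkExportTable, NetworkExportTableItem};

parameter_types! {
    pub BridgeLocation: MultiLocation = MultiLocation::new(1, X1(Parachain(1002)));
    pub BridgeFee: MultiAsset = (MultiLocation::parent(), 180u128).into();

    pub BridgeTable: sp_std::vec::Vec<NetworkExportTableItem> = sp_std::vec![
        // Only `Parachain(1000)` under Kusama is reachable, and exporting costs `BridgeFee`;
        // a `None` filter would accept any remote location under that network.
        NetworkExportTableItem::new(
            NetworkId::Kusama,
            Some(sp_std::vec![X1(Parachain(1000))]),
            BridgeLocation::get(),
            Some(BridgeFee::get()),
        ),
    ];
}

// The `ExporterFor` implementation consulted by the remote exporter routers.
pub type Exporter = NetworkExportTable<BridgeTable>;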
assert_eq!(asset_list(Parachain(100)), vec![(Here, 1000u128 - price).into()]); @@ -173,11 +179,10 @@ fn sending_to_parachain_of_bridged_chain_works() { destination: Parachain(100).into(), xcm: xcm_with_topic([0; 32], vec![Trap(1)]), }, - RefundSurplus, DepositAsset { assets: Wild(All), beneficiary: Parachain(100).into() }, ], ), - outcome: Outcome::Complete(test_weight(5)), + outcome: Outcome::Complete(test_weight(4)), paid: true, }; assert_eq!(RoutingLog::take(), vec![entry]); diff --git a/polkadot/xcm/xcm-builder/src/tests/bridging/remote_para_para.rs b/polkadot/xcm/xcm-builder/src/tests/bridging/remote_para_para.rs index 124a909bc072b01810a8708841514f292725095a..f11143ab9f6fc9095bc6bdcc220e5742e86a9bcc 100644 --- a/polkadot/xcm/xcm-builder/src/tests/bridging/remote_para_para.rs +++ b/polkadot/xcm/xcm-builder/src/tests/bridging/remote_para_para.rs @@ -24,8 +24,14 @@ parameter_types! { pub UniversalLocation: Junctions = X2(GlobalConsensus(Local::get()), Parachain(1000)); pub ParaBridgeUniversalLocation: Junctions = X2(GlobalConsensus(Local::get()), Parachain(1)); pub RemoteParaBridgeUniversalLocation: Junctions = X2(GlobalConsensus(Remote::get()), Parachain(1)); - pub BridgeTable: Vec<(NetworkId, MultiLocation, Option)> - = vec![(Remote::get(), (Parent, Parachain(1)).into(), None)]; + pub BridgeTable: Vec = vec![ + NetworkExportTableItem::new( + Remote::get(), + None, + (Parent, Parachain(1)).into(), + None + ) + ]; } type TheBridge = TestBridge< BridgeBlobDispatcher, diff --git a/polkadot/xcm/xcm-builder/src/tests/bridging/remote_para_para_via_relay.rs b/polkadot/xcm/xcm-builder/src/tests/bridging/remote_para_para_via_relay.rs index 2ff1f9fb9764695b5290a0b34c50d38f19fc84fb..7218e0a04880fdb2a49ec470385e551f1ab884ac 100644 --- a/polkadot/xcm/xcm-builder/src/tests/bridging/remote_para_para_via_relay.rs +++ b/polkadot/xcm/xcm-builder/src/tests/bridging/remote_para_para_via_relay.rs @@ -24,8 +24,14 @@ parameter_types! { pub UniversalLocation: Junctions = X1(GlobalConsensus(Local::get())); pub ParaBridgeUniversalLocation: Junctions = X2(GlobalConsensus(Local::get()), Parachain(1)); pub RemoteParaBridgeUniversalLocation: Junctions = X2(GlobalConsensus(Remote::get()), Parachain(1)); - pub BridgeTable: Vec<(NetworkId, MultiLocation, Option)> - = vec![(Remote::get(), Parachain(1).into(), None)]; + pub BridgeTable: Vec = vec![ + NetworkExportTableItem::new( + Remote::get(), + None, + Parachain(1).into(), + None + ) + ]; } type TheBridge = TestBridge< BridgeBlobDispatcher, diff --git a/polkadot/xcm/xcm-builder/src/tests/bridging/remote_relay_relay.rs b/polkadot/xcm/xcm-builder/src/tests/bridging/remote_relay_relay.rs index cdd6b89b7b1b53cfe789715ae7cded58e7830e66..45b5efbc44c549b9a68fbcfc968a6589854d74df 100644 --- a/polkadot/xcm/xcm-builder/src/tests/bridging/remote_relay_relay.rs +++ b/polkadot/xcm/xcm-builder/src/tests/bridging/remote_relay_relay.rs @@ -24,8 +24,14 @@ parameter_types! 
{ pub UniversalLocation: Junctions = X2(GlobalConsensus(Local::get()), Parachain(1000)); pub RelayUniversalLocation: Junctions = X1(GlobalConsensus(Local::get())); pub RemoteUniversalLocation: Junctions = X1(GlobalConsensus(Remote::get())); - pub BridgeTable: Vec<(NetworkId, MultiLocation, Option)> - = vec![(Remote::get(), MultiLocation::parent(), None)]; + pub BridgeTable: Vec = vec![ + NetworkExportTableItem::new( + Remote::get(), + None, + MultiLocation::parent(), + None + ) + ]; } type TheBridge = TestBridge>; diff --git a/polkadot/xcm/xcm-builder/src/tests/mock.rs b/polkadot/xcm/xcm-builder/src/tests/mock.rs index e73b62f007314ae06034f35030e4474cc290183d..189274eb5f5b8034f5bf0aa5ccfc26700fb3aa95 100644 --- a/polkadot/xcm/xcm-builder/src/tests/mock.rs +++ b/polkadot/xcm/xcm-builder/src/tests/mock.rs @@ -253,7 +253,7 @@ impl TransactAsset for TestAssetTransactor { fn deposit_asset( what: &MultiAsset, who: &MultiLocation, - _context: &XcmContext, + _context: Option<&XcmContext>, ) -> Result<(), XcmError> { add_asset(*who, what.clone()); Ok(()) @@ -414,7 +414,7 @@ pub fn response(query_id: u64) -> Option { /// Mock implementation of the [`QueryHandler`] trait for creating XCM success queries and expecting /// responses. pub struct TestQueryHandler(core::marker::PhantomData<(T, BlockNumber)>); -impl QueryHandler +impl QueryHandler for TestQueryHandler { type QueryId = u64; @@ -526,7 +526,8 @@ impl FeeManager for TestFeeManager { fn is_waived(_: Option<&MultiLocation>, r: FeeReason) -> bool { IS_WAIVED.with(|l| l.borrow().contains(&r)) } - fn handle_fee(_: MultiAssets) {} + + fn handle_fee(_: MultiAssets, _: Option<&XcmContext>, _: FeeReason) {} } #[derive(Clone, Eq, PartialEq, Debug)] diff --git a/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs b/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs index 5b6fa3ee5a0b05fbfb868f813b673e749dfaaf18..78b9284c689fec49c1f35d193189badfb61fac26 100644 --- a/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs +++ b/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs @@ -75,6 +75,7 @@ impl pallet_balances::Config for Test { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); type MaxHolds = ConstU32<0>; type MaxFreezes = ConstU32<0>; @@ -245,11 +246,6 @@ type SovereignAccountOf = ( HashedDescription>, ); -#[cfg(feature = "runtime-benchmarks")] -parameter_types! 
{ - pub ReachableDest: Option = Some(Parachain(1000).into()); -} - impl pallet_xcm::Config for Test { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = EnsureXcmOrigin; @@ -273,8 +269,6 @@ impl pallet_xcm::Config for Test { type MaxRemoteLockConsumers = frame_support::traits::ConstU32<0>; type RemoteLockConsumerIdentifier = (); type WeightInfo = pallet_xcm::TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; } diff --git a/polkadot/xcm/xcm-builder/src/tests/pay/pay.rs b/polkadot/xcm/xcm-builder/src/tests/pay/pay.rs index 491a2bcef7a0c8a3d026e13cf48f226161c0b9cf..178b93842736a789913a7fc297d4441432414c03 100644 --- a/polkadot/xcm/xcm-builder/src/tests/pay/pay.rs +++ b/polkadot/xcm/xcm-builder/src/tests/pay/pay.rs @@ -71,11 +71,14 @@ fn pay_over_xcm_works() { let expected_message = Xcm(vec![ DescendOrigin(AccountId32 { id: SenderAccount::get().into(), network: None }.into()), UnpaidExecution { weight_limit: Unlimited, check_origin: None }, - SetAppendix(Xcm(vec![ReportError(QueryResponseInfo { - destination: (Parent, Parachain(42)).into(), - query_id: 1, - max_weight: Weight::zero(), - })])), + SetAppendix(Xcm(vec![ + SetFeesMode { jit_withdraw: true }, + ReportError(QueryResponseInfo { + destination: (Parent, Parachain(42)).into(), + query_id: 1, + max_weight: Weight::zero(), + }), + ])), TransferAsset { assets: (Here, amount).into(), beneficiary: AccountId32 { id: recipient.clone().into(), network: None }.into(), @@ -130,11 +133,14 @@ fn pay_over_xcm_governance_body() { let expected_message = Xcm(vec![ DescendOrigin(Plurality { id: BodyId::Treasury, part: BodyPart::Voice }.into()), UnpaidExecution { weight_limit: Unlimited, check_origin: None }, - SetAppendix(Xcm(vec![ReportError(QueryResponseInfo { - destination: (Parent, Parachain(42)).into(), - query_id: 1, - max_weight: Weight::zero(), - })])), + SetAppendix(Xcm(vec![ + SetFeesMode { jit_withdraw: true }, + ReportError(QueryResponseInfo { + destination: (Parent, Parachain(42)).into(), + query_id: 1, + max_weight: Weight::zero(), + }), + ])), TransferAsset { assets: (Parent, amount).into(), beneficiary: AccountId32 { id: recipient.clone().into(), network: None }.into(), diff --git a/polkadot/xcm/xcm-builder/src/tests/pay/salary.rs b/polkadot/xcm/xcm-builder/src/tests/pay/salary.rs index 1d40345f302abb725fac66da866b3a81ab73805c..e490fe326b372371dd31f9e554effa986901057a 100644 --- a/polkadot/xcm/xcm-builder/src/tests/pay/salary.rs +++ b/polkadot/xcm/xcm-builder/src/tests/pay/salary.rs @@ -148,11 +148,14 @@ fn salary_pay_over_xcm_works() { let expected_message: Xcm = Xcm::(vec![ DescendOrigin(Plurality { id: BodyId::Treasury, part: BodyPart::Voice }.into()), UnpaidExecution { weight_limit: Unlimited, check_origin: None }, - SetAppendix(Xcm(vec![ReportError(QueryResponseInfo { - destination: (Parent, Parachain(42)).into(), - query_id: 1, - max_weight: Weight::zero(), - })])), + SetAppendix(Xcm(vec![ + SetFeesMode { jit_withdraw: true }, + ReportError(QueryResponseInfo { + destination: (Parent, Parachain(42)).into(), + query_id: 1, + max_weight: Weight::zero(), + }), + ])), TransferAsset { assets: (AssetHubAssetId::get(), FixedSalaryAmount::get()).into(), beneficiary: AccountId32 { id: recipient.clone().into(), network: None }.into(), diff --git a/polkadot/xcm/xcm-builder/src/universal_exports.rs b/polkadot/xcm/xcm-builder/src/universal_exports.rs index a24631ffa9423c31c78b6f2860ab3df48e502725..8e2cf88b3c32a1d25ca9d1c48ce2a8717de00c69 100644 --- 
a/polkadot/xcm/xcm-builder/src/universal_exports.rs +++ b/polkadot/xcm/xcm-builder/src/universal_exports.rs @@ -117,16 +117,54 @@ impl ExporterFor for Tuple { } } +/// Configuration item representing a single exporter in the `NetworkExportTable`. +pub struct NetworkExportTableItem { + /// Supported remote network. + pub remote_network: NetworkId, + /// Remote location filter. + /// If `Some`, the requested remote location must be equal to one of the items in the vector. + /// These are locations in the remote network. + /// If `None`, then the check is skipped. + pub remote_location_filter: Option>, + /// Locally-routable bridge with bridging capabilities to the `remote_network` and + /// `remote_location`. See [`ExporterFor`] for more details. + pub bridge: MultiLocation, + /// The local payment. + /// See [`ExporterFor`] for more details. + pub payment: Option, +} + +impl NetworkExportTableItem { + pub fn new( + remote_network: NetworkId, + remote_location_filter: Option>, + bridge: MultiLocation, + payment: Option, + ) -> Self { + Self { remote_network, remote_location_filter, bridge, payment } + } +} + +/// An adapter for the implementation of `ExporterFor`, which attempts to find the +/// `(bridge_location, payment)` for the requested `network` and `remote_location` in the provided +/// `T` table containing various exporters. pub struct NetworkExportTable(sp_std::marker::PhantomData); -impl)>>> ExporterFor - for NetworkExportTable -{ +impl>> ExporterFor for NetworkExportTable { fn exporter_for( network: &NetworkId, - _: &InteriorMultiLocation, + remote_location: &InteriorMultiLocation, _: &Xcm<()>, ) -> Option<(MultiLocation, Option)> { - T::get().into_iter().find(|(ref j, ..)| j == network).map(|(_, l, p)| (l, p)) + T::get() + .into_iter() + .find(|item| { + &item.remote_network == network && + item.remote_location_filter + .as_ref() + .map(|filters| filters.iter().any(|filter| filter == remote_location)) + .unwrap_or(true) + }) + .map(|item| (item.bridge, item.payment)) } } @@ -267,7 +305,6 @@ impl, + pub universal_dest: VersionedInteriorMultiLocation, + pub message: VersionedXcm<()>, } #[derive(Clone, Copy, Debug, PartialEq, Eq)] @@ -504,7 +541,7 @@ mod tests { pub UniversalLocation: InteriorMultiLocation = X2(GlobalConsensus(Local::get()), Parachain(1234)); pub DifferentRemote: NetworkId = ByGenesis([22; 32]); // no routers - pub BridgeTable: Vec<(NetworkId, MultiLocation, Option)> = vec![]; + pub BridgeTable: Vec = vec![]; } // check with local destination (should be remote) @@ -539,4 +576,74 @@ mod tests { >, >(remote_dest, |result| assert_eq!(Err(NotApplicable), result)); } + + #[test] + fn network_export_table_works() { + frame_support::parameter_types! { + pub NetworkA: NetworkId = ByGenesis([0; 32]); + pub Parachain1000InNetworkA: InteriorMultiLocation = X1(Parachain(1000)); + pub Parachain2000InNetworkA: InteriorMultiLocation = X1(Parachain(2000)); + + pub NetworkB: NetworkId = ByGenesis([1; 32]); + + pub BridgeToALocation: MultiLocation = MultiLocation::new(1, X1(Parachain(1234))); + pub BridgeToBLocation: MultiLocation = MultiLocation::new(1, X1(Parachain(4321))); + + pub PaymentForNetworkAAndParachain2000: MultiAsset = (MultiLocation::parent(), 150).into(); + + pub BridgeTable: sp_std::vec::Vec = sp_std::vec![ + // NetworkA allows `Parachain(1000)` as remote location WITHOUT payment. 
+ NetworkExportTableItem::new( + NetworkA::get(), + Some(vec![Parachain1000InNetworkA::get()]), + BridgeToALocation::get(), + None + ), + // NetworkA allows `Parachain(2000)` as remote location WITH payment. + NetworkExportTableItem::new( + NetworkA::get(), + Some(vec![Parachain2000InNetworkA::get()]), + BridgeToALocation::get(), + Some(PaymentForNetworkAAndParachain2000::get()) + ), + // NetworkB allows all remote location. + NetworkExportTableItem::new( + NetworkB::get(), + None, + BridgeToBLocation::get(), + None + ) + ]; + } + + let test_data = vec![ + (NetworkA::get(), X1(Parachain(1000)), Some((BridgeToALocation::get(), None))), + (NetworkA::get(), X2(Parachain(1000), GeneralIndex(1)), None), + ( + NetworkA::get(), + X1(Parachain(2000)), + Some((BridgeToALocation::get(), Some(PaymentForNetworkAAndParachain2000::get()))), + ), + (NetworkA::get(), X2(Parachain(2000), GeneralIndex(1)), None), + (NetworkA::get(), X1(Parachain(3000)), None), + (NetworkB::get(), X1(Parachain(1000)), Some((BridgeToBLocation::get(), None))), + (NetworkB::get(), X1(Parachain(2000)), Some((BridgeToBLocation::get(), None))), + (NetworkB::get(), X1(Parachain(3000)), Some((BridgeToBLocation::get(), None))), + ]; + + for (network, remote_location, expected_result) in test_data { + assert_eq!( + NetworkExportTable::::exporter_for( + &network, + &remote_location, + &Xcm::default() + ), + expected_result, + "expected_result: {:?} not matched for network: {:?} and remote_location: {:?}", + expected_result, + network, + remote_location, + ) + } + } } diff --git a/polkadot/xcm/xcm-builder/tests/mock/mod.rs b/polkadot/xcm/xcm-builder/tests/mock/mod.rs index 363748940ca624e12212733a897db3a1d840c531..4f183c7a15b622a1f75763ea0877440d5560bd49 100644 --- a/polkadot/xcm/xcm-builder/tests/mock/mod.rs +++ b/polkadot/xcm/xcm-builder/tests/mock/mod.rs @@ -119,6 +119,7 @@ impl pallet_balances::Config for Runtime { type MaxReserves = MaxReserves; type ReserveIdentifier = [u8; 8]; type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); type MaxHolds = ConstU32<0>; type MaxFreezes = ConstU32<0>; @@ -171,13 +172,13 @@ pub type Barrier = ( ); parameter_types! { - pub KusamaForStatemine: (MultiAssetFilter, MultiLocation) = + pub KusamaForAssetHub: (MultiAssetFilter, MultiLocation) = (Wild(AllOf { id: Concrete(Here.into()), fun: WildFungible }), Parachain(1000).into()); pub const MaxInstructions: u32 = 100; pub const MaxAssetsIntoHolding: u32 = 4; } -pub type TrustedTeleporters = (xcm_builder::Case,); +pub type TrustedTeleporters = (xcm_builder::Case,); pub struct XcmConfig; impl xcm_executor::Config for XcmConfig { @@ -209,11 +210,6 @@ impl xcm_executor::Config for XcmConfig { pub type LocalOriginToLocation = SignedToAccountId32; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! 
{ - pub ReachableDest: Option = Some(Here.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type UniversalLocation = UniversalLocation; @@ -238,8 +234,6 @@ impl pallet_xcm::Config for Runtime { type MaxRemoteLockConsumers = frame_support::traits::ConstU32<0>; type RemoteLockConsumerIdentifier = (); type WeightInfo = pallet_xcm::TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; } diff --git a/polkadot/xcm/xcm-builder/tests/scenarios.rs b/polkadot/xcm/xcm-builder/tests/scenarios.rs index 426efcaccdbaf7f2dec0894e8b615bd0e199c188..36780b9f0078786a7e061858f30e76eaeac34a96 100644 --- a/polkadot/xcm/xcm-builder/tests/scenarios.rs +++ b/polkadot/xcm/xcm-builder/tests/scenarios.rs @@ -174,7 +174,7 @@ fn report_holding_works() { } /// Scenario: -/// A parachain wants to move KSM from Kusama to Statemine. +/// A parachain wants to move KSM from Kusama to Asset Hub. /// The parachain sends an XCM to withdraw funds combined with a teleport to the destination. /// /// This way of moving funds from a relay to a parachain will only work for trusted chains. @@ -182,12 +182,12 @@ fn report_holding_works() { /// /// Asserts that the balances are updated accordingly and the correct XCM is sent. #[test] -fn teleport_to_statemine_works() { +fn teleport_to_asset_hub_works() { use xcm::opaque::latest::prelude::*; let para_acc: AccountId = ParaId::from(PARA_ID).into_account_truncating(); let balances = vec![(ALICE, INITIAL_BALANCE), (para_acc.clone(), INITIAL_BALANCE)]; kusama_like_with_balances(balances).execute_with(|| { - let statemine_id = 1000; + let asset_hub_id = 1000; let other_para_id = 3000; let amount = REGISTER_AMOUNT; let teleport_effects = vec![ @@ -222,13 +222,13 @@ fn teleport_to_statemine_works() { vec![(Parachain(other_para_id).into(), expected_msg, expected_hash,)] ); - // teleports are allowed from statemine to kusama. + // teleports are allowed from asset hub to kusama. 
let message = Xcm(vec![ WithdrawAsset((Here, amount).into()), buy_execution(), InitiateTeleport { assets: All.into(), - dest: Parachain(statemine_id).into(), + dest: Parachain(asset_hub_id).into(), xcm: Xcm(teleport_effects.clone()), }, ]); @@ -246,7 +246,7 @@ fn teleport_to_statemine_works() { mock::sent_xcm(), vec![ (Parachain(other_para_id).into(), expected_msg.clone(), expected_hash,), - (Parachain(statemine_id).into(), expected_msg, expected_hash,) + (Parachain(asset_hub_id).into(), expected_msg, expected_hash,) ] ); }); diff --git a/polkadot/xcm/xcm-executor/Cargo.toml b/polkadot/xcm/xcm-executor/Cargo.toml index 902f55901d6c8195ac6af2a44ddb68a9ab177768..d5edb1ea0f5507de3d948c2c36bc9114e7f5209d 100644 --- a/polkadot/xcm/xcm-executor/Cargo.toml +++ b/polkadot/xcm/xcm-executor/Cargo.toml @@ -10,6 +10,7 @@ version = "1.0.0" impl-trait-for-tuples = "0.2.2" environmental = { version = "1.1.4", default-features = false } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive", "serde"] } xcm = { package = "staging-xcm", path = "..", default-features = false } sp-std = { path = "../../../substrate/primitives/std", default-features = false } sp-io = { path = "../../../substrate/primitives/io", default-features = false } @@ -29,10 +30,12 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", ] std = [ + "environmental/std", "frame-benchmarking/std", "frame-support/std", "log/std", "parity-scale-codec/std", + "scale-info/std", "sp-arithmetic/std", "sp-core/std", "sp-io/std", diff --git a/polkadot/xcm/xcm-executor/src/lib.rs b/polkadot/xcm/xcm-executor/src/lib.rs index a48cd3259d67a0ae0eaab55a373489122d1e8037..ac256ea14899c1a7305903b575a1178d6377b2b4 100644 --- a/polkadot/xcm/xcm-executor/src/lib.rs +++ b/polkadot/xcm/xcm-executor/src/lib.rs @@ -32,7 +32,7 @@ pub mod traits; use traits::{ validate_export, AssetExchange, AssetLock, CallDispatcher, ClaimAssets, ConvertOrigin, DropAssets, Enact, ExportXcm, FeeManager, FeeReason, OnResponse, Properties, ShouldExecute, - TransactAsset, VersionChangeNotifier, WeightBounds, WeightTrader, + TransactAsset, VersionChangeNotifier, WeightBounds, WeightTrader, XcmAssetTransfers, }; mod assets; @@ -248,12 +248,18 @@ impl ExecuteXcm for XcmExecutor XcmAssetTransfers for XcmExecutor { + type IsReserve = Config::IsReserve; + type IsTeleporter = Config::IsTeleporter; + type AssetTransactor = Config::AssetTransactor; +} + #[derive(Debug)] pub struct ExecutorError { pub index: u32, @@ -405,10 +411,7 @@ impl XcmExecutor { reason: FeeReason, ) -> Result { let (ticket, fee) = validate_send::(dest, msg)?; - if !Config::FeeManager::is_waived(self.origin_ref(), reason) { - let paid = self.holding.try_take(fee.into()).map_err(|_| XcmError::NotHoldingFees)?; - Config::FeeManager::handle_fee(paid.into()); - } + self.take_fee(fee, reason)?; Config::XcmSender::deliver(ticket).map_err(Into::into) } @@ -618,14 +621,18 @@ impl XcmExecutor { DepositAsset { assets, beneficiary } => { let deposited = self.holding.saturating_take(assets); for asset in deposited.into_assets_iter() { - Config::AssetTransactor::deposit_asset(&asset, &beneficiary, &self.context)?; + Config::AssetTransactor::deposit_asset( + &asset, + &beneficiary, + Some(&self.context), + )?; } Ok(()) }, DepositReserveAsset { assets, dest, xcm } => { let deposited = self.holding.saturating_take(assets); for asset in deposited.assets_iter() { - Config::AssetTransactor::deposit_asset(&asset, &dest, 
&self.context)?; + Config::AssetTransactor::deposit_asset(&asset, &dest, Some(&self.context))?; } // Note that we pass `None` as `maybe_failed_bin` and drop any assets which cannot // be reanchored because we have already called `deposit_asset` on all assets. @@ -850,7 +857,7 @@ impl XcmExecutor { destination, xcm, )?; - self.take_fee(fee, FeeReason::Export(network))?; + self.take_fee(fee, FeeReason::Export { network, destination })?; Config::MessageExporter::deliver(ticket)?; Ok(()) }, @@ -944,6 +951,14 @@ impl XcmExecutor { if Config::FeeManager::is_waived(self.origin_ref(), reason) { return Ok(()) } + log::trace!( + target: "xcm::fees", + "taking fee: {:?} from origin_ref: {:?} in fees_mode: {:?} for a reason: {:?}", + fee, + self.origin_ref(), + self.fees_mode, + reason, + ); let paid = if self.fees_mode.jit_withdraw { let origin = self.origin_ref().ok_or(XcmError::BadOrigin)?; for asset in fee.inner() { @@ -953,7 +968,7 @@ impl XcmExecutor { } else { self.holding.try_take(fee.into()).map_err(|_| XcmError::NotHoldingFees)?.into() }; - Config::FeeManager::handle_fee(paid); + Config::FeeManager::handle_fee(paid, Some(&self.context), reason); Ok(()) } @@ -985,12 +1000,7 @@ impl XcmExecutor { let QueryResponseInfo { destination, query_id, max_weight } = info; let instruction = QueryResponse { query_id, response, max_weight, querier }; let message = Xcm(vec![instruction]); - let (ticket, fee) = validate_send::(destination, message)?; - if !Config::FeeManager::is_waived(self.origin_ref(), fee_reason) { - let paid = self.holding.try_take(fee.into()).map_err(|_| XcmError::NotHoldingFees)?; - Config::FeeManager::handle_fee(paid.into()); - } - Config::XcmSender::deliver(ticket).map_err(Into::into) + self.send(destination, message, fee_reason) } fn try_reanchor( diff --git a/polkadot/xcm/xcm-executor/src/traits/asset_transfer.rs b/polkadot/xcm/xcm-executor/src/traits/asset_transfer.rs new file mode 100644 index 0000000000000000000000000000000000000000..5fdc9b15e01541e0f77d126b4cbf4c69ba09a254 --- /dev/null +++ b/polkadot/xcm/xcm-executor/src/traits/asset_transfer.rs @@ -0,0 +1,90 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use crate::traits::TransactAsset; +use frame_support::traits::ContainsPair; +use scale_info::TypeInfo; +use sp_runtime::codec::{Decode, Encode}; +use xcm::prelude::*; + +/// Errors related to determining asset transfer support. +#[derive(Copy, Clone, Encode, Decode, Eq, PartialEq, Debug, TypeInfo)] +pub enum Error { + /// Invalid non-concrete asset. + NotConcrete, + /// Reserve chain could not be determined for assets. + UnknownReserve, +} + +/// Specify which type of asset transfer is required for a particular `(asset, dest)` combination. 
+#[derive(Copy, Clone, PartialEq, Debug)] +pub enum TransferType { + /// should teleport `asset` to `dest` + Teleport, + /// should reserve-transfer `asset` to `dest`, using local chain as reserve + LocalReserve, + /// should reserve-transfer `asset` to `dest`, using `dest` as reserve + DestinationReserve, + /// should reserve-transfer `asset` to `dest`, using remote chain `MultiLocation` as reserve + RemoteReserve(MultiLocation), +} + +/// A trait for identifying asset transfer type based on `IsTeleporter` and `IsReserve` +/// configurations. +pub trait XcmAssetTransfers { + /// Combinations of (Asset, Location) pairs which we trust as reserves. Meaning + /// reserve-based-transfers are to be used for assets matching this filter. + type IsReserve: ContainsPair; + + /// Combinations of (Asset, Location) pairs which we trust as teleporters. Meaning teleports are + /// to be used for assets matching this filter. + type IsTeleporter: ContainsPair; + + /// How to withdraw and deposit an asset. + type AssetTransactor: TransactAsset; + + /// Determine transfer type to be used for transferring `asset` from local chain to `dest`. + fn determine_for(asset: &MultiAsset, dest: &MultiLocation) -> Result { + if Self::IsTeleporter::contains(asset, dest) { + // we trust destination for teleporting asset + return Ok(TransferType::Teleport) + } else if Self::IsReserve::contains(asset, dest) { + // we trust destination as asset reserve location + return Ok(TransferType::DestinationReserve) + } + + // try to determine reserve location based on asset id/location + let asset_location = match asset.id { + Concrete(location) => Ok(location.chain_location()), + _ => Err(Error::NotConcrete), + }?; + if asset_location == MultiLocation::here() || + Self::IsTeleporter::contains(asset, &asset_location) + { + // if the asset is local, then it's a local reserve + // it's also a local reserve if the asset's location is not `here` but it's a location + // where it can be teleported to `here` => local reserve + Ok(TransferType::LocalReserve) + } else if Self::IsReserve::contains(asset, &asset_location) { + // remote location that is recognized as reserve location for asset + Ok(TransferType::RemoteReserve(asset_location)) + } else { + // remote location that is not configured either as teleporter or reserve => cannot + // determine asset reserve + Err(Error::UnknownReserve) + } + } +} diff --git a/polkadot/xcm/xcm-executor/src/traits/fee_manager.rs b/polkadot/xcm/xcm-executor/src/traits/fee_manager.rs index ad2e108e7dee7fc29fe95765390936aab6d603fc..d7146457f3b993b319bf8fe70ae5e925854b744c 100644 --- a/polkadot/xcm/xcm-executor/src/traits/fee_manager.rs +++ b/polkadot/xcm/xcm-executor/src/traits/fee_manager.rs @@ -18,16 +18,16 @@ use xcm::prelude::*; /// Handle stuff to do with taking fees in certain XCM instructions. pub trait FeeManager { - /// Determine if a fee which would normally payable should be waived. + /// Determine if a fee should be waived. fn is_waived(origin: Option<&MultiLocation>, r: FeeReason) -> bool; /// Do something with the fee which has been paid. Doing nothing here silently burns the /// fees. - fn handle_fee(fee: MultiAssets); + fn handle_fee(fee: MultiAssets, context: Option<&XcmContext>, r: FeeReason); } /// Context under which a fee is paid. -#[derive(Copy, Clone, Eq, PartialEq)] +#[derive(Copy, Clone, Debug, Eq, PartialEq)] pub enum FeeReason { /// When a reporting instruction is called. Report, @@ -42,7 +42,7 @@ pub enum FeeReason { /// When the `QueryPallet` instruction is called. 
QueryPallet, /// When the `ExportMessage` instruction is called (and includes the network ID). - Export(NetworkId), + Export { network: NetworkId, destination: InteriorMultiLocation }, /// The `charge_fees` API. ChargeFees, /// When the `LockAsset` instruction is called. @@ -53,7 +53,8 @@ pub enum FeeReason { impl FeeManager for () { fn is_waived(_: Option<&MultiLocation>, _: FeeReason) -> bool { - true + false } - fn handle_fee(_: MultiAssets) {} + + fn handle_fee(_: MultiAssets, _: Option<&XcmContext>, _: FeeReason) {} } diff --git a/polkadot/xcm/xcm-executor/src/traits/mod.rs b/polkadot/xcm/xcm-executor/src/traits/mod.rs index a9439968fa6ca5b2d1bfae6097d985ae5a887f19..71e75c77e9394129c65505cc1daddb825fd4c077 100644 --- a/polkadot/xcm/xcm-executor/src/traits/mod.rs +++ b/polkadot/xcm/xcm-executor/src/traits/mod.rs @@ -20,10 +20,12 @@ mod conversion; pub use conversion::{CallDispatcher, ConvertLocation, ConvertOrigin, WithOriginFilter}; mod drop_assets; pub use drop_assets::{ClaimAssets, DropAssets}; -mod asset_lock; -pub use asset_lock::{AssetLock, Enact, LockError}; mod asset_exchange; pub use asset_exchange::AssetExchange; +mod asset_lock; +pub use asset_lock::{AssetLock, Enact, LockError}; +mod asset_transfer; +pub use asset_transfer::{Error as AssetTransferError, TransferType, XcmAssetTransfers}; mod export; pub use export::{export_xcm, validate_export, ExportXcm}; mod fee_manager; diff --git a/polkadot/xcm/xcm-executor/src/traits/on_response.rs b/polkadot/xcm/xcm-executor/src/traits/on_response.rs index 3558160dc87d37acc03833518875d980a458f4e2..ea41f242a97d03c3c23882073c1904705c47e7ba 100644 --- a/polkadot/xcm/xcm-executor/src/traits/on_response.rs +++ b/polkadot/xcm/xcm-executor/src/traits/on_response.rs @@ -14,10 +14,13 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use crate::Xcm; +use crate::{Junctions::Here, Xcm}; use core::result; -use frame_support::pallet_prelude::{Get, TypeInfo}; -use parity_scale_codec::{FullCodec, MaxEncodedLen}; +use frame_support::{ + pallet_prelude::{Get, TypeInfo}, + parameter_types, +}; +use parity_scale_codec::{Decode, Encode, FullCodec, MaxEncodedLen}; use sp_arithmetic::traits::Zero; use sp_std::fmt::Debug; use xcm::latest::{ @@ -103,7 +106,7 @@ impl VersionChangeNotifier for () { } /// The possible state of an XCM query response. -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq, Encode, Decode)] pub enum QueryResponseStatus { /// The response has arrived, and includes the inner Response and the block number it arrived /// at. @@ -129,7 +132,7 @@ pub trait QueryHandler { + PartialEq + Debug + Copy; - type BlockNumber: Zero; + type BlockNumber: Zero + Encode; type Error; type UniversalLocation: Get; @@ -165,3 +168,36 @@ pub trait QueryHandler { #[cfg(feature = "runtime-benchmarks")] fn expect_response(id: Self::QueryId, response: Response); } + +parameter_types! 
{ + pub UniversalLocation: InteriorMultiLocation = Here; +} + +impl QueryHandler for () { + type BlockNumber = u64; + type Error = (); + type QueryId = u64; + type UniversalLocation = UniversalLocation; + + fn take_response(_query_id: Self::QueryId) -> QueryResponseStatus { + QueryResponseStatus::NotFound + } + fn new_query( + _responder: impl Into, + _timeout: Self::BlockNumber, + _match_querier: impl Into, + ) -> Self::QueryId { + 0u64 + } + + fn report_outcome( + _message: &mut Xcm<()>, + _responder: impl Into, + _timeout: Self::BlockNumber, + ) -> Result { + Err(()) + } + + #[cfg(feature = "runtime-benchmarks")] + fn expect_response(_id: Self::QueryId, _response: crate::Response) {} +} diff --git a/polkadot/xcm/xcm-executor/src/traits/transact_asset.rs b/polkadot/xcm/xcm-executor/src/traits/transact_asset.rs index 34cdb0c7141356edd4113d576f45069a569eabe9..c51befff88a61d5a4c40422a66165c2f34daf8c3 100644 --- a/polkadot/xcm/xcm-executor/src/traits/transact_asset.rs +++ b/polkadot/xcm/xcm-executor/src/traits/transact_asset.rs @@ -87,7 +87,11 @@ pub trait TransactAsset { /// Deposit the `what` asset into the account of `who`. /// /// Implementations should return `XcmError::FailedToTransactAsset` if deposit failed. - fn deposit_asset(_what: &MultiAsset, _who: &MultiLocation, _context: &XcmContext) -> XcmResult { + fn deposit_asset( + _what: &MultiAsset, + _who: &MultiLocation, + _context: Option<&XcmContext>, + ) -> XcmResult { Err(XcmError::Unimplemented) } @@ -139,7 +143,7 @@ pub trait TransactAsset { Err(XcmError::AssetNotFound | XcmError::Unimplemented) => { let assets = Self::withdraw_asset(asset, from, Some(context))?; // Not a very forgiving attitude; once we implement roll-backs then it'll be nicer. - Self::deposit_asset(asset, to, context)?; + Self::deposit_asset(asset, to, Some(context))?; Ok(assets) }, result => result, @@ -195,7 +199,11 @@ impl TransactAsset for Tuple { )* ); } - fn deposit_asset(what: &MultiAsset, who: &MultiLocation, context: &XcmContext) -> XcmResult { + fn deposit_asset( + what: &MultiAsset, + who: &MultiLocation, + context: Option<&XcmContext>, + ) -> XcmResult { for_tuples!( #( match Tuple::deposit_asset(what, who, context) { Err(XcmError::AssetNotFound) | Err(XcmError::Unimplemented) => (), @@ -286,7 +294,7 @@ mod tests { fn deposit_asset( _what: &MultiAsset, _who: &MultiLocation, - _context: &XcmContext, + _context: Option<&XcmContext>, ) -> XcmResult { Err(XcmError::AssetNotFound) } @@ -330,7 +338,7 @@ mod tests { fn deposit_asset( _what: &MultiAsset, _who: &MultiLocation, - _context: &XcmContext, + _context: Option<&XcmContext>, ) -> XcmResult { Err(XcmError::Overflow) } @@ -374,7 +382,7 @@ mod tests { fn deposit_asset( _what: &MultiAsset, _who: &MultiLocation, - _context: &XcmContext, + _context: Option<&XcmContext>, ) -> XcmResult { Ok(()) } @@ -406,7 +414,7 @@ mod tests { MultiTransactor::deposit_asset( &(Here, 1u128).into(), &Here.into(), - &XcmContext::with_message_id([0; 32]), + Some(&XcmContext::with_message_id([0; 32])), ), Err(XcmError::AssetNotFound) ); @@ -420,7 +428,7 @@ mod tests { MultiTransactor::deposit_asset( &(Here, 1u128).into(), &Here.into(), - &XcmContext::with_message_id([0; 32]), + Some(&XcmContext::with_message_id([0; 32])), ), Ok(()) ); @@ -434,7 +442,7 @@ mod tests { MultiTransactor::deposit_asset( &(Here, 1u128).into(), &Here.into(), - &XcmContext::with_message_id([0; 32]), + Some(&XcmContext::with_message_id([0; 32])), ), Err(XcmError::Overflow) ); @@ -448,7 +456,7 @@ mod tests { MultiTransactor::deposit_asset( &(Here, 
1u128).into(), &Here.into(), - &XcmContext::with_message_id([0; 32]), + Some(&XcmContext::with_message_id([0; 32])), ), Ok(()), ); diff --git a/polkadot/xcm/xcm-simulator/example/Cargo.toml b/polkadot/xcm/xcm-simulator/example/Cargo.toml index 9c48b11f6226fd4a1af2a241cee91f37c010d1a1..f0caa5ab48ec821debdcac62f8de6a860caa1d2c 100644 --- a/polkadot/xcm/xcm-simulator/example/Cargo.toml +++ b/polkadot/xcm/xcm-simulator/example/Cargo.toml @@ -8,7 +8,7 @@ version = "1.0.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } -scale-info = { version = "2.5.0", features = ["derive"] } +scale-info = { version = "2.10.0", features = ["derive"] } log = { version = "0.4.14", default-features = false } frame-system = { path = "../../../../substrate/frame/system" } diff --git a/polkadot/xcm/xcm-simulator/example/src/parachain.rs b/polkadot/xcm/xcm-simulator/example/src/parachain.rs index bc7cba313828b76e6079dcaa9d7ba7f9342a046f..9f0411970ce7a3a7eabd6516dfb022e9db3942a1 100644 --- a/polkadot/xcm/xcm-simulator/example/src/parachain.rs +++ b/polkadot/xcm/xcm-simulator/example/src/parachain.rs @@ -106,6 +106,7 @@ impl pallet_balances::Config for Runtime { type MaxReserves = MaxReserves; type ReserveIdentifier = [u8; 8]; type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); type MaxHolds = ConstU32<0>; type MaxFreezes = ConstU32<0>; @@ -398,11 +399,6 @@ impl mock_msg_queue::Config for Runtime { pub type LocalOriginToLocation = SignedToAccountId32; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); -} - pub struct TrustedLockerCase(PhantomData); impl> ContainsPair for TrustedLockerCase @@ -442,8 +438,6 @@ impl pallet_xcm::Config for Runtime { type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); type WeightInfo = pallet_xcm::TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; } diff --git a/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs b/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs index 4e9195a8454f5091dde7bcbf2c841eaaeb148285..bdd7ff6d3eaf2522ba1c990055d915ce33a93923 100644 --- a/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs +++ b/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs @@ -92,6 +92,7 @@ impl pallet_balances::Config for Runtime { type MaxReserves = MaxReserves; type ReserveIdentifier = [u8; 8]; type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); type MaxHolds = ConstU32<0>; type MaxFreezes = ConstU32<0>; @@ -198,11 +199,6 @@ impl Config for XcmConfig { pub type LocalOriginToLocation = SignedToAccountId32; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! 
{ - pub ReachableDest: Option = Some(Parachain(1).into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; @@ -227,8 +223,6 @@ impl pallet_xcm::Config for Runtime { type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); type WeightInfo = pallet_xcm::TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; } diff --git a/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml b/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml index 0ca57e680d1ddff4ec784f49614d7747ea9cfb74..acf28bec4f19480ae8135cb4e7f1845fc8d76566 100644 --- a/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml +++ b/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml @@ -11,7 +11,7 @@ publish = false codec = { package = "parity-scale-codec", version = "3.6.1" } honggfuzz = "0.5.55" arbitrary = "1.2.0" -scale-info = { version = "2.5.0", features = ["derive"] } +scale-info = { version = "2.10.0", features = ["derive"] } frame-system = { path = "../../../../substrate/frame/system" } frame-support = { path = "../../../../substrate/frame/support" } diff --git a/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs b/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs index 95f875eca06e5c6fcb7d0d5610e1719a5f1b15b5..41234837aca0b559ecca1e195c8087879e4ff8ec 100644 --- a/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs +++ b/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs @@ -95,6 +95,7 @@ impl pallet_balances::Config for Runtime { type MaxReserves = MaxReserves; type ReserveIdentifier = [u8; 8]; type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); type MaxHolds = ConstU32<0>; type MaxFreezes = ConstU32<0>; @@ -312,11 +313,6 @@ impl mock_msg_queue::Config for Runtime { pub type LocalOriginToLocation = SignedToAccountId32; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = EnsureXcmOrigin; @@ -340,8 +336,6 @@ impl pallet_xcm::Config for Runtime { type MaxRemoteLockConsumers = frame_support::traits::ConstU32<0>; type RemoteLockConsumerIdentifier = (); type WeightInfo = pallet_xcm::TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; } diff --git a/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs b/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs index a29ead9e6c3beaed1ef5e625012e3866ef5808f1..c9a57db970a73fe80ef56dd72ecf4de670162288 100644 --- a/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs +++ b/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs @@ -91,6 +91,7 @@ impl pallet_balances::Config for Runtime { type MaxReserves = MaxReserves; type ReserveIdentifier = [u8; 8]; type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); type MaxHolds = ConstU32<0>; type MaxFreezes = ConstU32<0>; @@ -162,11 +163,6 @@ impl Config for XcmConfig { pub type LocalOriginToLocation = SignedToAccountId32; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! 
{ - pub ReachableDest: Option = Some(Parachain(1).into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; @@ -191,8 +187,6 @@ impl pallet_xcm::Config for Runtime { type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); type WeightInfo = pallet_xcm::TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; } diff --git a/polkadot/zombienet_tests/functional/0001-parachains-pvf.toml b/polkadot/zombienet_tests/functional/0001-parachains-pvf.toml index 9ae4d899e69024651580e446b96f457f1fdb885c..53205867fd12c7e3f310e9c2c65d896855bdb5c0 100644 --- a/polkadot/zombienet_tests/functional/0001-parachains-pvf.toml +++ b/polkadot/zombienet_tests/functional/0001-parachains-pvf.toml @@ -4,7 +4,6 @@ timeout = 1000 [relaychain] default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" chain = "rococo-local" -chain_spec_command = "polkadot build-spec --chain rococo-local --disable-default-bootnode" [relaychain.default_resources] limits = { memory = "4G", cpu = "2" } @@ -133,4 +132,4 @@ genesis_state_generator = "undying-collator export-genesis-state --pov-size=1000 [types.Header] number = "u64" parent_hash = "Hash" -post_state = "Hash" \ No newline at end of file +post_state = "Hash" diff --git a/polkadot/zombienet_tests/functional/0001-parachains-pvf.zndsl b/polkadot/zombienet_tests/functional/0001-parachains-pvf.zndsl index 46bb8bcdf72b103a9b72be9a68ff90134a54466f..135999a092a7a54a212d3a82eb2b58ba92aa955e 100644 --- a/polkadot/zombienet_tests/functional/0001-parachains-pvf.zndsl +++ b/polkadot/zombienet_tests/functional/0001-parachains-pvf.zndsl @@ -54,8 +54,8 @@ one: reports histogram polkadot_pvf_preparation_time has 0 samples in buckets [" two: reports histogram polkadot_pvf_preparation_time has 0 samples in buckets ["20", "30", "60", "120", "+Inf"] within 10 seconds # Check execution time. -# There are two different timeout conditions: BACKING_EXECUTION_TIMEOUT(2s) and -# APPROVAL_EXECUTION_TIMEOUT(6s). Currently these are not differentiated by metrics +# There are two different timeout conditions: DEFAULT_BACKING_EXECUTION_TIMEOUT(2s) and +# DEFAULT_APPROVAL_EXECUTION_TIMEOUT(12s). Currently these are not differentiated by metrics # because the metrics are defined in `polkadot-node-core-pvf` which is a level below # the relevant subsystems. 
# That being said, we will take the simplifying assumption of testing only the diff --git a/polkadot/zombienet_tests/functional/0002-parachains-disputes.toml b/polkadot/zombienet_tests/functional/0002-parachains-disputes.toml index e6aeb8e245c2bab3e53628498aeb2c7caa626e0f..e70322e13e6bc4a1eabb477f22afff0cc8ed2afb 100644 --- a/polkadot/zombienet_tests/functional/0002-parachains-disputes.toml +++ b/polkadot/zombienet_tests/functional/0002-parachains-disputes.toml @@ -1,14 +1,13 @@ [settings] timeout = 1000 -[relaychain.genesis.runtime.configuration.config] +[relaychain.genesis.runtimeGenesis.patch.configuration.config] max_validators_per_core = 5 needed_approvals = 8 [relaychain] default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" chain = "rococo-local" -chain_spec_command = "polkadot build-spec --chain rococo-local --disable-default-bootnode" default_command = "polkadot" [relaychain.default_resources] diff --git a/polkadot/zombienet_tests/functional/0004-parachains-garbage-candidate.toml b/polkadot/zombienet_tests/functional/0004-parachains-garbage-candidate.toml index ef27d7b92f029d7978c9b7ab1a175c882fbb1d70..5d6f299d46133e52c645bd128fbdcc8171490d4e 100644 --- a/polkadot/zombienet_tests/functional/0004-parachains-garbage-candidate.toml +++ b/polkadot/zombienet_tests/functional/0004-parachains-garbage-candidate.toml @@ -2,14 +2,13 @@ timeout = 1000 bootnode = true -[relaychain.genesis.runtime.configuration.config] +[relaychain.genesis.runtimeGenesis.patch.configuration.config] max_validators_per_core = 1 needed_approvals = 2 [relaychain] default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" chain = "rococo-local" -chain_spec_command = "polkadot build-spec --chain rococo-local --disable-default-bootnode" default_command = "polkadot" [relaychain.default_resources] diff --git a/polkadot/zombienet_tests/functional/0005-parachains-disputes-past-session.toml b/polkadot/zombienet_tests/functional/0005-parachains-disputes-past-session.toml index 25d922bb6827cbf8e129a07f64b537391bc72d7a..e2fbec079b1a81ec50054a30ef89d049bb717482 100644 --- a/polkadot/zombienet_tests/functional/0005-parachains-disputes-past-session.toml +++ b/polkadot/zombienet_tests/functional/0005-parachains-disputes-past-session.toml @@ -2,7 +2,7 @@ timeout = 1000 bootnode = true -[relaychain.genesis.runtime.configuration.config] +[relaychain.genesis.runtimeGenesis.patch.configuration.config] max_validators_per_core = 1 needed_approvals = 2 group_rotation_frequency = 2 diff --git a/polkadot/zombienet_tests/functional/0006-parachains-max-tranche0.toml b/polkadot/zombienet_tests/functional/0006-parachains-max-tranche0.toml new file mode 100644 index 0000000000000000000000000000000000000000..bef54cb8ca416fb3210849fb9801c44a31e846a4 --- /dev/null +++ b/polkadot/zombienet_tests/functional/0006-parachains-max-tranche0.toml @@ -0,0 +1,39 @@ +[settings] +timeout = 1000 +bootnode = true + +[relaychain.genesis.runtimeGenesis.patch.configuration.config] + max_validators_per_core = 1 + needed_approvals = 7 + relay_vrf_modulo_samples = 5 + +[relaychain] +default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" +chain = "rococo-local" +default_command = "polkadot" + +[relaychain.default_resources] +limits = { memory = "4G", cpu = "2" } +requests = { memory = "2G", cpu = "1" } + + [[relaychain.node_groups]] + name = "some-validator" + count = 8 + args = ["-lparachain=debug,runtime=debug"] + +{% for id in range(2000,2005) %} +[[parachains]] +id = {{id}} +addToGenesis = true +genesis_state_generator = "undying-collator export-genesis-state 
--pov-size={{10000*(id-1999)}} --pvf-complexity={{id - 1999}}" + [parachains.collator] + image = "{{COL_IMAGE}}" + name = "collator" + command = "undying-collator" + args = ["-lparachain=debug", "--pov-size={{10000*(id-1999)}}", "--parachain-id={{id}}", "--pvf-complexity={{id - 1999}}"] +{% endfor %} + +[types.Header] +number = "u64" +parent_hash = "Hash" +post_state = "Hash" diff --git a/polkadot/zombienet_tests/functional/0006-parachains-max-tranche0.zndsl b/polkadot/zombienet_tests/functional/0006-parachains-max-tranche0.zndsl new file mode 100644 index 0000000000000000000000000000000000000000..6cec6c66797ac574be35a44a0e107d79f6dd71cd --- /dev/null +++ b/polkadot/zombienet_tests/functional/0006-parachains-max-tranche0.zndsl @@ -0,0 +1,27 @@ +Description: Test if parachains make progress with most approvals being tranche0 +Network: ./0006-parachains-max-tranche0.toml +Creds: config + +# Check authority status. +some-validator-0: reports node_roles is 4 +some-validator-1: reports node_roles is 4 +some-validator-3: reports node_roles is 4 +some-validator-4: reports node_roles is 4 +some-validator-5: reports node_roles is 4 +some-validator-6: reports node_roles is 4 +some-validator-7: reports node_roles is 4 + +some-validator-0: parachain 2000 block height is at least 5 within 180 seconds +some-validator-1: parachain 2001 block height is at least 5 within 180 seconds +some-validator-2: parachain 2002 block height is at least 5 within 180 seconds +some-validator-3: parachain 2003 block height is at least 5 within 180 seconds +some-validator-4: parachain 2004 block height is at least 5 within 180 seconds + +some-validator-0: reports polkadot_parachain_approval_checking_finality_lag is lower than 2 +some-validator-1: reports polkadot_parachain_approval_checking_finality_lag is lower than 2 +some-validator-2: reports polkadot_parachain_approval_checking_finality_lag is lower than 2 +some-validator-3: reports polkadot_parachain_approval_checking_finality_lag is lower than 2 +some-validator-4: reports polkadot_parachain_approval_checking_finality_lag is lower than 2 +some-validator-5: reports polkadot_parachain_approval_checking_finality_lag is lower than 2 +some-validator-6: reports polkadot_parachain_approval_checking_finality_lag is lower than 2 +some-validator-7: reports polkadot_parachain_approval_checking_finality_lag is lower than 2 diff --git a/polkadot/zombienet_tests/misc/0001-paritydb.toml b/polkadot/zombienet_tests/misc/0001-paritydb.toml index 99dc9c66e26ee6b04bfea1951d7818e23be70118..399f848d3ac49e7e9950617c170c13d5c63593dd 100644 --- a/polkadot/zombienet_tests/misc/0001-paritydb.toml +++ b/polkadot/zombienet_tests/misc/0001-paritydb.toml @@ -2,14 +2,13 @@ timeout = 1000 bootnode = true -[relaychain.genesis.runtime.configuration.config] +[relaychain.genesis.runtimeGenesis.patch.configuration.config] max_validators_per_core = 1 needed_approvals = 3 [relaychain] default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" chain = "rococo-local" -chain_spec_command = "polkadot build-spec --chain rococo-local" default_command = "polkadot" [relaychain.default_resources] diff --git a/prdoc/pr_1178.prdoc b/prdoc/pr_1178.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..36c3b05c7a3f0166e606fe2c2ce1c8801ee0a0c2 --- /dev/null +++ b/prdoc/pr_1178.prdoc @@ -0,0 +1,23 @@ +title: tranche0 assignments in one certificate part1 + +doc: + - audience: Node Operator + description: | + Changed approval-voting and approval-distribution to send all tranche0 assignments in one
message. + This required: + * A new parachains_db version. + * A new validation protocol to support the new message types. + The new logic ships disabled and will be enabled at a later date, once all validators have upgraded. + +migrations: + db: + - name: Parachains database change from v3 to v4. + description: | + Approval-voting column format has been updated with several new fields. All existing data will automatically + be migrated to the new values. + +crates: + - name: "polkadot" + semver: patch + +host_functions: [] diff --git a/prdoc/pr_1234.prdoc b/prdoc/pr_1234.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..cc22a02d88b9d7a105bfe50dd548e9a9e98a34e9 --- /dev/null +++ b/prdoc/pr_1234.prdoc @@ -0,0 +1,20 @@ +# Schema: Parity PR Documentation Schema (prdoc) +# See doc at https://github.com/paritytech/prdoc + +title: Introduce XcmFeesToAccount fee manager + +doc: + - audience: Builder + description: | + Now all XCM sending, unless done by the system for the system, will be charged delivery fees. + All runtimes are now configured to send these delivery fees to a treasury account. + The fee formula is `delivery_fee_factor * (base_fee + encoded_msg_len * per_byte_fee)`. + +migrations: + db: [] + + runtime: [] + +crates: [] + +host_functions: [] diff --git a/prdoc/pr_1246.prdoc b/prdoc/pr_1246.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..f9c867812331bfdd18f7c14a38a1dcc735cd2c9c --- /dev/null +++ b/prdoc/pr_1246.prdoc @@ -0,0 +1,17 @@ +title: Use the `Message Queue` Pallet for DMP and XCMP dispatch queueing + +doc: + - audience: Parachain Dev + description: Replaces the queueing capabilities of the `DMP` and `XCMP-Queue` pallets for incoming messages with the `MessageQueue` pallet. This simplifies the code and improves security. + +migrations: + runtime: + - + pallet: "cumulus_pallet_dmp_queue" + description: "Messages from the DMP dispatch queue will be moved over to the MQ pallet via `on_initialize`. This happens over multiple blocks and emits a `Completed` event at the end. The pallet can be un-deployed and deleted afterwards. Note that the migration reverses the order of messages, which should be acceptable as a one-off." + +crates: + - name: "cumulus_pallet_xcmp_queue" + note: Pallet config must be altered according to the MR description. + +host_functions: [] diff --git a/prdoc/pr_1256.prdoc b/prdoc/pr_1256.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..f486786fec3522def1a0c7989c37827bf64c60b8 --- /dev/null +++ b/prdoc/pr_1256.prdoc @@ -0,0 +1,19 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "`chain-spec`: getting ready for native-runtime-free world" + +doc: + - audience: Node Dev + description: | + - [`ChainSpec::from_genesis`](https://github.com/paritytech/polkadot-sdk/blob/3df6b4d00eb310900de6f4858114baf68239412c/substrate/client/chain-spec/src/chain_spec.rs#L525) becomes deprecated in favor of [`ChainSpec::builder()`](https://github.com/paritytech/polkadot-sdk/blob/3df6b4d00eb310900de6f4858114baf68239412c/substrate/client/chain-spec/src/chain_spec.rs#L432), + - The signature of the [`ChainSpec::from_genesis`] method was changed by extending it with a `code` argument.
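For orientation, the replacement builder API reads roughly as in the sketch below, modelled on the `minimal-node` chain spec added later in this patch; `WASM_BINARY` and `testnet_genesis()` are that file's names for the runtime blob and the JSON genesis patch, so treat this as illustrative rather than a complete reference.

    // Sketch only: mirrors substrate/bin/minimal/node/src/chain_spec.rs from this patch.
    pub fn development_config() -> Result<ChainSpec, String> {
        Ok(ChainSpec::builder(WASM_BINARY.expect("wasm not available"), Default::default())
            .with_name("Development")
            .with_id("dev")
            .with_chain_type(ChainType::Development)
            .with_genesis_config_patch(testnet_genesis())
            .build())
    }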
+ +migrations: + db: [] + + runtime: [] + +crates: [] + +host_functions: [] diff --git a/prdoc/pr_1289.prdoc b/prdoc/pr_1289.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..f3d8801d9d82efc4be2f9f0b0ee45c1c593ae9f7 --- /dev/null +++ b/prdoc/pr_1289.prdoc @@ -0,0 +1,28 @@ +# Schema: Parity PR Documentation Schema (prdoc) +# See doc at https://github.com/paritytech/prdoc + +title: Supporting paged rewards allowing all nominators to be rewarded + +doc: + - audience: Validator + description: | + We used to clip the top `MaxNominatorRewardedPerValidator` nominators by stake who are eligible for staking rewards. + This was done to limit the computation cost of paying out rewards. This PR introduces paging to reward payouts, + meaning we still clip nominators up to `MaxExposurePageSize` per page and there could be multiple pages of rewards to + be paid out. Validators get commission pro-rata to the amount of reward that is paid out for the page. + + notes: + - payout_stakers should be called multiple times, once for each page of nominators. + - payout_stakers_by_page can be used to pay out rewards for a specific page. + - Some old non-paged era storage items are deprecated, and can be removed in a future upgrade. + +migrations: + db: [] + + runtime: + - { pallet: "pallet-staking", description: "v14: Migration of era exposure storage items to paged exposures."} + +crates: + - name: pallet-staking + +host_functions: [] \ No newline at end of file diff --git a/prdoc/pr_1805.prdoc b/prdoc/pr_1805.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..8a8e6c2fde2665eca306ff3adc8a2e701cc29a7d --- /dev/null +++ b/prdoc/pr_1805.prdoc @@ -0,0 +1,19 @@ +title: Introduce state decoding check after runtime upgrades. + +doc: + - audience: Core Dev + description: | + Adds a check to the try-runtime logic that verifies that all pallets' on-chain storage still decodes. This can help to spot missing migrations before they become a problem. The check is enabled as soon as the `--checks` option of the `try-runtime` CLI is not `None`. + +migrations: + db: [] + + runtime: [] + +crates: + - name: frame-support + semver: minor + - name: frame-support-procedural + semver: minor + +host_functions: [] diff --git a/prdoc/pr_1818.prdoc b/prdoc/pr_1818.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..cbafa02f9af563e88f38a4fb4f74e4ae7c13fc1a --- /dev/null +++ b/prdoc/pr_1818.prdoc @@ -0,0 +1,16 @@ +title: FRAME pallets warning for unchecked weight witness + +doc: + - audience: Core Dev + description: | + FRAME pallets now emit a warning when a call uses a function argument that starts with an underscore in its weight declaration. + +migrations: + db: [ ] + runtime: [ ] + +host_functions: [] + +crates: +- name: "frame-support-procedural" + semver: minor diff --git a/prdoc/pr_1873.prdoc b/prdoc/pr_1873.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..6f3bc7646db2a95d8ce8c15093317ed2d2b5af61 --- /dev/null +++ b/prdoc/pr_1873.prdoc @@ -0,0 +1,15 @@ +title: Message Queue use proper overweight limit + +doc: + - audience: Core Dev + description: | + Changed the overweight cutoff limit from the full `Config::ServiceWeight` to a lower value that is calculated based on the weight of the functions being called.
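The intent of that change can be sketched in a few lines; the helper name and the `call_overhead` parameter below are illustrative stand-ins, not the actual `pallet-message-queue` internals.

    use frame_support::weights::Weight;

    /// Hypothetical helper: the overweight cutoff becomes the service weight minus the fixed
    /// overhead of the servicing functions, rather than the full `Config::ServiceWeight`.
    fn overweight_limit(service_weight: Weight, call_overhead: Weight) -> Weight {
        service_weight.saturating_sub(call_overhead)
    }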
+ +migrations: + db: [] + + runtime: [] + +crates: ["pallet-message-queue", patch] + +host_functions: [] diff --git a/prdoc/pr_1913.prdoc b/prdoc/pr_1913.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..155057054eb5c5686805bfa7b970b99b401d0850 --- /dev/null +++ b/prdoc/pr_1913.prdoc @@ -0,0 +1,19 @@ +title: BEEFY on Polkadot + +doc: + - audience: Node Operator + description: | + The BEEFY gadget has been enabled on Polkadot by default. + If experiencing stability issues caused by BEEFY, it can be disabled using `--no-beefy` flag. + BEEFY doesn't (yet) support warp sync. So, attempting to Warp sync as a validator will throw an error. + +migrations: + db: [] + + runtime: [] + +crates: + - name: polkadot-cli + - name: polkadot-service + +host_functions: [] diff --git a/prdoc/pr_1921.prdoc b/prdoc/pr_1921.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..5ed0137cd5f9ee850265fe816060d34fbdf9fb38 --- /dev/null +++ b/prdoc/pr_1921.prdoc @@ -0,0 +1,19 @@ +title: Fix para-scheduler migration + +doc: + - audience: Core Dev + description: | + Changing the `MigrateToV1` migration in the `ParachainScheduler` pallet to be truly idempotent. It is achieved by wrapping it in a `VersionedMigration`. + +migrations: + db: [] + + runtime: + - pallet: "ParachainScheduler" + description: Non-critical fixup for `MigrateToV1`. + +crates: + - name: "polkadot-runtime-parachains" + semver: patch + +host_functions: [] diff --git a/prdoc/pr_1926.prdoc b/prdoc/pr_1926.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..9dc656f1260d068b3632b381302e8157e2c47b09 --- /dev/null +++ b/prdoc/pr_1926.prdoc @@ -0,0 +1,30 @@ +title: Adds syntax for marking calls feeless + +doc: + - audience: Core Dev + description: | + 1. Adds an attribute `#[pallet::feeless_if]` that can be optionally attached to a `pallet::call`. + 2. Adds a signed extension SkipCheckIfFeeless that wraps a transaction + payment processor to potentially skip payment fees for such calls. + Note that both the attribute and the signed extension are needed to make the call feeless. + +migrations: + db: [] + + runtime: [] + +crates: + - name: "frame-support-procedural" + semver: minor + - name: "pallet-skip-feeless-payment" + semver: major + - pallet-example-kitchensink + semver: patch + - kitchensink-runtime + semver: major + - node-testing + semver: patch + - node-cli + semver: patch + +host_functions: [] diff --git a/prdoc/pr_2086.prdoc b/prdoc/pr_2086.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..a9bbd0729d5b08b8b2d482ba062ece9e443458a3 --- /dev/null +++ b/prdoc/pr_2086.prdoc @@ -0,0 +1,15 @@ +title: "Contracts: Add XCM traits to interface with contracts" + +doc: + - audience: Core Dev + description: | + We are introducing a new set of `XcmController` traits in `pallet-xcm`. + These traits extract functionality from `pallet-xcm` and provide high-level interaction with XCM. + They enable other pallets, like `pallet_contracts`, to rely on these traits instead of tight coupling to `pallet-xcm` itself. 
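Tying back to the `#[pallet::feeless_if]` attribute from the pr_1926 entry above, a minimal pallet sketch using it could look as follows; the closure shape is assumed from that description, and the `SkipCheckIfFeeless` signed extension must still be wired into the runtime for fees to actually be skipped.

    #[frame_support::pallet]
    pub mod pallet {
        use frame_support::{pallet_prelude::*, weights::Weight};
        use frame_system::pallet_prelude::*;

        #[pallet::pallet]
        pub struct Pallet<T>(_);

        #[pallet::config]
        pub trait Config: frame_system::Config {}

        #[pallet::call]
        impl<T: Config> Pallet<T> {
            #[pallet::call_index(0)]
            #[pallet::weight(Weight::zero())]
            // Assumed shape: the closure receives the origin and references to the call
            // arguments, returning `true` when the call should carry no fee.
            #[pallet::feeless_if(|_origin: &OriginFor<T>, value: &u32| -> bool { *value == 0 })]
            pub fn store(_origin: OriginFor<T>, value: u32) -> DispatchResult {
                let _ = value;
                Ok(())
            }
        }
    }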
+ +crates: + - name: "pallet-xcm" + semver: patch + - name: "xcm-executor" + semver: patch + diff --git a/prdoc/pr_2107.prdoc b/prdoc/pr_2107.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..0e33680555ace33797db4dcc73f3d003eb1d5a23 --- /dev/null +++ b/prdoc/pr_2107.prdoc @@ -0,0 +1,24 @@ +# Schema: Parity PR Documentation Schema (prdoc) +# See doc at https://github.com/paritytech/prdoc + +title: Add a builder pattern to create XCM programs + +doc: + - audience: Core Dev + description: | + XCMs can now be built using a builder pattern like so: + Xcm::builder() + .withdraw_asset(assets) + .buy_execution(fees, weight_limit) + .deposit_asset(assets, beneficiary) + .build(); + +migrations: + db: [] + + runtime: [] + +crates: + - name: xcm + +host_functions: [] diff --git a/prdoc/pr_2165.prdoc b/prdoc/pr_2165.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..31cb691c43aabb58c81de10ca91abedaeaae0bb0 --- /dev/null +++ b/prdoc/pr_2165.prdoc @@ -0,0 +1,17 @@ +title: Add sudo::remove_key + +doc: + - audience: Core Dev + description: | + Pallet `Sudo` now has the ability to remove the sudo key via `remove_key`. This is a less-invasive way of rendering the sudo pallet useless without needing a code upgrade. + +migrations: + db: [] + + runtime: [] + +crates: + - name: pallet-sudo + semver: minor + +host_functions: [] diff --git a/prdoc/pr_2253.prdoc b/prdoc/pr_2253.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..398b0a29066baa1dd2cd353177a96d4cff895b47 --- /dev/null +++ b/prdoc/pr_2253.prdoc @@ -0,0 +1,24 @@ +# Schema: Parity PR Documentation Schema (prdoc) +# See doc at https://github.com/paritytech/prdoc + +title: Different builder pattern constructors for XCM + +doc: + - audience: Core Dev + description: | + The `builder()` constructor for XCM programs now only allows building messages that pay for fees, + i.e. messages that would pass the `AllowTopLevelPaidExecutionFrom` barrier. + Another constructor, `builder_unpaid()` requires an explicit `UnpaidExecution` instruction before + anything else. + For building messages without any restriction, `builder_unsafe` can be used. + This has been named like that since in general the other two should be used instead, but it's okay + to use it for teaching purposes or for experimenting. + +migrations: + db: [] + + runtime: [] + +crates: [] + +host_functions: [] diff --git a/scripts/update-ui-tests.sh b/scripts/update-ui-tests.sh index 785cc7bd3291ccdaff1d80558e9f3d52418068c3..02c1eec8c4b9c8eaf55bd5b46a533d0dbf895de7 100755 --- a/scripts/update-ui-tests.sh +++ b/scripts/update-ui-tests.sh @@ -34,7 +34,7 @@ export SKIP_WASM_BUILD=1 export TRYBUILD=overwrite # ./substrate -$RUSTUP_RUN cargo test -p sp-runtime-interface ui +$RUSTUP_RUN cargo test --manifest-path substrate/primitives/runtime-interface/Cargo.toml ui $RUSTUP_RUN cargo test -p sp-api-test ui $RUSTUP_RUN cargo test -p frame-election-provider-solution-type ui $RUSTUP_RUN cargo test -p frame-support-test ui diff --git a/substrate/Cargo.toml b/substrate/Cargo.toml index d77f02c60603150ce2627ae5594d1fc96d1bfac8..8fb1be5821bad1f04b483188f190f99866f80a2f 100644 --- a/substrate/Cargo.toml +++ b/substrate/Cargo.toml @@ -7,15 +7,17 @@ repository.workspace = true authors.workspace = true edition.workspace = true version = "1.0.0" +publish = false # The dependencies are only needed for docs. 
[dependencies] -aquamarine = "0.3.2" +simple-mermaid = { git = "https://github.com/kianenigma/simple-mermaid.git", rev = "e48b187bcfd5cc75111acd9d241f1bd36604344b" } subkey = { path = "bin/utils/subkey" } -chain-spec-builder = { path = "bin/utils/chain-spec-builder" } +chain-spec-builder = { package = "staging-chain-spec-builder", path = "bin/utils/chain-spec-builder" } sc-service = { path = "client/service" } +sc-chain-spec = { path = "client/chain-spec" } sc-cli = { path = "client/cli" } sc-consensus-aura = { path = "client/consensus/aura" } sc-consensus-babe = { path = "client/consensus/babe" } diff --git a/substrate/bin/minimal/node/Cargo.toml b/substrate/bin/minimal/node/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..0506d0838f1f930679d23792e6bbe241350a8cc4 --- /dev/null +++ b/substrate/bin/minimal/node/Cargo.toml @@ -0,0 +1,57 @@ +[package] +name = "minimal-node" +version = "4.0.0-dev" +description = "A fresh FRAME-based Substrate node, ready for hacking." +authors = ["Substrate DevHub "] +homepage = "https://substrate.io/" +edition = "2021" +license = "MIT-0" +publish = false +repository = "https://github.com/substrate-developer-hub/substrate-node-template/" +build = "build.rs" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[[bin]] +name = "minimal-node" + +[dependencies] +clap = { version = "4.0.9", features = ["derive"] } +futures = { version = "0.3.21", features = ["thread-pool"] } +futures-timer = "3.0.1" +jsonrpsee = { version = "0.16.2", features = ["server"] } +serde_json = "1.0.108" + +sc-cli = { path = "../../../client/cli" } +sc-executor = { path = "../../../client/executor" } +sc-network = { path = "../../../client/network" } +sc-service = { path = "../../../client/service" } +sc-telemetry = { path = "../../../client/telemetry" } +sc-transaction-pool = { path = "../../../client/transaction-pool" } +sc-transaction-pool-api = { path = "../../../client/transaction-pool/api" } +sc-consensus = { path = "../../../client/consensus/common" } +sc-consensus-manual-seal = { path = "../../../client/consensus/manual-seal" } +sc-rpc-api = { path = "../../../client/rpc-api" } +sc-basic-authorship = { path = "../../../client/basic-authorship" } +sc-offchain = { path = "../../../client/offchain" } +sc-client-api = { path = "../../../client/api" } + +sp-timestamp = { path = "../../../primitives/timestamp" } +sp-keyring = { path = "../../../primitives/keyring" } +sp-api = { path = "../../../primitives/api" } +sp-blockchain = { path = "../../../primitives/blockchain" } +sp-block-builder = { path = "../../../primitives/block-builder" } +sp-io = { path = "../../../primitives/io" } +sp-runtime = { path = "../../../primitives/runtime" } + +substrate-frame-rpc-system = { path = "../../../utils/frame/rpc/system" } + +frame = { path = "../../../frame", features = ["runtime", "experimental"] } +runtime = { package = "minimal-runtime", path = "../runtime" } + +[build-dependencies] +substrate-build-script-utils = { version = "3.0.0", path = "../../../utils/build-script-utils" } + +[features] +default = [] diff --git a/substrate/bin/minimal/node/build.rs b/substrate/bin/minimal/node/build.rs new file mode 100644 index 0000000000000000000000000000000000000000..fa7686e01099f2aed61bb140326ed8bb1b3a85c2 --- /dev/null +++ b/substrate/bin/minimal/node/build.rs @@ -0,0 +1,23 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use substrate_build_script_utils::{generate_cargo_keys, rerun_if_git_head_changed}; + +fn main() { + generate_cargo_keys(); + rerun_if_git_head_changed(); +} diff --git a/substrate/bin/minimal/node/src/chain_spec.rs b/substrate/bin/minimal/node/src/chain_spec.rs new file mode 100644 index 0000000000000000000000000000000000000000..6b721deb6d1df402d811ba40cd6c848c01a83754 --- /dev/null +++ b/substrate/bin/minimal/node/src/chain_spec.rs @@ -0,0 +1,55 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use runtime::{BalancesConfig, SudoConfig, WASM_BINARY}; +use sc_service::{ChainType, Properties}; +use serde_json::{json, Value}; +use sp_keyring::AccountKeyring; + +/// This is a specialization of the general Substrate ChainSpec type. +pub type ChainSpec = sc_service::GenericChainSpec<()>; + +fn props() -> Properties { + let mut properties = Properties::new(); + properties.insert("tokenDecimals".to_string(), 0.into()); + properties.insert("tokenSymbol".to_string(), "MINI".into()); + properties +} + +pub fn development_config() -> Result { + Ok(ChainSpec::builder(WASM_BINARY.expect("Development wasm not available"), Default::default()) + .with_name("Development") + .with_id("dev") + .with_chain_type(ChainType::Development) + .with_genesis_config_patch(testnet_genesis()) + .with_properties(props()) + .build()) +} + +/// Configure initial storage state for FRAME pallets. +fn testnet_genesis() -> Value { + use frame::traits::Get; + use runtime::interface::{Balance, MinimumBalance}; + let endowment = >::get().max(1) * 1000; + let balances = AccountKeyring::iter() + .map(|a| (a.to_account_id(), endowment)) + .collect::>(); + json!({ + "balances": BalancesConfig { balances }, + "sudo": SudoConfig { key: Some(AccountKeyring::Alice.to_account_id()) }, + }) +} diff --git a/substrate/bin/minimal/node/src/cli.rs b/substrate/bin/minimal/node/src/cli.rs new file mode 100644 index 0000000000000000000000000000000000000000..e464fa7d6caa36ce41b45ae79a3883713ef50aca --- /dev/null +++ b/substrate/bin/minimal/node/src/cli.rs @@ -0,0 +1,81 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use sc_cli::RunCmd; + +#[derive(Debug, Clone)] +pub enum Consensus { + ManualSeal(u64), + InstantSeal, +} + +impl std::str::FromStr for Consensus { + type Err = String; + + fn from_str(s: &str) -> Result { + Ok(if s == "instant-seal" { + Consensus::InstantSeal + } else if let Some(block_time) = s.strip_prefix("manual-seal-") { + Consensus::ManualSeal(block_time.parse().map_err(|_| "invalid block time")?) + } else { + return Err("incorrect consensus identifier".into()) + }) + } +} + +#[derive(Debug, clap::Parser)] +pub struct Cli { + #[command(subcommand)] + pub subcommand: Option, + + #[clap(long, default_value = "manual-seal-3000")] + pub consensus: Consensus, + + #[clap(flatten)] + pub run: RunCmd, +} + +#[derive(Debug, clap::Subcommand)] +pub enum Subcommand { + /// Key management cli utilities + #[command(subcommand)] + Key(sc_cli::KeySubcommand), + + /// Build a chain specification. + BuildSpec(sc_cli::BuildSpecCmd), + + /// Validate blocks. + CheckBlock(sc_cli::CheckBlockCmd), + + /// Export blocks. + ExportBlocks(sc_cli::ExportBlocksCmd), + + /// Export the state of a given block into a chain spec. + ExportState(sc_cli::ExportStateCmd), + + /// Import blocks. + ImportBlocks(sc_cli::ImportBlocksCmd), + + /// Remove the whole chain. + PurgeChain(sc_cli::PurgeChainCmd), + + /// Revert the chain to a previous state. + Revert(sc_cli::RevertCmd), + + /// Db meta columns information. + ChainInfo(sc_cli::ChainInfoCmd), +} diff --git a/substrate/bin/minimal/node/src/command.rs b/substrate/bin/minimal/node/src/command.rs new file mode 100644 index 0000000000000000000000000000000000000000..a985370c2d87f4d0e86191a79bd88db484953355 --- /dev/null +++ b/substrate/bin/minimal/node/src/command.rs @@ -0,0 +1,126 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
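// Illustrative sketch, not part of the diff above: how the `--consensus` flag value is parsed
// by the `FromStr` impl for `Consensus` in `cli.rs`. The function name is hypothetical and
// `Consensus` is assumed to be in scope.
fn parse_consensus_flag_examples() {
	use std::str::FromStr;

	assert!(matches!(Consensus::from_str("instant-seal"), Ok(Consensus::InstantSeal)));
	assert!(matches!(Consensus::from_str("manual-seal-3000"), Ok(Consensus::ManualSeal(3000))));
	assert!(Consensus::from_str("auto").is_err());
}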
+ +use crate::{ + chain_spec, + cli::{Cli, Subcommand}, + service, +}; +use sc_cli::SubstrateCli; +use sc_service::PartialComponents; + +#[cfg(feature = "try-runtime")] +use try_runtime_cli::block_building_info::timestamp_with_aura_info; + +impl SubstrateCli for Cli { + fn impl_name() -> String { + "Substrate Node".into() + } + + fn impl_version() -> String { + env!("SUBSTRATE_CLI_IMPL_VERSION").into() + } + + fn description() -> String { + env!("CARGO_PKG_DESCRIPTION").into() + } + + fn author() -> String { + env!("CARGO_PKG_AUTHORS").into() + } + + fn support_url() -> String { + "support.anonymous.an".into() + } + + fn copyright_start_year() -> i32 { + 2017 + } + + fn load_spec(&self, id: &str) -> Result, String> { + Ok(match id { + "dev" => Box::new(chain_spec::development_config()?), + path => + Box::new(chain_spec::ChainSpec::from_json_file(std::path::PathBuf::from(path))?), + }) + } +} + +/// Parse and run command line arguments +pub fn run() -> sc_cli::Result<()> { + let cli = Cli::from_args(); + + match &cli.subcommand { + Some(Subcommand::Key(cmd)) => cmd.run(&cli), + Some(Subcommand::BuildSpec(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.sync_run(|config| cmd.run(config.chain_spec, config.network)) + }, + Some(Subcommand::CheckBlock(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { client, task_manager, import_queue, .. } = + service::new_partial(&config)?; + Ok((cmd.run(client, import_queue), task_manager)) + }) + }, + Some(Subcommand::ExportBlocks(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { client, task_manager, .. } = service::new_partial(&config)?; + Ok((cmd.run(client, config.database), task_manager)) + }) + }, + Some(Subcommand::ExportState(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { client, task_manager, .. } = service::new_partial(&config)?; + Ok((cmd.run(client, config.chain_spec), task_manager)) + }) + }, + Some(Subcommand::ImportBlocks(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { client, task_manager, import_queue, .. } = + service::new_partial(&config)?; + Ok((cmd.run(client, import_queue), task_manager)) + }) + }, + Some(Subcommand::PurgeChain(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.sync_run(|config| cmd.run(config.database)) + }, + Some(Subcommand::Revert(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { client, task_manager, backend, .. } = + service::new_partial(&config)?; + Ok((cmd.run(client, backend, None), task_manager)) + }) + }, + Some(Subcommand::ChainInfo(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.sync_run(|config| cmd.run::(&config)) + }, + None => { + let runner = cli.create_runner(&cli.run)?; + runner.run_node_until_exit(|config| async move { + service::new_full(config, cli.consensus).map_err(sc_cli::Error::Service) + }) + }, + } +} diff --git a/substrate/bin/minimal/node/src/lib.rs b/substrate/bin/minimal/node/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..c2065def736aeeb3b1102bade04063532128f3cd --- /dev/null +++ b/substrate/bin/minimal/node/src/lib.rs @@ -0,0 +1,21 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod chain_spec; +pub(crate) mod cli; +pub mod rpc; +pub mod service; diff --git a/substrate/bin/minimal/node/src/main.rs b/substrate/bin/minimal/node/src/main.rs new file mode 100644 index 0000000000000000000000000000000000000000..900651fd1fdb83c6146b67490eb1bd6551a1f9b6 --- /dev/null +++ b/substrate/bin/minimal/node/src/main.rs @@ -0,0 +1,30 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Substrate Node Template CLI library. +#![warn(missing_docs)] + +mod chain_spec; +#[macro_use] +mod service; +mod cli; +mod command; +mod rpc; + +fn main() -> sc_cli::Result<()> { + command::run() +} diff --git a/substrate/bin/minimal/node/src/rpc.rs b/substrate/bin/minimal/node/src/rpc.rs new file mode 100644 index 0000000000000000000000000000000000000000..d0c417a93d7aa6c8c8a6c6163628aac9845c6eb3 --- /dev/null +++ b/substrate/bin/minimal/node/src/rpc.rs @@ -0,0 +1,66 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! A collection of node-specific RPC methods. +//! Substrate provides the `sc-rpc` crate, which defines the core RPC layer +//! used by Substrate nodes. This file extends those RPC definitions with +//! capabilities that are specific to this project's runtime configuration. + +#![warn(missing_docs)] + +use jsonrpsee::RpcModule; +use runtime::interface::{AccountId, Nonce, OpaqueBlock}; +use sc_transaction_pool_api::TransactionPool; +use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; +use std::sync::Arc; +use substrate_frame_rpc_system::{System, SystemApiServer}; + +pub use sc_rpc_api::DenyUnsafe; + +/// Full client dependencies. +pub struct FullDeps { + /// The client instance to use. 
+	pub client: Arc<C>,
+	/// Transaction pool instance.
+	pub pool: Arc<P>,

= StorageValue<<P as CountedStorageMapInstance>

::CounterPrefix, u32, ValueQuery>; +/// The numeric counter type. +pub type Counter = u32; + +type CounterFor
<P> =
+	StorageValue<<P as CountedStorageMapInstance>
::CounterPrefix, Counter, ValueQuery>; /// On removal logic for updating counter while draining upon some prefix with /// [`crate::storage::PrefixIterator`]. @@ -378,14 +427,14 @@ where /// can be very heavy, so use with caution. /// /// Returns the number of items in the map which is used to set the counter. - pub fn initialize_counter() -> u32 { - let count = Self::iter_values().count() as u32; + pub fn initialize_counter() -> Counter { + let count = Self::iter_values().count() as Counter; CounterFor::::set(count); count } /// Return the count. - pub fn count() -> u32 { + pub fn count() -> Counter { CounterFor::::get() } } @@ -1162,7 +1211,7 @@ mod test { StorageEntryMetadataIR { name: "counter_for_foo", modifier: StorageEntryModifierIR::Default, - ty: StorageEntryTypeIR::Plain(scale_info::meta_type::()), + ty: StorageEntryTypeIR::Plain(scale_info::meta_type::()), default: vec![0, 0, 0, 0], docs: if cfg!(feature = "no-metadata-docs") { vec![] @@ -1173,4 +1222,15 @@ mod test { ] ); } + + #[docify::export] + #[test] + fn test_simple_count_works() { + type FooCountedMap = CountedStorageMap; + TestExternalities::default().execute_with(|| { + FooCountedMap::insert(1, 1); + FooCountedMap::insert(2, 2); + assert_eq!(FooCountedMap::count(), 2); + }); + } } diff --git a/substrate/frame/support/src/storage/types/counted_nmap.rs b/substrate/frame/support/src/storage/types/counted_nmap.rs index 5da31c05922541f0cbc7c987d09fa79894c02569..279894ee97363b38462f83e011f8fee9f0fc233b 100644 --- a/substrate/frame/support/src/storage/types/counted_nmap.rs +++ b/substrate/frame/support/src/storage/types/counted_nmap.rs @@ -33,8 +33,8 @@ use sp_metadata_ir::StorageEntryMetadataIR; use sp_runtime::traits::Saturating; use sp_std::prelude::*; -/// A wrapper around a `StorageNMap` and a `StorageValue` to keep track of how many items -/// are in a map, without needing to iterate over all of the values. +/// A wrapper around a [`StorageNMap`] and a [`StorageValue`] (with the value being `u32`) to keep +/// track of how many items are in a map, without needing to iterate all the values. /// /// This storage item has some additional storage read and write overhead when manipulating values /// compared to a regular storage map. @@ -45,6 +45,49 @@ use sp_std::prelude::*; /// /// Whenever the counter needs to be updated, an additional read and write occurs to update that /// counter. +/// +/// For general information regarding the `#[pallet::storage]` attribute, refer to +/// [`crate::pallet_macros::storage`]. +/// +/// # Example +/// +/// ``` +/// #[frame_support::pallet] +/// mod pallet { +/// # use frame_support::pallet_prelude::*; +/// # #[pallet::config] +/// # pub trait Config: frame_system::Config {} +/// # #[pallet::pallet] +/// # pub struct Pallet(_); +/// /// A kitchen-sink CountedStorageNMap, with all possible additional attributes. +/// #[pallet::storage] +/// #[pallet::getter(fn foo)] +/// #[pallet::storage_prefix = "OtherFoo"] +/// #[pallet::unbounded] +/// pub type Foo = CountedStorageNMap< +/// _, +/// ( +/// NMapKey, +/// NMapKey, +/// NMapKey +/// ), +/// u64, +/// ValueQuery, +/// >; +/// +/// /// Alternative named syntax. +/// #[pallet::storage] +/// pub type Bar = CountedStorageNMap< +/// Key = ( +/// NMapKey, +/// NMapKey, +/// NMapKey +/// ), +/// Value = u64, +/// QueryKind = ValueQuery, +/// >; +/// } +/// ``` pub struct CountedStorageNMap< Prefix, Key, @@ -71,8 +114,10 @@ impl MapWrapper type Map = StorageNMap; } +type Counter = super::counted_map::Counter; + type CounterFor
<P> =
-	StorageValue<<P as CountedStorageNMapInstance>::CounterPrefix, u32, ValueQuery>;
+	StorageValue<<P as CountedStorageNMapInstance>
::CounterPrefix, Counter, ValueQuery>; /// On removal logic for updating counter while draining upon some prefix with /// [`crate::storage::PrefixIterator`]. @@ -429,7 +474,7 @@ where } /// Return the count. - pub fn count() -> u32 { + pub fn count() -> Counter { CounterFor::::get() } } diff --git a/substrate/frame/support/src/storage/types/double_map.rs b/substrate/frame/support/src/storage/types/double_map.rs index 519ffcbafadee09dbfbaf4f78ea76939cf34a64b..cb9479d491cff7206eb36996097e231439fd1769 100644 --- a/substrate/frame/support/src/storage/types/double_map.rs +++ b/substrate/frame/support/src/storage/types/double_map.rs @@ -27,26 +27,71 @@ use crate::{ StorageHasher, Twox128, }; use codec::{Decode, Encode, EncodeLike, FullCodec, MaxEncodedLen}; +use frame_support::storage::StorageDecodeNonDedupLength; use sp_arithmetic::traits::SaturatedConversion; use sp_metadata_ir::{StorageEntryMetadataIR, StorageEntryTypeIR}; use sp_std::prelude::*; -/// A type that allow to store values for `(key1, key2)` couple. Similar to `StorageMap` but allow -/// to iterate and remove value associated to first key. +/// A type representing a *double map* in storage. This structure associates a pair of keys with a +/// value of a specified type stored on-chain. /// -/// Each value is stored at: -/// ```nocompile -/// Twox128(Prefix::pallet_prefix()) -/// ++ Twox128(Prefix::STORAGE_PREFIX) -/// ++ Hasher1(encode(key1)) -/// ++ Hasher2(encode(key2)) +/// A double map with keys `k1` and `k2` can be likened to a +/// [`StorageMap`](frame_support::storage::types::StorageMap) with a key of type `(k1, k2)`. +/// However, a double map offers functions specific to each key, enabling partial iteration and +/// deletion based on one key alone. +/// +/// Also, conceptually, a double map is a special case of a +/// [`StorageNMap`](frame_support::storage::types::StorageNMap) using two keys. +/// +/// For general information regarding the `#[pallet::storage]` attribute, refer to +/// [`crate::pallet_macros::storage`]. +/// +/// # Examples +/// +/// ### Kitchen-sink +/// +/// ``` +/// #[frame_support::pallet] +/// mod pallet { +/// # use frame_support::pallet_prelude::*; +/// # #[pallet::config] +/// # pub trait Config: frame_system::Config {} +/// # #[pallet::pallet] +/// # pub struct Pallet(_); +/// /// A kitchen-sink StorageDoubleMap, with all possible additional attributes. +/// #[pallet::storage] +/// #[pallet::getter(fn foo)] +/// #[pallet::storage_prefix = "OtherFoo"] +/// #[pallet::unbounded] +/// pub type Foo = StorageDoubleMap< +/// _, +/// Blake2_128Concat, +/// u8, +/// Twox64Concat, +/// u16, +/// u32, +/// ValueQuery +/// >; +/// +/// /// Alternative named syntax. +/// #[pallet::storage] +/// pub type Bar = StorageDoubleMap< +/// Hasher1 = Blake2_128Concat, +/// Key1 = u8, +/// Hasher2 = Twox64Concat, +/// Key2 = u16, +/// Value = u32, +/// QueryKind = ValueQuery +/// >; +/// } /// ``` /// -/// # Warning +/// ### Partial Iteration & Removal /// -/// If the key1s (or key2s) are not trusted (e.g. can be set by a user), a cryptographic `hasher` -/// such as `blake2_128_concat` must be used for Hasher1 (resp. Hasher2). Otherwise, other values -/// in storage can be compromised. +/// When `Hasher1` and `Hasher2` implement the +/// [`ReversibleStorageHasher`](frame_support::ReversibleStorageHasher) trait, the first key `k1` +/// can be used to partially iterate over keys and values of the double map, and to delete items. 
+#[doc = docify::embed!("src/storage/types/double_map.rs", example_double_map_partial_operations)] pub struct StorageDoubleMap< Prefix, Hasher1, @@ -411,6 +456,31 @@ where >::decode_len(key1, key2) } + /// Read the length of the storage value without decoding the entire value. + /// + /// `Value` is required to implement [`StorageDecodeNonDedupLength`]. + /// + /// If the value does not exists or it fails to decode the length, `None` is returned. + /// Otherwise `Some(len)` is returned. + /// + /// # Warning + /// + /// - `None` does not mean that `get()` does not return a value. The default value is completly + /// ignored by this function. + /// + /// - The value returned is the non-deduplicated length of the underlying Vector in storage.This + /// means that any duplicate items are included. + pub fn decode_non_dedup_len(key1: KArg1, key2: KArg2) -> Option + where + KArg1: EncodeLike, + KArg2: EncodeLike, + Value: StorageDecodeNonDedupLength, + { + >::decode_non_dedup_len( + key1, key2, + ) + } + /// Migrate an item with the given `key1` and `key2` from defunct `OldHasher1` and /// `OldHasher2` to the current hashers. /// @@ -742,6 +812,7 @@ mod test { use crate::{hash::*, storage::types::ValueQuery}; use sp_io::{hashing::twox_128, TestExternalities}; use sp_metadata_ir::{StorageEntryModifierIR, StorageEntryTypeIR, StorageHasherIR}; + use std::collections::BTreeSet; struct Prefix; impl StorageInstance for Prefix { @@ -972,4 +1043,30 @@ mod test { assert_eq!(A::drain_prefix(4).collect::>(), vec![]); }) } + + #[docify::export] + #[test] + fn example_double_map_partial_operations() { + type FooDoubleMap = + StorageDoubleMap; + + TestExternalities::default().execute_with(|| { + FooDoubleMap::insert(0, 0, 42); + FooDoubleMap::insert(0, 1, 43); + FooDoubleMap::insert(1, 0, 314); + + // should be equal to {0,1} (ordering is random) + let collected_k2_keys: BTreeSet<_> = FooDoubleMap::iter_key_prefix(0).collect(); + assert_eq!(collected_k2_keys, [0, 1].iter().copied().collect::>()); + + // should be equal to {42,43} (ordering is random) + let collected_k2_values: BTreeSet<_> = FooDoubleMap::iter_prefix_values(0).collect(); + assert_eq!(collected_k2_values, [42, 43].iter().copied().collect::>()); + + // Remove items from the map using k1 = 0 + let _ = FooDoubleMap::clear_prefix(0, u32::max_value(), None); + // Values associated with (0, _) should have been removed + assert_eq!(FooDoubleMap::iter_prefix(0).collect::>(), vec![]); + }); + } } diff --git a/substrate/frame/support/src/storage/types/map.rs b/substrate/frame/support/src/storage/types/map.rs index 7f936a8a35a61c4d8d23aa3c1d0aebe315ee6229..d0149cf3fc87bdca1fb519317bce508579ef14a9 100644 --- a/substrate/frame/support/src/storage/types/map.rs +++ b/substrate/frame/support/src/storage/types/map.rs @@ -27,23 +27,50 @@ use crate::{ StorageHasher, Twox128, }; use codec::{Decode, Encode, EncodeLike, FullCodec, MaxEncodedLen}; +use frame_support::storage::StorageDecodeNonDedupLength; use sp_arithmetic::traits::SaturatedConversion; use sp_metadata_ir::{StorageEntryMetadataIR, StorageEntryTypeIR}; use sp_std::prelude::*; -/// A type that allow to store value for given key. Allowing to insert/remove/iterate on values. +/// A type representing a *map* in storage. A *storage map* is a mapping of keys to values of a +/// given type stored on-chain. 
/// -/// Each value is stored at: -/// ```nocompile -/// Twox128(Prefix::pallet_prefix()) -/// ++ Twox128(Prefix::STORAGE_PREFIX) -/// ++ Hasher1(encode(key)) -/// ``` +/// For general information regarding the `#[pallet::storage]` attribute, refer to +/// [`crate::pallet_macros::storage`]. +/// +/// # Example /// -/// # Warning +/// ``` +/// #[frame_support::pallet] +/// mod pallet { +/// # use frame_support::pallet_prelude::*; +/// # #[pallet::config] +/// # pub trait Config: frame_system::Config {} +/// # #[pallet::pallet] +/// # pub struct Pallet(_); +/// /// A kitchen-sink StorageMap, with all possible additional attributes. +/// #[pallet::storage] +/// #[pallet::getter(fn foo)] +/// #[pallet::storage_prefix = "OtherFoo"] +/// #[pallet::unbounded] +/// pub type Foo = StorageMap< +/// _, +/// Blake2_128Concat, +/// u32, +/// u32, +/// ValueQuery +/// >; /// -/// If the keys are not trusted (e.g. can be set by a user), a cryptographic `hasher` such as -/// `blake2_128_concat` must be used. Otherwise, other values in storage can be compromised. +/// /// Alternative named syntax. +/// #[pallet::storage] +/// pub type Bar = StorageMap< +/// Hasher = Blake2_128Concat, +/// Key = u32, +/// Value = u32, +/// QueryKind = ValueQuery +/// >; +/// } +/// ``` pub struct StorageMap< Prefix, Hasher, @@ -259,6 +286,27 @@ where >::decode_len(key) } + /// Read the length of the storage value without decoding the entire value. + /// + /// `Value` is required to implement [`StorageDecodeNonDedupLength`]. + /// + /// If the value does not exists or it fails to decode the length, `None` is returned. + /// Otherwise `Some(len)` is returned. + /// + /// # Warning + /// + /// - `None` does not mean that `get()` does not return a value. The default value is completly + /// ignored by this function. + /// + /// - The value returned is the non-deduplicated length of the underlying Vector in storage.This + /// means that any duplicate items are included. + pub fn decode_non_dedup_len>(key: KeyArg) -> Option + where + Value: StorageDecodeNonDedupLength, + { + >::decode_non_dedup_len(key) + } + /// Migrate an item with the given `key` from a defunct `OldHasher` to the current hasher. /// /// If the key doesn't exist, then it's a no-op. If it does, then it returns its value. diff --git a/substrate/frame/support/src/storage/types/mod.rs b/substrate/frame/support/src/storage/types/mod.rs index c7f2557099b36a3b740b6038694401b35838d396..9dd6f4066e432068b97899e94017bb06fe0dc014 100644 --- a/substrate/frame/support/src/storage/types/mod.rs +++ b/substrate/frame/support/src/storage/types/mod.rs @@ -30,7 +30,7 @@ mod map; mod nmap; mod value; -pub use counted_map::{CountedStorageMap, CountedStorageMapInstance}; +pub use counted_map::{CountedStorageMap, CountedStorageMapInstance, Counter}; pub use counted_nmap::{CountedStorageNMap, CountedStorageNMapInstance}; pub use double_map::StorageDoubleMap; pub use key::{ @@ -43,13 +43,17 @@ pub use value::StorageValue; /// Trait implementing how the storage optional value is converted into the queried type. /// -/// It is implemented by: -/// * `OptionQuery` which converts an optional value to an optional value, used when querying +/// It is implemented most notable by: +/// +/// * [`OptionQuery`] which converts an optional value to an optional value, used when querying /// storage returns an optional value. 
-/// * `ResultQuery` which converts an optional value to a result value, used when querying storage +/// * [`ResultQuery`] which converts an optional value to a result value, used when querying storage /// returns a result value. -/// * `ValueQuery` which converts an optional value to a value, used when querying storage returns a -/// value. +/// * [`ValueQuery`] which converts an optional value to a value, used when querying storage returns +/// a value. +/// +/// ## Example +#[doc = docify::embed!("src/storage/types/mod.rs", value_query_examples)] pub trait QueryKindTrait { /// Metadata for the storage kind. const METADATA: StorageEntryModifierIR; @@ -65,11 +69,10 @@ pub trait QueryKindTrait { fn from_query_to_optional_value(v: Self::Query) -> Option; } -/// Implement QueryKindTrait with query being `Option` +/// Implements [`QueryKindTrait`] with `Query` type being `Option<_>`. /// -/// NOTE: it doesn't support a generic `OnEmpty`. This means only `None` can be -/// returned when no value is found. To use another `OnEmpty` implementation, `ValueQuery` can be -/// used instead. +/// NOTE: it doesn't support a generic `OnEmpty`. This means only `None` can be returned when no +/// value is found. To use another `OnEmpty` implementation, `ValueQuery` can be used instead. pub struct OptionQuery; impl QueryKindTrait for OptionQuery where @@ -89,7 +92,7 @@ where } } -/// Implement QueryKindTrait with query being `Result` +/// Implements [`QueryKindTrait`] with `Query` type being `Result`. pub struct ResultQuery(sp_std::marker::PhantomData); impl QueryKindTrait for ResultQuery where @@ -113,7 +116,7 @@ where } } -/// Implement QueryKindTrait with query being `Value` +/// Implements [`QueryKindTrait`] with `Query` type being `Value`. pub struct ValueQuery; impl QueryKindTrait for ValueQuery where @@ -140,3 +143,60 @@ pub trait StorageEntryMetadataBuilder { /// Build into `entries` the storage metadata entries of a storage given some `docs`. fn build_metadata(doc: Vec<&'static str>, entries: &mut Vec); } + +#[cfg(test)] +mod test { + use super::*; + use crate::{ + storage::types::ValueQuery, + traits::{Get, StorageInstance}, + }; + use sp_io::TestExternalities; + + struct Prefix; + impl StorageInstance for Prefix { + fn pallet_prefix() -> &'static str { + "test" + } + const STORAGE_PREFIX: &'static str = "foo"; + } + + #[docify::export] + #[test] + pub fn value_query_examples() { + /// Custom default impl to be used with `ValueQuery`. + struct UniverseSecret; + impl Get for UniverseSecret { + fn get() -> u32 { + 42 + } + } + + /// Custom default impl to be used with `ResultQuery`. 
+ struct GetDefaultForResult; + impl Get> for GetDefaultForResult { + fn get() -> Result { + Err(()) + } + } + + type A = StorageValue; + type B = StorageValue; + type C = StorageValue, GetDefaultForResult>; + type D = StorageValue; + + TestExternalities::default().execute_with(|| { + // normal value query returns default + assert_eq!(A::get(), 0); + + // option query returns none + assert_eq!(B::get(), None); + + // result query returns error + assert_eq!(C::get(), Err(())); + + // value query with custom onempty returns 42 + assert_eq!(D::get(), 42); + }); + } +} diff --git a/substrate/frame/support/src/storage/types/nmap.rs b/substrate/frame/support/src/storage/types/nmap.rs index 406fd42eaf7b3773e960f4489d5ede2b090ca0a2..0723db68900273ff729e8d7cceb4e408e18a00e3 100755 --- a/substrate/frame/support/src/storage/types/nmap.rs +++ b/substrate/frame/support/src/storage/types/nmap.rs @@ -33,24 +33,54 @@ use sp_metadata_ir::{StorageEntryMetadataIR, StorageEntryTypeIR}; use sp_runtime::SaturatedConversion; use sp_std::prelude::*; -/// A type that allow to store values for an arbitrary number of keys in the form of -/// `(Key, Key, ..., Key)`. +/// A type representing an *NMap* in storage. This structure associates an arbitrary number of keys +/// with a value of a specified type stored on-chain. /// -/// Each value is stored at: -/// ```nocompile -/// Twox128(Prefix::pallet_prefix()) -/// ++ Twox128(Prefix::STORAGE_PREFIX) -/// ++ Hasher1(encode(key1)) -/// ++ Hasher2(encode(key2)) -/// ++ ... -/// ++ HasherN(encode(keyN)) -/// ``` +/// For example, [`StorageDoubleMap`](frame_support::storage::types::StorageDoubleMap) is a special +/// case of an *NMap* with N = 2. +/// +/// For general information regarding the `#[pallet::storage]` attribute, refer to +/// [`crate::pallet_macros::storage`]. /// -/// # Warning +/// # Example /// -/// If the keys are not trusted (e.g. can be set by a user), a cryptographic `hasher` -/// such as `blake2_128_concat` must be used for the key hashers. Otherwise, other values -/// in storage can be compromised. +/// ``` +/// #[frame_support::pallet] +/// mod pallet { +/// # use frame_support::pallet_prelude::*; +/// # #[pallet::config] +/// # pub trait Config: frame_system::Config {} +/// # #[pallet::pallet] +/// # pub struct Pallet(_); +/// /// A kitchen-sink StorageNMap, with all possible additional attributes. +/// #[pallet::storage] +/// #[pallet::getter(fn foo)] +/// #[pallet::storage_prefix = "OtherFoo"] +/// #[pallet::unbounded] +/// pub type Foo = StorageNMap< +/// _, +/// ( +/// NMapKey, +/// NMapKey, +/// NMapKey +/// ), +/// u64, +/// ValueQuery, +/// >; +/// +/// /// Named alternative syntax. 
+/// #[pallet::storage] +/// pub type Bar = StorageNMap< +/// Key = ( +/// NMapKey, +/// NMapKey, +/// NMapKey +/// ), +/// Value = u64, +/// QueryKind = ValueQuery, +/// >; +/// } +/// ``` pub struct StorageNMap< Prefix, Key, diff --git a/substrate/frame/support/src/storage/types/value.rs b/substrate/frame/support/src/storage/types/value.rs index 3e1f2fe9551d3578678959b09b9a4c60fa962c07..263091dd25237da759c54a0a7668306e5850aec9 100644 --- a/substrate/frame/support/src/storage/types/value.rs +++ b/substrate/frame/support/src/storage/types/value.rs @@ -23,18 +23,44 @@ use crate::{ types::{OptionQuery, QueryKindTrait, StorageEntryMetadataBuilder}, StorageAppend, StorageDecodeLength, StorageTryAppend, }, - traits::{GetDefault, StorageInfo, StorageInstance}, + traits::{Get, GetDefault, StorageInfo, StorageInstance}, }; use codec::{Decode, Encode, EncodeLike, FullCodec, MaxEncodedLen}; +use frame_support::storage::StorageDecodeNonDedupLength; use sp_arithmetic::traits::SaturatedConversion; use sp_metadata_ir::{StorageEntryMetadataIR, StorageEntryTypeIR}; use sp_std::prelude::*; -/// A type that allow to store a value. +/// A type representing a *value* in storage. A *storage value* is a single value of a given type +/// stored on-chain. +/// +/// For general information regarding the `#[pallet::storage]` attribute, refer to +/// [`crate::pallet_macros::storage`]. +/// +/// # Example /// -/// Each value is stored at: -/// ```nocompile -/// Twox128(Prefix::pallet_prefix()) ++ Twox128(Prefix::STORAGE_PREFIX) +/// ``` +/// #[frame_support::pallet] +/// mod pallet { +/// # use frame_support::pallet_prelude::*; +/// # #[pallet::config] +/// # pub trait Config: frame_system::Config {} +/// # #[pallet::pallet] +/// # pub struct Pallet(_); +/// /// A kitchen-sink StorageValue, with all possible additional attributes. +/// #[pallet::storage] +/// #[pallet::getter(fn foo)] +/// #[pallet::storage_prefix = "OtherFoo"] +/// #[pallet::unbounded] +/// pub type Foo = StorageValue<_, u32,ValueQuery>; +/// +/// /// Named alternative syntax. +/// #[pallet::storage] +/// pub type Bar = StorageValue< +/// Value = u32, +/// QueryKind = ValueQuery +/// >; +/// } /// ``` pub struct StorageValue( core::marker::PhantomData<(Prefix, Value, QueryKind, OnEmpty)>, @@ -46,7 +72,7 @@ where Prefix: StorageInstance, Value: FullCodec, QueryKind: QueryKindTrait, - OnEmpty: crate::traits::Get + 'static, + OnEmpty: Get + 'static, { type Query = QueryKind::Query; fn pallet_prefix() -> &'static [u8] { @@ -208,6 +234,27 @@ where >::decode_len() } + /// Read the length of the storage value without decoding the entire value. + /// + /// `Value` is required to implement [`StorageDecodeNonDedupLength`]. + /// + /// If the value does not exists or it fails to decode the length, `None` is returned. + /// Otherwise `Some(len)` is returned. + /// + /// # Warning + /// + /// - `None` does not mean that `get()` does not return a value. The default value is completly + /// ignored by this function. + /// + /// - The value returned is the non-deduplicated length of the underlying Vector in storage.This + /// means that any duplicate items are included. + pub fn decode_non_dedup_len() -> Option + where + Value: StorageDecodeNonDedupLength, + { + >::decode_non_dedup_len() + } + /// Try and append the given item to the value in the storage. /// /// Is only available if `Value` of the storage implements [`StorageTryAppend`]. 
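// Illustrative sketch, not part of the diff above: length-only reads on a storage item.
// `decode_len` (and the newly added `decode_non_dedup_len`) only decode the SCALE length
// prefix instead of the whole collection. The `Prefix`/`MyVec` names are assumptions.
#[cfg(test)]
mod decode_len_sketch {
	use frame_support::{storage::types::StorageValue, traits::StorageInstance};
	use sp_io::TestExternalities;

	struct Prefix;
	impl StorageInstance for Prefix {
		fn pallet_prefix() -> &'static str {
			"test"
		}
		const STORAGE_PREFIX: &'static str = "my_vec";
	}
	type MyVec = StorageValue<Prefix, Vec<u32>>;

	#[test]
	fn length_is_read_without_decoding_the_items() {
		TestExternalities::default().execute_with(|| {
			MyVec::append(1u32);
			MyVec::append(2u32);
			// Only the compact-encoded length prefix is read from storage here.
			assert_eq!(MyVec::decode_len(), Some(2));
		});
	}
}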
diff --git a/substrate/frame/support/src/traits.rs b/substrate/frame/support/src/traits.rs index 2179ee38f8481e19ce2ba3ccc83a16415a8b25c3..6362e750d2ab98e5ae08e4e03ed57b72885c1f6f 100644 --- a/substrate/frame/support/src/traits.rs +++ b/substrate/frame/support/src/traits.rs @@ -27,14 +27,15 @@ pub use tokens::{ }, fungible, fungibles, imbalance::{Imbalance, OnUnbalanced, SignedImbalance}, - nonfungible, nonfungibles, BalanceStatus, ExistenceRequirement, Locker, WithdrawReasons, + nonfungible, nonfungible_v2, nonfungibles, nonfungibles_v2, BalanceStatus, + ExistenceRequirement, Locker, WithdrawReasons, }; mod members; #[allow(deprecated)] pub use members::{AllowAll, DenyAll, Filter}; pub use members::{ - AsContains, ChangeMembers, Contains, ContainsLengthBound, ContainsPair, Everything, + AsContains, ChangeMembers, Contains, ContainsLengthBound, ContainsPair, Equals, Everything, EverythingBut, FromContainsPair, InitializeMembers, InsideBoth, IsInVec, Nothing, RankedMembers, SortedMembers, TheseExcept, }; @@ -60,7 +61,7 @@ pub use misc::{ DefensiveTruncateFrom, EnsureInherentsAreFirst, EqualPrivilegeOnly, EstimateCallFee, ExecuteBlock, ExtrinsicCall, Get, GetBacking, GetDefault, HandleLifetime, IsSubType, IsType, Len, OffchainWorker, OnKilledAccount, OnNewAccount, PrivilegeCmp, SameOrOther, Time, - TryCollect, TryDrop, TypedGet, UnixTime, WrapperKeepOpaque, WrapperOpaque, + TryCollect, TryDrop, TypedGet, UnixTime, VariantCount, WrapperKeepOpaque, WrapperOpaque, }; #[allow(deprecated)] pub use misc::{PreimageProvider, PreimageRecipient}; @@ -83,8 +84,8 @@ mod hooks; #[allow(deprecated)] pub use hooks::GenesisBuild; pub use hooks::{ - BuildGenesisConfig, Hooks, IntegrityTest, OnFinalize, OnGenesis, OnIdle, OnInitialize, - OnRuntimeUpgrade, OnTimestampSet, + BeforeAllRuntimeMigrations, BuildGenesisConfig, Hooks, IntegrityTest, OnFinalize, OnGenesis, + OnIdle, OnInitialize, OnRuntimeUpgrade, OnTimestampSet, }; pub mod schedule; @@ -112,7 +113,8 @@ pub use preimages::{Bounded, BoundedInline, FetchResult, QueryPreimage, StorePre mod messages; pub use messages::{ EnqueueMessage, EnqueueWithOrigin, ExecuteOverweightError, HandleMessage, NoopServiceQueues, - ProcessMessage, ProcessMessageError, QueuePausedQuery, ServiceQueues, TransformOrigin, + ProcessMessage, ProcessMessageError, QueueFootprint, QueuePausedQuery, ServiceQueues, + TransformOrigin, }; mod safe_mode; @@ -124,4 +126,7 @@ pub use tx_pause::{TransactionPause, TransactionPauseError}; #[cfg(feature = "try-runtime")] mod try_runtime; #[cfg(feature = "try-runtime")] -pub use try_runtime::{Select as TryStateSelect, TryState, UpgradeCheckSelect}; +pub use try_runtime::{ + Select as TryStateSelect, TryDecodeEntireStorage, TryDecodeEntireStorageError, TryState, + UpgradeCheckSelect, +}; diff --git a/substrate/frame/support/src/traits/hooks.rs b/substrate/frame/support/src/traits/hooks.rs index 6acecb8928d8fa76ab122b299d27f5e626c0e6f3..20788ce932d8b397990767c6ae8a0d82016cccbd 100644 --- a/substrate/frame/support/src/traits/hooks.rs +++ b/substrate/frame/support/src/traits/hooks.rs @@ -99,6 +99,19 @@ pub trait OnGenesis { fn on_genesis() {} } +/// Implemented by pallets, allows defining logic to run prior to any [`OnRuntimeUpgrade`] logic. +/// +/// This hook is intended to be used internally in FRAME and not be exposed to FRAME developers. +/// +/// It is defined as a seperate trait from [`OnRuntimeUpgrade`] precisely to not pollute the public +/// API. 
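// Illustrative sketch, not part of the diff above: opting a type into the hook defined right
// below. The method has a default no-op body returning `Weight::zero()`, so an empty impl is
// enough; `NoopBefore` is an assumed name. When used in a tuple, the weights of all elements
// are summed, as the tuple implementation further down shows.
pub struct NoopBefore;
impl frame_support::traits::BeforeAllRuntimeMigrations for NoopBefore {}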
+pub trait BeforeAllRuntimeMigrations { + /// Something that should happen before runtime migrations are executed. + fn before_all_runtime_migrations() -> Weight { + Weight::zero() + } +} + /// See [`Hooks::on_runtime_upgrade`]. pub trait OnRuntimeUpgrade { /// See [`Hooks::on_runtime_upgrade`]. @@ -150,10 +163,24 @@ pub trait OnRuntimeUpgrade { } } +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] +impl BeforeAllRuntimeMigrations for Tuple { + /// Implements the default behavior of + /// [`BeforeAllRuntimeMigrations::before_all_runtime_migrations`] for tuples. + fn before_all_runtime_migrations() -> Weight { + let mut weight = Weight::zero(); + for_tuples!( #( weight = weight.saturating_add(Tuple::before_all_runtime_migrations()); )* ); + weight + } +} + #[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] #[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] #[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] impl OnRuntimeUpgrade for Tuple { + /// Implements the default behavior of [`OnRuntimeUpgrade::on_runtime_upgrade`] for tuples. fn on_runtime_upgrade() -> Weight { let mut weight = Weight::zero(); for_tuples!( #( weight = weight.saturating_add(Tuple::on_runtime_upgrade()); )* ); diff --git a/substrate/frame/support/src/traits/members.rs b/substrate/frame/support/src/traits/members.rs index fbba742ebeb4395b4ec17ad6656956929f8fec76..d667eaa7e9d8d2b46b39254fccf5e0c6052c9751 100644 --- a/substrate/frame/support/src/traits/members.rs +++ b/substrate/frame/support/src/traits/members.rs @@ -137,6 +137,14 @@ impl, Those: ContainsPair> ContainsPair(PhantomData); +impl> Contains for Equals { + fn contains(t: &X) -> bool { + t == &T::get() + } +} + /// Create a type which implements the `Contains` trait for a particular type with syntax similar /// to `matches!`. #[macro_export] diff --git a/substrate/frame/support/src/traits/messages.rs b/substrate/frame/support/src/traits/messages.rs index 0db163e072b179100a44c0c39dcc76b35c02b997..58815b107c829845a8a1a8df6c82a7dde4203ff4 100644 --- a/substrate/frame/support/src/traits/messages.rs +++ b/substrate/frame/support/src/traits/messages.rs @@ -116,6 +116,15 @@ impl ServiceQueues for NoopServiceQueues { } } +/// The resource footprint of a queue. +#[derive(Default, Copy, Clone, Eq, PartialEq, RuntimeDebug)] +pub struct QueueFootprint { + /// The number of pages in the queue (including overweight pages). + pub pages: u32, + /// The storage footprint of the queue (including overweight messages). + pub storage: Footprint, +} + /// Can enqueue messages for multiple origins. pub trait EnqueueMessage { /// The maximal length any enqueued message may have. @@ -134,7 +143,7 @@ pub trait EnqueueMessage { fn sweep_queue(origin: Origin); /// Return the state footprint of the given queue. 
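// Illustrative sketch, not part of the diff above: the new `Equals` filter added earlier in
// this hunk contains exactly the value returned by its `Get` parameter. The test name is an
// assumption.
#[test]
fn equals_contains_only_the_configured_value() {
	use frame_support::traits::{ConstU32, Contains, Equals};

	assert!(<Equals<ConstU32<7>> as Contains<u32>>::contains(&7));
	assert!(!<Equals<ConstU32<7>> as Contains<u32>>::contains(&8));
}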
- fn footprint(origin: Origin) -> Footprint; + fn footprint(origin: Origin) -> QueueFootprint; } impl EnqueueMessage for () { @@ -146,8 +155,8 @@ impl EnqueueMessage for () { ) { } fn sweep_queue(_: Origin) {} - fn footprint(_: Origin) -> Footprint { - Footprint::default() + fn footprint(_: Origin) -> QueueFootprint { + QueueFootprint::default() } } @@ -173,7 +182,7 @@ impl, O: MaxEncodedLen, N: MaxEncodedLen, C: Convert> E::sweep_queue(C::convert(origin)); } - fn footprint(origin: N) -> Footprint { + fn footprint(origin: N) -> QueueFootprint { E::footprint(C::convert(origin)) } } @@ -195,7 +204,7 @@ pub trait HandleMessage { fn sweep_queue(); /// Return the state footprint of the queue. - fn footprint() -> Footprint; + fn footprint() -> QueueFootprint; } /// Adapter type to transform an [`EnqueueMessage`] with an origin into a [`HandleMessage`] impl. @@ -220,7 +229,7 @@ where E::sweep_queue(O::get()); } - fn footprint() -> Footprint { + fn footprint() -> QueueFootprint { E::footprint(O::get()) } } @@ -231,8 +240,14 @@ pub trait QueuePausedQuery { fn is_paused(origin: &Origin) -> bool; } -impl QueuePausedQuery for () { - fn is_paused(_: &Origin) -> bool { +#[impl_trait_for_tuples::impl_for_tuples(8)] +impl QueuePausedQuery for Tuple { + fn is_paused(origin: &Origin) -> bool { + for_tuples!( #( + if Tuple::is_paused(origin) { + return true; + } + )* ); false } } diff --git a/substrate/frame/support/src/traits/metadata.rs b/substrate/frame/support/src/traits/metadata.rs index bd29b600916138828d7b78a4f3c4a416598c4f3b..0af8d06719fe3543f95fcbd638f4340b321aae49 100644 --- a/substrate/frame/support/src/traits/metadata.rs +++ b/substrate/frame/support/src/traits/metadata.rs @@ -222,6 +222,23 @@ impl StorageVersion { crate::storage::unhashed::get_or_default(&key) } + + /// Returns if the storage version key for the given pallet exists in storage. + /// + /// See [`STORAGE_VERSION_STORAGE_KEY_POSTFIX`] on how this key is built. + /// + /// # Panics + /// + /// This function will panic iff `Pallet` can not be found by `PalletInfo`. + /// In a runtime that is put together using + /// [`construct_runtime!`](crate::construct_runtime) this should never happen. + /// + /// It will also panic if this function isn't executed in an externalities + /// provided environment. + pub fn exists() -> bool { + let key = Self::storage_key::
<P>
(); + crate::storage::unhashed::exists(&key) + } } impl PartialEq for StorageVersion { diff --git a/substrate/frame/support/src/traits/misc.rs b/substrate/frame/support/src/traits/misc.rs index eb704de4353c7318ce3ae709c60b0a9dc1fafe76..78032cc0a9407cb2505863ebac39c3f1bd0aa4f6 100644 --- a/substrate/frame/support/src/traits/misc.rs +++ b/substrate/frame/support/src/traits/misc.rs @@ -36,6 +36,18 @@ pub const DEFENSIVE_OP_PUBLIC_ERROR: &str = "a defensive failure has been trigge #[doc(hidden)] pub const DEFENSIVE_OP_INTERNAL_ERROR: &str = "Defensive failure has been triggered!"; +/// Trait to get the number of variants in any enum. +/// +/// NOTE: can be removed once is stable. +pub trait VariantCount { + /// Get the number of variants. + const VARIANT_COUNT: u32; +} + +impl VariantCount for () { + const VARIANT_COUNT: u32 = 0; +} + /// Generic function to mark an execution path as ONLY defensive. /// /// Similar to mark a match arm or `if/else` branch as `unreachable!`. @@ -43,7 +55,7 @@ pub const DEFENSIVE_OP_INTERNAL_ERROR: &str = "Defensive failure has been trigge macro_rules! defensive { () => { frame_support::__private::log::error!( - target: "runtime", + target: "runtime::defensive", "{}", $crate::traits::DEFENSIVE_OP_PUBLIC_ERROR ); @@ -51,7 +63,7 @@ macro_rules! defensive { }; ($error:expr $(,)?) => { frame_support::__private::log::error!( - target: "runtime", + target: "runtime::defensive", "{}: {:?}", $crate::traits::DEFENSIVE_OP_PUBLIC_ERROR, $error @@ -60,7 +72,7 @@ macro_rules! defensive { }; ($error:expr, $proof:expr $(,)?) => { frame_support::__private::log::error!( - target: "runtime", + target: "runtime::defensive", "{}: {:?}: {:?}", $crate::traits::DEFENSIVE_OP_PUBLIC_ERROR, $error, diff --git a/substrate/frame/support/src/traits/storage.rs b/substrate/frame/support/src/traits/storage.rs index fe1b9bf13bb0209be2f44d5017e2a1af8345b58d..9e467aea4220dd7296a3ccad8236f122ff39e7d8 100644 --- a/substrate/frame/support/src/traits/storage.rs +++ b/substrate/frame/support/src/traits/storage.rs @@ -93,9 +93,7 @@ pub trait StorageInstance { } /// Metadata about storage from the runtime. -#[derive( - codec::Encode, codec::Decode, RuntimeDebug, Eq, PartialEq, Clone, scale_info::TypeInfo, -)] +#[derive(Debug, codec::Encode, codec::Decode, Eq, PartialEq, Clone, scale_info::TypeInfo)] pub struct StorageInfo { /// Encoded string of pallet name. 
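// Illustrative sketch, not part of the diff above: a manual implementation of the new
// `VariantCount` trait added earlier in this hunk, used until `core::mem::variant_count` is
// stable. `HoldReason` is an assumed example enum.
pub enum HoldReason {
	Staking,
	Governance,
}

impl frame_support::traits::VariantCount for HoldReason {
	// Must be kept in sync with the number of variants above.
	const VARIANT_COUNT: u32 = 2;
}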
pub pallet_name: Vec, diff --git a/substrate/frame/support/src/traits/tokens/fungible/imbalance.rs b/substrate/frame/support/src/traits/tokens/fungible/imbalance.rs index 32a63fd25b291b9f77dd567f0b369cb4c97eef7f..995797bc8f66b10861a68831984302057ecfbe34 100644 --- a/substrate/frame/support/src/traits/tokens/fungible/imbalance.rs +++ b/substrate/frame/support/src/traits/tokens/fungible/imbalance.rs @@ -113,6 +113,13 @@ impl, OppositeOnDrop: HandleImbalance sp_std::mem::forget(self); (Imbalance::new(first), Imbalance::new(second)) } + + fn extract(&mut self, amount: B) -> Self { + let new = self.amount.min(amount); + self.amount = self.amount - new; + Imbalance::new(new) + } + fn merge(mut self, other: Self) -> Self { self.amount = self.amount.saturating_add(other.amount); sp_std::mem::forget(other); diff --git a/substrate/frame/support/src/traits/tokens/fungible/item_of.rs b/substrate/frame/support/src/traits/tokens/fungible/item_of.rs index 88b9de7fdbf6461e3c89029a4d6d20734d95f9e9..636866ab93c9ba76379a9fe6b43a371e0819ea9c 100644 --- a/substrate/frame/support/src/traits/tokens/fungible/item_of.rs +++ b/substrate/frame/support/src/traits/tokens/fungible/item_of.rs @@ -219,7 +219,7 @@ impl< impl< F: fungibles::Mutate, A: Get<>::AssetId>, - AccountId, + AccountId: Eq, > Mutate for ItemOf { fn mint_into(who: &AccountId, amount: Self::Balance) -> Result { @@ -354,7 +354,7 @@ impl< Balance, AssetIdType, AssetId: Get, - Handler: crate::traits::tokens::fungibles::HandleImbalanceDrop, + Handler: crate::traits::fungibles::HandleImbalanceDrop, > HandleImbalanceDrop for ConvertImbalanceDropHandler { diff --git a/substrate/frame/support/src/traits/tokens/fungible/regular.rs b/substrate/frame/support/src/traits/tokens/fungible/regular.rs index fe2a1f2a14a6604bee2aaa85f80a3349d3b80043..f2fb5c5f7c24e4fa3517c0f4dd331e874512e36e 100644 --- a/substrate/frame/support/src/traits/tokens/fungible/regular.rs +++ b/substrate/frame/support/src/traits/tokens/fungible/regular.rs @@ -235,7 +235,10 @@ pub trait Unbalanced: Inspect { } /// Trait for providing a basic fungible asset. -pub trait Mutate: Inspect + Unbalanced { +pub trait Mutate: Inspect + Unbalanced +where + AccountId: Eq, +{ /// Increase the balance of `who` by exactly `amount`, minting new tokens. If that isn't /// possible then an `Err` is returned and nothing is changed. fn mint_into(who: &AccountId, amount: Self::Balance) -> Result { @@ -303,6 +306,9 @@ pub trait Mutate: Inspect + Unbalanced { } /// Transfer funds from one account into another. + /// + /// A transfer where the source and destination account are identical is treated as No-OP after + /// checking the preconditions. fn transfer( source: &AccountId, dest: &AccountId, @@ -311,6 +317,10 @@ pub trait Mutate: Inspect + Unbalanced { ) -> Result { let _extra = Self::can_withdraw(source, amount).into_result(preservation != Expendable)?; Self::can_deposit(dest, amount, Extant).into_result()?; + if source == dest { + return Ok(amount) + } + Self::decrease_balance(source, amount, BestEffort, preservation, Polite)?; // This should never fail as we checked `can_deposit` earlier. But we do a best-effort // anyway. 
diff --git a/substrate/frame/support/src/traits/tokens/fungibles/imbalance.rs b/substrate/frame/support/src/traits/tokens/fungibles/imbalance.rs index 9f660a5f16894bc00c6eae6cd49eb4218c3f10e8..7c0d7721a2e60908f1a5a4be8a8c876ff2bd1ae2 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/imbalance.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/imbalance.rs @@ -109,6 +109,15 @@ impl< sp_std::mem::forget(self); (Imbalance::new(asset.clone(), first), Imbalance::new(asset, second)) } + + /// Mutate `self` by extracting a new instance with at most `amount` value, reducing `self` + /// accordingly. + pub fn extract(&mut self, amount: B) -> Self { + let new = self.amount.min(amount); + self.amount = self.amount - new; + Imbalance::new(self.asset.clone(), new) + } + pub fn merge(mut self, other: Self) -> Result { if self.asset == other.asset { self.amount = self.amount.saturating_add(other.amount); diff --git a/substrate/frame/support/src/traits/tokens/fungibles/regular.rs b/substrate/frame/support/src/traits/tokens/fungibles/regular.rs index 7c39acdf42416d47cb27b880477e52136a97a2e6..a2fc4e55095222257953099af7b5795da9a2321a 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/regular.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/regular.rs @@ -250,7 +250,10 @@ pub trait Unbalanced: Inspect { } /// Trait for providing a basic fungible asset. -pub trait Mutate: Inspect + Unbalanced { +pub trait Mutate: Inspect + Unbalanced +where + AccountId: Eq, +{ /// Increase the balance of `who` by exactly `amount`, minting new tokens. If that isn't /// possible then an `Err` is returned and nothing is changed. fn mint_into( @@ -353,6 +356,9 @@ pub trait Mutate: Inspect + Unbalanced { } /// Transfer funds from one account into another. + /// + /// A transfer where the source and destination account are identical is treated as No-OP after + /// checking the preconditions. fn transfer( asset: Self::AssetId, source: &AccountId, @@ -363,6 +369,10 @@ pub trait Mutate: Inspect + Unbalanced { let _extra = Self::can_withdraw(asset.clone(), source, amount) .into_result(preservation != Expendable)?; Self::can_deposit(asset.clone(), dest, amount, Extant).into_result()?; + if source == dest { + return Ok(amount) + } + Self::decrease_balance(asset.clone(), source, amount, BestEffort, preservation, Polite)?; // This should never fail as we checked `can_deposit` earlier. But we do a best-effort // anyway. diff --git a/substrate/frame/support/src/traits/tokens/imbalance.rs b/substrate/frame/support/src/traits/tokens/imbalance.rs index 403321725042c6899c16d90f1ba1652965a336cb..9dd8531324dc9d4a2845fd40b09a0d32746a99b6 100644 --- a/substrate/frame/support/src/traits/tokens/imbalance.rs +++ b/substrate/frame/support/src/traits/tokens/imbalance.rs @@ -25,7 +25,7 @@ use sp_std::ops::Div; mod on_unbalanced; mod signed_imbalance; mod split_two_ways; -pub use on_unbalanced::OnUnbalanced; +pub use on_unbalanced::{OnUnbalanced, ResolveAssetTo, ResolveTo}; pub use signed_imbalance::SignedImbalance; pub use split_two_ways::SplitTwoWays; @@ -72,6 +72,10 @@ pub trait Imbalance: Sized + TryDrop + Default { /// is guaranteed to be at most `amount` and the second will be the remainder. fn split(self, amount: Balance) -> (Self, Self); + /// Mutate `self` by extracting a new instance with at most `amount` value, reducing `self` + /// accordingly. 
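// Illustrative sketch, not part of the diff above: the new `extract` method splits off at
// most `amount` from an imbalance in place, which is useful when only part of a credit
// should be routed elsewhere. `take_fee` is an assumed helper name.
fn take_fee<Balance, I: frame_support::traits::Imbalance<Balance>>(credit: &mut I, fee: Balance) -> I {
	// `credit` keeps whatever is left after at most `fee` has been moved out of it.
	credit.extract(fee)
}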
+ fn extract(&mut self, amount: Balance) -> Self; + /// Consume `self` and return two independent instances; the amounts returned will be in /// approximately the same ratio as `first`:`second`. /// @@ -190,6 +194,9 @@ impl Imbalance for () { fn split(self, _: Balance) -> (Self, Self) { ((), ()) } + fn extract(&mut self, _: Balance) -> Self { + () + } fn ration(self, _: u32, _: u32) -> (Self, Self) where Balance: From + Saturating + Div, diff --git a/substrate/frame/support/src/traits/tokens/imbalance/on_unbalanced.rs b/substrate/frame/support/src/traits/tokens/imbalance/on_unbalanced.rs index 27bfe46e181e2255204ce4b377bd6a561e8bbdfc..ecb8de8841f91ca65a05eca687da5eaa8164a4a2 100644 --- a/substrate/frame/support/src/traits/tokens/imbalance/on_unbalanced.rs +++ b/substrate/frame/support/src/traits/tokens/imbalance/on_unbalanced.rs @@ -17,7 +17,9 @@ //! Trait for handling imbalances. -use crate::traits::misc::TryDrop; +use frame_support::traits::{fungible, fungibles, misc::TryDrop}; +use sp_core::TypedGet; +use sp_std::marker::PhantomData; /// Handler for when some currency "account" decreased in balance for /// some reason. @@ -53,3 +55,29 @@ pub trait OnUnbalanced { } impl OnUnbalanced for () {} + +/// Resolves received asset credit to account `A`, implementing [`OnUnbalanced`]. +/// +/// Credits that cannot be resolved to account `A` are dropped. This may occur if the account for +/// address `A` does not exist and the existential deposit requirement is not met. +pub struct ResolveTo(PhantomData<(A, F)>); +impl> OnUnbalanced> + for ResolveTo +{ + fn on_nonzero_unbalanced(credit: fungible::Credit) { + let _ = F::resolve(&A::get(), credit).map_err(|c| drop(c)); + } +} + +/// Resolves received asset credit to account `A`, implementing [`OnUnbalanced`]. +/// +/// Credits that cannot be resolved to account `A` are dropped. This may occur if the account for +/// address `A` does not exist and the existential deposit requirement is not met. +pub struct ResolveAssetTo(PhantomData<(A, F)>); +impl> OnUnbalanced> + for ResolveAssetTo +{ + fn on_nonzero_unbalanced(credit: fungibles::Credit) { + let _ = F::resolve(&A::get(), credit).map_err(|c| drop(c)); + } +} diff --git a/substrate/frame/support/src/traits/tokens/nonfungible_v2.rs b/substrate/frame/support/src/traits/tokens/nonfungible_v2.rs index c4463e0070f9a1df2183e982043e63cd309180ce..05f76e2859d2ead6b998d0c63304e2fa818db7a2 100644 --- a/substrate/frame/support/src/traits/tokens/nonfungible_v2.rs +++ b/substrate/frame/support/src/traits/tokens/nonfungible_v2.rs @@ -119,7 +119,7 @@ pub trait InspectEnumerable: Inspect { } /// Trait for providing an interface for NFT-like items which may be minted, burned and/or have -/// attributes set on them. +/// attributes and metadata set on them. pub trait Mutate: Inspect { /// Mint some `item` to be owned by `who`. /// @@ -158,6 +158,13 @@ pub trait Mutate: Inspect { key.using_encoded(|k| value.using_encoded(|v| Self::set_attribute(item, k, v))) } + /// Set the metadata `data` of an `item`. + /// + /// By default, this is not a supported operation. + fn set_metadata(_who: &AccountId, _item: &Self::ItemId, _data: &[u8]) -> DispatchResult { + Err(TokenError::Unsupported.into()) + } + /// Clear attribute of `item`'s `key`. /// /// By default, this is not a supported operation. @@ -171,6 +178,13 @@ pub trait Mutate: Inspect { fn clear_typed_attribute(item: &Self::ItemId, key: &K) -> DispatchResult { key.using_encoded(|k| Self::clear_attribute(item, k)) } + + /// Clear the metadata of an `item`. 
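The `ResolveTo`/`ResolveAssetTo` adapters added above turn "credit this fixed account" into an `OnUnbalanced` handler, silently dropping credits that cannot be resolved (e.g. when the target account would stay below the existential deposit). A standalone sketch of the pattern with simplified stand-ins for `TypedGet`, `OnUnbalanced` and the credit type:

use std::marker::PhantomData;

// Simplified stand-ins for the frame_support traits used by `ResolveTo`.
trait TypedGet {
    type Type;
    fn get() -> Self::Type;
}

trait OnUnbalanced<Credit> {
    fn on_unbalanced(credit: Credit);
}

struct Credit(u64);

// Zero-sized adapter: all behaviour comes from the `A` type parameter.
struct ResolveTo<A>(PhantomData<A>);

impl<A: TypedGet<Type = &'static str>> OnUnbalanced<Credit> for ResolveTo<A> {
    fn on_unbalanced(credit: Credit) {
        // In frame_support this is `F::resolve(&A::get(), credit)`, with
        // unresolvable credits dropped instead of printed.
        println!("crediting {} with {}", A::get(), credit.0);
    }
}

struct TreasuryAccount;
impl TypedGet for TreasuryAccount {
    type Type = &'static str;
    fn get() -> &'static str { "treasury" }
}

fn main() {
    <ResolveTo<TreasuryAccount>>::on_unbalanced(Credit(25));
}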
+ /// + /// By default, this is not a supported operation. + fn clear_metadata(_who: &AccountId, _item: &Self::ItemId) -> DispatchResult { + Err(TokenError::Unsupported.into()) + } } /// Trait for transferring and controlling the transfer of non-fungible sets of items. @@ -212,7 +226,7 @@ impl< >::custom_attribute(account, &A::get(), item, key) } fn system_attribute(item: &Self::ItemId, key: &[u8]) -> Option> { - >::system_attribute(&A::get(), item, key) + >::system_attribute(&A::get(), Some(item), key) } fn typed_attribute(item: &Self::ItemId, key: &K) -> Option { >::typed_attribute(&A::get(), item, key) @@ -230,7 +244,7 @@ impl< ) } fn typed_system_attribute(item: &Self::ItemId, key: &K) -> Option { - >::typed_system_attribute(&A::get(), item, key) + >::typed_system_attribute(&A::get(), Some(item), key) } fn can_transfer(item: &Self::ItemId) -> bool { >::can_transfer(&A::get(), item) diff --git a/substrate/frame/support/src/traits/tokens/nonfungibles_v2.rs b/substrate/frame/support/src/traits/tokens/nonfungibles_v2.rs index ec064bdebf62ac2a356787f8fc302f275448116b..c0209b6d5123dbefb8e813d11f6478b00c9e82bb 100644 --- a/substrate/frame/support/src/traits/tokens/nonfungibles_v2.rs +++ b/substrate/frame/support/src/traits/tokens/nonfungibles_v2.rs @@ -75,12 +75,14 @@ pub trait Inspect { None } - /// Returns the system attribute value of `item` of `collection` corresponding to `key`. + /// Returns the system attribute value of `item` of `collection` corresponding to `key` if + /// `item` is `Some`. Otherwise, returns the system attribute value of `collection` + /// corresponding to `key`. /// /// By default this is `None`; no attributes are defined. fn system_attribute( _collection: &Self::CollectionId, - _item: &Self::ItemId, + _item: Option<&Self::ItemId>, _key: &[u8], ) -> Option> { None @@ -113,13 +115,14 @@ pub trait Inspect { .and_then(|v| V::decode(&mut &v[..]).ok()) } - /// Returns the strongly-typed system attribute value of `item` of `collection` corresponding to - /// `key`. + /// Returns the strongly-typed system attribute value of `item` corresponding to `key` if + /// `item` is `Some`. Otherwise, returns the strongly-typed system attribute value of + /// `collection` corresponding to `key`. /// /// By default this just attempts to use `system_attribute`. fn typed_system_attribute( collection: &Self::CollectionId, - item: &Self::ItemId, + item: Option<&Self::ItemId>, key: &K, ) -> Option { key.using_encoded(|d| Self::system_attribute(collection, item, d)) @@ -233,7 +236,7 @@ pub trait Destroy: Inspect { } /// Trait for providing an interface for multiple collections of NFT-like items which may be -/// minted, burned and/or have attributes set on them. +/// minted, burned and/or have attributes and metadata set on them. pub trait Mutate: Inspect { /// Mint some `item` of `collection` to be owned by `who`. /// @@ -307,6 +310,29 @@ pub trait Mutate: Inspect { }) } + /// Set the metadata `data` of an `item` of `collection`. + /// + /// By default, this is not a supported operation. + fn set_item_metadata( + _who: Option<&AccountId>, + _collection: &Self::CollectionId, + _item: &Self::ItemId, + _data: &[u8], + ) -> DispatchResult { + Err(TokenError::Unsupported.into()) + } + + /// Set the metadata `data` of a `collection`. + /// + /// By default, this is not a supported operation. 
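With `item` widened to `Option<&Self::ItemId>`, `system_attribute` can now answer collection-level queries as well: `Some(item)` reads an item attribute, `None` falls back to the collection. A toy sketch of that lookup rule (plain maps instead of pallet storage):

use std::collections::HashMap;

struct Store {
    collection_attrs: HashMap<Vec<u8>, Vec<u8>>,
    item_attrs: HashMap<(u32, Vec<u8>), Vec<u8>>,
}

impl Store {
    // `Some(item)` -> item-level attribute, `None` -> collection-level attribute.
    fn system_attribute(&self, item: Option<u32>, key: &[u8]) -> Option<Vec<u8>> {
        match item {
            Some(item) => self.item_attrs.get(&(item, key.to_vec())).cloned(),
            None => self.collection_attrs.get(key).cloned(),
        }
    }
}

fn main() {
    let store = Store {
        collection_attrs: HashMap::from([(b"owner".to_vec(), b"alice".to_vec())]),
        item_attrs: HashMap::from([((7u32, b"rarity".to_vec()), b"rare".to_vec())]),
    };
    assert_eq!(store.system_attribute(Some(7), b"rarity"), Some(b"rare".to_vec()));
    assert_eq!(store.system_attribute(None, b"owner"), Some(b"alice".to_vec()));
}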
+ fn set_collection_metadata( + _who: Option<&AccountId>, + _collection: &Self::CollectionId, + _data: &[u8], + ) -> DispatchResult { + Err(TokenError::Unsupported.into()) + } + /// Clear attribute of `item` of `collection`'s `key`. /// /// By default, this is not a supported operation. @@ -345,6 +371,27 @@ pub trait Mutate: Inspect { ) -> DispatchResult { key.using_encoded(|k| Self::clear_collection_attribute(collection, k)) } + + /// Clear the metadata of an `item` of `collection`. + /// + /// By default, this is not a supported operation. + fn clear_item_metadata( + _who: Option<&AccountId>, + _collection: &Self::CollectionId, + _item: &Self::ItemId, + ) -> DispatchResult { + Err(TokenError::Unsupported.into()) + } + + /// Clear the metadata of a `collection`. + /// + /// By default, this is not a supported operation. + fn clear_collection_metadata( + _who: Option<&AccountId>, + _collection: &Self::CollectionId, + ) -> DispatchResult { + Err(TokenError::Unsupported.into()) + } } /// Trait for transferring non-fungible sets of items. @@ -370,3 +417,27 @@ pub trait Transfer: Inspect { Err(TokenError::Unsupported.into()) } } + +/// Trait for trading non-fungible items. +pub trait Trading: Inspect { + /// Allows `buyer` to buy an `item` of `collection` if it's up for sale with a `bid_price` to + /// pay. + fn buy_item( + collection: &Self::CollectionId, + item: &Self::ItemId, + buyer: &AccountId, + bid_price: &ItemPrice, + ) -> DispatchResult; + + /// Sets the item price for `item` to make it available for sale. + fn set_price( + collection: &Self::CollectionId, + item: &Self::ItemId, + sender: &AccountId, + price: Option, + whitelisted_buyer: Option, + ) -> DispatchResult; + + /// Returns the item price of `item` or `None` if the item is not for sale. + fn item_price(collection: &Self::CollectionId, item: &Self::ItemId) -> Option; +} diff --git a/substrate/frame/support/src/traits/tokens/pay.rs b/substrate/frame/support/src/traits/tokens/pay.rs index 18af7e5e54838020092a2fee52fe1a78a644858a..4d1d80b5b507f178e1192b7eed6c21e9800ee507 100644 --- a/substrate/frame/support/src/traits/tokens/pay.rs +++ b/substrate/frame/support/src/traits/tokens/pay.rs @@ -82,8 +82,13 @@ pub enum PaymentStatus { } /// Simple implementation of `Pay` which makes a payment from a "pot" - i.e. a single account. -pub struct PayFromAccount(sp_std::marker::PhantomData<(F, A)>); -impl> Pay for PayFromAccount { +pub struct PayFromAccount(core::marker::PhantomData<(F, A)>); +impl Pay for PayFromAccount +where + A: TypedGet, + F: fungible::Mutate, + A::Type: Eq, +{ type Balance = F::Balance; type Beneficiary = A::Type; type AssetKind = (); @@ -110,11 +115,12 @@ impl> Pay for PayFromAccount { /// Simple implementation of `Pay` for assets which makes a payment from a "pot" - i.e. a single /// account. 
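The new `Trading` trait captures the usual sale flow: `set_price` lists an item (or delists it with `None`), `item_price` reads the ask, and `buy_item` succeeds only if the bid covers it. A standalone toy sketch of that flow (ignoring origins, whitelisted buyers and the actual balance transfer):

use std::collections::HashMap;

struct Market {
    prices: HashMap<u32, u64>,
    owners: HashMap<u32, &'static str>,
}

impl Market {
    fn set_price(&mut self, item: u32, price: Option<u64>) {
        match price {
            Some(p) => { self.prices.insert(item, p); },
            None => { self.prices.remove(&item); },
        }
    }

    fn item_price(&self, item: u32) -> Option<u64> {
        self.prices.get(&item).copied()
    }

    fn buy_item(&mut self, item: u32, buyer: &'static str, bid: u64) -> Result<(), &'static str> {
        let ask = self.item_price(item).ok_or("not for sale")?;
        if bid < ask {
            return Err("bid too low");
        }
        // Ownership changes hands and the listing is removed.
        self.owners.insert(item, buyer);
        self.prices.remove(&item);
        Ok(())
    }
}

fn main() {
    let mut market = Market { prices: HashMap::new(), owners: HashMap::new() };
    market.set_price(1, Some(100));
    assert_eq!(market.item_price(1), Some(100));
    assert_eq!(market.buy_item(1, "bob", 90), Err("bid too low"));
    assert_eq!(market.buy_item(1, "bob", 100), Ok(()));
    assert_eq!(market.item_price(1), None);
}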
-pub struct PayAssetFromAccount(sp_std::marker::PhantomData<(F, A)>); +pub struct PayAssetFromAccount(core::marker::PhantomData<(F, A)>); impl frame_support::traits::tokens::Pay for PayAssetFromAccount where A: TypedGet, F: fungibles::Mutate + fungibles::Create, + A::Type: Eq, { type Balance = F::Balance; type Beneficiary = A::Type; diff --git a/substrate/frame/support/src/traits/try_runtime/decode_entire_state.rs b/substrate/frame/support/src/traits/try_runtime/decode_entire_state.rs new file mode 100644 index 0000000000000000000000000000000000000000..afbf291a0bbb92eed7b461110c3f33b43c3a9de3 --- /dev/null +++ b/substrate/frame/support/src/traits/try_runtime/decode_entire_state.rs @@ -0,0 +1,498 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Types to check that the entire storage can be decoded. + +use super::StorageInstance; +use crate::{ + storage::types::{ + CountedStorageMapInstance, CountedStorageNMapInstance, Counter, KeyGenerator, + QueryKindTrait, + }, + traits::{PartialStorageInfoTrait, StorageInfo}, + StorageHasher, +}; +use codec::{Decode, DecodeAll, FullCodec}; +use impl_trait_for_tuples::impl_for_tuples; +use sp_core::Get; +use sp_std::prelude::*; + +/// Decode the entire data under the given storage type. +/// +/// For values, this is trivial. For all kinds of maps, it should decode all the values associated +/// with all keys existing in the map. +/// +/// Tuple implementations are provided and simply decode each type in the tuple, summing up the +/// decoded bytes if `Ok(_)`. +pub trait TryDecodeEntireStorage { + /// Decode the entire data under the given storage, returning `Ok(bytes_decoded)` if success. + fn try_decode_entire_state() -> Result>; +} + +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] +impl TryDecodeEntireStorage for Tuple { + fn try_decode_entire_state() -> Result> { + let mut errors = Vec::new(); + let mut len = 0usize; + + for_tuples!(#( + match Tuple::try_decode_entire_state() { + Ok(bytes) => len += bytes, + Err(errs) => errors.extend(errs), + } + )*); + + if errors.is_empty() { + Ok(len) + } else { + Err(errors) + } + } +} + +/// A value could not be decoded. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct TryDecodeEntireStorageError { + /// The key of the undecodable value. + pub key: Vec, + /// The raw value. + pub raw: Option>, + /// The storage info of the key. 
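The tuple implementation of `TryDecodeEntireStorage` keeps going after a failure: every member reports either bytes decoded or a list of errors, the byte counts are summed and the errors concatenated. A standalone sketch of that aggregation for a two-element tuple (string errors instead of `TryDecodeEntireStorageError`):

trait TryDecodeEntireStorage {
    fn try_decode_entire_state() -> Result<usize, Vec<String>>;
}

struct PalletA;
struct PalletB;

impl TryDecodeEntireStorage for PalletA {
    fn try_decode_entire_state() -> Result<usize, Vec<String>> { Ok(12) }
}
impl TryDecodeEntireStorage for PalletB {
    fn try_decode_entire_state() -> Result<usize, Vec<String>> {
        Err(vec!["undecodable value under PalletB".into()])
    }
}

// Sum bytes across members, collect every error instead of stopping early.
impl<A: TryDecodeEntireStorage, B: TryDecodeEntireStorage> TryDecodeEntireStorage for (A, B) {
    fn try_decode_entire_state() -> Result<usize, Vec<String>> {
        let mut errors = Vec::new();
        let mut len = 0usize;
        for res in [A::try_decode_entire_state(), B::try_decode_entire_state()] {
            match res {
                Ok(bytes) => len += bytes,
                Err(errs) => errors.extend(errs),
            }
        }
        if errors.is_empty() { Ok(len) } else { Err(errors) }
    }
}

fn main() {
    assert_eq!(<(PalletA, PalletA)>::try_decode_entire_state(), Ok(24));
    assert!(<(PalletA, PalletB)>::try_decode_entire_state().is_err());
}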
+ pub info: StorageInfo, +} + +impl core::fmt::Display for TryDecodeEntireStorageError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!( + f, + "Failed to decode storage item `{}::{}`", + &sp_std::str::from_utf8(&self.info.pallet_name).unwrap_or(""), + &sp_std::str::from_utf8(&self.info.storage_name).unwrap_or(""), + ) + } +} + +/// Decode all the values based on the prefix of `info` to `V`. +/// +/// Basically, it decodes and sums up all the values who's key start with `info.prefix`. For values, +/// this would be the value itself. For all sorts of maps, this should be all map items in the +/// absence of key collision. +fn decode_storage_info( + info: StorageInfo, +) -> Result> { + let mut next_key = info.prefix.clone(); + let mut decoded = 0; + + let decode_key = |key: &[u8]| match sp_io::storage::get(key) { + None => Ok(0), + Some(bytes) => { + let len = bytes.len(); + let _ = ::decode_all(&mut bytes.as_ref()).map_err(|_| { + vec![TryDecodeEntireStorageError { + key: key.to_vec(), + raw: Some(bytes.to_vec()), + info: info.clone(), + }] + })?; + + Ok::>(len) + }, + }; + + decoded += decode_key(&next_key)?; + loop { + match sp_io::storage::next_key(&next_key) { + Some(key) if key.starts_with(&info.prefix) => { + decoded += decode_key(&key)?; + next_key = key; + }, + _ => break, + } + } + + Ok(decoded) +} + +impl TryDecodeEntireStorage + for crate::storage::types::StorageValue +where + Prefix: StorageInstance, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: Get + 'static, +{ + fn try_decode_entire_state() -> Result> { + let info = Self::partial_storage_info() + .first() + .cloned() + .expect("Value has only one storage info; qed"); + decode_storage_info::(info) + } +} + +impl TryDecodeEntireStorage + for crate::storage::types::StorageMap +where + Prefix: StorageInstance, + Hasher: StorageHasher, + Key: FullCodec, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: Get + 'static, + MaxValues: Get>, +{ + fn try_decode_entire_state() -> Result> { + let info = Self::partial_storage_info() + .first() + .cloned() + .expect("Map has only one storage info; qed"); + decode_storage_info::(info) + } +} + +impl TryDecodeEntireStorage + for crate::storage::types::CountedStorageMap< + Prefix, + Hasher, + Key, + Value, + QueryKind, + OnEmpty, + MaxValues, + > where + Prefix: CountedStorageMapInstance, + Hasher: StorageHasher, + Key: FullCodec, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: Get + 'static, + MaxValues: Get>, +{ + fn try_decode_entire_state() -> Result> { + let (map_info, counter_info) = match &Self::partial_storage_info()[..] 
{ + [a, b] => (a.clone(), b.clone()), + _ => panic!("Counted map has two storage info items; qed"), + }; + let mut decoded = decode_storage_info::(counter_info)?; + decoded += decode_storage_info::(map_info)?; + Ok(decoded) + } +} + +impl + TryDecodeEntireStorage + for crate::storage::types::StorageDoubleMap< + Prefix, + Hasher1, + Key1, + Hasher2, + Key2, + Value, + QueryKind, + OnEmpty, + MaxValues, + > where + Prefix: StorageInstance, + Hasher1: StorageHasher, + Key1: FullCodec, + Hasher2: StorageHasher, + Key2: FullCodec, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: Get + 'static, + MaxValues: Get>, +{ + fn try_decode_entire_state() -> Result> { + let info = Self::partial_storage_info() + .first() + .cloned() + .expect("Double-map has only one storage info; qed"); + decode_storage_info::(info) + } +} + +impl TryDecodeEntireStorage + for crate::storage::types::StorageNMap +where + Prefix: StorageInstance, + Key: KeyGenerator, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: Get + 'static, + MaxValues: Get>, +{ + fn try_decode_entire_state() -> Result> { + let info = Self::partial_storage_info() + .first() + .cloned() + .expect("N-map has only one storage info; qed"); + decode_storage_info::(info) + } +} + +impl TryDecodeEntireStorage + for crate::storage::types::CountedStorageNMap +where + Prefix: CountedStorageNMapInstance, + Key: KeyGenerator, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: Get + 'static, + MaxValues: Get>, +{ + fn try_decode_entire_state() -> Result> { + let (map_info, counter_info) = match &Self::partial_storage_info()[..] { + [a, b] => (a.clone(), b.clone()), + _ => panic!("Counted NMap has two storage info items; qed"), + }; + + let mut decoded = decode_storage_info::(counter_info)?; + decoded += decode_storage_info::(map_info)?; + Ok(decoded) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + storage::types::{self, CountedStorageMapInstance, CountedStorageNMapInstance, Key}, + Blake2_128Concat, + }; + + type H = Blake2_128Concat; + + macro_rules! build_prefix { + ($name:ident) => { + struct $name; + impl StorageInstance for $name { + fn pallet_prefix() -> &'static str { + "test_pallet" + } + const STORAGE_PREFIX: &'static str = stringify!($name); + } + }; + } + + build_prefix!(ValuePrefix); + type Value = types::StorageValue; + + build_prefix!(MapPrefix); + type Map = types::StorageMap; + + build_prefix!(CMapCounterPrefix); + build_prefix!(CMapPrefix); + impl CountedStorageMapInstance for CMapPrefix { + type CounterPrefix = CMapCounterPrefix; + } + type CMap = types::CountedStorageMap; + + build_prefix!(DMapPrefix); + type DMap = types::StorageDoubleMap; + + build_prefix!(NMapPrefix); + type NMap = types::StorageNMap, Key), u128>; + + build_prefix!(CountedNMapCounterPrefix); + build_prefix!(CountedNMapPrefix); + impl CountedStorageNMapInstance for CountedNMapPrefix { + type CounterPrefix = CountedNMapCounterPrefix; + } + type CNMap = types::CountedStorageNMap, Key), u128>; + + #[test] + fn try_decode_entire_state_value_works() { + sp_io::TestExternalities::new_empty().execute_with(|| { + assert_eq!(Value::try_decode_entire_state(), Ok(0)); + + Value::put(42); + assert_eq!(Value::try_decode_entire_state(), Ok(4)); + + Value::kill(); + assert_eq!(Value::try_decode_entire_state(), Ok(0)); + + // two bytes, cannot be decoded into u32. 
+ sp_io::storage::set(&Value::hashed_key(), &[0u8, 1]); + assert!(Value::try_decode_entire_state().is_err()); + }) + } + + #[test] + fn try_decode_entire_state_map_works() { + sp_io::TestExternalities::new_empty().execute_with(|| { + assert_eq!(Map::try_decode_entire_state(), Ok(0)); + + Map::insert(0, 42); + assert_eq!(Map::try_decode_entire_state(), Ok(4)); + + Map::insert(0, 42); + assert_eq!(Map::try_decode_entire_state(), Ok(4)); + + Map::insert(1, 42); + assert_eq!(Map::try_decode_entire_state(), Ok(8)); + + Map::remove(0); + assert_eq!(Map::try_decode_entire_state(), Ok(4)); + + // two bytes, cannot be decoded into u32. + sp_io::storage::set(&Map::hashed_key_for(2), &[0u8, 1]); + assert!(Map::try_decode_entire_state().is_err()); + }) + } + + #[test] + fn try_decode_entire_state_counted_map_works() { + sp_io::TestExternalities::new_empty().execute_with(|| { + // counter is not even initialized; + assert_eq!(CMap::try_decode_entire_state(), Ok(0 + 0)); + + let counter = 4; + let value_size = std::mem::size_of::(); + + CMap::insert(0, 42); + assert_eq!(CMap::try_decode_entire_state(), Ok(value_size + counter)); + + CMap::insert(0, 42); + assert_eq!(CMap::try_decode_entire_state(), Ok(value_size + counter)); + + CMap::insert(1, 42); + assert_eq!(CMap::try_decode_entire_state(), Ok(value_size * 2 + counter)); + + CMap::remove(0); + assert_eq!(CMap::try_decode_entire_state(), Ok(value_size + counter)); + + // counter is cleared again. + let _ = CMap::clear(u32::MAX, None); + assert_eq!(CMap::try_decode_entire_state(), Ok(0 + 0)); + + // 1 bytes, cannot be decoded into u16. + sp_io::storage::set(&CMap::hashed_key_for(2), &[0u8]); + assert!(CMap::try_decode_entire_state().is_err()); + }) + } + + #[test] + fn try_decode_entire_state_double_works() { + sp_io::TestExternalities::new_empty().execute_with(|| { + assert_eq!(DMap::try_decode_entire_state(), Ok(0)); + + DMap::insert(0, 0, 42); + assert_eq!(DMap::try_decode_entire_state(), Ok(4)); + + DMap::insert(0, 0, 42); + assert_eq!(DMap::try_decode_entire_state(), Ok(4)); + + DMap::insert(0, 1, 42); + assert_eq!(DMap::try_decode_entire_state(), Ok(8)); + + DMap::insert(1, 0, 42); + assert_eq!(DMap::try_decode_entire_state(), Ok(12)); + + DMap::remove(0, 0); + assert_eq!(DMap::try_decode_entire_state(), Ok(8)); + + // two bytes, cannot be decoded into u32. + sp_io::storage::set(&DMap::hashed_key_for(1, 1), &[0u8, 1]); + assert!(DMap::try_decode_entire_state().is_err()); + }) + } + + #[test] + fn try_decode_entire_state_n_map_works() { + sp_io::TestExternalities::new_empty().execute_with(|| { + assert_eq!(NMap::try_decode_entire_state(), Ok(0)); + + let value_size = std::mem::size_of::(); + + NMap::insert((0u8, 0), 42); + assert_eq!(NMap::try_decode_entire_state(), Ok(value_size)); + + NMap::insert((0, 0), 42); + assert_eq!(NMap::try_decode_entire_state(), Ok(value_size)); + + NMap::insert((0, 1), 42); + assert_eq!(NMap::try_decode_entire_state(), Ok(value_size * 2)); + + NMap::insert((1, 0), 42); + assert_eq!(NMap::try_decode_entire_state(), Ok(value_size * 3)); + + NMap::remove((0, 0)); + assert_eq!(NMap::try_decode_entire_state(), Ok(value_size * 2)); + + // two bytes, cannot be decoded into u128. 
+ sp_io::storage::set(&NMap::hashed_key_for((1, 1)), &[0u8, 1]); + assert!(NMap::try_decode_entire_state().is_err()); + }) + } + + #[test] + fn try_decode_entire_state_counted_n_map_works() { + sp_io::TestExternalities::new_empty().execute_with(|| { + sp_io::TestExternalities::new_empty().execute_with(|| { + assert_eq!(NMap::try_decode_entire_state(), Ok(0)); + + let value_size = std::mem::size_of::(); + let counter = 4; + + CNMap::insert((0u8, 0), 42); + assert_eq!(CNMap::try_decode_entire_state(), Ok(value_size + counter)); + + CNMap::insert((0, 0), 42); + assert_eq!(CNMap::try_decode_entire_state(), Ok(value_size + counter)); + + CNMap::insert((0, 1), 42); + assert_eq!(CNMap::try_decode_entire_state(), Ok(value_size * 2 + counter)); + + CNMap::insert((1, 0), 42); + assert_eq!(CNMap::try_decode_entire_state(), Ok(value_size * 3 + counter)); + + CNMap::remove((0, 0)); + assert_eq!(CNMap::try_decode_entire_state(), Ok(value_size * 2 + counter)); + + // two bytes, cannot be decoded into u128. + sp_io::storage::set(&CNMap::hashed_key_for((1, 1)), &[0u8, 1]); + assert!(CNMap::try_decode_entire_state().is_err()); + }) + }) + } + + #[test] + fn extra_bytes_are_rejected() { + sp_io::TestExternalities::new_empty().execute_with(|| { + assert_eq!(Map::try_decode_entire_state(), Ok(0)); + + // 6bytes, too many to fit in u32, should be rejected. + sp_io::storage::set(&Map::hashed_key_for(2), &[0u8, 1, 3, 4, 5, 6]); + assert!(Map::try_decode_entire_state().is_err()); + }) + } + + #[test] + fn try_decode_entire_state_tuple_of_storage_works() { + sp_io::TestExternalities::new_empty().execute_with(|| { + assert_eq!(<(Value, Map) as TryDecodeEntireStorage>::try_decode_entire_state(), Ok(0)); + + Value::put(42); + assert_eq!(<(Value, Map) as TryDecodeEntireStorage>::try_decode_entire_state(), Ok(4)); + + Map::insert(0, 42); + assert_eq!(<(Value, Map) as TryDecodeEntireStorage>::try_decode_entire_state(), Ok(8)); + }); + } +} diff --git a/substrate/frame/support/src/traits/try_runtime.rs b/substrate/frame/support/src/traits/try_runtime/mod.rs similarity index 87% rename from substrate/frame/support/src/traits/try_runtime.rs rename to substrate/frame/support/src/traits/try_runtime/mod.rs index 31aebeeb4d99b2cd5fab1dd833c215a433562213..bec2dbf549a111b8b5caad19764fb827a619d194 100644 --- a/substrate/frame/support/src/traits/try_runtime.rs +++ b/substrate/frame/support/src/traits/try_runtime/mod.rs @@ -17,6 +17,11 @@ //! Try-runtime specific traits and types. +pub mod decode_entire_state; +pub use decode_entire_state::{TryDecodeEntireStorage, TryDecodeEntireStorageError}; + +use super::StorageInstance; + use impl_trait_for_tuples::impl_for_tuples; use sp_arithmetic::traits::AtLeast32BitUnsigned; use sp_runtime::TryRuntimeError; @@ -37,6 +42,13 @@ pub enum Select { Only(Vec>), } +impl Select { + /// Whether to run any checks at all. + pub fn any(&self) -> bool { + !matches!(self, Select::None) + } +} + impl Default for Select { fn default() -> Self { Select::None @@ -105,6 +117,11 @@ impl UpgradeCheckSelect { pub fn try_state(&self) -> bool { matches!(self, Self::All | Self::TryState) } + + /// Whether to run any checks at all. 
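`Select::any` (and the matching `UpgradeCheckSelect::any` below) gives callers a cheap way to ask "was anything selected at all?" before setting up any try-runtime work. A standalone sketch of the helper on a toy mirror of the enum:

// Toy mirror of `Select`: only `None` means "run no checks at all".
enum Select {
    None,
    All,
    RoundRobin(u32),
    Only(Vec<Vec<u8>>),
}

impl Select {
    fn any(&self) -> bool {
        !matches!(self, Select::None)
    }
}

fn main() {
    assert!(!Select::None.any());
    assert!(Select::All.any());
    assert!(Select::RoundRobin(4).any());
    assert!(Select::Only(vec![b"System".to_vec()]).any());
}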
+ pub fn any(&self) -> bool { + !matches!(self, Self::None) + } } #[cfg(feature = "std")] @@ -144,9 +161,27 @@ impl TryState Ok(()), Select::All => { - let mut result = Ok(()); - for_tuples!( #( result = result.and(Tuple::try_state(n.clone(), targets.clone())); )* ); - result + let mut error_count = 0; + for_tuples!(#( + if let Err(_) = Tuple::try_state(n.clone(), targets.clone()) { + error_count += 1; + } + )*); + + if error_count > 0 { + log::error!( + target: "try-runtime", + "{} pallets exited with errors while executing try_state checks.", + error_count + ); + + return Err( + "Detected errors while executing try_state checks. See logs for more info." + .into(), + ) + } + + Ok(()) }, Select::RoundRobin(len) => { let functions: &[fn(BlockNumber, Select) -> Result<(), TryRuntimeError>] = diff --git a/substrate/frame/support/test/Cargo.toml b/substrate/frame/support/test/Cargo.toml index 8b8912799142e02fe914f7320de32c7cea8a21e9..fc10725e814972a5040e3f3da45f00fbf3a041c0 100644 --- a/substrate/frame/support/test/Cargo.toml +++ b/substrate/frame/support/test/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] static_assertions = "1.1.0" serde = { version = "1.0.188", default-features = false, features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-metadata = { version = "16.0.0", default-features = false, features = ["current"] } sp-api = { path = "../../../primitives/api", default-features = false} sp-arithmetic = { path = "../../../primitives/arithmetic", default-features = false} diff --git a/substrate/frame/support/test/compile_pass/Cargo.toml b/substrate/frame/support/test/compile_pass/Cargo.toml index 9c165cd03e86664cfe5be8af608bd071db26c817..19465d924ec0c678d3a3059b302078e45dbae672 100644 --- a/substrate/frame/support/test/compile_pass/Cargo.toml +++ b/substrate/frame/support/test/compile_pass/Cargo.toml @@ -13,9 +13,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } renamed-frame-support = { package = "frame-support", path = "../..", default-features = false} -frame-system = { path = "../../../system", default-features = false} +renamed-frame-system = { package = "frame-system", path = "../../../system", default-features = false} sp-core = { path = "../../../../primitives/core", default-features = false} sp-runtime = { path = "../../../../primitives/runtime", default-features = false} sp-version = { path = "../../../../primitives/version", default-features = false} @@ -24,8 +24,8 @@ sp-version = { path = "../../../../primitives/version", default-features = false default = [ "std" ] std = [ "codec/std", - "frame-system/std", "renamed-frame-support/std", + "renamed-frame-system/std", "scale-info/std", "sp-core/std", "sp-runtime/std", diff --git a/substrate/frame/support/test/compile_pass/src/lib.rs b/substrate/frame/support/test/compile_pass/src/lib.rs index bf90d73acb320a439963ec25d8379982e1338127..6ea37fb27e72f31fe0e88e1f35815675bc33660e 100644 --- a/substrate/frame/support/test/compile_pass/src/lib.rs +++ 
b/substrate/frame/support/test/compile_pass/src/lib.rs @@ -16,7 +16,8 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! Test that `construct_runtime!` also works when `frame-support` is renamed in the `Cargo.toml`. +//! Test that `construct_runtime!` also works when `frame-support` or `frame-system` are renamed in +//! the `Cargo.toml`. #![cfg_attr(not(feature = "std"), no_std)] @@ -50,7 +51,7 @@ parameter_types! { pub const Version: RuntimeVersion = VERSION; } -impl frame_system::Config for Runtime { +impl renamed_frame_system::Config for Runtime { type BaseCallFilter = Everything; type BlockWeights = (); type BlockLength = (); @@ -82,6 +83,6 @@ pub type UncheckedExtrinsic = generic::UncheckedExtrinsic(_); + + #[pallet::config] + // The only valid syntax here is the following or + // ``` + // pub trait Config: frame::deps::frame_system::Config {} + // ``` + pub trait Config: frame_system::Config {} + + #[pallet::genesis_config] + #[derive(frame_support::DefaultNoBound)] + pub struct GenesisConfig { + #[serde(skip)] + _config: core::marker::PhantomData, + } + + #[pallet::genesis_build] + impl BuildGenesisConfig for GenesisConfig { + fn build(&self) {} + } +} + +#[cfg(test)] +// Dummy test to make sure a runtime would compile. +mod tests { + use super::{ + frame_support::{construct_runtime, derive_impl}, + frame_system, pallet, + }; + + type Block = frame_system::mocking::MockBlock; + + impl crate::pallet::Config for Runtime {} + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + impl frame_system::Config for Runtime { + type Block = Block; + } + + construct_runtime! { + pub struct Runtime + { + System: frame_system::{Pallet, Call, Storage, Config, Event}, + Pallet: pallet::{Pallet, Config}, + } + } +} diff --git a/substrate/frame/support/test/tests/construct_runtime_ui.rs b/substrate/frame/support/test/tests/construct_runtime_ui.rs index c3197c99a72bf94bbe134b89d24fced5063c15c4..0cf857e2d73c025392c96e02c3d7439690ea8c88 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui.rs +++ b/substrate/frame/support/test/tests/construct_runtime_ui.rs @@ -32,4 +32,5 @@ fn ui() { let t = trybuild::TestCases::new(); t.compile_fail("tests/construct_runtime_ui/*.rs"); + t.pass("tests/construct_runtime_ui/pass/*.rs"); } diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr index cc2c2e160095dc83ff5ecd13eb4983967fdbc72a..08954bb6ab5c57982b99432ca1ea888d2e70332c 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr @@ -148,7 +148,11 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied in `frame_syste | ||_- in this macro invocation ... | | - = note: required because it appears within the type `Event` +note: required because it appears within the type `Event` + --> $WORKSPACE/substrate/frame/system/src/lib.rs + | + | pub enum Event { + | ^^^^^ note: required by a bound in `From` --> $RUST/core/src/convert/mod.rs | @@ -169,7 +173,11 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied in `frame_syste | ||_- in this macro invocation ... 
| | - = note: required because it appears within the type `Event` +note: required because it appears within the type `Event` + --> $WORKSPACE/substrate/frame/system/src/lib.rs + | + | pub enum Event { + | ^^^^^ note: required by a bound in `TryInto` --> $RUST/core/src/convert/mod.rs | diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/pass/composite_enum_instance.rs b/substrate/frame/support/test/tests/construct_runtime_ui/pass/composite_enum_instance.rs new file mode 100644 index 0000000000000000000000000000000000000000..ad63708747694ae5935f91efa99ae28752255e40 --- /dev/null +++ b/substrate/frame/support/test/tests/construct_runtime_ui/pass/composite_enum_instance.rs @@ -0,0 +1,80 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![cfg_attr(not(feature = "std"), no_std)] + +use frame_support::derive_impl; + +pub use pallet::*; + +#[frame_support::pallet(dev_mode)] +pub mod pallet { + use frame_support::pallet_prelude::*; + + // The struct on which we build all of our Pallet logic. + #[pallet::pallet] + pub struct Pallet(PhantomData<(T, I)>); + + // Your Pallet's configuration trait, representing custom external types and interfaces. + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::composite_enum] + pub enum HoldReason { + SomeHoldReason + } + + #[pallet::composite_enum] + pub enum FreezeReason { + SomeFreezeReason + } + + #[pallet::composite_enum] + pub enum SlashReason { + SomeSlashReason + } + + #[pallet::composite_enum] + pub enum LockId { + SomeLockId + } +} + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for Runtime { + type Block = Block; +} + +pub type Header = sp_runtime::generic::Header; +pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; +pub type Block = sp_runtime::generic::Block; + +frame_support::construct_runtime!( + pub struct Runtime + { + // Exclude part `Storage` in order not to check its metadata in tests. 
+ System: frame_system, + Pallet1: pallet, + Pallet2: pallet::, + } +); + +impl pallet::Config for Runtime {} + +impl pallet::Config for Runtime {} + +fn main() {} diff --git a/substrate/frame/support/test/tests/derive_impl_ui/bad_default_impl_path.stderr b/substrate/frame/support/test/tests/derive_impl_ui/bad_default_impl_path.stderr index 5cfbd8c88624971b205c587d70106e3e398581e4..c91226ea9c31074ca5141a121a31e4d1fe1f506d 100644 --- a/substrate/frame/support/test/tests/derive_impl_ui/bad_default_impl_path.stderr +++ b/substrate/frame/support/test/tests/derive_impl_ui/bad_default_impl_path.stderr @@ -1,7 +1,5 @@ -error: cannot find macro `__export_tokens_tt_tiger` in this scope - --> tests/derive_impl_ui/bad_default_impl_path.rs:59:1 +error: cannot find macro `Tiger` in this scope + --> tests/derive_impl_ui/bad_default_impl_path.rs:59:15 | 59 | #[derive_impl(Tiger as Animal)] - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - | - = note: this error originates in the macro `frame_support::macro_magic::forward_tokens` (in Nightly builds, run with -Z macro-backtrace for more info) + | ^^^^^ diff --git a/substrate/frame/support/test/tests/derive_impl_ui/inject_runtime_type_fails_when_type_not_in_scope.stderr b/substrate/frame/support/test/tests/derive_impl_ui/inject_runtime_type_fails_when_type_not_in_scope.stderr index 79b50a940b80246451e22096293ec723a75e19a7..f3ac6b2328110f0e2c6e95c7c6f995b3dea2d1dc 100644 --- a/substrate/frame/support/test/tests/derive_impl_ui/inject_runtime_type_fails_when_type_not_in_scope.stderr +++ b/substrate/frame/support/test/tests/derive_impl_ui/inject_runtime_type_fails_when_type_not_in_scope.stderr @@ -7,4 +7,4 @@ error[E0412]: cannot find type `RuntimeCall` in this scope 35 | #[derive_impl(Pallet)] // Injects type RuntimeCall = RuntimeCall; | ---------------------- in this macro invocation | - = note: this error originates in the macro `__export_tokens_tt_pallet` which comes from the expansion of the macro `frame_support::macro_magic::forward_tokens` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `Pallet` which comes from the expansion of the macro `frame_support::macro_magic::forward_tokens_verbatim` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/substrate/frame/support/test/tests/derive_no_bound.rs b/substrate/frame/support/test/tests/derive_no_bound.rs index dc78027f22ebad3acca901792cf4368522d17409..af2633dc53cae4e8f9c52d0ee08327f339000bb4 100644 --- a/substrate/frame/support/test/tests/derive_no_bound.rs +++ b/substrate/frame/support/test/tests/derive_no_bound.rs @@ -136,6 +136,7 @@ struct StructNoGenerics { field2: u64, } +#[allow(dead_code)] #[derive(DebugNoBound, CloneNoBound, EqNoBound, PartialEqNoBound, DefaultNoBound)] enum EnumNoGenerics { #[default] @@ -162,6 +163,7 @@ enum Enum { } // enum that will have a named default. +#[allow(dead_code)] #[derive(DebugNoBound, CloneNoBound, EqNoBound, PartialEqNoBound, DefaultNoBound)] enum Enum2 { #[default] @@ -175,6 +177,7 @@ enum Enum2 { VariantUnit2, } +#[allow(dead_code)] // enum that will have a unit default. 
#[derive(DebugNoBound, CloneNoBound, EqNoBound, PartialEqNoBound, DefaultNoBound)] enum Enum3 { diff --git a/substrate/frame/support/test/tests/derive_no_bound_ui/debug.stderr b/substrate/frame/support/test/tests/derive_no_bound_ui/debug.stderr index d86292d71b7a1d77a27322bed069cc077f5ba101..3291f658f10e1fce9877acc69f02aa67a1c974f9 100644 --- a/substrate/frame/support/test/tests/derive_no_bound_ui/debug.stderr +++ b/substrate/frame/support/test/tests/derive_no_bound_ui/debug.stderr @@ -5,4 +5,4 @@ error[E0277]: `::C` doesn't implement `std::fmt::Debug` | ^ `::C` cannot be formatted using `{:?}` because it doesn't implement `std::fmt::Debug` | = help: the trait `std::fmt::Debug` is not implemented for `::C` - = note: required for the cast from `::C` to the object type `dyn std::fmt::Debug` + = note: required for the cast from `&::C` to `&dyn std::fmt::Debug` diff --git a/substrate/frame/support/test/tests/pallet.rs b/substrate/frame/support/test/tests/pallet.rs index 1898246470c73fee4bd046d737d90580015989b1..00e7adafb0b704251359afdab89190da8b5c039c 100644 --- a/substrate/frame/support/test/tests/pallet.rs +++ b/substrate/frame/support/test/tests/pallet.rs @@ -21,7 +21,7 @@ use frame_support::{ dispatch_context::with_context, pallet_prelude::{StorageInfoTrait, ValueQuery}, parameter_types, - storage::unhashed, + storage::{unhashed, unhashed::contains_prefixed_key}, traits::{ ConstU32, GetCallIndex, GetCallName, GetStorageVersion, OnFinalize, OnGenesis, OnInitialize, OnRuntimeUpgrade, PalletError, PalletInfoAccess, StorageVersion, @@ -210,12 +210,13 @@ pub mod pallet { { /// Doc comment put in metadata #[pallet::call_index(0)] - #[pallet::weight(Weight::from_parts(*_foo as u64, 0))] + #[pallet::weight(Weight::from_parts(*foo as u64, 0))] pub fn foo( origin: OriginFor, - #[pallet::compact] _foo: u32, + #[pallet::compact] foo: u32, _bar: u32, ) -> DispatchResultWithPostInfo { + let _ = foo; let _ = T::AccountId::from(SomeType1); // Test for where clause let _ = T::AccountId::from(SomeType3); // Test for where clause let _ = origin; @@ -2329,6 +2330,58 @@ fn test_storage_alias() { }) } +#[test] +fn pallet_on_chain_storage_version_initializes_correctly() { + type Executive = frame_executive::Executive< + Runtime, + Block, + frame_system::ChainContext, + Runtime, + AllPalletsWithSystem, + >; + + // Simple example of a pallet with current version 10 being added to the runtime for the first + // time. + TestExternalities::default().execute_with(|| { + let current_version = Example::current_storage_version(); + + // Check the pallet has no storage items set. + let pallet_hashed_prefix = twox_128(Example::name().as_bytes()); + let exists = contains_prefixed_key(&pallet_hashed_prefix); + assert_eq!(exists, false); + + // [`frame_support::traits::BeforeAllRuntimeMigrations`] hook should initialize the storage + // version. + Executive::execute_on_runtime_upgrade(); + + // Check that the storage version was initialized to the current version + let on_chain_version_after = StorageVersion::get::(); + assert_eq!(on_chain_version_after, current_version); + }); + + // Pallet with no current storage version should have the on-chain version initialized to 0. + TestExternalities::default().execute_with(|| { + // Example4 current_storage_version is NoStorageVersionSet. + + // Check the pallet has no storage items set. 
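The `pallet_on_chain_storage_version_initializes_correctly` test relies on a pallet declaring its current storage version so that the `BeforeAllRuntimeMigrations` hook can write it on-chain the first time the pallet appears in a runtime. A hedged sketch of such a declaration (minimal pallet, version 10 chosen only to mirror the test's comment; exact prelude imports assumed):

#[frame_support::pallet]
pub mod pallet {
    use frame_support::{pallet_prelude::*, traits::StorageVersion};

    /// The version implemented by the current code.
    const STORAGE_VERSION: StorageVersion = StorageVersion::new(10);

    #[pallet::pallet]
    #[pallet::storage_version(STORAGE_VERSION)]
    pub struct Pallet<T>(_);

    #[pallet::config]
    pub trait Config: frame_system::Config {}
}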
+ let pallet_hashed_prefix = twox_128(Example4::name().as_bytes()); + let exists = contains_prefixed_key(&pallet_hashed_prefix); + assert_eq!(exists, false); + + // Confirm the storage version is implicitly 0. + let on_chain_version_before = StorageVersion::get::(); + assert_eq!(on_chain_version_before, StorageVersion::new(0)); + + // [`frame_support::traits::BeforeAllRuntimeMigrations`] initializes the storage version. + Executive::execute_on_runtime_upgrade(); + + // Check that the storage version now exists and was initialized to 0. + let on_chain_version_after = StorageVersion::get::(); + assert_eq!(StorageVersion::exists::(), true); + assert_eq!(on_chain_version_after, StorageVersion::new(0)); + }); +} + #[cfg(feature = "try-runtime")] #[test] fn post_runtime_upgrade_detects_storage_version_issues() { @@ -2381,8 +2434,10 @@ fn post_runtime_upgrade_detects_storage_version_issues() { >; TestExternalities::default().execute_with(|| { - // Call `on_genesis` to put the storage version of `Example` into the storage. - Example::on_genesis(); + // Set the on-chain version to one less than the current version for `Example`, simulating a + // forgotten migration + StorageVersion::new(9).put::(); + // The version isn't changed, we should detect it. assert!( Executive::try_runtime_upgrade(UpgradeCheckSelect::PreAndPost).unwrap_err() == diff --git a/substrate/frame/support/test/tests/pallet_instance.rs b/substrate/frame/support/test/tests/pallet_instance.rs index 8d2d52d18852afe5eb2f5b9de059cd7d2ea25b4f..724734ec4fc9dbe7eab0c6e26fc221f165a0e8c9 100644 --- a/substrate/frame/support/test/tests/pallet_instance.rs +++ b/substrate/frame/support/test/tests/pallet_instance.rs @@ -87,12 +87,13 @@ pub mod pallet { impl, I: 'static> Pallet { /// Doc comment put in metadata #[pallet::call_index(0)] - #[pallet::weight(Weight::from_parts(*_foo as u64, 0))] + #[pallet::weight(Weight::from_parts(*foo as u64, 0))] pub fn foo( origin: OriginFor, - #[pallet::compact] _foo: u32, + #[pallet::compact] foo: u32, ) -> DispatchResultWithPostInfo { let _ = origin; + let _ = foo; Self::deposit_event(Event::Something(3)); Ok(().into()) } diff --git a/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr b/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr index c86930f8a64e3a287a45ac15ae127982dc184d10..08ea7c0bec3a5dfca33ff497b2981e9acb17f294 100644 --- a/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr @@ -19,7 +19,7 @@ error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` | = help: the trait `std::fmt::Debug` is not implemented for `::Bar` = note: required for `&::Bar` to implement `std::fmt::Debug` - = note: required for the cast from `&::Bar` to the object type `dyn std::fmt::Debug` + = note: required for the cast from `&&::Bar` to `&dyn std::fmt::Debug` error[E0277]: the trait bound `::Bar: Clone` is not satisfied --> tests/pallet_ui/call_argument_invalid_bound.rs:38:36 diff --git a/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr b/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr index 1b04f44c78febbf7232b26dedfd35021ea6e1981..80316fcd2489747ca34a9b030474c453e7f3d08d 100644 --- a/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr @@ -19,7 +19,7 @@ error[E0277]: 
`::Bar` doesn't implement `std::fmt::Debug` | = help: the trait `std::fmt::Debug` is not implemented for `::Bar` = note: required for `&::Bar` to implement `std::fmt::Debug` - = note: required for the cast from `&::Bar` to the object type `dyn std::fmt::Debug` + = note: required for the cast from `&&::Bar` to `&dyn std::fmt::Debug` error[E0277]: the trait bound `::Bar: Clone` is not satisfied --> tests/pallet_ui/call_argument_invalid_bound_2.rs:38:36 diff --git a/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr b/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr index 7429bce050c28bd0d2e9f372ed1c773646ae1acf..d45b74bad8428d57273e0cb4f0d304706dca9fd1 100644 --- a/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr @@ -20,7 +20,7 @@ error[E0277]: `Bar` doesn't implement `std::fmt::Debug` = help: the trait `std::fmt::Debug` is not implemented for `Bar` = note: add `#[derive(Debug)]` to `Bar` or manually `impl std::fmt::Debug for Bar` = note: required for `&Bar` to implement `std::fmt::Debug` - = note: required for the cast from `&Bar` to the object type `dyn std::fmt::Debug` + = note: required for the cast from `&&Bar` to `&dyn std::fmt::Debug` help: consider annotating `Bar` with `#[derive(Debug)]` | 34 + #[derive(Debug)] diff --git a/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg1.rs b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg1.rs new file mode 100644 index 0000000000000000000000000000000000000000..08aaf06a7ef25645f0ed673bae8168e5b11be2db --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg1.rs @@ -0,0 +1,37 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#[frame_support::pallet(dev_mode)] +mod pallet { + use frame_support::pallet_prelude::DispatchResult; + use frame_system::pallet_prelude::OriginFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::call] + impl Pallet { + #[pallet::feeless_if(|| -> bool { true })] + pub fn foo(_: OriginFor) -> DispatchResult { Ok(()) } + } +} + +fn main() { +} diff --git a/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg1.stderr b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg1.stderr new file mode 100644 index 0000000000000000000000000000000000000000..9c13d59d79320e10fae958585aed1571723e01c2 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg1.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::call, feeless_if closure must have same number of arguments as the dispatchable function + --> tests/pallet_ui/call_feeless_invalid_closure_arg1.rs:31:24 + | +31 | #[pallet::feeless_if(|| -> bool { true })] + | ^ diff --git a/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg2.rs b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg2.rs new file mode 100644 index 0000000000000000000000000000000000000000..b16b4b3ffd94038546533d19feba0e7eb75206ff --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg2.rs @@ -0,0 +1,37 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
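For contrast with the failing UI tests, a hedged sketch of what the macro appears to accept, inferred from the error messages here: the closure takes `&OriginFor<T>` plus one reference per dispatchable argument, in order, and returns `bool`:

#[frame_support::pallet(dev_mode)]
mod pallet {
    use frame_support::pallet_prelude::DispatchResult;
    use frame_system::pallet_prelude::OriginFor;

    #[pallet::config]
    pub trait Config: frame_system::Config {}

    #[pallet::pallet]
    pub struct Pallet<T>(core::marker::PhantomData<T>);

    #[pallet::call]
    impl<T: Config> Pallet<T> {
        // One `&OriginFor<T>`, then a reference per dispatchable argument; returns `bool`.
        #[pallet::feeless_if(|_: &OriginFor<T>, value: &u32| -> bool { *value == 0 })]
        pub fn foo(_: OriginFor<T>, _value: u32) -> DispatchResult { Ok(()) }
    }
}

fn main() {}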
+ +#[frame_support::pallet(dev_mode)] +mod pallet { + use frame_support::pallet_prelude::DispatchResult; + use frame_system::pallet_prelude::OriginFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::call] + impl Pallet { + #[pallet::feeless_if(|_: bool| -> bool { true })] + pub fn foo(_: OriginFor) -> DispatchResult { Ok(()) } + } +} + +fn main() { +} diff --git a/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg2.stderr b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg2.stderr new file mode 100644 index 0000000000000000000000000000000000000000..1c38ec2368361c455cde5328e326cbeabbdcec6a --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg2.stderr @@ -0,0 +1,5 @@ +error: Invalid type: expected `&OriginFor` + --> tests/pallet_ui/call_feeless_invalid_closure_arg2.rs:31:28 + | +31 | #[pallet::feeless_if(|_: bool| -> bool { true })] + | ^^^^ diff --git a/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg3.rs b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg3.rs new file mode 100644 index 0000000000000000000000000000000000000000..5f2230744ff87b5463a212a4e4a0735b9e1d8227 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg3.rs @@ -0,0 +1,37 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#[frame_support::pallet(dev_mode)] +mod pallet { + use frame_support::pallet_prelude::DispatchResult; + use frame_system::pallet_prelude::OriginFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::call] + impl Pallet { + #[pallet::feeless_if(|_: &OriginFor, _s: &u32| -> bool { true })] + pub fn foo(_: OriginFor, _something: u64) -> DispatchResult { Ok(()) } + } +} + +fn main() { +} diff --git a/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg3.stderr b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg3.stderr new file mode 100644 index 0000000000000000000000000000000000000000..1ad9588cead63366913090f730cc92a538964f37 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg3.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::call, feeless_if closure argument must have a reference to the same type as the dispatchable function argument + --> tests/pallet_ui/call_feeless_invalid_closure_arg3.rs:31:43 + | +31 | #[pallet::feeless_if(|_: &OriginFor, _s: &u32| -> bool { true })] + | ^^ diff --git a/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_return.rs b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_return.rs new file mode 100644 index 0000000000000000000000000000000000000000..1f0399a123ca6f160302b533db43470d1f3b5acd --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_return.rs @@ -0,0 +1,37 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#[frame_support::pallet(dev_mode)] +mod pallet { + use frame_support::pallet_prelude::DispatchResult; + use frame_system::pallet_prelude::OriginFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::call] + impl Pallet { + #[pallet::feeless_if(|_: &OriginFor| -> u32 { 0 })] + pub fn foo(_: OriginFor) -> DispatchResult { Ok(()) } + } +} + +fn main() { +} diff --git a/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_return.stderr b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_return.stderr new file mode 100644 index 0000000000000000000000000000000000000000..a8c05242bde838dbb6d57ecaf811c3f6012bc1be --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_return.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::call, feeless_if closure must return `bool` + --> tests/pallet_ui/call_feeless_invalid_closure_return.rs:31:43 + | +31 | #[pallet::feeless_if(|_: &OriginFor| -> u32 { 0 })] + | ^ diff --git a/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_type.rs b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_type.rs new file mode 100644 index 0000000000000000000000000000000000000000..26bd8a600ab9d4def80868405c8a519802affb3e --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_type.rs @@ -0,0 +1,37 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#[frame_support::pallet(dev_mode)] +mod pallet { + use frame_support::pallet_prelude::DispatchResult; + use frame_system::pallet_prelude::OriginFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::call] + impl Pallet { + #[pallet::feeless_if(0)] + pub fn foo(_: OriginFor) -> DispatchResult { Ok(()) } + } +} + +fn main() { +} diff --git a/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_type.stderr b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_type.stderr new file mode 100644 index 0000000000000000000000000000000000000000..add3decbf16020e4ff3680e8a422ebabad22ff1d --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_type.stderr @@ -0,0 +1,11 @@ +error: Invalid feeless_if attribute: expected a closure + --> tests/pallet_ui/call_feeless_invalid_type.rs:31:24 + | +31 | #[pallet::feeless_if(0)] + | ^ + +error: expected `|` + --> tests/pallet_ui/call_feeless_invalid_type.rs:31:24 + | +31 | #[pallet::feeless_if(0)] + | ^ diff --git a/substrate/frame/support/test/tests/pallet_ui/call_invalid_attr.stderr b/substrate/frame/support/test/tests/pallet_ui/call_invalid_attr.stderr index eec5e33ccbd937d370cf379c38f8a4fcecfa526d..1809fcb6ed998a916356c750ca3129388d1b4f61 100644 --- a/substrate/frame/support/test/tests/pallet_ui/call_invalid_attr.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/call_invalid_attr.stderr @@ -1,4 +1,4 @@ -error: expected `weight` or `call_index` +error: expected one of: `weight`, `call_index`, `feeless_if` --> tests/pallet_ui/call_invalid_attr.rs:31:13 | 31 | #[pallet::weird_attr] diff --git a/substrate/frame/support/test/tests/pallet_ui/call_invalid_origin_type.stderr b/substrate/frame/support/test/tests/pallet_ui/call_invalid_origin_type.stderr index 99146c0563a98ad9c1b89ad5109849dacdab3cf2..c04729a243861c52f6ad7caef1d3c8f6802b263a 100644 --- a/substrate/frame/support/test/tests/pallet_ui/call_invalid_origin_type.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/call_invalid_origin_type.stderr @@ -3,9 +3,3 @@ error: Invalid type: expected `OriginFor` | 34 | pub fn foo(origin: u8) {} | ^^ - -error: expected `OriginFor` - --> tests/pallet_ui/call_invalid_origin_type.rs:34:22 - | -34 | pub fn foo(origin: u8) {} - | ^^ diff --git a/substrate/frame/support/test/tests/pallet_ui/call_weight_unchecked_warning.rs b/substrate/frame/support/test/tests/pallet_ui/call_weight_unchecked_warning.rs new file mode 100644 index 0000000000000000000000000000000000000000..8d93638f5a51e491a51f509dc602f5c41eefbf9f --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/call_weight_unchecked_warning.rs @@ -0,0 +1,38 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::DispatchResult; + use frame_system::pallet_prelude::OriginFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::call] + impl Pallet { + #[pallet::call_index(0)] + #[pallet::weight(*_unused)] + pub fn foo(_: OriginFor, _unused: u64) -> DispatchResult { Ok(()) } + } +} + +fn main() { +} diff --git a/substrate/frame/support/test/tests/pallet_ui/call_weight_unchecked_warning.stderr b/substrate/frame/support/test/tests/pallet_ui/call_weight_unchecked_warning.stderr new file mode 100644 index 0000000000000000000000000000000000000000..89fc1e0820f5ea0969b05b00efee7ca70c95e593 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/call_weight_unchecked_warning.stderr @@ -0,0 +1,12 @@ +error: use of deprecated constant `pallet::warnings::UncheckedWeightWitness_0::_w`: + It is deprecated to not check weight witness data. + Please instead ensure that all witness data for weight calculation is checked before usage. + + For more info see: + + --> tests/pallet_ui/call_weight_unchecked_warning.rs:33:31 + | +33 | pub fn foo(_: OriginFor, _unused: u64) -> DispatchResult { Ok(()) } + | ^^^^^^^ + | + = note: `-D deprecated` implied by `-D warnings` diff --git a/substrate/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr b/substrate/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr index 74ee0e4aeba5cc359174c8aa017d77c6a26a0e29..531e8bdffeb0c0f073e1775cdc0f683beb3d198d 100644 --- a/substrate/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr @@ -30,13 +30,13 @@ error[E0277]: the trait bound `Vec: MaxEncodedLen` is not satisfied | ^^^^^^ the trait `MaxEncodedLen` is not implemented for `Vec` | = help: the following other types implement trait `MaxEncodedLen`: - () - (TupleElement0, TupleElement1) - (TupleElement0, TupleElement1, TupleElement2) - (TupleElement0, TupleElement1, TupleElement2, TupleElement3) - (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4) - (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) - (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) - (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) + bool + i8 + i16 + i32 + i64 + i128 + u8 + u16 and $N others = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageMyStorage, Vec>` to implement `StorageInfoTrait` diff --git a/substrate/frame/support/test/tests/pallet_ui/error_does_not_derive_pallet_error.stderr b/substrate/frame/support/test/tests/pallet_ui/error_does_not_derive_pallet_error.stderr index cfa0d465990a242f2c2ca2f65377b50d9e5c20bd..ea1d0ed99cd39747bfc1f94a9e3433809baa82b5 100644 --- a/substrate/frame/support/test/tests/pallet_ui/error_does_not_derive_pallet_error.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/error_does_not_derive_pallet_error.stderr @@ -5,13 +5,13 @@ error[E0277]: the trait bound `MyError: PalletError` is not satisfied | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `PalletError` is not implemented for `MyError` | = help: the following other types implement trait `PalletError`: - () - (TupleElement0, TupleElement1) - 
(TupleElement0, TupleElement1, TupleElement2) - (TupleElement0, TupleElement1, TupleElement2, TupleElement3) - (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4) - (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) - (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) - (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) + bool + i8 + i16 + i32 + i64 + i128 + u8 + u16 and $N others = note: this error originates in the derive macro `frame_support::PalletError` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/substrate/frame/support/test/tests/pallet_ui/event_field_not_member.stderr b/substrate/frame/support/test/tests/pallet_ui/event_field_not_member.stderr index 4df6deafa0df667a4299f348eb90c12ae860ae06..fc4a33b721500a09ef03f36836f7ebea832a2547 100644 --- a/substrate/frame/support/test/tests/pallet_ui/event_field_not_member.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/event_field_not_member.stderr @@ -18,4 +18,4 @@ error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` | = help: the trait `std::fmt::Debug` is not implemented for `::Bar` = note: required for `&::Bar` to implement `std::fmt::Debug` - = note: required for the cast from `&::Bar` to the object type `dyn std::fmt::Debug` + = note: required for the cast from `&&::Bar` to `&dyn std::fmt::Debug` diff --git a/substrate/frame/support/test/tests/pallet_ui/event_type_invalid_bound.rs b/substrate/frame/support/test/tests/pallet_ui/event_type_invalid_bound.rs index 665d2a984cf04801ef4c3c3712e053a075c1ae07..34b563ffb6433b6a156e5ace4abecd08b560ce49 100644 --- a/substrate/frame/support/test/tests/pallet_ui/event_type_invalid_bound.rs +++ b/substrate/frame/support/test/tests/pallet_ui/event_type_invalid_bound.rs @@ -41,5 +41,4 @@ mod pallet { } } -fn main() { -} +fn main() {} diff --git a/substrate/frame/support/test/tests/pallet_ui/event_type_invalid_bound_no_frame_crate.rs b/substrate/frame/support/test/tests/pallet_ui/event_type_invalid_bound_no_frame_crate.rs new file mode 100644 index 0000000000000000000000000000000000000000..573ceb6dfab7dcfe5fb677f50be2c0c5f7e119b5 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/event_type_invalid_bound_no_frame_crate.rs @@ -0,0 +1,45 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#[frame_support::pallet] +mod pallet { + use frame::deps::frame_system::pallet_prelude::BlockNumberFor; + use frame_support::pallet_prelude::{Hooks, IsType}; + + #[pallet::config] + pub trait Config: frame::deps::frame_system::Config { + type Bar: Clone + std::fmt::Debug + Eq; + type RuntimeEvent: IsType<::RuntimeEvent> + + From>; + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::event] + pub enum Event { + B { b: T::Bar }, + } +} + +fn main() {} diff --git a/substrate/frame/support/test/tests/pallet_ui/event_type_invalid_bound_no_frame_crate.stderr b/substrate/frame/support/test/tests/pallet_ui/event_type_invalid_bound_no_frame_crate.stderr new file mode 100644 index 0000000000000000000000000000000000000000..0f805c972e4da5f1ecfc5a7836f259c6867f0741 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/event_type_invalid_bound_no_frame_crate.stderr @@ -0,0 +1,5 @@ +error: Invalid `type RuntimeEvent`, associated type `RuntimeEvent` is reserved and must bound: `IsType<::RuntimeEvent>` + --> tests/pallet_ui/event_type_invalid_bound_no_frame_crate.rs:26:3 + | +26 | type RuntimeEvent: IsType<::RuntimeEvent> + | ^^^^ diff --git a/substrate/frame/support/test/tests/pallet_ui/inherent_check_inner_span.stderr b/substrate/frame/support/test/tests/pallet_ui/inherent_check_inner_span.stderr index 3a26d1c0495418658df96cd902f263155cc4fd45..5ea3be470a068b1468beb754b6b4caeca25d93e4 100644 --- a/substrate/frame/support/test/tests/pallet_ui/inherent_check_inner_span.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/inherent_check_inner_span.stderr @@ -4,8 +4,8 @@ error[E0046]: not all trait items implemented, missing: `Call`, `Error`, `INHERE 36 | impl ProvideInherent for Pallet {} | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ missing `Call`, `Error`, `INHERENT_IDENTIFIER`, `create_inherent`, `is_inherent` in implementation | - = help: implement the missing item: `type Call = Type;` - = help: implement the missing item: `type Error = Type;` + = help: implement the missing item: `type Call = /* Type */;` + = help: implement the missing item: `type Error = /* Type */;` = help: implement the missing item: `const INHERENT_IDENTIFIER: [u8; 8] = value;` = help: implement the missing item: `fn create_inherent(_: &InherentData) -> std::option::Option<::Call> { todo!() }` = help: implement the missing item: `fn is_inherent(_: &::Call) -> bool { todo!() }` diff --git a/substrate/frame/support/test/tests/pallet_ui/pass/event_type_bound_system_config_assoc_type.rs b/substrate/frame/support/test/tests/pallet_ui/pass/event_type_bound_system_config_assoc_type.rs new file mode 100644 index 0000000000000000000000000000000000000000..0a70c4a5e68a8f67fa17f09fda2d11eb6e557efc --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/pass/event_type_bound_system_config_assoc_type.rs @@ -0,0 +1,44 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::{Hooks, IsType}; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config { + type Bar: Clone + std::fmt::Debug + Eq; + type RuntimeEvent: IsType<::RuntimeEvent> + From>; + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::event] + pub enum Event { + B { b: T::Bar }, + } +} + +fn main() {} diff --git a/substrate/frame/support/test/tests/pallet_ui/pass/feeless_call.rs b/substrate/frame/support/test/tests/pallet_ui/pass/feeless_call.rs new file mode 100644 index 0000000000000000000000000000000000000000..566b7c65cc71005a75b07f2cb9a79bf24f087e19 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/pass/feeless_call.rs @@ -0,0 +1,37 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#[frame_support::pallet(dev_mode)] +mod pallet { + use frame_support::pallet_prelude::DispatchResult; + use frame_system::pallet_prelude::OriginFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::call] + impl Pallet { + #[pallet::feeless_if(|_: &OriginFor| -> bool { true })] + pub fn foo(_: OriginFor) -> DispatchResult { Ok(()) } + } +} + +fn main() { +} diff --git a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr index e290b22a0eaa59b02cf2af5487444cde5924c3b5..b5d108275249e5c2deb95bc0336165aa5a8ba8c1 100644 --- a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr @@ -5,10 +5,10 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied | ^^^^^^^^^^^^^^^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar` | = help: the following other types implement trait `WrapperTypeDecode`: - Arc Box Rc frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes + Arc = note: required for `Bar` to implement `Decode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `PartialStorageInfoTrait` @@ -20,14 +20,14 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied | ^^^^^^^^^^^^^^^^^^^^ the trait `EncodeLike` is not implemented for `Bar` | = help: the following other types implement trait `EncodeLike`: - <&&T as EncodeLike> - <&T as EncodeLike> - <&T as EncodeLike> - <&[(K, V)] as EncodeLike>> - 
<&[(T,)] as EncodeLike>> - <&[(T,)] as EncodeLike>> - <&[(T,)] as EncodeLike>> - <&[T] as EncodeLike>> + + + + + + + + and $N others = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` @@ -40,14 +40,14 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied | ^^^^^^^^^^^^^^^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar` | = help: the following other types implement trait `WrapperTypeEncode`: - &T - &mut T - Arc Box + bytes::bytes::Bytes Cow<'a, T> + parity_scale_codec::Ref<'a, T, U> Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes + Arc Vec - bytes::bytes::Bytes and $N others = note: required for `Bar` to implement `Encode` = note: required for `Bar` to implement `FullEncode` @@ -61,14 +61,14 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied | ^^^^^^^ the trait `TypeInfo` is not implemented for `Bar` | = help: the following other types implement trait `TypeInfo`: - &T - &mut T - () - (A, B) - (A, B, C) - (A, B, C, D) - (A, B, C, D, E) - (A, B, C, D, E, F) + bool + char + i8 + i16 + i32 + i64 + i128 + u8 and $N others = note: required for `Bar` to implement `StaticTypeInfo` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` @@ -80,10 +80,10 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied | ^^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar` | = help: the following other types implement trait `WrapperTypeDecode`: - Arc Box Rc frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes + Arc = note: required for `Bar` to implement `Decode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` @@ -95,14 +95,14 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied | ^^^^^^^ the trait `EncodeLike` is not implemented for `Bar` | = help: the following other types implement trait `EncodeLike`: - <&&T as EncodeLike> - <&T as EncodeLike> - <&T as EncodeLike> - <&[(K, V)] as EncodeLike>> - <&[(T,)] as EncodeLike>> - <&[(T,)] as EncodeLike>> - <&[(T,)] as EncodeLike>> - <&[T] as EncodeLike>> + + + + + + + + and $N others = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` @@ -115,16 +115,75 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied | ^^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar` | = help: the following other types implement trait `WrapperTypeEncode`: - &T - &mut T - Arc Box + bytes::bytes::Bytes Cow<'a, T> + parity_scale_codec::Ref<'a, T, U> Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes + Arc Vec - bytes::bytes::Bytes and $N others = note: required for `Bar` to implement `Encode` = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` + +error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied + --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.rs:18:1 + | +18 | #[frame_support::pallet] + | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar` + | + = help: the following other types implement trait 
`WrapperTypeDecode`: + Box + Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes + Arc + = note: required for `Bar` to implement `Decode` + = note: required for `Bar` to implement `FullCodec` + = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `TryDecodeEntireStorage` + = note: this error originates in the attribute macro `frame_support::pallet` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied + --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.rs:18:1 + | +18 | #[frame_support::pallet] + | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `EncodeLike` is not implemented for `Bar` + | + = help: the following other types implement trait `EncodeLike`: + + + + + + + + + and $N others + = note: required for `Bar` to implement `FullEncode` + = note: required for `Bar` to implement `FullCodec` + = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `TryDecodeEntireStorage` + = note: this error originates in the attribute macro `frame_support::pallet` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied + --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.rs:18:1 + | +18 | #[frame_support::pallet] + | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar` + | + = help: the following other types implement trait `WrapperTypeEncode`: + Box + bytes::bytes::Bytes + Cow<'a, T> + parity_scale_codec::Ref<'a, T, U> + Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes + Arc + Vec + and $N others + = note: required for `Bar` to implement `Encode` + = note: required for `Bar` to implement `FullEncode` + = note: required for `Bar` to implement `FullCodec` + = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `TryDecodeEntireStorage` + = note: this error originates in the attribute macro `frame_support::pallet` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr index 0e3a7c9f1cbfa0f7ac626eef998824a85b0673bc..b58902590b8560f5d8d5e6bbc5bdb652aeb82b44 100644 --- a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr @@ -5,10 +5,10 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied | ^^^^^^^^^^^^^^^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar` | = help: the following other types implement trait `WrapperTypeDecode`: - Arc Box Rc frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes + Arc = note: required for `Bar` to implement `Decode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `PartialStorageInfoTrait` @@ -20,14 +20,14 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied | ^^^^^^^^^^^^^^^^^^^^ the trait `EncodeLike` is not implemented for `Bar` | = help: the following other types implement trait `EncodeLike`: - <&&T as EncodeLike> 
- <&T as EncodeLike> - <&T as EncodeLike> - <&[(K, V)] as EncodeLike>> - <&[(T,)] as EncodeLike>> - <&[(T,)] as EncodeLike>> - <&[(T,)] as EncodeLike>> - <&[T] as EncodeLike>> + + + + + + + + and $N others = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` @@ -40,14 +40,14 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied | ^^^^^^^^^^^^^^^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar` | = help: the following other types implement trait `WrapperTypeEncode`: - &T - &mut T - Arc Box + bytes::bytes::Bytes Cow<'a, T> + parity_scale_codec::Ref<'a, T, U> Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes + Arc Vec - bytes::bytes::Bytes and $N others = note: required for `Bar` to implement `Encode` = note: required for `Bar` to implement `FullEncode` @@ -61,14 +61,14 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied | ^^^^^^^ the trait `TypeInfo` is not implemented for `Bar` | = help: the following other types implement trait `TypeInfo`: - &T - &mut T - () - (A, B) - (A, B, C) - (A, B, C, D) - (A, B, C, D, E) - (A, B, C, D, E, F) + bool + char + i8 + i16 + i32 + i64 + i128 + u8 and $N others = note: required for `Bar` to implement `StaticTypeInfo` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` @@ -80,10 +80,10 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied | ^^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar` | = help: the following other types implement trait `WrapperTypeDecode`: - Arc Box Rc frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes + Arc = note: required for `Bar` to implement `Decode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` @@ -95,14 +95,14 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied | ^^^^^^^ the trait `EncodeLike` is not implemented for `Bar` | = help: the following other types implement trait `EncodeLike`: - <&&T as EncodeLike> - <&T as EncodeLike> - <&T as EncodeLike> - <&[(K, V)] as EncodeLike>> - <&[(T,)] as EncodeLike>> - <&[(T,)] as EncodeLike>> - <&[(T,)] as EncodeLike>> - <&[T] as EncodeLike>> + + + + + + + + and $N others = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` @@ -115,16 +115,75 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied | ^^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar` | = help: the following other types implement trait `WrapperTypeEncode`: - &T - &mut T - Arc Box + bytes::bytes::Bytes Cow<'a, T> + parity_scale_codec::Ref<'a, T, U> Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes + Arc Vec - bytes::bytes::Bytes and $N others = note: required for `Bar` to implement `Encode` = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` + +error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied + --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:18:1 + | +18 | #[frame_support::pallet] + | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `WrapperTypeDecode` is not 
implemented for `Bar` + | + = help: the following other types implement trait `WrapperTypeDecode`: + Box + Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes + Arc + = note: required for `Bar` to implement `Decode` + = note: required for `Bar` to implement `FullCodec` + = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `TryDecodeEntireStorage` + = note: this error originates in the attribute macro `frame_support::pallet` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied + --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:18:1 + | +18 | #[frame_support::pallet] + | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `EncodeLike` is not implemented for `Bar` + | + = help: the following other types implement trait `EncodeLike`: + + + + + + + + + and $N others + = note: required for `Bar` to implement `FullEncode` + = note: required for `Bar` to implement `FullCodec` + = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `TryDecodeEntireStorage` + = note: this error originates in the attribute macro `frame_support::pallet` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied + --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:18:1 + | +18 | #[frame_support::pallet] + | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar` + | + = help: the following other types implement trait `WrapperTypeEncode`: + Box + bytes::bytes::Bytes + Cow<'a, T> + parity_scale_codec::Ref<'a, T, U> + Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes + Arc + Vec + and $N others + = note: required for `Bar` to implement `Encode` + = note: required for `Bar` to implement `FullEncode` + = note: required for `Bar` to implement `FullCodec` + = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `TryDecodeEntireStorage` + = note: this error originates in the attribute macro `frame_support::pallet` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr b/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr index 9a31e4b6bdf46eb0470e7fd90ed329a95b27ec58..e04de98800ec20fc449666f671bcd91058fb3335 100644 --- a/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr @@ -5,13 +5,13 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied | ^^^^^^ the trait `MaxEncodedLen` is not implemented for `Bar` | = help: the following other types implement trait `MaxEncodedLen`: - () - (TupleElement0, TupleElement1) - (TupleElement0, TupleElement1, TupleElement2) - (TupleElement0, TupleElement1, TupleElement2, TupleElement3) - (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4) - (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) - (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) - (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) + bool + i8 + i16 + i32 + i64 + i128 + u8 + u16 
and $N others = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageInfoTrait` diff --git a/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr b/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr index cdcd1b401f8010e3b619903fc5d11cd16fd5020e..31fe3b57338968822c8f49837c5176aa859be2cf 100644 --- a/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr @@ -5,14 +5,14 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied | ^^^^^^ the trait `MaxEncodedLen` is not implemented for `Bar` | = help: the following other types implement trait `MaxEncodedLen`: - () - (TupleElement0, TupleElement1) - (TupleElement0, TupleElement1, TupleElement2) - (TupleElement0, TupleElement1, TupleElement2, TupleElement3) - (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4) - (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) - (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) - (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) + bool + i8 + i16 + i32 + i64 + i128 + u8 + u16 and $N others - = note: required for `Key` to implement `KeyGeneratorMaxEncodedLen` - = note: required for `frame_support::pallet_prelude::StorageNMap<_GeneratedPrefixForStorageFoo, Key, u32>` to implement `StorageInfoTrait` + = note: required for `NMapKey` to implement `KeyGeneratorMaxEncodedLen` + = note: required for `frame_support::pallet_prelude::StorageNMap<_GeneratedPrefixForStorageFoo, NMapKey, u32>` to implement `StorageInfoTrait` diff --git a/substrate/frame/system/Cargo.toml b/substrate/frame/system/Cargo.toml index d1d5897ce35524adf1d2b034700bd2fc5505d39b..b61b4d531e2b6c285b6bf061833790b0db4b7c78 100644 --- a/substrate/frame/system/Cargo.toml +++ b/substrate/frame/system/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] cfg-if = "1.0" codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive", "serde"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } serde = { version = "1.0.188", default-features = false, features = ["derive", "alloc"] } frame-support = { path = "../support", default-features = false} sp-core = { path = "../../primitives/core", default-features = false, features = ["serde"] } @@ -25,6 +25,7 @@ sp-runtime = { path = "../../primitives/runtime", default-features = false, feat sp-std = { path = "../../primitives/std", default-features = false} sp-version = { path = "../../primitives/version", default-features = false, features = ["serde"] } sp-weights = { path = "../../primitives/weights", default-features = false, features = ["serde"] } +docify = "0.2.0" [dev-dependencies] criterion = "0.4.0" @@ -52,6 +53,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", ] try-runtime = [ "frame-support/try-runtime", "sp-runtime/try-runtime" ] +experimental = [] [[bench]] name = "bench" diff --git a/substrate/frame/system/benchmarking/Cargo.toml b/substrate/frame/system/benchmarking/Cargo.toml index 
0bd9299f783c25bafba79248716f7ad4879e80c4..c1d241f4bec1641f65700ff6876b9f36beb441fb 100644 --- a/substrate/frame/system/benchmarking/Cargo.toml +++ b/substrate/frame/system/benchmarking/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../../benchmarking", default-features = false} frame-support = { path = "../../support", default-features = false} frame-system = { path = "..", default-features = false} diff --git a/substrate/frame/system/src/extensions/check_nonce.rs b/substrate/frame/system/src/extensions/check_nonce.rs index 2939fd6534c09c7e5b59816b821d781966273e81..7504a814aef13b0230e1067c0d95e0104ad74275 100644 --- a/substrate/frame/system/src/extensions/check_nonce.rs +++ b/substrate/frame/system/src/extensions/check_nonce.rs @@ -20,7 +20,7 @@ use codec::{Decode, Encode}; use frame_support::dispatch::DispatchInfo; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, Dispatchable, One, SignedExtension}, + traits::{DispatchInfoOf, Dispatchable, One, SignedExtension, Zero}, transaction_validity::{ InvalidTransaction, TransactionLongevity, TransactionValidity, TransactionValidityError, ValidTransaction, @@ -80,6 +80,10 @@ where _len: usize, ) -> Result<(), TransactionValidityError> { let mut account = crate::Account::::get(who); + if account.providers.is_zero() && account.sufficients.is_zero() { + // Nonce storage not paid for + return Err(InvalidTransaction::Payment.into()) + } if self.0 != account.nonce { return Err(if self.0 < account.nonce { InvalidTransaction::Stale @@ -100,8 +104,11 @@ where _info: &DispatchInfoOf, _len: usize, ) -> TransactionValidity { - // check index let account = crate::Account::::get(who); + if account.providers.is_zero() && account.sufficients.is_zero() { + // Nonce storage not paid for + return InvalidTransaction::Payment.into() + } if self.0 < account.nonce { return InvalidTransaction::Stale.into() } @@ -137,7 +144,7 @@ mod tests { crate::AccountInfo { nonce: 1, consumers: 0, - providers: 0, + providers: 1, sufficients: 0, data: 0, }, @@ -164,4 +171,47 @@ mod tests { ); }) } + + #[test] + fn signed_ext_check_nonce_requires_provider() { + new_test_ext().execute_with(|| { + crate::Account::::insert( + 2, + crate::AccountInfo { + nonce: 1, + consumers: 0, + providers: 1, + sufficients: 0, + data: 0, + }, + ); + crate::Account::::insert( + 3, + crate::AccountInfo { + nonce: 1, + consumers: 0, + providers: 0, + sufficients: 1, + data: 0, + }, + ); + let info = DispatchInfo::default(); + let len = 0_usize; + // Both providers and sufficients zero + assert_noop!( + CheckNonce::(1).validate(&1, CALL, &info, len), + InvalidTransaction::Payment + ); + assert_noop!( + CheckNonce::(1).pre_dispatch(&1, CALL, &info, len), + InvalidTransaction::Payment + ); + // Non-zero providers + assert_ok!(CheckNonce::(1).validate(&2, CALL, &info, len)); + assert_ok!(CheckNonce::(1).pre_dispatch(&2, CALL, &info, len)); + // Non-zero sufficients + assert_ok!(CheckNonce::(1).validate(&3, CALL, &info, len)); + assert_ok!(CheckNonce::(1).pre_dispatch(&3, CALL, &info, len)); + }) + } } diff --git a/substrate/frame/system/src/extensions/check_weight.rs b/substrate/frame/system/src/extensions/check_weight.rs index 
1030c8daf7b0464aff46a1020e83b3b7c9831b16..70d1e75633278c665f4c06ca22f02a96668ef964 100644 --- a/substrate/frame/system/src/extensions/check_weight.rs +++ b/substrate/frame/system/src/extensions/check_weight.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::{limits::BlockWeights, Config, Pallet}; +use crate::{limits::BlockWeights, Config, Pallet, LOG_TARGET}; use codec::{Decode, Encode}; use frame_support::{ dispatch::{DispatchInfo, PostDispatchInfo}, @@ -50,8 +50,16 @@ where ) -> Result<(), TransactionValidityError> { let max = T::BlockWeights::get().get(info.class).max_extrinsic; match max { - Some(max) if info.weight.any_gt(max) => - Err(InvalidTransaction::ExhaustsResources.into()), + Some(max) if info.weight.any_gt(max) => { + log::debug!( + target: LOG_TARGET, + "Extrinsic {} is greater than the max extrinsic {}", + info.weight, + max, + ); + + Err(InvalidTransaction::ExhaustsResources.into()) + }, _ => Ok(()), } } @@ -79,6 +87,13 @@ where let added_len = len as u32; let next_len = current_len.saturating_add(added_len); if next_len > *length_limit.max.get(info.class) { + log::debug!( + target: LOG_TARGET, + "Exceeded block length limit: {} > {}", + next_len, + length_limit.max.get(info.class), + ); + Err(InvalidTransaction::ExhaustsResources.into()) } else { Ok(next_len) @@ -137,17 +152,28 @@ where if limit_per_class.max_total.is_none() && limit_per_class.reserved.is_none() { all_weight.accrue(extrinsic_weight, info.class) } else { - all_weight - .checked_accrue(extrinsic_weight, info.class) - .map_err(|_| InvalidTransaction::ExhaustsResources)?; + all_weight.checked_accrue(extrinsic_weight, info.class).map_err(|_| { + log::debug!( + target: LOG_TARGET, + "All weight checked add overflow.", + ); + + InvalidTransaction::ExhaustsResources + })?; } let per_class = *all_weight.get(info.class); // Check if we don't exceed per-class allowance match limit_per_class.max_total { - Some(max) if per_class.any_gt(max) => - return Err(InvalidTransaction::ExhaustsResources.into()), + Some(max) if per_class.any_gt(max) => { + log::debug!( + target: LOG_TARGET, + "Exceeded the per-class allowance.", + ); + + return Err(InvalidTransaction::ExhaustsResources.into()) + }, // There is no `max_total` limit (`None`), // or we are below the limit. _ => {}, @@ -158,8 +184,14 @@ where if all_weight.total().any_gt(maximum_weight.max_block) { match limit_per_class.reserved { // We are over the limit in reserved pool. - Some(reserved) if per_class.any_gt(reserved) => - return Err(InvalidTransaction::ExhaustsResources.into()), + Some(reserved) if per_class.any_gt(reserved) => { + log::debug!( + target: LOG_TARGET, + "Total block weight is exceeded.", + ); + + return Err(InvalidTransaction::ExhaustsResources.into()) + }, // There is either no limit in reserved pool (`None`), // or we are below the limit. 
_ => {}, @@ -233,6 +265,18 @@ where }) } + log::trace!( + target: LOG_TARGET, + "Used block weight: {:?}", + crate::BlockWeight::::get(), + ); + + log::trace!( + target: LOG_TARGET, + "Used block length: {:?}", + Pallet::::all_extrinsics_len(), + ); + Ok(()) } } diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs index 84b6dc031457db67ebd9eba43b83571fb70220bf..1b8dd6a9367f496cfceb54c788728d5de0059b08 100644 --- a/substrate/frame/system/src/lib.rs +++ b/substrate/frame/system/src/lib.rs @@ -163,7 +163,7 @@ pub trait SetCode { impl SetCode for () { fn set_code(code: Vec) -> DispatchResult { - >::update_code_in_storage(&code)?; + >::update_code_in_storage(&code); Ok(()) } } @@ -244,6 +244,53 @@ pub mod pallet { type BlockHashCount = frame_support::traits::ConstU64<10>; type OnSetCode = (); } + + /// Default configurations of this pallet in a solo-chain environment. + /// + /// ## Considerations: + /// + /// By default, this type makes the following choices: + /// + /// * Use a normal 32 byte account id, with a [`DefaultConfig::Lookup`] that implies no + /// 'account-indexing' pallet is being used. + /// * Given that we don't know anything about the existence of a currency system in scope, + /// an [`DefaultConfig::AccountData`] is chosen that has no addition data. Overwrite this + /// if you use `pallet-balances` or similar. + /// * Make sure to overwrite [`DefaultConfig::Version`]. + /// * 2s block time, and a default 5mb block size is used. + #[cfg(feature = "experimental")] + pub struct SolochainDefaultConfig; + + #[cfg(feature = "experimental")] + #[frame_support::register_default_impl(SolochainDefaultConfig)] + impl DefaultConfig for SolochainDefaultConfig { + type Nonce = u32; + type Hash = sp_core::hash::H256; + type Hashing = sp_runtime::traits::BlakeTwo256; + type AccountId = sp_runtime::AccountId32; + type Lookup = sp_runtime::traits::AccountIdLookup; + type MaxConsumers = frame_support::traits::ConstU32<128>; + type AccountData = crate::AccountInfo; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type Version = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + #[inject_runtime_type] + type RuntimeEvent = (); + #[inject_runtime_type] + type RuntimeOrigin = (); + #[inject_runtime_type] + type RuntimeCall = (); + #[inject_runtime_type] + type PalletInfo = (); + type BaseCallFilter = frame_support::traits::Everything; + type BlockHashCount = frame_support::traits::ConstU32<256>; + type OnSetCode = (); + } } /// System configuration trait. Implemented by runtime. @@ -420,8 +467,9 @@ pub mod pallet { /// /// Can be executed by every `origin`. #[pallet::call_index(0)] - #[pallet::weight(T::SystemWeightInfo::remark(_remark.len() as u32))] - pub fn remark(_origin: OriginFor, _remark: Vec) -> DispatchResultWithPostInfo { + #[pallet::weight(T::SystemWeightInfo::remark(remark.len() as u32))] + pub fn remark(_origin: OriginFor, remark: Vec) -> DispatchResultWithPostInfo { + let _ = remark; // No need to check the weight witness. Ok(().into()) } @@ -495,16 +543,16 @@ pub mod pallet { /// the prefix we are removing to accurately calculate the weight of this function. 
#[pallet::call_index(6)] #[pallet::weight(( - T::SystemWeightInfo::kill_prefix(_subkeys.saturating_add(1)), + T::SystemWeightInfo::kill_prefix(subkeys.saturating_add(1)), DispatchClass::Operational, ))] pub fn kill_prefix( origin: OriginFor, prefix: Key, - _subkeys: u32, + subkeys: u32, ) -> DispatchResultWithPostInfo { ensure_root(origin)?; - let _ = storage::unhashed::clear_prefix(&prefix, None, None); + let _ = storage::unhashed::clear_prefix(&prefix, Some(subkeys), None); Ok(().into()) } @@ -676,8 +724,6 @@ pub mod pallet { #[derive(frame_support::DefaultNoBound)] #[pallet::genesis_config] pub struct GenesisConfig { - #[serde(with = "sp_core::bytes")] - pub code: Vec, #[serde(skip)] pub _config: sp_std::marker::PhantomData, } @@ -691,7 +737,6 @@ pub mod pallet { >::put(true); >::put(true); - sp_io::storage::set(well_known_keys::CODE, &self.code); sp_io::storage::set(well_known_keys::EXTRINSIC_INDEX, &0u32.encode()); } } @@ -1049,6 +1094,25 @@ pub enum DecRefStatus { } impl Pallet { + /// Returns the `spec_version` of the last runtime upgrade. + /// + /// This function is useful for writing guarded runtime migrations in the runtime. A runtime + /// migration can use the `spec_version` to ensure that it isn't applied twice. This works + /// similarly to the storage version for pallets. + /// + /// This function returns the `spec_version` of the last runtime upgrade while executing the + /// runtime migrations in the + /// [`on_runtime_upgrade`](frame_support::traits::OnRuntimeUpgrade::on_runtime_upgrade) + /// function. After all migrations are executed, this will return the `spec_version` of the + /// current runtime until there is another runtime upgrade. + /// + /// Example: + #[doc = docify::embed!("src/tests.rs", last_runtime_upgrade_spec_version_usage)] + pub fn last_runtime_upgrade_spec_version() -> u32 { + LastRuntimeUpgrade::::get().map_or(0, |l| l.spec_version.0) + } + + /// Returns true if the given account exists. pub fn account_exists(who: &T::AccountId) -> bool { Account::::contains_key(who) } @@ -1058,11 +1122,10 @@ impl Pallet { /// Note this function almost never should be used directly. It is exposed /// for `OnSetCode` implementations that defer actual code being written to /// the storage (for instance in case of parachains). - pub fn update_code_in_storage(code: &[u8]) -> DispatchResult { + pub fn update_code_in_storage(code: &[u8]) { storage::unhashed::put_raw(well_known_keys::CODE, code); Self::deposit_log(generic::DigestItem::RuntimeEnvironmentUpdated); Self::deposit_event(Event::CodeUpdated); - Ok(()) } /// Increment the reference counter on an account. @@ -1816,4 +1879,11 @@ pub mod pallet_prelude { /// Type alias for the `BlockNumber` associated type of system config. pub type BlockNumberFor = as sp_runtime::traits::Header>::Number; + + /// Type alias for the `Extrinsic` associated type of system config. + pub type ExtrinsicFor = + <::Block as sp_runtime::traits::Block>::Extrinsic; + + /// Type alias for the `RuntimeCall` associated type of system config. + pub type RuntimeCallFor = ::RuntimeCall; } diff --git a/substrate/frame/system/src/offchain.rs b/substrate/frame/system/src/offchain.rs index dd4ac6782a55e1d6330386444423c1d6e662d5be..a019cfd666e8cb3ddccc02aa706692cbe6cee2e3 100644 --- a/substrate/frame/system/src/offchain.rs +++ b/substrate/frame/system/src/offchain.rs @@ -154,8 +154,8 @@ impl, X> Signer /// all available accounts and the provided accounts /// in `with_filter`. If no accounts are provided, /// use all accounts by default.
- fn accounts_from_keys<'a>(&'a self) -> Box> + 'a> { - let keystore_accounts = self.keystore_accounts(); + pub fn accounts_from_keys<'a>(&'a self) -> Box> + 'a> { + let keystore_accounts = Self::keystore_accounts(); match self.accounts { None => Box::new(keystore_accounts), Some(ref keys) => { @@ -175,7 +175,8 @@ impl, X> Signer } } - fn keystore_accounts(&self) -> impl Iterator> { + /// Return all available accounts in keystore. + pub fn keystore_accounts() -> impl Iterator> { C::RuntimeAppPublic::all().into_iter().enumerate().map(|(index, key)| { let generic_public = C::GenericPublic::from(key); let public: T::Public = generic_public.into(); diff --git a/substrate/frame/system/src/tests.rs b/substrate/frame/system/src/tests.rs index 165df688b1c2c2915bfdb932a9e1d0448d31b680..6fbddaaf229402d9a2457e089d9b9d2059c440bb 100644 --- a/substrate/frame/system/src/tests.rs +++ b/substrate/frame/system/src/tests.rs @@ -19,7 +19,7 @@ use crate::*; use frame_support::{ assert_noop, assert_ok, dispatch::{Pays, PostDispatchInfo, WithPostDispatchInfo}, - traits::WhitelistedStorageKeys, + traits::{OnRuntimeUpgrade, WhitelistedStorageKeys}, }; use std::collections::BTreeSet; @@ -773,3 +773,26 @@ pub fn from_actual_ref_time(ref_time: Option) -> PostDispatchInfo { pub fn from_post_weight_info(ref_time: Option, pays_fee: Pays) -> PostDispatchInfo { PostDispatchInfo { actual_weight: ref_time.map(|t| Weight::from_all(t)), pays_fee } } + +#[docify::export] +#[test] +fn last_runtime_upgrade_spec_version_usage() { + struct Migration; + + impl OnRuntimeUpgrade for Migration { + fn on_runtime_upgrade() -> Weight { + // Ensure to compare the spec version against some static version to prevent applying + // the same migration multiple times. + // + // `1337` here is the spec version of the runtime running on chain. If there is maybe + // a runtime upgrade in the pipeline of being applied, you should use the spec version + // of this upgrade. + if System::last_runtime_upgrade_spec_version() > 1337 { + return Weight::zero(); + } + + // Do the migration. 
+ Weight::zero() + } + } +} diff --git a/substrate/frame/timestamp/Cargo.toml b/substrate/frame/timestamp/Cargo.toml index 291f6b1cf590ac0bdb751b8d5af9b6c0e22ab0e3..e23ded725d8910d9f608cd6c4d8aa0d82d12ca57 100644 --- a/substrate/frame/timestamp/Cargo.toml +++ b/substrate/frame/timestamp/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} @@ -27,7 +27,7 @@ sp-std = { path = "../../primitives/std", default-features = false} sp-storage = { path = "../../primitives/storage", default-features = false} sp-timestamp = { path = "../../primitives/timestamp", default-features = false} -docify = "0.2.4" +docify = "0.2.6" [dev-dependencies] sp-core = { path = "../../primitives/core" } diff --git a/substrate/frame/timestamp/src/lib.rs b/substrate/frame/timestamp/src/lib.rs index ad055bab004f99a49710a2e479a54910b249cc00..a62ac6d633d043d5a62cbd9f27639fc6ba0ac4bf 100644 --- a/substrate/frame/timestamp/src/lib.rs +++ b/substrate/frame/timestamp/src/lib.rs @@ -31,7 +31,7 @@ //! //! See the [`pallet`] module for more information about the interfaces this pallet exposes, //! including its configuration trait, dispatchables, storage items, events and errors. -//! +//! //! ## Overview //! //! The Timestamp pallet is designed to create a consensus-based time source. This helps ensure that @@ -144,12 +144,33 @@ pub use pallet::*; #[frame_support::pallet] pub mod pallet { use super::*; - use frame_support::pallet_prelude::*; + use frame_support::{derive_impl, pallet_prelude::*}; use frame_system::pallet_prelude::*; - #[pallet::config] + /// Default preludes for [`Config`]. + pub mod config_preludes { + use super::*; + + /// Default prelude sensible to be used in a testing environment. + pub struct TestDefaultConfig; + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig, no_aggregated_types)] + impl frame_system::DefaultConfig for TestDefaultConfig {} + + #[frame_support::register_default_impl(TestDefaultConfig)] + impl DefaultConfig for TestDefaultConfig { + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = frame_support::traits::ConstU64<1>; + type WeightInfo = (); + } + } + + /// The pallet configuration trait + #[pallet::config(with_default)] pub trait Config: frame_system::Config { /// Type used for expressing a timestamp. 
+ #[pallet::no_default_bounds] type Moment: Parameter + Default + AtLeast32Bit diff --git a/substrate/frame/tips/Cargo.toml b/substrate/frame/tips/Cargo.toml index 0e9314a9f97c58f1dc44df2ab696fc263c50fb83..6df886b93d71cdafa0ab89034755a406ff3649bc 100644 --- a/substrate/frame/tips/Cargo.toml +++ b/substrate/frame/tips/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.188", features = ["derive"], optional = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} diff --git a/substrate/frame/tips/src/tests.rs b/substrate/frame/tips/src/tests.rs index 8fe111afc26a41386da1005924648606b34fa61f..189623b520ebacb6c3ca6f1cb335827896f6eac2 100644 --- a/substrate/frame/tips/src/tests.rs +++ b/substrate/frame/tips/src/tests.rs @@ -95,6 +95,7 @@ impl pallet_balances::Config for Test { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = (); } parameter_types! { diff --git a/substrate/frame/transaction-payment/Cargo.toml b/substrate/frame/transaction-payment/Cargo.toml index 5c65ebd4c730a4dfd2195cd39cf7c9a55d32af31..ad384755614fc202ed3ae7d93226bfba71a0a605 100644 --- a/substrate/frame/transaction-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.188", optional = true } frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} @@ -26,7 +26,7 @@ sp-runtime = { path = "../../primitives/runtime", default-features = false} sp-std = { path = "../../primitives/std", default-features = false} [dev-dependencies] -serde_json = "1.0.107" +serde_json = "1.0.108" pallet-balances = { path = "../balances" } [features] diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml b/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml index 3ce7aa0a31b2597542e0dd534e664a2f21dbb450..2cac47fb3b713cb8100285be6c24f1a501181686 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml @@ -21,7 +21,7 @@ frame-system = { path = "../../system", default-features = false} pallet-asset-conversion = { path = "../../asset-conversion", default-features = false} pallet-transaction-payment = { path = "..", default-features = false} codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } [dev-dependencies] sp-core = { path = 
"../../../primitives/core", default-features = false} diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/lib.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/lib.rs index c2d9ed56c7aa38867cbd988eb1fe1a486233d420..04a71e2ff4260bacade4d93420646b21b946a2c2 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/lib.rs +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/lib.rs @@ -48,7 +48,7 @@ use codec::{Decode, Encode}; use frame_support::{ dispatch::{DispatchInfo, DispatchResult, PostDispatchInfo}, traits::{ - tokens::fungibles::{Balanced, Inspect}, + fungibles::{Balanced, Inspect}, IsType, }, DefaultNoBound, diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs index bfbe8b4178cee217c65de1f86f980754a12402f0..76c78fb4222385257caf40196948961f9ff3d42d 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs @@ -121,6 +121,7 @@ impl pallet_balances::Config for Runtime { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = (); } diff --git a/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml b/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml index d3f040e9893fab35d397ddabbfc0eeea957436ab..ae236728cd5816bc6993dfbac46339084006a338 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml @@ -26,11 +26,11 @@ frame-benchmarking = { path = "../../benchmarking", default-features = false, op # Other dependencies codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.188", optional = true } [dev-dependencies] -serde_json = "1.0.107" +serde_json = "1.0.108" sp-storage = { path = "../../../primitives/storage", default-features = false} diff --git a/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs b/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs index b8d7b523ca2589555b7a4aeb65eb544b54a52a98..5fa8a4ab27dd114f555646e9a3a84d01e056ee55 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs @@ -113,6 +113,7 @@ impl pallet_balances::Config for Runtime { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = (); } diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/Cargo.toml b/substrate/frame/transaction-payment/skip-feeless-payment/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..cfb814e2e380a13e24860b20504d0df4b400c9d4 --- /dev/null +++ b/substrate/frame/transaction-payment/skip-feeless-payment/Cargo.toml @@ -0,0 +1,44 @@ +[package] +name = "pallet-skip-feeless-payment" +version = "1.0.0-dev" +authors.workspace = true +edition.workspace = true +license.workspace = true +repository.workspace = true +description = "Pallet to skip payments for calls annotated with `feeless_if` if the respective conditions 
are satisfied." + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +# Substrate dependencies +sp-runtime = { path = "../../../primitives/runtime", default-features = false} +sp-std = { path = "../../../primitives/std", default-features = false} + +frame-support = { path = "../../support", default-features = false} +frame-system = { path = "../../system", default-features = false} + +# Other dependencies +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } + +[features] +default = [ "std" ] +std = [ + "codec/std", + "frame-support/std", + "frame-system/std", + "scale-info/std", + "sp-runtime/std", + "sp-std/std", +] +runtime-benchmarks = [ + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "sp-runtime/try-runtime", +] diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs b/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..55afaaf93d78cf2eb835cf546476223eaad8e492 --- /dev/null +++ b/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs @@ -0,0 +1,148 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +//! # Skip Feeless Payment Pallet +//! +//! This pallet allows runtimes that include it to skip payment of transaction fees for +//! dispatchables marked by [`#[pallet::feeless_if]`](`macro@ +//! frame_support::pallet_prelude::feeless_if`). +//! +//! ## Overview +//! +//! It does this by wrapping an existing [`SignedExtension`] implementation (e.g. +//! [`pallet-transaction-payment`]) and checking if the dispatchable is feeless before applying the +//! wrapped extension. If the dispatchable is indeed feeless, the extension is skipped and a custom +//! event is emitted instead. Otherwise, the extension is applied as usual. +//! +//! +//! ## Integration +//! +//! This pallet wraps an existing transaction payment pallet. This means you should both pallets +//! in your `construct_runtime` macro and include this pallet's +//! [`SignedExtension`] ([`SkipCheckIfFeeless`]) that would accept the existing one as an argument. 
+ +#![cfg_attr(not(feature = "std"), no_std)] + +use codec::{Decode, Encode}; +use frame_support::{ + dispatch::{CheckIfFeeless, DispatchResult}, + traits::{IsType, OriginTrait}, +}; +use scale_info::TypeInfo; +use sp_runtime::{ + traits::{DispatchInfoOf, PostDispatchInfoOf, SignedExtension}, + transaction_validity::TransactionValidityError, +}; + +#[cfg(test)] +mod mock; +#[cfg(test)] +mod tests; + +pub use pallet::*; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type. + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + } + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// A transaction fee was skipped. + FeeSkipped { who: T::AccountId }, + } +} + +/// A [`SignedExtension`] that skips the wrapped extension if the dispatchable is feeless. +#[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] +#[scale_info(skip_type_params(T))] +pub struct SkipCheckIfFeeless(pub S, sp_std::marker::PhantomData); + +impl sp_std::fmt::Debug for SkipCheckIfFeeless { + #[cfg(feature = "std")] + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + write!(f, "SkipCheckIfFeeless<{:?}>", self.0.encode()) + } + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + Ok(()) + } +} + +impl From for SkipCheckIfFeeless { + fn from(s: S) -> Self { + Self(s, sp_std::marker::PhantomData) + } +} + +impl> SignedExtension + for SkipCheckIfFeeless +where + S::Call: CheckIfFeeless>, +{ + type AccountId = T::AccountId; + type Call = S::Call; + type AdditionalSigned = S::AdditionalSigned; + type Pre = (Self::AccountId, Option<::Pre>); + // From the outside this extension should be "invisible", because it just extends the wrapped + // extension with an extra check in `pre_dispatch` and `post_dispatch`. Thus, we should forward + // the identifier of the wrapped extension to let wallets see this extension as it would only be + // the wrapped extension itself. + const IDENTIFIER: &'static str = S::IDENTIFIER; + + fn additional_signed(&self) -> Result { + self.0.additional_signed() + } + + fn pre_dispatch( + self, + who: &Self::AccountId, + call: &Self::Call, + info: &DispatchInfoOf, + len: usize, + ) -> Result { + if call.is_feeless(&::RuntimeOrigin::signed(who.clone())) { + Ok((who.clone(), None)) + } else { + Ok((who.clone(), Some(self.0.pre_dispatch(who, call, info, len)?))) + } + } + + fn post_dispatch( + pre: Option, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, + len: usize, + result: &DispatchResult, + ) -> Result<(), TransactionValidityError> { + if let Some(pre) = pre { + if let Some(pre) = pre.1 { + S::post_dispatch(Some(pre), info, post_info, len, result)?; + } else { + Pallet::::deposit_event(Event::::FeeSkipped { who: pre.0 }); + } + } + Ok(()) + } +} diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/src/mock.rs b/substrate/frame/transaction-payment/skip-feeless-payment/src/mock.rs new file mode 100644 index 0000000000000000000000000000000000000000..5c540c3e45955f3385462efd2d68a8c394617644 --- /dev/null +++ b/substrate/frame/transaction-payment/skip-feeless-payment/src/mock.rs @@ -0,0 +1,92 @@ +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::*; +use crate as pallet_skip_feeless_payment; + +use frame_support::{derive_impl, parameter_types}; +use frame_system as system; + +type Block = frame_system::mocking::MockBlock; +type AccountId = u64; + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for Runtime { + type Block = Block; +} + +impl Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} + +parameter_types! { + pub static PreDispatchCount: u32 = 0; +} + +#[derive(Clone, Eq, PartialEq, Debug, Encode, Decode, TypeInfo)] +pub struct DummyExtension; + +impl SignedExtension for DummyExtension { + type AccountId = AccountId; + type Call = RuntimeCall; + type AdditionalSigned = (); + type Pre = (); + const IDENTIFIER: &'static str = "DummyExtension"; + fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { + Ok(()) + } + fn pre_dispatch( + self, + _who: &Self::AccountId, + _call: &Self::Call, + _info: &DispatchInfoOf, + _len: usize, + ) -> Result { + PreDispatchCount::mutate(|c| *c += 1); + Ok(()) + } +} + +#[frame_support::pallet(dev_mode)] +pub mod pallet_dummy { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::call] + impl Pallet { + #[pallet::feeless_if(|_origin: &OriginFor, data: &u32| -> bool { + *data == 0 + })] + pub fn aux(_origin: OriginFor, #[pallet::compact] _data: u32) -> DispatchResult { + unreachable!() + } + } +} + +impl pallet_dummy::Config for Runtime {} + +frame_support::construct_runtime!( + pub struct Runtime { + System: system, + SkipFeeless: pallet_skip_feeless_payment, + DummyPallet: pallet_dummy, + } +); diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/src/tests.rs b/substrate/frame/transaction-payment/skip-feeless-payment/src/tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..4b4dd6997418f9c9e39ebe0ab294051a688f2f87 --- /dev/null +++ b/substrate/frame/transaction-payment/skip-feeless-payment/src/tests.rs @@ -0,0 +1,33 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
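The mock above opts a call into the feeless path with `#[pallet::feeless_if]`: the annotated closure receives a reference to the origin followed by references to the call's arguments, in declaration order, and must return `bool`; when it returns `true`, `SkipCheckIfFeeless` bypasses the wrapped fee extension and deposits the `FeeSkipped` event. A more general sketch follows (the pallet, call, and predicate are illustrative only, mirroring the `dev_mode` mock rather than any real pallet):

#[frame_support::pallet(dev_mode)]
pub mod pallet_example {
    use frame_support::pallet_prelude::*;
    use frame_system::pallet_prelude::*;

    #[pallet::pallet]
    pub struct Pallet<T>(_);

    #[pallet::config]
    pub trait Config: frame_system::Config {}

    #[pallet::call]
    impl<T: Config> Pallet<T> {
        // Feeless whenever `amount == 0`; otherwise the wrapped extension runs as usual.
        #[pallet::feeless_if(|_origin: &OriginFor<T>, amount: &u32| -> bool { *amount == 0 })]
        pub fn record(origin: OriginFor<T>, amount: u32) -> DispatchResult {
            let _who = ensure_signed(origin)?;
            let _ = amount; // real pallet logic elided
            Ok(())
        }
    }
}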
+ +use super::*; +use crate::mock::{pallet_dummy::Call, DummyExtension, PreDispatchCount, Runtime, RuntimeCall}; +use frame_support::dispatch::DispatchInfo; + +#[test] +fn skip_feeless_payment_works() { + let call = RuntimeCall::DummyPallet(Call::::aux { data: 1 }); + SkipCheckIfFeeless::::from(DummyExtension) + .pre_dispatch(&0, &call, &DispatchInfo::default(), 0) + .unwrap(); + assert_eq!(PreDispatchCount::get(), 1); + + let call = RuntimeCall::DummyPallet(Call::::aux { data: 0 }); + SkipCheckIfFeeless::::from(DummyExtension) + .pre_dispatch(&0, &call, &DispatchInfo::default(), 0) + .unwrap(); + assert_eq!(PreDispatchCount::get(), 1); +} diff --git a/substrate/frame/transaction-payment/src/lib.rs b/substrate/frame/transaction-payment/src/lib.rs index 8160d72ad8942b776bea069a3ef864217ce9a8b2..efadfd60bdd30126f8627fccea7c02f4c42c68a3 100644 --- a/substrate/frame/transaction-payment/src/lib.rs +++ b/substrate/frame/transaction-payment/src/lib.rs @@ -319,9 +319,29 @@ pub mod pallet { #[pallet::pallet] pub struct Pallet(_); - #[pallet::config] + pub mod config_preludes { + use super::*; + use frame_support::derive_impl; + + /// Default prelude sensible to be used in a testing environment. + pub struct TestDefaultConfig; + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig, no_aggregated_types)] + impl frame_system::DefaultConfig for TestDefaultConfig {} + + #[frame_support::register_default_impl(TestDefaultConfig)] + impl DefaultConfig for TestDefaultConfig { + #[inject_runtime_type] + type RuntimeEvent = (); + type FeeMultiplierUpdate = (); + type OperationalFeeMultiplier = (); + } + } + + #[pallet::config(with_default)] pub trait Config: frame_system::Config { /// The overarching event type. + #[pallet::no_default_bounds] type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// Handler for withdrawing, refunding and depositing the transaction fee. @@ -330,12 +350,24 @@ pub mod pallet { /// adjusted, depending on the used resources by the transaction. If the /// transaction weight is lower than expected, parts of the transaction fee /// might be refunded. In the end the fees can be deposited. + #[pallet::no_default] type OnChargeTransaction: OnChargeTransaction; - /// A fee mulitplier for `Operational` extrinsics to compute "virtual tip" to boost their + /// Convert a weight value into a deductible fee based on the currency type. + #[pallet::no_default] + type WeightToFee: WeightToFee>; + + /// Convert a length value into a deductible fee based on the currency type. + #[pallet::no_default] + type LengthToFee: WeightToFee>; + + /// Update the multiplier of the next block, based on the previous block's weight. + type FeeMultiplierUpdate: MultiplierUpdate; + + /// A fee multiplier for `Operational` extrinsics to compute "virtual tip" to boost their /// `priority` /// - /// This value is multipled by the `final_fee` to obtain a "virtual tip" that is later + /// This value is multiplied by the `final_fee` to obtain a "virtual tip" that is later /// added to a tip component in regular `priority` calculations. /// It means that a `Normal` transaction can front-run a similarly-sized `Operational` /// extrinsic (with no tip), by including a tip value greater than the virtual tip. @@ -355,15 +387,6 @@ pub mod pallet { /// transactions. #[pallet::constant] type OperationalFeeMultiplier: Get; - - /// Convert a weight value into a deductible fee based on the currency type. 
- type WeightToFee: WeightToFee>; - - /// Convert a length value into a deductible fee based on the currency type. - type LengthToFee: WeightToFee>; - - /// Update the multiplier of the next block, based on the previous block's weight. - type FeeMultiplierUpdate: MultiplierUpdate; } #[pallet::type_value] diff --git a/substrate/frame/transaction-payment/src/mock.rs b/substrate/frame/transaction-payment/src/mock.rs index 97253be463058ce94bf103ef0e03c5243ea29eec..419989bef12a02b7f6fa52f9cbedd95caf99dd45 100644 --- a/substrate/frame/transaction-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/src/mock.rs @@ -108,6 +108,7 @@ impl pallet_balances::Config for Runtime { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = (); } diff --git a/substrate/frame/transaction-storage/Cargo.toml b/substrate/frame/transaction-storage/Cargo.toml index a1aec7ef65a389b9a2b0e48b7ed58e87daba392d..e90f063427b847555858d2c8b638451b0aeb0993 100644 --- a/substrate/frame/transaction-storage/Cargo.toml +++ b/substrate/frame/transaction-storage/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] array-bytes = { version = "6.1", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.188", optional = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} diff --git a/substrate/frame/transaction-storage/src/benchmarking.rs b/substrate/frame/transaction-storage/src/benchmarking.rs index fdbaeb1f951818a7f34cd98c077f1880dd8bdf5f..8d485d9f3cac2de9f5a738dbe5490f2fb5232bce 100644 --- a/substrate/frame/transaction-storage/src/benchmarking.rs +++ b/substrate/frame/transaction-storage/src/benchmarking.rs @@ -21,9 +21,9 @@ use super::*; use frame_benchmarking::v1::{benchmarks, whitelisted_caller}; -use frame_support::traits::{Currency, Get, OnFinalize, OnInitialize}; +use frame_support::traits::{Get, OnFinalize, OnInitialize}; use frame_system::{pallet_prelude::BlockNumberFor, EventRecord, Pallet as System, RawOrigin}; -use sp_runtime::traits::{Bounded, One, Zero}; +use sp_runtime::traits::{Bounded, CheckedDiv, One, Zero}; use sp_std::*; use sp_transaction_storage_proof::TransactionStorageProof; @@ -103,9 +103,6 @@ fn proof() -> Vec { array_bytes::hex2bytes_unchecked(PROOF) } -type BalanceOf = - <::Currency as Currency<::AccountId>>::Balance; - fn assert_last_event(generic_event: ::RuntimeEvent) { let events = System::::events(); let system_event: ::RuntimeEvent = generic_event.into(); @@ -129,7 +126,8 @@ benchmarks! { store { let l in 1 .. T::MaxTransactionSize::get(); let caller: T::AccountId = whitelisted_caller(); - T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + let initial_balance = BalanceOf::::max_value().checked_div(&2u32.into()).unwrap(); + T::Currency::set_balance(&caller, initial_balance); }: _(RawOrigin::Signed(caller.clone()), vec![0u8; l as usize]) verify { assert!(!BlockTransactions::::get().is_empty()); @@ -138,7 +136,8 @@ benchmarks! 
{ renew { let caller: T::AccountId = whitelisted_caller(); - T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + let initial_balance = BalanceOf::::max_value().checked_div(&2u32.into()).unwrap(); + T::Currency::set_balance(&caller, initial_balance); TransactionStorage::::store( RawOrigin::Signed(caller.clone()).into(), vec![0u8; T::MaxTransactionSize::get() as usize], @@ -152,7 +151,8 @@ benchmarks! { check_proof_max { run_to_block::(1u32.into()); let caller: T::AccountId = whitelisted_caller(); - T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + let initial_balance = BalanceOf::::max_value().checked_div(&2u32.into()).unwrap(); + T::Currency::set_balance(&caller, initial_balance); for _ in 0 .. T::MaxBlockTransactions::get() { TransactionStorage::::store( RawOrigin::Signed(caller.clone()).into(), diff --git a/substrate/frame/transaction-storage/src/lib.rs b/substrate/frame/transaction-storage/src/lib.rs index 753f5ca0c7b5238c1d51dfbec2da50d1d1b82b1a..fb8ada0f5f9954b9fc8455094aeb9ec641a165c2 100644 --- a/substrate/frame/transaction-storage/src/lib.rs +++ b/substrate/frame/transaction-storage/src/lib.rs @@ -31,7 +31,14 @@ mod tests; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ dispatch::GetDispatchInfo, - traits::{Currency, OnUnbalanced, ReservableCurrency}, + traits::{ + fungible::{ + hold::Balanced as FnBalanced, Inspect as FnInspect, Mutate as FnMutate, + MutateHold as FnMutateHold, + }, + tokens::fungible::Credit, + OnUnbalanced, + }, }; use sp_runtime::traits::{BlakeTwo256, Dispatchable, Hash, One, Saturating, Zero}; use sp_std::{prelude::*, result}; @@ -42,10 +49,8 @@ use sp_transaction_storage_proof::{ /// A type alias for the balance type from this pallet's point of view. type BalanceOf = - <::Currency as Currency<::AccountId>>::Balance; -type NegativeImbalanceOf = <::Currency as Currency< - ::AccountId, ->>::NegativeImbalance; + <::Currency as FnInspect<::AccountId>>::Balance; +pub type CreditOf = Credit<::AccountId, ::Currency>; // Re-export pallet items so that they can be accessed from the crate namespace. pub use pallet::*; @@ -89,6 +94,13 @@ pub mod pallet { use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; + /// A reason for this pallet placing a hold on funds. + #[pallet::composite_enum] + pub enum HoldReason { + /// The funds are held as deposit for the used storage. + StorageFeeHold, + } + #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. @@ -98,10 +110,14 @@ pub mod pallet { + Dispatchable + GetDispatchInfo + From>; - /// The currency trait. - type Currency: ReservableCurrency; + /// The fungible type for this pallet. + type Currency: FnMutate + + FnMutateHold + + FnBalanced; + /// The overarching runtime hold reason. + type RuntimeHoldReason: From; /// Handler for the unbalanced decrease when fees are burned. - type FeeDestination: OnUnbalanced>; + type FeeDestination: OnUnbalanced>; /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; /// Maximum number of indexed transactions in the block. @@ -112,8 +128,6 @@ pub mod pallet { #[pallet::error] pub enum Error { - /// Insufficient account balance. - InsufficientFunds, /// Invalid configuration. NotConfigured, /// Renewed extrinsic is not found. 
@@ -432,8 +446,10 @@ pub mod pallet { let byte_fee = ByteFee::::get().ok_or(Error::::NotConfigured)?; let entry_fee = EntryFee::::get().ok_or(Error::::NotConfigured)?; let fee = byte_fee.saturating_mul(size.into()).saturating_add(entry_fee); - ensure!(T::Currency::can_slash(&sender, fee), Error::::InsufficientFunds); - let (credit, _) = T::Currency::slash(&sender, fee); + T::Currency::hold(&HoldReason::StorageFeeHold.into(), &sender, fee)?; + let (credit, _remainder) = + T::Currency::slash(&HoldReason::StorageFeeHold.into(), &sender, fee); + debug_assert!(_remainder.is_zero()); T::FeeDestination::on_unbalanced(credit); Ok(()) } diff --git a/substrate/frame/transaction-storage/src/mock.rs b/substrate/frame/transaction-storage/src/mock.rs index 243e26b559053d54a84b8f29010ccd39de1e6781..a8da19a382df155e60aad10dfd63c71c48577a6f 100644 --- a/substrate/frame/transaction-storage/src/mock.rs +++ b/substrate/frame/transaction-storage/src/mock.rs @@ -21,12 +21,11 @@ use crate::{ self as pallet_transaction_storage, TransactionStorageProof, DEFAULT_MAX_BLOCK_TRANSACTIONS, DEFAULT_MAX_TRANSACTION_SIZE, }; -use frame_support::traits::{ConstU16, ConstU32, ConstU64, OnFinalize, OnInitialize}; -use sp_core::H256; -use sp_runtime::{ - traits::{BlakeTwo256, IdentityLookup}, - BuildStorage, +use frame_support::{ + derive_impl, + traits::{ConstU32, ConstU64, OnFinalize, OnInitialize}, }; +use sp_runtime::{traits::IdentityLookup, BuildStorage}; pub type Block = frame_system::mocking::MockBlock; @@ -37,57 +36,35 @@ frame_support::construct_runtime!( System: frame_system::{Pallet, Call, Config, Storage, Event}, Balances: pallet_balances::{Pallet, Call, Config, Storage, Event}, TransactionStorage: pallet_transaction_storage::{ - Pallet, Call, Storage, Config, Inherent, Event + Pallet, Call, Storage, Config, Inherent, Event, HoldReason }, } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { - type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = (); - type BlockLength = (); - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - type Nonce = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; - type DbWeight = (); - type Version = (); - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = ConstU16<42>; - type OnSetCode = (); - type MaxConsumers = ConstU32<16>; + type AccountId = u64; + type BlockHashCount = ConstU64<250>; + type Lookup = IdentityLookup; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig as pallet_balances::DefaultConfig)] impl pallet_balances::Config for Test { type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type MaxHolds = (); + type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; + type MaxHolds = ConstU32<128>; } impl pallet_transaction_storage::Config for Test { type RuntimeEvent = RuntimeEvent; type RuntimeCall = RuntimeCall; type Currency = 
Balances; + type RuntimeHoldReason = RuntimeHoldReason; type FeeDestination = (); type WeightInfo = (); type MaxBlockTransactions = ConstU32<{ DEFAULT_MAX_BLOCK_TRANSACTIONS }>; diff --git a/substrate/frame/transaction-storage/src/tests.rs b/substrate/frame/transaction-storage/src/tests.rs index 43dfed81f88bb94bae0e936466523f556464f0cb..e17b3ca3bebd596f1d7071c45c986820169c4f94 100644 --- a/substrate/frame/transaction-storage/src/tests.rs +++ b/substrate/frame/transaction-storage/src/tests.rs @@ -21,6 +21,7 @@ use super::{Pallet as TransactionStorage, *}; use crate::mock::*; use frame_support::{assert_noop, assert_ok}; use frame_system::RawOrigin; +use sp_runtime::{DispatchError, TokenError::FundsUnavailable}; use sp_transaction_storage_proof::registration::build_proof; const MAX_DATA_SIZE: u32 = DEFAULT_MAX_TRANSACTION_SIZE; @@ -71,7 +72,7 @@ fn burns_fee() { RawOrigin::Signed(5).into(), vec![0u8; 2000 as usize] ), - Error::::InsufficientFunds, + DispatchError::Token(FundsUnavailable), ); assert_ok!(TransactionStorage::::store( RawOrigin::Signed(caller).into(), diff --git a/substrate/frame/treasury/Cargo.toml b/substrate/frame/treasury/Cargo.toml index f7f7a6ae89c560cf3c1c89b8255b02b0e09f3196..6fb23380f82107d2aa9e08ed8299006130ef14a1 100644 --- a/substrate/frame/treasury/Cargo.toml +++ b/substrate/frame/treasury/Cargo.toml @@ -19,7 +19,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = ] } docify = "0.2.0" impl-trait-for-tuples = "0.2.2" -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.188", features = ["derive"], optional = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} diff --git a/substrate/frame/treasury/src/benchmarking.rs b/substrate/frame/treasury/src/benchmarking.rs index f5f73ea8ddabd08a20d0ec6f3f37f7c170f47b8b..61fe29dafcae53b9b572243ca6b28da1c5a46bf0 100644 --- a/substrate/frame/treasury/src/benchmarking.rs +++ b/substrate/frame/treasury/src/benchmarking.rs @@ -336,5 +336,9 @@ mod benchmarks { Ok(()) } - impl_benchmark_test_suite!(Treasury, crate::tests::new_test_ext(), crate::tests::Test); + impl_benchmark_test_suite!( + Treasury, + crate::tests::ExtBuilder::default().build(), + crate::tests::Test + ); } diff --git a/substrate/frame/treasury/src/lib.rs b/substrate/frame/treasury/src/lib.rs index b2b3a8801c156254df68dce95d8283e6ef432d0b..5e429d3914bd0c60ea24eb53825c4da4b6a07fe5 100644 --- a/substrate/frame/treasury/src/lib.rs +++ b/substrate/frame/treasury/src/lib.rs @@ -89,7 +89,8 @@ use sp_runtime::{ use sp_std::{collections::btree_map::BTreeMap, prelude::*}; use frame_support::{ - print, + dispatch::{DispatchResult, DispatchResultWithPostInfo}, + ensure, print, traits::{ tokens::Pay, Currency, ExistenceRequirement::KeepAlive, Get, Imbalance, OnUnbalanced, ReservableCurrency, WithdrawReasons, @@ -456,6 +457,14 @@ pub mod pallet { Weight::zero() } } + + #[cfg(feature = "try-runtime")] + fn try_state( + _: frame_system::pallet_prelude::BlockNumberFor, + ) -> Result<(), sp_runtime::TryRuntimeError> { + Self::do_try_state()?; + Ok(()) + } } #[derive(Default)] @@ -1020,6 +1029,85 @@ impl, I: 'static> Pallet { // Must never be less than 0 but better be safe. .saturating_sub(T::Currency::minimum_balance()) } + + /// Ensure the correctness of the state of this pallet. 
+ #[cfg(any(feature = "try-runtime", test))] + fn do_try_state() -> Result<(), sp_runtime::TryRuntimeError> { + Self::try_state_proposals()?; + Self::try_state_spends()?; + + Ok(()) + } + + /// ### Invariants of proposal storage items + /// + /// 1. [`ProposalCount`] >= Number of elements in [`Proposals`]. + /// 2. Each entry in [`Proposals`] should be saved under a key stricly less than current + /// [`ProposalCount`]. + /// 3. Each [`ProposalIndex`] contained in [`Approvals`] should exist in [`Proposals`]. + /// Note, that this automatically implies [`Approvals`].count() <= [`Proposals`].count(). + #[cfg(any(feature = "try-runtime", test))] + fn try_state_proposals() -> Result<(), sp_runtime::TryRuntimeError> { + let current_proposal_count = ProposalCount::::get(); + ensure!( + current_proposal_count as usize >= Proposals::::iter().count(), + "Actual number of proposals exceeds `ProposalCount`." + ); + + Proposals::::iter_keys().try_for_each(|proposal_index| -> DispatchResult { + ensure!( + current_proposal_count as u32 > proposal_index, + "`ProposalCount` should by strictly greater than any ProposalIndex used as a key for `Proposals`." + ); + Ok(()) + })?; + + Approvals::::get() + .iter() + .try_for_each(|proposal_index| -> DispatchResult { + ensure!( + Proposals::::contains_key(proposal_index), + "Proposal indices in `Approvals` must also be contained in `Proposals`." + ); + Ok(()) + })?; + + Ok(()) + } + + /// ## Invariants of spend storage items + /// + /// 1. [`SpendCount`] >= Number of elements in [`Spends`]. + /// 2. Each entry in [`Spends`] should be saved under a key stricly less than current + /// [`SpendCount`]. + /// 3. For each spend entry contained in [`Spends`] we should have spend.expire_at + /// > spend.valid_from. + #[cfg(any(feature = "try-runtime", test))] + fn try_state_spends() -> Result<(), sp_runtime::TryRuntimeError> { + let current_spend_count = SpendCount::::get(); + ensure!( + current_spend_count as usize >= Spends::::iter().count(), + "Actual number of spends exceeds `SpendCount`." + ); + + Spends::::iter_keys().try_for_each(|spend_index| -> DispatchResult { + ensure!( + current_spend_count > spend_index, + "`SpendCount` should by strictly greater than any SpendIndex used as a key for `Spends`." + ); + Ok(()) + })?; + + Spends::::iter().try_for_each(|(_index, spend)| -> DispatchResult { + ensure!( + spend.valid_from < spend.expire_at, + "Spend cannot expire before it becomes valid." + ); + Ok(()) + })?; + + Ok(()) + } } impl, I: 'static> OnUnbalanced> for Pallet { diff --git a/substrate/frame/treasury/src/tests.rs b/substrate/frame/treasury/src/tests.rs index 4bb00547d9f28c26825bc5d23b8f06a0f530f360..522ecf6b18fc384d8014ead260a374c7935131a3 100644 --- a/substrate/frame/treasury/src/tests.rs +++ b/substrate/frame/treasury/src/tests.rs @@ -92,6 +92,7 @@ impl pallet_balances::Config for Test { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = (); } @@ -217,16 +218,28 @@ impl Config for Test { type BenchmarkHelper = (); } -pub fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); - pallet_balances::GenesisConfig:: { - // Total issuance will be 200 with treasury account initialized at ED. 
- balances: vec![(0, 100), (1, 98), (2, 1)], +pub struct ExtBuilder {} + +impl Default for ExtBuilder { + fn default() -> Self { + Self {} + } +} + +impl ExtBuilder { + pub fn build(self) -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + pallet_balances::GenesisConfig:: { + // Total issuance will be 200 with treasury account initialized at ED. + balances: vec![(0, 100), (1, 98), (2, 1)], + } + .assimilate_storage(&mut t) + .unwrap(); + crate::GenesisConfig::::default().assimilate_storage(&mut t).unwrap(); + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(1)); + ext } - .assimilate_storage(&mut t) - .unwrap(); - crate::GenesisConfig::::default().assimilate_storage(&mut t).unwrap(); - t.into() } fn get_payment_id(i: SpendIndex) -> Option { @@ -239,7 +252,7 @@ fn get_payment_id(i: SpendIndex) -> Option { #[test] fn genesis_config_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build().execute_with(|| { assert_eq!(Treasury::pot(), 0); assert_eq!(Treasury::proposal_count(), 0); }); @@ -247,7 +260,7 @@ fn genesis_config_works() { #[test] fn spend_local_origin_permissioning_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build().execute_with(|| { assert_noop!(Treasury::spend_local(RuntimeOrigin::signed(1), 1, 1), BadOrigin); assert_noop!( Treasury::spend_local(RuntimeOrigin::signed(10), 6, 1), @@ -271,7 +284,7 @@ fn spend_local_origin_permissioning_works() { #[docify::export] #[test] fn spend_local_origin_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build().execute_with(|| { // Check that accumulate works when we have Some value in Dummy already. Balances::make_free_balance_be(&Treasury::account_id(), 101); // approve spend of some amount to beneficiary `6`. @@ -295,7 +308,7 @@ fn spend_local_origin_works() { #[test] fn minting_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build().execute_with(|| { // Check that accumulate works when we have Some value in Dummy already. 
Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Treasury::pot(), 100); @@ -304,7 +317,7 @@ fn minting_works() { #[test] fn spend_proposal_takes_min_deposit() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build().execute_with(|| { assert_ok!({ #[allow(deprecated)] Treasury::propose_spend(RuntimeOrigin::signed(0), 1, 3) @@ -316,7 +329,7 @@ fn spend_proposal_takes_min_deposit() { #[test] fn spend_proposal_takes_proportional_deposit() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build().execute_with(|| { assert_ok!({ #[allow(deprecated)] Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) @@ -328,7 +341,7 @@ fn spend_proposal_takes_proportional_deposit() { #[test] fn spend_proposal_fails_when_proposer_poor() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build().execute_with(|| { assert_noop!( { #[allow(deprecated)] @@ -341,7 +354,7 @@ fn spend_proposal_fails_when_proposer_poor() { #[test] fn accepted_spend_proposal_ignored_outside_spend_period() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_ok!({ @@ -361,7 +374,7 @@ fn accepted_spend_proposal_ignored_outside_spend_period() { #[test] fn unused_pot_should_diminish() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build().execute_with(|| { let init_total_issuance = Balances::total_issuance(); Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Balances::total_issuance(), init_total_issuance + 100); @@ -374,7 +387,7 @@ fn unused_pot_should_diminish() { #[test] fn rejected_spend_proposal_ignored_on_spend_period() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_ok!({ @@ -394,7 +407,7 @@ fn rejected_spend_proposal_ignored_on_spend_period() { #[test] fn reject_already_rejected_spend_proposal_fails() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_ok!({ @@ -417,7 +430,7 @@ fn reject_already_rejected_spend_proposal_fails() { #[test] fn reject_non_existent_spend_proposal_fails() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build().execute_with(|| { assert_noop!( { #[allow(deprecated)] @@ -430,7 +443,7 @@ fn reject_non_existent_spend_proposal_fails() { #[test] fn accept_non_existent_spend_proposal_fails() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build().execute_with(|| { assert_noop!( { #[allow(deprecated)] @@ -443,7 +456,7 @@ fn accept_non_existent_spend_proposal_fails() { #[test] fn accept_already_rejected_spend_proposal_fails() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_ok!({ @@ -466,7 +479,7 @@ fn accept_already_rejected_spend_proposal_fails() { #[test] fn accepted_spend_proposal_enacted_on_spend_period() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Treasury::pot(), 100); @@ -487,7 +500,7 @@ fn accepted_spend_proposal_enacted_on_spend_period() { #[test] fn pot_underflow_should_not_diminish() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); 
assert_eq!(Treasury::pot(), 100); @@ -514,7 +527,7 @@ fn pot_underflow_should_not_diminish() { // i.e. pot should not include existential deposit needed for account survival. #[test] fn treasury_account_doesnt_get_deleted() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Treasury::pot(), 100); let treasury_balance = Balances::free_balance(&Treasury::account_id()); @@ -613,7 +626,7 @@ fn genesis_funding_works() { #[test] fn max_approvals_limited() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), u64::MAX); Balances::make_free_balance_be(&0, u64::MAX); @@ -645,7 +658,7 @@ fn max_approvals_limited() { #[test] fn remove_already_removed_approval_fails() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_ok!({ @@ -669,7 +682,7 @@ fn remove_already_removed_approval_fails() { #[test] fn spending_local_in_batch_respects_max_total() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build().execute_with(|| { // Respect the `max_total` for the given origin. assert_ok!(RuntimeCall::from(UtilityCall::batch_all { calls: vec![ @@ -694,7 +707,7 @@ fn spending_local_in_batch_respects_max_total() { #[test] fn spending_in_batch_respects_max_total() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build().execute_with(|| { // Respect the `max_total` for the given origin. assert_ok!(RuntimeCall::from(UtilityCall::batch_all { calls: vec![ @@ -739,7 +752,7 @@ fn spending_in_batch_respects_max_total() { #[test] fn spend_origin_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build().execute_with(|| { assert_ok!(Treasury::spend(RuntimeOrigin::signed(10), Box::new(1), 1, Box::new(6), None)); assert_ok!(Treasury::spend(RuntimeOrigin::signed(10), Box::new(1), 2, Box::new(6), None)); assert_noop!( @@ -764,7 +777,7 @@ fn spend_origin_works() { #[test] fn spend_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build().execute_with(|| { System::set_block_number(1); assert_ok!(Treasury::spend(RuntimeOrigin::signed(10), Box::new(1), 2, Box::new(6), None)); @@ -796,7 +809,7 @@ fn spend_works() { #[test] fn spend_expires() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build().execute_with(|| { assert_eq!(::PayoutPeriod::get(), 5); // spend `0` expires in 5 blocks after the creating. @@ -816,7 +829,7 @@ fn spend_expires() { #[docify::export] #[test] fn spend_payout_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build().execute_with(|| { System::set_block_number(1); // approve a `2` coins spend of asset `1` to beneficiary `6`, the spend valid from now. 
assert_ok!(Treasury::spend(RuntimeOrigin::signed(10), Box::new(1), 2, Box::new(6), None)); @@ -838,7 +851,7 @@ fn spend_payout_works() { #[test] fn payout_retry_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build().execute_with(|| { System::set_block_number(1); assert_ok!(Treasury::spend(RuntimeOrigin::signed(10), Box::new(1), 2, Box::new(6), None)); assert_ok!(Treasury::payout(RuntimeOrigin::signed(1), 0)); @@ -863,7 +876,7 @@ fn payout_retry_works() { #[test] fn spend_valid_from_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build().execute_with(|| { assert_eq!(::PayoutPeriod::get(), 5); System::set_block_number(1); @@ -895,7 +908,7 @@ fn spend_valid_from_works() { #[test] fn void_spend_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build().execute_with(|| { System::set_block_number(1); // spend cannot be voided if already attempted. assert_ok!(Treasury::spend( @@ -926,7 +939,7 @@ fn void_spend_works() { #[test] fn check_status_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build().execute_with(|| { assert_eq!(::PayoutPeriod::get(), 5); System::set_block_number(1); @@ -984,3 +997,167 @@ fn check_status_works() { System::assert_last_event(Event::::SpendProcessed { index: 4 }.into()); }); } + +#[test] +fn try_state_proposals_invariant_1_works() { + ExtBuilder::default().build().execute_with(|| { + use frame_support::pallet_prelude::DispatchError::Other; + // Add a proposal using `propose_spend` + assert_ok!({ + #[allow(deprecated)] + Treasury::propose_spend(RuntimeOrigin::signed(0), 1, 3) + }); + assert_eq!(Proposals::::iter().count(), 1); + assert_eq!(ProposalCount::::get(), 1); + // Check invariant 1 holds + assert!(ProposalCount::::get() as usize >= Proposals::::iter().count()); + // Break invariant 1 by decreasing `ProposalCount` + ProposalCount::::put(0); + // Invariant 1 should be violated + assert_eq!( + Treasury::do_try_state(), + Err(Other("Actual number of proposals exceeds `ProposalCount`.")) + ); + }); +} + +#[test] +fn try_state_proposals_invariant_2_works() { + ExtBuilder::default().build().execute_with(|| { + use frame_support::pallet_prelude::DispatchError::Other; + // Add a proposal using `propose_spend` + assert_ok!({ + #[allow(deprecated)] + Treasury::propose_spend(RuntimeOrigin::signed(0), 1, 3) + }); + assert_eq!(Proposals::::iter().count(), 1); + let current_proposal_count = ProposalCount::::get(); + assert_eq!(current_proposal_count, 1); + // Check invariant 2 holds + assert!( + Proposals::::iter_keys() + .all(|proposal_index| { + proposal_index < current_proposal_count + }) + ); + // Break invariant 2 by inserting the proposal under key = 1 + let proposal = Proposals::::take(0).unwrap(); + Proposals::::insert(1, proposal); + // Invariant 2 should be violated + assert_eq!( + Treasury::do_try_state(), + Err(Other("`ProposalCount` should by strictly greater than any ProposalIndex used as a key for `Proposals`.")) + ); + }); +} + +#[test] +fn try_state_proposals_invariant_3_works() { + ExtBuilder::default().build().execute_with(|| { + use frame_support::pallet_prelude::DispatchError::Other; + // Add a proposal using `propose_spend` + assert_ok!({ + #[allow(deprecated)] + Treasury::propose_spend(RuntimeOrigin::signed(0), 10, 3) + }); + assert_eq!(Proposals::::iter().count(), 1); + // Approve the proposal + assert_ok!({ + #[allow(deprecated)] + Treasury::approve_proposal(RuntimeOrigin::root(), 0) + }); + assert_eq!(Approvals::::get().len(), 1); + // Check invariant 3 holds + 
assert!(Approvals::::get() + .iter() + .all(|proposal_index| { Proposals::::contains_key(proposal_index) })); + // Break invariant 3 by adding another key to `Approvals` + let mut approvals_modified = Approvals::::get(); + approvals_modified.try_push(2).unwrap(); + Approvals::::put(approvals_modified); + // Invariant 3 should be violated + assert_eq!( + Treasury::do_try_state(), + Err(Other("Proposal indices in `Approvals` must also be contained in `Proposals`.")) + ); + }); +} + +#[test] +fn try_state_spends_invariant_1_works() { + ExtBuilder::default().build().execute_with(|| { + use frame_support::pallet_prelude::DispatchError::Other; + // Propose and approve a spend + assert_ok!({ + Treasury::spend(RuntimeOrigin::signed(10), Box::new(1), 1, Box::new(6), None) + }); + assert_eq!(Spends::::iter().count(), 1); + assert_eq!(SpendCount::::get(), 1); + // Check invariant 1 holds + assert!(SpendCount::::get() as usize >= Spends::::iter().count()); + // Break invariant 1 by decreasing `SpendCount` + SpendCount::::put(0); + // Invariant 1 should be violated + assert_eq!( + Treasury::do_try_state(), + Err(Other("Actual number of spends exceeds `SpendCount`.")) + ); + }); +} + +#[test] +fn try_state_spends_invariant_2_works() { + ExtBuilder::default().build().execute_with(|| { + use frame_support::pallet_prelude::DispatchError::Other; + // Propose and approve a spend + assert_ok!({ + Treasury::spend(RuntimeOrigin::signed(10), Box::new(1), 1, Box::new(6), None) + }); + assert_eq!(Spends::::iter().count(), 1); + let current_spend_count = SpendCount::::get(); + assert_eq!(current_spend_count, 1); + // Check invariant 2 holds + assert!( + Spends::::iter_keys() + .all(|spend_index| { + spend_index < current_spend_count + }) + ); + // Break invariant 2 by inserting the spend under key = 1 + let spend = Spends::::take(0).unwrap(); + Spends::::insert(1, spend); + // Invariant 2 should be violated + assert_eq!( + Treasury::do_try_state(), + Err(Other("`SpendCount` should by strictly greater than any SpendIndex used as a key for `Spends`.")) + ); + }); +} + +#[test] +fn try_state_spends_invariant_3_works() { + ExtBuilder::default().build().execute_with(|| { + use frame_support::pallet_prelude::DispatchError::Other; + // Propose and approve a spend + assert_ok!({ + Treasury::spend(RuntimeOrigin::signed(10), Box::new(1), 1, Box::new(6), None) + }); + assert_eq!(Spends::::iter().count(), 1); + let current_spend_count = SpendCount::::get(); + assert_eq!(current_spend_count, 1); + // Check invariant 3 holds + assert!(Spends::::iter_values() + .all(|SpendStatus { valid_from, expire_at, .. 
}| { valid_from < expire_at })); + // Break invariant 3 by reversing spend.expire_at and spend.valid_from + let spend = Spends::::take(0).unwrap(); + Spends::::insert( + 0, + SpendStatus { valid_from: spend.expire_at, expire_at: spend.valid_from, ..spend }, + ); + // Invariant 3 should be violated + assert_eq!( + Treasury::do_try_state(), + Err(Other("Spend cannot expire before it becomes valid.")) + ); + }); +} diff --git a/substrate/frame/tx-pause/Cargo.toml b/substrate/frame/tx-pause/Cargo.toml index 6d96cb8abe79924b6b23c2f509bc63f08a6bb631..9af424f541cd740a950e421452e6c9ff6bfe1ff4 100644 --- a/substrate/frame/tx-pause/Cargo.toml +++ b/substrate/frame/tx-pause/Cargo.toml @@ -16,7 +16,7 @@ codec = { package = "parity-scale-codec", version = "3.2.2", default-features = frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} -scale-info = { version = "2.0.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } sp-runtime = { path = "../../primitives/runtime", default-features = false} sp-std = { path = "../../primitives/std", default-features = false} pallet-balances = { path = "../balances", default-features = false, optional = true } diff --git a/substrate/frame/tx-pause/src/mock.rs b/substrate/frame/tx-pause/src/mock.rs index 60c5fc1eced5c3b7e71eaac3a606d763ea57b175..66218c8c015cb83ea90d281723d9f740edf24e95 100644 --- a/substrate/frame/tx-pause/src/mock.rs +++ b/substrate/frame/tx-pause/src/mock.rs @@ -78,6 +78,7 @@ impl pallet_balances::Config for Test { type ReserveIdentifier = [u8; 8]; type FreezeIdentifier = (); type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type MaxHolds = ConstU32<0>; type MaxFreezes = ConstU32<0>; } diff --git a/substrate/frame/uniques/Cargo.toml b/substrate/frame/uniques/Cargo.toml index b0c063a83e752511c8210d652bf6c8768a6132f2..4c1bcca573dc1281d9958c330069cb8ddc62d24e 100644 --- a/substrate/frame/uniques/Cargo.toml +++ b/substrate/frame/uniques/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} diff --git a/substrate/frame/uniques/src/mock.rs b/substrate/frame/uniques/src/mock.rs index 5c44a7ed7a5390cb54f4793c935c5a31862f8859..1f62c3c4e93bf1d7b9ab5f51315d1536e34ca041 100644 --- a/substrate/frame/uniques/src/mock.rs +++ b/substrate/frame/uniques/src/mock.rs @@ -80,6 +80,7 @@ impl pallet_balances::Config for Test { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = (); } diff --git a/substrate/frame/utility/Cargo.toml b/substrate/frame/utility/Cargo.toml index 1f803b6ca5bb779fe8835a50ba17b6fbeb3be090..8f7a368709b612376dfc807ae5fb0405cfafc776 100644 --- a/substrate/frame/utility/Cargo.toml +++ b/substrate/frame/utility/Cargo.toml @@ -14,7 +14,7 
@@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} diff --git a/substrate/frame/utility/src/lib.rs b/substrate/frame/utility/src/lib.rs index af212a31eb9713a651ac9d40c44c582ffc3c1a8e..7f963e3637d6fcb606591c46e9511280ee776879 100644 --- a/substrate/frame/utility/src/lib.rs +++ b/substrate/frame/utility/src/lib.rs @@ -479,13 +479,15 @@ pub mod pallet { /// /// The dispatch origin for this call must be _Root_. #[pallet::call_index(5)] - #[pallet::weight((*_weight, call.get_dispatch_info().class))] + #[pallet::weight((*weight, call.get_dispatch_info().class))] pub fn with_weight( origin: OriginFor, call: Box<::RuntimeCall>, - _weight: Weight, + weight: Weight, ) -> DispatchResult { ensure_root(origin)?; + let _ = weight; // Explicitly don't check the the weight witness. + let res = call.dispatch_bypass_filter(frame_system::RawOrigin::Root.into()); res.map(|_| ()).map_err(|e| e.error) } diff --git a/substrate/frame/utility/src/tests.rs b/substrate/frame/utility/src/tests.rs index 183853c4e8ac68d3f36767edfa9bc860025b4ce6..01e3f5264bff0087a69d313188233e261c941ce5 100644 --- a/substrate/frame/utility/src/tests.rs +++ b/substrate/frame/utility/src/tests.rs @@ -132,7 +132,7 @@ frame_support::construct_runtime!( System: frame_system::{Pallet, Call, Config, Storage, Event}, Timestamp: pallet_timestamp::{Call, Inherent}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, - RootTesting: pallet_root_testing::{Pallet, Call, Storage}, + RootTesting: pallet_root_testing::{Pallet, Call, Storage, Event}, Council: pallet_collective::, Utility: utility::{Pallet, Call, Event}, Example: example::{Pallet, Call}, @@ -183,10 +183,13 @@ impl pallet_balances::Config for Test { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = (); } -impl pallet_root_testing::Config for Test {} +impl pallet_root_testing::Config for Test { + type RuntimeEvent = RuntimeEvent; +} impl pallet_timestamp::Config for Test { type Moment = u64; diff --git a/substrate/frame/vesting/Cargo.toml b/substrate/frame/vesting/Cargo.toml index 18e3a4aeaa1745bb37c7f06a4541dc33a9a8d4e5..ed13a15bc977c0c99a30b4966c94d96588cd67d0 100644 --- a/substrate/frame/vesting/Cargo.toml +++ b/substrate/frame/vesting/Cargo.toml @@ -17,7 +17,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", ] } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} diff --git a/substrate/frame/vesting/src/benchmarking.rs b/substrate/frame/vesting/src/benchmarking.rs index 4af48f5d368dbfc75643fa1192e4e51b23f33c18..34aa04607add1a9afad621f4c8fd89dc18876436 100644 --- 
a/substrate/frame/vesting/src/benchmarking.rs +++ b/substrate/frame/vesting/src/benchmarking.rs @@ -383,6 +383,33 @@ benchmarks! { ); } +force_remove_vesting_schedule { + let l in 0 .. MaxLocksOf::::get() - 1; + let s in 2 .. T::MAX_VESTING_SCHEDULES; + + let source: T::AccountId = account("source", 0, SEED); + let source_lookup: ::Source = T::Lookup::unlookup(source.clone()); + T::Currency::make_free_balance_be(&source, BalanceOf::::max_value()); + + let target: T::AccountId = account("target", 0, SEED); + let target_lookup: ::Source = T::Lookup::unlookup(target.clone()); + T::Currency::make_free_balance_be(&target, T::Currency::minimum_balance()); + + // Give target existing locks. + add_locks::(&target, l as u8); + let _ = add_vesting_schedules::(target_lookup.clone(), s)?; + + // The last vesting schedule. + let schedule_index = s - 1; + }: _(RawOrigin::Root, target_lookup, schedule_index) + verify { + assert_eq!( + Vesting::::vesting(&target).unwrap().len(), + schedule_index as usize, + "Schedule count should reduce by 1" + ); + } + impl_benchmark_test_suite!( Vesting, crate::mock::ExtBuilder::default().existential_deposit(256).build(), diff --git a/substrate/frame/vesting/src/lib.rs b/substrate/frame/vesting/src/lib.rs index ee67a038e4d7bbbf89ecf11e7d9c91837417aa31..dbad7926a30f56a3ec3d565320ea9ea4b1c77561 100644 --- a/substrate/frame/vesting/src/lib.rs +++ b/substrate/frame/vesting/src/lib.rs @@ -427,6 +427,36 @@ pub mod pallet { Ok(()) } + + /// Force remove a vesting schedule + /// + /// The dispatch origin for this call must be _Root_. + /// + /// - `target`: An account that has a vesting schedule + /// - `schedule_index`: The vesting schedule index that should be removed + #[pallet::call_index(5)] + #[pallet::weight( + T::WeightInfo::force_remove_vesting_schedule(MaxLocksOf::::get(), T::MAX_VESTING_SCHEDULES) + )] + pub fn force_remove_vesting_schedule( + origin: OriginFor, + target: ::Source, + schedule_index: u32, + ) -> DispatchResultWithPostInfo { + ensure_root(origin)?; + let who = T::Lookup::lookup(target)?; + + let schedules_count = Vesting::::decode_len(&who).unwrap_or_default(); + ensure!(schedule_index < schedules_count as u32, Error::::InvalidScheduleParams); + + Self::remove_vesting_schedule(&who, schedule_index)?; + + Ok(Some(T::WeightInfo::force_remove_vesting_schedule( + MaxLocksOf::::get(), + schedules_count as u32, + )) + .into()) + } } } diff --git a/substrate/frame/vesting/src/mock.rs b/substrate/frame/vesting/src/mock.rs index fe1779475a69ae850fc4335309f6fadb4cbe5d7e..13d6d5ba57a6fbb9379d21fd27dcddd341b27960 100644 --- a/substrate/frame/vesting/src/mock.rs +++ b/substrate/frame/vesting/src/mock.rs @@ -78,6 +78,7 @@ impl pallet_balances::Config for Test { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = (); } parameter_types! { diff --git a/substrate/frame/vesting/src/tests.rs b/substrate/frame/vesting/src/tests.rs index c35686bd5146c3c951babbe9f044d53566f9766e..2e1e41fc9578fbb8e7e0b990f0b91c9c13a86402 100644 --- a/substrate/frame/vesting/src/tests.rs +++ b/substrate/frame/vesting/src/tests.rs @@ -1155,3 +1155,39 @@ fn vested_transfer_less_than_existential_deposit_fails() { ); }); } + +#[test] +fn remove_vesting_schedule() { + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { + assert_eq!(Balances::free_balance(&3), 256 * 30); + assert_eq!(Balances::free_balance(&4), 256 * 40); + // Account 4 should not have any vesting yet. 
+ assert_eq!(Vesting::vesting(&4), None); + // Make the schedule for the new transfer. + let new_vesting_schedule = VestingInfo::new( + ED * 5, + (ED * 5) / 20, // Vesting over 20 blocks + 10, + ); + assert_ok!(Vesting::vested_transfer(Some(3).into(), 4, new_vesting_schedule)); + // Now account 4 should have vesting. + assert_eq!(Vesting::vesting(&4).unwrap(), vec![new_vesting_schedule]); + // Account 4 has 5 * 256 locked. + assert_eq!(Vesting::vesting_balance(&4), Some(256 * 5)); + // Verify only root can call. + assert_noop!(Vesting::force_remove_vesting_schedule(Some(4).into(), 4, 0), BadOrigin); + // Verify that root can remove the schedule. + assert_ok!(Vesting::force_remove_vesting_schedule(RawOrigin::Root.into(), 4, 0)); + // Verify that last event is VestingCompleted. + System::assert_last_event(Event::VestingCompleted { account: 4 }.into()); + // Appropriate storage is cleaned up. + assert!(!>::contains_key(4)); + // Check the vesting balance is zero. + assert_eq!(Vesting::vesting(&4), None); + // Verifies that trying to remove a schedule when it doesnt exist throws error. + assert_noop!( + Vesting::force_remove_vesting_schedule(RawOrigin::Root.into(), 4, 0), + Error::::InvalidScheduleParams + ); + }); +} diff --git a/substrate/frame/vesting/src/weights.rs b/substrate/frame/vesting/src/weights.rs index 17bde888577795667767c8cd8c2dc544d238a578..ddc7db8fa61fbedef4c2629117459dc37c5f8bcd 100644 --- a/substrate/frame/vesting/src/weights.rs +++ b/substrate/frame/vesting/src/weights.rs @@ -15,32 +15,29 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_vesting +//! Autogenerated weights for `pallet_vesting` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-10-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// target/production/substrate-node // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_vesting -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/vesting/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_vesting +// --chain=dev +// --header=./substrate/HEADER-APACHE2 +// --output=./substrate/frame/vesting/src/weights.rs +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +47,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_vesting. +/// Weight functions needed for `pallet_vesting`. 
pub trait WeightInfo { fn vest_locked(l: u32, s: u32, ) -> Weight; fn vest_unlocked(l: u32, s: u32, ) -> Weight; @@ -60,372 +57,419 @@ pub trait WeightInfo { fn force_vested_transfer(l: u32, s: u32, ) -> Weight; fn not_unlocking_merge_schedules(l: u32, s: u32, ) -> Weight; fn unlocking_merge_schedules(l: u32, s: u32, ) -> Weight; + fn force_remove_vesting_schedule(l: u32, s: u32, ) -> Weight; } -/// Weights for pallet_vesting using the Substrate node and recommended hardware. +/// Weights for `pallet_vesting` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[1, 28]`. fn vest_locked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `381 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 35_336_000 picoseconds. - Weight::from_parts(34_290_169, 4764) - // Standard Error: 1_381 - .saturating_add(Weight::from_parts(76_354, 0).saturating_mul(l.into())) - // Standard Error: 2_457 - .saturating_add(Weight::from_parts(81_362, 0).saturating_mul(s.into())) + // Minimum execution time: 32_846_000 picoseconds. + Weight::from_parts(30_974_459, 4764) + // Standard Error: 1_755 + .saturating_add(Weight::from_parts(73_138, 0).saturating_mul(l.into())) + // Standard Error: 3_123 + .saturating_add(Weight::from_parts(82_417, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[1, 28]`. 
fn vest_unlocked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `381 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 38_540_000 picoseconds. - Weight::from_parts(38_893_820, 4764) - // Standard Error: 1_710 - .saturating_add(Weight::from_parts(62_106, 0).saturating_mul(l.into())) - // Standard Error: 3_043 - .saturating_add(Weight::from_parts(41_966, 0).saturating_mul(s.into())) + // Minimum execution time: 34_360_000 picoseconds. + Weight::from_parts(34_964_080, 4764) + // Standard Error: 1_996 + .saturating_add(Weight::from_parts(48_024, 0).saturating_mul(l.into())) + // Standard Error: 3_552 + .saturating_add(Weight::from_parts(34_411, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[1, 28]`. fn vest_other_locked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `484 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 37_529_000 picoseconds. - Weight::from_parts(36_781_151, 4764) - // Standard Error: 1_490 - .saturating_add(Weight::from_parts(76_322, 0).saturating_mul(l.into())) - // Standard Error: 2_652 - .saturating_add(Weight::from_parts(76_914, 0).saturating_mul(s.into())) + // Minimum execution time: 33_859_000 picoseconds. 
+ Weight::from_parts(32_950_681, 4764) + // Standard Error: 1_745 + .saturating_add(Weight::from_parts(69_323, 0).saturating_mul(l.into())) + // Standard Error: 3_105 + .saturating_add(Weight::from_parts(86_370, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[1, 28]`. fn vest_other_unlocked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `484 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 41_217_000 picoseconds. - Weight::from_parts(40_942_515, 4764) - // Standard Error: 2_098 - .saturating_add(Weight::from_parts(65_213, 0).saturating_mul(l.into())) - // Standard Error: 3_733 - .saturating_add(Weight::from_parts(63_326, 0).saturating_mul(s.into())) + // Minimum execution time: 36_239_000 picoseconds. 
+ Weight::from_parts(36_459_756, 4764) + // Standard Error: 2_051 + .saturating_add(Weight::from_parts(56_670, 0).saturating_mul(l.into())) + // Standard Error: 3_650 + .saturating_add(Weight::from_parts(49_663, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[0, 27]`. fn vested_transfer(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `555 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 76_396_000 picoseconds. - Weight::from_parts(77_085_336, 4764) - // Standard Error: 2_795 - .saturating_add(Weight::from_parts(88_995, 0).saturating_mul(l.into())) - // Standard Error: 4_974 - .saturating_add(Weight::from_parts(135_384, 0).saturating_mul(s.into())) + // Minimum execution time: 70_330_000 picoseconds. 
+ Weight::from_parts(71_196_328, 4764) + // Standard Error: 2_923 + .saturating_add(Weight::from_parts(67_238, 0).saturating_mul(l.into())) + // Standard Error: 5_201 + .saturating_add(Weight::from_parts(89_102, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[0, 27]`. fn force_vested_transfer(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `658 + l * (25 ±0) + s * (36 ±0)` // Estimated: `6196` - // Minimum execution time: 77_312_000 picoseconds. - Weight::from_parts(79_600_900, 6196) - // Standard Error: 3_232 - .saturating_add(Weight::from_parts(78_018, 0).saturating_mul(l.into())) - // Standard Error: 5_750 - .saturating_add(Weight::from_parts(100_848, 0).saturating_mul(s.into())) + // Minimum execution time: 70_235_000 picoseconds. 
+ Weight::from_parts(71_960_020, 6196) + // Standard Error: 2_493 + .saturating_add(Weight::from_parts(64_835, 0).saturating_mul(l.into())) + // Standard Error: 4_436 + .saturating_add(Weight::from_parts(102_159, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[2, 28]`. fn not_unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `482 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 38_769_000 picoseconds. - Weight::from_parts(37_752_437, 4764) - // Standard Error: 1_415 - .saturating_add(Weight::from_parts(78_398, 0).saturating_mul(l.into())) - // Standard Error: 2_614 - .saturating_add(Weight::from_parts(78_922, 0).saturating_mul(s.into())) + // Minimum execution time: 34_352_000 picoseconds. 
+ Weight::from_parts(33_697_027, 4764) + // Standard Error: 2_008 + .saturating_add(Weight::from_parts(79_270, 0).saturating_mul(l.into())) + // Standard Error: 3_710 + .saturating_add(Weight::from_parts(60_691, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[2, 28]`. fn unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `482 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 43_021_000 picoseconds. - Weight::from_parts(42_182_858, 4764) - // Standard Error: 1_747 - .saturating_add(Weight::from_parts(83_938, 0).saturating_mul(l.into())) - // Standard Error: 3_227 - .saturating_add(Weight::from_parts(84_652, 0).saturating_mul(s.into())) + // Minimum execution time: 37_467_000 picoseconds. + Weight::from_parts(36_866_847, 4764) + // Standard Error: 1_692 + .saturating_add(Weight::from_parts(57_882, 0).saturating_mul(l.into())) + // Standard Error: 3_124 + .saturating_add(Weight::from_parts(80_266, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) + } + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `l` is `[0, 49]`. + /// The range of component `s` is `[2, 28]`. + fn force_remove_vesting_schedule(l: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `555 + l * (25 ±0) + s * (36 ±0)` + // Estimated: `4764` + // Minimum execution time: 41_497_000 picoseconds. 
+ Weight::from_parts(38_763_834, 4764) + // Standard Error: 2_030 + .saturating_add(Weight::from_parts(99_580, 0).saturating_mul(l.into())) + // Standard Error: 3_750 + .saturating_add(Weight::from_parts(132_188, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[1, 28]`. fn vest_locked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `381 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 35_336_000 picoseconds. - Weight::from_parts(34_290_169, 4764) - // Standard Error: 1_381 - .saturating_add(Weight::from_parts(76_354, 0).saturating_mul(l.into())) - // Standard Error: 2_457 - .saturating_add(Weight::from_parts(81_362, 0).saturating_mul(s.into())) + // Minimum execution time: 32_846_000 picoseconds. + Weight::from_parts(30_974_459, 4764) + // Standard Error: 1_755 + .saturating_add(Weight::from_parts(73_138, 0).saturating_mul(l.into())) + // Standard Error: 3_123 + .saturating_add(Weight::from_parts(82_417, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[1, 28]`. fn vest_unlocked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `381 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 38_540_000 picoseconds. 
- Weight::from_parts(38_893_820, 4764) - // Standard Error: 1_710 - .saturating_add(Weight::from_parts(62_106, 0).saturating_mul(l.into())) - // Standard Error: 3_043 - .saturating_add(Weight::from_parts(41_966, 0).saturating_mul(s.into())) + // Minimum execution time: 34_360_000 picoseconds. + Weight::from_parts(34_964_080, 4764) + // Standard Error: 1_996 + .saturating_add(Weight::from_parts(48_024, 0).saturating_mul(l.into())) + // Standard Error: 3_552 + .saturating_add(Weight::from_parts(34_411, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[1, 28]`. fn vest_other_locked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `484 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 37_529_000 picoseconds. - Weight::from_parts(36_781_151, 4764) - // Standard Error: 1_490 - .saturating_add(Weight::from_parts(76_322, 0).saturating_mul(l.into())) - // Standard Error: 2_652 - .saturating_add(Weight::from_parts(76_914, 0).saturating_mul(s.into())) + // Minimum execution time: 33_859_000 picoseconds. 
+ Weight::from_parts(32_950_681, 4764) + // Standard Error: 1_745 + .saturating_add(Weight::from_parts(69_323, 0).saturating_mul(l.into())) + // Standard Error: 3_105 + .saturating_add(Weight::from_parts(86_370, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[1, 28]`. fn vest_other_unlocked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `484 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 41_217_000 picoseconds. - Weight::from_parts(40_942_515, 4764) - // Standard Error: 2_098 - .saturating_add(Weight::from_parts(65_213, 0).saturating_mul(l.into())) - // Standard Error: 3_733 - .saturating_add(Weight::from_parts(63_326, 0).saturating_mul(s.into())) + // Minimum execution time: 36_239_000 picoseconds. 
+ Weight::from_parts(36_459_756, 4764) + // Standard Error: 2_051 + .saturating_add(Weight::from_parts(56_670, 0).saturating_mul(l.into())) + // Standard Error: 3_650 + .saturating_add(Weight::from_parts(49_663, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[0, 27]`. fn vested_transfer(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `555 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 76_396_000 picoseconds. - Weight::from_parts(77_085_336, 4764) - // Standard Error: 2_795 - .saturating_add(Weight::from_parts(88_995, 0).saturating_mul(l.into())) - // Standard Error: 4_974 - .saturating_add(Weight::from_parts(135_384, 0).saturating_mul(s.into())) + // Minimum execution time: 70_330_000 picoseconds. 
+ Weight::from_parts(71_196_328, 4764) + // Standard Error: 2_923 + .saturating_add(Weight::from_parts(67_238, 0).saturating_mul(l.into())) + // Standard Error: 5_201 + .saturating_add(Weight::from_parts(89_102, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[0, 27]`. fn force_vested_transfer(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `658 + l * (25 ±0) + s * (36 ±0)` // Estimated: `6196` - // Minimum execution time: 77_312_000 picoseconds. - Weight::from_parts(79_600_900, 6196) - // Standard Error: 3_232 - .saturating_add(Weight::from_parts(78_018, 0).saturating_mul(l.into())) - // Standard Error: 5_750 - .saturating_add(Weight::from_parts(100_848, 0).saturating_mul(s.into())) + // Minimum execution time: 70_235_000 picoseconds. 
+ Weight::from_parts(71_960_020, 6196) + // Standard Error: 2_493 + .saturating_add(Weight::from_parts(64_835, 0).saturating_mul(l.into())) + // Standard Error: 4_436 + .saturating_add(Weight::from_parts(102_159, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[2, 28]`. fn not_unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `482 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 38_769_000 picoseconds. - Weight::from_parts(37_752_437, 4764) - // Standard Error: 1_415 - .saturating_add(Weight::from_parts(78_398, 0).saturating_mul(l.into())) - // Standard Error: 2_614 - .saturating_add(Weight::from_parts(78_922, 0).saturating_mul(s.into())) + // Minimum execution time: 34_352_000 picoseconds. 
+ Weight::from_parts(33_697_027, 4764) + // Standard Error: 2_008 + .saturating_add(Weight::from_parts(79_270, 0).saturating_mul(l.into())) + // Standard Error: 3_710 + .saturating_add(Weight::from_parts(60_691, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[2, 28]`. fn unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `482 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 43_021_000 picoseconds. - Weight::from_parts(42_182_858, 4764) - // Standard Error: 1_747 - .saturating_add(Weight::from_parts(83_938, 0).saturating_mul(l.into())) - // Standard Error: 3_227 - .saturating_add(Weight::from_parts(84_652, 0).saturating_mul(s.into())) + // Minimum execution time: 37_467_000 picoseconds. + Weight::from_parts(36_866_847, 4764) + // Standard Error: 1_692 + .saturating_add(Weight::from_parts(57_882, 0).saturating_mul(l.into())) + // Standard Error: 3_124 + .saturating_add(Weight::from_parts(80_266, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + } + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `l` is `[0, 49]`. + /// The range of component `s` is `[2, 28]`. + fn force_remove_vesting_schedule(l: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `555 + l * (25 ±0) + s * (36 ±0)` + // Estimated: `4764` + // Minimum execution time: 41_497_000 picoseconds. 
+ Weight::from_parts(38_763_834, 4764) + // Standard Error: 2_030 + .saturating_add(Weight::from_parts(99_580, 0).saturating_mul(l.into())) + // Standard Error: 3_750 + .saturating_add(Weight::from_parts(132_188, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } diff --git a/substrate/frame/whitelist/Cargo.toml b/substrate/frame/whitelist/Cargo.toml index ec78b03c08bde398b43dc0fc2e63f03b273372eb..c52466153205a73d4e1b69fc413c2e389f5faea0 100644 --- a/substrate/frame/whitelist/Cargo.toml +++ b/substrate/frame/whitelist/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} diff --git a/substrate/frame/whitelist/src/mock.rs b/substrate/frame/whitelist/src/mock.rs index e6ddbf02bb2bf882624806df823cb49879c57314..4e70a503c28065b0cebfae714a7c759a5da9233d 100644 --- a/substrate/frame/whitelist/src/mock.rs +++ b/substrate/frame/whitelist/src/mock.rs @@ -83,6 +83,7 @@ impl pallet_balances::Config for Test { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = (); } diff --git a/substrate/primitives/api/Cargo.toml b/substrate/primitives/api/Cargo.toml index 95b5dde371394952a202575fdc61153f1c229640..c3a68af097b0a46ecf6225306f76cfc9de0c06d5 100644 --- a/substrate/primitives/api/Cargo.toml +++ b/substrate/primitives/api/Cargo.toml @@ -24,7 +24,7 @@ sp-state-machine = { path = "../state-machine", default-features = false, option sp-trie = { path = "../trie", default-features = false, optional = true} hash-db = { version = "0.16.0", optional = true } thiserror = { version = "1.0.48", optional = true } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } sp-metadata-ir = { path = "../metadata-ir", default-features = false, optional = true} log = { version = "0.4.17", default-features = false } diff --git a/substrate/primitives/api/proc-macro/Cargo.toml b/substrate/primitives/api/proc-macro/Cargo.toml index 25c87b5d0a4dfa660786799dd4fd9beca9f3c1bf..d60b9f1bb4ea6c49fa167d77f33f88d3bef340ca 100644 --- a/substrate/primitives/api/proc-macro/Cargo.toml +++ b/substrate/primitives/api/proc-macro/Cargo.toml @@ -30,6 +30,6 @@ assert_matches = "1.3.0" [features] # Required for the doc tests default = [ "std" ] -std = [] +std = [ "blake2/std" ] no-metadata-docs = [] frame-metadata = [] diff --git a/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs b/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs index 74cfa0980623b0dd14c029c520093bd775f4da4d..e439a796e28d451ac47f7ed9858591b02dd50d7f 100644 --- a/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -38,7 +38,7 @@ use syn::{ Attribute, Ident, ImplItem, ItemImpl, LitInt, LitStr, Path, Signature, Type, TypePath, }; -use std::collections::HashSet; +use 
std::collections::HashMap;
 
 /// The structure used for parsing the runtime api implementations.
 struct RuntimeApiImpls {
@@ -726,7 +726,7 @@ fn populate_runtime_api_versions(
 fn generate_runtime_api_versions(impls: &[ItemImpl]) -> Result<TokenStream> {
 	let mut result = Vec::<TokenStream>::with_capacity(impls.len());
 	let mut sections = Vec::<TokenStream>::with_capacity(impls.len());
-	let mut processed_traits = HashSet::new();
+	let mut processed_traits = HashMap::new();
 
 	let c = generate_crate_access();
@@ -746,13 +746,17 @@ fn generate_runtime_api_versions(impls: &[ItemImpl]) -> Result<TokenStream> {
 			.ident;
 
 		let span = trait_.span();
-		if !processed_traits.insert(trait_) {
-			return Err(Error::new(
+		if let Some(other_span) = processed_traits.insert(trait_, span) {
+			let mut error = Error::new(
 				span,
 				"Two traits with the same name detected! \
 				The trait name is used to generate its ID. \
 				Please rename one trait at the declaration!",
-			))
+			);
+
+			error.combine(Error::new(other_span, "First trait implementation."));
+
+			return Err(error)
 		}
 
 		let id: Path = parse_quote!( #path ID );
diff --git a/substrate/primitives/api/proc-macro/src/utils.rs b/substrate/primitives/api/proc-macro/src/utils.rs
index c9389154bbf40ff780f97e5d096dcaee7f1c1ce2..e261b162b5aa1c96252232b7c079415623bfa43b 100644
--- a/substrate/primitives/api/proc-macro/src/utils.rs
+++ b/substrate/primitives/api/proc-macro/src/utils.rs
@@ -15,21 +15,16 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+use crate::common::API_VERSION_ATTRIBUTE;
+use inflector::Inflector;
 use proc_macro2::{Span, TokenStream};
-
+use proc_macro_crate::{crate_name, FoundCrate};
+use quote::{format_ident, quote, ToTokens};
 use syn::{
 	parse_quote, spanned::Spanned, token::And, Attribute, Error, FnArg, GenericArgument, Ident,
 	ImplItem, ItemImpl, Pat, Path, PathArguments, Result, ReturnType, Signature, Type, TypePath,
 };
-use quote::{format_ident, quote};
-
-use proc_macro_crate::{crate_name, FoundCrate};
-
-use crate::common::API_VERSION_ATTRIBUTE;
-
-use inflector::Inflector;
-
 /// Generates the access to the `sc_client` crate.
 pub fn generate_crate_access() -> TokenStream {
 	match crate_name("sp-api") {
@@ -38,10 +33,15 @@ pub fn generate_crate_access() -> TokenStream {
 			let renamed_name = Ident::new(&renamed_name, Span::call_site());
 			quote!(#renamed_name)
 		},
-		Err(e) => {
-			let err = Error::new(Span::call_site(), e).to_compile_error();
-			quote!( #err )
-		},
+		Err(e) =>
+			if let Ok(FoundCrate::Name(name)) = crate_name(&"frame") {
+				let path = format!("{}::deps::{}", name, "sp_api");
+				let path = syn::parse_str::<syn::Path>(&path).expect("is a valid path; qed");
+				quote!( #path )
+			} else {
+				let err = Error::new(Span::call_site(), e).to_compile_error();
+				quote!( #err )
+			},
 	}
 }
@@ -261,8 +261,6 @@ pub fn versioned_trait_name(trait_ident: &Ident, version: u64) -> Ident {
 /// Extract the documentation from the provided attributes.
#[cfg(feature = "frame-metadata")] pub fn get_doc_literals(attrs: &[syn::Attribute]) -> Vec { - use quote::ToTokens; - attrs .iter() .filter_map(|attr| { diff --git a/substrate/primitives/api/src/lib.rs b/substrate/primitives/api/src/lib.rs index c3f80acf09ae59a436e79ccb988990a5131ed651..c122a2348b8e2daf36f79eff54f7aaa216ab0f83 100644 --- a/substrate/primitives/api/src/lib.rs +++ b/substrate/primitives/api/src/lib.rs @@ -665,6 +665,34 @@ pub trait CallApiAt { ) -> Result<(), ApiError>; } +#[cfg(feature = "std")] +impl> CallApiAt for std::sync::Arc { + type StateBackend = T::StateBackend; + + fn call_api_at(&self, params: CallApiAtParams) -> Result, ApiError> { + (**self).call_api_at(params) + } + + fn runtime_version_at( + &self, + at_hash: ::Hash, + ) -> Result { + (**self).runtime_version_at(at_hash) + } + + fn state_at(&self, at: ::Hash) -> Result { + (**self).state_at(at) + } + + fn initialize_extensions( + &self, + at: ::Hash, + extensions: &mut Extensions, + ) -> Result<(), ApiError> { + (**self).initialize_extensions(at, extensions) + } +} + /// Auxiliary wrapper that holds an api instance and binds it to the given lifetime. #[cfg(feature = "std")] pub struct ApiRef<'a, T>(T, std::marker::PhantomData<&'a ()>); diff --git a/substrate/primitives/api/test/Cargo.toml b/substrate/primitives/api/test/Cargo.toml index 0cc3ce7969c224339f60854b6a2b1514f923796f..f207f5ff02dd0127a2cf072af4305ff0f2f8516d 100644 --- a/substrate/primitives/api/test/Cargo.toml +++ b/substrate/primitives/api/test/Cargo.toml @@ -23,7 +23,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1" } sp-state-machine = { path = "../../state-machine" } trybuild = "1.0.74" rustversion = "1.0.6" -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } [dev-dependencies] criterion = "0.4.0" diff --git a/substrate/primitives/api/test/tests/runtime_calls.rs b/substrate/primitives/api/test/tests/runtime_calls.rs index 353be73dcccdadec537b189e3aadc9d7b913c37b..e66be7f9bf1a4b8ecb6619fe74035278e54ba7dc 100644 --- a/substrate/primitives/api/test/tests/runtime_calls.rs +++ b/substrate/primitives/api/test/tests/runtime_calls.rs @@ -17,6 +17,7 @@ use std::panic::UnwindSafe; +use sc_block_builder::BlockBuilderBuilder; use sp_api::{ApiExt, Core, ProvideRuntimeApi}; use sp_runtime::{ traits::{HashingFor, Header as HeaderT}, @@ -31,7 +32,6 @@ use substrate_test_runtime_client::{ }; use codec::Encode; -use sc_block_builder::BlockBuilderProvider; use sp_consensus::SelectChain; use substrate_test_runtime_client::sc_executor::WasmExecutor; @@ -105,9 +105,12 @@ fn record_proof_works() { .into_unchecked_extrinsic(); // Build the block and record proof - let mut builder = client - .new_block_at(client.chain_info().best_hash, Default::default(), true) - .expect("Creates block builder"); + let mut builder = BlockBuilderBuilder::new(&client) + .on_parent_block(client.chain_info().best_hash) + .with_parent_block_number(client.chain_info().best_number) + .enable_proof_recording() + .build() + .unwrap(); builder.push(transaction.clone()).unwrap(); let (block, _, proof) = builder.build().expect("Bake block").into_inner(); diff --git a/substrate/primitives/api/test/tests/ui/impl_two_traits_with_same_name.stderr b/substrate/primitives/api/test/tests/ui/impl_two_traits_with_same_name.stderr index 9e014e3ea821a192e6a6af64c45a23022c67a6f3..2197bbc99cfb1dcd4cd76c627a6280882c5517e2 100644 --- 
a/substrate/primitives/api/test/tests/ui/impl_two_traits_with_same_name.stderr +++ b/substrate/primitives/api/test/tests/ui/impl_two_traits_with_same_name.stderr @@ -3,3 +3,9 @@ error: Two traits with the same name detected! The trait name is used to generat | 41 | impl second::Api for Runtime { | ^^^ + +error: First trait implementation. + --> tests/ui/impl_two_traits_with_same_name.rs:37:13 + | +37 | impl self::Api for Runtime { + | ^^^ diff --git a/substrate/primitives/application-crypto/Cargo.toml b/substrate/primitives/application-crypto/Cargo.toml index 7c5e317307790728977a1f069388dc671cceba87..a4a1bc44a69b9128454a88e56c04e2f37a4ebc0e 100644 --- a/substrate/primitives/application-crypto/Cargo.toml +++ b/substrate/primitives/application-crypto/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] sp-core = { path = "../core", default-features = false} codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.188", default-features = false, optional = true, features = ["derive", "alloc"] } sp-std = { path = "../std", default-features = false} sp-io = { path = "../io", default-features = false} diff --git a/substrate/primitives/application-crypto/src/ecdsa_bls377.rs b/substrate/primitives/application-crypto/src/ecdsa_bls377.rs new file mode 100644 index 0000000000000000000000000000000000000000..70940587cedaf6312dbc75834d2180131ca157b4 --- /dev/null +++ b/substrate/primitives/application-crypto/src/ecdsa_bls377.rs @@ -0,0 +1,57 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! ECDSA and BLS12-377 paired crypto applications. + +use crate::{KeyTypeId, RuntimePublic}; + +pub use sp_core::paired_crypto::ecdsa_bls377::*; + +mod app { + crate::app_crypto!(super, sp_core::testing::ECDSA_BLS377); +} + +#[cfg(feature = "full_crypto")] +pub use app::Pair as AppPair; +pub use app::{Public as AppPublic, Signature as AppSignature}; + +impl RuntimePublic for Public { + type Signature = Signature; + + /// Dummy implementation. Returns an empty vector. + fn all(_key_type: KeyTypeId) -> Vec { + Vec::new() + } + + fn generate_pair(key_type: KeyTypeId, seed: Option>) -> Self { + sp_io::crypto::ecdsa_bls377_generate(key_type, seed) + } + + /// Dummy implementation. Returns `None`. + fn sign>(&self, _key_type: KeyTypeId, _msg: &M) -> Option { + None + } + + /// Dummy implementation. Returns `false`. 
+ fn verify>(&self, _msg: &M, _signature: &Self::Signature) -> bool { + false + } + + fn to_raw_vec(&self) -> Vec { + sp_core::crypto::ByteArray::to_raw_vec(self) + } +} diff --git a/substrate/primitives/application-crypto/src/lib.rs b/substrate/primitives/application-crypto/src/lib.rs index 5384220bc9ca3f1ccf59144311b7ad999a91defa..686b486f335301e8750253cb2e306a0f5cb222d4 100644 --- a/substrate/primitives/application-crypto/src/lib.rs +++ b/substrate/primitives/application-crypto/src/lib.rs @@ -50,6 +50,8 @@ pub mod bls377; #[cfg(feature = "bls-experimental")] pub mod bls381; pub mod ecdsa; +#[cfg(feature = "bls-experimental")] +pub mod ecdsa_bls377; pub mod ed25519; pub mod sr25519; mod traits; diff --git a/substrate/primitives/arithmetic/Cargo.toml b/substrate/primitives/arithmetic/Cargo.toml index 4c2a78aec6fbad473194dfe7acdd1e5b854a3363..249aebec68fdd0d93afe57c512cd5a82a5bf2aa6 100644 --- a/substrate/primitives/arithmetic/Cargo.toml +++ b/substrate/primitives/arithmetic/Cargo.toml @@ -20,7 +20,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = ] } integer-sqrt = "0.1.2" num-traits = { version = "0.2.8", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.188", default-features = false, features = ["derive", "alloc"], optional = true } static_assertions = "1.1.0" sp-std = { path = "../std", default-features = false} diff --git a/substrate/primitives/authority-discovery/Cargo.toml b/substrate/primitives/authority-discovery/Cargo.toml index 024711bd94a89c733832ee3d69c2b9c5020c8192..e4f44e9da383e2121b419eec2bb94a0e4ea4f7fe 100644 --- a/substrate/primitives/authority-discovery/Cargo.toml +++ b/substrate/primitives/authority-discovery/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } sp-api = { path = "../api", default-features = false} sp-application-crypto = { path = "../application-crypto", default-features = false} sp-runtime = { path = "../runtime", default-features = false} diff --git a/substrate/primitives/consensus/aura/Cargo.toml b/substrate/primitives/consensus/aura/Cargo.toml index 55c81bd71ec1a39fec71629d6b83906426d691e1..26f02bc3119924981fbab2fd90e0a8446579b6b9 100644 --- a/substrate/primitives/consensus/aura/Cargo.toml +++ b/substrate/primitives/consensus/aura/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = { version = "0.1.57", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } sp-api = { path = "../../api", default-features = false} sp-application-crypto = { path = "../../application-crypto", default-features = false} sp-consensus-slots = { path = "../slots", default-features = false} diff --git a/substrate/primitives/consensus/babe/Cargo.toml b/substrate/primitives/consensus/babe/Cargo.toml index 764e95501801b5448abae882a4c30e30f9d0312b..db8bb8cb1540688232493826fff4c554a437f6cf 100644 --- 
a/substrate/primitives/consensus/babe/Cargo.toml +++ b/substrate/primitives/consensus/babe/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = { version = "0.1.57", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.188", default-features = false, features = ["derive", "alloc"], optional = true } sp-api = { path = "../../api", default-features = false} sp-application-crypto = { path = "../../application-crypto", default-features = false} diff --git a/substrate/primitives/consensus/beefy/Cargo.toml b/substrate/primitives/consensus/beefy/Cargo.toml index 6a12a5a7c7c522f9fb28740bc28544de32dc1c3d..5ff0a2ebc70fb6749da31a630148625ebffc8a57 100644 --- a/substrate/primitives/consensus/beefy/Cargo.toml +++ b/substrate/primitives/consensus/beefy/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.188", default-features = false, optional = true, features = ["derive", "alloc"] } sp-api = { path = "../../api", default-features = false} sp-application-crypto = { path = "../../application-crypto", default-features = false} @@ -42,6 +42,7 @@ std = [ "sp-mmr-primitives/std", "sp-runtime/std", "sp-std/std", + "strum/std", ] # Serde support without relying on std features. diff --git a/substrate/primitives/consensus/beefy/src/lib.rs b/substrate/primitives/consensus/beefy/src/lib.rs index c69e26bf574d877f2c5a2261709ade2cdcfaef46..e31c53237be24523da370725176265a978576835 100644 --- a/substrate/primitives/consensus/beefy/src/lib.rs +++ b/substrate/primitives/consensus/beefy/src/lib.rs @@ -74,10 +74,11 @@ pub trait BeefyAuthorityId: RuntimeAppPublic { /// Your code should use the above types as concrete types for all crypto related /// functionality. pub mod ecdsa_crypto { - use super::{BeefyAuthorityId, Hash, RuntimeAppPublic, KEY_TYPE as BEEFY_KEY_TYPE}; + use super::{BeefyAuthorityId, Hash, RuntimeAppPublic, KEY_TYPE}; use sp_application_crypto::{app_crypto, ecdsa}; use sp_core::crypto::Wraps; - app_crypto!(ecdsa, BEEFY_KEY_TYPE); + + app_crypto!(ecdsa, KEY_TYPE); /// Identity of a BEEFY authority using ECDSA as its crypto. pub type AuthorityId = Public; @@ -115,10 +116,11 @@ pub mod ecdsa_crypto { #[cfg(feature = "bls-experimental")] pub mod bls_crypto { - use super::{BeefyAuthorityId, Hash, RuntimeAppPublic, KEY_TYPE as BEEFY_KEY_TYPE}; + use super::{BeefyAuthorityId, Hash, RuntimeAppPublic, KEY_TYPE}; use sp_application_crypto::{app_crypto, bls377}; use sp_core::{bls377::Pair as BlsPair, crypto::Wraps, Pair as _}; - app_crypto!(bls377, BEEFY_KEY_TYPE); + + app_crypto!(bls377, KEY_TYPE); /// Identity of a BEEFY authority using BLS as its crypto. 
pub type AuthorityId = Public; @@ -131,7 +133,7 @@ pub mod bls_crypto { ::Output: Into<[u8; 32]>, { fn verify(&self, signature: &::Signature, msg: &[u8]) -> bool { - // `w3f-bls` library uses IETF hashing standard and as such does not exposes + // `w3f-bls` library uses IETF hashing standard and as such does not expose // a choice of hash to field function. // We are directly calling into the library to avoid introducing new host call. // and because BeefyAuthorityId::verify is being called in the runtime so we don't have @@ -140,6 +142,53 @@ pub mod bls_crypto { } } } + +/// BEEFY cryptographic types for (ECDSA,BLS) crypto pair +/// +/// This module basically introduces four crypto types: +/// - `ecdsa_bls_crypto::Pair` +/// - `ecdsa_bls_crypto::Public` +/// - `ecdsa_bls_crypto::Signature` +/// - `ecdsa_bls_crypto::AuthorityId` +/// +/// Your code should use the above types as concrete types for all crypto related +/// functionality. +#[cfg(feature = "bls-experimental")] +pub mod ecdsa_bls_crypto { + use super::{BeefyAuthorityId, Hash, RuntimeAppPublic, KEY_TYPE}; + use sp_application_crypto::{app_crypto, ecdsa_bls377}; + use sp_core::{crypto::Wraps, ecdsa_bls377::Pair as EcdsaBlsPair}; + + app_crypto!(ecdsa_bls377, KEY_TYPE); + + /// Identity of a BEEFY authority using (ECDSA,BLS) as its crypto. + pub type AuthorityId = Public; + + /// Signature for a BEEFY authority using (ECDSA,BLS) as its crypto. + pub type AuthoritySignature = Signature; + + impl BeefyAuthorityId for AuthorityId + where + H: Hash, + H::Output: Into<[u8; 32]>, + { + fn verify(&self, signature: &::Signature, msg: &[u8]) -> bool { + // We can not simply call + // `EcdsaBlsPair::verify(signature.as_inner_ref(), msg, self.as_inner_ref())` + // because that invokes ECDSA default verification which perfoms Blake2b hash + // which we don't want. This is because ECDSA signatures are meant to be verified + // on Ethereum network where Keccak hasher is significantly cheaper than Blake2b. + // See Figure 3 of [OnSc21](https://www.scitepress.org/Papers/2021/106066/106066.pdf) + // for comparison. + EcdsaBlsPair::verify_with_hasher::( + signature.as_inner_ref(), + msg, + self.as_inner_ref(), + ) + } + } +} + /// The `ConsensusEngineId` of BEEFY. pub const BEEFY_ENGINE_ID: sp_runtime::ConsensusEngineId = *b"BEEF"; @@ -215,6 +264,7 @@ pub enum ConsensusLog { /// /// A vote message is a direct vote created by a BEEFY node on every voting round /// and is gossiped to its peers. +// TODO: Remove `Signature` generic type, instead get it from `Id::Signature`. #[derive(Clone, Debug, Decode, Encode, PartialEq, TypeInfo)] pub struct VoteMessage { /// Commit to information extracted from a finalized block @@ -458,4 +508,24 @@ mod tests { let (other_pair, _) = bls_crypto::Pair::generate(); assert!(!BeefyAuthorityId::::verify(&other_pair.public(), &signature, msg,)); } + + #[test] + #[cfg(feature = "bls-experimental")] + fn ecdsa_bls_beefy_verify_works() { + let msg = &b"test-message"[..]; + let (pair, _) = ecdsa_bls_crypto::Pair::generate(); + + let signature: ecdsa_bls_crypto::Signature = + pair.as_inner_ref().sign_with_hasher::(&msg).into(); + + // Verification works if same hashing function is used when signing and verifying. 
+ assert!(BeefyAuthorityId::::verify(&pair.public(), &signature, msg)); + + // Verification doesn't work if we verify function provided by pair_crypto implementation + assert!(!ecdsa_bls_crypto::Pair::verify(&signature, msg, &pair.public())); + + // Other public key doesn't work + let (other_pair, _) = ecdsa_bls_crypto::Pair::generate(); + assert!(!BeefyAuthorityId::::verify(&other_pair.public(), &signature, msg,)); + } } diff --git a/substrate/primitives/consensus/grandpa/Cargo.toml b/substrate/primitives/consensus/grandpa/Cargo.toml index bee9092b986816a0609bf5aa490ce08135c7e1fd..8757869995d061ef7d5d0f679bfab993a44a7912 100644 --- a/substrate/primitives/consensus/grandpa/Cargo.toml +++ b/substrate/primitives/consensus/grandpa/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } grandpa = { package = "finality-grandpa", version = "0.16.2", default-features = false, features = ["derive-codec"] } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.188", features = ["derive", "alloc"], default-features = false, optional = true } sp-api = { path = "../../api", default-features = false} sp-application-crypto = { path = "../../application-crypto", default-features = false} diff --git a/substrate/primitives/consensus/grandpa/src/lib.rs b/substrate/primitives/consensus/grandpa/src/lib.rs index baeaee4738e484d23059f4a27dc99093b60412fe..1cf5504c5e7d1b0f17e248e6be45829e6ef15655 100644 --- a/substrate/primitives/consensus/grandpa/src/lib.rs +++ b/substrate/primitives/consensus/grandpa/src/lib.rs @@ -19,13 +19,10 @@ #![cfg_attr(not(feature = "std"), no_std)] -#[cfg(not(feature = "std"))] -extern crate alloc; - #[cfg(feature = "serde")] use serde::Serialize; -use codec::{Codec, Decode, Encode, Input}; +use codec::{Codec, Decode, Encode}; use scale_info::TypeInfo; #[cfg(feature = "std")] use sp_keystore::KeystorePtr; @@ -33,7 +30,7 @@ use sp_runtime::{ traits::{Header as HeaderT, NumberFor}, ConsensusEngineId, RuntimeDebug, }; -use sp_std::{borrow::Cow, vec::Vec}; +use sp_std::vec::Vec; /// The log target to be used by client code. pub const CLIENT_LOG_TARGET: &str = "grandpa"; @@ -62,10 +59,6 @@ pub type AuthoritySignature = app::Signature; /// The `ConsensusEngineId` of GRANDPA. pub const GRANDPA_ENGINE_ID: ConsensusEngineId = *b"FRNK"; -/// The storage key for the current set of weighted Grandpa authorities. -/// The value stored is an encoded VersionedAuthorityList. -pub const GRANDPA_AUTHORITIES_KEY: &[u8] = b":grandpa_authorities"; - /// The weight of an authority. pub type AuthorityWeight = u64; @@ -464,60 +457,6 @@ where Some(grandpa::SignedMessage { message, signature, id: public }) } -/// WASM function call to check for pending changes. -pub const PENDING_CHANGE_CALL: &str = "grandpa_pending_change"; -/// WASM function call to get current GRANDPA authorities. -pub const AUTHORITIES_CALL: &str = "grandpa_authorities"; - -/// The current version of the stored AuthorityList type. The encoding version MUST be updated any -/// time the AuthorityList type changes. -const AUTHORITIES_VERSION: u8 = 1; - -/// An AuthorityList that is encoded with a version specifier. The encoding version is updated any -/// time the AuthorityList type changes. 
This ensures that encodings of different versions of an -/// AuthorityList are differentiable. Attempting to decode an authority list with an unknown -/// version will fail. -#[derive(Default)] -pub struct VersionedAuthorityList<'a>(Cow<'a, AuthorityList>); - -impl<'a> From for VersionedAuthorityList<'a> { - fn from(authorities: AuthorityList) -> Self { - VersionedAuthorityList(Cow::Owned(authorities)) - } -} - -impl<'a> From<&'a AuthorityList> for VersionedAuthorityList<'a> { - fn from(authorities: &'a AuthorityList) -> Self { - VersionedAuthorityList(Cow::Borrowed(authorities)) - } -} - -impl<'a> Into for VersionedAuthorityList<'a> { - fn into(self) -> AuthorityList { - self.0.into_owned() - } -} - -impl<'a> Encode for VersionedAuthorityList<'a> { - fn size_hint(&self) -> usize { - (AUTHORITIES_VERSION, self.0.as_ref()).size_hint() - } - - fn using_encoded R>(&self, f: F) -> R { - (AUTHORITIES_VERSION, self.0.as_ref()).using_encoded(f) - } -} - -impl<'a> Decode for VersionedAuthorityList<'a> { - fn decode(value: &mut I) -> Result { - let (version, authorities): (u8, AuthorityList) = Decode::decode(value)?; - if version != AUTHORITIES_VERSION { - return Err("unknown Grandpa authorities version".into()) - } - Ok(authorities.into()) - } -} - /// An opaque type used to represent the key ownership proof at the runtime API /// boundary. The inner value is an encoded representation of the actual key /// ownership proof which will be parameterized when defining the runtime. At diff --git a/substrate/primitives/consensus/sassafras/Cargo.toml b/substrate/primitives/consensus/sassafras/Cargo.toml index cb887fd40fe494936d318e4e57d3c2d8902adba5..696e0a64596163ad18775bc0714d2622a8753651 100644 --- a/substrate/primitives/consensus/sassafras/Cargo.toml +++ b/substrate/primitives/consensus/sassafras/Cargo.toml @@ -16,14 +16,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] scale-codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.163", default-features = false, features = ["derive"], optional = true } -sp-api = { version = "4.0.0-dev", default-features = false, path = "../../api" } -sp-application-crypto = { version = "23.0.0", default-features = false, path = "../../application-crypto", features = ["bandersnatch-experimental"] } -sp-consensus-slots = { version = "0.10.0-dev", default-features = false, path = "../slots" } -sp-core = { version = "21.0.0", default-features = false, path = "../../core", features = ["bandersnatch-experimental"] } -sp-runtime = { version = "24.0.0", default-features = false, path = "../../runtime" } -sp-std = { version = "8.0.0", default-features = false, path = "../../std" } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +serde = { version = "1.0.188", default-features = false, features = ["derive"], optional = true } +sp-api = { default-features = false, path = "../../api" } +sp-application-crypto = { default-features = false, path = "../../application-crypto", features = ["bandersnatch-experimental"] } +sp-consensus-slots = { default-features = false, path = "../slots" } +sp-core = { default-features = false, path = "../../core", features = ["bandersnatch-experimental"] } +sp-runtime = { default-features = false, path = "../../runtime" } +sp-std = { default-features = false, path = "../../std" } [features] default = [ "std" ] diff --git 
a/substrate/primitives/consensus/slots/Cargo.toml b/substrate/primitives/consensus/slots/Cargo.toml index faf5a9ee956e47a13572d6b1b89c60745a96a6aa..aa899d86e72ca34f39f9bcd924d117deaeff57bc 100644 --- a/substrate/primitives/consensus/slots/Cargo.toml +++ b/substrate/primitives/consensus/slots/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } -scale-info = { version = "2.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0", default-features = false, features = ["derive", "alloc"], optional = true } sp-std = { path = "../../std", default-features = false} sp-timestamp = { path = "../../timestamp", default-features = false} diff --git a/substrate/primitives/core/Cargo.toml b/substrate/primitives/core/Cargo.toml index 9f1f0127d7cdcb365810c4c7351aa55fdafc2081..34485c72ab03c3a0d0a8b86ef6a9c72f7f2990fc 100644 --- a/substrate/primitives/core/Cargo.toml +++ b/substrate/primitives/core/Cargo.toml @@ -25,11 +25,9 @@ hash256-std-hasher = { version = "0.15.2", default-features = false } bs58 = { version = "0.5.0", default-features = false, optional = true } rand = { version = "0.8.5", features = ["small_rng"], optional = true } substrate-bip39 = { version = "0.4.4", optional = true } -tiny-bip39 = { version = "1.0.0", optional = true } -regex = { version = "1.6.0", optional = true } +bip39 = { version = "2.0.0", default-features = false } zeroize = { version = "1.4.3", default-features = false } secrecy = { version = "0.8.0", default-features = false } -lazy_static = { version = "1.4.0", default-features = false, optional = true } parking_lot = { version = "0.12.1", optional = true } ss58-registry = { version = "1.34.0", default-features = false } sp-std = { path = "../std", default-features = false} @@ -42,6 +40,7 @@ thiserror = { version = "1.0.48", optional = true } tracing = { version = "0.1.29", optional = true } bitflags = "1.3" paste = "1.0.7" +itertools = { version = "0.10.3", optional = true } # full crypto array-bytes = { version = "6.1", optional = true } @@ -50,18 +49,20 @@ blake2 = { version = "0.10.4", default-features = false, optional = true } libsecp256k1 = { version = "0.7", default-features = false, features = ["static-context"], optional = true } schnorrkel = { version = "0.9.1", features = ["preaudit_deprecated", "u64_backend"], default-features = false } merlin = { version = "2.0", default-features = false } -secp256k1 = { version = "0.24.0", default-features = false, features = ["recovery", "alloc"], optional = true } +secp256k1 = { version = "0.28.0", default-features = false, features = ["recovery", "alloc"], optional = true } sp-core-hashing = { path = "hashing", default-features = false, optional = true } sp-runtime-interface = { path = "../runtime-interface", default-features = false} # bls crypto w3f-bls = { version = "0.1.3", default-features = false, optional = true} # bandersnatch crypto -bandersnatch_vrfs = { git = "https://github.com/w3f/ring-vrf", rev = "f4fe253", default-features = false, optional = true } +bandersnatch_vrfs = { git = "https://github.com/w3f/ring-vrf", rev = "3ddc205", default-features = false, optional = true } [dev-dependencies] criterion = "0.4.0" -serde_json = "1.0" +serde_json = "1.0.108" +lazy_static = "1.4.0" +regex = "1.6.0" sp-core-hashing-proc-macro = { path = 
"hashing/proc-macro" } [[bench]] @@ -75,7 +76,9 @@ bench = false default = [ "std" ] std = [ "array-bytes", - "bandersnatch_vrfs/getrandom", + "bandersnatch_vrfs?/std", + "bip39/rand", + "bip39/std", "blake2/std", "bounded-collections/std", "bs58/std", @@ -88,7 +91,7 @@ std = [ "hash-db/std", "hash256-std-hasher/std", "impl-serde/std", - "lazy_static", + "itertools", "libsecp256k1/std", "log/std", "merlin/std", @@ -98,7 +101,6 @@ std = [ "primitive-types/serde", "primitive-types/std", "rand", - "regex", "scale-info/std", "schnorrkel/std", "secp256k1/global-context", @@ -114,9 +116,10 @@ std = [ "ss58-registry/std", "substrate-bip39", "thiserror", - "tiny-bip39", "tracing", + "w3f-bls?/std", "zeroize/alloc", + "zeroize/std", ] # Serde support without relying on std features. diff --git a/substrate/primitives/core/fuzz/Cargo.toml b/substrate/primitives/core/fuzz/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..9a094b07d4a10ad8632ab656deb6e618e0f2d5e3 --- /dev/null +++ b/substrate/primitives/core/fuzz/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "sp-core-fuzz" +version = "0.0.0" +publish = false + +[package.metadata] +cargo-fuzz = true + +[dependencies] +lazy_static = "1.4.0" +libfuzzer-sys = "0.4" +regex = "1.10.2" + +sp-core = { path = ".." } + +[[bin]] +name = "fuzz_address_uri" +path = "fuzz_targets/fuzz_address_uri.rs" +test = false +doc = false diff --git a/substrate/primitives/core/fuzz/fuzz_targets/fuzz_address_uri.rs b/substrate/primitives/core/fuzz/fuzz_targets/fuzz_address_uri.rs new file mode 100644 index 0000000000000000000000000000000000000000..e2d9e2fc8b0822ae9d984683cdaaf71a97bd7c2e --- /dev/null +++ b/substrate/primitives/core/fuzz/fuzz_targets/fuzz_address_uri.rs @@ -0,0 +1,53 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![no_main] + +extern crate libfuzzer_sys; +extern crate regex; +extern crate sp_core; + +use libfuzzer_sys::fuzz_target; +use regex::Regex; +use sp_core::crypto::AddressUri; + +lazy_static::lazy_static! 
{ + static ref SECRET_PHRASE_REGEX: Regex = Regex::new(r"^(?P[a-zA-Z0-9 ]+)?(?P(//?[^/]+)*)(///(?P.*))?$") + .expect("constructed from known-good static value; qed"); +} + +fuzz_target!(|input: &str| { + let regex_result = SECRET_PHRASE_REGEX.captures(input); + let manual_result = AddressUri::parse(input); + assert_eq!(regex_result.is_some(), manual_result.is_ok()); + if manual_result.is_err() { + let _ = format!("{}", manual_result.as_ref().err().unwrap()); + } + if let (Some(regex_result), Ok(manual_result)) = (regex_result, manual_result) { + assert_eq!(regex_result.name("phrase").map(|p| p.as_str()), manual_result.phrase); + + let manual_paths = manual_result + .paths + .iter() + .map(|s| "/".to_string() + s) + .collect::>() + .join(""); + + assert_eq!(regex_result.name("path").unwrap().as_str().to_string(), manual_paths); + assert_eq!(regex_result.name("password").map(|pass| pass.as_str()), manual_result.pass); + } +}); diff --git a/substrate/primitives/core/src/address_uri.rs b/substrate/primitives/core/src/address_uri.rs new file mode 100644 index 0000000000000000000000000000000000000000..862747c9a4b69947905f384ab0be52e4b776c066 --- /dev/null +++ b/substrate/primitives/core/src/address_uri.rs @@ -0,0 +1,432 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Little util for parsing an address URI. Replaces regular expressions. + +#[cfg(all(not(feature = "std"), any(feature = "serde", feature = "full_crypto")))] +use sp_std::{ + alloc::string::{String, ToString}, + vec::Vec, +}; + +/// A container for results of parsing the address uri string. +/// +/// Intended to be equivalent of: +/// `Regex::new(r"^(?P[a-zA-Z0-9 ]+)?(?P(//?[^/]+)*)(///(?P.*))?$")` +/// which also handles soft and hard derivation paths: +/// `Regex::new(r"/(/?[^/]+)")` +/// +/// Example: +/// ``` +/// use sp_core::crypto::AddressUri; +/// let manual_result = AddressUri::parse("hello world/s//h///pass"); +/// assert_eq!( +/// manual_result.unwrap(), +/// AddressUri { phrase: Some("hello world"), paths: vec!["s", "/h"], pass: Some("pass") } +/// ); +/// ``` +#[derive(Debug, PartialEq)] +pub struct AddressUri<'a> { + /// Phrase, hexadecimal string, or ss58-compatible string. + pub phrase: Option<&'a str>, + /// Key derivation paths, ordered as in input string, + pub paths: Vec<&'a str>, + /// Password. + pub pass: Option<&'a str>, +} + +/// Errors that are possible during parsing the address URI. 
+#[allow(missing_docs)] +#[cfg_attr(feature = "std", derive(thiserror::Error))] +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum Error { + #[cfg_attr(feature = "std", error("Invalid character in phrase:\n{0}"))] + InvalidCharacterInPhrase(InvalidCharacterInfo), + #[cfg_attr(feature = "std", error("Invalid character in password:\n{0}"))] + InvalidCharacterInPass(InvalidCharacterInfo), + #[cfg_attr(feature = "std", error("Missing character in hard path:\n{0}"))] + MissingCharacterInHardPath(InvalidCharacterInfo), + #[cfg_attr(feature = "std", error("Missing character in soft path:\n{0}"))] + MissingCharacterInSoftPath(InvalidCharacterInfo), +} + +impl Error { + /// Creates an instance of `Error::InvalidCharacterInPhrase` using given parameters. + pub fn in_phrase(input: &str, pos: usize) -> Self { + Self::InvalidCharacterInPhrase(InvalidCharacterInfo::new(input, pos)) + } + /// Creates an instance of `Error::InvalidCharacterInPass` using given parameters. + pub fn in_pass(input: &str, pos: usize) -> Self { + Self::InvalidCharacterInPass(InvalidCharacterInfo::new(input, pos)) + } + /// Creates an instance of `Error::MissingCharacterInHardPath` using given parameters. + pub fn in_hard_path(input: &str, pos: usize) -> Self { + Self::MissingCharacterInHardPath(InvalidCharacterInfo::new(input, pos)) + } + /// Creates an instance of `Error::MissingCharacterInSoftPath` using given parameters. + pub fn in_soft_path(input: &str, pos: usize) -> Self { + Self::MissingCharacterInSoftPath(InvalidCharacterInfo::new(input, pos)) + } +} + +/// Complementary error information. +/// +/// The structure contains complementary information about parsing the address URI string: +/// a copy of the original URI string and a 0-based integer indicating the position of the invalid +/// character. +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct InvalidCharacterInfo(String, usize); + +impl InvalidCharacterInfo { + fn new(info: &str, pos: usize) -> Self { + Self(info.to_string(), pos) + } +} + +impl sp_std::fmt::Display for InvalidCharacterInfo { + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + let (s, pos) = escape_string(&self.0, self.1); + write!(f, "{s}\n{i}^", i = sp_std::iter::repeat(" ").take(pos).collect::()) + } +} + +/// Escapes the control characters in the given string, and recomputes the position if some characters +/// were actually escaped. +fn escape_string(input: &str, pos: usize) -> (String, usize) { + let mut out = String::with_capacity(2 * input.len()); + let mut out_pos = 0; + input + .chars() + .enumerate() + .map(|(i, c)| { + let esc = |c| (i, Some('\\'), c, 2); + match c { + '\t' => esc('t'), + '\n' => esc('n'), + '\r' => esc('r'), + '\x07' => esc('a'), + '\x08' => esc('b'), + '\x0b' => esc('v'), + '\x0c' => esc('f'), + _ => (i, None, c, 1), + } + }) + .for_each(|(i, maybe_escape, c, increment)| { + maybe_escape.map(|e| out.push(e)); + out.push(c); + if i < pos { + out_pos += increment; + } + }); + (out, out_pos) +} + +fn extract_prefix<'a>(input: &mut &'a str, is_allowed: &dyn Fn(char) -> bool) -> Option<&'a str> { + let output = input.trim_start_matches(is_allowed); + let prefix_len = input.len() - output.len(); + let prefix = if prefix_len > 0 { Some(&input[..prefix_len]) } else { None }; + *input = output; + prefix +} + +fn strip_prefix(input: &mut &str, prefix: &str) -> bool { + if let Some(stripped_input) = input.strip_prefix(prefix) { + *input = stripped_input; + true + } else { + false + } +} + +impl<'a> AddressUri<'a> { + /// Parses the given string.
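As a quick illustration of the error reporting built above (the `parse` function itself follows), a malformed hard path is rejected together with the 0-based position of the offending character, matching `test05` further down:

```rust
// Sketch: requires the `std` feature for the `thiserror`-derived `Display` impl on `Error`.
use sp_core::crypto::AddressUri;

fn report_bad_hard_path() {
    // "sdasd//" is missing the hard-junction name, so parsing fails at position 7.
    match AddressUri::parse("sdasd//") {
        Ok(parsed) => println!("phrase={:?} paths={:?}", parsed.phrase, parsed.paths),
        // Prints the input with a `^` caret under the reported position.
        Err(e) => println!("{e}"),
    }
}
```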
+ pub fn parse(mut input: &'a str) -> Result { + let initial_input = input; + let initial_input_len = input.len(); + let phrase = extract_prefix(&mut input, &|ch: char| { + ch.is_ascii_digit() || ch.is_ascii_alphabetic() || ch == ' ' + }); + + let mut pass = None; + let mut paths = Vec::new(); + while !input.is_empty() { + let unstripped_input = input; + if strip_prefix(&mut input, "///") { + pass = Some(extract_prefix(&mut input, &|ch: char| ch != '\n').unwrap_or("")); + } else if strip_prefix(&mut input, "//") { + let path = extract_prefix(&mut input, &|ch: char| ch != '/') + .ok_or(Error::in_hard_path(initial_input, initial_input_len - input.len()))?; + assert!(path.len() > 0); + // hard path shall contain leading '/', so take it from unstripped input. + paths.push(&unstripped_input[1..path.len() + 2]); + } else if strip_prefix(&mut input, "/") { + paths.push( + extract_prefix(&mut input, &|ch: char| ch != '/').ok_or( + Error::in_soft_path(initial_input, initial_input_len - input.len()), + )?, + ); + } else { + return Err(if pass.is_some() { + Error::in_pass(initial_input, initial_input_len - input.len()) + } else { + Error::in_phrase(initial_input, initial_input_len - input.len()) + }); + } + } + + Ok(Self { phrase, paths, pass }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use regex::Regex; + + lazy_static::lazy_static! { + static ref SECRET_PHRASE_REGEX: Regex = Regex::new(r"^(?P[a-zA-Z0-9 ]+)?(?P(//?[^/]+)*)(///(?P.*))?$") + .expect("constructed from known-good static value; qed"); + } + + fn check_with_regex(input: &str) { + let regex_result = SECRET_PHRASE_REGEX.captures(input); + let manual_result = AddressUri::parse(input); + assert_eq!(regex_result.is_some(), manual_result.is_ok()); + if let (Some(regex_result), Ok(manual_result)) = (regex_result, manual_result) { + assert_eq!( + regex_result.name("phrase").map(|phrase| phrase.as_str()), + manual_result.phrase + ); + + let manual_paths = manual_result + .paths + .iter() + .map(|s| "/".to_string() + s) + .collect::>() + .join(""); + + assert_eq!(regex_result.name("path").unwrap().as_str().to_string(), manual_paths); + assert_eq!( + regex_result.name("password").map(|phrase| phrase.as_str()), + manual_result.pass + ); + } + } + + fn check(input: &str, result: Result) { + let manual_result = AddressUri::parse(input); + assert_eq!(manual_result, result); + check_with_regex(input); + } + + #[test] + fn test00() { + check("///", Ok(AddressUri { phrase: None, pass: Some(""), paths: vec![] })); + } + + #[test] + fn test01() { + check("////////", Ok(AddressUri { phrase: None, pass: Some("/////"), paths: vec![] })) + } + + #[test] + fn test02() { + check( + "sdasd///asda", + Ok(AddressUri { phrase: Some("sdasd"), pass: Some("asda"), paths: vec![] }), + ); + } + + #[test] + fn test03() { + check( + "sdasd//asda", + Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec!["/asda"] }), + ); + } + + #[test] + fn test04() { + check("sdasd//a", Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec!["/a"] })); + } + + #[test] + fn test05() { + let input = "sdasd//"; + check(input, Err(Error::in_hard_path(input, 7))); + } + + #[test] + fn test06() { + check( + "sdasd/xx//asda", + Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec!["xx", "/asda"] }), + ); + } + + #[test] + fn test07() { + check( + "sdasd/xx//a/b//c///pass", + Ok(AddressUri { + phrase: Some("sdasd"), + pass: Some("pass"), + paths: vec!["xx", "/a", "b", "/c"], + }), + ); + } + + #[test] + fn test08() { + check( + "sdasd/xx//a", + Ok(AddressUri { 
phrase: Some("sdasd"), pass: None, paths: vec!["xx", "/a"] }), + ); + } + + #[test] + fn test09() { + let input = "sdasd/xx//"; + check(input, Err(Error::in_hard_path(input, 10))); + } + + #[test] + fn test10() { + check( + "sdasd/asda", + Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec!["asda"] }), + ); + } + + #[test] + fn test11() { + check( + "sdasd/asda//x", + Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec!["asda", "/x"] }), + ); + } + + #[test] + fn test12() { + check("sdasd/a", Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec!["a"] })); + } + + #[test] + fn test13() { + let input = "sdasd/"; + check(input, Err(Error::in_soft_path(input, 6))); + } + + #[test] + fn test14() { + check("sdasd", Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec![] })); + } + + #[test] + fn test15() { + let input = "sdasd."; + check(input, Err(Error::in_phrase(input, 5))); + } + + #[test] + fn test16() { + let input = "sd.asd/asd.a"; + check(input, Err(Error::in_phrase(input, 2))); + } + + #[test] + fn test17() { + let input = "sd.asd//asd.a"; + check(input, Err(Error::in_phrase(input, 2))); + } + + #[test] + fn test18() { + check( + "sdasd/asd.a", + Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec!["asd.a"] }), + ); + } + + #[test] + fn test19() { + check( + "sdasd//asd.a", + Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec!["/asd.a"] }), + ); + } + + #[test] + fn test20() { + let input = "///\n"; + check(input, Err(Error::in_pass(input, 3))); + } + + #[test] + fn test21() { + let input = "///a\n"; + check(input, Err(Error::in_pass(input, 4))); + } + + #[test] + fn test22() { + let input = "sd asd///asd.a\n"; + check(input, Err(Error::in_pass(input, 14))); + } + + #[test] + fn test_invalid_char_info_1() { + let expected = "01234\n^"; + let f = format!("{}", InvalidCharacterInfo::new("01234", 0)); + assert_eq!(expected, f); + } + + #[test] + fn test_invalid_char_info_2() { + let expected = "01\n ^"; + let f = format!("{}", InvalidCharacterInfo::new("01", 1)); + assert_eq!(expected, f); + } + + #[test] + fn test_invalid_char_info_3() { + let expected = "01234\n ^"; + let f = format!("{}", InvalidCharacterInfo::new("01234", 2)); + assert_eq!(expected, f); + } + + #[test] + fn test_invalid_char_info_4() { + let expected = "012\\n456\n ^"; + let f = format!("{}", InvalidCharacterInfo::new("012\n456", 3)); + assert_eq!(expected, f); + } + + #[test] + fn test_invalid_char_info_5() { + let expected = "012\\n456\n ^"; + let f = format!("{}", InvalidCharacterInfo::new("012\n456", 5)); + assert_eq!(expected, f); + } + + #[test] + fn test_invalid_char_info_6() { + let expected = "012\\f456\\t89\n ^"; + let f = format!("{}", InvalidCharacterInfo::new("012\x0c456\t89", 9)); + assert_eq!(expected, f); + } +} diff --git a/substrate/primitives/core/src/bandersnatch.rs b/substrate/primitives/core/src/bandersnatch.rs index 832ef6c77bb3dbb71e94200e27a0e7379510769c..78b7f12f9ffd4c5ce9faacb6990f80e091994cf7 100644 --- a/substrate/primitives/core/src/bandersnatch.rs +++ b/substrate/primitives/core/src/bandersnatch.rs @@ -60,15 +60,7 @@ const PREOUT_SERIALIZED_LEN: usize = 33; // // This size is dependent on the ring domain size and the actual value // is equal to the SCALE encoded size of the `KZG` backend. -// -// Some values: -// ring_size → ~serialized_size -// 512 → 74 KB -// 1024 → 147 KB -// 2048 → 295 KB -// NOTE: This is quite big but looks like there is an upcoming fix -// in the backend. 
-const RING_CONTEXT_SERIALIZED_LEN: usize = 147748; +const RING_CONTEXT_SERIALIZED_LEN: usize = 147716; /// Bandersnatch public key. #[cfg_attr(feature = "full_crypto", derive(Hash))] @@ -538,10 +530,7 @@ pub mod vrf { #[cfg(feature = "full_crypto")] impl Pair { fn vrf_sign_gen(&self, data: &VrfSignData) -> VrfSignature { - let ios = core::array::from_fn(|i| { - let input = data.inputs[i].0.clone(); - self.secret.vrf_inout(input) - }); + let ios = core::array::from_fn(|i| self.secret.vrf_inout(data.inputs[i].0)); let thin_signature: ThinVrfSignature = self.secret.sign_thin_vrf(data.transcript.clone(), &ios); @@ -567,7 +556,7 @@ pub mod vrf { input: &VrfInput, ) -> [u8; N] { let transcript = Transcript::new_labeled(context); - let inout = self.secret.vrf_inout(input.0.clone()); + let inout = self.secret.vrf_inout(input.0); inout.vrf_output_bytes(transcript) } } @@ -583,7 +572,7 @@ pub mod vrf { }; let preouts: [bandersnatch_vrfs::VrfPreOut; N] = - core::array::from_fn(|i| signature.outputs[i].0.clone()); + core::array::from_fn(|i| signature.outputs[i].0); // Deserialize only the proof, the rest has already been deserialized // This is another hack used because backend signature type is generic over @@ -596,7 +585,7 @@ pub mod vrf { }; let signature = ThinVrfSignature { proof, preouts }; - let inputs = data.inputs.iter().map(|i| i.0.clone()); + let inputs = data.inputs.iter().map(|i| i.0); public.verify_thin_vrf(data.transcript.clone(), inputs, &signature).is_ok() } @@ -610,8 +599,7 @@ pub mod vrf { input: &VrfInput, ) -> [u8; N] { let transcript = Transcript::new_labeled(context); - let inout = - bandersnatch_vrfs::VrfInOut { input: input.0.clone(), preoutput: self.0.clone() }; + let inout = bandersnatch_vrfs::VrfInOut { input: input.0, preoutput: self.0 }; inout.vrf_output_bytes(transcript) } } @@ -733,10 +721,7 @@ pub mod ring_vrf { data: &VrfSignData, prover: &RingProver, ) -> RingVrfSignature { - let ios = core::array::from_fn(|i| { - let input = data.inputs[i].0.clone(); - self.secret.vrf_inout(input) - }); + let ios = core::array::from_fn(|i| self.secret.vrf_inout(data.inputs[i].0)); let ring_signature: bandersnatch_vrfs::RingVrfSignature = bandersnatch_vrfs::RingProver { ring_prover: prover, secret: &self.secret } @@ -792,12 +777,12 @@ pub mod ring_vrf { }; let preouts: [bandersnatch_vrfs::VrfPreOut; N] = - core::array::from_fn(|i| self.outputs[i].0.clone()); + core::array::from_fn(|i| self.outputs[i].0); let signature = bandersnatch_vrfs::RingVrfSignature { proof: vrf_signature.proof, preouts }; - let inputs = data.inputs.iter().map(|i| i.0.clone()); + let inputs = data.inputs.iter().map(|i| i.0); bandersnatch_vrfs::RingVerifier(verifier) .verify_ring_vrf(data.transcript.clone(), inputs, &signature) diff --git a/substrate/primitives/core/src/bls.rs b/substrate/primitives/core/src/bls.rs index 951aa1828ea5136700ca8ee91ee1693e4b6b1e0a..e519ba1806c41b340713f1efe8f21487e83c012d 100644 --- a/substrate/primitives/core/src/bls.rs +++ b/substrate/primitives/core/src/bls.rs @@ -17,7 +17,7 @@ //! Simple BLS (Boneh–Lynn–Shacham) Signature API. 
-#[cfg(feature = "std")] +#[cfg(feature = "serde")] use crate::crypto::Ss58Codec; use crate::crypto::{ByteArray, CryptoType, Derive, Public as TraitPublic, UncheckedFrom}; #[cfg(feature = "full_crypto")] @@ -28,8 +28,12 @@ use sp_std::vec::Vec; use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; -#[cfg(feature = "std")] + +#[cfg(feature = "serde")] use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; +#[cfg(all(not(feature = "std"), feature = "serde"))] +use sp_std::alloc::{format, string::String}; + use w3f_bls::{DoublePublicKey, DoubleSignature, EngineBLS, SerializableToBytes, TinyBLS381}; #[cfg(feature = "full_crypto")] use w3f_bls::{DoublePublicKeyScheme, Keypair, Message, SecretKey}; @@ -39,6 +43,7 @@ use sp_std::{convert::TryFrom, marker::PhantomData, ops::Deref}; /// BLS-377 specialized types pub mod bls377 { + pub use super::{PUBLIC_KEY_SERIALIZED_SIZE, SIGNATURE_SERIALIZED_SIZE}; use crate::crypto::CryptoTypeId; use w3f_bls::TinyBLS377; @@ -60,6 +65,7 @@ pub mod bls377 { /// BLS-381 specialized types pub mod bls381 { + pub use super::{PUBLIC_KEY_SERIALIZED_SIZE, SIGNATURE_SERIALIZED_SIZE}; use crate::crypto::CryptoTypeId; use w3f_bls::TinyBLS381; @@ -83,17 +89,17 @@ trait BlsBound: EngineBLS + HardJunctionId + Send + Sync + 'static {} impl BlsBound for T {} -// Secret key serialized size +/// Secret key serialized size #[cfg(feature = "full_crypto")] const SECRET_KEY_SERIALIZED_SIZE: usize = as SerializableToBytes>::SERIALIZED_BYTES_SIZE; -// Public key serialized size -const PUBLIC_KEY_SERIALIZED_SIZE: usize = +/// Public key serialized size +pub const PUBLIC_KEY_SERIALIZED_SIZE: usize = as SerializableToBytes>::SERIALIZED_BYTES_SIZE; -// Signature serialized size -const SIGNATURE_SERIALIZED_SIZE: usize = +/// Signature serialized size +pub const SIGNATURE_SERIALIZED_SIZE: usize = as SerializableToBytes>::SERIALIZED_BYTES_SIZE; /// A secret seed. 
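The `std` to `serde` cfg changes in `bls.rs` above follow a recurring pattern in this PR for `no_std`-compatible serde support. A rough, illustrative sketch of that pattern (the `Key` type here is hypothetical, not part of the crate):

```rust
// Illustrative only: gate serde impls on the `serde` feature rather than `std`,
// and pull `String`/`format!` from `alloc` when `std` is disabled.
#[cfg(all(not(feature = "std"), feature = "serde"))]
use sp_std::alloc::{format, string::String};
#[cfg(feature = "serde")]
use serde::{Deserialize, Deserializer, Serialize, Serializer};

/// Hypothetical 32-byte key type, used only for this sketch.
pub struct Key(pub [u8; 32]);

#[cfg(feature = "serde")]
impl Serialize for Key {
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        // Hex-encode the raw bytes; works on both std and no_std builds.
        serializer.serialize_str(&array_bytes::bytes2hex("", self.0))
    }
}

#[cfg(feature = "serde")]
impl<'de> Deserialize<'de> for Key {
    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        let bytes = array_bytes::hex2bytes(&String::deserialize(deserializer)?)
            .map_err(|e| serde::de::Error::custom(format!("{:?}", e)))?;
        bytes.try_into().map(Key).map_err(|_| serde::de::Error::custom("bad key length"))
    }
}
```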
@@ -128,7 +134,7 @@ impl Eq for Public {} impl PartialOrd for Public { fn partial_cmp(&self, other: &Self) -> Option { - self.inner.partial_cmp(&other.inner) + Some(self.cmp(other)) } } @@ -258,7 +264,7 @@ impl sp_std::fmt::Debug for Public { } } -#[cfg(feature = "std")] +#[cfg(feature = "serde")] impl Serialize for Public { fn serialize(&self, serializer: S) -> Result where @@ -268,7 +274,7 @@ impl Serialize for Public { } } -#[cfg(feature = "std")] +#[cfg(feature = "serde")] impl<'de, T: BlsBound> Deserialize<'de> for Public { fn deserialize(deserializer: D) -> Result where @@ -317,6 +323,10 @@ impl sp_std::hash::Hash for Signature { } } +impl ByteArray for Signature { + const LEN: usize = SIGNATURE_SERIALIZED_SIZE; +} + impl TryFrom<&[u8]> for Signature { type Error = (); @@ -330,7 +340,7 @@ impl TryFrom<&[u8]> for Signature { } } -#[cfg(feature = "std")] +#[cfg(feature = "serde")] impl Serialize for Signature { fn serialize(&self, serializer: S) -> Result where @@ -340,7 +350,7 @@ impl Serialize for Signature { } } -#[cfg(feature = "std")] +#[cfg(feature = "serde")] impl<'de, T> Deserialize<'de> for Signature { fn deserialize(deserializer: D) -> Result where @@ -444,10 +454,9 @@ impl TraitPair for Pair { path: Iter, _seed: Option, ) -> Result<(Self, Option), DeriveError> { - let mut acc: [u8; SECRET_KEY_SERIALIZED_SIZE] = - self.0.secret.to_bytes().try_into().expect( - "Secret key serializer returns a vector of SECRET_KEY_SERIALIZED_SIZE size", - ); + let mut acc: [u8; SECRET_KEY_SERIALIZED_SIZE] = self.0.secret.to_bytes().try_into().expect( + "Secret key serializer returns a vector of SECRET_KEY_SERIALIZED_SIZE size; qed", + ); for j in path { match j { DeriveJunction::Soft(_cc) => return Err(DeriveError::SoftKeyInPath), @@ -529,11 +538,10 @@ mod test { ); } - // Only passes if the seed = (seed mod ScalarField) #[test] fn seed_and_derive_should_work() { let seed = array_bytes::hex2array_unchecked( - "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f00", + "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60", ); let pair = Pair::from_seed(&seed); // we are using hash to field so this is not going to work @@ -543,7 +551,7 @@ mod test { assert_eq!( derived.to_raw_vec(), array_bytes::hex2array_unchecked::<_, 32>( - "a4f2269333b3e87c577aa00c4a2cd650b3b30b2e8c286a47c251279ff3a26e0d" + "3a0626d095148813cd1642d38254f1cfff7eb8cc1a2fc83b2a135377c3554c12" ) ); } diff --git a/substrate/primitives/core/src/crypto.rs b/substrate/primitives/core/src/crypto.rs index e1bfb80046f7433f3c0a2131d0dbc1fcce9f4bde..c9719e344d3e208f8f9b81dddad10acf2eedfebb 100644 --- a/substrate/primitives/core/src/crypto.rs +++ b/substrate/primitives/core/src/crypto.rs @@ -19,12 +19,12 @@ use crate::{ed25519, sr25519}; #[cfg(feature = "std")] -use bip39::{Language, Mnemonic, MnemonicType}; +use bip39::{Language, Mnemonic}; use codec::{Decode, Encode, MaxEncodedLen}; #[cfg(feature = "std")] -use rand::{rngs::OsRng, RngCore}; +use itertools::Itertools; #[cfg(feature = "std")] -use regex::Regex; +use rand::{rngs::OsRng, RngCore}; use scale_info::TypeInfo; #[cfg(feature = "std")] pub use secrecy::{ExposeSecret, SecretString}; @@ -41,6 +41,11 @@ pub use ss58_registry::{from_known_address_format, Ss58AddressFormat, Ss58Addres /// Trait to zeroize a memory buffer. 
pub use zeroize::Zeroize; +#[cfg(feature = "std")] +pub use crate::address_uri::AddressUri; +#[cfg(any(feature = "std", feature = "full_crypto"))] +pub use crate::address_uri::Error as AddressUriError; + /// The root phrase for our publicly known keys. pub const DEV_PHRASE: &str = "bottom drive obey lake curtain smoke basket hold race lonely fit walk"; @@ -80,8 +85,8 @@ impl> UncheckedInto for S { #[cfg(feature = "full_crypto")] pub enum SecretStringError { /// The overall format was invalid (e.g. the seed phrase contained symbols). - #[cfg_attr(feature = "std", error("Invalid format"))] - InvalidFormat, + #[cfg_attr(feature = "std", error("Invalid format {0}"))] + InvalidFormat(AddressUriError), /// The seed phrase provided is not a valid BIP39 phrase. #[cfg_attr(feature = "std", error("Invalid phrase"))] InvalidPhrase, @@ -99,6 +104,13 @@ pub enum SecretStringError { InvalidPath, } +#[cfg(any(feature = "std", feature = "full_crypto"))] +impl From for SecretStringError { + fn from(e: AddressUriError) -> Self { + Self::InvalidFormat(e) + } +} + /// An error when deriving a key. #[cfg_attr(feature = "std", derive(thiserror::Error))] #[derive(Debug, Clone, PartialEq, Eq)] @@ -206,7 +218,7 @@ impl> From for DeriveJunction { /// An error type for SS58 decoding. #[cfg_attr(feature = "std", derive(thiserror::Error))] #[cfg_attr(not(feature = "std"), derive(Debug))] -#[derive(Clone, Copy, Eq, PartialEq)] +#[derive(Clone, Eq, PartialEq)] #[allow(missing_docs)] #[cfg(any(feature = "full_crypto", feature = "serde"))] pub enum PublicError { @@ -233,6 +245,11 @@ pub enum PublicError { InvalidPath, #[cfg_attr(feature = "std", error("Disallowed SS58 Address Format for this datatype."))] FormatNotAllowed, + #[cfg_attr(feature = "std", error("Password not allowed."))] + PasswordNotAllowed, + #[cfg(feature = "std")] + #[cfg_attr(feature = "std", error("Incorrect URI syntax {0}."))] + MalformedUri(#[from] AddressUriError), } #[cfg(feature = "std")] @@ -412,47 +429,40 @@ pub fn set_default_ss58_version(new_default: Ss58AddressFormat) { DEFAULT_VERSION.store(new_default.into(), core::sync::atomic::Ordering::Relaxed); } -#[cfg(feature = "std")] -lazy_static::lazy_static! { - static ref SS58_REGEX: Regex = Regex::new(r"^(?P[\w\d ]+)?(?P(//?[^/]+)*)$") - .expect("constructed from known-good static value; qed"); - static ref SECRET_PHRASE_REGEX: Regex = Regex::new(r"^(?P[\d\w ]+)?(?P(//?[^/]+)*)(///(?P.*))?$") - .expect("constructed from known-good static value; qed"); - static ref JUNCTION_REGEX: Regex = Regex::new(r"/(/?[^/]+)") - .expect("constructed from known-good static value; qed"); -} - #[cfg(feature = "std")] impl + AsRef<[u8]> + Public + Derive> Ss58Codec for T { fn from_string(s: &str) -> Result { - let cap = SS58_REGEX.captures(s).ok_or(PublicError::InvalidFormat)?; - let s = cap.name("ss58").map(|r| r.as_str()).unwrap_or(DEV_ADDRESS); + let cap = AddressUri::parse(s)?; + if cap.pass.is_some() { + return Err(PublicError::PasswordNotAllowed); + } + let s = cap.phrase.unwrap_or(DEV_ADDRESS); let addr = if let Some(stripped) = s.strip_prefix("0x") { let d = array_bytes::hex2bytes(stripped).map_err(|_| PublicError::InvalidFormat)?; Self::from_slice(&d).map_err(|()| PublicError::BadLength)? } else { Self::from_ss58check(s)? 
}; - if cap["path"].is_empty() { + if cap.paths.is_empty() { Ok(addr) } else { - let path = - JUNCTION_REGEX.captures_iter(&cap["path"]).map(|f| DeriveJunction::from(&f[1])); - addr.derive(path).ok_or(PublicError::InvalidPath) + addr.derive(cap.paths.iter().map(DeriveJunction::from)) + .ok_or(PublicError::InvalidPath) } } fn from_string_with_version(s: &str) -> Result<(Self, Ss58AddressFormat), PublicError> { - let cap = SS58_REGEX.captures(s).ok_or(PublicError::InvalidFormat)?; - let (addr, v) = Self::from_ss58check_with_version( - cap.name("ss58").map(|r| r.as_str()).unwrap_or(DEV_ADDRESS), - )?; - if cap["path"].is_empty() { + let cap = AddressUri::parse(s)?; + if cap.pass.is_some() { + return Err(PublicError::PasswordNotAllowed); + } + let (addr, v) = Self::from_ss58check_with_version(cap.phrase.unwrap_or(DEV_ADDRESS))?; + if cap.paths.is_empty() { Ok((addr, v)) } else { - let path = - JUNCTION_REGEX.captures_iter(&cap["path"]).map(|f| DeriveJunction::from(&f[1])); - addr.derive(path).ok_or(PublicError::InvalidPath).map(|a| (a, v)) + addr.derive(cap.paths.iter().map(DeriveJunction::from)) + .ok_or(PublicError::InvalidPath) + .map(|a| (a, v)) } } } @@ -815,22 +825,15 @@ impl sp_std::str::FromStr for SecretUri { type Err = SecretStringError; fn from_str(s: &str) -> Result { - let cap = SECRET_PHRASE_REGEX.captures(s).ok_or(SecretStringError::InvalidFormat)?; - - let junctions = JUNCTION_REGEX - .captures_iter(&cap["path"]) - .map(|f| DeriveJunction::from(&f[1])) - .collect::>(); - - let phrase = cap.name("phrase").map(|r| r.as_str()).unwrap_or(DEV_PHRASE); - let password = cap.name("password"); + let cap = AddressUri::parse(s)?; + let phrase = cap.phrase.unwrap_or(DEV_PHRASE); Ok(Self { phrase: SecretString::from_str(phrase).expect("Returns infallible error; qed"), - password: password.map(|v| { - SecretString::from_str(v.as_str()).expect("Returns infallible error; qed") - }), - junctions, + password: cap + .pass + .map(|v| SecretString::from_str(v).expect("Returns infallible error; qed")), + junctions: cap.paths.iter().map(DeriveJunction::from).collect::>(), }) } } @@ -870,9 +873,9 @@ pub trait Pair: CryptoType + Sized { /// the key from the current session. 
#[cfg(feature = "std")] fn generate_with_phrase(password: Option<&str>) -> (Self, String, Self::Seed) { - let mnemonic = Mnemonic::new(MnemonicType::Words12, Language::English); - let phrase = mnemonic.phrase(); - let (pair, seed) = Self::from_phrase(phrase, password) + let mnemonic = Mnemonic::generate(12).expect("Mnemonic generation always works; qed"); + let phrase = mnemonic.word_iter().join(" "); + let (pair, seed) = Self::from_phrase(&phrase, password) .expect("All phrases generated by Mnemonic are valid; qed"); (pair, phrase.to_owned(), seed) } @@ -883,10 +886,12 @@ pub trait Pair: CryptoType + Sized { phrase: &str, password: Option<&str>, ) -> Result<(Self, Self::Seed), SecretStringError> { - let mnemonic = Mnemonic::from_phrase(phrase, Language::English) + let mnemonic = Mnemonic::parse_in(Language::English, phrase) .map_err(|_| SecretStringError::InvalidPhrase)?; + + let (entropy, entropy_len) = mnemonic.to_entropy_array(); let big_seed = - substrate_bip39::seed_from_entropy(mnemonic.entropy(), password.unwrap_or("")) + substrate_bip39::seed_from_entropy(&entropy[0..entropy_len], password.unwrap_or("")) .map_err(|_| SecretStringError::InvalidSeed)?; let mut seed = Self::Seed::default(); let seed_slice = seed.as_mut(); @@ -942,8 +947,7 @@ pub trait Pair: CryptoType + Sized { /// - the path may be followed by `///`, in which case everything after the `///` is treated /// as a password. /// - If `s` begins with a `/` character it is prefixed with the Substrate public `DEV_PHRASE` - /// and - /// interpreted as above. + /// and interpreted as above. /// /// In this case they are interpreted as HDKD junctions; purely numeric items are interpreted as /// integers, non-numeric items as strings. Junctions prefixed with `/` are interpreted as soft @@ -1213,7 +1217,7 @@ macro_rules! impl_from_entropy_base { [$type; 17], [$type; 18], [$type; 19], [$type; 20], [$type; 21], [$type; 22], [$type; 23], [$type; 24], [$type; 25], [$type; 26], [$type; 27], [$type; 28], [$type; 29], [$type; 30], [$type; 31], [$type; 32], [$type; 36], [$type; 40], [$type; 44], [$type; 48], [$type; 56], [$type; 64], [$type; 72], [$type; 80], - [$type; 96], [$type; 112], [$type; 128], [$type; 160], [$type; 192], [$type; 224], [$type; 256] + [$type; 96], [$type; 112], [$type; 128], [$type; 160], [$type; 177], [$type; 192], [$type; 224], [$type; 256] ); } } diff --git a/substrate/primitives/core/src/ecdsa.rs b/substrate/primitives/core/src/ecdsa.rs index 05bc679386c3dcf8741ade5221bc4b714693c138..471714582a6bba766cc05ad6e73e870c3d690a93 100644 --- a/substrate/primitives/core/src/ecdsa.rs +++ b/substrate/primitives/core/src/ecdsa.rs @@ -50,6 +50,12 @@ use sp_std::vec::Vec; /// An identifier used to match public keys against ecdsa keys pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"ecds"); +/// The byte length of public key +pub const PUBLIC_KEY_SERIALIZED_SIZE: usize = 33; + +/// The byte length of signature +pub const SIGNATURE_SERIALIZED_SIZE: usize = 65; + /// A secret seed (which is bytewise essentially equivalent to a SecretKey). /// /// We need it as a different type because `Seed` is expected to be AsRef<[u8]>. 
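A small usage sketch of the secret-URI rules documented in `crypto.rs` above, now parsed by `AddressUri` instead of regular expressions. `sr25519` is used purely as an example scheme; any `Pair` implementation behaves the same:

```rust
// Sketch: needs `std`/`full_crypto`. `DEV_PHRASE` is substituted when the URI starts with '/'.
use sp_core::{crypto::Pair as _, sr25519};

fn derive_from_uri() {
    // No phrase part, so `DEV_PHRASE` is assumed, followed by one hard junction "Alice".
    let alice = sr25519::Pair::from_string("//Alice", None).expect("known-good dev URI");

    // Explicit phrase, one soft junction "foo" and the password "secret".
    let custom = sr25519::Pair::from_string(
        "bottom drive obey lake curtain smoke basket hold race lonely fit walk/foo///secret",
        None,
    )
    .expect("valid URI");

    assert_ne!(alice.public(), custom.public());
}
```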
@@ -71,11 +77,11 @@ type Seed = [u8; 32]; PartialOrd, Ord, )] -pub struct Public(pub [u8; 33]); +pub struct Public(pub [u8; PUBLIC_KEY_SERIALIZED_SIZE]); impl crate::crypto::FromEntropy for Public { fn from_entropy(input: &mut impl codec::Input) -> Result { - let mut result = Self([0u8; 33]); + let mut result = Self([0u8; PUBLIC_KEY_SERIALIZED_SIZE]); input.read(&mut result.0[..])?; Ok(result) } @@ -86,7 +92,7 @@ impl Public { /// /// NOTE: No checking goes on to ensure this is a real public key. Only use it if /// you are certain that the array actually is a pubkey. GIGO! - pub fn from_raw(data: [u8; 33]) -> Self { + pub fn from_raw(data: [u8; PUBLIC_KEY_SERIALIZED_SIZE]) -> Self { Self(data) } @@ -109,7 +115,7 @@ impl Public { } impl ByteArray for Public { - const LEN: usize = 33; + const LEN: usize = PUBLIC_KEY_SERIALIZED_SIZE; } impl TraitPublic for Public {} @@ -148,8 +154,8 @@ impl From for Public { } } -impl UncheckedFrom<[u8; 33]> for Public { - fn unchecked_from(x: [u8; 33]) -> Self { +impl UncheckedFrom<[u8; PUBLIC_KEY_SERIALIZED_SIZE]> for Public { + fn unchecked_from(x: [u8; PUBLIC_KEY_SERIALIZED_SIZE]) -> Self { Public(x) } } @@ -198,14 +204,18 @@ impl<'de> Deserialize<'de> for Public { /// A signature (a 512-bit value, plus 8 bits for recovery ID). #[cfg_attr(feature = "full_crypto", derive(Hash))] #[derive(Encode, Decode, MaxEncodedLen, PassByInner, TypeInfo, PartialEq, Eq)] -pub struct Signature(pub [u8; 65]); +pub struct Signature(pub [u8; SIGNATURE_SERIALIZED_SIZE]); + +impl ByteArray for Signature { + const LEN: usize = SIGNATURE_SERIALIZED_SIZE; +} impl TryFrom<&[u8]> for Signature { type Error = (); fn try_from(data: &[u8]) -> Result { - if data.len() == 65 { - let mut inner = [0u8; 65]; + if data.len() == SIGNATURE_SERIALIZED_SIZE { + let mut inner = [0u8; SIGNATURE_SERIALIZED_SIZE]; inner.copy_from_slice(data); Ok(Signature(inner)) } else { @@ -239,7 +249,7 @@ impl<'de> Deserialize<'de> for Signature { impl Clone for Signature { fn clone(&self) -> Self { - let mut r = [0u8; 65]; + let mut r = [0u8; SIGNATURE_SERIALIZED_SIZE]; r.copy_from_slice(&self.0[..]); Signature(r) } @@ -247,18 +257,18 @@ impl Clone for Signature { impl Default for Signature { fn default() -> Self { - Signature([0u8; 65]) + Signature([0u8; SIGNATURE_SERIALIZED_SIZE]) } } -impl From for [u8; 65] { - fn from(v: Signature) -> [u8; 65] { +impl From for [u8; SIGNATURE_SERIALIZED_SIZE] { + fn from(v: Signature) -> [u8; SIGNATURE_SERIALIZED_SIZE] { v.0 } } -impl AsRef<[u8; 65]> for Signature { - fn as_ref(&self) -> &[u8; 65] { +impl AsRef<[u8; SIGNATURE_SERIALIZED_SIZE]> for Signature { + fn as_ref(&self) -> &[u8; SIGNATURE_SERIALIZED_SIZE] { &self.0 } } @@ -287,8 +297,8 @@ impl sp_std::fmt::Debug for Signature { } } -impl UncheckedFrom<[u8; 65]> for Signature { - fn unchecked_from(data: [u8; 65]) -> Signature { +impl UncheckedFrom<[u8; SIGNATURE_SERIALIZED_SIZE]> for Signature { + fn unchecked_from(data: [u8; SIGNATURE_SERIALIZED_SIZE]) -> Signature { Signature(data) } } @@ -298,7 +308,7 @@ impl Signature { /// /// NOTE: No checking goes on to ensure this is a real signature. Only use it if /// you are certain that the array actually is a signature. GIGO! - pub fn from_raw(data: [u8; 65]) -> Signature { + pub fn from_raw(data: [u8; SIGNATURE_SERIALIZED_SIZE]) -> Signature { Signature(data) } @@ -307,10 +317,10 @@ impl Signature { /// NOTE: No checking goes on to ensure this is a real signature. Only use it if /// you are certain that the array actually is a signature. GIGO! 
pub fn from_slice(data: &[u8]) -> Option { - if data.len() != 65 { + if data.len() != SIGNATURE_SERIALIZED_SIZE { return None } - let mut r = [0u8; 65]; + let mut r = [0u8; SIGNATURE_SERIALIZED_SIZE]; r.copy_from_slice(data); Some(Signature(r)) } @@ -326,7 +336,7 @@ impl Signature { pub fn recover_prehashed(&self, message: &[u8; 32]) -> Option { let rid = RecoveryId::from_i32(self.0[64] as i32).ok()?; let sig = RecoverableSignature::from_compact(&self.0[..64], rid).ok()?; - let message = Message::from_slice(message).expect("Message is 32 bytes; qed"); + let message = Message::from_digest_slice(message).expect("Message is 32 bytes; qed"); #[cfg(feature = "std")] let context = SECP256K1; @@ -448,7 +458,7 @@ impl Pair { /// Sign a pre-hashed message pub fn sign_prehashed(&self, message: &[u8; 32]) -> Signature { - let message = Message::from_slice(message).expect("Message is 32 bytes; qed"); + let message = Message::from_digest_slice(message).expect("Message is 32 bytes; qed"); #[cfg(feature = "std")] let context = SECP256K1; @@ -473,7 +483,7 @@ impl Pair { pub fn verify_deprecated>(sig: &Signature, message: M, pubkey: &Public) -> bool { let message = libsecp256k1::Message::parse(&blake2_256(message.as_ref())); - let parse_signature_overflowing = |x: [u8; 65]| { + let parse_signature_overflowing = |x: [u8; SIGNATURE_SERIALIZED_SIZE]| { let sig = libsecp256k1::Signature::parse_overflowing_slice(&x[..64]).ok()?; let rid = libsecp256k1::RecoveryId::parse(x[64]).ok()?; Some((sig, rid)) @@ -498,12 +508,7 @@ impl Pair { #[cfg(feature = "full_crypto")] impl Drop for Pair { fn drop(&mut self) { - let ptr = self.secret.as_mut_ptr(); - for off in 0..self.secret.len() { - unsafe { - core::ptr::write_volatile(ptr.add(off), 0); - } - } + self.secret.non_secure_erase() } } @@ -726,7 +731,7 @@ mod test { let signature = pair.sign(&message[..]); let serialized_signature = serde_json::to_string(&signature).unwrap(); // Signature is 65 bytes, so 130 chars + 2 quote chars - assert_eq!(serialized_signature.len(), 132); + assert_eq!(serialized_signature.len(), SIGNATURE_SERIALIZED_SIZE * 2 + 2); let signature = serde_json::from_str(&serialized_signature).unwrap(); assert!(Pair::verify(&signature, &message[..], &pair.public())); } @@ -750,7 +755,7 @@ mod test { let msg = [0u8; 32]; let sig1 = pair.sign_prehashed(&msg); let sig2: Signature = { - let message = Message::from_slice(&msg).unwrap(); + let message = Message::from_digest_slice(&msg).unwrap(); SECP256K1.sign_ecdsa_recoverable(&message, &pair.secret).into() }; assert_eq!(sig1, sig2); diff --git a/substrate/primitives/core/src/hexdisplay.rs b/substrate/primitives/core/src/hexdisplay.rs index 30e045dfc52acae9bbd22fecd4a2c8a4edbae979..72bb24a186e54dbfb4deecd105065dc2b63d332d 100644 --- a/substrate/primitives/core/src/hexdisplay.rs +++ b/substrate/primitives/core/src/hexdisplay.rs @@ -96,7 +96,7 @@ macro_rules! impl_non_endians { impl_non_endians!( [u8; 1], [u8; 2], [u8; 3], [u8; 4], [u8; 5], [u8; 6], [u8; 7], [u8; 8], [u8; 10], [u8; 12], [u8; 14], [u8; 16], [u8; 20], [u8; 24], [u8; 28], [u8; 32], [u8; 40], [u8; 48], [u8; 56], - [u8; 64], [u8; 65], [u8; 80], [u8; 96], [u8; 112], [u8; 128], [u8; 144] + [u8; 64], [u8; 65], [u8; 80], [u8; 96], [u8; 112], [u8; 128], [u8; 144], [u8; 177] ); /// Format into ASCII + # + hex, suitable for storage key preimages. 
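A brief sketch of the pre-hashed ECDSA flow whose internals migrated to `Message::from_digest_slice` above. The payload and the choice of `blake2_256` for producing the 32-byte digest are illustrative:

```rust
// Sketch: needs `std`/`full_crypto`.
use sp_core::{crypto::Pair as _, ecdsa, hashing::blake2_256};

fn sign_and_recover_prehashed() {
    let (pair, _) = ecdsa::Pair::generate();

    // The caller supplies the 32-byte digest; here it is produced with blake2_256.
    let digest = blake2_256(b"some payload");
    let sig = pair.sign_prehashed(&digest);

    // Recovery from the same digest yields the signer's compressed 33-byte public key.
    assert_eq!(sig.recover_prehashed(&digest), Some(pair.public()));
}
```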
diff --git a/substrate/primitives/core/src/lib.rs b/substrate/primitives/core/src/lib.rs index 3a0e1f33f16c9d5fdbded144895fa1aa38bf3d73..4873d1a2112744e2e5ebbcd5c06c5f4b186f04ff 100644 --- a/substrate/primitives/core/src/lib.rs +++ b/substrate/primitives/core/src/lib.rs @@ -55,6 +55,8 @@ pub mod crypto; pub mod hexdisplay; pub use paste; +#[cfg(any(feature = "full_crypto", feature = "std"))] +mod address_uri; #[cfg(feature = "bandersnatch-experimental")] pub mod bandersnatch; #[cfg(feature = "bls-experimental")] @@ -66,6 +68,7 @@ pub mod hash; #[cfg(feature = "std")] mod hasher; pub mod offchain; +pub mod paired_crypto; pub mod sr25519; pub mod testing; #[cfg(feature = "std")] @@ -74,6 +77,8 @@ pub mod uint; #[cfg(feature = "bls-experimental")] pub use bls::{bls377, bls381}; +#[cfg(feature = "bls-experimental")] +pub use paired_crypto::ecdsa_bls377; pub use self::{ hash::{convert_hash, H160, H256, H512}, diff --git a/substrate/primitives/core/src/paired_crypto.rs b/substrate/primitives/core/src/paired_crypto.rs new file mode 100644 index 0000000000000000000000000000000000000000..edf6156f268eeccadd5da4333ab7e049eaf0c296 --- /dev/null +++ b/substrate/primitives/core/src/paired_crypto.rs @@ -0,0 +1,736 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! API for using a pair of crypto schemes together. + +#[cfg(feature = "serde")] +use crate::crypto::Ss58Codec; +use crate::crypto::{ByteArray, CryptoType, Derive, Public as PublicT, UncheckedFrom}; +#[cfg(feature = "full_crypto")] +use crate::crypto::{DeriveError, DeriveJunction, Pair as PairT, SecretStringError}; + +#[cfg(feature = "full_crypto")] +use sp_std::vec::Vec; + +use codec::{Decode, Encode, MaxEncodedLen}; +use scale_info::TypeInfo; +#[cfg(feature = "serde")] +use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; +#[cfg(all(not(feature = "std"), feature = "serde"))] +use sp_std::alloc::{format, string::String}; + +use sp_runtime_interface::pass_by::{self, PassBy, PassByInner}; +use sp_std::convert::TryFrom; + +/// ECDSA and BLS12-377 paired crypto scheme +#[cfg(feature = "bls-experimental")] +pub mod ecdsa_bls377 { + #[cfg(feature = "full_crypto")] + use crate::Hasher; + use crate::{ + bls377, + crypto::{CryptoTypeId, Pair as PairT, UncheckedFrom}, + ecdsa, + }; + + /// An identifier used to match public keys against BLS12-377 keys + pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"ecb7"); + + const PUBLIC_KEY_LEN: usize = + ecdsa::PUBLIC_KEY_SERIALIZED_SIZE + bls377::PUBLIC_KEY_SERIALIZED_SIZE; + const SIGNATURE_LEN: usize = + ecdsa::SIGNATURE_SERIALIZED_SIZE + bls377::SIGNATURE_SERIALIZED_SIZE; + + /// (ECDSA,BLS12-377) key-pair pair. + #[cfg(feature = "full_crypto")] + pub type Pair = super::Pair; + /// (ECDSA,BLS12-377) public key pair. + pub type Public = super::Public; + /// (ECDSA,BLS12-377) signature pair. 
+ pub type Signature = super::Signature; + + impl super::CryptoType for Public { + #[cfg(feature = "full_crypto")] + type Pair = Pair; + } + + impl super::CryptoType for Signature { + #[cfg(feature = "full_crypto")] + type Pair = Pair; + } + + #[cfg(feature = "full_crypto")] + impl super::CryptoType for Pair { + type Pair = Pair; + } + + #[cfg(feature = "full_crypto")] + impl Pair { + /// Hashes the `message` with the specified [`Hasher`] before signing with the ECDSA secret + /// component. + /// + /// The hasher does not affect the BLS12-377 component. This generates the BLS12-377 signature + /// according to the IETF standard. + pub fn sign_with_hasher(&self, message: &[u8]) -> Signature + where + H: Hasher, + H::Out: Into<[u8; 32]>, + { + let msg_hash = H::hash(message).into(); + + let mut raw: [u8; SIGNATURE_LEN] = [0u8; SIGNATURE_LEN]; + raw[..ecdsa::SIGNATURE_SERIALIZED_SIZE] + .copy_from_slice(self.left.sign_prehashed(&msg_hash).as_ref()); + raw[ecdsa::SIGNATURE_SERIALIZED_SIZE..] + .copy_from_slice(self.right.sign(message).as_ref()); + ::Signature::unchecked_from(raw) + } + + /// Hashes the `message` with the specified [`Hasher`] before verifying with the ECDSA + /// public component. + /// + /// The hasher does not affect the BLS12-377 component. This verifies whether the + /// BLS12-377 signature was hashed and signed according to the IETF standard. + pub fn verify_with_hasher(sig: &Signature, message: &[u8], public: &Public) -> bool + where + H: Hasher, + H::Out: Into<[u8; 32]>, + { + let msg_hash = H::hash(message).into(); + + let Ok(left_pub) = public.0[..ecdsa::PUBLIC_KEY_SERIALIZED_SIZE].try_into() else { + return false + }; + let Ok(left_sig) = sig.0[0..ecdsa::SIGNATURE_SERIALIZED_SIZE].try_into() else { + return false + }; + if !ecdsa::Pair::verify_prehashed(&left_sig, &msg_hash, &left_pub) { + return false + } + + let Ok(right_pub) = public.0[ecdsa::PUBLIC_KEY_SERIALIZED_SIZE..].try_into() else { + return false + }; + let Ok(right_sig) = sig.0[ecdsa::SIGNATURE_SERIALIZED_SIZE..].try_into() else { + return false + }; + bls377::Pair::verify(&right_sig, message.as_ref(), &right_pub) + } + } +} + +/// Secure seed length. +/// +/// Currently only supporting sub-schemes whose seed is a 32-byte array. +#[cfg(feature = "full_crypto")] +const SECURE_SEED_LEN: usize = 32; + +/// A secret seed. +/// +/// It's not called a "secret key" because ring doesn't expose the secret keys +/// of the key pair (yeah, dumb); as such we're forced to remember the seed manually if we +/// will need it later (such as for HDKD). +#[cfg(feature = "full_crypto")] +type Seed = [u8; SECURE_SEED_LEN]; + +/// A public key. +#[derive(Clone, Encode, Decode, MaxEncodedLen, TypeInfo, PartialEq, Eq, PartialOrd, Ord)] +pub struct Public([u8; LEFT_PLUS_RIGHT_LEN]); + +#[cfg(feature = "full_crypto")] +impl sp_std::hash::Hash for Public { + fn hash(&self, state: &mut H) { + self.0.hash(state); + } +} + +impl ByteArray for Public { + const LEN: usize = LEFT_PLUS_RIGHT_LEN; +} + +impl TryFrom<&[u8]> for Public { + type Error = (); + + fn try_from(data: &[u8]) -> Result { + if data.len() != LEFT_PLUS_RIGHT_LEN { + return Err(()) + } + let mut inner = [0u8; LEFT_PLUS_RIGHT_LEN]; + inner.copy_from_slice(data); + Ok(Public(inner)) + } +} + +impl AsRef<[u8; LEFT_PLUS_RIGHT_LEN]> + for Public +{ + fn as_ref(&self) -> &[u8; LEFT_PLUS_RIGHT_LEN] { + &self.0 + } +} + +impl AsRef<[u8]> for Public { + fn as_ref(&self) -> &[u8] { + &self.0[..]
+ } +} + +impl AsMut<[u8]> for Public { + fn as_mut(&mut self) -> &mut [u8] { + &mut self.0[..] + } +} + +impl PassByInner for Public { + type Inner = [u8; LEFT_PLUS_RIGHT_LEN]; + + fn into_inner(self) -> Self::Inner { + self.0 + } + + fn inner(&self) -> &Self::Inner { + &self.0 + } + + fn from_inner(inner: Self::Inner) -> Self { + Self(inner) + } +} + +impl PassBy for Public { + type PassBy = pass_by::Inner; +} + +#[cfg(feature = "full_crypto")] +impl< + LeftPair: PairT, + RightPair: PairT, + const LEFT_PLUS_RIGHT_PUBLIC_LEN: usize, + const SIGNATURE_LEN: usize, + > From> + for Public +where + Pair: + PairT>, +{ + fn from(x: Pair) -> Self { + x.public() + } +} + +impl UncheckedFrom<[u8; LEFT_PLUS_RIGHT_LEN]> + for Public +{ + fn unchecked_from(data: [u8; LEFT_PLUS_RIGHT_LEN]) -> Self { + Public(data) + } +} + +#[cfg(feature = "std")] +impl std::fmt::Display for Public +where + Public: CryptoType, +{ + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{}", self.to_ss58check()) + } +} + +impl sp_std::fmt::Debug for Public +where + Public: CryptoType, + [u8; LEFT_PLUS_RIGHT_LEN]: crate::hexdisplay::AsBytesRef, +{ + #[cfg(feature = "std")] + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + let s = self.to_ss58check(); + write!(f, "{} ({}...)", crate::hexdisplay::HexDisplay::from(&self.0), &s[0..8]) + } + + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + Ok(()) + } +} + +#[cfg(feature = "serde")] +impl Serialize for Public +where + Public: CryptoType, +{ + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_str(&self.to_ss58check()) + } +} + +#[cfg(feature = "serde")] +impl<'de, const LEFT_PLUS_RIGHT_LEN: usize> Deserialize<'de> for Public +where + Public: CryptoType, +{ + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + Public::from_ss58check(&String::deserialize(deserializer)?) + .map_err(|e| de::Error::custom(format!("{:?}", e))) + } +} + +impl PublicT for Public where + Public: CryptoType +{ +} + +impl Derive for Public {} + +/// Trait characterizing a signature which could be used as an individual component of a +/// `paired_crypto::Signature` pair. +pub trait SignatureBound: ByteArray {} + +impl SignatureBound for T {} + +/// A pair of signatures of different types. +#[derive(Clone, Encode, Decode, MaxEncodedLen, TypeInfo, PartialEq, Eq)] +pub struct Signature([u8; LEFT_PLUS_RIGHT_LEN]); + +#[cfg(feature = "full_crypto")] +impl sp_std::hash::Hash for Signature { + fn hash(&self, state: &mut H) { + self.0.hash(state); + } +} + +impl ByteArray for Signature { + const LEN: usize = LEFT_PLUS_RIGHT_LEN; +} + +impl TryFrom<&[u8]> for Signature { + type Error = (); + + fn try_from(data: &[u8]) -> Result { + if data.len() != LEFT_PLUS_RIGHT_LEN { + return Err(()) + } + let mut inner = [0u8; LEFT_PLUS_RIGHT_LEN]; + inner.copy_from_slice(data); + Ok(Signature(inner)) + } +} + +impl AsMut<[u8]> for Signature { + fn as_mut(&mut self) -> &mut [u8] { + &mut self.0[..] + } +} + +impl AsRef<[u8; LEFT_PLUS_RIGHT_LEN]> + for Signature +{ + fn as_ref(&self) -> &[u8; LEFT_PLUS_RIGHT_LEN] { + &self.0 + } +} + +impl AsRef<[u8]> for Signature { + fn as_ref(&self) -> &[u8] { + &self.0[..]
+ } +} + +#[cfg(feature = "serde")] +impl Serialize for Signature { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_str(&array_bytes::bytes2hex("", self)) + } +} + +#[cfg(feature = "serde")] +impl<'de, const LEFT_PLUS_RIGHT_LEN: usize> Deserialize<'de> for Signature { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let bytes = array_bytes::hex2bytes(&String::deserialize(deserializer)?) + .map_err(|e| de::Error::custom(format!("{:?}", e)))?; + Signature::::try_from(bytes.as_ref()).map_err(|e| { + de::Error::custom(format!("Error converting deserialized data into signature: {:?}", e)) + }) + } +} + +impl From> + for [u8; LEFT_PLUS_RIGHT_LEN] +{ + fn from(signature: Signature) -> [u8; LEFT_PLUS_RIGHT_LEN] { + signature.0 + } +} + +impl sp_std::fmt::Debug for Signature +where + [u8; LEFT_PLUS_RIGHT_LEN]: crate::hexdisplay::AsBytesRef, +{ + #[cfg(feature = "std")] + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + write!(f, "{}", crate::hexdisplay::HexDisplay::from(&self.0)) + } + + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + Ok(()) + } +} + +impl UncheckedFrom<[u8; LEFT_PLUS_RIGHT_LEN]> + for Signature +{ + fn unchecked_from(data: [u8; LEFT_PLUS_RIGHT_LEN]) -> Self { + Signature(data) + } +} + +/// A key pair. +#[cfg(feature = "full_crypto")] +#[derive(Clone)] +pub struct Pair< + LeftPair: PairT, + RightPair: PairT, + const PUBLIC_KEY_LEN: usize, + const SIGNATURE_LEN: usize, +> { + left: LeftPair, + right: RightPair, +} + +#[cfg(feature = "full_crypto")] +impl< + LeftPair: PairT, + RightPair: PairT, + const PUBLIC_KEY_LEN: usize, + const SIGNATURE_LEN: usize, + > PairT for Pair +where + Pair: CryptoType, + LeftPair::Signature: SignatureBound, + RightPair::Signature: SignatureBound, + Public: CryptoType, + LeftPair::Seed: From + Into, + RightPair::Seed: From + Into, +{ + type Seed = Seed; + type Public = Public; + type Signature = Signature; + + fn from_seed_slice(seed_slice: &[u8]) -> Result { + if seed_slice.len() != SECURE_SEED_LEN { + return Err(SecretStringError::InvalidSeedLength) + } + let left = LeftPair::from_seed_slice(&seed_slice)?; + let right = RightPair::from_seed_slice(&seed_slice)?; + Ok(Pair { left, right }) + } + + /// Derive a child key from a series of given junctions. + /// + /// Note: if the `LeftPair` and `RightPair` crypto schemes differ in + /// seed derivation, `derive` will drop the seed in the return. 
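+ /// Concretely, a derived seed is returned only when both sub-pairs end up deriving
+ /// the same seed; otherwise `None` is returned.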
+ fn derive>( + &self, + path: Iter, + seed: Option, + ) -> Result<(Self, Option), DeriveError> { + let path: Vec<_> = path.collect(); + + let left = self.left.derive(path.iter().cloned(), seed.map(|s| s.into()))?; + let right = self.right.derive(path.into_iter(), seed.map(|s| s.into()))?; + + let seed = match (left.1, right.1) { + (Some(l), Some(r)) if l.as_ref() == r.as_ref() => Some(l.into()), + _ => None, + }; + + Ok((Self { left: left.0, right: right.0 }, seed)) + } + + fn public(&self) -> Self::Public { + let mut raw = [0u8; PUBLIC_KEY_LEN]; + let left_pub = self.left.public(); + let right_pub = self.right.public(); + raw[..LeftPair::Public::LEN].copy_from_slice(left_pub.as_ref()); + raw[LeftPair::Public::LEN..].copy_from_slice(right_pub.as_ref()); + Self::Public::unchecked_from(raw) + } + + fn sign(&self, message: &[u8]) -> Self::Signature { + let mut raw: [u8; SIGNATURE_LEN] = [0u8; SIGNATURE_LEN]; + raw[..LeftPair::Signature::LEN].copy_from_slice(self.left.sign(message).as_ref()); + raw[LeftPair::Signature::LEN..].copy_from_slice(self.right.sign(message).as_ref()); + Self::Signature::unchecked_from(raw) + } + + fn verify>(sig: &Self::Signature, message: M, public: &Self::Public) -> bool { + let Ok(left_pub) = public.0[..LeftPair::Public::LEN].try_into() else { return false }; + let Ok(left_sig) = sig.0[0..LeftPair::Signature::LEN].try_into() else { return false }; + if !LeftPair::verify(&left_sig, message.as_ref(), &left_pub) { + return false + } + + let Ok(right_pub) = public.0[LeftPair::Public::LEN..PUBLIC_KEY_LEN].try_into() else { + return false + }; + let Ok(right_sig) = sig.0[LeftPair::Signature::LEN..].try_into() else { return false }; + RightPair::verify(&right_sig, message.as_ref(), &right_pub) + } + + /// Get the seed/secret key for each key and then concatenate them. 
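+ /// The returned vector is the left pair's raw secret bytes immediately followed by
+ /// the right pair's.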
+ fn to_raw_vec(&self) -> Vec { + let mut raw = self.left.to_raw_vec(); + raw.extend(self.right.to_raw_vec()); + raw + } +} + +// Test set exercising the (ECDSA,BLS12-377) implementation +#[cfg(all(test, feature = "bls-experimental"))] +mod test { + use super::*; + use crate::{crypto::DEV_PHRASE, KeccakHasher}; + use ecdsa_bls377::{Pair, Signature}; + + use crate::{bls377, ecdsa}; + + #[test] + fn test_length_of_paired_ecdsa_and_bls377_public_key_and_signature_is_correct() { + assert_eq!( + ::Public::LEN, + ::Public::LEN + ::Public::LEN + ); + assert_eq!( + ::Signature::LEN, + ::Signature::LEN + ::Signature::LEN + ); + } + + #[test] + fn default_phrase_should_be_used() { + assert_eq!( + Pair::from_string("//Alice///password", None).unwrap().public(), + Pair::from_string(&format!("{}//Alice", DEV_PHRASE), Some("password")) + .unwrap() + .public(), + ); + } + + #[test] + fn seed_and_derive_should_work() { + let seed_for_right_and_left: [u8; SECURE_SEED_LEN] = array_bytes::hex2array_unchecked( + "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60", + ); + let pair = Pair::from_seed(&seed_for_right_and_left); + // we are using hash to field so this is not going to work + // assert_eq!(pair.seed(), seed); + let path = vec![DeriveJunction::Hard([0u8; 32])]; + let derived = pair.derive(path.into_iter(), None).ok().unwrap().0; + assert_eq!( + derived.to_raw_vec(), + [ + array_bytes::hex2array_unchecked::<&str, SECURE_SEED_LEN>( + "b8eefc4937200a8382d00050e050ced2d4ab72cc2ef1b061477afb51564fdd61" + ), + array_bytes::hex2array_unchecked::<&str, SECURE_SEED_LEN>( + "3a0626d095148813cd1642d38254f1cfff7eb8cc1a2fc83b2a135377c3554c12" + ) + ] + .concat() + ); + } + + #[test] + fn test_vector_should_work() { + let seed_left_and_right: [u8; SECURE_SEED_LEN] = array_bytes::hex2array_unchecked( + "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60", + ); + let pair = Pair::from_seed(&([seed_left_and_right].concat()[..].try_into().unwrap())); + let public = pair.public(); + assert_eq!( + public, + Public::unchecked_from( + array_bytes::hex2array_unchecked("028db55b05db86c0b1786ca49f095d76344c9e6056b2f02701a7e7f3c20aabfd917a84ca8ce4c37c93c95ecee6a3c0c9a7b9c225093cf2f12dc4f69cbfb847ef9424a18f5755d5a742247d386ff2aabb806bcf160eff31293ea9616976628f77266c8a8cc1d8753be04197bd6cdd8c5c87a148f782c4c1568d599b48833fd539001e580cff64bbc71850605433fcd051f3afc3b74819786f815ffb5272030a8d03e5df61e6183f8fd8ea85f26defa83400"), + ), + ); + let message = b""; + let signature = + array_bytes::hex2array_unchecked("3dde91174bd9359027be59a428b8146513df80a2a3c7eda2194f64de04a69ab97b753169e94db6ffd50921a2668a48b94ca11e3d32c1ff19cfe88890aa7e8f3c00d1e3013161991e142d8751017d4996209c2ff8a9ee160f373733eda3b4b785ba6edce9f45f87104bbe07aa6aa6eb2780aa705efb2c13d3b317d6409d159d23bdc7cdd5c2a832d1551cf49d811d49c901495e527dbd532e3a462335ce2686009104aba7bc11c5b22be78f3198d2727a0b" + ); + let signature = Signature::unchecked_from(signature); + assert!(pair.sign(&message[..]) == signature); + assert!(Pair::verify(&signature, &message[..], &public)); + } + + #[test] + fn test_vector_by_string_should_work() { + let pair = Pair::from_string( + "0x9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60", + None, + ) + .unwrap(); + let public = pair.public(); + assert_eq!( + public, + Public::unchecked_from( + 
array_bytes::hex2array_unchecked("028db55b05db86c0b1786ca49f095d76344c9e6056b2f02701a7e7f3c20aabfd916dc6be608fab3c6bd894a606be86db346cc170db85c733853a371f3db54ae1b12052c0888d472760c81b537572a26f00db865e5963aef8634f9917571c51b538b564b2a9ceda938c8b930969ee3b832448e08e33a79e9ddd28af419a3ce45300f5dbc768b067781f44f3fe05a19e6b07b1c4196151ec3f8ea37e4f89a8963030d2101e931276bb9ebe1f20102239d780" + ), + ), + ); + let message = b""; + let signature = + array_bytes::hex2array_unchecked("3dde91174bd9359027be59a428b8146513df80a2a3c7eda2194f64de04a69ab97b753169e94db6ffd50921a2668a48b94ca11e3d32c1ff19cfe88890aa7e8f3c00bbb395bbdee1a35930912034f5fde3b36df2835a0536c865501b0675776a1d5931a3bea2e66eff73b2546c6af2061a8019223e4ebbbed661b2538e0f5823f2c708eb89c406beca8fcb53a5c13dbc7c0c42e4cf2be2942bba96ea29297915a06bd2b1b979c0e2ac8fd4ec684a6b5d110c" + ); + let signature = Signature::unchecked_from(signature); + assert!(pair.sign(&message[..]) == signature); + assert!(Pair::verify(&signature, &message[..], &public)); + } + + #[test] + fn generated_pair_should_work() { + let (pair, _) = Pair::generate(); + let public = pair.public(); + let message = b"Something important"; + let signature = pair.sign(&message[..]); + assert!(Pair::verify(&signature, &message[..], &public)); + assert!(!Pair::verify(&signature, b"Something else", &public)); + } + + #[test] + fn seeded_pair_should_work() { + let pair = + Pair::from_seed(&(b"12345678901234567890123456789012".as_slice().try_into().unwrap())); + let public = pair.public(); + assert_eq!( + public, + Public::unchecked_from( + array_bytes::hex2array_unchecked("035676109c54b9a16d271abeb4954316a40a32bcce023ac14c8e26e958aa68fba9754d2f2bbfa67df54d7e0e951979a18a1e0f45948857752cc2bac6bbb0b1d05e8e48bcc453920bf0c4bbd5993212480112a1fb433f04d74af0a8b700d93dc957ab3207f8d071e948f5aca1a7632c00bdf6d06be05b43e2e6216dccc8a5d55a0071cb2313cfd60b7e9114619cd17c06843b352f0b607a99122f6651df8f02e1ad3697bd208e62af047ddd7b942ba80080") + ), + ); + let message = + array_bytes::hex2bytes_unchecked("2f8c6129d816cf51c374bc7f08c3e63ed156cf78aefb4a6550d97b87997977ee00000000000000000200d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a4500000000000000" + ); + let signature = pair.sign(&message[..]); + println!("Correct signature: {:?}", signature); + assert!(Pair::verify(&signature, &message[..], &public)); + assert!(!Pair::verify(&signature, "Other message", &public)); + } + + #[test] + fn generate_with_phrase_recovery_possible() { + let (pair1, phrase, _) = Pair::generate_with_phrase(None); + let (pair2, _) = Pair::from_phrase(&phrase, None).unwrap(); + + assert_eq!(pair1.public(), pair2.public()); + } + + #[test] + fn generate_with_password_phrase_recovery_possible() { + let (pair1, phrase, _) = Pair::generate_with_phrase(Some("password")); + let (pair2, _) = Pair::from_phrase(&phrase, Some("password")).unwrap(); + + assert_eq!(pair1.public(), pair2.public()); + } + + #[test] + fn password_does_something() { + let (pair1, phrase, _) = Pair::generate_with_phrase(Some("password")); + let (pair2, _) = Pair::from_phrase(&phrase, None).unwrap(); + + assert_ne!(pair1.public(), pair2.public()); + } + + #[test] + fn ss58check_roundtrip_works() { + let pair = + Pair::from_seed(&(b"12345678901234567890123456789012".as_slice().try_into().unwrap())); + let public = pair.public(); + let s = public.to_ss58check(); + println!("Correct: {}", s); + let cmp = Public::from_ss58check(&s).unwrap(); + assert_eq!(cmp, public); + } + + #[test] + fn sign_and_verify_with_hasher_works() { + let pair = + 
Pair::from_seed(&(b"12345678901234567890123456789012".as_slice().try_into().unwrap())); + let message = b"Something important"; + let signature = pair.sign_with_hasher::(&message[..]); + + assert!(Pair::verify_with_hasher::(&signature, &message[..], &pair.public())); + } + + #[test] + fn signature_serialization_works() { + let pair = + Pair::from_seed(&(b"12345678901234567890123456789012".as_slice().try_into().unwrap())); + let message = b"Something important"; + let signature = pair.sign(&message[..]); + + let serialized_signature = serde_json::to_string(&signature).unwrap(); + println!("{:?} -- {:}", signature.0, serialized_signature); + // Signature is 177 bytes, hexify * 2 + 2 quote charsy + assert_eq!(serialized_signature.len(), 356); + let signature = serde_json::from_str(&serialized_signature).unwrap(); + assert!(Pair::verify(&signature, &message[..], &pair.public())); + } + + #[test] + fn signature_serialization_doesnt_panic() { + fn deserialize_signature(text: &str) -> Result { + serde_json::from_str(text) + } + assert!(deserialize_signature("Not valid json.").is_err()); + assert!(deserialize_signature("\"Not an actual signature.\"").is_err()); + // Poorly-sized + assert!(deserialize_signature("\"abc123\"").is_err()); + } + + #[test] + fn encode_and_decode_public_key_works() { + let pair = + Pair::from_seed(&(b"12345678901234567890123456789012".as_slice().try_into().unwrap())); + let public = pair.public(); + let encoded_public = public.encode(); + let decoded_public = Public::decode(&mut encoded_public.as_slice()).unwrap(); + assert_eq!(public, decoded_public) + } + + #[test] + fn encode_and_decode_signature_works() { + let pair = + Pair::from_seed(&(b"12345678901234567890123456789012".as_slice().try_into().unwrap())); + let message = b"Something important"; + let signature = pair.sign(&message[..]); + let encoded_signature = signature.encode(); + let decoded_signature = Signature::decode(&mut encoded_signature.as_slice()).unwrap(); + assert_eq!(signature, decoded_signature) + } +} diff --git a/substrate/primitives/core/src/testing.rs b/substrate/primitives/core/src/testing.rs index 25f5f9012c99608d89654a5833f588b90fc16e53..947dcc387fc79605e4c51d9c28e88c2664dd28c7 100644 --- a/substrate/primitives/core/src/testing.rs +++ b/substrate/primitives/core/src/testing.rs @@ -31,6 +31,8 @@ pub const BANDERSNATCH: KeyTypeId = KeyTypeId(*b"band"); pub const BLS377: KeyTypeId = KeyTypeId(*b"bls7"); /// Key type for generic BLS12-381 key. pub const BLS381: KeyTypeId = KeyTypeId(*b"bls8"); +/// Key type for (ECDSA,BLS12-377) key pair +pub const ECDSA_BLS377: KeyTypeId = KeyTypeId(*b"ecb7"); /// Macro for exporting functions from wasm in with the expected signature for using it with the /// wasm executor. This is useful for tests where you need to call a function in wasm. 
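// ---------------------------------------------------------------------------
// Editorial sketch (not part of the patch above): a minimal example of how the
// new (ECDSA,BLS12-377) paired scheme is meant to be used, based on the
// `ecdsa_bls377` module and tests introduced in this diff. It assumes the
// `full_crypto`/`bls-experimental` features and the `sp_core` items shown in
// the test imports (`ecdsa_bls377::Pair`, `KeccakHasher`).

fn ecdsa_bls377_sign_and_verify_sketch() {
	use sp_core::{crypto::Pair as _, ecdsa_bls377::Pair, KeccakHasher};

	// Both components are derived from the same 32-byte seed.
	let pair = Pair::from_seed(b"12345678901234567890123456789012");
	let message = b"Something important";

	// The ECDSA half signs the Keccak-256 hash of the message, while the
	// BLS12-377 half signs the raw message (IETF style).
	let signature = pair.sign_with_hasher::<KeccakHasher>(&message[..]);

	assert!(Pair::verify_with_hasher::<KeccakHasher>(&signature, &message[..], &pair.public()));
}
// ---------------------------------------------------------------------------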
diff --git a/substrate/primitives/crypto/ec-utils/Cargo.toml b/substrate/primitives/crypto/ec-utils/Cargo.toml index 15a6e85abb9675149bad87cfc769d9a8d8b89f7e..1484406e7b2417454e8f1b472e36ae1af71cb82c 100644 --- a/substrate/primitives/crypto/ec-utils/Cargo.toml +++ b/substrate/primitives/crypto/ec-utils/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "sp-crypto-ec-utils" -version = "0.4.0" +version = "0.4.1" authors.workspace = true -description = "Host function interface for common elliptic curve operations in Substrate runtimes" +description = "Host functions for common Arkworks elliptic curve operations" edition.workspace = true license = "Apache-2.0" homepage = "https://substrate.io" @@ -12,51 +12,54 @@ repository.workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -ark-serialize = { version = "0.4.2", default-features = false } -ark-ff = { version = "0.4.2", default-features = false } -ark-ec = { version = "0.4.2", default-features = false } -ark-std = { version = "0.4.0", default-features = false } -ark-bls12-377 = { version = "0.4.0", features = ["curve"], default-features = false } -ark-bls12-381 = { version = "0.4.0", features = ["curve"], default-features = false } -ark-bw6-761 = { version = "0.4.0", default-features = false } -ark-ed-on-bls12-381-bandersnatch = { version = "0.4.0", default-features = false } -ark-ed-on-bls12-377 = { version = "0.4.0", default-features = false } -sp-std = { path = "../../std", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -ark-scale = { version = "0.0.10", features = ["hazmat"], default-features = false } -sp-runtime-interface = { path = "../../runtime-interface", default-features = false} - -[dev-dependencies] -sp-io = { path = "../../io", default-features = false } -ark-algebra-test-templates = { version = "0.4.2", default-features = false } -sp-ark-models = { version = "0.4.1-beta", default-features = false } -sp-ark-bls12-377 = { version = "0.4.1-beta", default-features = false } -sp-ark-bls12-381 = { version = "0.4.1-beta", default-features = false } -sp-ark-bw6-761 = { version = "0.4.1-beta", default-features = false } -sp-ark-ed-on-bls12-377 = { version = "0.4.1-beta", default-features = false } -sp-ark-ed-on-bls12-381-bandersnatch = { version = "0.4.1-beta", default-features = false } +ark-ec = { version = "0.4.2", default-features = false, optional = true } +ark-bls12-377-ext = { version = "0.4.1", default-features = false, optional = true } +ark-bls12-377 = { version = "0.4.0", default-features = false, features = ["curve"], optional = true } +ark-bls12-381-ext = { version = "0.4.1", default-features = false, optional = true } +ark-bls12-381 = { version = "0.4.0", default-features = false, features = ["curve"], optional = true } +ark-bw6-761-ext = { version = "0.4.1", default-features = false, optional = true } +ark-bw6-761 = { version = "0.4.0", default-features = false, optional = true } +ark-ed-on-bls12-381-bandersnatch-ext = { version = "0.4.1", default-features = false, optional = true } +ark-ed-on-bls12-381-bandersnatch = { version = "0.4.0", default-features = false, optional = true } +ark-ed-on-bls12-377-ext = { version = "0.4.1", default-features = false, optional = true } +ark-ed-on-bls12-377 = { version = "0.4.0", default-features = false, optional = true } +ark-scale = { version = "0.0.11", default-features = false, features = ["hazmat"], optional = true } +sp-runtime-interface = { path = "../../runtime-interface", default-features = false, 
optional = true } +sp-std = { path = "../../std", default-features = false, optional = true } [features] default = [ "std" ] std = [ - "ark-algebra-test-templates/std", - "ark-bls12-377/std", - "ark-bls12-381/std", - "ark-bw6-761/std", - "ark-ec/std", - "ark-ed-on-bls12-377/std", - "ark-ed-on-bls12-381-bandersnatch/std", - "ark-ff/std", - "ark-scale/std", - "ark-serialize/std", - "ark-std/std", - "codec/std", - "sp-ark-bls12-377/std", - "sp-ark-bls12-381/std", - "sp-ark-bw6-761/std", - "sp-ark-ed-on-bls12-377/std", - "sp-ark-ed-on-bls12-381-bandersnatch/std", - "sp-io/std", - "sp-runtime-interface/std", - "sp-std/std", + "ark-bls12-377-ext?/std", + "ark-bls12-377?/std", + "ark-bls12-381-ext?/std", + "ark-bls12-381?/std", + "ark-bw6-761-ext?/std", + "ark-bw6-761?/std", + "ark-ec?/parallel", + "ark-ec?/std", + "ark-ed-on-bls12-377-ext?/std", + "ark-ed-on-bls12-377?/std", + "ark-ed-on-bls12-381-bandersnatch-ext?/std", + "ark-ed-on-bls12-381-bandersnatch?/std", + "ark-scale?/std", + "sp-runtime-interface?/std", + "sp-std?/std", +] +common = [ "ark-ec", "ark-scale", "sp-runtime-interface", "sp-std" ] +bls12-377 = [ "ark-bls12-377", "ark-bls12-377-ext", "common" ] +bls12-381 = [ "ark-bls12-381", "ark-bls12-381-ext", "common" ] +bw6-761 = [ "ark-bw6-761", "ark-bw6-761-ext", "common" ] +ed-on-bls12-377 = [ "ark-ed-on-bls12-377", "ark-ed-on-bls12-377-ext", "common" ] +ed-on-bls12-381-bandersnatch = [ + "ark-ed-on-bls12-381-bandersnatch", + "ark-ed-on-bls12-381-bandersnatch-ext", + "common", +] +all-curves = [ + "bls12-377", + "bls12-381", + "bw6-761", + "ed-on-bls12-377", + "ed-on-bls12-381-bandersnatch", ] diff --git a/substrate/primitives/crypto/ec-utils/src/bls12_377.rs b/substrate/primitives/crypto/ec-utils/src/bls12_377.rs index 78f20297299b08492c71dcedec5f3d43a695afd4..8f19a2c4a1911f0d1045d0eebf98958236830e0f 100644 --- a/substrate/primitives/crypto/ec-utils/src/bls12_377.rs +++ b/substrate/primitives/crypto/ec-utils/src/bls12_377.rs @@ -15,89 +15,191 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Support functions for bls12_377 to improve the performance of -//! multi_miller_loop, final_exponentiation, msm's and projective -//! multiplications by host function calls +//! *BLS12-377* types and host functions. -use crate::utils::{ - final_exponentiation_generic, msm_sw_generic, mul_projective_generic, multi_miller_loop_generic, -}; -use ark_bls12_377::{g1, g2, Bls12_377}; +use crate::utils; +use ark_bls12_377_ext::CurveHooks; +use ark_ec::{pairing::Pairing, CurveConfig}; +use sp_runtime_interface::runtime_interface; use sp_std::vec::Vec; -/// Compute a multi miller loop through arkworks -pub fn multi_miller_loop(a: Vec, b: Vec) -> Result, ()> { - multi_miller_loop_generic::(a, b) +/// First pairing group definitions. +pub mod g1 { + pub use ark_bls12_377_ext::g1::{ + G1_GENERATOR_X, G1_GENERATOR_Y, TE_GENERATOR_X, TE_GENERATOR_Y, + }; + /// Group configuration. + pub type Config = ark_bls12_377_ext::g1::Config; + /// Short Weierstrass form point affine representation. + pub type G1Affine = ark_bls12_377_ext::g1::G1Affine; + /// Short Weierstrass form point projective representation. + pub type G1Projective = ark_bls12_377_ext::g1::G1Projective; + /// Short Weierstrass form point affine representation. + pub type G1SWAffine = ark_bls12_377_ext::g1::G1SWAffine; + /// Short Weierstrass form point projective representation. 
+ pub type G1SWProjective = ark_bls12_377_ext::g1::G1SWProjective; + /// Twisted Edwards form point affine representation. + pub type G1TEAffine = ark_bls12_377_ext::g1::G1TEAffine; + /// Twisted Edwards form point projective representation. + pub type G1TEProjective = ark_bls12_377_ext::g1::G1TEProjective; } -/// Compute a final exponentiation through arkworks -pub fn final_exponentiation(target: Vec) -> Result, ()> { - final_exponentiation_generic::(target) +/// Second pairing group definitions. +pub mod g2 { + pub use ark_bls12_377_ext::g2::{ + G2_GENERATOR_X, G2_GENERATOR_X_C0, G2_GENERATOR_X_C1, G2_GENERATOR_Y, G2_GENERATOR_Y_C0, + G2_GENERATOR_Y_C1, + }; + /// Group configuration. + pub type Config = ark_bls12_377_ext::g2::Config; + /// Short Weierstrass form point affine representation. + pub type G2Affine = ark_bls12_377_ext::g2::G2Affine; + /// Short Weierstrass form point projective representation. + pub type G2Projective = ark_bls12_377_ext::g2::G2Projective; } -/// Compute a multi scalar multiplication for short_weierstrass through -/// arkworks on G1. -pub fn msm_g1(bases: Vec, scalars: Vec) -> Result, ()> { - msm_sw_generic::(bases, scalars) -} +pub use self::{ + g1::{Config as G1Config, G1Affine, G1Projective}, + g2::{Config as G2Config, G2Affine, G2Projective}, +}; -/// Compute a multi scalar multiplication for short_weierstrass through -/// arkworks on G2. -pub fn msm_g2(bases: Vec, scalars: Vec) -> Result, ()> { - msm_sw_generic::(bases, scalars) -} +/// Curve hooks jumping into [`host_calls`] host functions. +#[derive(Copy, Clone)] +pub struct HostHooks; -/// Compute a projective scalar multiplication for short_weierstrass -/// through arkworks on G1. -pub fn mul_projective_g1(base: Vec, scalar: Vec) -> Result, ()> { - mul_projective_generic::(base, scalar) -} +/// Configuration for *BLS12-377* curve. +pub type Config = ark_bls12_377_ext::Config; + +/// *BLS12-377* definition. +/// +/// A generic *BLS12* model specialized with *BLS12-377* configuration. +pub type Bls12_377 = ark_bls12_377_ext::Bls12_377; + +impl CurveHooks for HostHooks { + fn bls12_377_multi_miller_loop( + g1: impl Iterator::G1Prepared>, + g2: impl Iterator::G2Prepared>, + ) -> Result<::TargetField, ()> { + let g1 = utils::encode(g1.collect::>()); + let g2 = utils::encode(g2.collect::>()); + let res = host_calls::bls12_377_multi_miller_loop(g1, g2).unwrap_or_default(); + utils::decode(res) + } + + fn bls12_377_final_exponentiation( + target: ::TargetField, + ) -> Result<::TargetField, ()> { + let target = utils::encode(target); + let res = host_calls::bls12_377_final_exponentiation(target).unwrap_or_default(); + utils::decode(res) + } + + fn bls12_377_msm_g1( + bases: &[G1Affine], + scalars: &[::ScalarField], + ) -> Result { + let bases = utils::encode(bases); + let scalars = utils::encode(scalars); + let res = host_calls::bls12_377_msm_g1(bases, scalars).unwrap_or_default(); + utils::decode_proj_sw(res) + } -/// Compute a projective scalar multiplication for short_weierstrass -/// through arkworks on G2. 
-pub fn mul_projective_g2(base: Vec, scalar: Vec) -> Result, ()> { - mul_projective_generic::(base, scalar) + fn bls12_377_msm_g2( + bases: &[G2Affine], + scalars: &[::ScalarField], + ) -> Result { + let bases = utils::encode(bases); + let scalars = utils::encode(scalars); + let res = host_calls::bls12_377_msm_g2(bases, scalars).unwrap_or_default(); + utils::decode_proj_sw(res) + } + + fn bls12_377_mul_projective_g1( + base: &G1Projective, + scalar: &[u64], + ) -> Result { + let base = utils::encode_proj_sw(base); + let scalar = utils::encode(scalar); + let res = host_calls::bls12_377_mul_projective_g1(base, scalar).unwrap_or_default(); + utils::decode_proj_sw(res) + } + + fn bls12_377_mul_projective_g2( + base: &G2Projective, + scalar: &[u64], + ) -> Result { + let base = utils::encode_proj_sw(base); + let scalar = utils::encode(scalar); + let res = host_calls::bls12_377_mul_projective_g2(base, scalar).unwrap_or_default(); + utils::decode_proj_sw(res) + } } -#[cfg(test)] -mod tests { - use super::*; - use ark_algebra_test_templates::*; - use sp_ark_bls12_377::{ - Bls12_377 as Bls12_377Host, G1Projective as G1ProjectiveHost, - G2Projective as G2ProjectiveHost, HostFunctions, - }; +/// Interfaces for working with *Arkworks* *BLS12-377* elliptic curve related types +/// from within the runtime. +/// +/// All types are (de-)serialized through the wrapper types from the `ark-scale` trait, +/// with `ark_scale::{ArkScale, ArkScaleProjective}`. +/// +/// `ArkScale`'s `Usage` generic parameter is expected to be set to "not-validated" +/// and "not-compressed". +#[runtime_interface] +pub trait HostCalls { + /// Pairing multi Miller loop for *BLS12-377*. + /// + /// - Receives encoded: + /// - `a`: `ArkScale>`. + /// - `b`: `ArkScale>`. + /// - Returns encoded: `ArkScale`. + fn bls12_377_multi_miller_loop(a: Vec, b: Vec) -> Result, ()> { + utils::multi_miller_loop::(a, b) + } - #[derive(PartialEq, Eq)] - struct Host; - - impl HostFunctions for Host { - fn bls12_377_multi_miller_loop(a: Vec, b: Vec) -> Result, ()> { - crate::elliptic_curves::bls12_377_multi_miller_loop(a, b) - } - fn bls12_377_final_exponentiation(f12: Vec) -> Result, ()> { - crate::elliptic_curves::bls12_377_final_exponentiation(f12) - } - fn bls12_377_msm_g1(bases: Vec, bigints: Vec) -> Result, ()> { - crate::elliptic_curves::bls12_377_msm_g1(bases, bigints) - } - fn bls12_377_msm_g2(bases: Vec, bigints: Vec) -> Result, ()> { - crate::elliptic_curves::bls12_377_msm_g2(bases, bigints) - } - fn bls12_377_mul_projective_g1(base: Vec, scalar: Vec) -> Result, ()> { - crate::elliptic_curves::bls12_377_mul_projective_g1(base, scalar) - } - fn bls12_377_mul_projective_g2(base: Vec, scalar: Vec) -> Result, ()> { - crate::elliptic_curves::bls12_377_mul_projective_g2(base, scalar) - } + /// Pairing final exponentiation for *BLS12-377.* + /// + /// - Receives encoded: `ArkScale`. + /// - Returns encoded: `ArkScale`. + fn bls12_377_final_exponentiation(f: Vec) -> Result, ()> { + utils::final_exponentiation::(f) } - type Bls12_377 = Bls12_377Host; - type G1Projective = G1ProjectiveHost; - type G2Projective = G2ProjectiveHost; + /// Multi scalar multiplication on *G1* for *BLS12-377*. + /// + /// - Receives encoded: + /// - `bases`: `ArkScale>`. + /// - `scalars`: `ArkScale>`. + /// - Returns encoded: `ArkScaleProjective`. + fn bls12_377_msm_g1(bases: Vec, scalars: Vec) -> Result, ()> { + utils::msm_sw::(bases, scalars) + } + + /// Multi scalar multiplication on *G2* for *BLS12-377*. + /// + /// - Receives encoded: + /// - `bases`: `ArkScale>`. 
+ /// - `scalars`: `ArkScale>`. + /// - Returns encoded: `ArkScaleProjective`. + fn bls12_377_msm_g2(bases: Vec, scalars: Vec) -> Result, ()> { + utils::msm_sw::(bases, scalars) + } - test_group!(g1; G1Projective; sw); - test_group!(g2; G2Projective; sw); - test_group!(pairing_output; ark_ec::pairing::PairingOutput; msm); - test_pairing!(pairing; super::Bls12_377); + /// Projective multiplication on *G1* for *BLS12-377*. + /// + /// - Receives encoded: + /// - `base`: `ArkScaleProjective`. + /// - `scalar`: `ArkScale>`. + /// - Returns encoded: `ArkScaleProjective`. + fn bls12_377_mul_projective_g1(base: Vec, scalar: Vec) -> Result, ()> { + utils::mul_projective_sw::(base, scalar) + } + + /// Projective multiplication on *G2* for *BLS12-377*. + /// + /// - Receives encoded: + /// - `base`: `ArkScaleProjective`. + /// - `scalar`: `ArkScale>`. + /// - Returns encoded: `ArkScaleProjective`. + fn bls12_377_mul_projective_g2(base: Vec, scalar: Vec) -> Result, ()> { + utils::mul_projective_sw::(base, scalar) + } } diff --git a/substrate/primitives/crypto/ec-utils/src/bls12_381.rs b/substrate/primitives/crypto/ec-utils/src/bls12_381.rs index b2ec48a42fc53ee1b33f6ac87abce1d3df0a3a19..99a0289b7ad26b09bcb39e469ec8bf5203e47cb2 100644 --- a/substrate/primitives/crypto/ec-utils/src/bls12_381.rs +++ b/substrate/primitives/crypto/ec-utils/src/bls12_381.rs @@ -15,205 +15,181 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Support functions for bls12_381 to improve the performance of -//! multi_miller_loop, final_exponentiation, msm's and projective -//! multiplications by host function calls +//! *BLS12-381* types and host functions. -use crate::utils::{ - final_exponentiation_generic, msm_sw_generic, mul_projective_generic, multi_miller_loop_generic, -}; -use ark_bls12_381::{g1, g2, Bls12_381}; +use crate::utils; +use ark_bls12_381_ext::CurveHooks; +use ark_ec::{pairing::Pairing, CurveConfig}; +use sp_runtime_interface::runtime_interface; use sp_std::vec::Vec; -/// Compute a multi miller loop through arkworks -pub fn multi_miller_loop(a: Vec, b: Vec) -> Result, ()> { - multi_miller_loop_generic::(a, b) -} - -/// Compute a final exponentiation through arkworks -pub fn final_exponentiation(target: Vec) -> Result, ()> { - final_exponentiation_generic::(target) -} - -/// Compute a multi scalar multiplication for short_weierstrass through -/// arkworks on G1. -pub fn msm_g1(bases: Vec, scalars: Vec) -> Result, ()> { - msm_sw_generic::(bases, scalars) -} - -/// Compute a multi scalar multiplication for short_weierstrass through -/// arkworks on G2. -pub fn msm_g2(bases: Vec, scalars: Vec) -> Result, ()> { - msm_sw_generic::(bases, scalars) +/// First pairing group definitions. +pub mod g1 { + pub use ark_bls12_381_ext::g1::{BETA, G1_GENERATOR_X, G1_GENERATOR_Y}; + /// Group configuration. + pub type Config = ark_bls12_381_ext::g1::Config; + /// Short Weierstrass form point affine representation. + pub type G1Affine = ark_bls12_381_ext::g1::G1Affine; + /// Short Weierstrass form point projective representation. + pub type G1Projective = ark_bls12_381_ext::g1::G1Projective; } -/// Compute a projective scalar multiplication for short_weierstrass -/// through arkworks on G1. -pub fn mul_projective_g1(base: Vec, scalar: Vec) -> Result, ()> { - mul_projective_generic::(base, scalar) +/// Second pairing group definitions. 
+pub mod g2 { + pub use ark_bls12_381_ext::g2::{ + G2_GENERATOR_X, G2_GENERATOR_X_C0, G2_GENERATOR_X_C1, G2_GENERATOR_Y, G2_GENERATOR_Y_C0, + G2_GENERATOR_Y_C1, + }; + /// Group configuration. + pub type Config = ark_bls12_381_ext::g2::Config; + /// Short Weierstrass form point affine representation. + pub type G2Affine = ark_bls12_381_ext::g2::G2Affine; + /// Short Weierstrass form point projective representation. + pub type G2Projective = ark_bls12_381_ext::g2::G2Projective; } -/// Compute a projective scalar multiplication for short_weierstrass -/// through arkworks on G2. -pub fn mul_projective_g2(base: Vec, scalar: Vec) -> Result, ()> { - mul_projective_generic::(base, scalar) -} +pub use self::{ + g1::{Config as G1Config, G1Affine, G1Projective}, + g2::{Config as G2Config, G2Affine, G2Projective}, +}; -#[cfg(test)] -mod tests { - use super::*; - use ark_algebra_test_templates::*; - use ark_ec::{AffineRepr, CurveGroup, Group}; - use ark_ff::{fields::Field, One, Zero}; - use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Compress, Validate}; - use ark_std::{rand::Rng, test_rng, vec, UniformRand}; - use sp_ark_bls12_381::{ - fq::Fq, fq2::Fq2, fr::Fr, Bls12_381 as Bls12_381Host, G1Affine as G1AffineHost, - G1Projective as G1ProjectiveHost, G2Affine as G2AffineHost, - G2Projective as G2ProjectiveHost, HostFunctions, - }; - use sp_ark_models::pairing::PairingOutput; - - #[derive(PartialEq, Eq)] - struct Host; - - impl HostFunctions for Host { - fn bls12_381_multi_miller_loop(a: Vec, b: Vec) -> Result, ()> { - crate::elliptic_curves::bls12_381_multi_miller_loop(a, b) - } - fn bls12_381_final_exponentiation(f12: Vec) -> Result, ()> { - crate::elliptic_curves::bls12_381_final_exponentiation(f12) - } - fn bls12_381_msm_g1(bases: Vec, bigints: Vec) -> Result, ()> { - crate::elliptic_curves::bls12_381_msm_g1(bases, bigints) - } - fn bls12_381_msm_g2(bases: Vec, bigints: Vec) -> Result, ()> { - crate::elliptic_curves::bls12_381_msm_g2(bases, bigints) - } - fn bls12_381_mul_projective_g1(base: Vec, scalar: Vec) -> Result, ()> { - crate::elliptic_curves::bls12_381_mul_projective_g1(base, scalar) - } - fn bls12_381_mul_projective_g2(base: Vec, scalar: Vec) -> Result, ()> { - crate::elliptic_curves::bls12_381_mul_projective_g2(base, scalar) - } +/// Curve hooks jumping into [`host_calls`] host functions. +#[derive(Copy, Clone)] +pub struct HostHooks; + +/// Configuration for *BLS12-381* curve. +pub type Config = ark_bls12_381_ext::Config; + +/// *BLS12-381* definition. +/// +/// A generic *BLS12* model specialized with *BLS12-381* configuration. 
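+/// Pairing, multi scalar multiplication and projective multiplication are delegated
+/// to the host through the [`HostHooks`] curve hooks defined above.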
+pub type Bls12_381 = ark_bls12_381_ext::Bls12_381; + +impl CurveHooks for HostHooks { + fn bls12_381_multi_miller_loop( + g1: impl Iterator::G1Prepared>, + g2: impl Iterator::G2Prepared>, + ) -> Result<::TargetField, ()> { + let g1 = utils::encode(g1.collect::>()); + let g2 = utils::encode(g2.collect::>()); + let res = host_calls::bls12_381_multi_miller_loop(g1, g2).unwrap_or_default(); + utils::decode(res) } - type Bls12_381 = Bls12_381Host; - type G1Projective = G1ProjectiveHost; - type G2Projective = G2ProjectiveHost; - type G1Affine = G1AffineHost; - type G2Affine = G2AffineHost; - - test_group!(g1; G1Projective; sw); - test_group!(g2; G2Projective; sw); - test_group!(pairing_output; PairingOutput; msm); - test_pairing!(ark_pairing; super::Bls12_381); + fn bls12_381_final_exponentiation( + target: ::TargetField, + ) -> Result<::TargetField, ()> { + let target = utils::encode(target); + let res = host_calls::bls12_381_final_exponentiation(target).unwrap_or_default(); + utils::decode(res) + } - #[test] - fn test_g1_endomorphism_beta() { - assert!(sp_ark_bls12_381::g1::BETA.pow([3u64]).is_one()); + fn bls12_381_msm_g1( + bases: &[G1Affine], + scalars: &[::ScalarField], + ) -> Result { + let bases = utils::encode(bases); + let scalars = utils::encode(scalars); + let res = host_calls::bls12_381_msm_g1(bases, scalars).unwrap_or_default(); + utils::decode_proj_sw(res) } - #[test] - fn test_g1_subgroup_membership_via_endomorphism() { - let mut rng = test_rng(); - let generator = G1Projective::rand(&mut rng).into_affine(); - assert!(generator.is_in_correct_subgroup_assuming_on_curve()); + fn bls12_381_msm_g2( + bases: &[G2Affine], + scalars: &[::ScalarField], + ) -> Result { + let bases = utils::encode(bases); + let scalars = utils::encode(scalars); + let res = host_calls::bls12_381_msm_g2(bases, scalars).unwrap_or_default(); + utils::decode_proj_sw(res) } - #[test] - fn test_g1_subgroup_non_membership_via_endomorphism() { - let mut rng = test_rng(); - loop { - let x = Fq::rand(&mut rng); - let greatest = rng.gen(); - - if let Some(p) = G1Affine::get_point_from_x_unchecked(x, greatest) { - if !::is_zero(&p.mul_bigint(Fr::characteristic())) { - assert!(!p.is_in_correct_subgroup_assuming_on_curve()); - return - } - } - } + fn bls12_381_mul_projective_g1( + base: &G1Projective, + scalar: &[u64], + ) -> Result { + let base = utils::encode_proj_sw(base); + let scalar = utils::encode(scalar); + let res = host_calls::bls12_381_mul_projective_g1(base, scalar).unwrap_or_default(); + utils::decode_proj_sw(res) } - #[test] - fn test_g2_subgroup_membership_via_endomorphism() { - let mut rng = test_rng(); - let generator = G2Projective::rand(&mut rng).into_affine(); - assert!(generator.is_in_correct_subgroup_assuming_on_curve()); + fn bls12_381_mul_projective_g2( + base: &G2Projective, + scalar: &[u64], + ) -> Result { + let base = utils::encode_proj_sw(base); + let scalar = utils::encode(scalar); + let res = host_calls::bls12_381_mul_projective_g2(base, scalar).unwrap_or_default(); + utils::decode_proj_sw(res) } +} - #[test] - fn test_g2_subgroup_non_membership_via_endomorphism() { - let mut rng = test_rng(); - loop { - let x = Fq2::rand(&mut rng); - let greatest = rng.gen(); - - if let Some(p) = G2Affine::get_point_from_x_unchecked(x, greatest) { - if !::is_zero(&p.mul_bigint(Fr::characteristic())) { - assert!(!p.is_in_correct_subgroup_assuming_on_curve()); - return - } - } - } +/// Interfaces for working with *Arkworks* *BLS12-381* elliptic curve related types +/// from within the runtime. 
+/// +/// All types are (de-)serialized through the wrapper types from the `ark-scale` trait, +/// with `ark_scale::{ArkScale, ArkScaleProjective}`. +/// +/// `ArkScale`'s `Usage` generic parameter is expected to be set to "not-validated" +/// and "not-compressed". +#[runtime_interface] +pub trait HostCalls { + /// Pairing multi Miller loop for *BLS12-381*. + /// + /// - Receives encoded: + /// - `a`: `ArkScale>`. + /// - `b`: `ArkScale>`. + /// - Returns encoded: `ArkScale`. + fn bls12_381_multi_miller_loop(a: Vec, b: Vec) -> Result, ()> { + utils::multi_miller_loop::(a, b) } - // Test vectors and macro adapted from https://github.com/zkcrypto/bls12_381/blob/e224ad4ea1babfc582ccd751c2bf128611d10936/src/test-data/mod.rs - macro_rules! test_vectors { - ($projective:ident, $affine:ident, $compress:expr, $expected:ident) => { - let mut e = $projective::zero(); - - let mut v = vec![]; - { - let mut expected = $expected; - for _ in 0..1000 { - let e_affine = $affine::from(e); - let mut serialized = vec![0u8; e.serialized_size($compress)]; - e_affine.serialize_with_mode(serialized.as_mut_slice(), $compress).unwrap(); - v.extend_from_slice(&serialized[..]); - - let mut decoded = serialized; - let len_of_encoding = decoded.len(); - (&mut decoded[..]).copy_from_slice(&expected[0..len_of_encoding]); - expected = &expected[len_of_encoding..]; - let decoded = - $affine::deserialize_with_mode(&decoded[..], $compress, Validate::Yes) - .unwrap(); - assert_eq!(e_affine, decoded); - - e += &$projective::generator(); - } - } - - assert_eq!(&v[..], $expected); - }; + /// Pairing final exponentiation for *BLS12-381*. + /// + /// - Receives encoded: `ArkScale<`. + /// - Returns encoded: `ArkScale<` + fn bls12_381_final_exponentiation(f: Vec) -> Result, ()> { + utils::final_exponentiation::(f) } - #[test] - fn g1_compressed_valid_test_vectors() { - let bytes: &'static [u8] = include_bytes!("test-data/g1_compressed_valid_test_vectors.dat"); - test_vectors!(G1Projective, G1Affine, Compress::Yes, bytes); + /// Multi scalar multiplication on *G1* for *BLS12-381* + /// + /// - Receives encoded: + /// - `bases`: `ArkScale>`. + /// - `scalars`: `ArkScale>`. + /// - Returns encoded: `ArkScaleProjective`. + fn bls12_381_msm_g1(bases: Vec, scalars: Vec) -> Result, ()> { + utils::msm_sw::(bases, scalars) } - #[test] - fn g1_uncompressed_valid_test_vectors() { - let bytes: &'static [u8] = - include_bytes!("test-data/g1_uncompressed_valid_test_vectors.dat"); - test_vectors!(G1Projective, G1Affine, Compress::No, bytes); + /// Multi scalar multiplication on *G2* for *BLS12-381* + /// + /// - Receives encoded: + /// - `bases`: `ArkScale>`. + /// - `scalars`: `ArkScale>`. + /// - Returns encoded: `ArkScaleProjective`. + fn bls12_381_msm_g2(bases: Vec, scalars: Vec) -> Result, ()> { + utils::msm_sw::(bases, scalars) } - #[test] - fn g2_compressed_valid_test_vectors() { - let bytes: &'static [u8] = include_bytes!("test-data/g2_compressed_valid_test_vectors.dat"); - test_vectors!(G2Projective, G2Affine, Compress::Yes, bytes); + /// Projective multiplication on *G1* for *BLS12-381*. + /// + /// - Receives encoded: + /// - `base`: `ArkScaleProjective`. + /// - `scalar`: `ArkScale>`. + /// - Returns encoded: `ArkScaleProjective`. 
+ fn bls12_381_mul_projective_g1(base: Vec, scalar: Vec) -> Result, ()> { + utils::mul_projective_sw::(base, scalar) } - #[test] - fn g2_uncompressed_valid_test_vectors() { - let bytes: &'static [u8] = - include_bytes!("test-data/g2_uncompressed_valid_test_vectors.dat"); - test_vectors!(G2Projective, G2Affine, Compress::No, bytes); + /// Projective multiplication on *G2* for *BLS12-381* + /// + /// - Receives encoded: + /// - `base`: `ArkScaleProjective`. + /// - `scalar`: `ArkScale>`. + /// - Returns encoded: `ArkScaleProjective`. + fn bls12_381_mul_projective_g2(base: Vec, scalar: Vec) -> Result, ()> { + utils::mul_projective_sw::(base, scalar) } } diff --git a/substrate/primitives/crypto/ec-utils/src/bw6_761.rs b/substrate/primitives/crypto/ec-utils/src/bw6_761.rs index 4ad26fc54b1e0c13727f65dec83609853b9a8fce..a68abf6e43e0384a1afe6beafdd99963a6fc48ff 100644 --- a/substrate/primitives/crypto/ec-utils/src/bw6_761.rs +++ b/substrate/primitives/crypto/ec-utils/src/bw6_761.rs @@ -15,89 +15,172 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Support functions for bw6_761 to improve the performance of -//! multi_miller_loop, final_exponentiation, msm's and projective -//! multiplications by host function calls. +//! *BW6-761* types and host functions. -use crate::utils::{ - final_exponentiation_generic, msm_sw_generic, mul_projective_generic, multi_miller_loop_generic, -}; -use ark_bw6_761::{g1, g2, BW6_761}; +use crate::utils; +use ark_bw6_761_ext::CurveHooks; +use ark_ec::{pairing::Pairing, CurveConfig}; +use sp_runtime_interface::runtime_interface; use sp_std::vec::Vec; -/// Compute a multi miller loop through arkworks -pub fn multi_miller_loop(a: Vec, b: Vec) -> Result, ()> { - multi_miller_loop_generic::(a, b) +/// First pairing group definitions. +pub mod g1 { + pub use ark_bw6_761_ext::g1::{G1_GENERATOR_X, G1_GENERATOR_Y}; + /// Group configuration. + pub type Config = ark_bw6_761_ext::g1::Config; + /// Short Weierstrass form point affine representation. + pub type G1Affine = ark_bw6_761_ext::g1::G1Affine; + /// Short Weierstrass form point projective representation. + pub type G1Projective = ark_bw6_761_ext::g1::G1Projective; } -/// Compute a final exponentiation through arkworks -pub fn final_exponentiation(target: Vec) -> Result, ()> { - final_exponentiation_generic::(target) +/// Second pairing group definitions. +pub mod g2 { + pub use ark_bw6_761_ext::g2::{G2_GENERATOR_X, G2_GENERATOR_Y}; + /// Group configuration. + pub type Config = ark_bw6_761_ext::g2::Config; + /// Short Weierstrass form point affine representation. + pub type G2Affine = ark_bw6_761_ext::g2::G2Affine; + /// Short Weierstrass form point projective representation. + pub type G2Projective = ark_bw6_761_ext::g2::G2Projective; } -/// Compute a multi scalar multiplication for short_weierstrass through -/// arkworks on G1. -pub fn msm_g1(bases: Vec, scalars: Vec) -> Result, ()> { - msm_sw_generic::(bases, scalars) -} +pub use self::{ + g1::{Config as G1Config, G1Affine, G1Projective}, + g2::{Config as G2Config, G2Affine, G2Projective}, +}; -/// Compute a multi scalar multiplication for short_weierstrass through -/// arkworks on G2. -pub fn msm_g2(bases: Vec, scalars: Vec) -> Result, ()> { - msm_sw_generic::(bases, scalars) -} +/// Curve hooks jumping into [`host_calls`] host functions. +#[derive(Copy, Clone)] +pub struct HostHooks; -/// Compute a projective scalar multiplication for short_weierstrass through -/// arkworks on G1. 
-pub fn mul_projective_g1(base: Vec, scalar: Vec) -> Result, ()> { - mul_projective_generic::(base, scalar) -} +/// Configuration for *BW6-361* curve. +pub type Config = ark_bw6_761_ext::Config; + +/// *BW6-361* definition. +/// +/// A generic *BW6* model specialized with *BW6-761* configuration. +pub type BW6_761 = ark_bw6_761_ext::BW6_761; -/// Compute a projective scalar multiplication for short_weierstrass through -/// arkworks on G2. -pub fn mul_projective_g2(base: Vec, scalar: Vec) -> Result, ()> { - mul_projective_generic::(base, scalar) +impl CurveHooks for HostHooks { + fn bw6_761_multi_miller_loop( + g1: impl Iterator::G1Prepared>, + g2: impl Iterator::G2Prepared>, + ) -> Result<::TargetField, ()> { + let g1 = utils::encode(g1.collect::>()); + let g2 = utils::encode(g2.collect::>()); + let res = host_calls::bw6_761_multi_miller_loop(g1, g2).unwrap_or_default(); + utils::decode(res) + } + + fn bw6_761_final_exponentiation( + target: ::TargetField, + ) -> Result<::TargetField, ()> { + let target = utils::encode(target); + let res = host_calls::bw6_761_final_exponentiation(target).unwrap_or_default(); + utils::decode(res) + } + + fn bw6_761_msm_g1( + bases: &[G1Affine], + scalars: &[::ScalarField], + ) -> Result { + let bases = utils::encode(bases); + let scalars = utils::encode(scalars); + let res = host_calls::bw6_761_msm_g1(bases, scalars).unwrap_or_default(); + utils::decode_proj_sw(res) + } + + fn bw6_761_msm_g2( + bases: &[G2Affine], + scalars: &[::ScalarField], + ) -> Result { + let bases = utils::encode(bases); + let scalars = utils::encode(scalars); + let res = host_calls::bw6_761_msm_g2(bases, scalars).unwrap_or_default(); + utils::decode_proj_sw(res) + } + + fn bw6_761_mul_projective_g1(base: &G1Projective, scalar: &[u64]) -> Result { + let base = utils::encode_proj_sw(base); + let scalar = utils::encode(scalar); + let res = host_calls::bw6_761_mul_projective_g1(base, scalar).unwrap_or_default(); + utils::decode_proj_sw(res) + } + + fn bw6_761_mul_projective_g2(base: &G2Projective, scalar: &[u64]) -> Result { + let base = utils::encode_proj_sw(base); + let scalar = utils::encode(scalar); + let res = host_calls::bw6_761_mul_projective_g2(base, scalar).unwrap_or_default(); + utils::decode_proj_sw(res) + } } -#[cfg(test)] -mod tests { - use super::*; - use ark_algebra_test_templates::*; - use sp_ark_bw6_761::{ - G1Projective as G1ProjectiveHost, G2Projective as G2ProjectiveHost, HostFunctions, - BW6_761 as BW6_761Host, - }; - - #[derive(PartialEq, Eq)] - struct Host; - - impl HostFunctions for Host { - fn bw6_761_multi_miller_loop(a: Vec, b: Vec) -> Result, ()> { - crate::elliptic_curves::bw6_761_multi_miller_loop(a, b) - } - fn bw6_761_final_exponentiation(f12: Vec) -> Result, ()> { - crate::elliptic_curves::bw6_761_final_exponentiation(f12) - } - fn bw6_761_msm_g1(bases: Vec, bigints: Vec) -> Result, ()> { - crate::elliptic_curves::bw6_761_msm_g1(bases, bigints) - } - fn bw6_761_msm_g2(bases: Vec, bigints: Vec) -> Result, ()> { - crate::elliptic_curves::bw6_761_msm_g2(bases, bigints) - } - fn bw6_761_mul_projective_g1(base: Vec, scalar: Vec) -> Result, ()> { - crate::elliptic_curves::bw6_761_mul_projective_g1(base, scalar) - } - fn bw6_761_mul_projective_g2(base: Vec, scalar: Vec) -> Result, ()> { - crate::elliptic_curves::bw6_761_mul_projective_g2(base, scalar) - } +/// Interfaces for working with *Arkworks* *BW6-761* elliptic curve related types +/// from within the runtime. 
+/// +/// All types are (de-)serialized through the wrapper types from the `ark-scale` trait, +/// with `ark_scale::{ArkScale, ArkScaleProjective}`. +/// +/// `ArkScale`'s `Usage` generic parameter is expected to be set to "not-validated" +/// and "not-compressed". +#[runtime_interface] +pub trait HostCalls { + /// Pairing multi Miller loop for *BW6-761*. + /// + /// - Receives encoded: + /// - `a: ArkScale>`. + /// - `b: ArkScale>`. + /// - Returns encoded: `ArkScale`. + fn bw6_761_multi_miller_loop(a: Vec, b: Vec) -> Result, ()> { + utils::multi_miller_loop::(a, b) + } + + /// Pairing final exponentiation for *BW6-761*. + /// + /// - Receives encoded: `ArkScale`. + /// - Returns encoded: `ArkScale`. + fn bw6_761_final_exponentiation(f: Vec) -> Result, ()> { + utils::final_exponentiation::(f) } - type BW6_761 = BW6_761Host; - type G1Projective = G1ProjectiveHost; - type G2Projective = G2ProjectiveHost; + /// Multi scalar multiplication on *G1* for *BW6-761*. + /// + /// - Receives encoded: + /// - `bases`: `ArkScale>`. + /// - `scalars`: `ArkScale`. + /// - Returns encoded: `ArkScaleProjective`. + fn bw6_761_msm_g1(bases: Vec, scalars: Vec) -> Result, ()> { + utils::msm_sw::(bases, scalars) + } + + /// Multi scalar multiplication on *G2* for *BW6-761*. + /// + /// - Receives encoded: + /// - `bases`: `ArkScale>`. + /// - `scalars`: `ArkScale>`. + /// - Returns encoded: `ArkScaleProjective`. + fn bw6_761_msm_g2(bases: Vec, scalars: Vec) -> Result, ()> { + utils::msm_sw::(bases, scalars) + } - test_group!(g1; G1Projective; sw); - test_group!(g2; G2Projective; sw); - test_group!(pairing_output; ark_ec::pairing::PairingOutput; msm); - test_pairing!(pairing; super::BW6_761); + /// Projective multiplication on *G1* for *BW6-761*. + /// + /// - Receives encoded: + /// - `base`: `ArkScaleProjective`. + /// - `scalar`: `ArkScale>`. + /// - Returns encoded: `ArkScaleProjective`. + fn bw6_761_mul_projective_g1(base: Vec, scalar: Vec) -> Result, ()> { + utils::mul_projective_sw::(base, scalar) + } + + /// Projective multiplication on *G2* for *BW6-761*. + /// + /// - Receives encoded: + /// - `base`: `ArkScaleProjective`. + /// - `scalar`: `ArkScale>`. + /// - Returns encoded: `ArkScaleProjective`. + fn bw6_761_mul_projective_g2(base: Vec, scalar: Vec) -> Result, ()> { + utils::mul_projective_sw::(base, scalar) + } } diff --git a/substrate/primitives/crypto/ec-utils/src/ed_on_bls12_377.rs b/substrate/primitives/crypto/ec-utils/src/ed_on_bls12_377.rs index e69dbd7984645918d946e48418a5c433114b5642..a03be41b854284d5a02956f5ae099f1b2c21d4ed 100644 --- a/substrate/primitives/crypto/ec-utils/src/ed_on_bls12_377.rs +++ b/substrate/primitives/crypto/ec-utils/src/ed_on_bls12_377.rs @@ -15,42 +15,74 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Support functions for ed_on_bls12_377 to improve the performance of -//! msm and projective multiplication by host function calls +//! *Ed-on-BLS12-377* types and host functions. -use crate::utils::{msm_te_generic, mul_projective_te_generic}; -use ark_ed_on_bls12_377::EdwardsConfig; +use crate::utils; +use ark_ec::CurveConfig; +use ark_ed_on_bls12_377_ext::CurveHooks; +use sp_runtime_interface::runtime_interface; use sp_std::vec::Vec; -/// Compute a multi scalar mulitplication for twisted_edwards through -/// arkworks. -pub fn msm(bases: Vec, scalars: Vec) -> Result, ()> { - msm_te_generic::(bases, scalars) -} +/// Curve hooks jumping into [`host_calls`] host functions. 
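+/// Each hook encodes its inputs with the `ark-scale` wrapper types, invokes the
+/// corresponding host function and decodes the result; any failure surfaces as `Err(())`.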
+#[derive(Copy, Clone)] +pub struct HostHooks; + +/// Group configuration. +pub type EdwardsConfig = ark_ed_on_bls12_377_ext::EdwardsConfig; +/// Twisted Edwards form point affine representation. +pub type EdwardsAffine = ark_ed_on_bls12_377_ext::EdwardsAffine; +/// Twisted Edwards form point projective representation. +pub type EdwardsProjective = ark_ed_on_bls12_377_ext::EdwardsProjective; + +impl CurveHooks for HostHooks { + fn ed_on_bls12_377_msm( + bases: &[EdwardsAffine], + scalars: &[::ScalarField], + ) -> Result { + let bases = utils::encode(bases); + let scalars = utils::encode(scalars); + let res = host_calls::ed_on_bls12_377_te_msm(bases, scalars).unwrap_or_default(); + utils::decode_proj_te(res) + } -/// Compute a projective scalar multiplication for twisted_edwards -/// through arkworks. -pub fn mul_projective(base: Vec, scalar: Vec) -> Result, ()> { - mul_projective_te_generic::(base, scalar) + fn ed_on_bls12_377_mul_projective( + base: &EdwardsProjective, + scalar: &[u64], + ) -> Result { + let base = utils::encode_proj_te(base); + let scalar = utils::encode(scalar); + let res = host_calls::ed_on_bls12_377_te_mul_projective(base, scalar).unwrap_or_default(); + utils::decode_proj_te(res) + } } -#[cfg(test)] -mod tests { - use super::*; - use ark_algebra_test_templates::*; - use sp_ark_ed_on_bls12_377::{EdwardsProjective as EdwardsProjectiveHost, HostFunctions}; - - struct Host {} - - impl HostFunctions for Host { - fn ed_on_bls12_377_msm(bases: Vec, scalars: Vec) -> Result, ()> { - crate::elliptic_curves::ed_on_bls12_377_msm(bases, scalars) - } - fn ed_on_bls12_377_mul_projective(base: Vec, scalar: Vec) -> Result, ()> { - crate::elliptic_curves::ed_on_bls12_377_mul_projective(base, scalar) - } +/// Interfaces for working with *Arkworks* *Ed-on-BLS12-377* elliptic curve +/// related types from within the runtime. +/// +/// All types are (de-)serialized through the wrapper types from the `ark-scale` trait, +/// with `ark_scale::{ArkScale, ArkScaleProjective}`. +/// +/// `ArkScale`'s `Usage` generic parameter is expected to be set to "not-validated" +/// and "not-compressed". +#[runtime_interface] +pub trait HostCalls { + /// Twisted Edwards multi scalar multiplication for *Ed-on-BLS12-377*. + /// + /// - Receives encoded: + /// - `base`: `ArkScaleProjective`. + /// - `scalars`: `ArkScale>`. + /// - Returns encoded: `ArkScaleProjective`. + fn ed_on_bls12_377_te_msm(bases: Vec, scalars: Vec) -> Result, ()> { + utils::msm_te::(bases, scalars) } - type EdwardsProjective = EdwardsProjectiveHost; - test_group!(te; EdwardsProjective; te); + /// Twisted Edwards projective multiplication for *Ed-on-BLS12-377*. + /// + /// - Receives encoded: + /// - `base`: `ArkScaleProjective`. + /// - `scalar`: `ArkScale>`. + /// - Returns encoded: `ArkScaleProjective`. + fn ed_on_bls12_377_te_mul_projective(base: Vec, scalar: Vec) -> Result, ()> { + utils::mul_projective_te::(base, scalar) + } } diff --git a/substrate/primitives/crypto/ec-utils/src/ed_on_bls12_381_bandersnatch.rs b/substrate/primitives/crypto/ec-utils/src/ed_on_bls12_381_bandersnatch.rs index 7e9cd87e543d3731a4942f793787522c9677a9c2..9d63f35876521091fd757c9e7ca15a446fd1f1e9 100644 --- a/substrate/primitives/crypto/ec-utils/src/ed_on_bls12_381_bandersnatch.rs +++ b/substrate/primitives/crypto/ec-utils/src/ed_on_bls12_381_bandersnatch.rs @@ -15,80 +15,139 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Support functions for ed_on_bls12_381_bandersnatch to improve the -//! 
performance of msm' and projective multiplications by host function -//! calls. +//! Elliptic Curves host functions to handle some of the *Arkworks* *Ed-on-BLS12-381-Bandersnatch* +//! computationally expensive operations. -use crate::utils::{ - msm_sw_generic, msm_te_generic, mul_projective_generic, mul_projective_te_generic, -}; -use ark_ed_on_bls12_381_bandersnatch::BandersnatchConfig; +use crate::utils; +use ark_ec::CurveConfig; +use ark_ed_on_bls12_381_bandersnatch_ext::CurveHooks; +use sp_runtime_interface::runtime_interface; use sp_std::vec::Vec; -/// Compute a multi scalar multiplication for short_weierstrass through -/// arkworks. -pub fn sw_msm(bases: Vec, scalars: Vec) -> Result, ()> { - msm_sw_generic::(bases, scalars) -} +/// Curve hooks jumping into [`host_calls`] host functions. +#[derive(Copy, Clone)] +pub struct HostHooks; -/// Compute a multi scalar mulitplication for twisted_edwards through -/// arkworks. -pub fn te_msm(bases: Vec, scalars: Vec) -> Result, ()> { - msm_te_generic::(bases, scalars) -} +/// Group configuration. +pub type BandersnatchConfig = ark_ed_on_bls12_381_bandersnatch_ext::BandersnatchConfig; +/// Group configuration for Twisted Edwards form (equal to [`BandersnatchConfig`]). +pub type EdwardsConfig = ark_ed_on_bls12_381_bandersnatch_ext::EdwardsConfig; +/// Twisted Edwards form point affine representation. +pub type EdwardsAffine = ark_ed_on_bls12_381_bandersnatch_ext::EdwardsAffine; +/// Twisted Edwards form point projective representation. +pub type EdwardsProjective = ark_ed_on_bls12_381_bandersnatch_ext::EdwardsProjective; +/// Group configuration for Short Weierstrass form (equal to [`BandersnatchConfig`]). +pub type SWConfig = ark_ed_on_bls12_381_bandersnatch_ext::SWConfig; +/// Short Weierstrass form point affine representation. +pub type SWAffine = ark_ed_on_bls12_381_bandersnatch_ext::SWAffine; +/// Short Weierstrass form point projective representation. +pub type SWProjective = ark_ed_on_bls12_381_bandersnatch_ext::SWProjective; -/// Compute a projective scalar multiplication for short_weierstrass -/// through arkworks. -pub fn sw_mul_projective(base: Vec, scalar: Vec) -> Result, ()> { - mul_projective_generic::(base, scalar) -} +impl CurveHooks for HostHooks { + fn ed_on_bls12_381_bandersnatch_te_msm( + bases: &[EdwardsAffine], + scalars: &[::ScalarField], + ) -> Result { + let bases = utils::encode(bases); + let scalars = utils::encode(scalars); + let res = + host_calls::ed_on_bls12_381_bandersnatch_te_msm(bases, scalars).unwrap_or_default(); + utils::decode_proj_te(res) + } -/// Compute a projective scalar multiplication for twisted_edwards -/// through arkworks. 
-pub fn te_mul_projective(base: Vec, scalar: Vec) -> Result, ()> { - mul_projective_te_generic::(base, scalar) -} + fn ed_on_bls12_381_bandersnatch_te_mul_projective( + base: &EdwardsProjective, + scalar: &[u64], + ) -> Result { + let base = utils::encode_proj_te(base); + let scalar = utils::encode(scalar); + let res = host_calls::ed_on_bls12_381_bandersnatch_te_mul_projective(base, scalar) + .unwrap_or_default(); + utils::decode_proj_te(res) + } -#[cfg(test)] -mod tests { - use super::*; - use ark_algebra_test_templates::*; - use sp_ark_ed_on_bls12_381_bandersnatch::{ - EdwardsProjective as EdwardsProjectiveHost, HostFunctions, SWProjective as SWProjectiveHost, - }; + fn ed_on_bls12_381_bandersnatch_sw_msm( + bases: &[SWAffine], + scalars: &[::ScalarField], + ) -> Result { + let bases = utils::encode(bases); + let scalars = utils::encode(scalars); + let res = + host_calls::ed_on_bls12_381_bandersnatch_sw_msm(bases, scalars).unwrap_or_default(); + utils::decode_proj_sw(res) + } - pub struct Host {} + fn ed_on_bls12_381_bandersnatch_sw_mul_projective( + base: &SWProjective, + scalar: &[u64], + ) -> Result { + let base = utils::encode_proj_sw(base); + let scalar = utils::encode(scalar); + let res = host_calls::ed_on_bls12_381_bandersnatch_sw_mul_projective(base, scalar) + .unwrap_or_default(); + utils::decode_proj_sw(res) + } +} + +/// Interfaces for working with *Arkworks* *Ed-on-BLS12-381-Bandersnatch* elliptic curve +/// related types from within the runtime. +/// +/// All types are (de-)serialized through the wrapper types from the `ark-scale` trait, +/// with `ark_scale::{ArkScale, ArkScaleProjective}`. +/// +/// `ArkScale`'s `Usage` generic parameter is expected to be set to "not-validated" +/// and "not-compressed". +#[runtime_interface] +pub trait HostCalls { + /// Twisted Edwards multi scalar multiplication for *Ed-on-BLS12-381-Bandersnatch*. + /// + /// - Receives encoded: + /// - `base`: `ArkScaleProjective`. + /// - `scalars`: `ArkScale>`. + /// - Returns encoded: `ArkScaleProjective`. + fn ed_on_bls12_381_bandersnatch_te_msm( + bases: Vec, + scalars: Vec, + ) -> Result, ()> { + utils::msm_te::(bases, scalars) + } - impl HostFunctions for Host { - fn ed_on_bls12_381_bandersnatch_te_msm( - bases: Vec, - scalars: Vec, - ) -> Result, ()> { - crate::elliptic_curves::ed_on_bls12_381_bandersnatch_te_msm(bases, scalars) - } - fn ed_on_bls12_381_bandersnatch_sw_msm( - bases: Vec, - scalars: Vec, - ) -> Result, ()> { - crate::elliptic_curves::ed_on_bls12_381_bandersnatch_sw_msm(bases, scalars) - } - fn ed_on_bls12_381_bandersnatch_te_mul_projective( - base: Vec, - scalar: Vec, - ) -> Result, ()> { - crate::elliptic_curves::ed_on_bls12_381_bandersnatch_te_mul_projective(base, scalar) - } - fn ed_on_bls12_381_bandersnatch_sw_mul_projective( - base: Vec, - scalar: Vec, - ) -> Result, ()> { - crate::elliptic_curves::ed_on_bls12_381_bandersnatch_sw_mul_projective(base, scalar) - } + /// Twisted Edwards projective multiplication for *Ed-on-BLS12-381-Bandersnatch*. + /// + /// - Receives encoded: + /// - `base`: `ArkScaleProjective`. + /// - `scalar`: `ArkScale>`. + /// - Returns encoded: `ArkScaleProjective`. + fn ed_on_bls12_381_bandersnatch_te_mul_projective( + base: Vec, + scalar: Vec, + ) -> Result, ()> { + utils::mul_projective_te::(base, scalar) } - type EdwardsProjective = EdwardsProjectiveHost; - type SWProjective = SWProjectiveHost; + /// Short Weierstrass multi scalar multiplication for *Ed-on-BLS12-381-Bandersnatch*. + /// + /// - Receives encoded: + /// - `bases`: `ArkScale>`. 
+ /// - `scalars`: `ArkScale>`. + /// - Returns encoded: `ArkScaleProjective`. + fn ed_on_bls12_381_bandersnatch_sw_msm( + bases: Vec, + scalars: Vec, + ) -> Result, ()> { + utils::msm_sw::(bases, scalars) + } - test_group!(sw; SWProjective; sw); - test_group!(te; EdwardsProjective; te); + /// Short Weierstrass projective multiplication for *Ed-on-BLS12-381-Bandersnatch*. + /// + /// - Receives encoded: + /// - `base`: `ArkScaleProjective`. + /// - `scalar`: `ArkScale>`. + /// - Returns encoded: `ArkScaleProjective`. + fn ed_on_bls12_381_bandersnatch_sw_mul_projective( + base: Vec, + scalar: Vec, + ) -> Result, ()> { + utils::mul_projective_sw::(base, scalar) + } } diff --git a/substrate/primitives/crypto/ec-utils/src/lib.rs b/substrate/primitives/crypto/ec-utils/src/lib.rs index 22e24e61f7b357d338e4cd85f411eb8ae43e82e5..970ad71765a5b27791a84a4fb963f8e4386b9297 100644 --- a/substrate/primitives/crypto/ec-utils/src/lib.rs +++ b/substrate/primitives/crypto/ec-utils/src/lib.rs @@ -15,251 +15,39 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! The main elliptic curves trait, allowing Substrate to call into host functions -//! for operations on elliptic curves. +//! This crate offers elliptic curves types which are compatible with the +//! [Arkworks](https://github.com/arkworks-rs) library functionalities. +//! +//! The implementation has been primarily designed to be used in slow hosted +//! targets (e.g. wasm32) and offloads the most computationally expensive +//! operations to the host by leveraging the +//! [arkworks-extensions](https://github.com/paritytech/arkworks-extensions) +//! library and Substrate's host functions. +//! +//! The exported types are organized and named in a way that mirrors the structure +//! of the types in the original Arkworks library. This design choice aims to make +//! it easier for users already familiar with the library to understand and utilize +//! the exported types effectively. #![warn(missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] -use sp_runtime_interface::runtime_interface; -use sp_std::vec::Vec; - +#[cfg(feature = "bls12-377")] pub mod bls12_377; +#[cfg(feature = "bls12-381")] pub mod bls12_381; +#[cfg(feature = "bw6-761")] pub mod bw6_761; +#[cfg(feature = "ed-on-bls12-377")] pub mod ed_on_bls12_377; +#[cfg(feature = "ed-on-bls12-381-bandersnatch")] pub mod ed_on_bls12_381_bandersnatch; -mod utils; - -/// Interfaces for working with elliptic curves related types from within the runtime. 
-/// All type are (de-)serialized through the wrapper types from the ark-scale trait, -/// with ark_scale::{ArkScale, ArkScaleProjective}; -#[runtime_interface] -pub trait EllipticCurves { - /// Compute a multi Miller loop for bls12_37 - /// Receives encoded: - /// a: ArkScale>> - /// b: ArkScale>> - /// Returns encoded: ArkScale>> - fn bls12_377_multi_miller_loop(a: Vec, b: Vec) -> Result, ()> { - bls12_377::multi_miller_loop(a, b) - } - - /// Compute a final exponentiation for bls12_377 - /// Receives encoded: ArkScale>> - /// Returns encoded: ArkScale>> - fn bls12_377_final_exponentiation(f12: Vec) -> Result, ()> { - bls12_377::final_exponentiation(f12) - } - - /// Compute a projective multiplication on G1 for bls12_377 - /// Receives encoded: - /// base: ArkScaleProjective - /// scalar: ArkScale<&[u64]> - /// Returns encoded: ArkScaleProjective - fn bls12_377_mul_projective_g1(base: Vec, scalar: Vec) -> Result, ()> { - bls12_377::mul_projective_g1(base, scalar) - } - - /// Compute a projective multiplication on G2 for bls12_377 - /// through arkworks on G2 - /// Receives encoded: - /// base: ArkScaleProjective - /// scalar: ArkScale<&[u64]> - /// Returns encoded: ArkScaleProjective - fn bls12_377_mul_projective_g2(base: Vec, scalar: Vec) -> Result, ()> { - bls12_377::mul_projective_g2(base, scalar) - } - - /// Compute a msm on G1 for bls12_377 - /// Receives encoded: - /// bases: ArkScale<&[ark_bls12_377::G1Affine]> - /// scalars: ArkScale<&[ark_bls12_377::Fr]> - /// Returns encoded: ArkScaleProjective - fn bls12_377_msm_g1(bases: Vec, scalars: Vec) -> Result, ()> { - bls12_377::msm_g1(bases, scalars) - } - - /// Compute a msm on G2 for bls12_377 - /// Receives encoded: - /// bases: ArkScale<&[ark_bls12_377::G2Affine]> - /// scalars: ArkScale<&[ark_bls12_377::Fr]> - /// Returns encoded: ArkScaleProjective - fn bls12_377_msm_g2(bases: Vec, scalars: Vec) -> Result, ()> { - bls12_377::msm_g2(bases, scalars) - } - - /// Compute a multi Miller loop on bls12_381 - /// Receives encoded: - /// a: ArkScale>> - /// b: ArkScale>> - /// Returns encoded: ArkScale>> - fn bls12_381_multi_miller_loop(a: Vec, b: Vec) -> Result, ()> { - bls12_381::multi_miller_loop(a, b) - } - - /// Compute a final exponentiation on bls12_381 - /// Receives encoded: ArkScale>> - /// Returns encoded:ArkScale>> - fn bls12_381_final_exponentiation(f12: Vec) -> Result, ()> { - bls12_381::final_exponentiation(f12) - } - - /// Compute a projective multiplication on G1 for bls12_381 - /// Receives encoded: - /// base: ArkScaleProjective - /// scalar: ArkScale<&[u64]> - /// Returns encoded: ArkScaleProjective - fn bls12_381_mul_projective_g1(base: Vec, scalar: Vec) -> Result, ()> { - bls12_381::mul_projective_g1(base, scalar) - } - - /// Compute a projective multiplication on G2 for bls12_381 - /// Receives encoded: - /// base: ArkScaleProjective - /// scalar: ArkScale<&[u64]> - /// Returns encoded: ArkScaleProjective - fn bls12_381_mul_projective_g2(base: Vec, scalar: Vec) -> Result, ()> { - bls12_381::mul_projective_g2(base, scalar) - } - - /// Compute a msm on G1 for bls12_381 - /// Receives encoded: - /// bases: ArkScale<&[ark_bls12_381::G1Affine]> - /// scalars: ArkScale<&[ark_bls12_381::Fr]> - /// Returns encoded: ArkScaleProjective - fn bls12_381_msm_g1(bases: Vec, scalars: Vec) -> Result, ()> { - bls12_381::msm_g1(bases, scalars) - } - /// Compute a msm on G2 for bls12_381 - /// Receives encoded: - /// bases: ArkScale<&[ark_bls12_381::G2Affine]> - /// scalars: ArkScale<&[ark_bls12_381::Fr]> - /// Returns encoded: 
ArkScaleProjective - fn bls12_381_msm_g2(bases: Vec, scalars: Vec) -> Result, ()> { - bls12_381::msm_g2(bases, scalars) - } - - /// Compute a multi Miller loop on bw6_761 - /// Receives encoded: - /// a: ArkScale>> - /// b: ArkScale>> - /// Returns encoded: ArkScale>> - fn bw6_761_multi_miller_loop(a: Vec, b: Vec) -> Result, ()> { - bw6_761::multi_miller_loop(a, b) - } - - /// Compute a final exponentiation on bw6_761 - /// Receives encoded: ArkScale>> - /// Returns encoded: ArkScale>> - fn bw6_761_final_exponentiation(f12: Vec) -> Result, ()> { - bw6_761::final_exponentiation(f12) - } - - /// Compute a projective multiplication on G1 for bw6_761 - /// Receives encoded: - /// base: ArkScaleProjective - /// scalar: ArkScale<&[u64]> - /// Returns encoded: ArkScaleProjective - fn bw6_761_mul_projective_g1(base: Vec, scalar: Vec) -> Result, ()> { - bw6_761::mul_projective_g1(base, scalar) - } - - /// Compute a projective multiplication on G2 for bw6_761 - /// Receives encoded: - /// base: ArkScaleProjective - /// scalar: ArkScale<&[u64]> - /// Returns encoded: ArkScaleProjective - fn bw6_761_mul_projective_g2(base: Vec, scalar: Vec) -> Result, ()> { - bw6_761::mul_projective_g2(base, scalar) - } - - /// Compute a msm on G1 for bw6_761 - /// Receives encoded: - /// bases: ArkScale<&[ark_bw6_761::G1Affine]> - /// scalars: ArkScale<&[ark_bw6_761::Fr]> - /// Returns encoded: ArkScaleProjective - fn bw6_761_msm_g1(bases: Vec, bigints: Vec) -> Result, ()> { - bw6_761::msm_g1(bases, bigints) - } - - /// Compute a msm on G2 for bw6_761 - /// Receives encoded: - /// bases: ArkScale<&[ark_bw6_761::G2Affine]> - /// scalars: ArkScale<&[ark_bw6_761::Fr]> - /// Returns encoded: ArkScaleProjective - fn bw6_761_msm_g2(bases: Vec, bigints: Vec) -> Result, ()> { - bw6_761::msm_g2(bases, bigints) - } - - /// Compute projective multiplication on ed_on_bls12_377 - /// Receives encoded: - /// base: ArkScaleProjective - /// scalar: ArkScale<&[u64]> - /// Returns encoded: ArkScaleProjective - fn ed_on_bls12_377_mul_projective(base: Vec, scalar: Vec) -> Result, ()> { - ed_on_bls12_377::mul_projective(base, scalar) - } - - /// Compute msm on ed_on_bls12_377 - /// Receives encoded: - /// bases: ArkScale<&[ark_ed_on_bls12_377::EdwardsAffine]> - /// scalars: - /// ArkScale<&[ark_ed_on_bls12_377::Fr]> - /// Returns encoded: - /// ArkScaleProjective - fn ed_on_bls12_377_msm(bases: Vec, scalars: Vec) -> Result, ()> { - ed_on_bls12_377::msm(bases, scalars) - } - - /// Compute short weierstrass projective multiplication on ed_on_bls12_381_bandersnatch - /// Receives encoded: - /// base: ArkScaleProjective - /// scalar: ArkScale<&[u64]> - /// Returns encoded: ArkScaleProjective - fn ed_on_bls12_381_bandersnatch_sw_mul_projective( - base: Vec, - scalar: Vec, - ) -> Result, ()> { - ed_on_bls12_381_bandersnatch::sw_mul_projective(base, scalar) - } - - /// Compute twisted edwards projective multiplication on ed_on_bls12_381_bandersnatch - /// Receives encoded: - /// base: ArkScaleProjective - /// scalar: ArkScale<&[u64]> - /// Returns encoded: ArkScaleProjective - fn ed_on_bls12_381_bandersnatch_te_mul_projective( - base: Vec, - scalar: Vec, - ) -> Result, ()> { - ed_on_bls12_381_bandersnatch::te_mul_projective(base, scalar) - } - - /// Compute short weierstrass msm on ed_on_bls12_381_bandersnatch - /// Receives encoded: - /// bases: ArkScale<&[ark_ed_on_bls12_381_bandersnatch::SWAffine]> - /// scalars: ArkScale<&[ark_ed_on_bls12_381_bandersnatch::Fr]> - /// Returns encoded: - /// ArkScaleProjective - fn 
ed_on_bls12_381_bandersnatch_sw_msm( - bases: Vec, - scalars: Vec, - ) -> Result, ()> { - ed_on_bls12_381_bandersnatch::sw_msm(bases, scalars) - } - - /// Compute twisted edwards msm on ed_on_bls12_381_bandersnatch - /// Receives encoded: - /// base: ArkScaleProjective - /// scalars: ArkScale<&[ark_ed_on_bls12_381_bandersnatch::Fr]> - /// Returns encoded: - /// ArkScaleProjective - fn ed_on_bls12_381_bandersnatch_te_msm( - bases: Vec, - scalars: Vec, - ) -> Result, ()> { - ed_on_bls12_381_bandersnatch::te_msm(bases, scalars) - } -} +#[cfg(any( + feature = "bls12-377", + feature = "bls12-381", + feature = "bw6-761", + feature = "ed-on-bls12-377", + feature = "ed-on-bls12-381-bandersnatch", +))] +mod utils; diff --git a/substrate/primitives/crypto/ec-utils/src/test-data/g1_compressed_valid_test_vectors.dat b/substrate/primitives/crypto/ec-utils/src/test-data/g1_compressed_valid_test_vectors.dat deleted file mode 100644 index ea8cd67652d133010e79df8488452450b6f5cb17..0000000000000000000000000000000000000000 Binary files a/substrate/primitives/crypto/ec-utils/src/test-data/g1_compressed_valid_test_vectors.dat and /dev/null differ diff --git a/substrate/primitives/crypto/ec-utils/src/test-data/g1_uncompressed_valid_test_vectors.dat b/substrate/primitives/crypto/ec-utils/src/test-data/g1_uncompressed_valid_test_vectors.dat deleted file mode 100644 index 86abfba945c7b701750d637bffe6701330e10e88..0000000000000000000000000000000000000000 Binary files a/substrate/primitives/crypto/ec-utils/src/test-data/g1_uncompressed_valid_test_vectors.dat and /dev/null differ diff --git a/substrate/primitives/crypto/ec-utils/src/test-data/g2_compressed_valid_test_vectors.dat b/substrate/primitives/crypto/ec-utils/src/test-data/g2_compressed_valid_test_vectors.dat deleted file mode 100644 index a40bbe251d90e576060adb7eaa2456197878804c..0000000000000000000000000000000000000000 Binary files a/substrate/primitives/crypto/ec-utils/src/test-data/g2_compressed_valid_test_vectors.dat and /dev/null differ diff --git a/substrate/primitives/crypto/ec-utils/src/test-data/g2_uncompressed_valid_test_vectors.dat b/substrate/primitives/crypto/ec-utils/src/test-data/g2_uncompressed_valid_test_vectors.dat deleted file mode 100644 index 92e4bc528e8937a311b502e4940e5e363a1f7c57..0000000000000000000000000000000000000000 Binary files a/substrate/primitives/crypto/ec-utils/src/test-data/g2_uncompressed_valid_test_vectors.dat and /dev/null differ diff --git a/substrate/primitives/crypto/ec-utils/src/utils.rs b/substrate/primitives/crypto/ec-utils/src/utils.rs index 5560d5921160509a8f2a00b28ee29331008cc780..d0dd8ed8131c4d40b68bdbc0a1f7155318a9c449 100644 --- a/substrate/primitives/crypto/ec-utils/src/utils.rs +++ b/substrate/primitives/crypto/ec-utils/src/utils.rs @@ -15,116 +15,102 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! The generic executions of the operations on arkworks elliptic curves -//! which get instantiatied by the corresponding curves. +//! Generic executions of the operations for *Arkworks* elliptic curves. + +// As not all functions are used by each elliptic curve and some elliptic +// curve may be excluded by the build we resort to `#[allow(unused)]` to +// suppress the expected warning. 
+ use ark_ec::{ - pairing::{MillerLoopOutput, Pairing, PairingOutput}, - short_weierstrass, - short_weierstrass::SWCurveConfig, - twisted_edwards, - twisted_edwards::TECurveConfig, + pairing::{MillerLoopOutput, Pairing}, + short_weierstrass::{Affine as SWAffine, Projective as SWProjective, SWCurveConfig}, + twisted_edwards::{Affine as TEAffine, Projective as TEProjective, TECurveConfig}, CurveConfig, VariableBaseMSM, }; -use ark_scale::hazmat::ArkScaleProjective; -use ark_std::vec::Vec; -use codec::{Decode, Encode}; - -const HOST_CALL: ark_scale::Usage = ark_scale::HOST_CALL; -type ArkScale = ark_scale::ArkScale; - -pub(crate) fn multi_miller_loop_generic( - g1: Vec, - g2: Vec, -) -> Result, ()> { - let g1 = ::G1Affine>> as Decode>::decode(&mut g1.as_slice()) - .map_err(|_| ())?; - let g2 = ::G2Affine>> as Decode>::decode(&mut g2.as_slice()) - .map_err(|_| ())?; - - let result = Curve::multi_miller_loop(g1.0, g2.0).0; - - let result: ArkScale<::TargetField> = result.into(); - Ok(result.encode()) +use ark_scale::{ + ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Compress, Validate}, + scale::{Decode, Encode}, +}; +use sp_std::vec::Vec; + +// SCALE encoding parameters shared by all the enabled modules +const SCALE_USAGE: u8 = ark_scale::make_usage(Compress::No, Validate::No); +type ArkScale = ark_scale::ArkScale; +type ArkScaleProjective = ark_scale::hazmat::ArkScaleProjective; + +#[inline(always)] +pub fn encode(val: T) -> Vec { + ArkScale::from(val).encode() } -pub(crate) fn final_exponentiation_generic(target: Vec) -> Result, ()> { - let target = - ::TargetField> as Decode>::decode(&mut target.as_slice()) - .map_err(|_| ())?; +#[inline(always)] +pub fn decode(buf: Vec) -> Result { + ArkScale::::decode(&mut &buf[..]).map_err(|_| ()).map(|v| v.0) +} - let result = Curve::final_exponentiation(MillerLoopOutput(target.0)).ok_or(())?; +#[inline(always)] +pub fn encode_proj_sw(val: &SWProjective) -> Vec { + ArkScaleProjective::from(val).encode() +} - let result: ArkScale> = result.into(); - Ok(result.encode()) +#[inline(always)] +pub fn decode_proj_sw(buf: Vec) -> Result, ()> { + ArkScaleProjective::decode(&mut &buf[..]).map_err(|_| ()).map(|v| v.0) } -pub(crate) fn msm_sw_generic( - bases: Vec, - scalars: Vec, -) -> Result, ()> { - let bases = - >> as Decode>::decode(&mut bases.as_slice()) - .map_err(|_| ())?; - let scalars = ::ScalarField>> as Decode>::decode( - &mut scalars.as_slice(), - ) - .map_err(|_| ())?; - - let result = - as VariableBaseMSM>::msm(&bases.0, &scalars.0) - .map_err(|_| ())?; - - let result: ArkScaleProjective> = result.into(); - Ok(result.encode()) +#[inline(always)] +pub fn encode_proj_te(val: &TEProjective) -> Vec { + ArkScaleProjective::from(val).encode() } -pub(crate) fn msm_te_generic( - bases: Vec, - scalars: Vec, -) -> Result, ()> { - let bases = - >> as Decode>::decode(&mut bases.as_slice()) - .map_err(|_| ())?; - let scalars = ::ScalarField>> as Decode>::decode( - &mut scalars.as_slice(), - ) - .map_err(|_| ())?; - - let result = as VariableBaseMSM>::msm(&bases.0, &scalars.0) - .map_err(|_| ())?; - - let result: ArkScaleProjective> = result.into(); - Ok(result.encode()) +#[inline(always)] +pub fn decode_proj_te(buf: Vec) -> Result, ()> { + ArkScaleProjective::decode(&mut &buf[..]).map_err(|_| ()).map(|v| v.0) } -pub(crate) fn mul_projective_generic( - base: Vec, - scalar: Vec, -) -> Result, ()> { - let base = > as Decode>::decode( - &mut base.as_slice(), - ) - .map_err(|_| ())?; - let scalar = > as Decode>::decode(&mut scalar.as_slice()).map_err(|_| 
())?; +#[allow(unused)] +pub fn multi_miller_loop(g1: Vec, g2: Vec) -> Result, ()> { + let g1 = decode::::G1Affine>>(g1)?; + let g2 = decode::::G2Affine>>(g2)?; + let res = T::multi_miller_loop(g1, g2); + Ok(encode(res.0)) +} - let result = ::mul_projective(&base.0, &scalar.0); +#[allow(unused)] +pub fn final_exponentiation(target: Vec) -> Result, ()> { + let target = decode::<::TargetField>(target)?; + let res = T::final_exponentiation(MillerLoopOutput(target)).ok_or(())?; + Ok(encode(res.0)) +} - let result: ArkScaleProjective> = result.into(); - Ok(result.encode()) +#[allow(unused)] +pub fn msm_sw(bases: Vec, scalars: Vec) -> Result, ()> { + let bases = decode::>>(bases)?; + let scalars = decode::::ScalarField>>(scalars)?; + let res = as VariableBaseMSM>::msm(&bases, &scalars).map_err(|_| ())?; + Ok(encode_proj_sw(&res)) } -pub(crate) fn mul_projective_te_generic( - base: Vec, - scalar: Vec, -) -> Result, ()> { - let base = > as Decode>::decode( - &mut base.as_slice(), - ) - .map_err(|_| ())?; - let scalar = > as Decode>::decode(&mut scalar.as_slice()).map_err(|_| ())?; +#[allow(unused)] +pub fn msm_te(bases: Vec, scalars: Vec) -> Result, ()> { + let bases = decode::>>(bases)?; + let scalars = decode::::ScalarField>>(scalars)?; + let res = as VariableBaseMSM>::msm(&bases, &scalars).map_err(|_| ())?; + Ok(encode_proj_te(&res)) +} - let result = ::mul_projective(&base.0, &scalar.0); +#[allow(unused)] +pub fn mul_projective_sw(base: Vec, scalar: Vec) -> Result, ()> { + let base = decode_proj_sw::(base)?; + let scalar = decode::>(scalar)?; + let res = ::mul_projective(&base, &scalar); + Ok(encode_proj_sw(&res)) +} - let result: ArkScaleProjective> = result.into(); - Ok(result.encode()) +#[allow(unused)] +pub fn mul_projective_te(base: Vec, scalar: Vec) -> Result, ()> { + let base = decode_proj_te::(base)?; + let scalar = decode::>(scalar)?; + let res = ::mul_projective(&base, &scalar); + Ok(encode_proj_te(&res)) } diff --git a/substrate/primitives/genesis-builder/Cargo.toml b/substrate/primitives/genesis-builder/Cargo.toml index 8f083e2d7d54ca1396d46b22e002b706fce2f406..cf7ce99571156ed3b1169e62d956a7bbd3407c2a 100644 --- a/substrate/primitives/genesis-builder/Cargo.toml +++ b/substrate/primitives/genesis-builder/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] sp-api = { path = "../api", default-features = false} sp-runtime = { path = "../runtime", default-features = false} sp-std = { path = "../std", default-features = false} -serde_json = { version = "1.0.107", default-features = false, features = ["alloc"] } +serde_json = { version = "1.0.108", default-features = false, features = ["alloc"] } [features] default = [ "std" ] diff --git a/substrate/primitives/inherents/Cargo.toml b/substrate/primitives/inherents/Cargo.toml index aa0aa95b3f8d060c909c23fb3f6bbdfeac3643ad..4a511c653fd7a07a87117ace4d2607fd25509dea 100644 --- a/substrate/primitives/inherents/Cargo.toml +++ b/substrate/primitives/inherents/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = { version = "0.1.57", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2.2" thiserror = { version = "1.0.48", optional = true } sp-runtime = { path = "../runtime", default-features = false, optional = true} 
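For reference, the reworked `utils` helpers are thin wrappers around `ArkScale` with `Compress::No` / `Validate::No`, so runtime and host exchange plain byte blobs. A test-style round-trip sketch (illustrative; assumes `ark_bls12_381` and `ark_std` are available and that the code lives inside the crate, e.g. as a unit test, so the private `utils` module is reachable):

use ark_bls12_381::Fr;
use ark_std::{test_rng, UniformRand};

fn arkscale_roundtrip() -> Result<(), ()> {
    let mut rng = test_rng();
    let scalars: Vec<Fr> = (0..4).map(|_| Fr::rand(&mut rng)).collect();

    // `encode` wraps the slice in `ArkScale` (uncompressed, unvalidated) and SCALE-encodes it;
    // this is the byte blob a host function receives.
    let bytes = crate::utils::encode(&scalars[..]);

    // `decode` reverses the operation on the other side of the host boundary.
    let decoded: Vec<Fr> = crate::utils::decode(bytes)?;
    assert_eq!(scalars, decoded);
    Ok(())
}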
diff --git a/substrate/primitives/io/Cargo.toml b/substrate/primitives/io/Cargo.toml index ab9d26ec26f49fc4ad0097f3be71dd0e6a7c675c..59df8895bb7fd57884a453a29c968cf29e0df3f2 100644 --- a/substrate/primitives/io/Cargo.toml +++ b/substrate/primitives/io/Cargo.toml @@ -28,7 +28,7 @@ sp-trie = { path = "../trie", default-features = false, optional = true} sp-externalities = { path = "../externalities", default-features = false} sp-tracing = { path = "../tracing", default-features = false} log = { version = "0.4.17", optional = true } -secp256k1 = { version = "0.24.0", features = ["recovery", "global-context"], optional = true } +secp256k1 = { version = "0.28.0", features = ["recovery", "global-context"], optional = true } tracing = { version = "0.1.29", default-features = false } tracing-core = { version = "0.1.28", default-features = false} @@ -44,8 +44,9 @@ std = [ "bytes/std", "codec/std", "ed25519-dalek", + "ed25519-dalek?/std", "libsecp256k1", - "log", + "log/std", "secp256k1", "sp-core/std", "sp-externalities/std", diff --git a/substrate/primitives/io/src/lib.rs b/substrate/primitives/io/src/lib.rs index ec098a155c9c55644693dd6fffb4bcea0b57bb9e..a300152ee66d1288de4633918d08590b483d6423 100644 --- a/substrate/primitives/io/src/lib.rs +++ b/substrate/primitives/io/src/lib.rs @@ -106,7 +106,7 @@ use sp_core::{ }; #[cfg(feature = "bls-experimental")] -use sp_core::bls377; +use sp_core::{bls377, ecdsa_bls377}; #[cfg(feature = "std")] use sp_trie::{LayoutV0, LayoutV1, TrieConfiguration}; @@ -1139,7 +1139,7 @@ pub trait Crypto { .map_err(|_| EcdsaVerifyError::BadV)?; let sig = RecoverableSignature::from_compact(&sig[..64], rid) .map_err(|_| EcdsaVerifyError::BadRS)?; - let msg = Message::from_slice(msg).expect("Message is 32 bytes; qed"); + let msg = Message::from_digest_slice(msg).expect("Message is 32 bytes; qed"); let pubkey = SECP256K1 .recover_ecdsa(&msg, &sig) .map_err(|_| EcdsaVerifyError::BadSignature)?; @@ -1185,7 +1185,7 @@ pub trait Crypto { .map_err(|_| EcdsaVerifyError::BadV)?; let sig = RecoverableSignature::from_compact(&sig[..64], rid) .map_err(|_| EcdsaVerifyError::BadRS)?; - let msg = Message::from_slice(msg).expect("Message is 32 bytes; qed"); + let msg = Message::from_digest_slice(msg).expect("Message is 32 bytes; qed"); let pubkey = SECP256K1 .recover_ecdsa(&msg, &sig) .map_err(|_| EcdsaVerifyError::BadSignature)?; @@ -1207,6 +1207,25 @@ pub trait Crypto { .expect("`bls377_generate` failed") } + /// Generate an `(ecdsa,bls12-377)` key for the given key type using an optional `seed` and + /// store it in the keystore. + /// + /// The `seed` needs to be a valid utf8. + /// + /// Returns the public key. + #[cfg(feature = "bls-experimental")] + fn ecdsa_bls377_generate( + &mut self, + id: KeyTypeId, + seed: Option>, + ) -> ecdsa_bls377::Public { + let seed = seed.as_ref().map(|s| std::str::from_utf8(s).expect("Seed is valid utf8!")); + self.extension::() + .expect("No `keystore` associated for the current context!") + .ecdsa_bls377_generate_new(id, seed) + .expect("`ecdsa_bls377_generate` failed") + } + /// Generate a `bandersnatch` key pair for the given key type using an optional /// `seed` and store it in the keystore. 
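Runtime code reaches the new keystore host function through the generated `sp_io::crypto` module. A hypothetical call site (the key type is made up for illustration; requires the `bls-experimental` feature):

#[cfg(feature = "bls-experimental")]
fn generate_paired_authority_key() -> sp_core::ecdsa_bls377::Public {
    // Made-up key type, for illustration only.
    const DEMO: sp_core::crypto::KeyTypeId = sp_core::crypto::KeyTypeId(*b"demo");
    // Creates an (ECDSA, BLS12-377) key pair in the node's keystore and returns the public key.
    sp_io::crypto::ecdsa_bls377_generate(DEMO, None)
}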
/// diff --git a/substrate/primitives/keyring/src/ed25519.rs b/substrate/primitives/keyring/src/ed25519.rs index c3ad86409e905a184b2993f586f5251427f5b920..3060bfb1ad9870f8f7a6af51ed57e634931127e8 100644 --- a/substrate/primitives/keyring/src/ed25519.rs +++ b/substrate/primitives/keyring/src/ed25519.rs @@ -35,6 +35,12 @@ pub enum Keyring { Dave, Eve, Ferdie, + AliceStash, + BobStash, + CharlieStash, + DaveStash, + EveStash, + FerdieStash, One, Two, } @@ -104,6 +110,12 @@ impl From for &'static str { Keyring::Dave => "Dave", Keyring::Eve => "Eve", Keyring::Ferdie => "Ferdie", + Keyring::AliceStash => "Alice//stash", + Keyring::BobStash => "Bob//stash", + Keyring::CharlieStash => "Charlie//stash", + Keyring::DaveStash => "Dave//stash", + Keyring::EveStash => "Eve//stash", + Keyring::FerdieStash => "Ferdie//stash", Keyring::One => "One", Keyring::Two => "Two", } diff --git a/substrate/primitives/keyring/src/lib.rs b/substrate/primitives/keyring/src/lib.rs index 1db18f7edbdc857a7e4731b2993a61290438bd53..ee7fd56ba11bf5855d8ae648f073ea004bb257aa 100644 --- a/substrate/primitives/keyring/src/lib.rs +++ b/substrate/primitives/keyring/src/lib.rs @@ -27,9 +27,8 @@ pub mod ed25519; #[cfg(feature = "bandersnatch-experimental")] pub mod bandersnatch; -/// Convenience export: Sr25519's Keyring is exposed as `AccountKeyring`, -/// since it tends to be used for accounts (although it may also be used -/// by authorities). +/// Convenience export: Sr25519's Keyring is exposed as `AccountKeyring`, since it tends to be +/// used for accounts (although it may also be used by authorities). pub use sr25519::Keyring as AccountKeyring; #[cfg(feature = "bandersnatch-experimental")] diff --git a/substrate/primitives/keyring/src/sr25519.rs b/substrate/primitives/keyring/src/sr25519.rs index c738cfdc59d9eac368bc2d1925eb600cb8dfba0a..914a66b4d837c94b1073d10cfc92187402597700 100644 --- a/substrate/primitives/keyring/src/sr25519.rs +++ b/substrate/primitives/keyring/src/sr25519.rs @@ -35,6 +35,12 @@ pub enum Keyring { Dave, Eve, Ferdie, + AliceStash, + BobStash, + CharlieStash, + DaveStash, + EveStash, + FerdieStash, One, Two, } @@ -114,6 +120,12 @@ impl From for &'static str { Keyring::Dave => "Dave", Keyring::Eve => "Eve", Keyring::Ferdie => "Ferdie", + Keyring::AliceStash => "Alice//stash", + Keyring::BobStash => "Bob//stash", + Keyring::CharlieStash => "Charlie//stash", + Keyring::DaveStash => "Dave//stash", + Keyring::EveStash => "Eve//stash", + Keyring::FerdieStash => "Ferdie//stash", Keyring::One => "One", Keyring::Two => "Two", } diff --git a/substrate/primitives/keystore/src/lib.rs b/substrate/primitives/keystore/src/lib.rs index 035af7099a6f8835364c14e3150ba880e3efc2c1..e415080779cf43774eabb933a9778f1c94b7a489 100644 --- a/substrate/primitives/keystore/src/lib.rs +++ b/substrate/primitives/keystore/src/lib.rs @@ -23,7 +23,7 @@ pub mod testing; #[cfg(feature = "bandersnatch-experimental")] use sp_core::bandersnatch; #[cfg(feature = "bls-experimental")] -use sp_core::{bls377, bls381}; +use sp_core::{bls377, bls381, ecdsa_bls377}; use sp_core::{ crypto::{ByteArray, CryptoTypeId, KeyTypeId}, ecdsa, ed25519, sr25519, @@ -270,6 +270,10 @@ pub trait Keystore: Send + Sync { #[cfg(feature = "bls-experimental")] fn bls377_public_keys(&self, id: KeyTypeId) -> Vec; + /// Returns all (ecdsa,bls12-377) paired public keys for the given key type. + #[cfg(feature = "bls-experimental")] + fn ecdsa_bls377_public_keys(&self, id: KeyTypeId) -> Vec; + /// Generate a new bls381 key pair for the given key type and an optional seed. 
/// /// Returns an `bls381::Public` key of the generated key pair or an `Err` if @@ -292,6 +296,17 @@ pub trait Keystore: Send + Sync { seed: Option<&str>, ) -> Result; + /// Generate a new (ecdsa,bls377) key pair for the given key type and an optional seed. + /// + /// Returns an `ecdsa_bls377::Public` key of the generated key pair or an `Err` if + /// something failed during key generation. + #[cfg(feature = "bls-experimental")] + fn ecdsa_bls377_generate_new( + &self, + key_type: KeyTypeId, + seed: Option<&str>, + ) -> Result; + /// Generate a bls381 signature for a given message. /// /// Receives [`KeyTypeId`] and a [`bls381::Public`] key to be able to map @@ -324,6 +339,22 @@ pub trait Keystore: Send + Sync { msg: &[u8], ) -> Result, Error>; + /// Generate a (ecdsa,bls377) signature pair for a given message. + /// + /// Receives [`KeyTypeId`] and a [`ecdsa_bls377::Public`] key to be able to map + /// them to a private key that exists in the keystore. + /// + /// Returns an [`ecdsa_bls377::Signature`] or `None` in case the given `key_type` + /// and `public` combination doesn't exist in the keystore. + /// An `Err` will be returned if generating the signature itself failed. + #[cfg(feature = "bls-experimental")] + fn ecdsa_bls377_sign( + &self, + key_type: KeyTypeId, + public: &ecdsa_bls377::Public, + msg: &[u8], + ) -> Result, Error>; + /// Insert a new secret key. fn insert(&self, key_type: KeyTypeId, suri: &str, public: &[u8]) -> Result<(), ()>; @@ -349,6 +380,7 @@ pub trait Keystore: Send + Sync { /// - bandersnatch /// - bls381 /// - bls377 + /// - (ecdsa,bls377) paired keys /// /// To support more schemes you can overwrite this method. /// @@ -398,6 +430,13 @@ pub trait Keystore: Send + Sync { .map_err(|_| Error::ValidationError("Invalid public key format".into()))?; self.bls377_sign(id, &public, msg)?.map(|s| s.encode()) }, + #[cfg(feature = "bls-experimental")] + ecdsa_bls377::CRYPTO_ID => { + let public = ecdsa_bls377::Public::from_slice(public) + .map_err(|_| Error::ValidationError("Invalid public key format".into()))?; + self.ecdsa_bls377_sign(id, &public, msg)?.map(|s| s.encode()) + }, + _ => return Err(Error::KeyNotSupported(id)), }; Ok(signature) @@ -560,6 +599,11 @@ impl Keystore for Arc { (**self).bls377_public_keys(id) } + #[cfg(feature = "bls-experimental")] + fn ecdsa_bls377_public_keys(&self, id: KeyTypeId) -> Vec { + (**self).ecdsa_bls377_public_keys(id) + } + #[cfg(feature = "bls-experimental")] fn bls381_generate_new( &self, @@ -578,6 +622,15 @@ impl Keystore for Arc { (**self).bls377_generate_new(key_type, seed) } + #[cfg(feature = "bls-experimental")] + fn ecdsa_bls377_generate_new( + &self, + key_type: KeyTypeId, + seed: Option<&str>, + ) -> Result { + (**self).ecdsa_bls377_generate_new(key_type, seed) + } + #[cfg(feature = "bls-experimental")] fn bls381_sign( &self, @@ -598,6 +651,16 @@ impl Keystore for Arc { (**self).bls377_sign(key_type, public, msg) } + #[cfg(feature = "bls-experimental")] + fn ecdsa_bls377_sign( + &self, + key_type: KeyTypeId, + public: &ecdsa_bls377::Public, + msg: &[u8], + ) -> Result, Error> { + (**self).ecdsa_bls377_sign(key_type, public, msg) + } + fn insert(&self, key_type: KeyTypeId, suri: &str, public: &[u8]) -> Result<(), ()> { (**self).insert(key_type, suri, public) } diff --git a/substrate/primitives/keystore/src/testing.rs b/substrate/primitives/keystore/src/testing.rs index efa35fd24bf46bd77e235b207e442c717411d8d3..08110e8e497919db42d52d3d6e75194e0d4e3130 100644 --- a/substrate/primitives/keystore/src/testing.rs +++ 
b/substrate/primitives/keystore/src/testing.rs @@ -22,7 +22,7 @@ use crate::{Error, Keystore, KeystorePtr}; #[cfg(feature = "bandersnatch-experimental")] use sp_core::bandersnatch; #[cfg(feature = "bls-experimental")] -use sp_core::{bls377, bls381}; +use sp_core::{bls377, bls381, ecdsa_bls377}; use sp_core::{ crypto::{ByteArray, KeyTypeId, Pair, VrfSecret}, ecdsa, ed25519, sr25519, @@ -32,7 +32,7 @@ use parking_lot::RwLock; use std::{collections::HashMap, sync::Arc}; /// A keystore implementation usable in tests. -#[derive(Default)] +#[derive(Default, Clone)] pub struct MemoryKeystore { /// `KeyTypeId` maps to public keys and public keys map to private keys. keys: Arc, String>>>>, @@ -322,6 +322,30 @@ impl Keystore for MemoryKeystore { self.sign::(key_type, public, msg) } + #[cfg(feature = "bls-experimental")] + fn ecdsa_bls377_public_keys(&self, key_type: KeyTypeId) -> Vec { + self.public_keys::(key_type) + } + + #[cfg(feature = "bls-experimental")] + fn ecdsa_bls377_generate_new( + &self, + key_type: KeyTypeId, + seed: Option<&str>, + ) -> Result { + self.generate_new::(key_type, seed) + } + + #[cfg(feature = "bls-experimental")] + fn ecdsa_bls377_sign( + &self, + key_type: KeyTypeId, + public: &ecdsa_bls377::Public, + msg: &[u8], + ) -> Result, Error> { + self.sign::(key_type, public, msg) + } + fn insert(&self, key_type: KeyTypeId, suri: &str, public: &[u8]) -> Result<(), ()> { self.keys .write() diff --git a/substrate/primitives/maybe-compressed-blob/README.md b/substrate/primitives/maybe-compressed-blob/README.md index b5bb869c30e4ffd46d15049763e52af6f65e7f05..a2f6a1c53432657b85a09cf0d1df65e541a37fa9 100644 --- a/substrate/primitives/maybe-compressed-blob/README.md +++ b/substrate/primitives/maybe-compressed-blob/README.md @@ -1,3 +1,3 @@ -Handling of blobs, typicaly validation code, which may be compressed. +Handling of blobs, typically validation code, which may be compressed. 
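The additions to `MemoryKeystore` make the paired scheme usable from tests. A rough sketch (the key type is made up; requires the `bls-experimental` feature):

#[cfg(feature = "bls-experimental")]
fn memory_keystore_demo() {
    use sp_core::crypto::KeyTypeId;
    use sp_keystore::{testing::MemoryKeystore, Keystore};

    let store = MemoryKeystore::new();
    let key_type = KeyTypeId(*b"dumy");
    let public = store.ecdsa_bls377_generate_new(key_type, None).expect("generating a key works");
    // `MemoryKeystore` now derives `Clone`; clones share the same underlying key map.
    let signer = store.clone();
    let sig = signer.ecdsa_bls377_sign(key_type, &public, b"msg").expect("keystore is reachable");
    assert!(sig.is_some());
}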
License: Apache-2.0 diff --git a/substrate/primitives/merkle-mountain-range/Cargo.toml b/substrate/primitives/merkle-mountain-range/Cargo.toml index 747b967dd9ed78162840f9648c208ae55f9442fa..5216765825ff71f4b0320df14e372ca851113fe8 100644 --- a/substrate/primitives/merkle-mountain-range/Cargo.toml +++ b/substrate/primitives/merkle-mountain-range/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } mmr-lib = { package = "ckb-merkle-mountain-range", version = "0.5.2", default-features = false } serde = { version = "1.0.188", features = ["derive", "alloc"], default-features = false, optional = true } @@ -33,6 +33,7 @@ std = [ "codec/std", "log/std", "mmr-lib/std", + "scale-info/std", "serde/std", "sp-api/std", "sp-core/std", diff --git a/substrate/primitives/metadata-ir/Cargo.toml b/substrate/primitives/metadata-ir/Cargo.toml index d17c654aaf3882f3bebfe16204f5ef0f7e77ce24..77c21b920f2f8e2f211271098e7479b8bf62f8f2 100644 --- a/substrate/primitives/metadata-ir/Cargo.toml +++ b/substrate/primitives/metadata-ir/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } frame-metadata = { version = "16.0.0", default-features = false, features = ["current"] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } sp-std = { path = "../std", default-features = false} [features] diff --git a/substrate/primitives/mixnet/Cargo.toml b/substrate/primitives/mixnet/Cargo.toml index 3e2dcc7ec5c4c866b6254700c97b1160ed5148af..bc6878086cf5eae3f355b0741c356ee57c4fd4c6 100644 --- a/substrate/primitives/mixnet/Cargo.toml +++ b/substrate/primitives/mixnet/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } sp-api = { default-features = false, path = "../api" } sp-application-crypto = { default-features = false, path = "../application-crypto" } sp-std = { default-features = false, path = "../std" } diff --git a/substrate/primitives/npos-elections/Cargo.toml b/substrate/primitives/npos-elections/Cargo.toml index 68f1bef91665c6b168f736e8125a633a265652d9..90418e561f217aeaf28c7d9023839715e8a22bf9 100644 --- a/substrate/primitives/npos-elections/Cargo.toml +++ b/substrate/primitives/npos-elections/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.188", default-features = false, features = ["derive", "alloc"], optional = true } sp-arithmetic = { path = "../arithmetic", default-features = false} sp-core = { path = 
"../core", default-features = false} diff --git a/substrate/primitives/npos-elections/src/lib.rs b/substrate/primitives/npos-elections/src/lib.rs index 0afe1ec5bb692346fdebbc0cd046b4238b806842..62ae0502114823c3e701c39f727d1ce8aca39e16 100644 --- a/substrate/primitives/npos-elections/src/lib.rs +++ b/substrate/primitives/npos-elections/src/lib.rs @@ -19,7 +19,7 @@ //! - [`seq_phragmen`]: Implements the Phragmén Sequential Method. An un-ranked, relatively fast //! election method that ensures PJR, but does not provide a constant factor approximation of the //! maximin problem. -//! - [`phragmms`](phragmms::phragmms): Implements a hybrid approach inspired by Phragmén which is +//! - [`ghragmms`](phragmms::phragmms()): Implements a hybrid approach inspired by Phragmén which is //! executed faster but it can achieve a constant factor approximation of the maximin problem, //! similar to that of the MMS algorithm. //! - [`balance`](balancing::balance): Implements the star balancing algorithm. This iterative diff --git a/substrate/primitives/rpc/Cargo.toml b/substrate/primitives/rpc/Cargo.toml index 4739c96740e9ff1c72ed0aa8000437283d4000ac..77bdcc4f89a1d7a8bfc1666884f4d8804ce497a1 100644 --- a/substrate/primitives/rpc/Cargo.toml +++ b/substrate/primitives/rpc/Cargo.toml @@ -18,4 +18,4 @@ serde = { version = "1.0.188", features = ["derive"] } sp-core = { path = "../core" } [dev-dependencies] -serde_json = "1.0.107" +serde_json = "1.0.108" diff --git a/substrate/primitives/runtime-interface/test-wasm/Cargo.toml b/substrate/primitives/runtime-interface/test-wasm/Cargo.toml index de0d6f9a13e2d513f15fa90affc80777b388485a..7729f89fa39ab263af141f9a797d90d24937d8f8 100644 --- a/substrate/primitives/runtime-interface/test-wasm/Cargo.toml +++ b/substrate/primitives/runtime-interface/test-wasm/Cargo.toml @@ -25,6 +25,7 @@ substrate-wasm-builder = { path = "../../../utils/wasm-builder", optional = true [features] default = [ "std" ] std = [ + "bytes/std", "sp-core/std", "sp-io/std", "sp-runtime-interface/std", diff --git a/substrate/primitives/runtime-interface/tests/ui/no_feature_gated_method.stderr b/substrate/primitives/runtime-interface/tests/ui/no_feature_gated_method.stderr index 23e671f6ce3f3e93ef8e49aba332b86e2c029473..10012ede793dee015a10279257a9d640096de0c8 100644 --- a/substrate/primitives/runtime-interface/tests/ui/no_feature_gated_method.stderr +++ b/substrate/primitives/runtime-interface/tests/ui/no_feature_gated_method.stderr @@ -3,3 +3,15 @@ error[E0425]: cannot find function `bar` in module `test` | 33 | test::bar(); | ^^^ not found in `test` + | +note: found an item that was configured out + --> tests/ui/no_feature_gated_method.rs:25:5 + | +25 | fn bar() {} + | ^^^ + = note: the item is gated behind the `bar-feature` feature +note: found an item that was configured out + --> tests/ui/no_feature_gated_method.rs:25:5 + | +25 | fn bar() {} + | ^^^ diff --git a/substrate/primitives/runtime/Cargo.toml b/substrate/primitives/runtime/Cargo.toml index fa3c3ae2e6ea032447de8e0fe158e845ead2b9ec..bf6cf93c5f048412342f8176b02d784a2c434b5c 100644 --- a/substrate/primitives/runtime/Cargo.toml +++ b/substrate/primitives/runtime/Cargo.toml @@ -21,7 +21,7 @@ impl-trait-for-tuples = "0.2.2" log = { version = "0.4.17", default-features = false } paste = "1.0" rand = { version = "0.8.5", optional = true } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.188", 
default-features = false, features = ["derive", "alloc"], optional = true } sp-application-crypto = { path = "../application-crypto", default-features = false} sp-arithmetic = { path = "../arithmetic", default-features = false} @@ -32,7 +32,7 @@ sp-weights = { path = "../weights", default-features = false} [dev-dependencies] rand = "0.8.5" -serde_json = "1.0.107" +serde_json = "1.0.108" zstd = { version = "0.12.4", default-features = false } sp-api = { path = "../api" } sp-state-machine = { path = "../state-machine" } diff --git a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs index 0b1cd2b54290e0a829b09029b630a30c7410cb31..1cdc0b8e4051b7977b2eea4caa1339b17b4ceb06 100644 --- a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs +++ b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs @@ -134,16 +134,16 @@ impl Checkable - for UncheckedExtrinsic +impl Checkable + for UncheckedExtrinsic where - Address: Member + MaybeDisplay, + LookupSource: Member + MaybeDisplay, Call: Encode + Member, Signature: Member + traits::Verify, ::Signer: IdentifyAccount, Extra: SignedExtension, AccountId: Member + MaybeDisplay, - Lookup: traits::Lookup, + Lookup: traits::Lookup, { type Checked = CheckedExtrinsic; diff --git a/substrate/primitives/runtime/src/lib.rs b/substrate/primitives/runtime/src/lib.rs index dd861ad05de9be9717e337be79f5c17e9d3b676f..ddf92554c83056f192015a26285408b7fdf33695 100644 --- a/substrate/primitives/runtime/src/lib.rs +++ b/substrate/primitives/runtime/src/lib.rs @@ -466,7 +466,7 @@ impl Verify for MultiSignature { } /// Signature verify that can work with any known signature types.. -#[derive(Eq, PartialEq, Clone, Default, Encode, Decode, RuntimeDebug)] +#[derive(Eq, PartialEq, Clone, Default, Encode, Decode, RuntimeDebug, TypeInfo)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct AnySignature(H512); @@ -954,6 +954,32 @@ pub fn print(print: impl traits::Printable) { print.print(); } +/// Utility function to declare string literals backed by an array of length N. +/// +/// The input can be shorter than N, in that case the end of the array is padded with zeros. +/// +/// [`str_array`] is useful when converting strings that end up in the storage as fixed size arrays +/// or in const contexts where static data types have strings that could also end up in the storage. +/// +/// # Example +/// +/// ```rust +/// # use sp_runtime::str_array; +/// const MY_STR: [u8; 6] = str_array("data"); +/// assert_eq!(MY_STR, *b"data\0\0"); +/// ``` +pub const fn str_array(s: &str) -> [u8; N] { + debug_assert!(s.len() <= N, "String literal doesn't fit in array"); + let mut i = 0; + let mut arr = [0; N]; + let s = s.as_bytes(); + while i < s.len() { + arr[i] = s[i]; + i += 1; + } + arr +} + /// Describes on what should happen with a storage transaction. pub enum TransactionOutcome { /// Commit the transaction. diff --git a/substrate/primitives/runtime/src/traits.rs b/substrate/primitives/runtime/src/traits.rs index 17dc7ce50ea8b90254c41c388b2adf550730cef4..ec79f43cabdc36249f56f1829de6cdb2289f9913 100644 --- a/substrate/primitives/runtime/src/traits.rs +++ b/substrate/primitives/runtime/src/traits.rs @@ -226,7 +226,7 @@ pub trait StaticLookup { } /// A lookup implementation returning the input value. 
-#[derive(Default)] +#[derive(Default, Clone, Copy, PartialEq, Eq)] pub struct IdentityLookup(PhantomData); impl StaticLookup for IdentityLookup { type Source = T; @@ -1666,8 +1666,6 @@ impl SignedExtension for Tuple { } } -/// Only for bare bone testing when you don't care about signed extensions at all. -#[cfg(feature = "std")] impl SignedExtension for () { type AccountId = u64; type AdditionalSigned = (); diff --git a/substrate/primitives/session/Cargo.toml b/substrate/primitives/session/Cargo.toml index 9a5e77c9dc297ab2ca42c8f1b56b2871f1a244e2..4c11762ffb7cf67890108e03fbc9dde05a5576b7 100644 --- a/substrate/primitives/session/Cargo.toml +++ b/substrate/primitives/session/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } sp-api = { path = "../api", default-features = false} sp-core = { path = "../core", default-features = false} sp-runtime = { path = "../runtime", optional = true} diff --git a/substrate/primitives/session/src/lib.rs b/substrate/primitives/session/src/lib.rs index 45395e9766f55920f5ca4700c398aae02bd6decd..9933495fd60135c826d822cc3d3e1471492797fb 100644 --- a/substrate/primitives/session/src/lib.rs +++ b/substrate/primitives/session/src/lib.rs @@ -26,28 +26,12 @@ use sp_api::ProvideRuntimeApi; #[cfg(feature = "std")] use sp_runtime::traits::Block as BlockT; -use sp_core::{crypto::KeyTypeId, RuntimeDebug}; +use sp_core::RuntimeDebug; use sp_staking::SessionIndex; use sp_std::vec::Vec; -sp_api::decl_runtime_apis! { - /// Session keys runtime api. - pub trait SessionKeys { - /// Generate a set of session keys with optionally using the given seed. - /// The keys should be stored within the keystore exposed via runtime - /// externalities. - /// - /// The seed needs to be a valid `utf8` string. - /// - /// Returns the concatenated SCALE encoded public keys. - fn generate_session_keys(seed: Option>) -> Vec; - - /// Decode the given public session keys. - /// - /// Returns the list of public raw public keys + key type. - fn decode_session_keys(encoded: Vec) -> Option, KeyTypeId)>>; - } -} +pub mod runtime_api; +pub use runtime_api::*; /// Number of validators in a given session. pub type ValidatorCount = u32; diff --git a/substrate/primitives/session/src/runtime_api.rs b/substrate/primitives/session/src/runtime_api.rs new file mode 100644 index 0000000000000000000000000000000000000000..5e508cd3dbd3872bc5a5cf99b80e0eed1f755116 --- /dev/null +++ b/substrate/primitives/session/src/runtime_api.rs @@ -0,0 +1,38 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +pub use sp_core::crypto::KeyTypeId; +use sp_std::prelude::*; + +sp_api::decl_runtime_apis! { + /// Session keys runtime api. + pub trait SessionKeys { + /// Generate a set of session keys with optionally using the given seed. + /// The keys should be stored within the keystore exposed via runtime + /// externalities. + /// + /// The seed needs to be a valid `utf8` string. + /// + /// Returns the concatenated SCALE encoded public keys. + fn generate_session_keys(seed: Option>) -> Vec; + + /// Decode the given public session keys. + /// + /// Returns the list of public raw public keys + key type. + fn decode_session_keys(encoded: Vec) -> Option, sp_core::crypto::KeyTypeId)>>; + } +} diff --git a/substrate/primitives/staking/Cargo.toml b/substrate/primitives/staking/Cargo.toml index 825806078f6e9fde8b0a3e41d4013166cd649f96..ef96276a00394eba9297dc5a5305cdf1227bb98b 100644 --- a/substrate/primitives/staking/Cargo.toml +++ b/substrate/primitives/staking/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.188", default-features = false, features = ["derive", "alloc"], optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2.2" sp-core = { path = "../core", default-features = false} diff --git a/substrate/primitives/staking/src/lib.rs b/substrate/primitives/staking/src/lib.rs index 8b5797d7918fbf74e11f296ee0208567fad17236..c2ac5ae004b1b7621341020e614a9c769e27d2dd 100644 --- a/substrate/primitives/staking/src/lib.rs +++ b/substrate/primitives/staking/src/lib.rs @@ -21,11 +21,13 @@ //! approaches in general. Definitions related to sessions, slashing, etc go here. use crate::currency_to_vote::CurrencyToVote; -use codec::{FullCodec, MaxEncodedLen}; +use codec::{Decode, Encode, FullCodec, HasCompact, MaxEncodedLen}; use scale_info::TypeInfo; -use sp_core::RuntimeDebug; -use sp_runtime::{DispatchError, DispatchResult, Saturating}; -use sp_std::{collections::btree_map::BTreeMap, ops::Sub, vec::Vec}; +use sp_runtime::{ + traits::{AtLeast32BitUnsigned, Zero}, + DispatchError, DispatchResult, RuntimeDebug, Saturating, +}; +use sp_std::{collections::btree_map::BTreeMap, ops::Sub, vec, vec::Vec}; pub mod offence; @@ -37,6 +39,25 @@ pub type SessionIndex = u32; /// Counter for the number of eras that have passed. pub type EraIndex = u32; +/// Type for identifying a page. +pub type Page = u32; +/// Representation of a staking account, which may be a stash or controller account. +/// +/// Note: once the controller is completely deprecated, this enum can also be deprecated in favor of +/// the stash account. Tracking issue: . +#[derive(Clone, Debug)] +pub enum StakingAccount { + Stash(AccountId), + Controller(AccountId), +} + +#[cfg(feature = "std")] +impl From for StakingAccount { + fn from(account: AccountId) -> Self { + StakingAccount::Stash(account) + } +} + /// Representation of the status of a staker. 
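Node-side callers reach the relocated `SessionKeys` API exactly as before. A rough sketch (assumes a client whose runtime API implements `SessionKeys`; error handling elided, names illustrative):

use sp_api::ProvideRuntimeApi;
use sp_runtime::traits::Block as BlockT;

fn rotate_session_keys<Block, Client>(client: &Client, at: Block::Hash) -> Vec<u8>
where
    Block: BlockT,
    Client: ProvideRuntimeApi<Block>,
    Client::Api: sp_session::SessionKeys<Block>,
{
    // Asks the runtime to generate fresh session keys (stored via runtime externalities)
    // and returns their concatenated SCALE-encoded public parts.
    client
        .runtime_api()
        .generate_session_keys(at, None)
        .expect("`SessionKeys` runtime API is available")
}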
#[derive(RuntimeDebug, TypeInfo)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize, PartialEq, Eq, Clone))] @@ -263,6 +284,9 @@ pub trait StakingInterface { } } + #[cfg(feature = "runtime-benchmarks")] + fn max_exposure_page_size() -> Page; + #[cfg(feature = "runtime-benchmarks")] fn add_era_stakers( current_era: &EraIndex, @@ -274,4 +298,122 @@ pub trait StakingInterface { fn set_current_era(era: EraIndex); } +/// The amount of exposure for an era that an individual nominator has (susceptible to slashing). +#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Encode, Decode, RuntimeDebug, TypeInfo)] +pub struct IndividualExposure { + /// The stash account of the nominator in question. + pub who: AccountId, + /// Amount of funds exposed. + #[codec(compact)] + pub value: Balance, +} + +/// A snapshot of the stake backing a single validator in the system. +#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Encode, Decode, RuntimeDebug, TypeInfo)] +pub struct Exposure { + /// The total balance backing this validator. + #[codec(compact)] + pub total: Balance, + /// The validator's own stash that is exposed. + #[codec(compact)] + pub own: Balance, + /// The portions of nominators stashes that are exposed. + pub others: Vec>, +} + +impl Default for Exposure { + fn default() -> Self { + Self { total: Default::default(), own: Default::default(), others: vec![] } + } +} + +impl< + AccountId: Clone, + Balance: HasCompact + AtLeast32BitUnsigned + Copy + codec::MaxEncodedLen, + > Exposure +{ + /// Splits an `Exposure` into `PagedExposureMetadata` and multiple chunks of + /// `IndividualExposure` with each chunk having maximum of `page_size` elements. + pub fn into_pages( + self, + page_size: Page, + ) -> (PagedExposureMetadata, Vec>) { + let individual_chunks = self.others.chunks(page_size as usize); + let mut exposure_pages: Vec> = + Vec::with_capacity(individual_chunks.len()); + + for chunk in individual_chunks { + let mut page_total: Balance = Zero::zero(); + let mut others: Vec> = + Vec::with_capacity(chunk.len()); + for individual in chunk.iter() { + page_total.saturating_accrue(individual.value); + others.push(IndividualExposure { + who: individual.who.clone(), + value: individual.value, + }) + } + + exposure_pages.push(ExposurePage { page_total, others }); + } + + ( + PagedExposureMetadata { + total: self.total, + own: self.own, + nominator_count: self.others.len() as u32, + page_count: exposure_pages.len() as Page, + }, + exposure_pages, + ) + } +} + +/// A snapshot of the stake backing a single validator in the system. +#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Encode, Decode, RuntimeDebug, TypeInfo)] +pub struct ExposurePage { + /// The total balance of this chunk/page. + #[codec(compact)] + pub page_total: Balance, + /// The portions of nominators stashes that are exposed. + pub others: Vec>, +} + +impl Default for ExposurePage { + fn default() -> Self { + ExposurePage { page_total: Default::default(), others: vec![] } + } +} + +/// Metadata for Paged Exposure of a validator such as total stake across pages and page count. +/// +/// In combination with the associated `ExposurePage`s, it can be used to reconstruct a full +/// `Exposure` set of a validator. This is useful for cases where we want to query full set of +/// `Exposure` as one page (for backward compatibility). 
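A small worked example of the paging above (illustrative; `u64` and `u128` stand in for the runtime's `AccountId` and `Balance` types):

use sp_staking::{Exposure, IndividualExposure};

fn paging_example() {
    let exposure = Exposure::<u64, u128> {
        total: 1_000,
        own: 400,
        others: vec![
            IndividualExposure { who: 1, value: 300 },
            IndividualExposure { who: 2, value: 200 },
            IndividualExposure { who: 3, value: 100 },
        ],
    };

    // With `page_size = 2` the three nominators split into two pages: [1, 2] and [3].
    let (meta, pages) = exposure.into_pages(2);
    assert_eq!((meta.total, meta.own), (1_000, 400));
    assert_eq!((meta.nominator_count, meta.page_count), (3, 2));
    assert_eq!(pages[0].page_total, 500); // 300 + 200
    assert_eq!(pages[1].page_total, 100);
}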
+#[derive( + PartialEq, + Eq, + PartialOrd, + Ord, + Clone, + Encode, + Decode, + RuntimeDebug, + TypeInfo, + Default, + MaxEncodedLen, +)] +pub struct PagedExposureMetadata { + /// The total balance backing this validator. + #[codec(compact)] + pub total: Balance, + /// The validator's own stash that is exposed. + #[codec(compact)] + pub own: Balance, + /// Number of nominators backing this validator. + pub nominator_count: u32, + /// Number of pages of nominators. + pub page_count: Page, +} + sp_core::generate_feature_enabled_macro!(runtime_benchmarks_enabled, feature = "runtime-benchmarks", $); diff --git a/substrate/primitives/state-machine/src/trie_backend.rs b/substrate/primitives/state-machine/src/trie_backend.rs index afa80addd2bae1bb8694ded9c84f4b11a673f65f..7b337b5fd54036059b0c777ff03e51bdfbc74cca 100644 --- a/substrate/primitives/state-machine/src/trie_backend.rs +++ b/substrate/primitives/state-machine/src/trie_backend.rs @@ -52,7 +52,10 @@ pub trait TrieCacheProvider { /// Return a [`trie_db::TrieDB`] compatible cache. /// - /// The `storage_root` parameter should be the storage root of the used trie. + /// The `storage_root` parameter *must* be the storage root of the trie this cache is used for. + /// + /// NOTE: Implementors should use the `storage_root` to differentiate between storage keys that + /// may belong to different tries. fn as_trie_db_cache(&self, storage_root: H::Out) -> Self::Cache<'_>; /// Returns a cache that can be used with a [`trie_db::TrieDBMut`]. diff --git a/substrate/primitives/statement-store/Cargo.toml b/substrate/primitives/statement-store/Cargo.toml index cf41d9f829920ee2d31b9a39960618d477df4f3a..658229cef220b1be0c8ac25bf5b0993ebd18413d 100644 --- a/substrate/primitives/statement-store/Cargo.toml +++ b/substrate/primitives/statement-store/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } sp-core = { path = "../core", default-features = false} sp-runtime = { path = "../runtime", default-features = false} sp-std = { path = "../std", default-features = false} @@ -37,10 +37,12 @@ rand = { version = "0.8.5", features = ["small_rng"], optional = true } default = [ "std" ] std = [ "aes-gcm", + "aes-gcm?/std", "codec/std", "curve25519-dalek", "ed25519-dalek", "hkdf", + "hkdf?/std", "rand", "scale-info/std", "sha2", diff --git a/substrate/primitives/test-primitives/Cargo.toml b/substrate/primitives/test-primitives/Cargo.toml index 91d532b6e16caa2584a68eff94408ae47f26c6ff..a3775d7f61f7b07089910992d3afa1e1283ef671 100644 --- a/substrate/primitives/test-primitives/Cargo.toml +++ b/substrate/primitives/test-primitives/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.188", default-features = false, features = ["derive"], optional = true } sp-application-crypto = { path = "../application-crypto", default-features = false} sp-core = { path = "../core", default-features = false} diff --git 
a/substrate/primitives/transaction-storage-proof/Cargo.toml b/substrate/primitives/transaction-storage-proof/Cargo.toml index 9efff2892bd66218f54346beb232c07c07f246ef..5a35dd8f11f7daa5e084968ec8719dfe81b35983 100644 --- a/substrate/primitives/transaction-storage-proof/Cargo.toml +++ b/substrate/primitives/transaction-storage-proof/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = { version = "0.1.57", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } sp-core = { path = "../core", optional = true} sp-inherents = { path = "../inherents", default-features = false} sp-runtime = { path = "../runtime", default-features = false} diff --git a/substrate/primitives/trie/Cargo.toml b/substrate/primitives/trie/Cargo.toml index 0b54514f6003d9c10f96640ff61172cee5bf8d2b..0822d84a76e6da8bcb2c31c341134c44ecb949d2 100644 --- a/substrate/primitives/trie/Cargo.toml +++ b/substrate/primitives/trie/Cargo.toml @@ -26,7 +26,8 @@ lazy_static = { version = "1.4.0", optional = true } memory-db = { version = "0.32.0", default-features = false } nohash-hasher = { version = "0.2.0", optional = true } parking_lot = { version = "0.12.1", optional = true } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +rand = { version = "0.8", optional = true } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } thiserror = { version = "1.0.48", optional = true } tracing = { version = "0.1.29", optional = true } trie-db = { version = "0.28.0", default-features = false } @@ -53,6 +54,7 @@ std = [ "memory-db/std", "nohash-hasher", "parking_lot", + "rand", "scale-info/std", "schnellru", "sp-core/std", diff --git a/substrate/primitives/trie/src/cache/shared_cache.rs b/substrate/primitives/trie/src/cache/shared_cache.rs index 28b3274fde11ef5d1acae13668805ac3efb6d457..01ac41a1e47d995a99bea354d7f6dfaf82c433c2 100644 --- a/substrate/primitives/trie/src/cache/shared_cache.rs +++ b/substrate/primitives/trie/src/cache/shared_cache.rs @@ -30,7 +30,11 @@ use std::{ use trie_db::{node::NodeOwned, CachedValue}; lazy_static::lazy_static! 
{ - static ref RANDOM_STATE: ahash::RandomState = ahash::RandomState::default(); + static ref RANDOM_STATE: ahash::RandomState = { + use rand::Rng; + let mut rng = rand::thread_rng(); + ahash::RandomState::generate_with(rng.gen(), rng.gen(), rng.gen(), rng.gen()) + }; } pub struct SharedNodeCacheLimiter { diff --git a/substrate/primitives/version/Cargo.toml b/substrate/primitives/version/Cargo.toml index 1ab51a08bbe3ca476e58ef2501a8d3ea61eaa3c6..41a83f01f66a9aed891f348c1e5036b9f47089c3 100644 --- a/substrate/primitives/version/Cargo.toml +++ b/substrate/primitives/version/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } impl-serde = { version = "0.4.0", default-features = false, optional = true } parity-wasm = { version = "0.45", optional = true } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.188", default-features = false, features = ["derive", "alloc"], optional = true } thiserror = { version = "1.0.48", optional = true } sp-core-hashing-proc-macro = { path = "../core/hashing/proc-macro" } diff --git a/substrate/primitives/version/src/lib.rs b/substrate/primitives/version/src/lib.rs index bd8408bb4a48ae28b51c7ead0012bfa240e26ddf..13f4520f6e69480466c77644835f23c1dc79721f 100644 --- a/substrate/primitives/version/src/lib.rs +++ b/substrate/primitives/version/src/lib.rs @@ -175,17 +175,24 @@ pub struct RuntimeVersion { /// will not attempt to author blocks unless this is equal to its native runtime. pub authoring_version: u32, - /// Version of the runtime specification. A full-node will not attempt to use its native - /// runtime in substitute for the on-chain Wasm runtime unless all of `spec_name`, - /// `spec_version` and `authoring_version` are the same between Wasm and native. + /// Version of the runtime specification. + /// + /// A full-node will not attempt to use its native runtime in substitute for the on-chain + /// Wasm runtime unless all of `spec_name`, `spec_version` and `authoring_version` are the same + /// between Wasm and native. + /// + /// This number should never decrease. pub spec_version: u32, - /// Version of the implementation of the specification. Nodes are free to ignore this; it - /// serves only as an indication that the code is different; as long as the other two versions - /// are the same then while the actual code may be different, it is nonetheless required to - /// do the same thing. - /// Non-consensus-breaking optimizations are about the only changes that could be made which - /// would result in only the `impl_version` changing. + /// Version of the implementation of the specification. + /// + /// Nodes are free to ignore this; it serves only as an indication that the code is different; + /// as long as the other two versions are the same then while the actual code may be different, + /// it is nonetheless required to do the same thing. Non-consensus-breaking optimizations are + /// about the only changes that could be made which would result in only the `impl_version` + /// changing. + /// + /// This number can be reverted to `0` after a [`spec_version`](Self::spec_version) bump. pub impl_version: u32, /// List of supported API "features" along with their versions. 
@@ -198,15 +205,25 @@ pub struct RuntimeVersion { )] pub apis: ApisVec, - /// All existing dispatches are fully compatible when this number doesn't change. If this - /// number changes, then `spec_version` must change, also. + /// All existing calls (dispatchables) are fully compatible when this number doesn't change. If + /// this number changes, then [`spec_version`](Self::spec_version) must also change. /// - /// This number must change when an existing dispatchable (module ID, dispatch ID) is changed, + /// This number must change when an existing call (pallet index, call index) is changed, /// either through an alteration in its user-level semantics, a parameter - /// added/removed/changed, a dispatchable being removed, a module being removed, or a - /// dispatchable/module changing its index. + /// added/removed, a parameter type changed, or a call/pallet changing its index. An alteration + /// of the user-level semantics is, for example, when a call that used to be `transfer` becomes + /// `transfer_all`; the semantics of the call have changed completely. + /// + /// Removing a pallet or a call doesn't require a *bump* as long as no pallet or call is put at + /// the same index. Removing doesn't require a bump because the chain will reject a transaction + /// referencing the removed call/pallet while decoding, so the user isn't at risk of executing + /// an unknown call. FRAME runtime devs have control over the index of a call/pallet and can + /// thereby prevent an index from being reused. + /// + /// Adding a new pallet or call also doesn't require a *bump* as long as it doesn't reuse + /// any previously used index. /// - /// It need *not* change when a new module is added or when a dispatchable is added. + /// This number should never decrease. pub transaction_version: u32, /// Version of the state implementation used by this runtime. 
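For orientation, the following is a minimal sketch (not part of this diff) of how the fields documented above typically appear in a runtime's `RuntimeVersion` declaration. The name "example" and the concrete version numbers are hypothetical, and `RUNTIME_API_VERSIONS` stands for the constant that `impl_runtime_apis!` generates in a real runtime:

```rust
use sp_runtime::create_runtime_str;
use sp_version::RuntimeVersion;

pub const VERSION: RuntimeVersion = RuntimeVersion {
	spec_name: create_runtime_str!("example"),
	impl_name: create_runtime_str!("example"),
	authoring_version: 1,
	// Bump whenever runtime logic changes; this should never decrease.
	spec_version: 101,
	// May be reset to 0 after a `spec_version` bump.
	impl_version: 0,
	// Generated by `impl_runtime_apis!` in a real runtime.
	apis: RUNTIME_API_VERSIONS,
	// Bump when the index, parameters, or semantics of an existing call change.
	transaction_version: 2,
	state_version: 1,
};
```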
diff --git a/substrate/primitives/wasm-interface/Cargo.toml b/substrate/primitives/wasm-interface/Cargo.toml index 92f884e3fd2792d1f56457d9a90dcf1c5dbbf797..c7413fec43c42ff4039b9043fb89d14787de4071 100644 --- a/substrate/primitives/wasm-interface/Cargo.toml +++ b/substrate/primitives/wasm-interface/Cargo.toml @@ -23,5 +23,5 @@ sp-std = { path = "../std", default-features = false} [features] default = [ "std" ] -std = [ "codec/std", "log", "sp-std/std", "wasmtime" ] +std = [ "codec/std", "log/std", "sp-std/std", "wasmtime" ] wasmtime = [ "anyhow", "dep:wasmtime" ] diff --git a/substrate/primitives/weights/Cargo.toml b/substrate/primitives/weights/Cargo.toml index 03e06aad086535c8b5313a81c284c0e8a90cd1cb..6642f97029ff67b73c4072b7e930f579ae2507c6 100644 --- a/substrate/primitives/weights/Cargo.toml +++ b/substrate/primitives/weights/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.188", default-features = false, optional = true, features = ["derive", "alloc"] } smallvec = "1.11.0" sp-arithmetic = { path = "../arithmetic", default-features = false} diff --git a/substrate/primitives/weights/src/lib.rs b/substrate/primitives/weights/src/lib.rs index 36cf864dd5389b08c5e692fc691050b3895f8cb1..ececb622fa0f55d401498a408ff8bb7195efa37d 100644 --- a/substrate/primitives/weights/src/lib.rs +++ b/substrate/primitives/weights/src/lib.rs @@ -248,7 +248,25 @@ where } } +/// Implementor of [`WeightToFee`] such that it maps any unit of weight to a fixed fee. +pub struct FixedFee<const F: u32, T>(sp_std::marker::PhantomData<T>); + +impl<const F: u32, T> WeightToFee for FixedFee<F, T> +where + T: BaseArithmetic + From<u32> + Copy + Unsigned, +{ + type Balance = T; + + fn weight_to_fee(_: &Weight) -> Self::Balance { + F.into() + } +} + +/// An implementation of [`WeightToFee`] that collects no fee. +pub type NoFee<T> = FixedFee<0, T>; + /// Implementor of [`WeightToFee`] that uses a constant multiplier. 
+/// /// # Example /// /// ``` diff --git a/substrate/primitives/weights/src/weight_v2.rs b/substrate/primitives/weights/src/weight_v2.rs index 3946cfe42c8d0efb7b7de4811c6ab2a2a8d08c15..d692aaff8f5ab98d7cd3e701f09b85e3c9465d9c 100644 --- a/substrate/primitives/weights/src/weight_v2.rs +++ b/substrate/primitives/weights/src/weight_v2.rs @@ -18,13 +18,10 @@ use codec::{Decode, Encode, MaxEncodedLen}; use core::ops::{Add, AddAssign, Div, Mul, Sub, SubAssign}; use sp_arithmetic::traits::{Bounded, CheckedAdd, CheckedSub, Zero}; -use sp_debug_derive::RuntimeDebug; use super::*; -#[derive( - Encode, Decode, MaxEncodedLen, TypeInfo, Eq, PartialEq, Copy, Clone, RuntimeDebug, Default, -)] +#[derive(Encode, Decode, MaxEncodedLen, TypeInfo, Eq, PartialEq, Copy, Clone, Debug, Default)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct Weight { #[codec(compact)] diff --git a/substrate/scripts/ci/deny.toml b/substrate/scripts/ci/deny.toml index ca059e384a358b242f7ba0e524f9b534df91d74f..1afb4a4f693d13f4e28f1d8ad4a86a6fe4e35743 100644 --- a/substrate/scripts/ci/deny.toml +++ b/substrate/scripts/ci/deny.toml @@ -38,7 +38,7 @@ exceptions = [ { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "chain-spec-builder" }, { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "mmr-gadget" }, { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "node-bench" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "node-cli" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "staging-node-cli" }, { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "node-inspect" }, { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "node-template-release" }, { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "node-testing" }, diff --git a/substrate/scripts/ci/monitoring/alerting-rules/alerting-rules.yaml b/substrate/scripts/ci/monitoring/alerting-rules/alerting-rules.yaml index 4171f92f68feff11d2f10cbeeea63be64f80f546..ac89c58bfc3e223831795fb160d5d51816c7f6f4 100644 --- a/substrate/scripts/ci/monitoring/alerting-rules/alerting-rules.yaml +++ b/substrate/scripts/ci/monitoring/alerting-rules/alerting-rules.yaml @@ -146,11 +146,7 @@ groups: hours.' 
- alert: UnboundedChannelPersistentlyLarge - expr: '( - (substrate_unbounded_channel_len{action = "send"} - - ignoring(action) substrate_unbounded_channel_len{action = "received"}) - or on(instance) substrate_unbounded_channel_len{action = "send"} - ) >= 200' + expr: 'substrate_unbounded_channel_size >= 200' for: 5m labels: severity: warning diff --git a/substrate/scripts/ci/node-template-release.sh b/substrate/scripts/ci/node-template-release.sh index 09ef98e04627a023ed0892e96a3a5bc798df5a11..bc57b0f538efbaed123fdff8be9a178f12051778 100755 --- a/substrate/scripts/ci/node-template-release.sh +++ b/substrate/scripts/ci/node-template-release.sh @@ -3,7 +3,9 @@ set -e export TERM=xterm -PROJECT_ROOT=`git rev-parse --show-toplevel` +SUBSTRATE_FOLDER="/substrate" +GIT_ROOT=`git rev-parse --show-toplevel` +PROJECT_ROOT=${GIT_ROOT}${SUBSTRATE_FOLDER} if [ "$#" -ne 1 ]; then echo "node-template-release.sh path_to_target_archive" @@ -11,6 +13,6 @@ if [ "$#" -ne 1 ]; then fi PATH_TO_ARCHIVE=$1 -cd $PROJECT_ROOT/scripts/ci/node-template-release +cd $PROJECT_ROOT/scripts/ci/node-template-release cargo run $PROJECT_ROOT/bin/node-template $PROJECT_ROOT/$PATH_TO_ARCHIVE diff --git a/substrate/scripts/ci/node-template-release/src/main.rs b/substrate/scripts/ci/node-template-release/src/main.rs index 850535e4045f9fa2e63caa782ad6f66efd3ef28c..fc8089f3051fe14a7608bcfad1418069d3bfcff9 100644 --- a/substrate/scripts/ci/node-template-release/src/main.rs +++ b/substrate/scripts/ci/node-template-release/src/main.rs @@ -32,7 +32,7 @@ use tar; use tempfile; use toml_edit::{self, value, Array, Item, Table}; -const SUBSTRATE_GIT_URL: &str = "https://github.com/paritytech/substrate.git"; +const SUBSTRATE_GIT_URL: &str = "https://github.com/paritytech/polkadot-sdk.git"; type CargoToml = toml_edit::Document; @@ -63,7 +63,7 @@ fn copy_node_template(node_template: &Path, dest_path: &Path) { /// Find all `Cargo.toml` files in the given path. 
fn find_cargo_tomls(path: &PathBuf) -> Vec { - let path = format!("{}/**/*.toml", path.display()); + let path = format!("{}/**/Cargo.toml", path.display()); let glob = glob::glob(&path).expect("Generates globbing pattern"); @@ -196,7 +196,6 @@ fn update_root_cargo_toml( ) { let mut workspace = Table::new(); workspace.insert("members", value(Array::from_iter(members.iter()))); - let mut workspace_dependencies = Table::new(); deps.values() .flatten() @@ -212,6 +211,10 @@ fn update_root_cargo_toml( workspace_dependencies[name]["rev"] = value(commit_id); }); + let mut package = Table::new(); + package.insert("edition", value("2021")); + workspace.insert("package", Item::Table(package)); + workspace.insert("dependencies", Item::Table(workspace_dependencies)); cargo_toml.insert("workspace", Item::Table(workspace)); @@ -428,9 +431,12 @@ frame-system = { workspace = true } let expected_toml = r#"[workspace] members = ["node", "pallets/template", "runtime"] +[workspace.package] +edition = "2021" + [workspace.dependencies] -frame-system = { version = "4.0.0-dev", default-features = true, git = "https://github.com/paritytech/substrate.git", rev = "commit_id" } -sp-io = { version = "7.0.0", git = "https://github.com/paritytech/substrate.git", rev = "commit_id" } +frame-system = { version = "4.0.0-dev", default-features = true, git = "https://github.com/paritytech/polkadot-sdk.git", rev = "commit_id" } +sp-io = { version = "7.0.0", git = "https://github.com/paritytech/polkadot-sdk.git", rev = "commit_id" } [profile] diff --git a/substrate/src/lib.rs b/substrate/src/lib.rs index 409515ea505bfb8bcc2f32d63d3db9b3d210c992..b5a583fcfcf1ed1f67ac3e9473392807dd4ae1a9 100644 --- a/substrate/src/lib.rs +++ b/substrate/src/lib.rs @@ -55,9 +55,22 @@ //! is merely a matter of the Wasm blob being changed in the chain state, which is, in principle, //! same as updating an account's balance. //! -//! To learn more about the substrate architecture using some visuals, see [`substrate_diagram`]. +//! ### Architecture //! -//! `FRAME`, Substrate's default runtime development library takes the above even further by +//! Therefore, Substrate can be visualized as follows: +#![doc = simple_mermaid::mermaid!("../../docs/mermaid/substrate_simple.mmd")] +//! +//! The client and the runtime of course need to communicate. This is done through two concepts: +//! +//! 1. Host functions: a way for the (Wasm) runtime to talk to the client. All host functions are +//! defined in [`sp-io`]. For example, [`sp-io::storage`] are the set of host functions that +//! allow the runtime to read and write data to the on-chain state. +//! 2. Runtime APIs: a way for the client to talk to the Wasm runtime. Runtime APIs are defined +//! using macros and utilities in [`sp-api`]. For example, [`sp-api::Core`] is the most basic +//! runtime API that any blockchain must implement in order to be able to (re) execute blocks. +#![doc = simple_mermaid::mermaid!("../../docs/mermaid/substrate_client_runtime.mmd")] +//! +//! [`FRAME`], Substrate's default runtime development library takes the above even further by //! embracing a declarative programming model whereby correctness is enhanced and the system is //! highly configurable through parameterization. //! @@ -68,8 +81,8 @@ //! ## How to Get Stared //! //! Most developers want to leave the client side code as-is, and focus on the runtime. To do so, -//! look into the [`frame_support`] crate, which is the entry point crate into runtime development -//! with FRAME. +//! 
look into the [`frame`] crate, which is the entry point crate into runtime development with +//! FRAME. //! //! > Side note, it is entirely possible to craft a substrate-based runtime without FRAME, an //! > example of which can be found [here](https://github.com/JoshOrndorff/frameless-node-template). @@ -79,8 +92,8 @@ //! * **Templates**: A number of substrate-based templates exist and they can be used for various //! purposes, with zero to little additional code needed. All of these templates contain runtimes //! that are highly configurable and are likely suitable for basic needs. -//! * `FRAME`: If need, one can customize that runtime even further, by using `FRAME` and developing -//! custom modules. +//! * [`FRAME`]: If need, one can customize that runtime even further, by using `FRAME` and +//! developing custom modules. //! * **Core**: To the contrary, some developers may want to customize the client side software to //! achieve novel goals such as a new consensus engine, or a new database backend. While //! Substrate's main configurability is in the runtime, the client is also highly generic and can @@ -100,10 +113,9 @@ //! * `sp-*` (short for *substrate-primitives*) crates, located under `./primitives` folder. These //! are the traits that glue the client and runtime together, but are not opinionated about what //! framework is using for building the runtime. Notable examples are [`sp-api`] and [`sp-io`], -//! which form the communication bridge between the client and runtime, as explained in -//! [`substrate_diagram`]. +//! which form the communication bridge between the client and runtime. //! * `pallet-*` and `frame-*` crates, located under `./frame` folder. These are the crates related -//! to FRAME. See [`frame_support`] for more information. +//! to FRAME. See [`frame`] for more information. //! //! ### Wasm Build //! @@ -129,8 +141,9 @@ //! side features. The corresponding runtime, called [`kitchensink_runtime`] contains all of the //! modules that are provided with `FRAME`. This node and runtime is only used for testing and //! demonstration. -//! * [`chain-spec-builder`]: Utility to build more detailed chain-specs for the aforementioned -//! node. Other projects typically contain a `build-spec` subcommand that does the same. +//! * [`chain-spec-builder`]: Utility to build more detailed [chain-spec][`sc-chain-spec`] for the +//! aforementioned node. Other projects typically contain a `build-spec` subcommand that does the +//! same. //! * [`node-template`]: a template node that contains a minimal set of features and can act as a //! starting point of a project. //! * [`subkey`]: Substrate's key management utility. @@ -157,13 +170,16 @@ //! through which Polkadot can be utilized is by building "parachains", blockchains that are //! connected to Polkadot's shared security. //! -//! To build a parachain, one could use [`Cumulus`](https://github.com/paritytech/polkadot-sdk/tree/master/cumulus), the -//! library on top of Substrate, empowering any substrate-based chain to be a Polkadot parachain. +//! To build a parachain, one could use +//! [`Cumulus`](https://github.com/paritytech/polkadot-sdk/tree/master/cumulus), the library on top +//! of Substrate, empowering any substrate-based chain to be a Polkadot parachain. //! //! ## Where To Go Next? //! //! Additional noteworthy crates within substrate: //! +//! - Chain specification of a Substrate node: +//! - [`sc-chain-spec`] //! - RPC APIs of a Substrate node: [`sc-rpc-api`]/[`sc-rpc`] //! 
- CLI Options of a Substrate node: [`sc-cli`] //! - All of the consensus related crates provided by Substrate: @@ -199,10 +215,12 @@ //! https://img.shields.io/badge/polkadot-E6007A?style=for-the-badge&logo=polkadot&logoColor=white //! [github]: //! https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github +//! [`FRAME`]: ../frame/index.html //! [`sp-io`]: ../sp_io/index.html //! [`sp-api`]: ../sp_api/index.html //! [`sp-api`]: ../sp_api/index.html //! [`sc-client-db`]: ../sc_client_db/index.html +//! [`sc-chain-spec`]: ../sc_chain_spec/index.html //! [`sc-network`]: ../sc_network/index.html //! [`sc-rpc-api`]: ../sc_rpc_api/index.html //! [`sc-rpc`]: ../sc_rpc/index.html @@ -222,76 +240,3 @@ #![deny(rustdoc::broken_intra_doc_links)] #![deny(rustdoc::private_intra_doc_links)] - -#[cfg_attr(doc, aquamarine::aquamarine)] -/// In this module, we explore substrate at a more depth. First, let's establish substrate being -/// divided into a client and runtime. -/// -/// ```mermaid -/// graph TB -/// subgraph Substrate -/// direction LR -/// subgraph Client -/// end -/// subgraph Runtime -/// end -/// end -/// ``` -/// -/// The client and the runtime of course need to communicate. This is done through two concepts: -/// -/// 1. Host functions: a way for the (Wasm) runtime to talk to the client. All host functions are -/// defined in [`sp-io`]. For example, [`sp-io::storage`] are the set of host functions that -/// allow the runtime to read and write data to the on-chain state. -/// 2. Runtime APIs: a way for the client to talk to the Wasm runtime. Runtime APIs are defined -/// using macros and utilities in [`sp-api`]. For example, [`sp-api::Core`] is the most basic -/// runtime API that any blockchain must implement in order to be able to (re) execute blocks. -/// -/// ```mermaid -/// graph TB -/// subgraph Substrate -/// direction LR -/// subgraph Client -/// end -/// subgraph Runtime -/// end -/// Client --runtime-api--> Runtime -/// Runtime --host-functions--> Client -/// end -/// ``` -/// -/// Finally, let's expand the diagram a bit further and look at the internals of each component: -/// -/// ```mermaid -/// graph TB -/// subgraph Substrate -/// direction LR -/// subgraph Client -/// Database -/// Networking -/// Consensus -/// end -/// subgraph Runtime -/// subgraph FRAME -/// direction LR -/// Governance -/// Currency -/// Staking -/// Identity -/// end -/// end -/// Client --runtime-api--> Runtime -/// Runtime --host-functions--> Client -/// end -/// ``` -/// -/// As noted the runtime contains all of the application specific logic of the blockchain. This is -/// usually written with `FRAME`. The client, on the other hand, contains reusable and generic -/// components that are not specific to one single blockchain, such as networking, database, and the -/// consensus engine. -/// -/// [`sp-io`]: ../../sp_io/index.html -/// [`sp-api`]: ../../sp_api/index.html -/// [`sp-io::storage`]: ../../sp_io/storage/index.html -/// [`sp-api::Core`]: ../../sp_api/trait.Core.html -pub mod substrate_diagram {} diff --git a/substrate/src/src/lib.rs b/substrate/src/src/lib.rs deleted file mode 100644 index 16a606778965792fcfa5e9f1639ef960e47a7fe2..0000000000000000000000000000000000000000 --- a/substrate/src/src/lib.rs +++ /dev/null @@ -1,297 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! # Substrate -//! -//! Substrate is a Rust framework for building blockchains in a modular and extensible way. While in -//! itself un-opinionated, it is the main engine behind the Polkadot ecosystem. -//! -//! [![github]](https://github.com/paritytech/substrate/) - [![polkadot]](https://polkadot.network) -//! -//! This crate in itself does not contain any code and is just meant ot be a documentation hub for -//! substrate-based crates. -//! -//! ## Overview -//! -//! Substrate approaches blockchain development with an acknowledgement of a few self-evident -//! truths: -//! -//! 1. Society and technology evolves. -//! 2. Humans are fallible. -//! -//! This, specifically, makes the task of designing a correct, safe and long-lasting blockchain -//! system hard. -//! -//! Nonetheless, in order to achieve this goal, substrate embraces the following: -//! -//! 1. Use of **Rust** as a modern, and safe programming language, which limits human error through -//! various means, most notably memory safety. -//! 2. Substrate is written from the ground-up with a generic, modular and extensible design. This -//! ensures that software components can be easily swapped and upgraded. Examples of this is -//! multiple consensus mechanisms provided by Substrate, as listed below. -//! 3. Lastly, the final blockchain system created with the above properties needs to be -//! upgradeable. In order to achieve this, Substrate is designed as a meta-protocol, whereby the -//! application logic of the blockchain (called "Runtime") is encoded as a Wasm blob, and is -//! stored onchain. The rest of the system (called "Client") acts as the executor of the Wasm -//! blob. -//! -//! In essence, the meta-protocol of all Substrate based chains is the "Runtime as Wasm blob" -//! accord. This enables the Runtime to become inherently upgradeable (without forks). The upgrade -//! is merely a matter of the Wasm blob being changed in the chain state, which is, in principle, -//! same as updating an account's balance. -//! -//! To learn more about the substrate architecture using some visuals, see [`substrate_diagram`]. -//! -//! `FRAME`, Substrate's default runtime development library takes the above even further by -//! embracing a declarative programming model whereby correctness is enhanced and the system is -//! highly configurable through parameterization. -//! -//! All in all, this design enables all substrate-based chains to achieve forkless, self-enacting -//! upgrades out of the box. Combined with governance abilities that are shipped with `FRAME`, this -//! enables a chain to survive the test of time. -//! -//! ## How to Get Stared -//! -//! Most developers want to leave the client side code as-is, and focus on the runtime. To do so, -//! 
look into the [`frame_support`] crate, which is the entry point crate into runtime development -//! with FRAME. -//! -//! > Side note, it is entirely possible to craft a substrate-based runtime without FRAME, an -//! > example of which can be found [here](https://github.com/JoshOrndorff/frameless-node-template). -//! -//! In more broad terms, the following avenues exist into developing with substrate: -//! -//! * **Templates**: A number of substrate-based templates exist and they can be used for various -//! purposes, with zero to little additional code needed. All of these templates contain runtimes -//! that are highly configurable and are likely suitable for basic needs. -//! * `FRAME`: If need, one can customize that runtime even further, by using `FRAME` and developing -//! custom modules. -//! * **Core**: To the contrary, some developers may want to customize the client side software to -//! achieve novel goals such as a new consensus engine, or a new database backend. While -//! Substrate's main configurability is in the runtime, the client is also highly generic and can -//! be customized to a great extent. -//! -//! ## Structure -//! -//! Substrate is a massive cargo workspace with hundreds of crates, therefore it is useful to know -//! how to navigate its crates. -//! -//! In broad terms, it is divided into three categories: -//! -//! * `sc-*` (short for *substrate-client*) crates, located under `./client` folder. These are all -//! the client crates. Notable examples are crates such as [`sc-network`], various consensus -//! crates, [`sc-rpc-api`] and [`sc-client-db`], all of which are expected to reside in the client -//! side. -//! * `sp-*` (short for *substrate-primitives*) crates, located under `./primitives` folder. These -//! are the traits that glue the client and runtime together, but are not opinionated about what -//! framework is using for building the runtime. Notable examples are [`sp-api`] and [`sp-io`], -//! which form the communication bridge between the client and runtime, as explained in -//! [`substrate_diagram`]. -//! * `pallet-*` and `frame-*` crates, located under `./frame` folder. These are the crates related -//! to FRAME. See [`frame_support`] for more information. -//! -//! ### Wasm Build -//! -//! Many of the Substrate crates, such as entire `sp-*`, need to compile to both Wasm (when a Wasm -//! runtime is being generated) and native (for example, when testing). To achieve this, Substrate -//! follows the convention of the Rust community, and uses a `feature = "std"` to signify that a -//! crate is being built with the standard library, and is built for native. Otherwise, it is built -//! for `no_std`. -//! -//! This can be summarized in `#![cfg_attr(not(feature = "std"), no_std)]`, which you can often find -//! in any Substrate-based runtime. -//! -//! Substrate-based runtimes use [`substrate-wasm-builder`] in their `build.rs` to automatically -//! build their Wasm files as a part of normal build commandsOnce built, the wasm file is placed in -//! `./target/{debug|release}/wbuild/{runtime_name}.wasm`. -//! -//! ### Binaries -//! -//! Multiple binaries are shipped with substrate, the most important of which are located in the -//! `./bin` folder. -//! -//! * [`node`] is an extensive substrate node that contains the superset of all runtime and client -//! side features. The corresponding runtime, called [`kitchensink_runtime`] contains all of the -//! modules that are provided with `FRAME`. This node and runtime is only used for testing and -//! 
demonstration. -//! * [`chain-spec-builder`]: Utility to build more detailed chain-specs for the aforementioned -//! node. Other projects typically contain a `build-spec` subcommand that does the same. -//! * [`node-template`]: a template node that contains a minimal set of features and can act as a -//! starting point of a project. -//! * [`subkey`]: Substrate's key management utility. -//! -//! ### Anatomy of a Binary Crate -//! -//! From the above, [`node`] and [`node-template`] are essentially blueprints of a substrate-based -//! project, as the name of the latter is implying. Each substrate-based project typically contains -//! the following: -//! -//! * Under `./runtime`, a `./runtime/src/lib.rs` which is the top level runtime amalgamator file. -//! This file typically contains the [`frame_support::construct_runtime`] macro, which is the -//! final definition of a runtime. -//! -//! * Under `./node`, a `main.rs`, which is the point, and a `./service.rs`, which contains all the -//! client side components. Skimming this file yields an overview of the networking, database, -//! consensus and similar client side components. -//! -//! > The above two are conventions, not rules. -//! -//! ## Parachain? -//! -//! As noted above, Substrate is the main engine behind the Polkadot ecosystem. One of the ways -//! through which Polkadot can be utilized is by building "parachains", blockchains that are -//! connected to Polkadot's shared security. -//! -//! To build a parachain, one could use [`Cumulus`](https://github.com/paritytech/cumulus/), the -//! library on top of Substrate, empowering any substrate-based chain to be a Polkadot parachain. -//! -//! ## Where To Go Next? -//! -//! Additional noteworthy crates within substrate: -//! -//! - RPC APIs of a Substrate node: [`sc-rpc-api`]/[`sc-rpc`] -//! - CLI Options of a Substrate node: [`sc-cli`] -//! - All of the consensus related crates provided by Substrate: -//! - [`sc-consensus-aura`] -//! - [`sc-consensus-babe`] -//! - [`sc-consensus-grandpa`] -//! - [`sc-consensus-beefy`] -//! - [`sc-consensus-manual-seal`] -//! - [`sc-consensus-pow`] -//! -//! Additional noteworthy external resources: -//! -//! - [Substrate Developer Hub](https://substrate.dev) -//! - [Parity Tech's Documentation Hub](https://paritytech.github.io/) -//! - [Frontier: Substrate's Ethereum Compatibility Library](https://paritytech.github.io/frontier/) -//! - [Polkadot Wiki](https://wiki.polkadot.network/en/) -//! -//! Notable upstream crates: -//! -//! - [`parity-scale-codec`](https://github.com/paritytech/parity-scale-codec) -//! - [`parity-db`](https://github.com/paritytech/parity-db) -//! - [`trie`](https://github.com/paritytech/trie) -//! - [`parity-common`](https://github.com/paritytech/parity-common) -//! -//! Templates: -//! -//! - classic [`substrate-node-template`](https://github.com/substrate-developer-hub/substrate-node-template) -//! - classic [cumulus-parachain-template](https://github.com/substrate-developer-hub/substrate-parachain-template) -//! - [`extended-parachain-template`](https://github.com/paritytech/extended-parachain-template) -//! - [`frontier-parachain-template`](https://github.com/paritytech/frontier-parachain-template) -//! -//! [polkadot]: -//! https://img.shields.io/badge/polkadot-E6007A?style=for-the-badge&logo=polkadot&logoColor=white -//! [github]: -//! https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github -//! [`sp-io`]: ../sp_io/index.html -//! [`sp-api`]: ../sp_api/index.html -//! 
[`sp-api`]: ../sp_api/index.html -//! [`sc-client-db`]: ../sc_client_db/index.html -//! [`sc-network`]: ../sc_network/index.html -//! [`sc-rpc-api`]: ../sc_rpc_api/index.html -//! [`sc-rpc`]: ../sc_rpc/index.html -//! [`sc-cli`]: ../sc_cli/index.html -//! [`sc-consensus-aura`]: ../sc_consensus_aura/index.html -//! [`sc-consensus-babe`]: ../sc_consensus_babe/index.html -//! [`sc-consensus-grandpa`]: ../sc_consensus_grandpa/index.html -//! [`sc-consensus-beefy`]: ../sc_consensus_beefy/index.html -//! [`sc-consensus-manual-seal`]: ../sc_consensus_manual_seal/index.html -//! [`sc-consensus-pow`]: ../sc_consensus_pow/index.html -//! [`node`]: ../node_cli/index.html -//! [`node-template`]: ../node_template/index.html -//! [`kitchensink_runtime`]: ../kitchensink_runtime/index.html -//! [`subkey`]: ../subkey/index.html -//! [`chain-spec-builder`]: ../chain_spec_builder/index.html -//! [`substrate-wasm-builder`]: https://crates.io/crates/substrate-wasm-builder - -#![deny(rustdoc::broken_intra_doc_links)] -#![deny(rustdoc::private_intra_doc_links)] - -#[cfg_attr(doc, aquamarine::aquamarine)] -/// In this module, we explore substrate at a more depth. First, let's establish substrate being -/// divided into a client and runtime. -/// -/// ```mermaid -/// graph TB -/// subgraph Substrate -/// direction LR -/// subgraph Client -/// end -/// subgraph Runtime -/// end -/// end -/// ``` -/// -/// The client and the runtime of course need to communicate. This is done through two concepts: -/// -/// 1. Host functions: a way for the (Wasm) runtime to talk to the client. All host functions are -/// defined in [`sp-io`]. For example, [`sp-io::storage`] are the set of host functions that -/// allow the runtime to read and write data to the on-chain state. -/// 2. Runtime APIs: a way for the client to talk to the Wasm runtime. Runtime APIs are defined -/// using macros and utilities in [`sp-api`]. For example, [`sp-api::Core`] is the most basic -/// runtime API that any blockchain must implement in order to be able to (re) execute blocks. -/// -/// ```mermaid -/// graph TB -/// subgraph Substrate -/// direction LR -/// subgraph Client -/// end -/// subgraph Runtime -/// end -/// Client --runtime-api--> Runtime -/// Runtime --host-functions--> Client -/// end -/// ``` -/// -/// Finally, let's expand the diagram a bit further and look at the internals of each component: -/// -/// ```mermaid -/// graph TB -/// subgraph Substrate -/// direction LR -/// subgraph Client -/// Database -/// Networking -/// Consensus -/// end -/// subgraph Runtime -/// subgraph FRAME -/// direction LR -/// Governance -/// Currency -/// Staking -/// Identity -/// end -/// end -/// Client --runtime-api--> Runtime -/// Runtime --host-functions--> Client -/// end -/// ``` -/// -/// As noted the runtime contains all of the application specific logic of the blockchain. This is -/// usually written with `FRAME`. The client, on the other hand, contains reusable and generic -/// components that are not specific to one single blockchain, such as networking, database, and the -/// consensus engine. 
-/// -/// [`sp-io`]: ../../sp_io/index.html -/// [`sp-api`]: ../../sp_api/index.html -/// [`sp-io::storage`]: ../../sp_io/storage/index.html -/// [`sp-api::Core`]: ../../sp_api/trait.Core.html -pub mod substrate_diagram {} diff --git a/substrate/test-utils/cli/Cargo.toml b/substrate/test-utils/cli/Cargo.toml index 9c4167c9b6e191a3574c6caf1004e92aeafb2640..022db32c34f1c5c2308435271474f31208b159fa 100644 --- a/substrate/test-utils/cli/Cargo.toml +++ b/substrate/test-utils/cli/Cargo.toml @@ -20,7 +20,7 @@ nix = "0.26.2" regex = "1.7.3" tokio = { version = "1.22.0", features = ["full"] } node-primitives = { path = "../../bin/node/primitives" } -node-cli = { path = "../../bin/node/cli" } +node-cli = { package = "staging-node-cli", path = "../../bin/node/cli" } sc-cli = { path = "../../client/cli" } sc-service = { path = "../../client/service" } futures = "0.3.28" diff --git a/substrate/test-utils/cli/src/lib.rs b/substrate/test-utils/cli/src/lib.rs index 99119a44d2e9896adc445e53c9dfaa8da3695b7b..d77a89b4dbf4c9048d3ba3546cde0d32d645dc0a 100644 --- a/substrate/test-utils/cli/src/lib.rs +++ b/substrate/test-utils/cli/src/lib.rs @@ -135,7 +135,7 @@ pub fn build_substrate(args: &[&str]) { // Get the root workspace directory from the CARGO_MANIFEST_DIR environment variable let mut cmd = Command::new("cargo"); - cmd.arg("build").arg("-p=node-cli"); + cmd.arg("build").arg("-p=staging-node-cli"); if is_release_build { cmd.arg("--release"); diff --git a/substrate/test-utils/client/Cargo.toml b/substrate/test-utils/client/Cargo.toml index 12863ac184c1192b3ac688ed12496ca5a6fc6928..032fbaf4e654c99d08d0d8b3dc9f2bec1bd04640 100644 --- a/substrate/test-utils/client/Cargo.toml +++ b/substrate/test-utils/client/Cargo.toml @@ -18,7 +18,7 @@ async-trait = "0.1.57" codec = { package = "parity-scale-codec", version = "3.6.1" } futures = "0.3.21" serde = "1.0.188" -serde_json = "1.0.107" +serde_json = "1.0.108" sc-client-api = { path = "../../client/api" } sc-client-db = { path = "../../client/db", default-features = false, features = [ "test-helpers", diff --git a/substrate/test-utils/client/src/client_ext.rs b/substrate/test-utils/client/src/client_ext.rs index 8efa7b5f07f8d71664a79a7b3633afc0f7ca3fb3..73581a4f0efa31c195f3856a8894692a2f71bcfb 100644 --- a/substrate/test-utils/client/src/client_ext.rs +++ b/substrate/test-utils/client/src/client_ext.rs @@ -20,9 +20,11 @@ use sc_client_api::{backend::Finalizer, client::BlockBackend}; use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy}; use sc_service::client::Client; -use sp_consensus::{BlockOrigin, Error as ConsensusError}; +use sp_consensus::Error as ConsensusError; use sp_runtime::{traits::Block as BlockT, Justification, Justifications}; +pub use sp_consensus::BlockOrigin; + /// Extension trait for a test client. pub trait ClientExt: Sized { /// Finalize a block. 
diff --git a/substrate/test-utils/client/src/lib.rs b/substrate/test-utils/client/src/lib.rs index 90e15e0f8d53e1b6a3457113ddb7ee7c6fd32be8..084dd2a1861cd3b73c5d3f6d89f6cd84c81ac0ef 100644 --- a/substrate/test-utils/client/src/lib.rs +++ b/substrate/test-utils/client/src/lib.rs @@ -21,7 +21,7 @@ pub mod client_ext; -pub use self::client_ext::{ClientBlockImportExt, ClientExt}; +pub use self::client_ext::{BlockOrigin, ClientBlockImportExt, ClientExt}; pub use sc_client_api::{execution_extensions::ExecutionExtensions, BadBlocks, ForkBlocks}; pub use sc_client_db::{self, Backend, BlocksPruning}; pub use sc_executor::{self, NativeElseWasmExecutor, WasmExecutionMethod, WasmExecutor}; diff --git a/substrate/test-utils/runtime/Cargo.toml b/substrate/test-utils/runtime/Cargo.toml index 4d279c7b703dd44977287b41f8451400a768d073..2f1e192eded0d608eb157058f7b2c0cef7e9c49b 100644 --- a/substrate/test-utils/runtime/Cargo.toml +++ b/substrate/test-utils/runtime/Cargo.toml @@ -19,7 +19,7 @@ sp-consensus-babe = { path = "../../primitives/consensus/babe", default-features sp-genesis-builder = { path = "../../primitives/genesis-builder", default-features = false} sp-block-builder = { path = "../../primitives/block-builder", default-features = false} codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } sp-inherents = { path = "../../primitives/inherents", default-features = false} sp-keyring = { path = "../../primitives/keyring", optional = true} sp-offchain = { path = "../../primitives/offchain", default-features = false} @@ -48,8 +48,6 @@ sp-externalities = { path = "../../primitives/externalities", default-features = # 3rd party array-bytes = { version = "6.1", optional = true } log = { version = "0.4.17", default-features = false } -serde = { version = "1.0.188", features = ["alloc", "derive"], default-features = false } -serde_json = { version = "1.0.107", default-features = false, features = ["alloc"] } [dev-dependencies] futures = "0.3.21" @@ -60,6 +58,8 @@ sp-consensus = { path = "../../primitives/consensus/common" } substrate-test-runtime-client = { path = "client" } sp-tracing = { path = "../../primitives/tracing" } json-patch = { version = "1.0.0", default-features = false } +serde = { version = "1.0.188", features = ["alloc", "derive"], default-features = false } +serde_json = { version = "1.0.108", default-features = false, features = ["alloc"] } [build-dependencies] substrate-wasm-builder = { path = "../../utils/wasm-builder", optional = true } @@ -81,6 +81,8 @@ std = [ "sc-executor/std", "sc-service", "scale-info/std", + "serde/std", + "serde_json/std", "sp-api/std", "sp-application-crypto/std", "sp-block-builder/std", diff --git a/substrate/test-utils/runtime/client/src/block_builder_ext.rs b/substrate/test-utils/runtime/client/src/block_builder_ext.rs index 78863209e33e9fe98f4332b46ba60e06e12fd1ad..f7bf76b017e4a554076c6d02597f833954f09fbe 100644 --- a/substrate/test-utils/runtime/client/src/block_builder_ext.rs +++ b/substrate/test-utils/runtime/client/src/block_builder_ext.rs @@ -17,10 +17,8 @@ //! Block Builder extensions for tests. -use sc_client_api::backend; -use sp_api::{ApiExt, ProvideRuntimeApi}; - use sc_block_builder::BlockBuilderApi; +use sp_api::{ApiExt, ProvideRuntimeApi}; use substrate_test_runtime::*; /// Extension trait for test block builder. 
@@ -45,12 +43,12 @@ pub trait BlockBuilderExt { ) -> Result<(), sp_blockchain::Error>; } -impl<'a, A, B> BlockBuilderExt - for sc_block_builder::BlockBuilder<'a, substrate_test_runtime::Block, A, B> +impl<'a, A> BlockBuilderExt for sc_block_builder::BlockBuilder<'a, substrate_test_runtime::Block, A> where - A: ProvideRuntimeApi + 'a, + A: ProvideRuntimeApi + + sp_api::CallApiAt + + 'a, A::Api: BlockBuilderApi + ApiExt, - B: backend::Backend, { fn push_transfer( &mut self, diff --git a/substrate/test-utils/runtime/client/src/trait_tests.rs b/substrate/test-utils/runtime/client/src/trait_tests.rs index 5fce7a2860b755c10b57173b01b9a29d9e8f6740..6f6bb5c36ee486b3551df8efbbf44ada170a30a6 100644 --- a/substrate/test-utils/runtime/client/src/trait_tests.rs +++ b/substrate/test-utils/runtime/client/src/trait_tests.rs @@ -26,14 +26,14 @@ use crate::{ AccountKeyring, BlockBuilderExt, ClientBlockImportExt, TestClientBuilder, TestClientBuilderExt, }; use futures::executor::block_on; -use sc_block_builder::BlockBuilderProvider; +use sc_block_builder::BlockBuilderBuilder; use sc_client_api::{ backend, blockchain::{Backend as BlockChainBackendT, HeaderBackend}, }; use sp_consensus::BlockOrigin; use sp_runtime::traits::Block as BlockT; -use substrate_test_runtime::{self, Transfer}; +use substrate_test_runtime::Transfer; /// helper to test the `leaves` implementation for various backends pub fn test_leaves_for_backend(backend: Arc) @@ -54,13 +54,24 @@ where assert_eq!(blockchain.leaves().unwrap(), vec![genesis_hash]); // G -> A1 - let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; + let a1 = BlockBuilderBuilder::new(&client) + .on_parent_block(genesis_hash) + .fetch_parent_block_number(&client) + .unwrap() + .build() + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); assert_eq!(blockchain.leaves().unwrap(), vec![a1.hash()]); // A1 -> A2 - let a2 = client - .new_block_at(a1.hash(), Default::default(), false) + let a2 = BlockBuilderBuilder::new(&client) + .on_parent_block(a1.hash()) + .fetch_parent_block_number(&client) + .unwrap() + .build() .unwrap() .build() .unwrap() @@ -70,8 +81,11 @@ where assert_eq!(blockchain.leaves().unwrap(), vec![a2.hash()]); // A2 -> A3 - let a3 = client - .new_block_at(a2.hash(), Default::default(), false) + let a3 = BlockBuilderBuilder::new(&client) + .on_parent_block(a2.hash()) + .fetch_parent_block_number(&client) + .unwrap() + .build() .unwrap() .build() .unwrap() @@ -81,8 +95,11 @@ where assert_eq!(blockchain.leaves().unwrap(), vec![a3.hash()]); // A3 -> A4 - let a4 = client - .new_block_at(a3.hash(), Default::default(), false) + let a4 = BlockBuilderBuilder::new(&client) + .on_parent_block(a3.hash()) + .fetch_parent_block_number(&client) + .unwrap() + .build() .unwrap() .build() .unwrap() @@ -91,8 +108,11 @@ where assert_eq!(blockchain.leaves().unwrap(), vec![a4.hash()]); // A4 -> A5 - let a5 = client - .new_block_at(a4.hash(), Default::default(), false) + let a5 = BlockBuilderBuilder::new(&client) + .on_parent_block(a4.hash()) + .fetch_parent_block_number(&client) + .unwrap() + .build() .unwrap() .build() .unwrap() @@ -102,7 +122,12 @@ where assert_eq!(blockchain.leaves().unwrap(), vec![a5.hash()]); // A1 -> B2 - let mut builder = client.new_block_at(a1.hash(), Default::default(), false).unwrap(); + let mut builder = BlockBuilderBuilder::new(&client) + .on_parent_block(a1.hash()) + .fetch_parent_block_number(&client) + .unwrap() + .build() + .unwrap(); // this push is required as 
otherwise B2 has the same hash as A2 and won't get imported builder @@ -118,8 +143,11 @@ where assert_eq!(blockchain.leaves().unwrap(), vec![a5.hash(), b2.hash()]); // B2 -> B3 - let b3 = client - .new_block_at(b2.hash(), Default::default(), false) + let b3 = BlockBuilderBuilder::new(&client) + .on_parent_block(b2.hash()) + .fetch_parent_block_number(&client) + .unwrap() + .build() .unwrap() .build() .unwrap() @@ -129,8 +157,11 @@ where assert_eq!(blockchain.leaves().unwrap(), vec![a5.hash(), b3.hash()]); // B3 -> B4 - let b4 = client - .new_block_at(b3.hash(), Default::default(), false) + let b4 = BlockBuilderBuilder::new(&client) + .on_parent_block(b3.hash()) + .fetch_parent_block_number(&client) + .unwrap() + .build() .unwrap() .build() .unwrap() @@ -139,7 +170,12 @@ where assert_eq!(blockchain.leaves().unwrap(), vec![a5.hash(), b4.hash()]); // // B2 -> C3 - let mut builder = client.new_block_at(b2.hash(), Default::default(), false).unwrap(); + let mut builder = BlockBuilderBuilder::new(&client) + .on_parent_block(b2.hash()) + .fetch_parent_block_number(&client) + .unwrap() + .build() + .unwrap(); // this push is required as otherwise C3 has the same hash as B3 and won't get imported builder .push_transfer(Transfer { @@ -154,7 +190,12 @@ where assert_eq!(blockchain.leaves().unwrap(), vec![a5.hash(), b4.hash(), c3.hash()]); // A1 -> D2 - let mut builder = client.new_block_at(a1.hash(), Default::default(), false).unwrap(); + let mut builder = BlockBuilderBuilder::new(&client) + .on_parent_block(a1.hash()) + .fetch_parent_block_number(&client) + .unwrap() + .build() + .unwrap(); // this push is required as otherwise D2 has the same hash as B2 and won't get imported builder .push_transfer(Transfer { @@ -182,14 +223,26 @@ where let mut client = TestClientBuilder::with_backend(backend.clone()).build(); let blockchain = backend.blockchain(); + let genesis_hash = client.chain_info().genesis_hash; // G -> A1 - let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; + let a1 = BlockBuilderBuilder::new(&client) + .on_parent_block(genesis_hash) + .fetch_parent_block_number(&client) + .unwrap() + .build() + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); // A1 -> A2 - let a2 = client - .new_block_at(a1.hash(), Default::default(), false) + let a2 = BlockBuilderBuilder::new(&client) + .on_parent_block(a1.hash()) + .fetch_parent_block_number(&client) + .unwrap() + .build() .unwrap() .build() .unwrap() @@ -197,8 +250,11 @@ where block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); // A2 -> A3 - let a3 = client - .new_block_at(a2.hash(), Default::default(), false) + let a3 = BlockBuilderBuilder::new(&client) + .on_parent_block(a2.hash()) + .fetch_parent_block_number(&client) + .unwrap() + .build() .unwrap() .build() .unwrap() @@ -206,8 +262,11 @@ where block_on(client.import(BlockOrigin::Own, a3.clone())).unwrap(); // A3 -> A4 - let a4 = client - .new_block_at(a3.hash(), Default::default(), false) + let a4 = BlockBuilderBuilder::new(&client) + .on_parent_block(a3.hash()) + .fetch_parent_block_number(&client) + .unwrap() + .build() .unwrap() .build() .unwrap() @@ -215,8 +274,11 @@ where block_on(client.import(BlockOrigin::Own, a4.clone())).unwrap(); // A4 -> A5 - let a5 = client - .new_block_at(a4.hash(), Default::default(), false) + let a5 = BlockBuilderBuilder::new(&client) + .on_parent_block(a4.hash()) + .fetch_parent_block_number(&client) + .unwrap() + .build() .unwrap() .build() .unwrap() @@ -224,7 +286,12 @@ 
where block_on(client.import(BlockOrigin::Own, a5.clone())).unwrap(); // A1 -> B2 - let mut builder = client.new_block_at(a1.hash(), Default::default(), false).unwrap(); + let mut builder = BlockBuilderBuilder::new(&client) + .on_parent_block(a1.hash()) + .fetch_parent_block_number(&client) + .unwrap() + .build() + .unwrap(); // this push is required as otherwise B2 has the same hash as A2 and won't get imported builder .push_transfer(Transfer { @@ -238,8 +305,11 @@ where block_on(client.import(BlockOrigin::Own, b2.clone())).unwrap(); // B2 -> B3 - let b3 = client - .new_block_at(b2.hash(), Default::default(), false) + let b3 = BlockBuilderBuilder::new(&client) + .on_parent_block(b2.hash()) + .fetch_parent_block_number(&client) + .unwrap() + .build() .unwrap() .build() .unwrap() @@ -247,8 +317,11 @@ where block_on(client.import(BlockOrigin::Own, b3.clone())).unwrap(); // B3 -> B4 - let b4 = client - .new_block_at(b3.hash(), Default::default(), false) + let b4 = BlockBuilderBuilder::new(&client) + .on_parent_block(b3.hash()) + .fetch_parent_block_number(&client) + .unwrap() + .build() .unwrap() .build() .unwrap() @@ -256,7 +329,12 @@ where block_on(client.import(BlockOrigin::Own, b4)).unwrap(); // // B2 -> C3 - let mut builder = client.new_block_at(b2.hash(), Default::default(), false).unwrap(); + let mut builder = BlockBuilderBuilder::new(&client) + .on_parent_block(b2.hash()) + .fetch_parent_block_number(&client) + .unwrap() + .build() + .unwrap(); // this push is required as otherwise C3 has the same hash as B3 and won't get imported builder .push_transfer(Transfer { @@ -270,7 +348,12 @@ where block_on(client.import(BlockOrigin::Own, c3.clone())).unwrap(); // A1 -> D2 - let mut builder = client.new_block_at(a1.hash(), Default::default(), false).unwrap(); + let mut builder = BlockBuilderBuilder::new(&client) + .on_parent_block(a1.hash()) + .fetch_parent_block_number(&client) + .unwrap() + .build() + .unwrap(); // this push is required as otherwise D2 has the same hash as B2 and won't get imported builder .push_transfer(Transfer { @@ -309,14 +392,26 @@ where // A1 -> D2 let mut client = TestClientBuilder::with_backend(backend.clone()).build(); let blockchain = backend.blockchain(); + let genesis_hash = client.chain_info().genesis_hash; // G -> A1 - let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; + let a1 = BlockBuilderBuilder::new(&client) + .on_parent_block(genesis_hash) + .fetch_parent_block_number(&client) + .unwrap() + .build() + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); // A1 -> A2 - let a2 = client - .new_block_at(a1.hash(), Default::default(), false) + let a2 = BlockBuilderBuilder::new(&client) + .on_parent_block(a1.hash()) + .fetch_parent_block_number(&client) + .unwrap() + .build() .unwrap() .build() .unwrap() @@ -324,8 +419,11 @@ where block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); // A2 -> A3 - let a3 = client - .new_block_at(a2.hash(), Default::default(), false) + let a3 = BlockBuilderBuilder::new(&client) + .on_parent_block(a2.hash()) + .fetch_parent_block_number(&client) + .unwrap() + .build() .unwrap() .build() .unwrap() @@ -333,8 +431,11 @@ where block_on(client.import(BlockOrigin::Own, a3.clone())).unwrap(); // A3 -> A4 - let a4 = client - .new_block_at(a3.hash(), Default::default(), false) + let a4 = BlockBuilderBuilder::new(&client) + .on_parent_block(a3.hash()) + .fetch_parent_block_number(&client) + .unwrap() + .build() .unwrap() .build() .unwrap() @@ -342,8 
+443,11 @@ where block_on(client.import(BlockOrigin::Own, a4.clone())).unwrap(); // A4 -> A5 - let a5 = client - .new_block_at(a4.hash(), Default::default(), false) + let a5 = BlockBuilderBuilder::new(&client) + .on_parent_block(a4.hash()) + .fetch_parent_block_number(&client) + .unwrap() + .build() .unwrap() .build() .unwrap() @@ -351,7 +455,12 @@ where block_on(client.import(BlockOrigin::Own, a5.clone())).unwrap(); // A1 -> B2 - let mut builder = client.new_block_at(a1.hash(), Default::default(), false).unwrap(); + let mut builder = BlockBuilderBuilder::new(&client) + .on_parent_block(a1.hash()) + .fetch_parent_block_number(&client) + .unwrap() + .build() + .unwrap(); // this push is required as otherwise B2 has the same hash as A2 and won't get imported builder .push_transfer(Transfer { @@ -365,8 +474,11 @@ where block_on(client.import(BlockOrigin::Own, b2.clone())).unwrap(); // B2 -> B3 - let b3 = client - .new_block_at(b2.hash(), Default::default(), false) + let b3 = BlockBuilderBuilder::new(&client) + .on_parent_block(b2.hash()) + .fetch_parent_block_number(&client) + .unwrap() + .build() .unwrap() .build() .unwrap() @@ -374,8 +486,11 @@ where block_on(client.import(BlockOrigin::Own, b3.clone())).unwrap(); // B3 -> B4 - let b4 = client - .new_block_at(b3.hash(), Default::default(), false) + let b4 = BlockBuilderBuilder::new(&client) + .on_parent_block(b3.hash()) + .fetch_parent_block_number(&client) + .unwrap() + .build() .unwrap() .build() .unwrap() @@ -383,7 +498,12 @@ where block_on(client.import(BlockOrigin::Own, b4)).unwrap(); // // B2 -> C3 - let mut builder = client.new_block_at(b2.hash(), Default::default(), false).unwrap(); + let mut builder = BlockBuilderBuilder::new(&client) + .on_parent_block(b2.hash()) + .fetch_parent_block_number(&client) + .unwrap() + .build() + .unwrap(); // this push is required as otherwise C3 has the same hash as B3 and won't get imported builder .push_transfer(Transfer { @@ -397,7 +517,12 @@ where block_on(client.import(BlockOrigin::Own, c3)).unwrap(); // A1 -> D2 - let mut builder = client.new_block_at(a1.hash(), Default::default(), false).unwrap(); + let mut builder = BlockBuilderBuilder::new(&client) + .on_parent_block(a1.hash()) + .fetch_parent_block_number(&client) + .unwrap() + .build() + .unwrap(); // this push is required as otherwise D2 has the same hash as B2 and won't get imported builder .push_transfer(Transfer { diff --git a/substrate/test-utils/runtime/src/test_json/README.md b/substrate/test-utils/runtime/res/README.md similarity index 92% rename from substrate/test-utils/runtime/src/test_json/README.md rename to substrate/test-utils/runtime/res/README.md index 6d6ae55c34639dcd601c94e1ee0ed2a3ac9e04a6..fdf94fd9d876194395dda0fbb93417e60820c31c 100644 --- a/substrate/test-utils/runtime/src/test_json/README.md +++ b/substrate/test-utils/runtime/res/README.md @@ -20,5 +20,5 @@ comparing keys, not the values. `renamed_authorities`. -`default_genesis_config_invalid.json` is just an imcomplete copy of +`default_genesis_config_invalid.json` is just an incomplete copy of `default_genesis_config.json` with `babe::authorities` field removed. 
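As a side note on the test changes above: the hunks replace the removed `client.new_block(..)` / `client.new_block_at(..)` helpers with the new `BlockBuilderBuilder` API. A condensed before/after sketch, distilled from the hunks above and assuming a `client` and `parent_hash` as used in those tests:

```rust
use sc_block_builder::BlockBuilderBuilder;

// Before (removed API):
// let block = client.new_block_at(parent_hash, Default::default(), false)?.build()?.block;

// After: configure the builder on an explicit parent block. The first `build()`
// turns the configured `BlockBuilderBuilder` into a `BlockBuilder`; the second
// `build()` produces the block itself.
let block = BlockBuilderBuilder::new(&client)
	.on_parent_block(parent_hash)
	.fetch_parent_block_number(&client)
	.unwrap()
	.build()
	.unwrap()
	.build()
	.unwrap()
	.block;
```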
diff --git a/substrate/test-utils/runtime/src/test_json/default_genesis_config.json b/substrate/test-utils/runtime/res/default_genesis_config.json similarity index 98% rename from substrate/test-utils/runtime/src/test_json/default_genesis_config.json rename to substrate/test-utils/runtime/res/default_genesis_config.json index b0218d417daa57e46b32efff28187fc48b96b4b9..95c7799a033d8cf693ba573dcca162c76ed53f65 100644 --- a/substrate/test-utils/runtime/src/test_json/default_genesis_config.json +++ b/substrate/test-utils/runtime/res/default_genesis_config.json @@ -1,7 +1,5 @@ { - "system": { - "code": "0x52" - }, + "system": {}, "babe": { "authorities": [ [ diff --git a/substrate/test-utils/runtime/src/test_json/default_genesis_config_incomplete.json b/substrate/test-utils/runtime/res/default_genesis_config_incomplete.json similarity index 98% rename from substrate/test-utils/runtime/src/test_json/default_genesis_config_incomplete.json rename to substrate/test-utils/runtime/res/default_genesis_config_incomplete.json index e25730ee11cf01b59d84ae0d85d29a49c636f8f7..510ed87c93c577c2fc7f30ee8a3fb53bc3e6cb90 100644 --- a/substrate/test-utils/runtime/src/test_json/default_genesis_config_incomplete.json +++ b/substrate/test-utils/runtime/res/default_genesis_config_incomplete.json @@ -1,7 +1,5 @@ { - "system": { - "code": "0x52" - }, + "system": {}, "babe": { "epochConfig": { "c": [ diff --git a/substrate/test-utils/runtime/src/test_json/default_genesis_config_invalid.json b/substrate/test-utils/runtime/res/default_genesis_config_invalid.json similarity index 98% rename from substrate/test-utils/runtime/src/test_json/default_genesis_config_invalid.json rename to substrate/test-utils/runtime/res/default_genesis_config_invalid.json index 00550efaeec9f5c8d5e48e79629ebedd208f75ee..f8e06f91d66583387c038a6398882ce3ecdc0d8e 100644 --- a/substrate/test-utils/runtime/src/test_json/default_genesis_config_invalid.json +++ b/substrate/test-utils/runtime/res/default_genesis_config_invalid.json @@ -1,7 +1,5 @@ { - "system": { - "code": "0x52" - }, + "system": {}, "babe": { "renamed_authorities": [ [ diff --git a/substrate/test-utils/runtime/res/default_genesis_config_invalid_2.json b/substrate/test-utils/runtime/res/default_genesis_config_invalid_2.json new file mode 100644 index 0000000000000000000000000000000000000000..a1345542bcda75890665db237be9079c4dedb0ca --- /dev/null +++ b/substrate/test-utils/runtime/res/default_genesis_config_invalid_2.json @@ -0,0 +1,113 @@ +{ + "system": {}, + "babex": { + "authorities": [ + [ + "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", + 1 + ], + [ + "5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty", + 1 + ], + [ + "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y", + 1 + ] + ], + "epochConfig": { + "c": [ + 3, + 10 + ], + "allowed_slots": "PrimaryAndSecondaryPlainSlots" + } + }, + "substrateTest": { + "authorities": [ + "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", + "5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty", + "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y" + ] + }, + "balances": { + "balances": [ + [ + "5D34dL5prEUaGNQtPPZ3yN5Y6BnkfXunKXXz6fo7ZJbLwRRH", + 100000000000000000 + ], + [ + "5GBNeWRhZc2jXu7D55rBimKYDk8PGk8itRYFTPfC8RJLKG5o", + 100000000000000000 + ], + [ + "5Dfis6XL8J2P6JHUnUtArnFWndn62SydeP8ee8sG2ky9nfm9", + 100000000000000000 + ], + [ + "5F4H97f7nQovyrbiq4ZetaaviNwThSVcFobcA5aGab6167dK", + 100000000000000000 + ], + [ + "5DiDShBWa1fQx6gLzpf3SFBhMinCoyvHM1BWjPNsmXS8hkrW", + 100000000000000000 + ], + [ + 
"5EFb84yH9tpcFuiKUcsmdoF7xeeY3ajG1ZLQimxQoFt9HMKR", + 100000000000000000 + ], + [ + "5DZLHESsfGrJ5YzT3HuRPXsSNb589xQ4Unubh1mYLodzKdVY", + 100000000000000000 + ], + [ + "5GHJzqvG6tXnngCpG7B12qjUvbo5e4e9z8Xjidk3CQZHxTPZ", + 100000000000000000 + ], + [ + "5CUnSsgAyLND3bxxnfNhgWXSe9Wn676JzLpGLgyJv858qhoX", + 100000000000000000 + ], + [ + "5CVKn7HAZW1Ky4r7Vkgsr7VEW88C2sHgUNDiwHY9Ct2hjU8q", + 100000000000000000 + ], + [ + "5H673aukQ4PeDe1U2nuv1bi32xDEziimh3PZz7hDdYUB7TNz", + 100000000000000000 + ], + [ + "5HTe9L15LJryjUAt1jZXZCBPnzbbGnpvFwbjE3NwCWaAqovf", + 100000000000000000 + ], + [ + "5D7LFzGpMwHPyDBavkRbWSKWTtJhCaPPZ379wWLT23bJwXJz", + 100000000000000000 + ], + [ + "5CLepMARnEgtVR1EkUuJVUvKh97gzergpSxUU3yKGx1v6EwC", + 100000000000000000 + ], + [ + "5Chb2UhfvZpmjjEziHbFbotM4quX32ZscRV6QJBt1rUKzz51", + 100000000000000000 + ], + [ + "5HmRp3i3ZZk7xsAvbi8hyXVP6whSMnBJGebVC4FsiZVhx52e", + 100000000000000000 + ], + [ + "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", + 100000000000000000 + ], + [ + "5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty", + 100000000000000000 + ], + [ + "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y", + 100000000000000000 + ] + ] + } +} diff --git a/substrate/test-utils/runtime/src/genesismap.rs b/substrate/test-utils/runtime/src/genesismap.rs index 8a4d6dbe4a71a59c9d1086f4eb61b3618211690b..5ed9c8a645886f741611bc9fdc5d4eae01ae2704 100644 --- a/substrate/test-utils/runtime/src/genesismap.rs +++ b/substrate/test-utils/runtime/src/genesismap.rs @@ -117,10 +117,7 @@ impl GenesisStorageBuilder { .collect(); RuntimeGenesisConfig { - system: frame_system::GenesisConfig { - code: self.wasm_code.clone().unwrap_or(wasm_binary_unwrap().to_vec()), - ..Default::default() - }, + system: Default::default(), babe: pallet_babe::GenesisConfig { authorities: authorities_sr25519 .clone() @@ -149,6 +146,11 @@ impl GenesisStorageBuilder { storage.top.insert(well_known_keys::HEAP_PAGES.into(), heap_pages.encode()); } + storage.top.insert( + well_known_keys::CODE.into(), + self.wasm_code.clone().unwrap_or(wasm_binary_unwrap().to_vec()), + ); + storage.top.extend(self.extra_storage.top.clone()); storage.children_default.extend(self.extra_storage.children_default.clone()); diff --git a/substrate/test-utils/runtime/src/lib.rs b/substrate/test-utils/runtime/src/lib.rs index 687790f2ffaf8bfe13a4bb58075efc3da750114a..1a4e9fd04667c830ad912316bf7fe6fcaae11c41 100644 --- a/substrate/test-utils/runtime/src/lib.rs +++ b/substrate/test-utils/runtime/src/lib.rs @@ -396,6 +396,7 @@ impl pallet_balances::Config for Runtime { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type MaxHolds = ConstU32<1>; } @@ -463,11 +464,10 @@ impl_opaque_keys! { } } -pub(crate) const TEST_RUNTIME_BABE_EPOCH_CONFIGURATION: BabeEpochConfiguration = - BabeEpochConfiguration { - c: (3, 10), - allowed_slots: AllowedSlots::PrimaryAndSecondaryPlainSlots, - }; +pub const TEST_RUNTIME_BABE_EPOCH_CONFIGURATION: BabeEpochConfiguration = BabeEpochConfiguration { + c: (3, 10), + allowed_slots: AllowedSlots::PrimaryAndSecondaryPlainSlots, +}; impl_runtime_apis! 
{ impl sp_api::Core for Runtime { @@ -1019,7 +1019,7 @@ mod tests { use super::*; use codec::Encode; use frame_support::dispatch::DispatchInfo; - use sc_block_builder::BlockBuilderProvider; + use sc_block_builder::BlockBuilderBuilder; use sp_api::{ApiExt, ProvideRuntimeApi}; use sp_consensus::BlockOrigin; use sp_core::{storage::well_known_keys::HEAP_PAGES, traits::CallContext}; @@ -1051,7 +1051,11 @@ mod tests { // Create a block that sets the `:heap_pages` to 32 pages of memory which corresponds to // ~2048k of heap memory. let (new_at_hash, block) = { - let mut builder = client.new_block(Default::default()).unwrap(); + let mut builder = BlockBuilderBuilder::new(&client) + .on_parent_block(best_hash) + .with_parent_block_number(0) + .build() + .unwrap(); builder.push_storage_change(HEAP_PAGES.to_vec(), Some(32u64.encode())).unwrap(); let block = builder.build().unwrap().block; let hash = block.header.hash(); @@ -1232,7 +1236,7 @@ mod tests { #[test] fn build_minimal_genesis_config_works() { sp_tracing::try_init_simple(); - let default_minimal_json = r#"{"system":{"code":"0x"},"babe":{"authorities":[],"epochConfig":{"c": [ 3, 10 ],"allowed_slots":"PrimaryAndSecondaryPlainSlots"}},"substrateTest":{"authorities":[]},"balances":{"balances":[]}}"#; + let default_minimal_json = r#"{"system":{},"babe":{"authorities":[],"epochConfig":{"c": [ 3, 10 ],"allowed_slots":"PrimaryAndSecondaryPlainSlots"}},"substrateTest":{"authorities":[]},"balances":{"balances":[]}}"#; let mut t = BasicExternalities::new_empty(); executor_call(&mut t, "GenesisBuilder_build_config", &default_minimal_json.encode()) @@ -1259,8 +1263,6 @@ mod tests { // System|LastRuntimeUpgrade "26aa394eea5630e07c48ae0c9558cef7f9cce9c888469bb1a0dceaa129672ef8", - // :code - "3a636f6465", // :extrinsic_index "3a65787472696e7369635f696e646578", // Balances|TotalIssuance @@ -1289,35 +1291,55 @@ mod tests { let r = Vec::::decode(&mut &r[..]).unwrap(); let json = String::from_utf8(r.into()).expect("returned value is json. qed."); - let expected = r#"{"system":{"code":"0x"},"babe":{"authorities":[],"epochConfig":null},"substrateTest":{"authorities":[]},"balances":{"balances":[]}}"#; + let expected = r#"{"system":{},"babe":{"authorities":[],"epochConfig":null},"substrateTest":{"authorities":[]},"balances":{"balances":[]}}"#; assert_eq!(expected.to_string(), json); } #[test] fn build_config_from_json_works() { sp_tracing::try_init_simple(); - let j = include_str!("test_json/default_genesis_config.json"); + let j = include_str!("../res/default_genesis_config.json"); let mut t = BasicExternalities::new_empty(); let r = executor_call(&mut t, "GenesisBuilder_build_config", &j.encode()).unwrap(); let r = BuildResult::decode(&mut &r[..]); assert!(r.is_ok()); - let keys = t.into_storages().top.keys().cloned().map(hex).collect::>(); + let mut keys = t.into_storages().top.keys().cloned().map(hex).collect::>(); + + // following keys are not placed during `::build` + // process, add them `keys` to assert against known keys. 
+ keys.push(hex(b":code")); + keys.sort(); + assert_eq!(keys, storage_key_generator::get_expected_storage_hashed_keys(false)); } #[test] fn build_config_from_invalid_json_fails() { sp_tracing::try_init_simple(); - let j = include_str!("test_json/default_genesis_config_invalid.json"); + let j = include_str!("../res/default_genesis_config_invalid.json"); let mut t = BasicExternalities::new_empty(); let r = executor_call(&mut t, "GenesisBuilder_build_config", &j.encode()).unwrap(); let r = BuildResult::decode(&mut &r[..]).unwrap(); log::info!("result: {:#?}", r); assert_eq!(r, Err( sp_runtime::RuntimeString::Owned( - "Invalid JSON blob: unknown field `renamed_authorities`, expected `authorities` or `epochConfig` at line 6 column 25".to_string(), + "Invalid JSON blob: unknown field `renamed_authorities`, expected `authorities` or `epochConfig` at line 4 column 25".to_string(), + )) + ); + } + + #[test] + fn build_config_from_invalid_json_fails_2() { + sp_tracing::try_init_simple(); + let j = include_str!("../res/default_genesis_config_invalid_2.json"); + let mut t = BasicExternalities::new_empty(); + let r = executor_call(&mut t, "GenesisBuilder_build_config", &j.encode()).unwrap(); + let r = BuildResult::decode(&mut &r[..]).unwrap(); + assert_eq!(r, Err( + sp_runtime::RuntimeString::Owned( + "Invalid JSON blob: unknown field `babex`, expected one of `system`, `babe`, `substrateTest`, `balances` at line 3 column 9".to_string(), )) ); } @@ -1325,7 +1347,7 @@ mod tests { #[test] fn build_config_from_incomplete_json_fails() { sp_tracing::try_init_simple(); - let j = include_str!("test_json/default_genesis_config_incomplete.json"); + let j = include_str!("../res/default_genesis_config_incomplete.json"); let mut t = BasicExternalities::new_empty(); let r = executor_call(&mut t, "GenesisBuilder_build_config", &j.encode()).unwrap(); @@ -1334,7 +1356,7 @@ mod tests { assert_eq!( r, Err(sp_runtime::RuntimeString::Owned( - "Invalid JSON blob: missing field `authorities` at line 13 column 3" + "Invalid JSON blob: missing field `authorities` at line 11 column 3" .to_string() )) ); @@ -1433,10 +1455,6 @@ mod tests { ); assert_eq!(u64::decode(&mut &value[..]).unwrap(), 0); - // :code - let value: Vec = get_from_storage("3a636f6465"); - assert!(Vec::::decode(&mut &value[..]).is_err()); - //System|ParentHash let value: Vec = get_from_storage( "26aa394eea5630e07c48ae0c9558cef78a42f33323cb5ced3b44dd825fda9fcc", diff --git a/substrate/utils/build-script-utils/src/version.rs b/substrate/utils/build-script-utils/src/version.rs index 309c6d71d77b1478211875fa8c06e372470c4ae0..549e499b11022304a06a5510ed7aaea809f4b488 100644 --- a/substrate/utils/build-script-utils/src/version.rs +++ b/substrate/utils/build-script-utils/src/version.rs @@ -31,7 +31,11 @@ pub fn generate_cargo_keys() { Cow::from(sha) }, Ok(o) => { - println!("cargo:warning=Git command failed with status: {}", o.status); + let stderr = String::from_utf8_lossy(&o.stderr).trim().to_owned(); + println!( + "cargo:warning=Git command failed with status '{}' with message: '{}'", + o.status, stderr, + ); Cow::from("unknown") }, Err(err) => { @@ -55,3 +59,34 @@ fn get_version(impl_commit: &str) -> String { impl_commit ) } + +/// Generate `SUBSTRATE_WASMTIME_VERSION` +pub fn generate_wasmtime_version() { + generate_dependency_version("wasmtime", "SUBSTRATE_WASMTIME_VERSION"); +} + +fn generate_dependency_version(dep: &str, env_var: &str) { + // we only care about the root + match std::process::Command::new("cargo") + .args(["tree", "--depth=0", "--locked", 
"--package", dep]) + .output() + { + Ok(output) if output.status.success() => { + let version = String::from_utf8_lossy(&output.stdout); + + // vX.X.X + if let Some(ver) = version.strip_prefix(&format!("{} v", dep)) { + println!("cargo:rustc-env={}={}", env_var, ver); + } else { + println!("cargo:warning=Unexpected result {}", version); + } + }, + + // command errors out when it could not find the given dependency + // or when having multiple versions of it + Ok(output) => + println!("cargo:warning=`cargo tree` {}", String::from_utf8_lossy(&output.stderr)), + + Err(err) => println!("cargo:warning=Could not run `cargo tree`: {}", err), + } +} diff --git a/substrate/utils/frame/benchmarking-cli/Cargo.toml b/substrate/utils/frame/benchmarking-cli/Cargo.toml index e32fe47b729712368d4803e7d756dffd18a52476..b67d08a85c2eb7f97c5d9900d14f86301a8bc60f 100644 --- a/substrate/utils/frame/benchmarking-cli/Cargo.toml +++ b/substrate/utils/frame/benchmarking-cli/Cargo.toml @@ -27,7 +27,7 @@ log = "0.4.17" rand = { version = "0.8.4", features = ["small_rng"] } rand_pcg = "0.3.1" serde = "1.0.188" -serde_json = "1.0.107" +serde_json = "1.0.108" thiserror = "1.0.48" thousands = "0.2.0" frame-benchmarking = { path = "../../../frame/benchmarking" } diff --git a/substrate/utils/frame/benchmarking-cli/src/block/bench.rs b/substrate/utils/frame/benchmarking-cli/src/block/bench.rs index c9a7fb1ad33dfd3d5145b7ca671bc36d932119bc..a028c7d438e80ac80409c2ae7ddc1d683de21052 100644 --- a/substrate/utils/frame/benchmarking-cli/src/block/bench.rs +++ b/substrate/utils/frame/benchmarking-cli/src/block/bench.rs @@ -20,7 +20,7 @@ use codec::DecodeAll; use frame_support::weights::constants::WEIGHT_REF_TIME_PER_NANOS; use frame_system::ConsumedWeight; -use sc_block_builder::{BlockBuilderApi, BlockBuilderProvider}; +use sc_block_builder::BlockBuilderApi; use sc_cli::{Error, Result}; use sc_client_api::{ Backend as ClientBackend, BlockBackend, HeaderBackend, StorageProvider, UsageProvider, @@ -71,8 +71,7 @@ impl Benchmark where Block: BlockT, BA: ClientBackend, - C: BlockBuilderProvider - + ProvideRuntimeApi + C: ProvideRuntimeApi + StorageProvider + UsageProvider + BlockBackend diff --git a/substrate/utils/frame/benchmarking-cli/src/block/cmd.rs b/substrate/utils/frame/benchmarking-cli/src/block/cmd.rs index ee12c1c5dac33fc60ecec0726734d8f9482540a8..e28688caa2dddd0240fd402c4c35c68ce374e52f 100644 --- a/substrate/utils/frame/benchmarking-cli/src/block/cmd.rs +++ b/substrate/utils/frame/benchmarking-cli/src/block/cmd.rs @@ -18,7 +18,7 @@ //! Contains the [`BlockCmd`] as entry point for the CLI to execute //! the *block* benchmark. -use sc_block_builder::{BlockBuilderApi, BlockBuilderProvider}; +use sc_block_builder::BlockBuilderApi; use sc_cli::{CliConfiguration, ImportParams, Result, SharedParams}; use sc_client_api::{Backend as ClientBackend, BlockBackend, StorageProvider, UsageProvider}; use sp_api::{ApiExt, ProvideRuntimeApi}; @@ -84,8 +84,7 @@ impl BlockCmd { where Block: BlockT, BA: ClientBackend, - C: BlockBuilderProvider - + BlockBackend + C: BlockBackend + ProvideRuntimeApi + StorageProvider + UsageProvider diff --git a/substrate/utils/frame/benchmarking-cli/src/extrinsic/bench.rs b/substrate/utils/frame/benchmarking-cli/src/extrinsic/bench.rs index 693b9f99f08e88151df43db025dec3f19b44d2e4..f0a7436dc729a462c025924589e81f8d0e836312 100644 --- a/substrate/utils/frame/benchmarking-cli/src/extrinsic/bench.rs +++ b/substrate/utils/frame/benchmarking-cli/src/extrinsic/bench.rs @@ -17,10 +17,10 @@ //! 
Contains the core benchmarking logic. -use sc_block_builder::{BlockBuilderApi, BlockBuilderProvider}; +use sc_block_builder::{BlockBuilderApi, BlockBuilderBuilder}; use sc_cli::{Error, Result}; -use sc_client_api::Backend as ClientBackend; -use sp_api::{ApiExt, Core, ProvideRuntimeApi}; +use sc_client_api::UsageProvider; +use sp_api::{ApiExt, CallApiAt, Core, ProvideRuntimeApi}; use sp_blockchain::{ ApplyExtrinsicFailed::Validity, Error::{ApplyExtrinsicFailed, RuntimeApiError}, @@ -61,20 +61,20 @@ pub struct BenchmarkParams { pub(crate) type BenchRecord = Vec; /// Holds all objects needed to run the *overhead* benchmarks. -pub(crate) struct Benchmark { +pub(crate) struct Benchmark { client: Arc, params: BenchmarkParams, inherent_data: sp_inherents::InherentData, digest_items: Vec, - _p: PhantomData<(Block, BA)>, + _p: PhantomData, } -impl Benchmark +impl Benchmark where Block: BlockT, - BA: ClientBackend, - C: BlockBuilderProvider - + ProvideRuntimeApi + C: ProvideRuntimeApi + + CallApiAt + + UsageProvider + sp_blockchain::HeaderBackend, C::Api: ApiExt + BlockBuilderApi, { @@ -129,7 +129,13 @@ where &self, ext_builder: Option<&dyn ExtrinsicBuilder>, ) -> Result<(Block, Option)> { - let mut builder = self.client.new_block(Digest { logs: self.digest_items.clone() })?; + let chain = self.client.usage_info().chain; + let mut builder = BlockBuilderBuilder::new(&*self.client) + .on_parent_block(chain.best_hash) + .with_parent_block_number(chain.best_number) + .with_inherent_digests(Digest { logs: self.digest_items.clone() }) + .build()?; + // Create and insert the inherents. let inherents = builder.create_inherents(self.inherent_data.clone())?; for inherent in inherents { diff --git a/substrate/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs b/substrate/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs index 4c3a6ed1bcd7816ba388c3d07c33fe9a8cfa7361..99c0230617cb36f4cb98a5d0e99d75dddd003219 100644 --- a/substrate/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs +++ b/substrate/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs @@ -15,10 +15,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -use sc_block_builder::{BlockBuilderApi, BlockBuilderProvider}; +use sc_block_builder::BlockBuilderApi; use sc_cli::{CliConfiguration, ImportParams, Result, SharedParams}; -use sc_client_api::Backend as ClientBackend; -use sp_api::{ApiExt, ProvideRuntimeApi}; +use sc_client_api::UsageProvider; +use sp_api::{ApiExt, CallApiAt, ProvideRuntimeApi}; use sp_runtime::{traits::Block as BlockT, DigestItem, OpaqueExtrinsic}; use clap::{Args, Parser}; @@ -84,7 +84,7 @@ impl ExtrinsicCmd { /// Benchmark the execution time of a specific type of extrinsic. /// /// The output will be printed to console. - pub fn run( + pub fn run( &self, client: Arc, inherent_data: sp_inherents::InherentData, @@ -93,9 +93,9 @@ impl ExtrinsicCmd { ) -> Result<()> where Block: BlockT, - BA: ClientBackend, - C: BlockBuilderProvider - + ProvideRuntimeApi + C: ProvideRuntimeApi + + CallApiAt + + UsageProvider + sp_blockchain::HeaderBackend, C::Api: ApiExt + BlockBuilderApi, { diff --git a/substrate/utils/frame/benchmarking-cli/src/overhead/cmd.rs b/substrate/utils/frame/benchmarking-cli/src/overhead/cmd.rs index 5a4c37b1f6f07301766a6693fde250c769c50e58..4fa8cecf2f7ddf748fe6d338e61f5236e8e965b5 100644 --- a/substrate/utils/frame/benchmarking-cli/src/overhead/cmd.rs +++ b/substrate/utils/frame/benchmarking-cli/src/overhead/cmd.rs @@ -18,11 +18,11 @@ //! 
Contains the [`OverheadCmd`] as entry point for the CLI to execute //! the *overhead* benchmarks. -use sc_block_builder::{BlockBuilderApi, BlockBuilderProvider}; +use sc_block_builder::BlockBuilderApi; use sc_cli::{CliConfiguration, ImportParams, Result, SharedParams}; -use sc_client_api::Backend as ClientBackend; +use sc_client_api::UsageProvider; use sc_service::Configuration; -use sp_api::{ApiExt, ProvideRuntimeApi}; +use sp_api::{ApiExt, CallApiAt, ProvideRuntimeApi}; use sp_runtime::{traits::Block as BlockT, DigestItem, OpaqueExtrinsic}; use clap::{Args, Parser}; @@ -97,7 +97,7 @@ impl OverheadCmd { /// /// Writes the results to console and into two instances of the /// `weights.hbs` template, one for each benchmark. - pub fn run( + pub fn run( &self, cfg: Configuration, client: Arc, @@ -107,9 +107,9 @@ impl OverheadCmd { ) -> Result<()> where Block: BlockT, - BA: ClientBackend, - C: BlockBuilderProvider - + ProvideRuntimeApi + C: ProvideRuntimeApi + + CallApiAt + + UsageProvider + sp_blockchain::HeaderBackend, C::Api: ApiExt + BlockBuilderApi, { diff --git a/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs b/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs index 84da3aaa02c00ec86fe4ead0438706aab64d6476..5c76ca68e85f5708cbccde38c5f35cb4046033c5 100644 --- a/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs +++ b/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs @@ -25,7 +25,7 @@ use frame_support::traits::StorageInfo; use linked_hash_map::LinkedHashMap; use sc_cli::{execution_method_from_cli, CliConfiguration, Result, SharedParams}; use sc_client_db::BenchmarkingState; -use sc_executor::WasmExecutor; +use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY}; use sc_service::Configuration; use serde::Serialize; use sp_core::{ @@ -219,12 +219,20 @@ impl PalletCmd { let method = execution_method_from_cli(self.wasm_method, self.wasmtime_instantiation_strategy); + let heap_pages = + self.heap_pages + .map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |p| HeapAllocStrategy::Static { + extra_pages: p as _, + }); + let executor = WasmExecutor::<( sp_io::SubstrateHostFunctions, frame_benchmarking::benchmarking::HostFunctions, ExtraHostFunctions, )>::builder() .with_execution_method(method) + .with_onchain_heap_alloc_strategy(heap_pages) + .with_offchain_heap_alloc_strategy(heap_pages) .with_max_runtime_instances(2) .with_runtime_cache_size(2) .build(); @@ -748,13 +756,17 @@ impl CliConfiguration for PalletCmd { /// List the benchmarks available in the runtime, in a CSV friendly format. fn list_benchmark( - benchmarks_to_run: Vec<( + mut benchmarks_to_run: Vec<( Vec, Vec, Vec<(BenchmarkParameter, u32, u32)>, Vec<(String, String)>, )>, ) { + // Sort and de-dub by pallet and function name. 
+ benchmarks_to_run.sort_by(|(pa, sa, _, _), (pb, sb, _, _)| (pa, sa).cmp(&(pb, sb))); + benchmarks_to_run.dedup_by(|(pa, sa, _, _), (pb, sb, _, _)| (pa, sa) == (pb, sb)); + println!("pallet, benchmark"); for (pallet, extrinsic, _, _) in benchmarks_to_run { println!("{}, {}", String::from_utf8_lossy(&pallet), String::from_utf8_lossy(&extrinsic)); diff --git a/substrate/utils/frame/benchmarking-cli/src/pallet/writer.rs b/substrate/utils/frame/benchmarking-cli/src/pallet/writer.rs index 69c95d13c098530a0b1c09ef569c5ff976bf02e3..9493a693bbed321785bf0d23326d0f68b3efd534 100644 --- a/substrate/utils/frame/benchmarking-cli/src/pallet/writer.rs +++ b/substrate/utils/frame/benchmarking-cli/src/pallet/writer.rs @@ -779,6 +779,7 @@ fn worst_case_pov( /// A simple match statement which outputs the log 16 of some value. fn easy_log_16(i: u32) -> u32 { + #[allow(clippy::redundant_guards)] match i { i if i == 0 => 0, i if i <= 16 => 1, diff --git a/substrate/utils/frame/remote-externalities/Cargo.toml b/substrate/utils/frame/remote-externalities/Cargo.toml index ad6ab006da1dcd0c69c3ffa1b24a2a00128c6d34..7067aed238aca08b5b03ae99f568ad1777751911 100644 --- a/substrate/utils/frame/remote-externalities/Cargo.toml +++ b/substrate/utils/frame/remote-externalities/Cargo.toml @@ -23,7 +23,6 @@ sp-runtime = { path = "../../../primitives/runtime" } tokio = { version = "1.22.0", features = ["macros", "rt-multi-thread"] } substrate-rpc-client = { path = "../rpc/client" } futures = "0.3" -async-recursion = "1.0.4" indicatif = "0.17.3" spinners = "4.1.0" tokio-retry = "0.3.0" diff --git a/substrate/utils/frame/remote-externalities/src/lib.rs b/substrate/utils/frame/remote-externalities/src/lib.rs index 072ea6ef5e5970a6fe377e742e04d623f0c34607..71e9320ebeeb272faa156d758dd6571c9cb148a9 100644 --- a/substrate/utils/frame/remote-externalities/src/lib.rs +++ b/substrate/utils/frame/remote-externalities/src/lib.rs @@ -20,7 +20,6 @@ //! An equivalent of `sp_io::TestExternalities` that can load its state from a remote substrate //! based chain, or a local state snapshot file. -use async_recursion::async_recursion; use codec::{Compact, Decode, Encode}; use indicatif::{ProgressBar, ProgressStyle}; use jsonrpsee::{ @@ -44,7 +43,7 @@ use sp_runtime::{ use sp_state_machine::TestExternalities; use spinners::{Spinner, Spinners}; use std::{ - cmp::max, + cmp::{max, min}, fs, ops::{Deref, DerefMut}, path::{Path, PathBuf}, @@ -353,10 +352,11 @@ where const PARALLEL_REQUESTS: usize = 4; const BATCH_SIZE_INCREASE_FACTOR: f32 = 1.10; const BATCH_SIZE_DECREASE_FACTOR: f32 = 0.50; - const INITIAL_BATCH_SIZE: usize = 5000; + const REQUEST_DURATION_TARGET: Duration = Duration::from_secs(15); + const INITIAL_BATCH_SIZE: usize = 10; // nodes by default will not return more than 1000 keys per request const DEFAULT_KEY_DOWNLOAD_PAGE: u32 = 1000; - const KEYS_PAGE_MAX_RETRIES: usize = 12; + const MAX_RETRIES: usize = 12; const KEYS_PAGE_RETRY_INTERVAL: Duration = Duration::from_secs(5); async fn rpc_get_storage( @@ -411,8 +411,8 @@ where let keys = loop { // This loop can hit the node with very rapid requests, occasionally causing it to // error out in CI (https://github.com/paritytech/substrate/issues/14129), so we retry. 
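The retrying referred to in the comment above is plain `tokio-retry`: a fixed 5-second interval capped at `MAX_RETRIES` attempts. A minimal, self-contained sketch of the same pattern (`fetch_page` is a hypothetical stand-in for `get_keys_single_page`):

    use std::time::Duration;
    use tokio_retry::{strategy::FixedInterval, Retry};

    // Hypothetical fallible operation standing in for `get_keys_single_page`.
    async fn fetch_page() -> Result<Vec<u8>, String> {
        Ok(vec![])
    }

    #[tokio::main]
    async fn main() -> Result<(), String> {
        // Retry every 5 seconds, giving up after 12 further attempts - the same
        // shape as `KEYS_PAGE_RETRY_INTERVAL` / `MAX_RETRIES` above.
        let strategy = FixedInterval::new(Duration::from_secs(5)).take(12);
        let page = Retry::spawn(strategy, || fetch_page()).await?;
        println!("downloaded {} keys", page.len());
        Ok(())
    }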
- let retry_strategy = FixedInterval::new(Self::KEYS_PAGE_RETRY_INTERVAL) - .take(Self::KEYS_PAGE_MAX_RETRIES); + let retry_strategy = + FixedInterval::new(Self::KEYS_PAGE_RETRY_INTERVAL).take(Self::MAX_RETRIES); let get_page_closure = || self.get_keys_single_page(Some(prefix.clone()), last_key.clone(), at); let page = Retry::spawn(retry_strategy, get_page_closure).await?; @@ -448,8 +448,6 @@ where /// /// * `client` - An `Arc` wrapped `HttpClient` used for making the requests. /// * `payloads` - A vector of tuples containing a JSONRPC method name and `ArrayParams` - /// * `batch_size` - The initial batch size to use for the request. The batch size will be - /// adjusted dynamically in case of failure. /// /// # Returns /// @@ -485,80 +483,107 @@ where /// } /// } /// ``` - #[async_recursion] async fn get_storage_data_dynamic_batch_size( client: &HttpClient, payloads: Vec<(String, ArrayParams)>, - batch_size: usize, bar: &ProgressBar, ) -> Result>, String> { - // All payloads have been processed - if payloads.is_empty() { - return Ok(vec![]) - }; - - log::debug!( - target: LOG_TARGET, - "Remaining payloads: {} Batch request size: {}", - payloads.len(), - batch_size, - ); + let mut all_data: Vec> = vec![]; + let mut start_index = 0; + let mut retries = 0usize; + let mut batch_size = Self::INITIAL_BATCH_SIZE; + let total_payloads = payloads.len(); + + while start_index < total_payloads { + log::debug!( + target: LOG_TARGET, + "Remaining payloads: {} Batch request size: {}", + total_payloads - start_index, + batch_size, + ); - // Payloads to attempt to process this batch - let page = payloads.iter().take(batch_size).cloned().collect::>(); + let end_index = usize::min(start_index + batch_size, total_payloads); + let page = &payloads[start_index..end_index]; - // Build the batch request - let mut batch = BatchRequestBuilder::new(); - for (method, params) in page.iter() { - batch - .insert(method, params.clone()) - .map_err(|_| "Invalid batch method and/or params")? - } - let batch_response = match client.batch_request::>(batch).await { - Ok(batch_response) => batch_response, - Err(e) => { - if batch_size < 2 { - return Err(e.to_string()) - } + // Build the batch request + let mut batch = BatchRequestBuilder::new(); + for (method, params) in page.iter() { + batch + .insert(method, params.clone()) + .map_err(|_| "Invalid batch method and/or params")?; + } - log::debug!( - target: LOG_TARGET, - "Batch request failed, trying again with smaller batch size. {}", - e.to_string() - ); + let request_started = Instant::now(); + let batch_response = match client.batch_request::>(batch).await { + Ok(batch_response) => { + retries = 0; + batch_response + }, + Err(e) => { + if retries > Self::MAX_RETRIES { + return Err(e.to_string()) + } + + retries += 1; + let failure_log = format!( + "Batch request failed ({}/{} retries). Error: {}", + retries, + Self::MAX_RETRIES, + e.to_string() + ); + // after 2 subsequent failures something very wrong is happening. log a warning + // and reset the batch size down to 1. 
+ if retries >= 2 { + log::warn!("{}", failure_log); + batch_size = 1; + } else { + log::debug!("{}", failure_log); + // Decrease batch size by DECREASE_FACTOR + batch_size = + (batch_size as f32 * Self::BATCH_SIZE_DECREASE_FACTOR) as usize; + } + continue + }, + }; - return Self::get_storage_data_dynamic_batch_size( - client, - payloads, - max(1, (batch_size as f32 * Self::BATCH_SIZE_DECREASE_FACTOR) as usize), - bar, + let request_duration = request_started.elapsed(); + batch_size = if request_duration > Self::REQUEST_DURATION_TARGET { + // Decrease batch size + max(1, (batch_size as f32 * Self::BATCH_SIZE_DECREASE_FACTOR) as usize) + } else { + // Increase batch size, but not more than the remaining total payloads to process + min( + total_payloads - start_index, + max( + batch_size + 1, + (batch_size as f32 * Self::BATCH_SIZE_INCREASE_FACTOR) as usize, + ), ) - .await - }, - }; + }; + + log::debug!( + target: LOG_TARGET, + "Request duration: {:?} Target duration: {:?} Last batch size: {} Next batch size: {}", + request_duration, + Self::REQUEST_DURATION_TARGET, + end_index - start_index, + batch_size + ); - // Collect the data from this batch - let mut data: Vec> = vec![]; - let batch_response_len = batch_response.len(); - for item in batch_response.into_iter() { - match item { - Ok(x) => data.push(x), - Err(e) => return Err(e.message().to_string()), + let batch_response_len = batch_response.len(); + for item in batch_response.into_iter() { + match item { + Ok(x) => all_data.push(x), + Err(e) => return Err(e.message().to_string()), + } } + bar.inc(batch_response_len as u64); + + // Update the start index for the next iteration + start_index = end_index; } - bar.inc(batch_response_len as u64); - // Return this data joined with the remaining keys - let remaining_payloads = payloads.iter().skip(batch_size).cloned().collect::>(); - let mut rest = Self::get_storage_data_dynamic_batch_size( - client, - remaining_payloads, - max(batch_size + 1, (batch_size as f32 * Self::BATCH_SIZE_INCREASE_FACTOR) as usize), - bar, - ) - .await?; - data.append(&mut rest); - Ok(data) + Ok(all_data) } /// Synonym of `getPairs` that uses paged queries to first get the keys, and then @@ -605,12 +630,7 @@ where ); let payloads_chunked = payloads.chunks((&payloads.len() / Self::PARALLEL_REQUESTS).max(1)); let requests = payloads_chunked.map(|payload_chunk| { - Self::get_storage_data_dynamic_batch_size( - &client, - payload_chunk.to_vec(), - Self::INITIAL_BATCH_SIZE, - &bar, - ) + Self::get_storage_data_dynamic_batch_size(&client, payload_chunk.to_vec(), &bar) }); // Execute the requests and move the Result outside. 
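The rewrite above replaces the recursive batching with a single loop and an adaptive batch size: on a failed batch the size is halved (and pinned to 1 after two consecutive failures), while on success it is adjusted from the measured request duration and clamped to what is left to download. The duration-based adjustment, condensed into a pure function purely for illustration (`next_batch_size` is not a function in the patch):

    use std::time::Duration;

    const REQUEST_DURATION_TARGET: Duration = Duration::from_secs(15);
    const BATCH_SIZE_INCREASE_FACTOR: f32 = 1.10;
    const BATCH_SIZE_DECREASE_FACTOR: f32 = 0.50;

    /// Rough restatement of the batch-size update performed after every request.
    fn next_batch_size(current: usize, elapsed: Duration, remaining: usize) -> usize {
        if elapsed > REQUEST_DURATION_TARGET {
            // Too slow: halve the batch, but keep at least one payload per request.
            ((current as f32 * BATCH_SIZE_DECREASE_FACTOR) as usize).max(1)
        } else {
            // Fast enough: grow by 10% (at least by one), but never request more
            // than what is still left to fetch.
            ((current as f32 * BATCH_SIZE_INCREASE_FACTOR) as usize)
                .max(current + 1)
                .min(remaining)
        }
    }

    fn main() {
        // Starting from `INITIAL_BATCH_SIZE` (now 10), a fast request grows the
        // batch and a slow one shrinks it again.
        let grown = next_batch_size(10, Duration::from_secs(1), 1_000);
        let shrunk = next_batch_size(grown, Duration::from_secs(20), 1_000);
        println!("grown: {grown}, shrunk: {shrunk}");
    }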
let storage_data_result: Result, _> = @@ -683,20 +703,14 @@ where .collect::>(); let bar = ProgressBar::new(payloads.len() as u64); - let storage_data = match Self::get_storage_data_dynamic_batch_size( - client, - payloads, - Self::INITIAL_BATCH_SIZE, - &bar, - ) - .await - { - Ok(storage_data) => storage_data, - Err(e) => { - log::error!(target: LOG_TARGET, "batch processing failed: {:?}", e); - return Err("batch processing failed") - }, - }; + let storage_data = + match Self::get_storage_data_dynamic_batch_size(client, payloads, &bar).await { + Ok(storage_data) => storage_data, + Err(e) => { + log::error!(target: LOG_TARGET, "batch processing failed: {:?}", e); + return Err("batch processing failed") + }, + }; assert_eq!(child_keys_len, storage_data.len()); diff --git a/substrate/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml b/substrate/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml index 8ce9d06c2d11e650bb6687de70e628da3d6d4e22..d25e6ec67c9a841f7a908cb6d4c6a229250cc905 100644 --- a/substrate/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml +++ b/substrate/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml @@ -29,4 +29,4 @@ sc-rpc-api = { path = "../../../../client/rpc-api" } sp-runtime = { path = "../../../../primitives/runtime" } [dev-dependencies] -serde_json = "1" +serde_json = "1.0.108" diff --git a/substrate/utils/frame/rpc/support/Cargo.toml b/substrate/utils/frame/rpc/support/Cargo.toml index b0a00e1207a653bba33506be47398acbec90d6a0..22283fbf4506cccc0cbfbda3fabdb852c5b89168 100644 --- a/substrate/utils/frame/rpc/support/Cargo.toml +++ b/substrate/utils/frame/rpc/support/Cargo.toml @@ -20,7 +20,7 @@ sc-rpc-api = { path = "../../../../client/rpc-api" } sp-storage = { path = "../../../../primitives/storage" } [dev-dependencies] -scale-info = "2.1.1" +scale-info = "2.10.0" jsonrpsee = { version = "0.16.2", features = ["ws-client", "jsonrpsee-types"] } tokio = "1.22.0" sp-core = { path = "../../../../primitives/core" } diff --git a/substrate/utils/frame/try-runtime/cli/Cargo.toml b/substrate/utils/frame/try-runtime/cli/Cargo.toml index 65380a22ce6e0adc95b837b1a7b340764c3ad775..6be4306193ce41437d80f4a2f6bf53b1432f4f83 100644 --- a/substrate/utils/frame/try-runtime/cli/Cargo.toml +++ b/substrate/utils/frame/try-runtime/cli/Cargo.toml @@ -40,7 +40,7 @@ hex = { version = "0.4.3", default-features = false } log = "0.4.17" parity-scale-codec = "3.6.1" serde = "1.0.188" -serde_json = "1.0.107" +serde_json = "1.0.108" zstd = { version = "0.12.4", default-features = false } [dev-dependencies] diff --git a/substrate/utils/wasm-builder/Cargo.toml b/substrate/utils/wasm-builder/Cargo.toml index 73586ebe614e3e85fd6d73c06dbb2fa42481eaca..2550122ad4b4c038367ffe9ddf4b6ad45fe76245 100644 --- a/substrate/utils/wasm-builder/Cargo.toml +++ b/substrate/utils/wasm-builder/Cargo.toml @@ -22,5 +22,5 @@ toml = "0.7.3" walkdir = "2.3.2" sp-maybe-compressed-blob = { path = "../../primitives/maybe-compressed-blob" } filetime = "0.2.16" -wasm-opt = "0.114" +wasm-opt = "0.116" parity-wasm = "0.45" diff --git a/substrate/utils/wasm-builder/src/builder.rs b/substrate/utils/wasm-builder/src/builder.rs index e0a30644b231adce3eb539a1e5226781415df17d..9c1655d85623b3c0b8568699b923af9390837c16 100644 --- a/substrate/utils/wasm-builder/src/builder.rs +++ b/substrate/utils/wasm-builder/src/builder.rs @@ -273,9 +273,9 @@ fn build_project( ); let (wasm_binary, wasm_binary_bloaty) = if let Some(wasm_binary) = wasm_binary { - (wasm_binary.wasm_binary_path_escaped(), bloaty.wasm_binary_bloaty_path_escaped()) + 
(wasm_binary.wasm_binary_path_escaped(), bloaty.bloaty_path_escaped()) } else { - (bloaty.wasm_binary_bloaty_path_escaped(), bloaty.wasm_binary_bloaty_path_escaped()) + (bloaty.bloaty_path_escaped(), bloaty.bloaty_path_escaped()) }; crate::write_file_if_changed( diff --git a/substrate/utils/wasm-builder/src/prerequisites.rs b/substrate/utils/wasm-builder/src/prerequisites.rs index f5a985ab92b5d6b4b6b680aa5dad43eabef8cc4e..8e81e6774faa68b0081a5cb0b679175160440042 100644 --- a/substrate/utils/wasm-builder/src/prerequisites.rs +++ b/substrate/utils/wasm-builder/src/prerequisites.rs @@ -143,9 +143,10 @@ fn check_wasm_toolchain_installed( run_cmd.current_dir(&temp); run_cmd.args(&["run", "--manifest-path", &manifest_path]); - // Unset the `CARGO_TARGET_DIR` to prevent a cargo deadlock - build_cmd.env_remove("CARGO_TARGET_DIR"); - run_cmd.env_remove("CARGO_TARGET_DIR"); + // manually set the `CARGO_TARGET_DIR` to prevent a cargo deadlock + let target_dir = temp.path().join("target").display().to_string(); + build_cmd.env("CARGO_TARGET_DIR", &target_dir); + run_cmd.env("CARGO_TARGET_DIR", &target_dir); // Make sure the host's flags aren't used here, e.g. if an alternative linker is specified // in the RUSTFLAGS then the check we do here will break unless we clear these. diff --git a/substrate/utils/wasm-builder/src/wasm_project.rs b/substrate/utils/wasm-builder/src/wasm_project.rs index 849af853c6da4647df7bd8bdd0135e6e1e8ac381..2e6f671c45edf1558e25edd35771929c89b3bca2 100644 --- a/substrate/utils/wasm-builder/src/wasm_project.rs +++ b/substrate/utils/wasm-builder/src/wasm_project.rs @@ -18,7 +18,7 @@ use crate::{write_file_if_changed, CargoCommandVersioned, OFFLINE}; use build_helper::rerun_if_changed; -use cargo_metadata::{CargoOpt, Metadata, MetadataCommand}; +use cargo_metadata::{DependencyKind, Metadata, MetadataCommand}; use parity_wasm::elements::{deserialize_buffer, Module}; use std::{ borrow::ToOwned, @@ -48,13 +48,13 @@ fn colorize_info_message(message: &str) -> String { pub struct WasmBinaryBloaty(PathBuf); impl WasmBinaryBloaty { - /// Returns the escaped path to the bloaty wasm binary. - pub fn wasm_binary_bloaty_path_escaped(&self) -> String { + /// Returns the escaped path to the bloaty binary. + pub fn bloaty_path_escaped(&self) -> String { self.0.display().to_string().escape_default().to_string() } - /// Returns the path to the wasm binary. - pub fn wasm_binary_bloaty_path(&self) -> &Path { + /// Returns the path to the binary. + pub fn bloaty_path(&self) -> &Path { &self.0 } } @@ -89,8 +89,7 @@ fn crate_metadata(cargo_manifest: &Path) -> Metadata { cargo_manifest.to_path_buf() }; - let mut crate_metadata_command = create_metadata_command(cargo_manifest); - crate_metadata_command.features(CargoOpt::AllFeatures); + let crate_metadata_command = create_metadata_command(cargo_manifest); let crate_metadata = crate_metadata_command .exec() @@ -110,78 +109,112 @@ fn crate_metadata(cargo_manifest: &Path) -> Metadata { /// /// # Returns /// -/// The path to the compact WASM binary and the bloaty WASM binary. +/// The path to the compact runtime binary and the bloaty runtime binary. 
pub(crate) fn create_and_compile( project_cargo_toml: &Path, default_rustflags: &str, cargo_cmd: CargoCommandVersioned, features_to_enable: Vec, - wasm_binary_name: Option, + bloaty_blob_out_name_override: Option, check_for_runtime_version_section: bool, ) -> (Option, WasmBinaryBloaty) { - let wasm_workspace_root = get_wasm_workspace_root(); - let wasm_workspace = wasm_workspace_root.join("wbuild"); + let runtime_workspace_root = get_wasm_workspace_root(); + let runtime_workspace = runtime_workspace_root.join("wbuild"); let crate_metadata = crate_metadata(project_cargo_toml); let project = create_project( project_cargo_toml, - &wasm_workspace, + &runtime_workspace, &crate_metadata, crate_metadata.workspace_root.as_ref(), features_to_enable, ); - let profile = build_project(&project, default_rustflags, cargo_cmd); - let (wasm_binary, wasm_binary_compressed, bloaty) = - compact_wasm_file(&project, profile, project_cargo_toml, wasm_binary_name); + let build_config = BuildConfiguration::detect(&project); + + // Build the bloaty runtime blob + build_bloaty_blob(&build_config.blob_build_profile, &project, default_rustflags, cargo_cmd); + + // Get the name of the bloaty runtime blob. + let bloaty_blob_default_name = get_blob_name(project_cargo_toml); + let bloaty_blob_out_name = + bloaty_blob_out_name_override.unwrap_or_else(|| bloaty_blob_default_name.clone()); + + let bloaty_blob_binary = copy_bloaty_blob( + &project, + &build_config.blob_build_profile, + &bloaty_blob_default_name, + &bloaty_blob_out_name, + ); + + // Try to compact and compress the bloaty blob, if the *outer* profile wants it. + // + // This is because, by default the inner profile will be set to `Release` even when the outer + // profile is `Debug`, because the blob built in `Debug` profile is too slow for normal + // development activities. 
+ let (compact_blob_path, compact_compressed_blob_path) = + if build_config.outer_build_profile.wants_compact() { + let compact_blob_path = compact_wasm( + &project, + &build_config.blob_build_profile, + project_cargo_toml, + &bloaty_blob_out_name, + ); + let compact_compressed_blob_path = compact_blob_path + .as_ref() + .and_then(|p| try_compress_blob(&p.0, &bloaty_blob_out_name)); + (compact_blob_path, compact_compressed_blob_path) + } else { + (None, None) + }; if check_for_runtime_version_section { - ensure_runtime_version_wasm_section_exists(bloaty.wasm_binary_bloaty_path()); + ensure_runtime_version_wasm_section_exists(bloaty_blob_binary.bloaty_path()); } - wasm_binary + compact_blob_path .as_ref() - .map(|wasm_binary| copy_wasm_to_target_directory(project_cargo_toml, wasm_binary)); + .map(|wasm_binary| copy_blob_to_target_directory(project_cargo_toml, wasm_binary)); - wasm_binary_compressed.as_ref().map(|wasm_binary_compressed| { - copy_wasm_to_target_directory(project_cargo_toml, wasm_binary_compressed) + compact_compressed_blob_path.as_ref().map(|wasm_binary_compressed| { + copy_blob_to_target_directory(project_cargo_toml, wasm_binary_compressed) }); - let final_wasm_binary = wasm_binary_compressed.or(wasm_binary); + let final_blob_binary = compact_compressed_blob_path.or(compact_blob_path); generate_rerun_if_changed_instructions( project_cargo_toml, &project, - &wasm_workspace, - final_wasm_binary.as_ref(), - &bloaty, + &runtime_workspace, + final_blob_binary.as_ref(), + &bloaty_blob_binary, ); - if let Err(err) = adjust_mtime(&bloaty, final_wasm_binary.as_ref()) { - build_helper::warning!("Error while adjusting the mtime of the wasm binaries: {}", err) + if let Err(err) = adjust_mtime(&bloaty_blob_binary, final_blob_binary.as_ref()) { + build_helper::warning!("Error while adjusting the mtime of the blob binaries: {}", err) } - (final_wasm_binary, bloaty) + (final_blob_binary, bloaty_blob_binary) } -/// Ensures that the `runtime_version` wasm section exists in the given wasm file. +/// Ensures that the `runtime_version` section exists in the given blob. /// /// If the section can not be found, it will print an error and exit the builder. -fn ensure_runtime_version_wasm_section_exists(wasm: &Path) { - let wasm_blob = fs::read(wasm).expect("`{wasm}` was just written and should exist; qed"); +fn ensure_runtime_version_wasm_section_exists(blob_path: &Path) { + let blob = fs::read(blob_path).expect("`{blob_path}` was just written and should exist; qed"); - let module: Module = match deserialize_buffer(&wasm_blob) { + let module: Module = match deserialize_buffer(&blob) { Ok(m) => m, Err(e) => { - println!("Failed to deserialize `{}`: {e:?}", wasm.display()); + println!("Failed to deserialize `{}`: {e:?}", blob_path.display()); process::exit(1); }, }; if !module.custom_sections().any(|cs| cs.name() == "runtime_version") { println!( - "Couldn't find the `runtime_version` wasm section. \ + "Couldn't find the `runtime_version` section. \ Please ensure that you are using the `sp_version::runtime_version` attribute macro!" 
); process::exit(1); @@ -208,7 +241,7 @@ fn adjust_mtime( let metadata = fs::metadata(invoked_timestamp)?; let mtime = filetime::FileTime::from_last_modification_time(&metadata); - filetime::set_file_mtime(bloaty_wasm.wasm_binary_bloaty_path(), mtime)?; + filetime::set_file_mtime(bloaty_wasm.bloaty_path(), mtime)?; if let Some(binary) = compressed_or_compact_wasm.as_ref() { filetime::set_file_mtime(binary.wasm_binary_path(), mtime)?; } @@ -279,8 +312,8 @@ fn get_crate_name(cargo_manifest: &Path) -> String { .expect("Package name exists; qed") } -/// Returns the name for the wasm binary. -fn get_wasm_binary_name(cargo_manifest: &Path) -> String { +/// Returns the name for the blob binary. +fn get_blob_name(cargo_manifest: &Path) -> String { get_crate_name(cargo_manifest).replace('-', "_") } @@ -501,7 +534,7 @@ fn create_project( ) -> PathBuf { let crate_name = get_crate_name(project_cargo_toml); let crate_path = project_cargo_toml.parent().expect("Parent path exists; qed"); - let wasm_binary = get_wasm_binary_name(project_cargo_toml); + let wasm_binary = get_blob_name(project_cargo_toml); let wasm_project_folder = wasm_workspace.join(&crate_name); fs::create_dir_all(wasm_project_folder.join("src")) @@ -539,8 +572,8 @@ fn create_project( wasm_project_folder } -/// The cargo profile that is used to build the wasm project. -#[derive(Debug, EnumIter)] +/// A rustc profile. +#[derive(Clone, Debug, EnumIter)] enum Profile { /// The `--profile dev` profile. Debug, @@ -551,17 +584,63 @@ enum Profile { } impl Profile { - /// Create a profile by detecting which profile is used for the main build. + /// The name of the profile as supplied to the cargo `--profile` cli option. + fn name(&self) -> &'static str { + match self { + Self::Debug => "dev", + Self::Release => "release", + Self::Production => "production", + } + } + + /// The sub directory within `target` where cargo places the build output. + /// + /// # Note + /// + /// Usually this is the same as [`Self::name`] with the exception of the debug + /// profile which is called `dev`. + fn directory(&self) -> &'static str { + match self { + Self::Debug => "debug", + _ => self.name(), + } + } + + /// Whether the resulting binary should be compacted and compressed. + fn wants_compact(&self) -> bool { + !matches!(self, Self::Debug) + } +} + +/// The build configuration for this build. +#[derive(Debug)] +struct BuildConfiguration { + /// The profile that is used to build the outer project. + pub outer_build_profile: Profile, + /// The profile to use to build the runtime blob. + pub blob_build_profile: Profile, +} + +impl BuildConfiguration { + /// Create a [`BuildConfiguration`] by detecting which profile is used for the main build and + /// checking any env var overrides. /// /// We cannot easily determine the profile that is used by the main cargo invocation /// because the `PROFILE` environment variable won't contain any custom profiles like /// "production". It would only contain the builtin profile where the custom profile /// inherits from. This is why we inspect the build path to learn which profile is used. /// + /// When not overriden by a env variable we always default to building wasm with the `Release` + /// profile even when the main build uses the debug build. This is because wasm built with the + /// `Debug` profile is too slow for normal development activities and almost never intended. 
+ /// + /// When cargo is building in `--profile dev`, user likely intends to compile fast, so we don't + /// bother producing compact or compressed blobs. + /// /// # Note /// /// Can be overriden by setting [`crate::WASM_BUILD_TYPE_ENV`]. - fn detect(wasm_project: &Path) -> Profile { + fn detect(wasm_project: &Path) -> Self { let (name, overriden) = if let Ok(name) = env::var(crate::WASM_BUILD_TYPE_ENV) { (name, true) } else { @@ -585,7 +664,8 @@ impl Profile { .to_string(); (name, false) }; - match (Profile::iter().find(|p| p.directory() == name), overriden) { + let outer_build_profile = Profile::iter().find(|p| p.directory() == name); + let blob_build_profile = match (outer_build_profile.clone(), overriden) { // When not overriden by a env variable we default to using the `Release` profile // for the wasm build even when the main build uses the debug build. This // is because the `Debug` profile is too slow for normal development activities. @@ -615,35 +695,12 @@ impl Profile { ); process::exit(1); }, + }; + BuildConfiguration { + outer_build_profile: outer_build_profile.unwrap_or(Profile::Release), + blob_build_profile, } } - - /// The name of the profile as supplied to the cargo `--profile` cli option. - fn name(&self) -> &'static str { - match self { - Self::Debug => "dev", - Self::Release => "release", - Self::Production => "production", - } - } - - /// The sub directory within `target` where cargo places the build output. - /// - /// # Note - /// - /// Usually this is the same as [`Self::name`] with the exception of the debug - /// profile which is called `dev`. - fn directory(&self) -> &'static str { - match self { - Self::Debug => "debug", - _ => self.name(), - } - } - - /// Whether the resulting binary should be compacted and compressed. - fn wants_compact(&self) -> bool { - !matches!(self, Self::Debug) - } } /// Check environment whether we should build without network @@ -651,12 +708,13 @@ fn offline_build() -> bool { env::var(OFFLINE).map_or(false, |v| v == "true") } -/// Build the project to create the WASM binary. -fn build_project( +/// Build the project and create the bloaty runtime blob. +fn build_bloaty_blob( + blob_build_profile: &Profile, project: &Path, default_rustflags: &str, cargo_cmd: CargoCommandVersioned, -) -> Profile { +) { let manifest_path = project.join("Cargo.toml"); let mut build_cmd = cargo_cmd.command(); @@ -670,10 +728,10 @@ fn build_project( .args(&["rustc", "--target=wasm32-unknown-unknown"]) .arg(format!("--manifest-path={}", manifest_path.display())) .env("RUSTFLAGS", rustflags) - // Unset the `CARGO_TARGET_DIR` to prevent a cargo deadlock (cargo locks a target dir + // Manually set the `CARGO_TARGET_DIR` to prevent a cargo deadlock (cargo locks a target dir // exclusive). The runner project is created in `CARGO_TARGET_DIR` and executing it will // create a sub target directory inside of `CARGO_TARGET_DIR`. - .env_remove("CARGO_TARGET_DIR") + .env("CARGO_TARGET_DIR", &project.join("target").display().to_string()) // As we are being called inside a build-script, this env variable is set. However, we set // our own `RUSTFLAGS` and thus, we need to remove this. Otherwise cargo favors this // env variable. 
@@ -685,9 +743,8 @@ fn build_project( build_cmd.arg("--color=always"); } - let profile = Profile::detect(project); build_cmd.arg("--profile"); - build_cmd.arg(profile.name()); + build_cmd.arg(blob_build_profile.name()); if offline_build() { build_cmd.arg("--offline"); @@ -697,69 +754,82 @@ fn build_project( println!("{} {:?}", colorize_info_message("Executing build command:"), build_cmd); println!("{} {}", colorize_info_message("Using rustc version:"), cargo_cmd.rustc_version()); - match build_cmd.status().map(|s| s.success()) { - Ok(true) => profile, - // Use `process.exit(1)` to have a clean error output. - _ => process::exit(1), + // Use `process::exit(1)` to have a clean error output. + if build_cmd.status().map(|s| s.success()).is_err() { + process::exit(1); } } -/// Compact the WASM binary using `wasm-gc` and compress it using zstd. -fn compact_wasm_file( +fn compact_wasm( project: &Path, - profile: Profile, + inner_profile: &Profile, cargo_manifest: &Path, - out_name: Option, -) -> (Option, Option, WasmBinaryBloaty) { - let default_out_name = get_wasm_binary_name(cargo_manifest); - let out_name = out_name.unwrap_or_else(|| default_out_name.clone()); + out_name: &str, +) -> Option { + let default_out_name = get_blob_name(cargo_manifest); let in_path = project .join("target/wasm32-unknown-unknown") - .join(profile.directory()) + .join(inner_profile.directory()) .join(format!("{}.wasm", default_out_name)); - let (wasm_compact_path, wasm_compact_compressed_path) = if profile.wants_compact() { - let wasm_compact_path = project.join(format!("{}.compact.wasm", out_name,)); - wasm_opt::OptimizationOptions::new_opt_level_0() - .mvp_features_only() - .debug_info(true) - .add_pass(wasm_opt::Pass::StripDwarf) - .run(&in_path, &wasm_compact_path) - .expect("Failed to compact generated WASM binary."); - - let wasm_compact_compressed_path = - project.join(format!("{}.compact.compressed.wasm", out_name)); - if compress_wasm(&wasm_compact_path, &wasm_compact_compressed_path) { - (Some(WasmBinary(wasm_compact_path)), Some(WasmBinary(wasm_compact_compressed_path))) - } else { - (Some(WasmBinary(wasm_compact_path)), None) - } - } else { - (None, None) - }; + let wasm_compact_path = project.join(format!("{}.compact.wasm", out_name)); + let start = std::time::Instant::now(); + wasm_opt::OptimizationOptions::new_opt_level_0() + .mvp_features_only() + .debug_info(true) + .add_pass(wasm_opt::Pass::StripDwarf) + .run(&in_path, &wasm_compact_path) + .expect("Failed to compact generated WASM binary."); + println!( + "{} {}", + colorize_info_message("Compacted wasm in"), + colorize_info_message(format!("{:?}", start.elapsed()).as_str()) + ); + Some(WasmBinary(wasm_compact_path)) +} + +fn copy_bloaty_blob( + project: &Path, + inner_profile: &Profile, + in_name: &str, + out_name: &str, +) -> WasmBinaryBloaty { + let in_path = project + .join("target/wasm32-unknown-unknown") + .join(inner_profile.directory()) + .join(format!("{}.wasm", in_name)); let bloaty_path = project.join(format!("{}.wasm", out_name)); fs::copy(in_path, &bloaty_path).expect("Copying the bloaty file to the project dir."); - - (wasm_compact_path, wasm_compact_compressed_path, WasmBinaryBloaty(bloaty_path)) + WasmBinaryBloaty(bloaty_path) } -fn compress_wasm(wasm_binary_path: &Path, compressed_binary_out_path: &Path) -> bool { +fn try_compress_blob(compact_blob_path: &Path, out_name: &str) -> Option { use sp_maybe_compressed_blob::CODE_BLOB_BOMB_LIMIT; - let data = fs::read(wasm_binary_path).expect("Failed to read WASM binary"); + let project = 
compact_blob_path.parent().expect("blob path should have a parent directory"); + let compact_compressed_blob_path = + project.join(format!("{}.compact.compressed.wasm", out_name)); + + let start = std::time::Instant::now(); + let data = fs::read(compact_blob_path).expect("Failed to read WASM binary"); if let Some(compressed) = sp_maybe_compressed_blob::compress(&data, CODE_BLOB_BOMB_LIMIT) { - fs::write(compressed_binary_out_path, &compressed[..]) + fs::write(&compact_compressed_blob_path, &compressed[..]) .expect("Failed to write WASM binary"); - true + println!( + "{} {}", + colorize_info_message("Compressed blob in"), + colorize_info_message(format!("{:?}", start.elapsed()).as_str()) + ); + Some(WasmBinary(compact_compressed_blob_path)) } else { build_helper::warning!( - "Writing uncompressed wasm. Exceeded maximum size {}", + "Writing uncompressed blob. Exceeded maximum size {}", CODE_BLOB_BOMB_LIMIT, ); - - false + println!("{}", colorize_info_message("Skipping blob compression")); + None } } @@ -844,6 +914,11 @@ fn generate_rerun_if_changed_instructions( packages.insert(DeduplicatePackage::from(package)); while let Some(dependency) = dependencies.pop() { + // Ignore all dev dependencies + if dependency.kind == DependencyKind::Development { + continue; + } + let path_or_git_dep = dependency.source.as_ref().map(|s| s.starts_with("git+")).unwrap_or(true); @@ -869,7 +944,7 @@ fn generate_rerun_if_changed_instructions( packages.iter().for_each(package_rerun_if_changed); compressed_or_compact_wasm.map(|w| rerun_if_changed(w.wasm_binary_path())); - rerun_if_changed(bloaty_wasm.wasm_binary_bloaty_path()); + rerun_if_changed(bloaty_wasm.bloaty_path()); // Register our env variables println!("cargo:rerun-if-env-changed={}", crate::SKIP_BUILD_ENV); @@ -896,15 +971,13 @@ fn package_rerun_if_changed(package: &DeduplicatePackage) { p.path() == manifest_path || !p.path().is_dir() || !p.path().join("Cargo.toml").exists() }) .filter_map(|p| p.ok().map(|p| p.into_path())) - .filter(|p| { - p.is_dir() || p.extension().map(|e| e == "rs" || e == "toml").unwrap_or_default() - }) + .filter(|p| p.extension().map(|e| e == "rs" || e == "toml").unwrap_or_default()) .for_each(rerun_if_changed); } -/// Copy the WASM binary to the target directory set in `WASM_TARGET_DIRECTORY` environment +/// Copy the blob binary to the target directory set in `WASM_TARGET_DIRECTORY` environment /// variable. If the variable is not set, this is a no-op. -fn copy_wasm_to_target_directory(cargo_manifest: &Path, wasm_binary: &WasmBinary) { +fn copy_blob_to_target_directory(cargo_manifest: &Path, blob_binary: &WasmBinary) { let target_dir = match env::var(crate::WASM_TARGET_DIRECTORY) { Ok(path) => PathBuf::from(path), Err(_) => return, @@ -923,8 +996,8 @@ fn copy_wasm_to_target_directory(cargo_manifest: &Path, wasm_binary: &WasmBinary fs::create_dir_all(&target_dir).expect("Creates `WASM_TARGET_DIRECTORY`."); fs::copy( - wasm_binary.wasm_binary_path(), - target_dir.join(format!("{}.wasm", get_wasm_binary_name(cargo_manifest))), + blob_binary.wasm_binary_path(), + target_dir.join(format!("{}.wasm", get_blob_name(cargo_manifest))), ) - .expect("Copies WASM binary to `WASM_TARGET_DIRECTORY`."); + .expect("Copies blob binary to `WASM_TARGET_DIRECTORY`."); }
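All of the renaming above stays internal to `substrate-wasm-builder`; the entry point that runtime crates call is unchanged. For orientation, this is the conventional runtime `build.rs` that ends up in `create_and_compile` (standard `WasmBuilder` usage, not something introduced by this patch):

    // build.rs of a runtime crate using substrate-wasm-builder.
    fn main() {
        #[cfg(feature = "std")]
        {
            substrate_wasm_builder::WasmBuilder::new()
                // Use the Cargo.toml of the crate this build script belongs to.
                .with_current_project()
                // Export `__heap_base` and import memory, as runtimes conventionally do.
                .export_heap_base()
                .import_memory()
                // Kicks off the build handled by wasm_project.rs above and emits
                // the `wasm_binary.rs` include file into OUT_DIR.
                .build();
        }
    }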

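A recurring change later in this section is the chain-spec migration: both the node-template and the kitchensink node drop ChainSpec::from_genesis with its typed RuntimeGenesisConfig closure in favour of ChainSpec::builder plus a JSON genesis patch. A hedged sketch of the new pattern, modelled on the node-template's development_config further below; the angle-bracketed type parameters and the sudo-only patch are assumptions for illustration, while the real configs also patch balances and authorities:

use node_template_runtime::WASM_BINARY;
use sc_service::ChainType;
use sp_core::sr25519;

// `ChainSpec` and `get_account_id_from_seed` are the helpers already defined
// in the node-template's chain_spec.rs.
pub fn development_config() -> Result<ChainSpec, String> {
    Ok(ChainSpec::builder(
        WASM_BINARY.ok_or_else(|| "Development wasm not available".to_string())?,
        None, // no chain-spec extensions
    )
    .with_name("Development")
    .with_id("dev")
    .with_chain_type(ChainType::Development)
    // A *patch*: only keys that differ from the runtime's default genesis
    // config need to be present; everything else keeps its default value.
    .with_genesis_config_patch(serde_json::json!({
        "sudo": { "key": get_account_id_from_seed::<sr25519::Public>("Alice") },
    }))
    .build())
}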
, + /// Whether to deny unsafe calls + pub deny_unsafe: DenyUnsafe, +} + +/// Instantiate all full RPC extensions. +pub fn create_full( + deps: FullDeps, +) -> Result, Box> +where + C: Send + + Sync + + 'static + + sp_api::ProvideRuntimeApi + + HeaderBackend + + HeaderMetadata + + 'static, + C::Api: sp_block_builder::BlockBuilder, + C::Api: substrate_frame_rpc_system::AccountNonceApi, + P: TransactionPool + 'static, +{ + let mut module = RpcModule::new(()); + let FullDeps { client, pool, deny_unsafe } = deps; + + module.merge(System::new(client.clone(), pool.clone(), deny_unsafe).into_rpc())?; + + Ok(module) +} diff --git a/substrate/bin/minimal/node/src/service.rs b/substrate/bin/minimal/node/src/service.rs new file mode 100644 index 0000000000000000000000000000000000000000..b6369c44dda95b08431c267c631692e5d7761be9 --- /dev/null +++ b/substrate/bin/minimal/node/src/service.rs @@ -0,0 +1,254 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use futures::FutureExt; +use runtime::{self, interface::OpaqueBlock as Block, RuntimeApi}; +use sc_client_api::backend::Backend; +use sc_executor::WasmExecutor; +use sc_service::{error::Error as ServiceError, Configuration, TaskManager}; +use sc_telemetry::{Telemetry, TelemetryWorker}; +use sc_transaction_pool_api::OffchainTransactionPoolFactory; +use std::sync::Arc; + +use crate::cli::Consensus; + +#[cfg(feature = "runtime-benchmarks")] +type HostFunctions = + (sp_io::SubstrateHostFunctions, frame_benchmarking::benchmarking::HostFunctions); + +#[cfg(not(feature = "runtime-benchmarks"))] +type HostFunctions = sp_io::SubstrateHostFunctions; + +pub(crate) type FullClient = + sc_service::TFullClient>; +type FullBackend = sc_service::TFullBackend; +type FullSelectChain = sc_consensus::LongestChain; + +pub fn new_partial( + config: &Configuration, +) -> Result< + sc_service::PartialComponents< + FullClient, + FullBackend, + FullSelectChain, + sc_consensus::DefaultImportQueue, + sc_transaction_pool::FullPool, + Option, + >, + ServiceError, +> { + let telemetry = config + .telemetry_endpoints + .clone() + .filter(|x| !x.is_empty()) + .map(|endpoints| -> Result<_, sc_telemetry::Error> { + let worker = TelemetryWorker::new(16)?; + let telemetry = worker.handle().new_telemetry(endpoints); + Ok((worker, telemetry)) + }) + .transpose()?; + + let executor = sc_service::new_wasm_executor(&config); + + let (client, backend, keystore_container, task_manager) = + sc_service::new_full_parts::( + config, + telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), + executor, + )?; + let client = Arc::new(client); + + let telemetry = telemetry.map(|(worker, telemetry)| { + task_manager.spawn_handle().spawn("telemetry", None, worker.run()); + telemetry + }); + + let select_chain = sc_consensus::LongestChain::new(backend.clone()); + + let transaction_pool = sc_transaction_pool::BasicPool::new_full( + config.transaction_pool.clone(), + 
config.role.is_authority().into(), + config.prometheus_registry(), + task_manager.spawn_essential_handle(), + client.clone(), + ); + + let import_queue = sc_consensus_manual_seal::import_queue( + Box::new(client.clone()), + &task_manager.spawn_essential_handle(), + config.prometheus_registry(), + ); + + Ok(sc_service::PartialComponents { + client, + backend, + task_manager, + import_queue, + keystore_container, + select_chain, + transaction_pool, + other: (telemetry), + }) +} + +/// Builds a new service for a full client. +pub fn new_full(config: Configuration, consensus: Consensus) -> Result { + let sc_service::PartialComponents { + client, + backend, + mut task_manager, + import_queue, + keystore_container, + select_chain, + transaction_pool, + other: mut telemetry, + } = new_partial(&config)?; + + let net_config = sc_network::config::FullNetworkConfiguration::new(&config.network); + + let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) = + sc_service::build_network(sc_service::BuildNetworkParams { + config: &config, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + spawn_handle: task_manager.spawn_handle(), + import_queue, + net_config, + block_announce_validator_builder: None, + warp_sync_params: None, + block_relay: None, + })?; + + if config.offchain_worker.enabled { + task_manager.spawn_handle().spawn( + "offchain-workers-runner", + "offchain-worker", + sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions { + runtime_api_provider: client.clone(), + is_validator: config.role.is_authority(), + keystore: Some(keystore_container.keystore()), + offchain_db: backend.offchain_storage(), + transaction_pool: Some(OffchainTransactionPoolFactory::new( + transaction_pool.clone(), + )), + network_provider: network.clone(), + enable_http_requests: true, + custom_extensions: |_| vec![], + }) + .run(client.clone(), task_manager.spawn_handle()) + .boxed(), + ); + } + + let rpc_extensions_builder = { + let client = client.clone(); + let pool = transaction_pool.clone(); + + Box::new(move |deny_unsafe, _| { + let deps = + crate::rpc::FullDeps { client: client.clone(), pool: pool.clone(), deny_unsafe }; + crate::rpc::create_full(deps).map_err(Into::into) + }) + }; + + let prometheus_registry = config.prometheus_registry().cloned(); + + let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams { + network, + client: client.clone(), + keystore: keystore_container.keystore(), + task_manager: &mut task_manager, + transaction_pool: transaction_pool.clone(), + rpc_builder: rpc_extensions_builder, + backend, + system_rpc_tx, + tx_handler_controller, + sync_service, + config, + telemetry: telemetry.as_mut(), + })?; + + let proposer = sc_basic_authorship::ProposerFactory::new( + task_manager.spawn_handle(), + client.clone(), + transaction_pool.clone(), + prometheus_registry.as_ref(), + telemetry.as_ref().map(|x| x.handle()), + ); + + match consensus { + Consensus::InstantSeal => { + let params = sc_consensus_manual_seal::InstantSealParams { + block_import: client.clone(), + env: proposer, + client, + pool: transaction_pool, + select_chain, + consensus_data_provider: None, + create_inherent_data_providers: move |_, ()| async move { + Ok(sp_timestamp::InherentDataProvider::from_system_time()) + }, + }; + + let authorship_future = sc_consensus_manual_seal::run_instant_seal(params); + + task_manager.spawn_essential_handle().spawn_blocking( + "instant-seal", + None, + authorship_future, + ); + }, + Consensus::ManualSeal(block_time) 
=> { + let (mut sink, commands_stream) = futures::channel::mpsc::channel(1024); + task_manager.spawn_handle().spawn("block_authoring", None, async move { + loop { + futures_timer::Delay::new(std::time::Duration::from_millis(block_time)).await; + sink.try_send(sc_consensus_manual_seal::EngineCommand::SealNewBlock { + create_empty: true, + finalize: true, + parent_hash: None, + sender: None, + }) + .unwrap(); + } + }); + + let params = sc_consensus_manual_seal::ManualSealParams { + block_import: client.clone(), + env: proposer, + client, + pool: transaction_pool, + select_chain, + commands_stream: Box::pin(commands_stream), + consensus_data_provider: None, + create_inherent_data_providers: move |_, ()| async move { + Ok(sp_timestamp::InherentDataProvider::from_system_time()) + }, + }; + let authorship_future = sc_consensus_manual_seal::run_manual_seal(params); + + task_manager.spawn_essential_handle().spawn_blocking( + "manual-seal", + None, + authorship_future, + ); + }, + } + + network_starter.start_network(); + Ok(task_manager) +} diff --git a/substrate/bin/minimal/runtime/Cargo.toml b/substrate/bin/minimal/runtime/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..85d56d0638a59f930b0ea293b649d9cbab09559b --- /dev/null +++ b/substrate/bin/minimal/runtime/Cargo.toml @@ -0,0 +1,47 @@ +[package] +name = "minimal-runtime" +version = "0.1.0" +authors.workspace = true +description = "A minimal Substrate example runtime" +edition.workspace = true +repository.workspace = true +license.workspace = true +publish = false + +[dependencies] +parity-scale-codec = { version = "3.0.0", default-features = false } +scale-info = { version = "2.6.0", default-features = false } + +# this is a frame-based runtime, thus importing `frame` with runtime feature enabled. +frame = { path = "../../../frame", default-features = false, features = ["runtime", "experimental"] } +frame-support = { path = "../../../frame/support", default-features = false} + +# pallets that we want to use +pallet-balances = { path = "../../../frame/balances", default-features = false } +pallet-sudo = { path = "../../../frame/sudo", default-features = false } +pallet-timestamp = { path = "../../../frame/timestamp", default-features = false } +pallet-transaction-payment = { path = "../../../frame/transaction-payment", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { path = "../../../frame/transaction-payment/rpc/runtime-api", default-features = false } + +# genesis builder that allows us to interacto with runtime genesis config +sp-genesis-builder = { path = "../../../primitives/genesis-builder", default-features = false} + + +[build-dependencies] +substrate-wasm-builder = { path = "../../../utils/wasm-builder", optional = true } + +[features] +default = [ "std" ] +std = [ + "frame-support/std", + "frame/std", + "pallet-balances/std", + "pallet-sudo/std", + "pallet-timestamp/std", + "pallet-transaction-payment-rpc-runtime-api/std", + "pallet-transaction-payment/std", + "parity-scale-codec/std", + "scale-info/std", + "sp-genesis-builder/std", + "substrate-wasm-builder", +] diff --git a/substrate/bin/minimal/runtime/build.rs b/substrate/bin/minimal/runtime/build.rs new file mode 100644 index 0000000000000000000000000000000000000000..b7676a70dfe843e2cd47fc600ef599bbe7bff591 --- /dev/null +++ b/substrate/bin/minimal/runtime/build.rs @@ -0,0 +1,27 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +fn main() { + #[cfg(feature = "std")] + { + substrate_wasm_builder::WasmBuilder::new() + .with_current_project() + .export_heap_base() + .import_memory() + .build(); + } +} diff --git a/substrate/bin/minimal/runtime/src/lib.rs b/substrate/bin/minimal/runtime/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..efee400c3f594437c15c14a74e3ba5670fd61852 --- /dev/null +++ b/substrate/bin/minimal/runtime/src/lib.rs @@ -0,0 +1,242 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![cfg_attr(not(feature = "std"), no_std)] + +// Make the WASM binary available. +#[cfg(feature = "std")] +include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); + +use frame::{ + deps::frame_support::weights::{FixedFee, NoFee}, + prelude::*, + runtime::{ + apis::{ + self, impl_runtime_apis, ApplyExtrinsicResult, CheckInherentsResult, OpaqueMetadata, + }, + prelude::*, + }, +}; +use frame_support::genesis_builder_helper::{build_config, create_default_config}; + +#[runtime_version] +pub const VERSION: RuntimeVersion = RuntimeVersion { + spec_name: create_runtime_str!("minimal-runtime"), + impl_name: create_runtime_str!("minimal-runtime"), + authoring_version: 1, + spec_version: 0, + impl_version: 1, + apis: RUNTIME_API_VERSIONS, + transaction_version: 1, + state_version: 1, +}; + +/// The version information used to identify this runtime when compiled natively. +#[cfg(feature = "std")] +pub fn native_version() -> NativeVersion { + NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } +} + +type SignedExtra = ( + frame_system::CheckNonZeroSender, + frame_system::CheckSpecVersion, + frame_system::CheckTxVersion, + frame_system::CheckGenesis, + frame_system::CheckEra, + frame_system::CheckNonce, + frame_system::CheckWeight, + pallet_transaction_payment::ChargeTransactionPayment, +); + +construct_runtime!( + pub struct Runtime { + System: frame_system, + Timestamp: pallet_timestamp, + + Balances: pallet_balances, + Sudo: pallet_sudo, + TransactionPayment: pallet_transaction_payment, + } +); + +parameter_types! 
{ + pub const Version: RuntimeVersion = VERSION; +} + +#[derive_impl(frame_system::config_preludes::SolochainDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for Runtime { + type Block = Block; + type Version = Version; + type BlockHashCount = ConstU32<1024>; + type AccountData = pallet_balances::AccountData<::Balance>; +} + +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig as pallet_balances::DefaultConfig)] +impl pallet_balances::Config for Runtime { + type AccountStore = System; +} + +#[derive_impl(pallet_sudo::config_preludes::TestDefaultConfig as pallet_sudo::DefaultConfig)] +impl pallet_sudo::Config for Runtime {} + +#[derive_impl(pallet_timestamp::config_preludes::TestDefaultConfig as pallet_timestamp::DefaultConfig)] +impl pallet_timestamp::Config for Runtime {} + +#[derive_impl(pallet_transaction_payment::config_preludes::TestDefaultConfig as pallet_transaction_payment::DefaultConfig)] +impl pallet_transaction_payment::Config for Runtime { + type OnChargeTransaction = pallet_transaction_payment::CurrencyAdapter; + type WeightToFee = NoFee<::Balance>; + type LengthToFee = FixedFee<1, ::Balance>; +} + +type Block = frame::runtime::types_common::BlockOf; +type Header = HeaderFor; + +type RuntimeExecutive = + Executive, Runtime, AllPalletsWithSystem>; + +use pallet_transaction_payment::{FeeDetails, RuntimeDispatchInfo}; + +impl_runtime_apis! { + impl apis::Core for Runtime { + fn version() -> RuntimeVersion { + VERSION + } + + fn execute_block(block: Block) { + RuntimeExecutive::execute_block(block) + } + + fn initialize_block(header: &Header) { + RuntimeExecutive::initialize_block(header) + } + } + impl apis::Metadata for Runtime { + fn metadata() -> OpaqueMetadata { + OpaqueMetadata::new(Runtime::metadata().into()) + } + + fn metadata_at_version(version: u32) -> Option { + Runtime::metadata_at_version(version) + } + + fn metadata_versions() -> Vec { + Runtime::metadata_versions() + } + } + + impl apis::BlockBuilder for Runtime { + fn apply_extrinsic(extrinsic: ExtrinsicFor) -> ApplyExtrinsicResult { + RuntimeExecutive::apply_extrinsic(extrinsic) + } + + fn finalize_block() -> HeaderFor { + RuntimeExecutive::finalize_block() + } + + fn inherent_extrinsics(data: InherentData) -> Vec> { + data.create_extrinsics() + } + + fn check_inherents( + block: Block, + data: InherentData, + ) -> CheckInherentsResult { + data.check_extrinsics(&block) + } + } + + impl apis::TaggedTransactionQueue for Runtime { + fn validate_transaction( + source: TransactionSource, + tx: ExtrinsicFor, + block_hash: ::Hash, + ) -> TransactionValidity { + RuntimeExecutive::validate_transaction(source, tx, block_hash) + } + } + + impl apis::OffchainWorkerApi for Runtime { + fn offchain_worker(header: &HeaderFor) { + RuntimeExecutive::offchain_worker(header) + } + } + + impl apis::SessionKeys for Runtime { + fn generate_session_keys(_seed: Option>) -> Vec { + Default::default() + } + + fn decode_session_keys( + _encoded: Vec, + ) -> Option, apis::KeyTypeId)>> { + Default::default() + } + } + + impl apis::AccountNonceApi for Runtime { + fn account_nonce(account: interface::AccountId) -> interface::Nonce { + System::account_nonce(account) + } + } + + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi< + Block, + interface::Balance, + > for Runtime { + fn query_info(uxt: ExtrinsicFor, len: u32) -> RuntimeDispatchInfo { + TransactionPayment::query_info(uxt, len) + } + fn query_fee_details(uxt: ExtrinsicFor, len: u32) -> FeeDetails { + 
TransactionPayment::query_fee_details(uxt, len) + } + fn query_weight_to_fee(weight: Weight) -> interface::Balance { + TransactionPayment::weight_to_fee(weight) + } + fn query_length_to_fee(length: u32) -> interface::Balance { + TransactionPayment::length_to_fee(length) + } + } + + impl sp_genesis_builder::GenesisBuilder for Runtime { + fn create_default_config() -> Vec { + create_default_config::() + } + + fn build_config(config: Vec) -> sp_genesis_builder::Result { + build_config::(config) + } + } +} + +/// Some re-exports that the node side code needs to know. Some are useful in this context as well. +/// +/// Other types should preferably be private. +// TODO: this should be standardized in some way, see: +// https://github.com/paritytech/substrate/issues/10579#issuecomment-1600537558 +pub mod interface { + use super::Runtime; + use frame::deps::frame_system; + + pub type Block = super::Block; + pub use frame::runtime::types_common::OpaqueBlock; + pub type AccountId = ::AccountId; + pub type Nonce = ::Nonce; + pub type Hash = ::Hash; + pub type Balance = ::Balance; + pub type MinimumBalance = ::ExistentialDeposit; +} diff --git a/substrate/bin/node-template/node/Cargo.toml b/substrate/bin/node-template/node/Cargo.toml index 23840cce2229ba0b83057584fa6657f8424f5c8a..61953631d799879894256a72b5dfde1a2cde5d1c 100644 --- a/substrate/bin/node-template/node/Cargo.toml +++ b/substrate/bin/node-template/node/Cargo.toml @@ -19,6 +19,7 @@ name = "node-template" [dependencies] clap = { version = "4.4.6", features = ["derive"] } futures = { version = "0.3.21", features = ["thread-pool"]} +serde_json = "1.0.108" sc-cli = { path = "../../../client/cli" } sp-core = { path = "../../../primitives/core" } diff --git a/substrate/bin/node-template/node/src/chain_spec.rs b/substrate/bin/node-template/node/src/chain_spec.rs index 2cd2d0729302631699b895609538cb148fc41de4..6e0d78f647a594d7e3c247041a7499691edaad1e 100644 --- a/substrate/bin/node-template/node/src/chain_spec.rs +++ b/substrate/bin/node-template/node/src/chain_spec.rs @@ -1,7 +1,4 @@ -use node_template_runtime::{ - AccountId, AuraConfig, BalancesConfig, GrandpaConfig, RuntimeGenesisConfig, Signature, - SudoConfig, SystemConfig, WASM_BINARY, -}; +use node_template_runtime::{AccountId, RuntimeGenesisConfig, Signature, WASM_BINARY}; use sc_service::ChainType; use sp_consensus_aura::sr25519::AuthorityId as AuraId; use sp_consensus_grandpa::AuthorityId as GrandpaId; @@ -37,122 +34,84 @@ pub fn authority_keys_from_seed(s: &str) -> (AuraId, GrandpaId) { } pub fn development_config() -> Result { - let wasm_binary = WASM_BINARY.ok_or_else(|| "Development wasm not available".to_string())?; - - Ok(ChainSpec::from_genesis( - // Name - "Development", - // ID - "dev", - ChainType::Development, - move || { - testnet_genesis( - wasm_binary, - // Initial PoA authorities - vec![authority_keys_from_seed("Alice")], - // Sudo account - get_account_id_from_seed::("Alice"), - // Pre-funded accounts - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - ], - true, - ) - }, - // Bootnodes - vec![], - // Telemetry - None, - // Protocol ID - None, - None, - // Properties - None, - // Extensions + Ok(ChainSpec::builder( + WASM_BINARY.ok_or_else(|| "Development wasm not available".to_string())?, None, + ) + .with_name("Development") + .with_id("dev") + .with_chain_type(ChainType::Development) + .with_genesis_config_patch(testnet_genesis( + // Initial PoA 
authorities + vec![authority_keys_from_seed("Alice")], + // Sudo account + get_account_id_from_seed::("Alice"), + // Pre-funded accounts + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + ], + true, )) + .build()) } pub fn local_testnet_config() -> Result { - let wasm_binary = WASM_BINARY.ok_or_else(|| "Development wasm not available".to_string())?; - - Ok(ChainSpec::from_genesis( - // Name - "Local Testnet", - // ID - "local_testnet", - ChainType::Local, - move || { - testnet_genesis( - wasm_binary, - // Initial PoA authorities - vec![authority_keys_from_seed("Alice"), authority_keys_from_seed("Bob")], - // Sudo account - get_account_id_from_seed::("Alice"), - // Pre-funded accounts - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - ], - true, - ) - }, - // Bootnodes - vec![], - // Telemetry - None, - // Protocol ID - None, - // Properties - None, - None, - // Extensions + Ok(ChainSpec::builder( + WASM_BINARY.ok_or_else(|| "Development wasm not available".to_string())?, None, + ) + .with_name("Local Testnet") + .with_id("local_testnet") + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(testnet_genesis( + // Initial PoA authorities + vec![authority_keys_from_seed("Alice"), authority_keys_from_seed("Bob")], + // Sudo account + get_account_id_from_seed::("Alice"), + // Pre-funded accounts + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Charlie"), + get_account_id_from_seed::("Dave"), + get_account_id_from_seed::("Eve"), + get_account_id_from_seed::("Ferdie"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + get_account_id_from_seed::("Charlie//stash"), + get_account_id_from_seed::("Dave//stash"), + get_account_id_from_seed::("Eve//stash"), + get_account_id_from_seed::("Ferdie//stash"), + ], + true, )) + .build()) } /// Configure initial storage state for FRAME modules. fn testnet_genesis( - wasm_binary: &[u8], initial_authorities: Vec<(AuraId, GrandpaId)>, root_key: AccountId, endowed_accounts: Vec, _enable_println: bool, -) -> RuntimeGenesisConfig { - RuntimeGenesisConfig { - system: SystemConfig { - // Add Wasm runtime to storage. - code: wasm_binary.to_vec(), - ..Default::default() - }, - balances: BalancesConfig { +) -> serde_json::Value { + serde_json::json!({ + "balances": { // Configure endowed accounts with initial balance of 1 << 60. 
- balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 60)).collect(), + "balances": endowed_accounts.iter().cloned().map(|k| (k, 1u64 << 60)).collect::>(), }, - aura: AuraConfig { - authorities: initial_authorities.iter().map(|x| (x.0.clone())).collect(), + "aura": { + "authorities": initial_authorities.iter().map(|x| (x.0.clone())).collect::>(), }, - grandpa: GrandpaConfig { - authorities: initial_authorities.iter().map(|x| (x.1.clone(), 1)).collect(), - ..Default::default() + "grandpa": { + "authorities": initial_authorities.iter().map(|x| (x.1.clone(), 1)).collect::>(), }, - sudo: SudoConfig { + "sudo": { // Assign network admin rights. - key: Some(root_key), + "key": Some(root_key), }, - transaction_payment: Default::default(), - } + }) } diff --git a/substrate/bin/node-template/pallets/template/Cargo.toml b/substrate/bin/node-template/pallets/template/Cargo.toml index 3e6acc5ceabffb2faeb16f26e98bf79432e723c2..77183c42cd60c8b6bd9e1e7ad6387f60a5bd1eaa 100644 --- a/substrate/bin/node-template/pallets/template/Cargo.toml +++ b/substrate/bin/node-template/pallets/template/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../../../../frame/benchmarking", default-features = false, optional = true} frame-support = { path = "../../../../frame/support", default-features = false} frame-system = { path = "../../../../frame/system", default-features = false} diff --git a/substrate/bin/node-template/pallets/template/src/benchmarking.rs b/substrate/bin/node-template/pallets/template/src/benchmarking.rs index 6c3cae6066b41982af15a56098bcec62080c4e47..5a262417629c579c6ecf5ada30ae803217623766 100644 --- a/substrate/bin/node-template/pallets/template/src/benchmarking.rs +++ b/substrate/bin/node-template/pallets/template/src/benchmarking.rs @@ -1,7 +1,6 @@ //! 
Benchmarking setup for pallet-template #![cfg(feature = "runtime-benchmarks")] use super::*; -use sp_std::vec; #[allow(unused)] use crate::Pallet as Template; diff --git a/substrate/bin/node-template/runtime/Cargo.toml b/substrate/bin/node-template/runtime/Cargo.toml index caca54ce2ba4e87bfb3eefcf10b85ee7edf19185..7711ddba34d0bb50e88941067bb7b513475ad7dd 100644 --- a/substrate/bin/node-template/runtime/Cargo.toml +++ b/substrate/bin/node-template/runtime/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } pallet-aura = { path = "../../../frame/aura", default-features = false} pallet-balances = { path = "../../../frame/balances", default-features = false} @@ -28,18 +28,19 @@ pallet-transaction-payment = { path = "../../../frame/transaction-payment", defa frame-executive = { path = "../../../frame/executive", default-features = false} sp-api = { path = "../../../primitives/api", default-features = false} sp-block-builder = { path = "../../../primitives/block-builder", default-features = false} -sp-consensus-aura = { path = "../../../primitives/consensus/aura", default-features = false} -sp-consensus-grandpa = { path = "../../../primitives/consensus/grandpa", default-features = false} -sp-core = { path = "../../../primitives/core", default-features = false} +sp-consensus-aura = { path = "../../../primitives/consensus/aura", default-features = false, features = ["serde"] } +sp-consensus-grandpa = { path = "../../../primitives/consensus/grandpa", default-features = false, features = ["serde"] } +sp-core = { path = "../../../primitives/core", default-features = false, features = ["serde"]} sp-inherents = { path = "../../../primitives/inherents", default-features = false} sp-offchain = { path = "../../../primitives/offchain", default-features = false} -sp-runtime = { path = "../../../primitives/runtime", default-features = false} +sp-runtime = { path = "../../../primitives/runtime", default-features = false, features = ["serde"] } sp-session = { path = "../../../primitives/session", default-features = false} sp-std = { path = "../../../primitives/std", default-features = false} sp-storage = { path = "../../../primitives/storage", default-features = false} sp-transaction-pool = { path = "../../../primitives/transaction-pool", default-features = false} -sp-version = { path = "../../../primitives/version", default-features = false} -sp-genesis-builder = { version = "0.1.0-dev", default-features = false, path = "../../../primitives/genesis-builder" } +sp-version = { path = "../../../primitives/version", default-features = false, features = ["serde"] } +serde_json = { version = "1.0.108", default-features = false, features = ["alloc"] } +sp-genesis-builder = { default-features = false, path = "../../../primitives/genesis-builder" } # Used for the node template's RPCs frame-system-rpc-runtime-api = { path = "../../../frame/system/rpc/runtime-api", default-features = false} @@ -75,6 +76,7 @@ std = [ "pallet-transaction-payment-rpc-runtime-api/std", "pallet-transaction-payment/std", "scale-info/std", + "serde_json/std", "sp-api/std", "sp-block-builder/std", "sp-consensus-aura/std", diff --git a/substrate/bin/node-template/runtime/src/lib.rs b/substrate/bin/node-template/runtime/src/lib.rs 
index 4653b49bf2c344c6cd433a5881565488a0d50fa9..6aa4cb70fde17b8a6e6d6a113833e883ff727239 100644 --- a/substrate/bin/node-template/runtime/src/lib.rs +++ b/substrate/bin/node-template/runtime/src/lib.rs @@ -251,6 +251,7 @@ impl pallet_balances::Config for Runtime { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = (); } @@ -312,6 +313,12 @@ pub type SignedExtra = ( pallet_transaction_payment::ChargeTransactionPayment, ); +/// All migrations of the runtime, aside from the ones declared in the pallets. +/// +/// This can be a tuple of types, each implementing `OnRuntimeUpgrade`. +#[allow(unused_parens)] +type Migrations = (); + /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; @@ -324,6 +331,7 @@ pub type Executive = frame_executive::Executive< frame_system::ChainContext, Runtime, AllPalletsWithSystem, + Migrations, >; #[cfg(feature = "runtime-benchmarks")] diff --git a/substrate/bin/node/bench/Cargo.toml b/substrate/bin/node/bench/Cargo.toml index c111d345623d584c2748d7659bc034e1a66e6087..ee429ee8c0c18ed4239e948c61326f3878fe4b2b 100644 --- a/substrate/bin/node/bench/Cargo.toml +++ b/substrate/bin/node/bench/Cargo.toml @@ -22,7 +22,7 @@ sc-client-api = { path = "../../../client/api" } sp-runtime = { path = "../../../primitives/runtime" } sp-state-machine = { path = "../../../primitives/state-machine" } serde = "1.0.188" -serde_json = "1.0.107" +serde_json = "1.0.108" derive_more = { version = "0.99.17", default-features = false, features = ["display"] } kvdb = "0.13.0" kvdb-rocksdb = "0.19.0" diff --git a/substrate/bin/node/cli/Cargo.toml b/substrate/bin/node/cli/Cargo.toml index 49dc39099be03b9470d712245c8d3642c3d8aaaa..8f3c2185deb03fd85814d44c144e7233de7c71b3 100644 --- a/substrate/bin/node/cli/Cargo.toml +++ b/substrate/bin/node/cli/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "node-cli" +name = "staging-node-cli" version = "3.0.0-dev" authors.workspace = true description = "Generic Substrate node implementation in Rust." 
@@ -96,19 +96,20 @@ pallet-assets = { path = "../../../frame/assets" } pallet-asset-conversion-tx-payment = { path = "../../../frame/transaction-payment/asset-conversion-tx-payment" } pallet-asset-tx-payment = { path = "../../../frame/transaction-payment/asset-tx-payment" } pallet-im-online = { path = "../../../frame/im-online", default-features = false} +pallet-skip-feeless-payment = { path = "../../../frame/transaction-payment/skip-feeless-payment", default-features = false} # node-specific dependencies kitchensink-runtime = { path = "../runtime" } node-rpc = { path = "../rpc" } node-primitives = { path = "../primitives" } -node-executor = { path = "../executor" } +node-executor = { package = "staging-node-executor", path = "../executor" } # CLI-specific dependencies sc-cli = { path = "../../../client/cli", optional = true} frame-benchmarking-cli = { path = "../../../utils/frame/benchmarking-cli", optional = true} -node-inspect = { path = "../inspect", optional = true} +node-inspect = { package = "staging-node-inspect", path = "../inspect", optional = true} try-runtime-cli = { path = "../../../utils/frame/try-runtime/cli", optional = true} -serde_json = "1.0.107" +serde_json = "1.0.108" [dev-dependencies] sc-keystore = { path = "../../../client/keystore" } @@ -124,7 +125,6 @@ futures = "0.3.21" tempfile = "3.1.0" assert_cmd = "2.0.2" nix = { version = "0.26.1", features = ["signal"] } -serde_json = "1.0" regex = "1.6.0" platforms = "3.0" soketto = "0.7.1" @@ -139,7 +139,7 @@ substrate-cli-test-utils = { path = "../../../test-utils/cli" } [build-dependencies] clap = { version = "4.4.6", optional = true } clap_complete = { version = "4.0.2", optional = true } -node-inspect = { path = "../inspect", optional = true} +node-inspect = { package = "staging-node-inspect", path = "../inspect", optional = true} frame-benchmarking-cli = { path = "../../../utils/frame/benchmarking-cli", optional = true} substrate-build-script-utils = { path = "../../../utils/build-script-utils", optional = true} substrate-frame-cli = { path = "../../../utils/frame/frame-utilities-cli", optional = true} @@ -169,6 +169,7 @@ runtime-benchmarks = [ "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-im-online/runtime-benchmarks", + "pallet-skip-feeless-payment/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "sc-client-db/runtime-benchmarks", "sc-service/runtime-benchmarks", @@ -184,6 +185,7 @@ try-runtime = [ "pallet-assets/try-runtime", "pallet-balances/try-runtime", "pallet-im-online/try-runtime", + "pallet-skip-feeless-payment/try-runtime", "pallet-timestamp/try-runtime", "sp-runtime/try-runtime", "substrate-cli-test-utils/try-runtime", diff --git a/substrate/bin/node/cli/benches/block_production.rs b/substrate/bin/node/cli/benches/block_production.rs index 246de8f3e925df6e3d91994af1467975a1669f4c..f59a125e1c05f27f64a45b9085fa736e98a22bc0 100644 --- a/substrate/bin/node/cli/benches/block_production.rs +++ b/substrate/bin/node/cli/benches/block_production.rs @@ -20,7 +20,7 @@ use criterion::{criterion_group, criterion_main, BatchSize, Criterion, Throughpu use kitchensink_runtime::{constants::currency::*, BalancesCall}; use node_cli::service::{create_extrinsic, FullClient}; -use sc_block_builder::{BlockBuilderProvider, BuiltBlock, RecordProof}; +use sc_block_builder::{BlockBuilderBuilder, BuiltBlock}; use sc_consensus::{ block_import::{BlockImportParams, ForkChoiceStrategy}, BlockImport, StateAction, @@ -39,6 +39,7 @@ use sp_runtime::{ transaction_validity::{InvalidTransaction, 
TransactionValidityError}, AccountId32, MultiAddress, OpaqueExtrinsic, }; +use staging_node_cli as node_cli; use tokio::runtime::Handle; fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { @@ -126,7 +127,11 @@ fn prepare_benchmark(client: &FullClient) -> (usize, Vec) { let mut max_transfer_count = 0; let mut extrinsics = Vec::new(); - let mut block_builder = client.new_block(Default::default()).unwrap(); + let mut block_builder = BlockBuilderBuilder::new(client) + .on_parent_block(client.chain_info().best_hash) + .with_parent_block_number(client.chain_info().best_number) + .build() + .unwrap(); // Every block needs one timestamp extrinsic. let extrinsic_set_time = extrinsic_set_time(1 + MINIMUM_PERIOD_FOR_BLOCKS); @@ -173,7 +178,11 @@ fn block_production(c: &mut Criterion) { // Buliding the very first block is around ~30x slower than any subsequent one, // so let's make sure it's built and imported before we benchmark anything. - let mut block_builder = client.new_block(Default::default()).unwrap(); + let mut block_builder = BlockBuilderBuilder::new(client) + .on_parent_block(client.chain_info().best_hash) + .with_parent_block_number(client.chain_info().best_number) + .build() + .unwrap(); block_builder.push(extrinsic_set_time(1)).unwrap(); import_block(client, block_builder.build().unwrap()); @@ -185,14 +194,19 @@ fn block_production(c: &mut Criterion) { group.sample_size(10); group.throughput(Throughput::Elements(max_transfer_count as u64)); - let best_hash = client.chain_info().best_hash; + let chain = client.chain_info(); + let best_hash = chain.best_hash; + let best_number = chain.best_number; group.bench_function(format!("{} transfers (no proof)", max_transfer_count), |b| { b.iter_batched( || extrinsics.clone(), |extrinsics| { - let mut block_builder = - client.new_block_at(best_hash, Default::default(), RecordProof::No).unwrap(); + let mut block_builder = BlockBuilderBuilder::new(client) + .on_parent_block(best_hash) + .with_parent_block_number(best_number) + .build() + .unwrap(); for extrinsic in extrinsics { block_builder.push(extrinsic).unwrap(); } @@ -206,8 +220,11 @@ fn block_production(c: &mut Criterion) { b.iter_batched( || extrinsics.clone(), |extrinsics| { - let mut block_builder = - client.new_block_at(best_hash, Default::default(), RecordProof::Yes).unwrap(); + let mut block_builder = BlockBuilderBuilder::new(client) + .on_parent_block(best_hash) + .with_parent_block_number(best_number) + .build() + .unwrap(); for extrinsic in extrinsics { block_builder.push(extrinsic).unwrap(); } diff --git a/substrate/bin/node/cli/benches/transaction_pool.rs b/substrate/bin/node/cli/benches/transaction_pool.rs index 47f890574151d8f8023f901cacd6832d0dea69e5..dd6c237d4dd6fda6587a0e4c283366787cfe454a 100644 --- a/substrate/bin/node/cli/benches/transaction_pool.rs +++ b/substrate/bin/node/cli/benches/transaction_pool.rs @@ -35,6 +35,7 @@ use sc_transaction_pool_api::{TransactionPool as _, TransactionSource, Transacti use sp_core::{crypto::Pair, sr25519}; use sp_keyring::Sr25519Keyring; use sp_runtime::OpaqueExtrinsic; +use staging_node_cli as node_cli; use tokio::runtime::Handle; fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { diff --git a/substrate/bin/node/cli/bin/main.rs b/substrate/bin/node/cli/bin/main.rs index 4b434a3e6dad53f6bbbb47eed4702d57bf1ed405..ccc7d7b6b112558832549bf7e5435f68ac0fe944 100644 --- a/substrate/bin/node/cli/bin/main.rs +++ b/substrate/bin/node/cli/bin/main.rs @@ -20,6 +20,8 @@ #![warn(missing_docs)] +use staging_node_cli as 
node_cli; + fn main() -> sc_cli::Result<()> { node_cli::run() } diff --git a/substrate/bin/node/cli/src/chain_spec.rs b/substrate/bin/node/cli/src/chain_spec.rs index 52b480925aa941895397538081f25c0680d9bc7e..3559348d188c778a9c8838cbd2cf840d87521a03 100644 --- a/substrate/bin/node/cli/src/chain_spec.rs +++ b/substrate/bin/node/cli/src/chain_spec.rs @@ -20,10 +20,7 @@ use grandpa_primitives::AuthorityId as GrandpaId; use kitchensink_runtime::{ - constants::currency::*, wasm_binary_unwrap, BabeConfig, BalancesConfig, Block, CouncilConfig, - DemocracyConfig, ElectionsConfig, ImOnlineConfig, IndicesConfig, MaxNominations, - NominationPoolsConfig, SessionConfig, SessionKeys, SocietyConfig, StakerStatus, StakingConfig, - SudoConfig, SystemConfig, TechnicalCommitteeConfig, + constants::currency::*, wasm_binary_unwrap, Block, MaxNominations, SessionKeys, StakerStatus, }; use pallet_im_online::sr25519::AuthorityId as ImOnlineId; use sc_chain_spec::ChainSpecExtension; @@ -45,6 +42,8 @@ pub use node_primitives::{AccountId, Balance, Signature}; type AccountPublic = ::Signer; const STAGING_TELEMETRY_URL: &str = "wss://telemetry.polkadot.io/submit/"; +const ENDOWMENT: Balance = 10_000_000 * DOLLARS; +const STASH: Balance = ENDOWMENT / 1000; /// Node `ChainSpec` extensions. /// @@ -78,7 +77,11 @@ fn session_keys( SessionKeys { grandpa, babe, im_online, authority_discovery, mixnet } } -fn staging_testnet_config_genesis() -> RuntimeGenesisConfig { +fn configure_accounts_for_staging_testnet() -> ( + Vec<(AccountId, AccountId, GrandpaId, BabeId, ImOnlineId, AuthorityDiscoveryId, MixnetId)>, + AccountId, + Vec, +) { #[rustfmt::skip] // stash, controller, session-key // generated with secret: @@ -190,28 +193,27 @@ fn staging_testnet_config_genesis() -> RuntimeGenesisConfig { ); let endowed_accounts: Vec = vec![root_key.clone()]; + (initial_authorities, root_key, endowed_accounts) +} +fn staging_testnet_config_genesis() -> serde_json::Value { + let (initial_authorities, root_key, endowed_accounts) = + configure_accounts_for_staging_testnet(); testnet_genesis(initial_authorities, vec![], root_key, Some(endowed_accounts)) } /// Staging testnet config. pub fn staging_testnet_config() -> ChainSpec { - let boot_nodes = vec![]; - ChainSpec::from_genesis( - "Staging Testnet", - "staging_testnet", - ChainType::Live, - staging_testnet_config_genesis, - boot_nodes, - Some( + ChainSpec::builder(wasm_binary_unwrap(), Default::default()) + .with_name("Staging Testnet") + .with_id("staging_testnet") + .with_chain_type(ChainType::Live) + .with_genesis_config_patch(staging_testnet_config_genesis()) + .with_telemetry_endpoints( TelemetryEndpoints::new(vec![(STAGING_TELEMETRY_URL.to_string(), 0)]) .expect("Staging telemetry url is valid; qed"), - ), - None, - None, - None, - Default::default(), - ) + ) + .build() } /// Helper function to generate a crypto pair from seed. @@ -244,8 +246,7 @@ pub fn authority_keys_from_seed( ) } -/// Helper function to create RuntimeGenesisConfig for testing. 
-pub fn testnet_genesis( +fn configure_accounts( initial_authorities: Vec<( AccountId, AccountId, @@ -256,9 +257,14 @@ pub fn testnet_genesis( MixnetId, )>, initial_nominators: Vec, - root_key: AccountId, endowed_accounts: Option>, -) -> RuntimeGenesisConfig { + stash: Balance, +) -> ( + Vec<(AccountId, AccountId, GrandpaId, BabeId, ImOnlineId, AuthorityDiscoveryId, MixnetId)>, + Vec, + usize, + Vec<(AccountId, AccountId, Balance, StakerStatus)>, +) { let mut endowed_accounts: Vec = endowed_accounts.unwrap_or_else(|| { vec![ get_account_id_from_seed::("Alice"), @@ -290,7 +296,7 @@ pub fn testnet_genesis( let mut rng = rand::thread_rng(); let stakers = initial_authorities .iter() - .map(|x| (x.0.clone(), x.0.clone(), STASH, StakerStatus::Validator)) + .map(|x| (x.0.clone(), x.0.clone(), stash, StakerStatus::Validator)) .chain(initial_nominators.iter().map(|x| { use rand::{seq::SliceRandom, Rng}; let limit = (MaxNominations::get() as usize).min(initial_authorities.len()); @@ -301,23 +307,39 @@ pub fn testnet_genesis( .into_iter() .map(|choice| choice.0.clone()) .collect::>(); - (x.clone(), x.clone(), STASH, StakerStatus::Nominator(nominations)) + (x.clone(), x.clone(), stash, StakerStatus::Nominator(nominations)) })) .collect::>(); let num_endowed_accounts = endowed_accounts.len(); - const ENDOWMENT: Balance = 10_000_000 * DOLLARS; - const STASH: Balance = ENDOWMENT / 1000; + (initial_authorities, endowed_accounts, num_endowed_accounts, stakers) +} + +/// Helper function to create RuntimeGenesisConfig json patch for testing. +pub fn testnet_genesis( + initial_authorities: Vec<( + AccountId, + AccountId, + GrandpaId, + BabeId, + ImOnlineId, + AuthorityDiscoveryId, + MixnetId, + )>, + initial_nominators: Vec, + root_key: AccountId, + endowed_accounts: Option>, +) -> serde_json::Value { + let (initial_authorities, endowed_accounts, num_endowed_accounts, stakers) = + configure_accounts(initial_authorities, initial_nominators, endowed_accounts, STASH); - RuntimeGenesisConfig { - system: SystemConfig { code: wasm_binary_unwrap().to_vec(), ..Default::default() }, - balances: BalancesConfig { - balances: endowed_accounts.iter().cloned().map(|x| (x, ENDOWMENT)).collect(), + serde_json::json!({ + "balances": { + "balances": endowed_accounts.iter().cloned().map(|x| (x, ENDOWMENT)).collect::>(), }, - indices: IndicesConfig { indices: vec![] }, - session: SessionConfig { - keys: initial_authorities + "session": { + "keys": initial_authorities .iter() .map(|x| { ( @@ -334,67 +356,45 @@ pub fn testnet_genesis( }) .collect::>(), }, - staking: StakingConfig { - validator_count: initial_authorities.len() as u32, - minimum_validator_count: initial_authorities.len() as u32, - invulnerables: initial_authorities.iter().map(|x| x.0.clone()).collect(), - slash_reward_fraction: Perbill::from_percent(10), - stakers, - ..Default::default() + "staking": { + "validatorCount": initial_authorities.len() as u32, + "minimumValidatorCount": initial_authorities.len() as u32, + "invulnerables": initial_authorities.iter().map(|x| x.0.clone()).collect::>(), + "slashRewardFraction": Perbill::from_percent(10), + "stakers": stakers.clone(), }, - democracy: DemocracyConfig::default(), - elections: ElectionsConfig { - members: endowed_accounts + "elections": { + "members": endowed_accounts .iter() .take((num_endowed_accounts + 1) / 2) .cloned() .map(|member| (member, STASH)) - .collect(), + .collect::>(), }, - council: CouncilConfig::default(), - technical_committee: TechnicalCommitteeConfig { - members: endowed_accounts + 
"technicalCommittee": { + "members": endowed_accounts .iter() .take((num_endowed_accounts + 1) / 2) .cloned() - .collect(), - phantom: Default::default(), + .collect::>(), }, - sudo: SudoConfig { key: Some(root_key) }, - babe: BabeConfig { - epoch_config: Some(kitchensink_runtime::BABE_GENESIS_EPOCH_CONFIG), - ..Default::default() + "sudo": { "key": Some(root_key.clone()) }, + "babe": { + "epochConfig": Some(kitchensink_runtime::BABE_GENESIS_EPOCH_CONFIG), }, - im_online: ImOnlineConfig { keys: vec![] }, - authority_discovery: Default::default(), - grandpa: Default::default(), - technical_membership: Default::default(), - treasury: Default::default(), - society: SocietyConfig { pot: 0 }, - vesting: Default::default(), - assets: pallet_assets::GenesisConfig { + "society": { "pot": 0 }, + "assets": { // This asset is used by the NIS pallet as counterpart currency. - assets: vec![(9, get_account_id_from_seed::("Alice"), true, 1)], - ..Default::default() + "assets": vec![(9, get_account_id_from_seed::("Alice"), true, 1)], }, - pool_assets: Default::default(), - transaction_storage: Default::default(), - transaction_payment: Default::default(), - alliance: Default::default(), - safe_mode: Default::default(), - tx_pause: Default::default(), - alliance_motion: Default::default(), - nomination_pools: NominationPoolsConfig { - min_create_bond: 10 * DOLLARS, - min_join_bond: 1 * DOLLARS, - ..Default::default() + "nominationPools": { + "minCreateBond": 10 * DOLLARS, + "minJoinBond": 1 * DOLLARS, }, - glutton: Default::default(), - mixnet: Default::default(), - } + }) } -fn development_config_genesis() -> RuntimeGenesisConfig { +fn development_config_genesis_json() -> serde_json::Value { testnet_genesis( vec![authority_keys_from_seed("Alice")], vec![], @@ -405,21 +405,15 @@ fn development_config_genesis() -> RuntimeGenesisConfig { /// Development config (single validator Alice). pub fn development_config() -> ChainSpec { - ChainSpec::from_genesis( - "Development", - "dev", - ChainType::Development, - development_config_genesis, - vec![], - None, - None, - None, - None, - Default::default(), - ) + ChainSpec::builder(wasm_binary_unwrap(), Default::default()) + .with_name("Development") + .with_id("dev") + .with_chain_type(ChainType::Development) + .with_genesis_config_patch(development_config_genesis_json()) + .build() } -fn local_testnet_genesis() -> RuntimeGenesisConfig { +fn local_testnet_genesis() -> serde_json::Value { testnet_genesis( vec![authority_keys_from_seed("Alice"), authority_keys_from_seed("Bob")], vec![], @@ -430,18 +424,12 @@ fn local_testnet_genesis() -> RuntimeGenesisConfig { /// Local testnet config (multivalidator Alice + Bob). pub fn local_testnet_config() -> ChainSpec { - ChainSpec::from_genesis( - "Local Testnet", - "local_testnet", - ChainType::Local, - local_testnet_genesis, - vec![], - None, - None, - None, - None, - Default::default(), - ) + ChainSpec::builder(wasm_binary_unwrap(), Default::default()) + .with_name("Local Testnet") + .with_id("local_testnet") + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(local_testnet_genesis()) + .build() } #[cfg(test)] @@ -451,45 +439,29 @@ pub(crate) mod tests { use sc_service_test; use sp_runtime::BuildStorage; - fn local_testnet_genesis_instant_single() -> RuntimeGenesisConfig { - testnet_genesis( - vec![authority_keys_from_seed("Alice")], - vec![], - get_account_id_from_seed::("Alice"), - None, - ) - } - /// Local testnet config (single validator - Alice). 
pub fn integration_test_config_with_single_authority() -> ChainSpec { - ChainSpec::from_genesis( - "Integration Test", - "test", - ChainType::Development, - local_testnet_genesis_instant_single, - vec![], - None, - None, - None, - None, - Default::default(), - ) + ChainSpec::builder(wasm_binary_unwrap(), Default::default()) + .with_name("Integration Test") + .with_id("test") + .with_chain_type(ChainType::Development) + .with_genesis_config_patch(testnet_genesis( + vec![authority_keys_from_seed("Alice")], + vec![], + get_account_id_from_seed::("Alice"), + None, + )) + .build() } /// Local testnet config (multivalidator Alice + Bob). pub fn integration_test_config_with_two_authorities() -> ChainSpec { - ChainSpec::from_genesis( - "Integration Test", - "test", - ChainType::Development, - local_testnet_genesis, - vec![], - None, - None, - None, - None, - Default::default(), - ) + ChainSpec::builder(wasm_binary_unwrap(), Default::default()) + .with_name("Integration Test") + .with_id("test") + .with_chain_type(ChainType::Development) + .with_genesis_config_patch(local_testnet_genesis()) + .build() } #[test] diff --git a/substrate/bin/node/cli/src/service.rs b/substrate/bin/node/cli/src/service.rs index 5a85f4cde0ae0ac22c4edecec32691d9b781e877..1c71b5a39561e5ac5e6d8a8f4d5a5df4b8f2023a 100644 --- a/substrate/bin/node/cli/src/service.rs +++ b/substrate/bin/node/cli/src/service.rs @@ -91,21 +91,24 @@ pub fn create_extrinsic( .map(|c| c / 2) .unwrap_or(2) as u64; let tip = 0; - let extra: kitchensink_runtime::SignedExtra = ( - frame_system::CheckNonZeroSender::::new(), - frame_system::CheckSpecVersion::::new(), - frame_system::CheckTxVersion::::new(), - frame_system::CheckGenesis::::new(), - frame_system::CheckEra::::from(generic::Era::mortal( - period, - best_block.saturated_into(), - )), - frame_system::CheckNonce::::from(nonce), - frame_system::CheckWeight::::new(), - pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::::from( - tip, None, - ), - ); + let extra: kitchensink_runtime::SignedExtra = + ( + frame_system::CheckNonZeroSender::::new(), + frame_system::CheckSpecVersion::::new(), + frame_system::CheckTxVersion::::new(), + frame_system::CheckGenesis::::new(), + frame_system::CheckEra::::from(generic::Era::mortal( + period, + best_block.saturated_into(), + )), + frame_system::CheckNonce::::from(nonce), + frame_system::CheckWeight::::new(), + pallet_skip_feeless_payment::SkipCheckIfFeeless::from( + pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::< + kitchensink_runtime::Runtime, + >::from(tip, None), + ), + ); let raw_payload = kitchensink_runtime::SignedPayload::from_raw( function.clone(), @@ -449,10 +452,14 @@ pub fn new_full_base( if let Some(hwbench) = hwbench { sc_sysinfo::print_hwbench(&hwbench); - if !SUBSTRATE_REFERENCE_HARDWARE.check_hardware(&hwbench) && role.is_authority() { - log::warn!( - "⚠️ The hardware does not meet the minimal requirements for role 'Authority'." 
- ); + match SUBSTRATE_REFERENCE_HARDWARE.check_hardware(&hwbench) { + Err(err) if role.is_authority() => { + log::warn!( + "⚠️ The hardware does not meet the minimal requirements {} for role 'Authority'.", + err + ); + }, + _ => {}, } if let Some(ref mut telemetry) = telemetry { @@ -875,8 +882,9 @@ mod tests { let check_era = frame_system::CheckEra::from(Era::Immortal); let check_nonce = frame_system::CheckNonce::from(index); let check_weight = frame_system::CheckWeight::new(); - let tx_payment = - pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::from(0, None); + let tx_payment = pallet_skip_feeless_payment::SkipCheckIfFeeless::from( + pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::from(0, None), + ); let extra = ( check_non_zero_sender, check_spec_version, diff --git a/substrate/bin/node/executor/Cargo.toml b/substrate/bin/node/executor/Cargo.toml index bed63697b56ef49c5b9b0d4a4889a63b4ec219ba..595a313d2cb9ba31cbcbcc21cce05a99cd7204a6 100644 --- a/substrate/bin/node/executor/Cargo.toml +++ b/substrate/bin/node/executor/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "node-executor" +name = "staging-node-executor" version = "3.0.0-dev" authors.workspace = true description = "Substrate node implementation in Rust." @@ -14,17 +14,17 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } -scale-info = { version = "2.5.0", features = ["derive"] } +scale-info = { version = "2.10.0", features = ["derive", "serde"] } frame-benchmarking = { path = "../../../frame/benchmarking" } node-primitives = { path = "../primitives" } kitchensink-runtime = { path = "../runtime" } sc-executor = { path = "../../../client/executor" } -sp-core = { path = "../../../primitives/core" } +sp-core = { path = "../../../primitives/core", features=["serde"] } sp-keystore = { path = "../../../primitives/keystore" } sp-state-machine = { path = "../../../primitives/state-machine" } sp-tracing = { path = "../../../primitives/tracing" } sp-trie = { path = "../../../primitives/trie" } -sp-statement-store = { path = "../../../primitives/statement-store" } +sp-statement-store = { path = "../../../primitives/statement-store", features=["serde"] } [dev-dependencies] criterion = "0.4.0" @@ -47,6 +47,7 @@ sp-consensus-babe = { path = "../../../primitives/consensus/babe" } sp-externalities = { path = "../../../primitives/externalities" } sp-keyring = { path = "../../../primitives/keyring" } sp-runtime = { path = "../../../primitives/runtime" } +serde_json = "1.0.108" [features] stress-test = [] diff --git a/substrate/bin/node/executor/benches/bench.rs b/substrate/bin/node/executor/benches/bench.rs index 1c9c002492cf5c2bffef3e017fb6a7a4cf5f05c7..587e76af867cb40b9d5b533869085ce79bc3a205 100644 --- a/substrate/bin/node/executor/benches/bench.rs +++ b/substrate/bin/node/executor/benches/bench.rs @@ -35,6 +35,7 @@ use sp_core::{ }; use sp_runtime::traits::BlakeTwo256; use sp_state_machine::TestExternalities as CoreTestExternalities; +use staging_node_executor as node_executor; criterion_group!(benches, bench_execute_block); criterion_main!(benches); @@ -189,7 +190,7 @@ fn bench_execute_block(c: &mut Criterion) { for strategy in execution_methods { group.bench_function(format!("{:?}", strategy), |b| { - let genesis_config = node_testing::genesis::config(Some(compact_code_unwrap())); + let genesis_config = node_testing::genesis::config(); let use_native = match strategy { ExecutionMethod::Native => true, ExecutionMethod::Wasm(..) 
=> false, diff --git a/substrate/bin/node/executor/tests/basic.rs b/substrate/bin/node/executor/tests/basic.rs index a2f46e9fdbe997cd71e2817f95f894bec74002de..cbceac04e8eaa075079fcb3048b47122e5037b3e 100644 --- a/substrate/bin/node/executor/tests/basic.rs +++ b/substrate/bin/node/executor/tests/basic.rs @@ -857,3 +857,19 @@ fn should_import_block_with_test_client() { futures::executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); } + +#[test] +fn default_config_as_json_works() { + let mut t = new_test_ext(compact_code_unwrap()); + let r = executor_call(&mut t, "GenesisBuilder_create_default_config", &vec![], false) + .0 + .unwrap(); + let r = Vec::::decode(&mut &r[..]).unwrap(); + let json = String::from_utf8(r.into()).expect("returned value is json. qed."); + let expected = include_str!("res/default_genesis_config.json").to_string(); + + assert_eq!( + serde_json::from_str::(&expected).unwrap(), + serde_json::from_str::(&json).unwrap() + ); +} diff --git a/substrate/bin/node/executor/tests/common.rs b/substrate/bin/node/executor/tests/common.rs index 6ce9ea3a010909209834b071a710ce52a9cfe584..2d68c88db9252a1cb43314bfa2a6d5e216b455c1 100644 --- a/substrate/bin/node/executor/tests/common.rs +++ b/substrate/bin/node/executor/tests/common.rs @@ -42,6 +42,7 @@ use node_executor::ExecutorDispatch; use node_primitives::{BlockNumber, Hash}; use node_testing::keyring::*; use sp_externalities::Externalities; +use staging_node_executor as node_executor; pub const TEST_KEY_TYPE_ID: KeyTypeId = KeyTypeId(*b"test"); @@ -123,7 +124,7 @@ pub fn executor_call( pub fn new_test_ext(code: &[u8]) -> TestExternalities { let ext = TestExternalities::new_with_code( code, - node_testing::genesis::config(Some(code)).build_storage().unwrap(), + node_testing::genesis::config().build_storage().unwrap(), ); ext } diff --git a/substrate/bin/node/executor/tests/res/default_genesis_config.json b/substrate/bin/node/executor/tests/res/default_genesis_config.json new file mode 100644 index 0000000000000000000000000000000000000000..caf12a443d36df70c109ffa9c68b552e3e36675e --- /dev/null +++ b/substrate/bin/node/executor/tests/res/default_genesis_config.json @@ -0,0 +1,108 @@ +{ + "system": {}, + "babe": { + "authorities": [], + "epochConfig": null + }, + "indices": { + "indices": [] + }, + "balances": { + "balances": [] + }, + "transactionPayment": { + "multiplier": "1000000000000000000" + }, + "staking": { + "validatorCount": 0, + "minimumValidatorCount": 0, + "invulnerables": [], + "forceEra": "NotForcing", + "slashRewardFraction": 0, + "canceledPayout": 0, + "stakers": [], + "minNominatorBond": 0, + "minValidatorBond": 0, + "maxValidatorCount": null, + "maxNominatorCount": null + }, + "session": { + "keys": [] + }, + "democracy": {}, + "council": { + "members": [] + }, + "technicalCommittee": { + "members": [] + }, + "elections": { + "members": [] + }, + "technicalMembership": { + "members": [] + }, + "grandpa": { + "authorities": [] + }, + "treasury": {}, + "sudo": { + "key": null + }, + "imOnline": { + "keys": [] + }, + "authorityDiscovery": { + "keys": [] + }, + "society": { + "pot": 0 + }, + "vesting": { + "vesting": [] + }, + "glutton": { + "compute": "0", + "storage": "0", + "trashDataCount": 0 + }, + "assets": { + "assets": [], + "metadata": [], + "accounts": [] + }, + "poolAssets": { + "assets": [], + "metadata": [], + "accounts": [] + }, + "transactionStorage": { + "byteFee": 10, + "entryFee": 1000, + "storagePeriod": 100800 + }, + "allianceMotion": { + "members": [] + }, + "alliance": { + "fellows": 
[], + "allies": [] + }, + "mixnet": { + "mixnodes": [] + }, + "nominationPools": { + "minJoinBond": 0, + "minCreateBond": 0, + "maxPools": 16, + "maxMembersPerPool": 32, + "maxMembers": 512, + "globalMaxCommission": null + }, + "txPause": { + "paused": [] + }, + "safeMode": { + "enteredUntil": null + } +} diff --git a/substrate/bin/node/executor/tests/submit_transaction.rs b/substrate/bin/node/executor/tests/submit_transaction.rs index 7678a3c6e5a9fc9e7d0060bd57a5de0239fc3129..5cbb0103d471b96902bb341bcd796e6cf09eac76 100644 --- a/substrate/bin/node/executor/tests/submit_transaction.rs +++ b/substrate/bin/node/executor/tests/submit_transaction.rs @@ -239,7 +239,7 @@ fn submitted_transaction_should_be_valid() { let author = extrinsic.signature.clone().unwrap().0; let address = Indices::lookup(author).unwrap(); let data = pallet_balances::AccountData { free: 5_000_000_000_000, ..Default::default() }; - let account = frame_system::AccountInfo { data, ..Default::default() }; + let account = frame_system::AccountInfo { providers: 1, data, ..Default::default() }; >::insert(&address, account); // check validity diff --git a/substrate/bin/node/inspect/Cargo.toml b/substrate/bin/node/inspect/Cargo.toml index 4a92db2918589da64edbeef74ca1f04bb620a354..30cc22b0e8c63d7415e230d4e43591d9e4fa3cc3 100644 --- a/substrate/bin/node/inspect/Cargo.toml +++ b/substrate/bin/node/inspect/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "node-inspect" +name = "staging-node-inspect" version = "0.9.0-dev" authors.workspace = true description = "Substrate node block inspection tool." @@ -7,7 +7,6 @@ edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.io" repository.workspace = true -publish = false [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/substrate/bin/node/runtime/Cargo.toml b/substrate/bin/node/runtime/Cargo.toml index e5bade12029e53115a2538e5e6869ff956e8ae12..2414358b60b434e806fbb472cd9ae4ceccd4da8f 100644 --- a/substrate/bin/node/runtime/Cargo.toml +++ b/substrate/bin/node/runtime/Cargo.toml @@ -20,33 +20,34 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", "max-encoded-len", ] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } static_assertions = "1.1.0" log = { version = "0.4.17", default-features = false } +serde_json = { version = "1.0.108", default-features = false, features = ["alloc", "arbitrary_precision"] } # pallet-asset-conversion: turn on "num-traits" feature primitive-types = { version = "0.12.0", default-features = false, features = ["codec", "scale-info", "num-traits"] } # primitives -sp-authority-discovery = { path = "../../../primitives/authority-discovery", default-features = false} -sp-consensus-babe = { path = "../../../primitives/consensus/babe", default-features = false} -sp-consensus-grandpa = { path = "../../../primitives/consensus/grandpa", default-features = false} +sp-authority-discovery = { path = "../../../primitives/authority-discovery", default-features = false, features=["serde"] } +sp-consensus-babe = { path = "../../../primitives/consensus/babe", default-features = false, features=["serde"] } +sp-consensus-grandpa = { path = "../../../primitives/consensus/grandpa", default-features = false, features=["serde"] } sp-block-builder = { path = "../../../primitives/block-builder", default-features = false} 
-sp-genesis-builder = { version = "0.1.0-dev", default-features = false, path = "../../../primitives/genesis-builder" } +sp-genesis-builder = { default-features = false, path = "../../../primitives/genesis-builder" } sp-inherents = { path = "../../../primitives/inherents", default-features = false} node-primitives = { path = "../primitives", default-features = false} sp-mixnet = { path = "../../../primitives/mixnet", default-features = false } sp-offchain = { path = "../../../primitives/offchain", default-features = false} -sp-core = { path = "../../../primitives/core", default-features = false} +sp-core = { path = "../../../primitives/core", default-features = false, features=["serde"] } sp-std = { path = "../../../primitives/std", default-features = false} sp-api = { path = "../../../primitives/api", default-features = false} -sp-runtime = { path = "../../../primitives/runtime", default-features = false} -sp-staking = { path = "../../../primitives/staking", default-features = false} +sp-runtime = { path = "../../../primitives/runtime", default-features = false, features=["serde"] } +sp-staking = { path = "../../../primitives/staking", default-features = false, features=["serde"] } sp-storage = { path = "../../../primitives/storage", default-features = false} sp-session = { path = "../../../primitives/session", default-features = false} sp-transaction-pool = { path = "../../../primitives/transaction-pool", default-features = false} -sp-statement-store = { path = "../../../primitives/statement-store", default-features = false} -sp-version = { path = "../../../primitives/version", default-features = false} +sp-statement-store = { path = "../../../primitives/statement-store", default-features = false, features=["serde"] } +sp-version = { path = "../../../primitives/version", default-features = false, features=["serde"] } sp-io = { path = "../../../primitives/io", default-features = false} # frame dependencies @@ -128,6 +129,7 @@ pallet-transaction-payment = { path = "../../../frame/transaction-payment", defa pallet-transaction-payment-rpc-runtime-api = { path = "../../../frame/transaction-payment/rpc/runtime-api", default-features = false} pallet-asset-conversion-tx-payment = { path = "../../../frame/transaction-payment/asset-conversion-tx-payment", default-features = false} pallet-asset-tx-payment = { path = "../../../frame/transaction-payment/asset-tx-payment", default-features = false} +pallet-skip-feeless-payment = { path = "../../../frame/transaction-payment/skip-feeless-payment", default-features = false} pallet-transaction-storage = { path = "../../../frame/transaction-storage", default-features = false} pallet-uniques = { path = "../../../frame/uniques", default-features = false} pallet-vesting = { path = "../../../frame/vesting", default-features = false} @@ -211,6 +213,7 @@ std = [ "pallet-scheduler/std", "pallet-session-benchmarking?/std", "pallet-session/std", + "pallet-skip-feeless-payment/std", "pallet-society/std", "pallet-staking-runtime-api/std", "pallet-staking/std", @@ -228,7 +231,9 @@ std = [ "pallet-utility/std", "pallet-vesting/std", "pallet-whitelist/std", + "primitive-types/std", "scale-info/std", + "serde_json/std", "sp-api/std", "sp-authority-discovery/std", "sp-block-builder/std", @@ -305,6 +310,7 @@ runtime-benchmarks = [ "pallet-salary/runtime-benchmarks", "pallet-scheduler/runtime-benchmarks", "pallet-session-benchmarking/runtime-benchmarks", + "pallet-skip-feeless-payment/runtime-benchmarks", "pallet-society/runtime-benchmarks", 
"pallet-staking/runtime-benchmarks", "pallet-state-trie-migration/runtime-benchmarks", @@ -378,6 +384,7 @@ try-runtime = [ "pallet-salary/try-runtime", "pallet-scheduler/try-runtime", "pallet-session/try-runtime", + "pallet-skip-feeless-payment/try-runtime", "pallet-society/try-runtime", "pallet-staking/try-runtime", "pallet-state-trie-migration/try-runtime", diff --git a/substrate/bin/node/runtime/src/impls.rs b/substrate/bin/node/runtime/src/impls.rs index 430a1ac2824b806acf7d95715c72cf5448d2a5a6..717fbeadada4fdff2e29a5ca4d7f90c567479bf1 100644 --- a/substrate/bin/node/runtime/src/impls.rs +++ b/substrate/bin/node/runtime/src/impls.rs @@ -26,6 +26,7 @@ use frame_support::{ }; use pallet_alliance::{IdentityVerifier, ProposalIndex, ProposalProvider}; use pallet_asset_tx_payment::HandleCredit; +use pallet_identity::legacy::IdentityField; use sp_std::prelude::*; use crate::{ @@ -56,8 +57,8 @@ impl HandleCredit for CreditToBlockAuthor { pub struct AllianceIdentityVerifier; impl IdentityVerifier for AllianceIdentityVerifier { - fn has_identity(who: &AccountId, fields: u64) -> bool { - crate::Identity::has_identity(who, fields) + fn has_required_identities(who: &AccountId) -> bool { + crate::Identity::has_identity(who, (IdentityField::Display | IdentityField::Web).bits()) } fn has_good_judgement(who: &AccountId) -> bool { diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index 2070e3f12d04fd49419f5da032b125f28b6ff4a4..d7beb29becf406ed73b6e5a2870a29f11b86194e 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -60,6 +60,7 @@ use node_primitives::{AccountIndex, Balance, BlockNumber, Hash, Moment, Nonce}; use pallet_asset_conversion::{NativeOrAssetId, NativeOrAssetIdConverter}; use pallet_broker::{CoreAssignment, CoreIndex, CoretimeInterface, PartsOf57600}; use pallet_election_provider_multi_phase::{GeometricDepositBase, SolutionAccuracyOf}; +use pallet_identity::legacy::IdentityInfo; use pallet_im_online::sr25519::AuthorityId as ImOnlineId; use pallet_nfts::PalletFeatures; use pallet_nis::WithMaximumOf; @@ -484,7 +485,7 @@ impl pallet_babe::Config for Runtime { type DisabledValidators = Session; type WeightInfo = (); type MaxAuthorities = MaxAuthorities; - type MaxNominators = MaxNominatorRewardedPerValidator; + type MaxNominators = MaxNominators; type KeyOwnerProof = >::Proof; type EquivocationReportSystem = @@ -512,6 +513,8 @@ parameter_types! { } impl pallet_balances::Config for Runtime { + type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type MaxLocks = MaxLocks; type MaxReserves = MaxReserves; type ReserveIdentifier = [u8; 8]; @@ -523,8 +526,7 @@ impl pallet_balances::Config for Runtime { type WeightInfo = pallet_balances::weights::SubstrateWeight; type FreezeIdentifier = RuntimeFreezeReason; type MaxFreezes = ConstU32<1>; - type RuntimeHoldReason = RuntimeHoldReason; - type MaxHolds = ConstU32<2>; + type MaxHolds = ConstU32<6>; } parameter_types! { @@ -567,6 +569,10 @@ impl pallet_asset_conversion_tx_payment::Config for Runtime { pallet_asset_conversion_tx_payment::AssetConversionAdapter; } +impl pallet_skip_feeless_payment::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} + parameter_types! { pub const MinimumPeriod: Moment = SLOT_DURATION / 2; } @@ -626,7 +632,7 @@ parameter_types! { pub const BondingDuration: sp_staking::EraIndex = 24 * 28; pub const SlashDeferDuration: sp_staking::EraIndex = 24 * 7; // 1/4 the bonding duration. 
pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; - pub const MaxNominatorRewardedPerValidator: u32 = 256; + pub const MaxNominators: u32 = 64; pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(17); pub OffchainRepeat: BlockNumber = 5; pub HistoryDepth: u32 = 84; @@ -661,7 +667,7 @@ impl pallet_staking::Config for Runtime { type SessionInterface = Self; type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = Session; - type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; + type MaxExposurePageSize = ConstU32<256>; type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type ElectionProvider = ElectionProviderMultiPhase; type GenesisElectionProvider = onchain::OnChainExecution; @@ -684,8 +690,6 @@ impl pallet_fast_unstake::Config for Runtime { type Currency = Balances; type Staking = Staking; type MaxErasToCheckPerBlock = ConstU32<1>; - #[cfg(feature = "runtime-benchmarks")] - type MaxBackersPerValidator = MaxNominatorRewardedPerValidator; type WeightInfo = (); } @@ -1012,7 +1016,9 @@ impl pallet_remark::Config for Runtime { type RuntimeEvent = RuntimeEvent; } -impl pallet_root_testing::Config for Runtime {} +impl pallet_root_testing::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} parameter_types! { pub const LaunchPeriod: BlockNumber = 28 * 24 * 60 * MINUTES; @@ -1347,6 +1353,7 @@ impl pallet_contracts::Config for Runtime { type CodeHashLockupDepositPercent = CodeHashLockupDepositPercent; type Debug = (); type Environment = (); + type Xcm = (); } impl pallet_sudo::Config for Runtime { @@ -1392,7 +1399,11 @@ where frame_system::CheckEra::::from(era), frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), - pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::::from(tip, None), + pallet_skip_feeless_payment::SkipCheckIfFeeless::from( + pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::::from( + tip, None, + ), + ), ); let raw_payload = SignedPayload::new(call, extra) .map_err(|e| { @@ -1449,7 +1460,7 @@ impl pallet_grandpa::Config for Runtime { type RuntimeEvent = RuntimeEvent; type WeightInfo = (); type MaxAuthorities = MaxAuthorities; - type MaxNominators = MaxNominatorRewardedPerValidator; + type MaxNominators = MaxNominators; type MaxSetIdSessionEntries = MaxSetIdSessionEntries; type KeyOwnerProof = >::Proof; type EquivocationReportSystem = @@ -1457,8 +1468,10 @@ impl pallet_grandpa::Config for Runtime { } parameter_types! 
{ - pub const BasicDeposit: Balance = 10 * DOLLARS; // 258 bytes on-chain - pub const FieldDeposit: Balance = 250 * CENTS; // 66 bytes on-chain + // difference of 26 bytes on-chain for the registration and 9 bytes on-chain for the identity + // information, already accounted for by the byte deposit + pub const BasicDeposit: Balance = deposit(1, 17); + pub const ByteDeposit: Balance = deposit(0, 1); pub const SubAccountDeposit: Balance = 2 * DOLLARS; // 53 bytes on-chain pub const MaxSubAccounts: u32 = 100; pub const MaxAdditionalFields: u32 = 100; @@ -1469,10 +1482,10 @@ impl pallet_identity::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Currency = Balances; type BasicDeposit = BasicDeposit; - type FieldDeposit = FieldDeposit; + type ByteDeposit = ByteDeposit; type SubAccountDeposit = SubAccountDeposit; type MaxSubAccounts = MaxSubAccounts; - type MaxAdditionalFields = MaxAdditionalFields; + type IdentityInformation = IdentityInfo; type MaxRegistrars = MaxRegistrars; type Slashed = Treasury; type ForceOrigin = EnsureRootOrHalfCouncil; @@ -1830,6 +1843,7 @@ impl pallet_nfts::Config for Runtime { impl pallet_transaction_storage::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Currency = Balances; + type RuntimeHoldReason = RuntimeHoldReason; type RuntimeCall = RuntimeCall; type FeeDestination = (); type WeightInfo = pallet_transaction_storage::weights::SubstrateWeight; @@ -2129,6 +2143,7 @@ construct_runtime!( Statement: pallet_statement, Broker: pallet_broker, Mixnet: pallet_mixnet, + SkipFeelessPayment: pallet_skip_feeless_payment, } ); @@ -2155,7 +2170,10 @@ pub type SignedExtra = ( frame_system::CheckEra, frame_system::CheckNonce, frame_system::CheckWeight, - pallet_asset_conversion_tx_payment::ChargeAssetTxPayment, + pallet_skip_feeless_payment::SkipCheckIfFeeless< + Runtime, + pallet_asset_conversion_tx_payment::ChargeAssetTxPayment, + >, ); /// Unchecked extrinsic type as expected by this runtime. @@ -2176,9 +2194,10 @@ pub type Executive = frame_executive::Executive< >; // All migrations executed on runtime upgrade as a nested tuple of types implementing -// `OnRuntimeUpgrade`. +// `OnRuntimeUpgrade`. Note: These are examples and do not need to be run directly +// after the genesis block. type Migrations = ( - pallet_nomination_pools::migration::v2::MigrateToV2, + pallet_nomination_pools::migration::versioned::V6ToV7, pallet_alliance::migration::Migration, pallet_contracts::Migration, ); @@ -2386,10 +2405,14 @@ impl_runtime_apis! { } } - impl pallet_staking_runtime_api::StakingApi for Runtime { + impl pallet_staking_runtime_api::StakingApi for Runtime { fn nominations_quota(balance: Balance) -> u32 { Staking::api_nominations_quota(balance) } + + fn eras_stakers_page_count(era: sp_staking::EraIndex, account: AccountId) -> sp_staking::Page { + Staking::api_eras_stakers_page_count(era, account) + } } impl sp_consensus_babe::BabeApi for Runtime { @@ -2627,10 +2650,10 @@ impl_runtime_apis! 
{ fn system_attribute( collection: u32, - item: u32, + item: Option, key: Vec, ) -> Option> { - >::system_attribute(&collection, &item, &key) + >::system_attribute(&collection, item.as_ref(), &key) } fn collection_attribute(collection: u32, key: Vec) -> Option> { diff --git a/substrate/bin/node/testing/Cargo.toml b/substrate/bin/node/testing/Cargo.toml index f5a39693301ce6443e9bef6ba20edf5183357b6c..e4fb06b5dcd37960c5522fe79c6aacab1c832431 100644 --- a/substrate/bin/node/testing/Cargo.toml +++ b/substrate/bin/node/testing/Cargo.toml @@ -19,13 +19,14 @@ futures = "0.3.21" log = "0.4.17" tempfile = "3.1.0" frame-system = { path = "../../../frame/system" } -node-executor = { path = "../executor" } +node-executor = { package = "staging-node-executor", path = "../executor" } node-primitives = { path = "../primitives" } kitchensink-runtime = { path = "../runtime" } pallet-asset-conversion = { path = "../../../frame/asset-conversion" } pallet-assets = { path = "../../../frame/assets" } pallet-asset-conversion-tx-payment = { path = "../../../frame/transaction-payment/asset-conversion-tx-payment" } pallet-asset-tx-payment = { path = "../../../frame/transaction-payment/asset-tx-payment" } +pallet-skip-feeless-payment = { path = "../../../frame/transaction-payment/skip-feeless-payment" } sc-block-builder = { path = "../../../client/block-builder" } sc-client-api = { path = "../../../client/api" } sc-client-db = { path = "../../../client/db", features = ["rocksdb"]} diff --git a/substrate/bin/node/testing/src/bench.rs b/substrate/bin/node/testing/src/bench.rs index f1ab2212239b1937361af8941c89e254c6485a84..89b96c0191ce813a6ba49c1263014da4ff3b1c9d 100644 --- a/substrate/bin/node/testing/src/bench.rs +++ b/substrate/bin/node/testing/src/bench.rs @@ -39,8 +39,8 @@ use kitchensink_runtime::{ RuntimeCall, Signature, SystemCall, UncheckedExtrinsic, }; use node_primitives::Block; -use sc_block_builder::BlockBuilderProvider; -use sc_client_api::execution_extensions::ExecutionExtensions; +use sc_block_builder::BlockBuilderBuilder; +use sc_client_api::{execution_extensions::ExecutionExtensions, UsageProvider}; use sc_client_db::PruningMode; use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult, ImportedAux}; use sc_executor::{NativeElseWasmExecutor, WasmExecutionMethod, WasmtimeInstantiationStrategy}; @@ -398,7 +398,7 @@ impl BenchDb { let client_config = sc_service::ClientConfig::default(); let genesis_block_builder = sc_service::GenesisBlockBuilder::new( - &keyring.generate_genesis(), + keyring.as_storage_builder(), !client_config.no_genesis, backend.clone(), executor.clone(), @@ -455,8 +455,13 @@ impl BenchDb { /// Generate new block using this database. pub fn generate_block(&mut self, content: BlockContent) -> Block { let client = self.client(); + let chain = client.usage_info().chain; - let mut block = client.new_block(Default::default()).expect("Block creation failed"); + let mut block = BlockBuilderBuilder::new(&client) + .on_parent_block(chain.best_hash) + .with_parent_block_number(chain.best_number) + .build() + .expect("Failed to create block builder."); for extrinsic in self.generate_inherents(&client) { block.push(extrinsic).expect("Push inherent failed"); @@ -585,12 +590,20 @@ impl BenchKeyring { } } - /// Generate genesis with accounts from this keyring endowed with some balance. 
- pub fn generate_genesis(&self) -> kitchensink_runtime::RuntimeGenesisConfig { - crate::genesis::config_endowed( - Some(kitchensink_runtime::wasm_binary_unwrap()), - self.collect_account_ids(), - ) + /// Generate genesis with accounts from this keyring endowed with some balance and + /// kitchensink_runtime code blob. + pub fn as_storage_builder(&self) -> &dyn sp_runtime::BuildStorage { + self + } +} + +impl sp_runtime::BuildStorage for BenchKeyring { + fn assimilate_storage(&self, storage: &mut sp_core::storage::Storage) -> Result<(), String> { + storage.top.insert( + sp_core::storage::well_known_keys::CODE.to_vec(), + kitchensink_runtime::wasm_binary_unwrap().into(), + ); + crate::genesis::config_endowed(self.collect_account_ids()).assimilate_storage(storage) } } diff --git a/substrate/bin/node/testing/src/client.rs b/substrate/bin/node/testing/src/client.rs index c55867360bd62c7e225f6fcddfb333f259bdb19f..22276833fb665c8396d40d8c52763c7484be46ee 100644 --- a/substrate/bin/node/testing/src/client.rs +++ b/substrate/bin/node/testing/src/client.rs @@ -42,7 +42,12 @@ pub struct GenesisParameters; impl substrate_test_client::GenesisInit for GenesisParameters { fn genesis_storage(&self) -> Storage { - crate::genesis::config(None).build_storage().unwrap() + let mut storage = crate::genesis::config().build_storage().unwrap(); + storage.top.insert( + sp_core::storage::well_known_keys::CODE.to_vec(), + kitchensink_runtime::wasm_binary_unwrap().into(), + ); + storage } } diff --git a/substrate/bin/node/testing/src/genesis.rs b/substrate/bin/node/testing/src/genesis.rs index ab5311751a55fe39b994f8131e9131bdd3ae2227..eecbf64775b67460e08d27e47c00d9cf28554b19 100644 --- a/substrate/bin/node/testing/src/genesis.rs +++ b/substrate/bin/node/testing/src/genesis.rs @@ -20,22 +20,21 @@ use crate::keyring::*; use kitchensink_runtime::{ - constants::currency::*, wasm_binary_unwrap, AccountId, AssetsConfig, BabeConfig, - BalancesConfig, GluttonConfig, GrandpaConfig, IndicesConfig, RuntimeGenesisConfig, - SessionConfig, SocietyConfig, StakerStatus, StakingConfig, SystemConfig, - BABE_GENESIS_EPOCH_CONFIG, + constants::currency::*, AccountId, AssetsConfig, BabeConfig, BalancesConfig, GluttonConfig, + GrandpaConfig, IndicesConfig, RuntimeGenesisConfig, SessionConfig, SocietyConfig, StakerStatus, + StakingConfig, BABE_GENESIS_EPOCH_CONFIG, }; use sp_keyring::{Ed25519Keyring, Sr25519Keyring}; use sp_runtime::Perbill; /// Create genesis runtime configuration for tests. -pub fn config(code: Option<&[u8]>) -> RuntimeGenesisConfig { - config_endowed(code, Default::default()) +pub fn config() -> RuntimeGenesisConfig { + config_endowed(Default::default()) } /// Create genesis runtime configuration for tests with some extra /// endowed accounts. 
-pub fn config_endowed(code: Option<&[u8]>, extra_endowed: Vec) -> RuntimeGenesisConfig { +pub fn config_endowed(extra_endowed: Vec) -> RuntimeGenesisConfig { let mut endowed = vec![ (alice(), 111 * DOLLARS), (bob(), 100 * DOLLARS), @@ -48,10 +47,7 @@ pub fn config_endowed(code: Option<&[u8]>, extra_endowed: Vec) -> Run endowed.extend(extra_endowed.into_iter().map(|endowed| (endowed, 100 * DOLLARS))); RuntimeGenesisConfig { - system: SystemConfig { - code: code.map(|x| x.to_vec()).unwrap_or_else(|| wasm_binary_unwrap().to_vec()), - ..Default::default() - }, + system: Default::default(), indices: IndicesConfig { indices: vec![] }, balances: BalancesConfig { balances: endowed }, session: SessionConfig { diff --git a/substrate/bin/node/testing/src/keyring.rs b/substrate/bin/node/testing/src/keyring.rs index 22a8f5deb19f737537227ae21b4c0c20770f7759..9940077c9da638360e26d9722d61fbebdc6d124c 100644 --- a/substrate/bin/node/testing/src/keyring.rs +++ b/substrate/bin/node/testing/src/keyring.rs @@ -78,7 +78,9 @@ pub fn signed_extra(nonce: Nonce, extra_fee: Balance) -> SignedExtra { frame_system::CheckEra::from(Era::mortal(256, 0)), frame_system::CheckNonce::from(nonce), frame_system::CheckWeight::new(), - pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::from(extra_fee, None), + pallet_skip_feeless_payment::SkipCheckIfFeeless::from( + pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::from(extra_fee, None), + ), ) } diff --git a/substrate/bin/utils/chain-spec-builder/Cargo.toml b/substrate/bin/utils/chain-spec-builder/Cargo.toml index c7690faf7d065dd941f71aaf7f74d0166f8ee7ce..f587989e0039be68f113d62724f4cad23d2e4d73 100644 --- a/substrate/bin/utils/chain-spec-builder/Cargo.toml +++ b/substrate/bin/utils/chain-spec-builder/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "chain-spec-builder" +name = "staging-chain-spec-builder" version = "2.0.0" authors.workspace = true edition.workspace = true @@ -7,7 +7,6 @@ build = "build.rs" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.io" repository.workspace = true -readme = "README.md" publish = false [package.metadata.docs.rs] @@ -21,11 +20,8 @@ name = "chain-spec-builder" crate-type = ["rlib"] [dependencies] -ansi_term = "0.12.1" clap = { version = "4.4.6", features = ["derive"] } -rand = "0.8" -node-cli = { path = "../../node/cli" } +log = "0.4.17" sc-chain-spec = { path = "../../../client/chain-spec" } -sc-keystore = { path = "../../../client/keystore" } -sp-core = { path = "../../../primitives/core" } -sp-keystore = { path = "../../../primitives/keystore" } +serde_json = "1.0.108" +sp-tracing = { version = "10.0.0", path = "../../../primitives/tracing" } diff --git a/substrate/bin/utils/chain-spec-builder/bin/main.rs b/substrate/bin/utils/chain-spec-builder/bin/main.rs index 53e11abbf628206a78590696f6d376416b9d9ddf..986293179a91540a2b020aeb39d9285fefef4453 100644 --- a/substrate/bin/utils/chain-spec-builder/bin/main.rs +++ b/substrate/bin/utils/chain-spec-builder/bin/main.rs @@ -17,73 +17,60 @@ // along with this program. If not, see . 
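Aside (an illustrative note, not part of the patch): with the `code` parameter dropped from `node_testing::genesis::config()`, tests that still need the runtime blob in genesis storage now insert it themselves under the well-known `:code` key, which is exactly what `GenesisParameters::genesis_storage` and the new `BuildStorage` impl for `BenchKeyring` do above. A minimal sketch of that pattern:

```rust
use sp_core::storage::{well_known_keys, Storage};
use sp_runtime::BuildStorage;

// Build the plain genesis storage, then add the runtime code under the
// well-known `:code` key, mirroring `GenesisParameters::genesis_storage`
// and `BenchKeyring::assimilate_storage` from this patch.
fn genesis_storage_with_code(code: &[u8]) -> Result<Storage, String> {
    let mut storage = node_testing::genesis::config().build_storage()?;
    storage.top.insert(well_known_keys::CODE.to_vec(), code.to_vec());
    Ok(storage)
}
```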
use chain_spec_builder::{ - generate_authority_keys_and_store, generate_chain_spec, print_seeds, ChainSpecBuilder, + generate_chain_spec_for_runtime, ChainSpecBuilder, ChainSpecBuilderCmd, ConvertToRawCmd, + UpdateCodeCmd, VerifyCmd, }; use clap::Parser; -use node_cli::chain_spec; -use rand::{distributions::Alphanumeric, rngs::OsRng, Rng}; -use sp_core::{crypto::Ss58Codec, sr25519}; +use sc_chain_spec::{update_code_in_json_chain_spec, GenericChainSpec}; +use staging_chain_spec_builder as chain_spec_builder; use std::fs; fn main() -> Result<(), String> { - #[cfg(build_type = "debug")] - println!( - "The chain spec builder builds a chain specification that includes a Substrate runtime \ - compiled as WASM. To ensure proper functioning of the included runtime compile (or run) \ - the chain spec builder binary in `--release` mode.\n", - ); + sp_tracing::try_init_simple(); let builder = ChainSpecBuilder::parse(); - let chain_spec_path = builder.chain_spec_path().to_path_buf(); + let chain_spec_path = builder.chain_spec_path.to_path_buf(); - let (authority_seeds, nominator_accounts, endowed_accounts, sudo_account) = match builder { - ChainSpecBuilder::Generate { authorities, nominators, endowed, keystore_path, .. } => { - let authorities = authorities.max(1); - let rand_str = || -> String { - OsRng.sample_iter(&Alphanumeric).take(32).map(char::from).collect() - }; - - let authority_seeds = (0..authorities).map(|_| rand_str()).collect::>(); - let nominator_seeds = (0..nominators).map(|_| rand_str()).collect::>(); - let endowed_seeds = (0..endowed).map(|_| rand_str()).collect::>(); - let sudo_seed = rand_str(); - - print_seeds(&authority_seeds, &nominator_seeds, &endowed_seeds, &sudo_seed); - - if let Some(keystore_path) = keystore_path { - generate_authority_keys_and_store(&authority_seeds, &keystore_path)?; - } - - let nominator_accounts = nominator_seeds - .into_iter() - .map(|seed| { - chain_spec::get_account_id_from_seed::(&seed).to_ss58check() - }) - .collect(); - - let endowed_accounts = endowed_seeds - .into_iter() - .map(|seed| { - chain_spec::get_account_id_from_seed::(&seed).to_ss58check() - }) - .collect(); + match builder.command { + ChainSpecBuilderCmd::Create(cmd) => { + let chain_spec_json = generate_chain_spec_for_runtime(&cmd)?; + fs::write(chain_spec_path, chain_spec_json).map_err(|err| err.to_string())?; + }, + ChainSpecBuilderCmd::UpdateCode(UpdateCodeCmd { + ref input_chain_spec, + ref runtime_wasm_path, + }) => { + let chain_spec = GenericChainSpec::<()>::from_json_file(input_chain_spec.clone())?; + + let mut chain_spec_json = + serde_json::from_str::(&chain_spec.as_json(false)?) + .map_err(|e| format!("Conversion to json failed: {e}"))?; + update_code_in_json_chain_spec( + &mut chain_spec_json, + &fs::read(runtime_wasm_path.as_path()) + .map_err(|e| format!("Wasm blob file could not be read: {e}"))?[..], + ); + + let chain_spec_json = serde_json::to_string_pretty(&chain_spec_json) + .map_err(|e| format!("to pretty failed: {e}"))?; + fs::write(chain_spec_path, chain_spec_json).map_err(|err| err.to_string())?; + }, + ChainSpecBuilderCmd::ConvertToRaw(ConvertToRawCmd { ref input_chain_spec }) => { + let chain_spec = GenericChainSpec::<()>::from_json_file(input_chain_spec.clone())?; - let sudo_account = - chain_spec::get_account_id_from_seed::(&sudo_seed).to_ss58check(); + let chain_spec_json = + serde_json::from_str::(&chain_spec.as_json(true)?) 
+ .map_err(|e| format!("Conversion to json failed: {e}"))?; - (authority_seeds, nominator_accounts, endowed_accounts, sudo_account) + let chain_spec_json = serde_json::to_string_pretty(&chain_spec_json) + .map_err(|e| format!("Conversion to pretty failed: {e}"))?; + fs::write(chain_spec_path, chain_spec_json).map_err(|err| err.to_string())?; + }, + ChainSpecBuilderCmd::Verify(VerifyCmd { ref input_chain_spec }) => { + let chain_spec = GenericChainSpec::<()>::from_json_file(input_chain_spec.clone())?; + let _ = serde_json::from_str::(&chain_spec.as_json(true)?) + .map_err(|e| format!("Conversion to json failed: {e}"))?; }, - ChainSpecBuilder::New { - authority_seeds, - nominator_accounts, - endowed_accounts, - sudo_account, - .. - } => (authority_seeds, nominator_accounts, endowed_accounts, sudo_account), }; - - let json = - generate_chain_spec(authority_seeds, nominator_accounts, endowed_accounts, sudo_account)?; - - fs::write(chain_spec_path, json).map_err(|err| err.to_string()) + Ok(()) } diff --git a/substrate/bin/utils/chain-spec-builder/src/lib.rs b/substrate/bin/utils/chain-spec-builder/src/lib.rs index 2b88e40ef74fd804a086f83b2903572aef373904..60ec0f1a656b42095d760a3016efb90681f7a242 100644 --- a/substrate/bin/utils/chain-spec-builder/src/lib.rs +++ b/substrate/bin/utils/chain-spec-builder/src/lib.rs @@ -21,223 +21,220 @@ //! A chain-spec is short for `chain-configuration`. See the [`sc-chain-spec`] for more information. //! //! Note that this binary is analogous to the `build-spec` subcommand, contained in typical -//! substrate-based nodes. This particular binary is capable of building a more sophisticated chain -//! specification that can be used with the substrate-node, ie. [`node-cli`]. +//! substrate-based nodes. This particular binary is capable of interacting with +//! [`sp-genesis-builder`] implementation of any provided runtime allowing to build chain-spec JSON +//! files. //! -//! See [`ChainSpecBuilder`] for a list of available commands. +//! See [`ChainSpecBuilderCmd`] for a list of available commands. +//! +//! ## Typical use-cases. +//! ##### Get default config from runtime. +//! +//! Query the default genesis config from the provided `runtime.wasm` and use it in the chain +//! spec. Tool can also store runtime's default genesis config in given file: +//! ```text +//! chain-spec-builder create -r runtime.wasm default /dev/stdout +//! ``` +//! +//! _Note:_ [`GenesisBuilder::create_default_config`][sp-genesis-builder-create] runtime function is called. +//! +//! +//! ##### Generate raw storage chain spec using genesis config patch. +//! +//! Patch the runtime's default genesis config with provided `patch.json` and generate raw +//! storage (`-s`) version of chain spec: +//! ```text +//! chain-spec-builder create -s -r runtime.wasm patch patch.json +//! ``` +//! +//! _Note:_ [`GenesisBuilder::build_config`][sp-genesis-builder-build] runtime function is called. +//! +//! ##### Generate raw storage chain spec using full genesis config. +//! +//! Build the chain spec using provided full genesis config json file. No defaults will be used: +//! ```text +//! chain-spec-builder create -s -r runtime.wasm full full-genesis-config.json +//! ``` +//! +//! _Note_: [`GenesisBuilder::build_config`][sp-genesis-builder-build] runtime function is called. +//! +//! ##### Generate human readable chain spec using provided genesis config patch. +//! ```text +//! chain-spec-builder create -r runtime.wasm patch patch.json +//! ``` +//! +//! 
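For orientation (an illustrative aside, not part of the patch): a genesis-config patch is a partial JSON document whose keys mirror the runtime's `RuntimeGenesisConfig`; anything left out keeps the runtime default. Using the kitchensink field names visible in `default_genesis_config.json` earlier in this diff, and Alice's well-known SS58 address as a placeholder, a minimal `patch.json` could look like:

```json
{
  "balances": {
    "balances": [
      ["5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", 1000000000000000]
    ]
  },
  "sudo": {
    "key": "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY"
  }
}
```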
##### Generate human readable chain spec using provided full genesis config. +//! ```text +//! chain-spec-builder create -r runtime.wasm full full-genesis-config.json +//! ``` +//! +//! ##### Extra tools. +//! The `chain-spec-builder` provides also some extra utilities: [`VerifyCmd`], [`ConvertToRawCmd`], [`UpdateCodeCmd`]. //! //! [`sc-chain-spec`]: ../sc_chain_spec/index.html //! [`node-cli`]: ../node_cli/index.html +//! [`sp-genesis-builder`]: ../sp_genesis_builder/index.html +//! [sp-genesis-builder-create]: ../sp_genesis_builder/trait.GenesisBuilder.html#method.create_default_config +//! [sp-genesis-builder-build]: ../sp_genesis_builder/trait.GenesisBuilder.html#method.build_config -use std::path::{Path, PathBuf}; - -use ansi_term::Style; -use clap::Parser; +use std::{fs, path::PathBuf}; -use node_cli::chain_spec::{self, AccountId}; -use sc_keystore::LocalKeystore; -use sp_core::crypto::{ByteArray, Ss58Codec}; -use sp_keystore::KeystorePtr; +use clap::{Parser, Subcommand}; +use sc_chain_spec::{GenericChainSpec, GenesisConfigBuilderRuntimeCaller}; +use serde_json::Value; -/// A utility to easily create a testnet chain spec definition with a given set -/// of authorities and endowed accounts and/or generate random accounts. -#[derive(Parser)] +/// A utility to easily create a chain spec definition. +#[derive(Debug, Parser)] #[command(rename_all = "kebab-case")] -pub enum ChainSpecBuilder { - /// Create a new chain spec with the given authorities, endowed and sudo - /// accounts. - New { - /// Authority key seed. - #[arg(long, short, required = true)] - authority_seeds: Vec, - /// Active nominators (SS58 format), each backing a random subset of the aforementioned - /// authorities. - #[arg(long, short, default_value = "0")] - nominator_accounts: Vec, - /// Endowed account address (SS58 format). - #[arg(long, short)] - endowed_accounts: Vec, - /// Sudo account address (SS58 format). - #[arg(long, short)] - sudo_account: String, - /// The path where the chain spec should be saved. - #[arg(long, short, default_value = "./chain_spec.json")] - chain_spec_path: PathBuf, - }, - /// Create a new chain spec with the given number of authorities and endowed - /// accounts. Random keys will be generated as required. - Generate { - /// The number of authorities. - #[arg(long, short)] - authorities: usize, - /// The number of nominators backing the aforementioned authorities. - /// - /// Will nominate a random subset of `authorities`. - #[arg(long, short, default_value_t = 0)] - nominators: usize, - /// The number of endowed accounts. - #[arg(long, short, default_value_t = 0)] - endowed: usize, - /// The path where the chain spec should be saved. - #[arg(long, short, default_value = "./chain_spec.json")] - chain_spec_path: PathBuf, - /// Path to use when saving generated keystores for each authority. - /// - /// At this path, a new folder will be created for each authority's - /// keystore named `auth-$i` where `i` is the authority index, i.e. - /// `auth-0`, `auth-1`, etc. - #[arg(long, short)] - keystore_path: Option, - }, +pub struct ChainSpecBuilder { + #[command(subcommand)] + pub command: ChainSpecBuilderCmd, + /// The path where the chain spec should be saved. + #[arg(long, short, default_value = "./chain_spec.json")] + pub chain_spec_path: PathBuf, } -impl ChainSpecBuilder { - /// Returns the path where the chain spec should be saved. - pub fn chain_spec_path(&self) -> &Path { - match self { - ChainSpecBuilder::New { chain_spec_path, .. 
} => chain_spec_path.as_path(), - ChainSpecBuilder::Generate { chain_spec_path, .. } => chain_spec_path.as_path(), - } - } +#[derive(Debug, Subcommand)] +#[command(rename_all = "kebab-case")] +pub enum ChainSpecBuilderCmd { + Create(CreateCmd), + Verify(VerifyCmd), + UpdateCode(UpdateCodeCmd), + ConvertToRaw(ConvertToRawCmd), } -fn genesis_constructor( - authority_seeds: &[String], - nominator_accounts: &[AccountId], - endowed_accounts: &[AccountId], - sudo_account: &AccountId, -) -> chain_spec::RuntimeGenesisConfig { - let authorities = authority_seeds - .iter() - .map(AsRef::as_ref) - .map(chain_spec::authority_keys_from_seed) - .collect::>(); - - chain_spec::testnet_genesis( - authorities, - nominator_accounts.to_vec(), - sudo_account.clone(), - Some(endowed_accounts.to_vec()), - ) +/// Create a new chain spec by interacting with the provided runtime wasm blob. +#[derive(Parser, Debug)] +pub struct CreateCmd { + /// The name of chain. + #[arg(long, short = 'n', default_value = "Custom")] + chain_name: String, + /// The chain id. + #[arg(long, short = 'i', default_value = "custom")] + chain_id: String, + /// The path to runtime wasm blob. + #[arg(long, short)] + runtime_wasm_path: PathBuf, + /// Export chainspec as raw storage. + #[arg(long, short = 's')] + raw_storage: bool, + /// Verify the genesis config. This silently generates the raw storage from genesis config. Any + /// errors will be reported. + #[arg(long, short = 'v')] + verify: bool, + #[command(subcommand)] + action: GenesisBuildAction, } -/// Generate the chain spec using the given seeds and accounts. -pub fn generate_chain_spec( - authority_seeds: Vec, - nominator_accounts: Vec, - endowed_accounts: Vec, - sudo_account: String, -) -> Result { - let parse_account = |address: String| { - AccountId::from_string(&address) - .map_err(|err| format!("Failed to parse account address: {:?}", err)) - }; - - let nominator_accounts = nominator_accounts - .into_iter() - .map(parse_account) - .collect::, String>>()?; - - let endowed_accounts = endowed_accounts - .into_iter() - .map(parse_account) - .collect::, String>>()?; - - let sudo_account = parse_account(sudo_account)?; - - let chain_spec = chain_spec::ChainSpec::from_genesis( - "Custom", - "custom", - sc_chain_spec::ChainType::Live, - move || { - genesis_constructor( - &authority_seeds, - &nominator_accounts, - &endowed_accounts, - &sudo_account, - ) - }, - vec![], - None, - None, - None, - None, - Default::default(), - ); - - chain_spec.as_json(false) +#[derive(Subcommand, Debug, Clone)] +enum GenesisBuildAction { + Patch(PatchCmd), + Full(FullCmd), + Default(DefaultCmd), } -/// Generate the authority keys and store them in the given `keystore_path`. -pub fn generate_authority_keys_and_store( - seeds: &[String], - keystore_path: &Path, -) -> Result<(), String> { - for (n, seed) in seeds.iter().enumerate() { - let keystore: KeystorePtr = - LocalKeystore::open(keystore_path.join(format!("auth-{}", n)), None) - .map_err(|err| err.to_string())? 
- .into(); - - let (_, _, grandpa, babe, im_online, authority_discovery, mixnet) = - chain_spec::authority_keys_from_seed(seed); - - let insert_key = |key_type, public| { - keystore - .insert(key_type, &format!("//{}", seed), public) - .map_err(|_| format!("Failed to insert key: {}", grandpa)) - }; - - insert_key(sp_core::crypto::key_types::BABE, babe.as_slice())?; - - insert_key(sp_core::crypto::key_types::GRANDPA, grandpa.as_slice())?; - - insert_key(sp_core::crypto::key_types::IM_ONLINE, im_online.as_slice())?; - - insert_key( - sp_core::crypto::key_types::AUTHORITY_DISCOVERY, - authority_discovery.as_slice(), - )?; - - insert_key(sp_core::crypto::key_types::MIXNET, mixnet.as_slice())?; - } - - Ok(()) +/// Patches the runtime's default genesis config with provided patch. +#[derive(Parser, Debug, Clone)] +struct PatchCmd { + /// The path to the runtime genesis config patch. + patch_path: PathBuf, } -/// Print the given seeds -pub fn print_seeds( - authority_seeds: &[String], - nominator_seeds: &[String], - endowed_seeds: &[String], - sudo_seed: &str, -) { - let header = Style::new().bold().underline(); - let entry = Style::new().bold(); +/// Build the genesis config for runtime using provided json file. No defaults will be used. +#[derive(Parser, Debug, Clone)] +struct FullCmd { + /// The path to the full runtime genesis config json file. + config_path: PathBuf, +} - println!("{}", header.paint("Authority seeds")); +/// Gets the default genesis config for the runtime and uses it in ChainSpec. Please note that +/// default genesis config may not be valid. For some runtimes initial values should be added there +/// (e.g. session keys, babe epoch). +#[derive(Parser, Debug, Clone)] +struct DefaultCmd { + /// If provided stores the default genesis config json file at given path (in addition to + /// chain-spec). + default_config_path: Option, +} - for (n, seed) in authority_seeds.iter().enumerate() { - println!("{} //{}", entry.paint(format!("auth-{}:", n)), seed); - } +/// Updates the code in the provided input chain spec. +/// +/// The code field of the chain spec will be updated with the runtime provided in the +/// command line. This operation supports both plain and raw formats. +#[derive(Parser, Debug, Clone)] +pub struct UpdateCodeCmd { + /// Chain spec to be updated. + pub input_chain_spec: PathBuf, + /// The path to new runtime wasm blob to be stored into chain-spec. + pub runtime_wasm_path: PathBuf, +} - println!("{}", header.paint("Nominator seeds")); +/// Converts the given chain spec into the raw format. +#[derive(Parser, Debug, Clone)] +pub struct ConvertToRawCmd { + /// Chain spec to be converted. + pub input_chain_spec: PathBuf, +} - for (n, seed) in nominator_seeds.iter().enumerate() { - println!("{} //{}", entry.paint(format!("nom-{}:", n)), seed); - } +/// Verifies the provided input chain spec. +/// +/// Silently checks if given input chain spec can be converted to raw. It allows to check if all +/// RuntimeGenesisConfig fiels are properly initialized and if the json does not contain invalid +/// fields. +#[derive(Parser, Debug, Clone)] +pub struct VerifyCmd { + /// Chain spec to be verified. + pub input_chain_spec: PathBuf, +} - println!(); +/// Processes `CreateCmd` and returns JSON version of `ChainSpec`. 
+pub fn generate_chain_spec_for_runtime(cmd: &CreateCmd) -> Result { + let code = fs::read(cmd.runtime_wasm_path.as_path()) + .map_err(|e| format!("wasm blob shall be readable {e}"))?; + + let builder = GenericChainSpec::<()>::builder(&code[..], Default::default()) + .with_name(&cmd.chain_name[..]) + .with_id(&cmd.chain_id[..]) + .with_chain_type(sc_chain_spec::ChainType::Live); + + let builder = match cmd.action { + GenesisBuildAction::Patch(PatchCmd { ref patch_path }) => { + let patch = fs::read(patch_path.as_path()) + .map_err(|e| format!("patch file {patch_path:?} shall be readable: {e}"))?; + builder.with_genesis_config_patch(serde_json::from_slice::(&patch[..]).map_err( + |e| format!("patch file {patch_path:?} shall contain a valid json: {e}"), + )?) + }, + GenesisBuildAction::Full(FullCmd { ref config_path }) => { + let config = fs::read(config_path.as_path()) + .map_err(|e| format!("config file {config_path:?} shall be readable: {e}"))?; + builder.with_genesis_config(serde_json::from_slice::(&config[..]).map_err( + |e| format!("config file {config_path:?} shall contain a valid json: {e}"), + )?) + }, + GenesisBuildAction::Default(DefaultCmd { ref default_config_path }) => { + let caller: GenesisConfigBuilderRuntimeCaller = + GenesisConfigBuilderRuntimeCaller::new(&code[..]); + let default_config = caller + .get_default_config() + .map_err(|e| format!("getting default config from runtime should work: {e}"))?; + default_config_path.clone().map(|path| { + fs::write(path.as_path(), serde_json::to_string_pretty(&default_config).unwrap()) + .map_err(|err| err.to_string()) + }); + builder.with_genesis_config(default_config) + }, + }; - if !endowed_seeds.is_empty() { - println!("{}", header.paint("Endowed seeds")); - for (n, seed) in endowed_seeds.iter().enumerate() { - println!("{} //{}", entry.paint(format!("endowed-{}:", n)), seed); - } + let chain_spec = builder.build(); - println!(); + match (cmd.verify, cmd.raw_storage) { + (_, true) => chain_spec.as_json(true), + (true, false) => { + chain_spec.as_json(true)?; + println!("Genesis config verification: OK"); + chain_spec.as_json(false) + }, + (false, false) => chain_spec.as_json(false), } - - println!("{}", header.paint("Sudo seed")); - println!("//{}", sudo_seed); } diff --git a/substrate/bin/utils/subkey/README.md b/substrate/bin/utils/subkey/README.md index 60e5a9ca935008e81518263b2802b3eb1e90d636..3232c82958727555a44168351309b6039a708d89 100644 --- a/substrate/bin/utils/subkey/README.md +++ b/substrate/bin/utils/subkey/README.md @@ -74,7 +74,7 @@ The output above shows a **secret phrase** (also called **mnemonic phrase**) and **Private Key**). Those 2 secrets are the pieces of information you MUST keep safe and secret. All the other information below can be derived from those secrets. -The output above also show the **public key** and the **Account ID**. Those are the independant from the network where +The output above also show the **public key** and the **Account ID**. Those are the independent from the network where you will use the key. The **SS58 address** (or **Public Address**) of a new account is a reprensentation of the public keys of an account for @@ -163,7 +163,7 @@ This time, we properly recovered `5He5pZpc7AJ8evPuab37vJF6KkFDqq9uDq2WXh877Qw6ia ### Inspecting a key -If you have *some data* about a key, `subkey inpsect` will help you discover more information about it. +If you have *some data* about a key, `subkey inspect` will help you discover more information about it. 
If you have **secrets** that you would like to verify for instance, you can use: diff --git a/substrate/client/basic-authorship/Cargo.toml b/substrate/client/basic-authorship/Cargo.toml index b65a591795549115f7f22e4fa9cf409fcf3ecfbd..1d60fc7f53e3b5903153132fc48a6f8c026983dc 100644 --- a/substrate/client/basic-authorship/Cargo.toml +++ b/substrate/client/basic-authorship/Cargo.toml @@ -19,7 +19,6 @@ futures-timer = "3.0.1" log = "0.4.17" prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" } sc-block-builder = { path = "../block-builder" } -sc-client-api = { path = "../api" } sc-proposer-metrics = { path = "../proposer-metrics" } sc-telemetry = { path = "../telemetry" } sc-transaction-pool-api = { path = "../transaction-pool/api" } @@ -32,5 +31,6 @@ sp-runtime = { path = "../../primitives/runtime" } [dev-dependencies] parking_lot = "0.12.1" +sc-client-api = { path = "../api" } sc-transaction-pool = { path = "../transaction-pool" } substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } diff --git a/substrate/client/basic-authorship/src/basic_authorship.rs b/substrate/client/basic-authorship/src/basic_authorship.rs index 57c2996ab4063143694f3c2b44af93aae74bf775..c07f3e639c3e15b572dbb470c1518b9ddfddae32 100644 --- a/substrate/client/basic-authorship/src/basic_authorship.rs +++ b/substrate/client/basic-authorship/src/basic_authorship.rs @@ -28,11 +28,10 @@ use futures::{ select, }; use log::{debug, error, info, trace, warn}; -use sc_block_builder::{BlockBuilderApi, BlockBuilderProvider}; -use sc_client_api::backend; +use sc_block_builder::{BlockBuilderApi, BlockBuilderBuilder}; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_INFO}; use sc_transaction_pool_api::{InPoolTransaction, TransactionPool}; -use sp_api::{ApiExt, ProvideRuntimeApi}; +use sp_api::{ApiExt, CallApiAt, ProvideRuntimeApi}; use sp_blockchain::{ApplyExtrinsicFailed::Validity, Error::ApplyExtrinsicFailed, HeaderBackend}; use sp_consensus::{DisableProofRecording, EnableProofRecording, ProofRecording, Proposal}; use sp_core::traits::SpawnNamed; @@ -60,7 +59,7 @@ const DEFAULT_SOFT_DEADLINE_PERCENT: Percent = Percent::from_percent(50); const LOG_TARGET: &'static str = "basic-authorship"; /// [`Proposer`] factory. -pub struct ProposerFactory { +pub struct ProposerFactory { spawn_handle: Box, /// The client instance. client: Arc, @@ -84,11 +83,11 @@ pub struct ProposerFactory { telemetry: Option, /// When estimating the block size, should the proof be included? include_proof_in_block_size_estimation: bool, - /// phantom member to pin the `Backend`/`ProofRecording` type. - _phantom: PhantomData<(B, PR)>, + /// phantom member to pin the `ProofRecording` type. + _phantom: PhantomData, } -impl ProposerFactory { +impl ProposerFactory { /// Create a new proposer factory. /// /// Proof recording will be disabled when using proposers built by this instance to build @@ -114,7 +113,7 @@ impl ProposerFactory { } } -impl ProposerFactory { +impl ProposerFactory { /// Create a new proposer factory with proof recording enabled. /// /// Each proposer created by this instance will record a proof while building a block. @@ -147,7 +146,7 @@ impl ProposerFactory { } } -impl ProposerFactory { +impl ProposerFactory { /// Set the default block size limit in bytes. 
/// /// The default value for the block size limit is: @@ -176,29 +175,23 @@ impl ProposerFactory { } } -impl ProposerFactory +impl ProposerFactory where A: TransactionPool + 'static, - B: backend::Backend + Send + Sync + 'static, Block: BlockT, - C: BlockBuilderProvider - + HeaderBackend - + ProvideRuntimeApi - + Send - + Sync - + 'static, + C: HeaderBackend + ProvideRuntimeApi + Send + Sync + 'static, C::Api: ApiExt + BlockBuilderApi, { fn init_with_now( &mut self, parent_header: &::Header, now: Box time::Instant + Send + Sync>, - ) -> Proposer { + ) -> Proposer { let parent_hash = parent_header.hash(); info!("🙌 Starting consensus session on top of parent {:?}", parent_hash); - let proposer = Proposer::<_, _, _, _, PR> { + let proposer = Proposer::<_, _, _, PR> { spawn_handle: self.spawn_handle.clone(), client: self.client.clone(), parent_hash, @@ -217,22 +210,16 @@ where } } -impl sp_consensus::Environment for ProposerFactory +impl sp_consensus::Environment for ProposerFactory where A: TransactionPool + 'static, - B: backend::Backend + Send + Sync + 'static, Block: BlockT, - C: BlockBuilderProvider - + HeaderBackend - + ProvideRuntimeApi - + Send - + Sync - + 'static, + C: HeaderBackend + ProvideRuntimeApi + CallApiAt + Send + Sync + 'static, C::Api: ApiExt + BlockBuilderApi, PR: ProofRecording, { type CreateProposer = future::Ready>; - type Proposer = Proposer; + type Proposer = Proposer; type Error = sp_blockchain::Error; fn init(&mut self, parent_header: &::Header) -> Self::CreateProposer { @@ -241,7 +228,7 @@ where } /// The proposer logic. -pub struct Proposer { +pub struct Proposer { spawn_handle: Box, client: Arc, parent_hash: Block::Hash, @@ -253,20 +240,14 @@ pub struct Proposer { include_proof_in_block_size_estimation: bool, soft_deadline_percent: Percent, telemetry: Option, - _phantom: PhantomData<(B, PR)>, + _phantom: PhantomData, } -impl sp_consensus::Proposer for Proposer +impl sp_consensus::Proposer for Proposer where A: TransactionPool + 'static, - B: backend::Backend + Send + Sync + 'static, Block: BlockT, - C: BlockBuilderProvider - + HeaderBackend - + ProvideRuntimeApi - + Send - + Sync - + 'static, + C: HeaderBackend + ProvideRuntimeApi + CallApiAt + Send + Sync + 'static, C::Api: ApiExt + BlockBuilderApi, PR: ProofRecording, { @@ -313,17 +294,11 @@ where /// It allows us to increase block utilization. 
const MAX_SKIPPED_TRANSACTIONS: usize = 8; -impl Proposer +impl Proposer where A: TransactionPool, - B: backend::Backend + Send + Sync + 'static, Block: BlockT, - C: BlockBuilderProvider - + HeaderBackend - + ProvideRuntimeApi - + Send - + Sync - + 'static, + C: HeaderBackend + ProvideRuntimeApi + CallApiAt + Send + Sync + 'static, C::Api: ApiExt + BlockBuilderApi, PR: ProofRecording, { @@ -334,16 +309,19 @@ where deadline: time::Instant, block_size_limit: Option, ) -> Result, sp_blockchain::Error> { - let propose_with_timer = time::Instant::now(); - let mut block_builder = - self.client.new_block_at(self.parent_hash, inherent_digests, PR::ENABLED)?; + let block_timer = time::Instant::now(); + let mut block_builder = BlockBuilderBuilder::new(&*self.client) + .on_parent_block(self.parent_hash) + .with_parent_block_number(self.parent_number) + .with_proof_recording(PR::ENABLED) + .with_inherent_digests(inherent_digests) + .build()?; self.apply_inherents(&mut block_builder, inherent_data)?; // TODO call `after_inherents` and check if we should apply extrinsincs here // - let block_timer = time::Instant::now(); let end_reason = self.apply_extrinsics(&mut block_builder, deadline, block_size_limit).await?; let (block, storage_changes, proof) = block_builder.build()?.into_inner(); @@ -352,14 +330,14 @@ where let proof = PR::into_proof(proof).map_err(|e| sp_blockchain::Error::Application(Box::new(e)))?; - self.print_summary(&block, end_reason, block_took, propose_with_timer.elapsed()); + self.print_summary(&block, end_reason, block_took, block_timer.elapsed()); Ok(Proposal { block, proof, storage_changes }) } /// Apply all inherents to the block. fn apply_inherents( &self, - block_builder: &mut sc_block_builder::BlockBuilder<'_, Block, C, B>, + block_builder: &mut sc_block_builder::BlockBuilder<'_, Block, C>, inherent_data: InherentData, ) -> Result<(), sp_blockchain::Error> { let create_inherents_start = time::Instant::now(); @@ -403,7 +381,7 @@ where /// Apply as many extrinsics as possible to the block. async fn apply_extrinsics( &self, - block_builder: &mut sc_block_builder::BlockBuilder<'_, Block, C, B>, + block_builder: &mut sc_block_builder::BlockBuilder<'_, Block, C>, deadline: time::Instant, block_size_limit: Option, ) -> Result { @@ -443,6 +421,11 @@ where let pending_tx = if let Some(pending_tx) = pending_iterator.next() { pending_tx } else { + debug!( + target: LOG_TARGET, + "No more transactions, proceeding with proposing." + ); + break EndProposingReason::NoMoreTransactions }; @@ -539,19 +522,24 @@ where } /// Prints a summary and does telemetry + metrics. + /// + /// - `block`: The block that was build. + /// - `end_reason`: Why did we stop producing the block? + /// - `block_took`: How long did it took to produce the actual block? + /// - `propose_took`: How long did the entire proposing took? 
fn print_summary( &self, block: &Block, end_reason: EndProposingReason, block_took: time::Duration, - propose_with_took: time::Duration, + propose_took: time::Duration, ) { let extrinsics = block.extrinsics(); self.metrics.report(|metrics| { metrics.number_of_transactions.set(extrinsics.len() as u64); metrics.block_constructed.observe(block_took.as_secs_f64()); metrics.report_end_proposing_reason(end_reason); - metrics.create_block_proposal_time.observe(propose_with_took.as_secs_f64()); + metrics.create_block_proposal_time.observe(propose_took.as_secs_f64()); }); let extrinsics_summary = if extrinsics.is_empty() { @@ -967,8 +955,12 @@ mod tests { // Exact block_limit, which includes: // 99 (header_size) + 718 (proof@initialize_block) + 246 (one Transfer extrinsic) let block_limit = { - let builder = - client.new_block_at(genesis_header.hash(), Default::default(), true).unwrap(); + let builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(genesis_header.hash()) + .with_parent_block_number(0) + .enable_proof_recording() + .build() + .unwrap(); builder.estimate_block_size(true) + extrinsics[0].encoded_size() }; let block = block_on(proposer.propose( diff --git a/substrate/client/block-builder/Cargo.toml b/substrate/client/block-builder/Cargo.toml index ff2f9635b7a2fae718d7a47deaa5e6e280ec2091..2492c4101b29a8fb2e2e49b6bec4bfa38b4a9a52 100644 --- a/substrate/client/block-builder/Cargo.toml +++ b/substrate/client/block-builder/Cargo.toml @@ -16,7 +16,6 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", features = [ "derive", ] } -sc-client-api = { path = "../api" } sp-api = { path = "../../primitives/api" } sp-block-builder = { path = "../../primitives/block-builder" } sp-blockchain = { path = "../../primitives/blockchain" } diff --git a/substrate/client/block-builder/src/lib.rs b/substrate/client/block-builder/src/lib.rs index 1878e762748040c158fdbe7875af1eb205b6048b..f62b941fdb1848e879e777b28164d45d6686e2df 100644 --- a/substrate/client/block-builder/src/lib.rs +++ b/substrate/client/block-builder/src/lib.rs @@ -29,56 +29,147 @@ use codec::Encode; use sp_api::{ - ApiExt, ApiRef, Core, ProvideRuntimeApi, StorageChanges, StorageProof, TransactionOutcome, + ApiExt, ApiRef, CallApiAt, Core, ProvideRuntimeApi, StorageChanges, StorageProof, + TransactionOutcome, }; -use sp_blockchain::{ApplyExtrinsicFailed, Error}; +use sp_blockchain::{ApplyExtrinsicFailed, Error, HeaderBackend}; use sp_core::traits::CallContext; use sp_runtime::{ legacy, traits::{Block as BlockT, Hash, HashingFor, Header as HeaderT, NumberFor, One}, Digest, }; +use std::marker::PhantomData; -use sc_client_api::backend; pub use sp_block_builder::BlockBuilder as BlockBuilderApi; -/// Used as parameter to [`BlockBuilderProvider`] to express if proof recording should be enabled. -/// -/// When `RecordProof::Yes` is given, all accessed trie nodes should be saved. These recorded -/// trie nodes can be used by a third party to proof this proposal without having access to the -/// full storage. -#[derive(Copy, Clone, PartialEq)] -pub enum RecordProof { - /// `Yes`, record a proof. - Yes, - /// `No`, don't record any proof. - No, +/// A builder for creating an instance of [`BlockBuilder`]. +pub struct BlockBuilderBuilder<'a, B, C> { + call_api_at: &'a C, + _phantom: PhantomData, } -impl RecordProof { - /// Returns if `Self` == `Yes`. 
- pub fn yes(&self) -> bool { - matches!(self, Self::Yes) +impl<'a, B, C> BlockBuilderBuilder<'a, B, C> +where + B: BlockT, +{ + /// Create a new instance of the builder. + /// + /// `call_api_at`: Something that implements [`CallApiAt`]. + pub fn new(call_api_at: &'a C) -> Self { + Self { call_api_at, _phantom: PhantomData } } -} -/// Will return [`RecordProof::No`] as default value. -impl Default for RecordProof { - fn default() -> Self { - Self::No + /// Specify the parent block to build on top of. + pub fn on_parent_block(self, parent_block: B::Hash) -> BlockBuilderBuilderStage1<'a, B, C> { + BlockBuilderBuilderStage1 { call_api_at: self.call_api_at, parent_block } } } -impl From for RecordProof { - fn from(val: bool) -> Self { - if val { - Self::Yes - } else { - Self::No +/// The second stage of the [`BlockBuilderBuilder`]. +/// +/// This type can not be instantiated directly. To get an instance of it +/// [`BlockBuilderBuilder::new`] needs to be used. +pub struct BlockBuilderBuilderStage1<'a, B: BlockT, C> { + call_api_at: &'a C, + parent_block: B::Hash, +} + +impl<'a, B, C> BlockBuilderBuilderStage1<'a, B, C> +where + B: BlockT, +{ + /// Fetch the parent block number from the given `header_backend`. + /// + /// The parent block number is used to initialize the block number of the new block. + /// + /// Returns an error if the parent block specified in + /// [`on_parent_block`](BlockBuilderBuilder::on_parent_block) does not exist. + pub fn fetch_parent_block_number>( + self, + header_backend: &H, + ) -> Result, Error> { + let parent_number = header_backend.number(self.parent_block)?.ok_or_else(|| { + Error::Backend(format!( + "Could not fetch block number for block: {:?}", + self.parent_block + )) + })?; + + Ok(BlockBuilderBuilderStage2 { + call_api_at: self.call_api_at, + enable_proof_recording: false, + inherent_digests: Default::default(), + parent_block: self.parent_block, + parent_number, + }) + } + + /// Provide the block number for the parent block directly. + /// + /// The parent block is specified in [`on_parent_block`](BlockBuilderBuilder::on_parent_block). + /// The parent block number is used to initialize the block number of the new block. + pub fn with_parent_block_number( + self, + parent_number: NumberFor, + ) -> BlockBuilderBuilderStage2<'a, B, C> { + BlockBuilderBuilderStage2 { + call_api_at: self.call_api_at, + enable_proof_recording: false, + inherent_digests: Default::default(), + parent_block: self.parent_block, + parent_number, } } } +/// The second stage of the [`BlockBuilderBuilder`]. +/// +/// This type can not be instantiated directly. To get an instance of it +/// [`BlockBuilderBuilder::new`] needs to be used. +pub struct BlockBuilderBuilderStage2<'a, B: BlockT, C> { + call_api_at: &'a C, + enable_proof_recording: bool, + inherent_digests: Digest, + parent_block: B::Hash, + parent_number: NumberFor, +} + +impl<'a, B: BlockT, C> BlockBuilderBuilderStage2<'a, B, C> { + /// Enable proof recording for the block builder. + pub fn enable_proof_recording(mut self) -> Self { + self.enable_proof_recording = true; + self + } + + /// Enable/disable proof recording for the block builder. + pub fn with_proof_recording(mut self, enable: bool) -> Self { + self.enable_proof_recording = enable; + self + } + + /// Build the block with the given inherent digests. + pub fn with_inherent_digests(mut self, inherent_digests: Digest) -> Self { + self.inherent_digests = inherent_digests; + self + } + + /// Create the instance of the [`BlockBuilder`]. 
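The first builder stage accepts the parent number either directly (`with_parent_block_number`, as the proposer does) or by looking it up through a `HeaderBackend` via `fetch_parent_block_number`. A hedged sketch of the lookup path, assuming the client also implements `HeaderBackend`; the helper name is purely illustrative:

```rust
use sc_block_builder::{BlockBuilderApi, BlockBuilderBuilder};
use sp_api::{CallApiAt, ProvideRuntimeApi};
use sp_blockchain::HeaderBackend;
use sp_runtime::traits::Block as BlockT;

/// Start a block builder on the current best block without knowing its number up front.
fn builder_on_best_block<Block, C>(client: &C) -> Result<(), sp_blockchain::Error>
where
    Block: BlockT,
    C: CallApiAt<Block> + ProvideRuntimeApi<Block> + HeaderBackend<Block>,
    C::Api: BlockBuilderApi<Block>,
{
    let best_hash = client.info().best_hash;

    let _builder = BlockBuilderBuilder::<Block, _>::new(client)
        .on_parent_block(best_hash)
        // Returns `Error::Backend(..)` if the hash is unknown to the header backend.
        .fetch_parent_block_number(client)?
        .build()?;

    Ok(())
}
```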
+ pub fn build(self) -> Result, Error> + where + C: CallApiAt + ProvideRuntimeApi, + C::Api: BlockBuilderApi, + { + BlockBuilder::new( + self.call_api_at, + self.parent_block, + self.parent_number, + self.enable_proof_recording, + self.inherent_digests, + ) + } +} + /// A block that was build by [`BlockBuilder`] plus some additional data. /// /// This additional data includes the `storage_changes`, these changes can be applied to the @@ -101,63 +192,34 @@ impl BuiltBlock { } } -/// Block builder provider -pub trait BlockBuilderProvider -where - Block: BlockT, - B: backend::Backend, - Self: Sized, - RA: ProvideRuntimeApi, -{ - /// Create a new block, built on top of `parent`. - /// - /// When proof recording is enabled, all accessed trie nodes are saved. - /// These recorded trie nodes can be used by a third party to proof the - /// output of this block builder without having access to the full storage. - fn new_block_at>( - &self, - parent: Block::Hash, - inherent_digests: Digest, - record_proof: R, - ) -> sp_blockchain::Result>; - - /// Create a new block, built on the head of the chain. - fn new_block( - &self, - inherent_digests: Digest, - ) -> sp_blockchain::Result>; -} - /// Utility for building new (valid) blocks from a stream of extrinsics. -pub struct BlockBuilder<'a, Block: BlockT, A: ProvideRuntimeApi, B> { +pub struct BlockBuilder<'a, Block: BlockT, C: ProvideRuntimeApi + 'a> { extrinsics: Vec, - api: ApiRef<'a, A::Api>, + api: ApiRef<'a, C::Api>, + call_api_at: &'a C, version: u32, parent_hash: Block::Hash, - backend: &'a B, /// The estimated size of the block header. estimated_header_size: usize, } -impl<'a, Block, A, B> BlockBuilder<'a, Block, A, B> +impl<'a, Block, C> BlockBuilder<'a, Block, C> where Block: BlockT, - A: ProvideRuntimeApi + 'a, - A::Api: BlockBuilderApi + ApiExt, - B: backend::Backend, + C: CallApiAt + ProvideRuntimeApi + 'a, + C::Api: BlockBuilderApi, { /// Create a new instance of builder based on the given `parent_hash` and `parent_number`. /// /// While proof recording is enabled, all accessed trie nodes are saved. /// These recorded trie nodes can be used by a third party to prove the /// output of this block builder without having access to the full storage. 
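Once constructed, the `BlockBuilder` is used as before: extrinsics are pushed and the resulting `BuiltBlock` is decomposed with `into_inner`, exactly as the proposer hunk above does. A small illustrative fragment; the function name and ownership choices are assumptions, not part of this change:

```rust
use sc_block_builder::{BlockBuilder, BlockBuilderApi};
use sp_api::{CallApiAt, ProvideRuntimeApi};
use sp_runtime::traits::Block as BlockT;

/// Push a batch of extrinsics and decompose the built block, as the proposer does.
fn seal_block<Block, C>(
    mut block_builder: BlockBuilder<'_, Block, C>,
    extrinsics: Vec<Block::Extrinsic>,
) -> Result<Block, sp_blockchain::Error>
where
    Block: BlockT,
    C: CallApiAt<Block> + ProvideRuntimeApi<Block>,
    C::Api: BlockBuilderApi<Block>,
{
    for xt in extrinsics {
        block_builder.push(xt)?;
    }

    // `into_inner()` splits the `BuiltBlock`; `proof` stays `None` unless proof
    // recording was enabled when the builder was created.
    let (block, _storage_changes, _proof) = block_builder.build()?.into_inner();
    Ok(block)
}
```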
- pub fn new( - api: &'a A, + fn new( + call_api_at: &'a C, parent_hash: Block::Hash, parent_number: NumberFor, - record_proof: RecordProof, + record_proof: bool, inherent_digests: Digest, - backend: &'a B, ) -> Result { let header = <::Header as HeaderT>::new( parent_number + One::one(), @@ -169,9 +231,9 @@ where let estimated_header_size = header.encoded_size(); - let mut api = api.runtime_api(); + let mut api = call_api_at.runtime_api(); - if record_proof.yes() { + if record_proof { api.record_proof(); } @@ -188,8 +250,8 @@ where extrinsics: Vec::new(), api, version, - backend, estimated_header_size, + call_api_at, }) } @@ -241,7 +303,7 @@ where let proof = self.api.extract_proof(); - let state = self.backend.state_at(self.parent_hash)?; + let state = self.call_api_at.state_at(self.parent_hash)?; let storage_changes = self .api @@ -300,22 +362,18 @@ mod tests { #[test] fn block_building_storage_proof_does_not_include_runtime_by_default() { let builder = substrate_test_runtime_client::TestClientBuilder::new(); - let backend = builder.backend(); let client = builder.build(); let genesis_hash = client.info().best_hash; - let block = BlockBuilder::new( - &client, - genesis_hash, - client.info().best_number, - RecordProof::Yes, - Default::default(), - &*backend, - ) - .unwrap() - .build() - .unwrap(); + let block = BlockBuilderBuilder::new(&client) + .on_parent_block(genesis_hash) + .with_parent_block_number(0) + .enable_proof_recording() + .build() + .unwrap() + .build() + .unwrap(); let proof = block.proof.expect("Proof is build on request"); let genesis_state_root = client.header(genesis_hash).unwrap().unwrap().state_root; @@ -333,18 +391,15 @@ mod tests { #[test] fn failing_extrinsic_rolls_back_changes_in_storage_proof() { let builder = substrate_test_runtime_client::TestClientBuilder::new(); - let backend = builder.backend(); let client = builder.build(); + let genesis_hash = client.info().best_hash; - let mut block_builder = BlockBuilder::new( - &client, - client.info().best_hash, - client.info().best_number, - RecordProof::Yes, - Default::default(), - &*backend, - ) - .unwrap(); + let mut block_builder = BlockBuilderBuilder::new(&client) + .on_parent_block(genesis_hash) + .with_parent_block_number(0) + .enable_proof_recording() + .build() + .unwrap(); block_builder.push(ExtrinsicBuilder::new_read_and_panic(8).build()).unwrap_err(); @@ -352,15 +407,12 @@ mod tests { let proof_with_panic = block.proof.expect("Proof is build on request").encoded_size(); - let mut block_builder = BlockBuilder::new( - &client, - client.info().best_hash, - client.info().best_number, - RecordProof::Yes, - Default::default(), - &*backend, - ) - .unwrap(); + let mut block_builder = BlockBuilderBuilder::new(&client) + .on_parent_block(genesis_hash) + .with_parent_block_number(0) + .enable_proof_recording() + .build() + .unwrap(); block_builder.push(ExtrinsicBuilder::new_read(8).build()).unwrap(); @@ -368,17 +420,14 @@ mod tests { let proof_without_panic = block.proof.expect("Proof is build on request").encoded_size(); - let block = BlockBuilder::new( - &client, - client.info().best_hash, - client.info().best_number, - RecordProof::Yes, - Default::default(), - &*backend, - ) - .unwrap() - .build() - .unwrap(); + let block = BlockBuilderBuilder::new(&client) + .on_parent_block(genesis_hash) + .with_parent_block_number(0) + .enable_proof_recording() + .build() + .unwrap() + .build() + .unwrap(); let proof_empty_block = block.proof.expect("Proof is build on request").encoded_size(); diff --git 
a/substrate/client/chain-spec/Cargo.toml b/substrate/client/chain-spec/Cargo.toml index e326d84ae50c6c6172965160a8ecd203319f3c0a..5b7cdda8ebe4fc6288df89f546a5cb5ade5627de 100644 --- a/substrate/client/chain-spec/Cargo.toml +++ b/substrate/client/chain-spec/Cargo.toml @@ -13,15 +13,27 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } memmap2 = "0.5.0" serde = { version = "1.0.188", features = ["derive"] } -serde_json = "1.0.107" +serde_json = "1.0.108" sc-client-api = { path = "../api" } sc-chain-spec-derive = { path = "derive" } sc-executor = { path = "../executor" } +sp-io = { default-features = false, path = "../../primitives/io" } sc-network = { path = "../network" } sc-telemetry = { path = "../telemetry" } sp-blockchain = { path = "../../primitives/blockchain" } sp-core = { path = "../../primitives/core" } +sp-genesis-builder = { path = "../../primitives/genesis-builder" } sp-runtime = { path = "../../primitives/runtime" } sp-state-machine = { path = "../../primitives/state-machine" } +log = { version = "0.4.17", default-features = false } +array-bytes = { version = "6.1" } +docify = "0.2.0" + +[dev-dependencies] +substrate-test-runtime = { path = "../../test-utils/runtime" } +sp-keyring = { path = "../../primitives/keyring" } +sp-application-crypto = { default-features = false, path = "../../primitives/application-crypto", features = ["serde"] } +sp-consensus-babe = { default-features = false, path = "../../primitives/consensus/babe", features = ["serde"] } diff --git a/substrate/client/chain-spec/README.md b/substrate/client/chain-spec/README.md index dad1662d3230ec11b1bafa39fd78bf7d64d1ac5f..9f746d2d4ce7b5db0614e6864650be87c7d98836 100644 --- a/substrate/client/chain-spec/README.md +++ b/substrate/client/chain-spec/README.md @@ -1,92 +1,6 @@ Substrate chain configurations. -This crate contains structs and utilities to declare -a runtime-specific configuration file (a.k.a chain spec). - -Basic chain spec type containing all required parameters is -[`ChainSpec`](https://docs.rs/sc-chain-spec/latest/sc_chain_spec/struct.GenericChainSpec.html). It can be extended with -additional options that contain configuration specific to your chain. -Usually the extension is going to be an amalgamate of types exposed -by Substrate core modules. To allow the core modules to retrieve -their configuration from your extension you should use `ChainSpecExtension` -macro exposed by this crate. - -```rust -use std::collections::HashMap; -use sc_chain_spec::{GenericChainSpec, ChainSpecExtension}; - -#[derive(Clone, Debug, serde::Serialize, serde::Deserialize, ChainSpecExtension)] -pub struct MyExtension { - pub known_blocks: HashMap, -} - -pub type MyChainSpec = GenericChainSpec; -``` - -Some parameters may require different values depending on the -current blockchain height (a.k.a. forks). You can use `ChainSpecGroup` -macro and provided [`Forks`](https://docs.rs/sc-chain-spec/latest/sc_chain_spec/struct.Forks.html) structure to put -such parameters to your chain spec. -This will allow to override a single parameter starting at specific -block number. 
- -```rust -use sc_chain_spec::{Forks, ChainSpecGroup, ChainSpecExtension, GenericChainSpec}; - -#[derive(Clone, Debug, serde::Serialize, serde::Deserialize, ChainSpecGroup)] -pub struct ClientParams { - max_block_size: usize, - max_extrinsic_size: usize, -} - -#[derive(Clone, Debug, serde::Serialize, serde::Deserialize, ChainSpecGroup)] -pub struct PoolParams { - max_transaction_size: usize, -} - -#[derive(Clone, Debug, serde::Serialize, serde::Deserialize, ChainSpecGroup, ChainSpecExtension)] -pub struct Extension { - pub client: ClientParams, - pub pool: PoolParams, -} - -pub type BlockNumber = u64; - -/// A chain spec supporting forkable `ClientParams`. -pub type MyChainSpec1 = GenericChainSpec>; - -/// A chain spec supporting forkable `Extension`. -pub type MyChainSpec2 = GenericChainSpec>; -``` - -It's also possible to have a set of parameters that is allowed to change -with block numbers (i.e. is forkable), and another set that is not subject to changes. -This is also possible by declaring an extension that contains `Forks` within it. - - -```rust -use serde::{Serialize, Deserialize}; -use sc_chain_spec::{Forks, GenericChainSpec, ChainSpecGroup, ChainSpecExtension}; - -#[derive(Clone, Debug, Serialize, Deserialize, ChainSpecGroup)] -pub struct ClientParams { - max_block_size: usize, - max_extrinsic_size: usize, -} - -#[derive(Clone, Debug, Serialize, Deserialize, ChainSpecGroup)] -pub struct PoolParams { - max_transaction_size: usize, -} - -#[derive(Clone, Debug, Serialize, Deserialize, ChainSpecExtension)] -pub struct Extension { - pub client: ClientParams, - #[forks] - pub pool: Forks, -} - -pub type MyChainSpec = GenericChainSpec; -``` +This crate contains structs and utilities to declare a runtime-specific configuration file (a.k.a chain spec). +Refer to crate documentation for details. 
License: GPL-3.0-or-later WITH Classpath-exception-2.0 diff --git a/substrate/client/chain-spec/res/raw_no_code.json b/substrate/client/chain-spec/res/raw_no_code.json new file mode 100644 index 0000000000000000000000000000000000000000..bac7c25582f965c0b4684777c55a87c54cc1cd57 --- /dev/null +++ b/substrate/client/chain-spec/res/raw_no_code.json @@ -0,0 +1,18 @@ +{ + "name": "TestName", + "id": "test_id", + "chainType": "Local", + "bootNodes": [], + "telemetryEndpoints": null, + "protocolId": null, + "properties": null, + "codeSubstitutes": {}, + "genesis": { + "raw": { + "top": { + "0x3a636f6465": "0x010101" + }, + "childrenDefault": {} + } + } +} diff --git a/substrate/client/chain-spec/res/raw_with_code.json b/substrate/client/chain-spec/res/raw_with_code.json new file mode 100644 index 0000000000000000000000000000000000000000..c12cec57ba7a276516e228e97c27a749aef54d65 --- /dev/null +++ b/substrate/client/chain-spec/res/raw_with_code.json @@ -0,0 +1,19 @@ +{ + "name": "TestName", + "id": "test_id", + "chainType": "Local", + "bootNodes": [], + "telemetryEndpoints": null, + "protocolId": null, + "properties": null, + "codeSubstitutes": {}, + "genesis": { + "raw": { + "top": { + "0x3a636f6465": "0x010101" + }, + "childrenDefault": {} + } + }, + "code": "0x060708" +} diff --git a/substrate/client/chain-spec/res/substrate_test_runtime_from_config.json b/substrate/client/chain-spec/res/substrate_test_runtime_from_config.json new file mode 100644 index 0000000000000000000000000000000000000000..d16f26d12540ccaf74442b97002e658d2231eb4d --- /dev/null +++ b/substrate/client/chain-spec/res/substrate_test_runtime_from_config.json @@ -0,0 +1,128 @@ +{ + "name": "TestName", + "id": "test_id", + "chainType": "Local", + "bootNodes": [], + "telemetryEndpoints": null, + "protocolId": null, + "properties": null, + "codeSubstitutes": {}, + "genesis": { + "runtimeGenesis": { + "config": { + "babe": { + "authorities": [ + [ + "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", + 1 + ], + [ + "5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty", + 1 + ], + [ + "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y", + 1 + ] + ], + "epochConfig": { + "allowed_slots": "PrimaryAndSecondaryPlainSlots", + "c": [ + 3, + 10 + ] + } + }, + "balances": { + "balances": [ + [ + "5D34dL5prEUaGNQtPPZ3yN5Y6BnkfXunKXXz6fo7ZJbLwRRH", + 100000000000000000 + ], + [ + "5GBNeWRhZc2jXu7D55rBimKYDk8PGk8itRYFTPfC8RJLKG5o", + 100000000000000000 + ], + [ + "5Dfis6XL8J2P6JHUnUtArnFWndn62SydeP8ee8sG2ky9nfm9", + 100000000000000000 + ], + [ + "5F4H97f7nQovyrbiq4ZetaaviNwThSVcFobcA5aGab6167dK", + 100000000000000000 + ], + [ + "5DiDShBWa1fQx6gLzpf3SFBhMinCoyvHM1BWjPNsmXS8hkrW", + 100000000000000000 + ], + [ + "5EFb84yH9tpcFuiKUcsmdoF7xeeY3ajG1ZLQimxQoFt9HMKR", + 100000000000000000 + ], + [ + "5DZLHESsfGrJ5YzT3HuRPXsSNb589xQ4Unubh1mYLodzKdVY", + 100000000000000000 + ], + [ + "5GHJzqvG6tXnngCpG7B12qjUvbo5e4e9z8Xjidk3CQZHxTPZ", + 100000000000000000 + ], + [ + "5CUnSsgAyLND3bxxnfNhgWXSe9Wn676JzLpGLgyJv858qhoX", + 100000000000000000 + ], + [ + "5CVKn7HAZW1Ky4r7Vkgsr7VEW88C2sHgUNDiwHY9Ct2hjU8q", + 100000000000000000 + ], + [ + "5H673aukQ4PeDe1U2nuv1bi32xDEziimh3PZz7hDdYUB7TNz", + 100000000000000000 + ], + [ + "5HTe9L15LJryjUAt1jZXZCBPnzbbGnpvFwbjE3NwCWaAqovf", + 100000000000000000 + ], + [ + "5D7LFzGpMwHPyDBavkRbWSKWTtJhCaPPZ379wWLT23bJwXJz", + 100000000000000000 + ], + [ + "5CLepMARnEgtVR1EkUuJVUvKh97gzergpSxUU3yKGx1v6EwC", + 100000000000000000 + ], + [ + "5Chb2UhfvZpmjjEziHbFbotM4quX32ZscRV6QJBt1rUKzz51", + 100000000000000000 + ], + [ + 
"5HmRp3i3ZZk7xsAvbi8hyXVP6whSMnBJGebVC4FsiZVhx52e", + 100000000000000000 + ], + [ + "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", + 100000000000000000 + ], + [ + "5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty", + 100000000000000000 + ], + [ + "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y", + 100000000000000000 + ] + ] + }, + "substrateTest": { + "authorities": [ + "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", + "5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty", + "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y" + ] + }, + "system": {} + }, + "code": "0x0" + } + } +} diff --git a/substrate/client/chain-spec/res/substrate_test_runtime_from_config_raw.json b/substrate/client/chain-spec/res/substrate_test_runtime_from_config_raw.json new file mode 100644 index 0000000000000000000000000000000000000000..f033163ab19bcd942170d116831c6cff0a8c5abc --- /dev/null +++ b/substrate/client/chain-spec/res/substrate_test_runtime_from_config_raw.json @@ -0,0 +1,53 @@ +{ + "name": "TestName", + "id": "test_id", + "chainType": "Local", + "bootNodes": [], + "telemetryEndpoints": null, + "protocolId": null, + "properties": null, + "codeSubstitutes": {}, + "genesis": { + "raw": { + "top": { + "0x00771836bebdd29870ff246d305c578c4e7b9012096b41c4eb3aaf947f6ea429": "0x0000", + "0x00771836bebdd29870ff246d305c578c5e0621c4869aa60c02be9adcc98a0d1d": "0x0cd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d8eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a4890b5ab205c6974c9ea841be688864633dc9ca8a357843eeacf2314649965fe22", + "0x1cb6f36e027abb2091cfb5110ab5087f4e7b9012096b41c4eb3aaf947f6ea429": "0x0000", + "0x1cb6f36e027abb2091cfb5110ab5087f5e0621c4869aa60c02be9adcc98a0d1d": "0x0cd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d01000000000000008eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48010000000000000090b5ab205c6974c9ea841be688864633dc9ca8a357843eeacf2314649965fe220100000000000000", + "0x1cb6f36e027abb2091cfb5110ab5087f66e8f035c8adbe7f1547b43c51e6f8a4": "0x00000000", + "0x1cb6f36e027abb2091cfb5110ab5087faacf00b9b41fda7a9268821c2a2b3e4c": "0x0cd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d01000000000000008eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48010000000000000090b5ab205c6974c9ea841be688864633dc9ca8a357843eeacf2314649965fe220100000000000000", + "0x1cb6f36e027abb2091cfb5110ab5087fdc6b171b77304263c292cc3ea5ed31ef": "0x03000000000000000a0000000000000001", + "0x26aa394eea5630e07c48ae0c9558cef74e7b9012096b41c4eb3aaf947f6ea429": "0x0000", + "0x26aa394eea5630e07c48ae0c9558cef75684a022a34dd8bfa2baaf44f172b710": "0x01", + "0x26aa394eea5630e07c48ae0c9558cef78a42f33323cb5ced3b44dd825fda9fcc": "0x4545454545454545454545454545454545454545454545454545454545454545", + "0x26aa394eea5630e07c48ae0c9558cef7a44704b568d21667356a5a050c118746bb1bdbcacd6ac9340000000000000000": "0x4545454545454545454545454545454545454545454545454545454545454545", + "0x26aa394eea5630e07c48ae0c9558cef7a7fd6c28836b9a28522dc924110cf439": "0x01", + "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da901cae4e3edfbb32c91ed3f01ab964f4eeeab50338d8e5176d3141802d7b010a55dadcd5f23cf8aaafa724627e967e90e": "0x000000000000000000000000010000000000000000008a5d784563010000000000000000000000000000000000000000000000000000000000000080", + "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da91b614bd4a126f2d5d294e9a8af9da25248d7e931307afb4b68d8d565d4c66e00d856c6d65f5fed6bb82dcfb60e936c67": 
"0x000000000000000000000000010000000000000000008a5d784563010000000000000000000000000000000000000000000000000000000000000080", + "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da94b21aff9fe1e8b2fc4b0775b8cbeff28ba8e2c7594dd74730f3ca835e95455d199261897edc9735d602ea29615e2b10b": "0x000000000000000000000000010000000000000000008a5d784563010000000000000000000000000000000000000000000000000000000000000080", + "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da94f9aea1afa791265fae359272badc1cf8eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48": "0x000000000000000000000000010000000000000000008a5d784563010000000000000000000000000000000000000000000000000000000000000080", + "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da95786a2916fcb81e1bd5dcd81e0d2452884617f575372edb5a36d85c04cdf2e4699f96fe33eb5f94a28c041b88e398d0c": "0x000000000000000000000000010000000000000000008a5d784563010000000000000000000000000000000000000000000000000000000000000080", + "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da95b8542d9672c7b7e779cc7c1e6b605691c2115d06120ea2bee32dd601d02f36367564e7ddf84ae2717ca3f097459652e": "0x000000000000000000000000010000000000000000008a5d784563010000000000000000000000000000000000000000000000000000000000000080", + "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da996c30bdbfab640838e6b6d3c33ab4adb4211b79e34ee8072eab506edd4b93a7b85a14c9a05e5cdd056d98e7dbca87730": "0x000000000000000000000000010000000000000000008a5d784563010000000000000000000000000000000000000000000000000000000000000080", + "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da99dc65b1339ec388fbf2ca0cdef51253512c6cfd663203ea16968594f24690338befd906856c4d2f4ef32dad578dba20c": "0x000000000000000000000000010000000000000000008a5d784563010000000000000000000000000000000000000000000000000000000000000080", + "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da99e6eb5abd62f5fd54793da91a47e6af6125d57171ff9241f07acaa1bb6a6103517965cf2cd00e643b27e7599ebccba70": "0x000000000000000000000000010000000000000000008a5d784563010000000000000000000000000000000000000000000000000000000000000080", + "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da9b0edae20838083f2cde1c4080db8cf8090b5ab205c6974c9ea841be688864633dc9ca8a357843eeacf2314649965fe22": "0x000000000000000000000000010000000000000000008a5d784563010000000000000000000000000000000000000000000000000000000000000080", + "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da9d0052993b6f3bd0544fd1f5e4125b9fbde3e789ecd53431fe5c06c12b72137153496dace35c695b5f4d7b41f7ed5763b": "0x000000000000000000000000010000000000000000008a5d784563010000000000000000000000000000000000000000000000000000000000000080", + "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da9d6b7e9a5f12bc571053265dade10d3b4b606fc73f57f03cdb4c932d475ab426043e429cecc2ffff0d2672b0df8398c48": "0x000000000000000000000000010000000000000000008a5d784563010000000000000000000000000000000000000000000000000000000000000080", + "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da9de1e86a9a8c739864cf3cc5ec2bea59fd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d": "0x000000000000000000000000010000000000000000008a5d784563010000000000000000000000000000000000000000000000000000000000000080", + "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da9e1a35f56ee295d39287cbffcfc60c4b346f136b564e1fad55031404dd84e5cd3fa76bfe7cc7599b39d38fd06663bbc0a": 
"0x000000000000000000000000010000000000000000008a5d784563010000000000000000000000000000000000000000000000000000000000000080", + "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da9e2c1dc507e2035edbbd8776c440d870460c57f0008067cc01c5ff9eb2e2f9b3a94299a915a91198bd1021a6c55596f57": "0x000000000000000000000000010000000000000000008a5d784563010000000000000000000000000000000000000000000000000000000000000080", + "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da9eca0e653a94f4080f6311b4e7b6934eb2afba9278e30ccf6a6ceb3a8b6e336b70068f045c666f2e7f4f9cc5f47db8972": "0x000000000000000000000000010000000000000000008a5d784563010000000000000000000000000000000000000000000000000000000000000080", + "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da9ee8bf7ef90fc56a8aa3b90b344c599550c29b161e27ff8ba45bf6bad4711f326fc506a8803453a4d7e3158e993495f10": "0x000000000000000000000000010000000000000000008a5d784563010000000000000000000000000000000000000000000000000000000000000080", + "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da9f5d6f1c082fe63eec7a71fcad00f4a892e3d43b7b0d04e776e69e7be35247cecdac65504c579195731eaf64b7940966e": "0x000000000000000000000000010000000000000000008a5d784563010000000000000000000000000000000000000000000000000000000000000080", + "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da9fbf0818841edf110e05228a6379763c4fc3c37459d9bdc61f58a5ebc01e9e2305a19d390c0543dc733861ec3cf1de01f": "0x000000000000000000000000010000000000000000008a5d784563010000000000000000000000000000000000000000000000000000000000000080", + "0x26aa394eea5630e07c48ae0c9558cef7f9cce9c888469bb1a0dceaa129672ef8": "0x0000", + "0x3a636f6465": "0x0", + "0x3a65787472696e7369635f696e646578": "0x00000000", + "0xc2261276cc9d1f8598ea4b6a74b15c2f4e7b9012096b41c4eb3aaf947f6ea429": "0x0100", + "0xc2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80": "0x00003ef1ee275e1a" + }, + "childrenDefault": {} + } + } +} diff --git a/substrate/client/chain-spec/res/substrate_test_runtime_from_patch.json b/substrate/client/chain-spec/res/substrate_test_runtime_from_patch.json new file mode 100644 index 0000000000000000000000000000000000000000..6cef1dac7ac3d56df16b9e65f83d0908971c1c86 --- /dev/null +++ b/substrate/client/chain-spec/res/substrate_test_runtime_from_patch.json @@ -0,0 +1,32 @@ +{ + "name": "TestName", + "id": "test_id", + "chainType": "Local", + "bootNodes": [], + "telemetryEndpoints": null, + "protocolId": null, + "properties": null, + "codeSubstitutes": {}, + "genesis": { + "runtimeGenesis": { + "patch": { + "babe": { + "epochConfig": { + "allowed_slots": "PrimaryAndSecondaryPlainSlots", + "c": [ + 7, + 10 + ] + } + }, + "substrateTest": { + "authorities": [ + "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL", + "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY" + ] + } + }, + "code": "0x0" + } + } +} diff --git a/substrate/client/chain-spec/res/substrate_test_runtime_from_patch_raw.json b/substrate/client/chain-spec/res/substrate_test_runtime_from_patch_raw.json new file mode 100644 index 0000000000000000000000000000000000000000..9f4b2731649f346d33a8f91d448840956786ac72 --- /dev/null +++ b/substrate/client/chain-spec/res/substrate_test_runtime_from_patch_raw.json @@ -0,0 +1,32 @@ +{ + "name": "TestName", + "id": "test_id", + "chainType": "Local", + "bootNodes": [], + "telemetryEndpoints": null, + "protocolId": null, + "properties": null, + "codeSubstitutes": {}, + "genesis": { + "raw": { + "top": { + 
"0x00771836bebdd29870ff246d305c578c4e7b9012096b41c4eb3aaf947f6ea429": "0x0000", + "0x00771836bebdd29870ff246d305c578c5e0621c4869aa60c02be9adcc98a0d1d": "0x081cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07cd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d", + "0x1cb6f36e027abb2091cfb5110ab5087f4e7b9012096b41c4eb3aaf947f6ea429": "0x0000", + "0x1cb6f36e027abb2091cfb5110ab5087f66e8f035c8adbe7f1547b43c51e6f8a4": "0x00000000", + "0x1cb6f36e027abb2091cfb5110ab5087fdc6b171b77304263c292cc3ea5ed31ef": "0x07000000000000000a0000000000000001", + "0x26aa394eea5630e07c48ae0c9558cef74e7b9012096b41c4eb3aaf947f6ea429": "0x0000", + "0x26aa394eea5630e07c48ae0c9558cef75684a022a34dd8bfa2baaf44f172b710": "0x01", + "0x26aa394eea5630e07c48ae0c9558cef78a42f33323cb5ced3b44dd825fda9fcc": "0x4545454545454545454545454545454545454545454545454545454545454545", + "0x26aa394eea5630e07c48ae0c9558cef7a44704b568d21667356a5a050c118746bb1bdbcacd6ac9340000000000000000": "0x4545454545454545454545454545454545454545454545454545454545454545", + "0x26aa394eea5630e07c48ae0c9558cef7a7fd6c28836b9a28522dc924110cf439": "0x01", + "0x26aa394eea5630e07c48ae0c9558cef7f9cce9c888469bb1a0dceaa129672ef8": "0x0000", + "0x3a636f6465": "0x0", + "0x3a65787472696e7369635f696e646578": "0x00000000", + "0xc2261276cc9d1f8598ea4b6a74b15c2f4e7b9012096b41c4eb3aaf947f6ea429": "0x0100", + "0xc2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80": "0x0000000000000000" + }, + "childrenDefault": {} + } + } +} diff --git a/substrate/client/chain-spec/src/chain_spec.rs b/substrate/client/chain-spec/src/chain_spec.rs index 96e36d8399ed5de6328553200d181c0a661c422d..8d97d941022978198e14015aaa365f12d9b218b6 100644 --- a/substrate/client/chain-spec/src/chain_spec.rs +++ b/substrate/client/chain-spec/src/chain_spec.rs @@ -18,8 +18,10 @@ //! Substrate chain configurations. 
#![warn(missing_docs)] - -use crate::{extension::GetExtension, ChainType, Properties, RuntimeGenesis}; +use crate::{ + extension::GetExtension, genesis_config_builder::HostFunctions, ChainType, + GenesisConfigBuilderRuntimeCaller as RuntimeCaller, Properties, RuntimeGenesis, +}; use sc_network::config::MultiaddrWithPeerId; use sc_telemetry::TelemetryEndpoints; use serde::{Deserialize, Serialize}; @@ -29,13 +31,32 @@ use sp_core::{ Bytes, }; use sp_runtime::BuildStorage; -use std::{borrow::Cow, collections::BTreeMap, fs::File, path::PathBuf, sync::Arc}; +use std::{ + borrow::Cow, + collections::{BTreeMap, VecDeque}, + fs::File, + marker::PhantomData, + path::PathBuf, + sync::Arc, +}; + +#[derive(Serialize, Deserialize, Clone)] +#[serde(rename_all = "camelCase")] +enum GenesisBuildAction { + Patch(json::Value), + Full(json::Value), +} +#[allow(deprecated)] enum GenesisSource { File(PathBuf), Binary(Cow<'static, [u8]>), - Factory(Arc G + Send + Sync>), + /// factory function + code + //Factory and G type parameter shall be removed togheter with `ChainSpec::from_genesis` + Factory(Arc G + Send + Sync>, Vec), Storage(Storage), + /// build action + code + GenesisBuilderApi(GenesisBuildAction, Vec), } impl Clone for GenesisSource { @@ -43,14 +64,17 @@ impl Clone for GenesisSource { match *self { Self::File(ref path) => Self::File(path.clone()), Self::Binary(ref d) => Self::Binary(d.clone()), - Self::Factory(ref f) => Self::Factory(f.clone()), + Self::Factory(ref f, ref c) => Self::Factory(f.clone(), c.clone()), Self::Storage(ref s) => Self::Storage(s.clone()), + Self::GenesisBuilderApi(ref s, ref c) => Self::GenesisBuilderApi(s.clone(), c.clone()), } } } impl GenesisSource { fn resolve(&self) -> Result, String> { + /// helper container for deserializing genesis from the JSON file (ChainSpec JSON file is + /// also supported here) #[derive(Serialize, Deserialize)] struct GenesisContainer { genesis: Genesis, @@ -79,39 +103,43 @@ impl GenesisSource { .map_err(|e| format!("Error parsing embedded file: {}", e))?; Ok(genesis.genesis) }, - Self::Factory(f) => Ok(Genesis::Runtime(f())), - Self::Storage(storage) => { - let top = storage - .top - .iter() - .map(|(k, v)| (StorageKey(k.clone()), StorageData(v.clone()))) - .collect(); - - let children_default = storage - .children_default - .iter() - .map(|(k, child)| { - ( - StorageKey(k.clone()), - child - .data - .iter() - .map(|(k, v)| (StorageKey(k.clone()), StorageData(v.clone()))) - .collect(), - ) - }) - .collect(); - - Ok(Genesis::Raw(RawGenesis { top, children_default })) - }, + Self::Factory(f, code) => Ok(Genesis::RuntimeAndCode(RuntimeInnerWrapper { + runtime: f(), + code: code.clone(), + })), + Self::Storage(storage) => Ok(Genesis::Raw(RawGenesis::from(storage.clone()))), + Self::GenesisBuilderApi(GenesisBuildAction::Full(config), code) => + Ok(Genesis::RuntimeGenesis(RuntimeGenesisInner { + json_blob: RuntimeGenesisConfigJson::Config(config.clone()), + code: code.clone(), + })), + Self::GenesisBuilderApi(GenesisBuildAction::Patch(patch), code) => + Ok(Genesis::RuntimeGenesis(RuntimeGenesisInner { + json_blob: RuntimeGenesisConfigJson::Patch(patch.clone()), + code: code.clone(), + })), } } } -impl BuildStorage for ChainSpec { +impl BuildStorage for ChainSpec +where + EHF: HostFunctions, +{ fn assimilate_storage(&self, storage: &mut Storage) -> Result<(), String> { match self.genesis.resolve()? 
{ - Genesis::Runtime(gc) => gc.assimilate_storage(storage), + #[allow(deprecated)] + Genesis::Runtime(runtime_genesis_config) => { + runtime_genesis_config.assimilate_storage(storage)?; + }, + #[allow(deprecated)] + Genesis::RuntimeAndCode(RuntimeInnerWrapper { + runtime: runtime_genesis_config, + code, + }) => { + runtime_genesis_config.assimilate_storage(storage)?; + storage.top.insert(sp_core::storage::well_known_keys::CODE.to_vec(), code); + }, Genesis::Raw(RawGenesis { top: map, children_default: children_map }) => { storage.top.extend(map.into_iter().map(|(k, v)| (k.0, v.0))); children_map.into_iter().for_each(|(k, v)| { @@ -123,13 +151,37 @@ impl BuildStorage for ChainSpec { .data .extend(v.into_iter().map(|(k, v)| (k.0, v.0))); }); - Ok(()) }, // The `StateRootHash` variant exists as a way to keep note that other clients support // it, but Substrate itself isn't capable of loading chain specs with just a hash at the // moment. - Genesis::StateRootHash(_) => Err("Genesis storage in hash format not supported".into()), - } + Genesis::StateRootHash(_) => + return Err("Genesis storage in hash format not supported".into()), + Genesis::RuntimeGenesis(RuntimeGenesisInner { + json_blob: RuntimeGenesisConfigJson::Config(config), + code, + }) => { + RuntimeCaller::::new(&code[..]) + .get_storage_for_config(config)? + .assimilate_storage(storage)?; + storage + .top + .insert(sp_core::storage::well_known_keys::CODE.to_vec(), code.clone()); + }, + Genesis::RuntimeGenesis(RuntimeGenesisInner { + json_blob: RuntimeGenesisConfigJson::Patch(patch), + code, + }) => { + RuntimeCaller::::new(&code[..]) + .get_storage_for_patch(patch)? + .assimilate_storage(storage)?; + storage + .top + .insert(sp_core::storage::well_known_keys::CODE.to_vec(), code.clone()); + }, + }; + + Ok(()) } } @@ -144,20 +196,98 @@ pub struct RawGenesis { pub children_default: BTreeMap, } +impl From for RawGenesis { + fn from(value: sp_core::storage::Storage) -> Self { + Self { + top: value.top.into_iter().map(|(k, v)| (StorageKey(k), StorageData(v))).collect(), + children_default: value + .children_default + .into_iter() + .map(|(sk, child)| { + ( + StorageKey(sk), + child + .data + .into_iter() + .map(|(k, v)| (StorageKey(k), StorageData(v))) + .collect(), + ) + }) + .collect(), + } + } +} + +/// Inner representation of [`Genesis::RuntimeGenesis`] format +#[derive(Serialize, Deserialize, Debug)] +struct RuntimeGenesisInner { + /// Runtime wasm code, expected to be hex-encoded in JSON. + /// The code shall be capable of parsing `json_blob`. + #[serde(default, with = "sp_core::bytes")] + code: Vec, + /// The patch or full representation of runtime's `RuntimeGenesisConfig` struct. + #[serde(flatten)] + json_blob: RuntimeGenesisConfigJson, +} + +/// Represents two possible variants of the contained JSON blob for the +/// [`Genesis::RuntimeGenesis`] format. +#[derive(Serialize, Deserialize, Debug)] +#[serde(rename_all = "camelCase")] +enum RuntimeGenesisConfigJson { + /// Represents the explicit and comprehensive runtime genesis config in JSON format. + /// The contained object is a JSON blob that can be parsed by a compatible runtime. + /// + /// Using a full config is useful for when someone wants to ensure that a change in the runtime + /// makes the deserialization fail and not silently add some default values. + Config(json::Value), + /// Represents a patch for the default runtime genesis config in JSON format which is + /// essentially a list of keys that are to be customized in runtime genesis config. 
+ /// The contained value is a JSON blob that can be parsed by a compatible runtime. + Patch(json::Value), +} + +/// Inner variant wrapper for deprecated runtime. +#[derive(Serialize, Deserialize, Debug)] +struct RuntimeInnerWrapper { + /// The native `RuntimeGenesisConfig` struct. + runtime: G, + /// Runtime code. + #[serde(with = "sp_core::bytes")] + code: Vec, +} + +/// Represents the different formats of the genesis state within the chain spec JSON blob. #[derive(Serialize, Deserialize)] #[serde(rename_all = "camelCase")] #[serde(deny_unknown_fields)] enum Genesis { + /// (Deprecated) Contains the JSON representation of G (the native type representing the + /// runtime's `RuntimeGenesisConfig` struct) (will be removed with `ChainSpec::from_genesis`) + /// without the runtime code. It is required to deserialize the legacy chainspecs generated + /// with the `ChainSpec::from_genesis` method. Runtime(G), + /// (Deprecated) Contains the JSON representation of G (the native type representing the + /// runtime's `RuntimeGenesisConfig` struct) (will be removed with `ChainSpec::from_genesis`) + /// and the runtime code. It is required to create and deserialize JSON chainspecs created with + /// the deprecated `ChainSpec::from_genesis` method. + RuntimeAndCode(RuntimeInnerWrapper), + /// The genesis storage as raw data. Typically raw key-value entries in state. Raw(RawGenesis), /// State root hash of the genesis storage. StateRootHash(StorageData), + /// Represents the runtime genesis config in JSON format together with the runtime code. + RuntimeGenesis(RuntimeGenesisInner), } /// A configuration of a client. Does not include runtime storage initialization. +/// Note: the `genesis` field is ignored due to the way the chain specification is serialized into +/// a JSON file. Refer to [`ChainSpecJsonContainer`], which flattens [`ClientSpec`] and denies unknown +/// fields. #[derive(Serialize, Deserialize, Clone, Debug)] #[serde(rename_all = "camelCase")] -#[serde(deny_unknown_fields)] +// we cannot #[serde(deny_unknown_fields)]. Otherwise chain-spec-builder will fail on any +// non-standard spec struct ClientSpec { name: String, id: String, @@ -194,19 +324,160 @@ struct ClientSpec { /// We use `Option` here since `()` is not flattenable by serde. pub type NoExtension = Option<()>; +/// Builder for creating [`ChainSpec`] instances. +pub struct ChainSpecBuilder { + code: Vec, + extensions: E, + name: String, + id: String, + chain_type: ChainType, + genesis_build_action: GenesisBuildAction, + boot_nodes: Option>, + telemetry_endpoints: Option, + protocol_id: Option, + fork_id: Option, + properties: Option, + _genesis: PhantomData<(G, EHF)>, +} + +impl ChainSpecBuilder { + /// Creates a new builder instance with no defaults. + pub fn new(code: &[u8], extensions: E) -> Self { + Self { + code: code.into(), + extensions, + name: "Development".to_string(), + id: "dev".to_string(), + chain_type: ChainType::Local, + genesis_build_action: GenesisBuildAction::Patch(Default::default()), + boot_nodes: None, + telemetry_endpoints: None, + protocol_id: None, + fork_id: None, + properties: None, + _genesis: Default::default(), + } + } + + /// Sets the spec name. + pub fn with_name(mut self, name: &str) -> Self { + self.name = name.into(); + self + } + + /// Sets the spec ID. + pub fn with_id(mut self, id: &str) -> Self { + self.id = id.into(); + self + } + + /// Sets the type of the chain.
+ pub fn with_chain_type(mut self, chain_type: ChainType) -> Self { + self.chain_type = chain_type; + self + } + + /// Sets a list of bootnode addresses. + pub fn with_boot_nodes(mut self, boot_nodes: Vec) -> Self { + self.boot_nodes = Some(boot_nodes); + self + } + + /// Sets telemetry endpoints. + pub fn with_telemetry_endpoints(mut self, telemetry_endpoints: TelemetryEndpoints) -> Self { + self.telemetry_endpoints = Some(telemetry_endpoints); + self + } + + /// Sets the network protocol ID. + pub fn with_protocol_id(mut self, protocol_id: &str) -> Self { + self.protocol_id = Some(protocol_id.into()); + self + } + + /// Sets an optional network fork identifier. + pub fn with_fork_id(mut self, fork_id: &str) -> Self { + self.fork_id = Some(fork_id.into()); + self + } + + /// Sets additional loosely-typed properties of the chain. + pub fn with_properties(mut self, properties: Properties) -> Self { + self.properties = Some(properties); + self + } + + /// Sets chain spec extensions. + pub fn with_extensions(mut self, extensions: E) -> Self { + self.extensions = extensions; + self + } + + /// Sets the code. + pub fn with_code(mut self, code: &[u8]) -> Self { + self.code = code.into(); + self + } + + /// Sets the JSON patch for runtime's GenesisConfig. + pub fn with_genesis_config_patch(mut self, patch: json::Value) -> Self { + self.genesis_build_action = GenesisBuildAction::Patch(patch); + self + } + + /// Sets the full runtime's GenesisConfig JSON. + pub fn with_genesis_config(mut self, config: json::Value) -> Self { + self.genesis_build_action = GenesisBuildAction::Full(config); + self + } + + /// Builds a [`ChainSpec`] instance using the provided settings. + pub fn build(self) -> ChainSpec { + let client_spec = ClientSpec { + name: self.name, + id: self.id, + chain_type: self.chain_type, + boot_nodes: self.boot_nodes.unwrap_or_default(), + telemetry_endpoints: self.telemetry_endpoints, + protocol_id: self.protocol_id, + fork_id: self.fork_id, + properties: self.properties, + extensions: self.extensions, + consensus_engine: (), + genesis: Default::default(), + code_substitutes: BTreeMap::new(), + }; + + ChainSpec { + client_spec, + genesis: GenesisSource::GenesisBuilderApi(self.genesis_build_action, self.code.into()), + _host_functions: Default::default(), + } + } +} + /// A configuration of a chain. Can be used to build a genesis block. -pub struct ChainSpec { +/// +/// The chain spec is generic over the native `RuntimeGenesisConfig` struct (`G`). It is also +/// possible to parametrize chain spec over the extended host functions (EHF). It should be use if +/// runtime is using the non-standard host function during genesis state creation. +pub struct ChainSpec { client_spec: ClientSpec, genesis: GenesisSource, + _host_functions: PhantomData, } -impl Clone for ChainSpec { +impl Clone for ChainSpec { fn clone(&self) -> Self { - ChainSpec { client_spec: self.client_spec.clone(), genesis: self.genesis.clone() } + ChainSpec { + client_spec: self.client_spec.clone(), + genesis: self.genesis.clone(), + _host_functions: self._host_functions, + } } } -impl ChainSpec { +impl ChainSpec { /// A list of bootnode addresses. pub fn boot_nodes(&self) -> &[MultiaddrWithPeerId] { &self.client_spec.boot_nodes @@ -260,6 +531,10 @@ impl ChainSpec { } /// Create hardcoded spec. + #[deprecated( + note = "`from_genesis` is planned to be removed in May 2024. Use `builder()` instead." 
+ )] + // deprecated note: Genesis::Runtime + GenesisSource::Factory shall also be removed pub fn from_genesis G + 'static + Send + Sync>( name: &str, id: &str, @@ -271,6 +546,7 @@ impl ChainSpec { fork_id: Option<&str>, properties: Option, extensions: E, + code: &[u8], ) -> Self { let client_spec = ClientSpec { name: name.to_owned(), @@ -287,22 +563,36 @@ impl ChainSpec { code_substitutes: BTreeMap::new(), }; - ChainSpec { client_spec, genesis: GenesisSource::Factory(Arc::new(constructor)) } + ChainSpec { + client_spec, + genesis: GenesisSource::Factory(Arc::new(constructor), code.into()), + _host_functions: Default::default(), + } } /// Type of the chain. fn chain_type(&self) -> ChainType { self.client_spec.chain_type.clone() } + + /// Provides a `ChainSpec` builder. + pub fn builder(code: &[u8], extensions: E) -> ChainSpecBuilder { + ChainSpecBuilder::new(code, extensions) + } } -impl ChainSpec { +impl ChainSpec { /// Parse json content into a `ChainSpec` pub fn from_json_bytes(json: impl Into>) -> Result { let json = json.into(); let client_spec = json::from_slice(json.as_ref()) .map_err(|e| format!("Error parsing spec file: {}", e))?; - Ok(ChainSpec { client_spec, genesis: GenesisSource::Binary(json) }) + + Ok(ChainSpec { + client_spec, + genesis: GenesisSource::Binary(json), + _host_functions: Default::default(), + }) } /// Parse json file into a `ChainSpec` @@ -318,60 +608,94 @@ impl ChainSpec { memmap2::Mmap::map(&file) .map_err(|e| format!("Error mmaping spec file `{}`: {}", path.display(), e))? }; - let client_spec = json::from_slice(&bytes).map_err(|e| format!("Error parsing spec file: {}", e))?; - Ok(ChainSpec { client_spec, genesis: GenesisSource::File(path) }) + + Ok(ChainSpec { + client_spec, + genesis: GenesisSource::File(path), + _host_functions: Default::default(), + }) } } +/// Helper structure for serializing (and only serializing) the ChainSpec into JSON file. It +/// represents the layout of `ChainSpec` JSON file. #[derive(Serialize, Deserialize)] -struct JsonContainer { +// we cannot #[serde(deny_unknown_fields)]. Otherwise chain-spec-builder will fail on any +// non-standard spec. +struct ChainSpecJsonContainer { #[serde(flatten)] client_spec: ClientSpec, genesis: Genesis, } -impl ChainSpec { - fn json_container(&self, raw: bool) -> Result, String> { - let genesis = match (raw, self.genesis.resolve()?) { +impl ChainSpec +where + EHF: HostFunctions, +{ + fn json_container(&self, raw: bool) -> Result, String> { + let raw_genesis = match (raw, self.genesis.resolve()?) 
{ + ( + true, + Genesis::RuntimeGenesis(RuntimeGenesisInner { + json_blob: RuntimeGenesisConfigJson::Config(config), + code, + }), + ) => { + let mut storage = + RuntimeCaller::::new(&code[..]).get_storage_for_config(config)?; + storage.top.insert(sp_core::storage::well_known_keys::CODE.to_vec(), code); + RawGenesis::from(storage) + }, + ( + true, + Genesis::RuntimeGenesis(RuntimeGenesisInner { + json_blob: RuntimeGenesisConfigJson::Patch(patch), + code, + }), + ) => { + let mut storage = + RuntimeCaller::::new(&code[..]).get_storage_for_patch(patch)?; + storage.top.insert(sp_core::storage::well_known_keys::CODE.to_vec(), code); + RawGenesis::from(storage) + }, + + #[allow(deprecated)] + (true, Genesis::RuntimeAndCode(RuntimeInnerWrapper { runtime: g, code })) => { + let mut storage = g.build_storage()?; + storage.top.insert(sp_core::storage::well_known_keys::CODE.to_vec(), code); + RawGenesis::from(storage) + }, + #[allow(deprecated)] (true, Genesis::Runtime(g)) => { let storage = g.build_storage()?; - let top = - storage.top.into_iter().map(|(k, v)| (StorageKey(k), StorageData(v))).collect(); - let children_default = storage - .children_default - .into_iter() - .map(|(sk, child)| { - ( - StorageKey(sk), - child - .data - .into_iter() - .map(|(k, v)| (StorageKey(k), StorageData(v))) - .collect(), - ) - }) - .collect(); - - Genesis::Raw(RawGenesis { top, children_default }) + RawGenesis::from(storage) }, - (_, genesis) => genesis, + (true, Genesis::Raw(raw)) => raw, + + (_, genesis) => + return Ok(ChainSpecJsonContainer { client_spec: self.client_spec.clone(), genesis }), }; - Ok(JsonContainer { client_spec: self.client_spec.clone(), genesis }) + + Ok(ChainSpecJsonContainer { + client_spec: self.client_spec.clone(), + genesis: Genesis::Raw(raw_genesis), + }) } - /// Dump to json string. + /// Dump the chain specification to JSON string. pub fn as_json(&self, raw: bool) -> Result { let container = self.json_container(raw)?; json::to_string_pretty(&container).map_err(|e| format!("Error generating spec json: {}", e)) } } -impl crate::ChainSpec for ChainSpec +impl crate::ChainSpec for ChainSpec where G: RuntimeGenesis + 'static, E: GetExtension + serde::Serialize + Clone + Send + Sync + 'static, + EHF: HostFunctions, { fn boot_nodes(&self) -> &[MultiaddrWithPeerId] { ChainSpec::boot_nodes(self) @@ -442,9 +766,87 @@ where } } +/// The `fun` will be called with the value at `path`. +/// +/// If exists, the value at given `path` will be passed to the `fun` and the result of `fun` +/// call will be returned. Otherwise false is returned. +/// `path` will be modified. +/// +/// # Examples +/// ```ignore +/// use serde_json::{from_str, json, Value}; +/// let doc = json!({"a":{"b":{"c":"5"}}}); +/// let mut path = ["a", "b", "c"].into(); +/// assert!(json_eval_value_at_key(&doc, &mut path, &|v| { assert_eq!(v,"5"); true })); +/// ``` +fn json_eval_value_at_key( + doc: &json::Value, + path: &mut VecDeque<&str>, + fun: &dyn Fn(&json::Value) -> bool, +) -> bool { + let Some(key) = path.pop_front() else { + return false; + }; + + if path.is_empty() { + doc.as_object().map_or(false, |o| o.get(key).map_or(false, |v| fun(v))) + } else { + doc.as_object() + .map_or(false, |o| o.get(key).map_or(false, |v| json_eval_value_at_key(v, path, fun))) + } +} + +macro_rules! 
json_path { + [ $($x:expr),+ ] => { + VecDeque::<&str>::from([$($x),+]) + }; +} + +fn json_contains_path(doc: &json::Value, path: &mut VecDeque<&str>) -> bool { + json_eval_value_at_key(doc, path, &|_| true) +} + +/// This function updates the code in given chain spec. +/// +/// Function support updating the runtime code in provided JSON chain spec blob. `Genesis::Raw` +/// and `Genesis::RuntimeGenesis` formats are supported. +/// +/// If update was successful `true` is returned, otherwise `false`. Chain spec JSON is modified in +/// place. +pub fn update_code_in_json_chain_spec(chain_spec: &mut json::Value, code: &[u8]) -> bool { + let mut path = json_path!["genesis", "runtimeGenesis", "code"]; + let mut raw_path = json_path!["genesis", "raw", "top"]; + + if json_contains_path(&chain_spec, &mut path) { + #[derive(Serialize)] + struct Container<'a> { + #[serde(with = "sp_core::bytes")] + code: &'a [u8], + } + let code_patch = json::json!({"genesis":{"runtimeGenesis": Container { code }}}); + crate::json_patch::merge(chain_spec, code_patch); + true + } else if json_contains_path(&chain_spec, &mut raw_path) { + #[derive(Serialize)] + struct Container<'a> { + #[serde(with = "sp_core::bytes", rename = "0x3a636f6465")] + code: &'a [u8], + } + let code_patch = json::json!({"genesis":{"raw":{"top": Container { code }}}}); + crate::json_patch::merge(chain_spec, code_patch); + true + } else { + false + } +} + #[cfg(test)] mod tests { use super::*; + use serde_json::{from_str, json, Value}; + use sp_application_crypto::Ss58Codec; + use sp_core::storage::well_known_keys; + use sp_keyring::AccountKeyring; #[derive(Debug, Serialize, Deserialize)] struct Genesis(BTreeMap); @@ -536,4 +938,336 @@ mod tests { ); } } + + #[test] + // some tests for json path utils + fn test_json_eval_value_at_key() { + let doc = json!({"a":{"b1":"20","b":{"c":{"d":"10"}}}}); + + assert!(json_eval_value_at_key(&doc, &mut json_path!["a", "b1"], &|v| { *v == "20" })); + assert!(json_eval_value_at_key(&doc, &mut json_path!["a", "b", "c", "d"], &|v| { + *v == "10" + })); + assert!(!json_eval_value_at_key(&doc, &mut json_path!["a", "c", "d"], &|_| { true })); + assert!(!json_eval_value_at_key(&doc, &mut json_path!["d"], &|_| { true })); + + assert!(json_contains_path(&doc, &mut json_path!["a", "b1"])); + assert!(json_contains_path(&doc, &mut json_path!["a", "b"])); + assert!(json_contains_path(&doc, &mut json_path!["a", "b", "c"])); + assert!(json_contains_path(&doc, &mut json_path!["a", "b", "c", "d"])); + assert!(!json_contains_path(&doc, &mut json_path!["a", "b", "c", "d", "e"])); + assert!(!json_contains_path(&doc, &mut json_path!["a", "b", "b1"])); + assert!(!json_contains_path(&doc, &mut json_path!["d"])); + } + + fn zeroize_code_key_in_json(encoded: bool, json: &str) -> Value { + let mut json = from_str::(json).unwrap(); + let (zeroing_patch, mut path) = if encoded { + ( + json!({"genesis":{"raw":{"top":{"0x3a636f6465":"0x0"}}}}), + json_path!["genesis", "raw", "top", "0x3a636f6465"], + ) + } else { + ( + json!({"genesis":{"runtimeGenesis":{"code":"0x0"}}}), + json_path!["genesis", "runtimeGenesis", "code"], + ) + }; + assert!(json_contains_path(&json, &mut path)); + crate::json_patch::merge(&mut json, zeroing_patch); + json + } + + #[docify::export] + #[test] + fn build_chain_spec_with_patch_works() { + let output = ChainSpec::<()>::builder( + substrate_test_runtime::wasm_binary_unwrap().into(), + Default::default(), + ) + .with_name("TestName") + .with_id("test_id") + .with_chain_type(ChainType::Local) + 
.with_genesis_config_patch(json!({ + "babe": { + "epochConfig": { + "c": [ + 7, + 10 + ], + "allowed_slots": "PrimaryAndSecondaryPlainSlots" + } + }, + "substrateTest": { + "authorities": [ + AccountKeyring::Ferdie.public().to_ss58check(), + AccountKeyring::Alice.public().to_ss58check() + ], + } + })) + .build(); + + let raw_chain_spec = output.as_json(true); + assert!(raw_chain_spec.is_ok()); + } + + #[docify::export] + #[test] + fn generate_chain_spec_with_patch_works() { + let output = ChainSpec::<()>::builder( + substrate_test_runtime::wasm_binary_unwrap().into(), + Default::default(), + ) + .with_name("TestName") + .with_id("test_id") + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(json!({ + "babe": { + "epochConfig": { + "c": [ + 7, + 10 + ], + "allowed_slots": "PrimaryAndSecondaryPlainSlots" + } + }, + "substrateTest": { + "authorities": [ + AccountKeyring::Ferdie.public().to_ss58check(), + AccountKeyring::Alice.public().to_ss58check() + ], + } + })) + .build(); + + let actual = output.as_json(false).unwrap(); + let actual_raw = output.as_json(true).unwrap(); + + let expected = + from_str::(include_str!("../res/substrate_test_runtime_from_patch.json")) + .unwrap(); + let expected_raw = + from_str::(include_str!("../res/substrate_test_runtime_from_patch_raw.json")) + .unwrap(); + + //wasm blob may change overtime so let's zero it. Also ensure it is there: + let actual = zeroize_code_key_in_json(false, actual.as_str()); + let actual_raw = zeroize_code_key_in_json(true, actual_raw.as_str()); + + assert_eq!(actual, expected); + assert_eq!(actual_raw, expected_raw); + } + + #[test] + fn generate_chain_spec_with_full_config_works() { + let j = include_str!("../../../test-utils/runtime/res/default_genesis_config.json"); + let output = ChainSpec::<()>::builder( + substrate_test_runtime::wasm_binary_unwrap().into(), + Default::default(), + ) + .with_name("TestName") + .with_id("test_id") + .with_chain_type(ChainType::Local) + .with_genesis_config(from_str(j).unwrap()) + .build(); + + let actual = output.as_json(false).unwrap(); + let actual_raw = output.as_json(true).unwrap(); + + let expected = + from_str::(include_str!("../res/substrate_test_runtime_from_config.json")) + .unwrap(); + let expected_raw = + from_str::(include_str!("../res/substrate_test_runtime_from_config_raw.json")) + .unwrap(); + + //wasm blob may change overtime so let's zero it. 
Also ensure it is there: + let actual = zeroize_code_key_in_json(false, actual.as_str()); + let actual_raw = zeroize_code_key_in_json(true, actual_raw.as_str()); + + assert_eq!(actual, expected); + assert_eq!(actual_raw, expected_raw); + } + + #[test] + fn chain_spec_as_json_fails_with_invalid_config() { + let j = + include_str!("../../../test-utils/runtime/res/default_genesis_config_invalid_2.json"); + let output = ChainSpec::<()>::builder( + substrate_test_runtime::wasm_binary_unwrap().into(), + Default::default(), + ) + .with_name("TestName") + .with_id("test_id") + .with_chain_type(ChainType::Local) + .with_genesis_config(from_str(j).unwrap()) + .build(); + + assert_eq!( + output.as_json(true), + Err("Invalid JSON blob: unknown field `babex`, expected one of `system`, `babe`, `substrateTest`, `balances` at line 1 column 8".to_string()) + ); + } + + #[test] + fn chain_spec_as_json_fails_with_invalid_patch() { + let output = ChainSpec::<()>::builder( + substrate_test_runtime::wasm_binary_unwrap().into(), + Default::default(), + ) + .with_name("TestName") + .with_id("test_id") + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(json!({ + "invalid_pallet": {}, + "substrateTest": { + "authorities": [ + AccountKeyring::Ferdie.public().to_ss58check(), + AccountKeyring::Alice.public().to_ss58check() + ], + } + })) + .build(); + + assert!(output.as_json(true).unwrap_err().contains("Invalid JSON blob: unknown field `invalid_pallet`, expected one of `system`, `babe`, `substrateTest`, `balances`")); + } + + #[test] + fn check_if_code_is_valid_for_raw_without_code() { + let spec = ChainSpec::<()>::from_json_bytes(Cow::Owned( + include_bytes!("../res/raw_no_code.json").to_vec(), + )) + .unwrap(); + + let j = from_str::(&spec.as_json(true).unwrap()).unwrap(); + + assert!(json_eval_value_at_key( + &j, + &mut json_path!["genesis", "raw", "top", "0x3a636f6465"], + &|v| { *v == "0x010101" } + )); + assert!(!json_contains_path(&j, &mut json_path!["code"])); + } + + #[test] + fn check_code_in_assimilated_storage_for_raw_without_code() { + let spec = ChainSpec::<()>::from_json_bytes(Cow::Owned( + include_bytes!("../res/raw_no_code.json").to_vec(), + )) + .unwrap(); + + let storage = spec.build_storage().unwrap(); + assert!(storage + .top + .get(&well_known_keys::CODE.to_vec()) + .map(|v| *v == vec![1, 1, 1]) + .unwrap()) + } + + #[test] + fn update_code_works_with_runtime_genesis_config() { + let j = include_str!("../../../test-utils/runtime/res/default_genesis_config.json"); + let chain_spec = ChainSpec::<()>::builder( + substrate_test_runtime::wasm_binary_unwrap().into(), + Default::default(), + ) + .with_name("TestName") + .with_id("test_id") + .with_chain_type(ChainType::Local) + .with_genesis_config(from_str(j).unwrap()) + .build(); + + let mut chain_spec_json = from_str::(&chain_spec.as_json(false).unwrap()).unwrap(); + assert!(update_code_in_json_chain_spec(&mut chain_spec_json, &[0, 1, 2, 4, 5, 6])); + + assert!(json_eval_value_at_key( + &chain_spec_json, + &mut json_path!["genesis", "runtimeGenesis", "code"], + &|v| { *v == "0x000102040506" } + )); + } + + #[test] + fn update_code_works_for_raw() { + let j = include_str!("../../../test-utils/runtime/res/default_genesis_config.json"); + let chain_spec = ChainSpec::<()>::builder( + substrate_test_runtime::wasm_binary_unwrap().into(), + Default::default(), + ) + .with_name("TestName") + .with_id("test_id") + .with_chain_type(ChainType::Local) + .with_genesis_config(from_str(j).unwrap()) + .build(); + + let mut chain_spec_json = 
from_str::(&chain_spec.as_json(true).unwrap()).unwrap(); + assert!(update_code_in_json_chain_spec(&mut chain_spec_json, &[0, 1, 2, 4, 5, 6])); + + assert!(json_eval_value_at_key( + &chain_spec_json, + &mut json_path!["genesis", "raw", "top", "0x3a636f6465"], + &|v| { *v == "0x000102040506" } + )); + } + + #[test] + fn update_code_works_with_runtime_genesis_patch() { + let chain_spec = ChainSpec::<()>::builder( + substrate_test_runtime::wasm_binary_unwrap().into(), + Default::default(), + ) + .with_name("TestName") + .with_id("test_id") + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(json!({})) + .build(); + + let mut chain_spec_json = from_str::(&chain_spec.as_json(false).unwrap()).unwrap(); + assert!(update_code_in_json_chain_spec(&mut chain_spec_json, &[0, 1, 2, 4, 5, 6])); + + assert!(json_eval_value_at_key( + &chain_spec_json, + &mut json_path!["genesis", "runtimeGenesis", "code"], + &|v| { *v == "0x000102040506" } + )); + } + + #[test] + fn generate_from_genesis_is_still_supported() { + #[allow(deprecated)] + let chain_spec: ChainSpec = ChainSpec::from_genesis( + "TestName", + "test", + ChainType::Local, + move || substrate_test_runtime::RuntimeGenesisConfig { + babe: substrate_test_runtime::BabeConfig { + epoch_config: Some( + substrate_test_runtime::TEST_RUNTIME_BABE_EPOCH_CONFIGURATION, + ), + ..Default::default() + }, + ..Default::default() + }, + Vec::new(), + None, + None, + None, + None, + Default::default(), + &vec![0, 1, 2, 4, 5, 6], + ); + + let chain_spec_json = from_str::(&chain_spec.as_json(false).unwrap()).unwrap(); + assert!(json_eval_value_at_key( + &chain_spec_json, + &mut json_path!["genesis", "runtimeAndCode", "code"], + &|v| { *v == "0x000102040506" } + )); + let chain_spec_json = from_str::(&chain_spec.as_json(true).unwrap()).unwrap(); + assert!(json_eval_value_at_key( + &chain_spec_json, + &mut json_path!["genesis", "raw", "top", "0x3a636f6465"], + &|v| { *v == "0x000102040506" } + )); + } } diff --git a/substrate/client/chain-spec/src/genesis.rs b/substrate/client/chain-spec/src/genesis_block.rs similarity index 100% rename from substrate/client/chain-spec/src/genesis.rs rename to substrate/client/chain-spec/src/genesis_block.rs diff --git a/substrate/client/chain-spec/src/genesis_config_builder.rs b/substrate/client/chain-spec/src/genesis_config_builder.rs new file mode 100644 index 0000000000000000000000000000000000000000..9ccf6b4efb203c6942cb28f1b8040f2982f3c119 --- /dev/null +++ b/substrate/client/chain-spec/src/genesis_config_builder.rs @@ -0,0 +1,196 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! A helper module for calling the GenesisBuilder API from arbitrary runtime wasm blobs. 
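To show how this helper is meant to be driven, here is a minimal sketch from a downstream crate's perspective. It assumes a runtime wasm blob (`wasm_blob`, a hypothetical variable) whose runtime implements the `GenesisBuilder` API, and it only exercises the functions added in this file; the `balances` key is just an example pallet taken from the test runtime used elsewhere in this diff.

```rust
// Hedged sketch only: `wasm_blob` is a placeholder for any runtime wasm blob that
// implements the GenesisBuilder runtime API; error handling is reduced to `String`s.
use sc_chain_spec::GenesisConfigBuilderRuntimeCaller;
use serde_json::json;

fn genesis_storage_from_patch(
	wasm_blob: &[u8],
) -> Result<sp_core::storage::Storage, String> {
	// `()` keeps the default set of host functions (no extended host functions).
	let caller = GenesisConfigBuilderRuntimeCaller::<()>::new(wasm_blob);

	// Ask the runtime for its default `RuntimeGenesisConfig` as JSON...
	let default_config = caller.get_default_config()?;
	println!("default genesis config: {default_config}");

	// ...then patch one key and let the runtime turn the result into storage items.
	caller.get_storage_for_patch(json!({
		"balances": { "balances": [] }
	}))
}
```

The `()` type parameter mirrors the default `EHF` of the struct; a runtime that needs extra host functions for genesis building would substitute its own host-function tuple there.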
+
+use codec::{Decode, Encode};
+pub use sc_executor::sp_wasm_interface::HostFunctions;
+use sc_executor::{error::Result, WasmExecutor};
+use serde_json::{from_slice, Value};
+use sp_core::{
+	storage::Storage,
+	traits::{CallContext, CodeExecutor, Externalities, FetchRuntimeCode, RuntimeCode},
+};
+use sp_genesis_builder::Result as BuildResult;
+use sp_state_machine::BasicExternalities;
+use std::borrow::Cow;
+
+/// A utility that facilitates calling the GenesisBuilder API from the runtime wasm code blob.
+///
+/// The `EHF` type allows specifying the extended host functions required for building the
+/// runtime's genesis config. The type will be combined with the default
+/// `sp_io::SubstrateHostFunctions`.
+pub struct GenesisConfigBuilderRuntimeCaller<'a, EHF = ()>
+where
+	EHF: HostFunctions,
+{
+	code: Cow<'a, [u8]>,
+	code_hash: Vec<u8>,
+	executor: WasmExecutor<(sp_io::SubstrateHostFunctions, EHF)>,
+}
+
+impl<'a, EHF> FetchRuntimeCode for GenesisConfigBuilderRuntimeCaller<'a, EHF>
+where
+	EHF: HostFunctions,
+{
+	fn fetch_runtime_code(&self) -> Option<Cow<[u8]>> {
+		Some(self.code.as_ref().into())
+	}
+}
+
+impl<'a, EHF> GenesisConfigBuilderRuntimeCaller<'a, EHF>
+where
+	EHF: HostFunctions,
+{
+	/// Creates a new instance using the provided code blob.
+	///
+	/// This code is later referred to as `runtime`.
+	pub fn new(code: &'a [u8]) -> Self {
+		GenesisConfigBuilderRuntimeCaller {
+			code: code.into(),
+			code_hash: sp_core::blake2_256(code).to_vec(),
+			executor: WasmExecutor::<(sp_io::SubstrateHostFunctions, EHF)>::builder()
+				.with_allow_missing_host_functions(true)
+				.build(),
+		}
+	}
+
+	fn call(&self, ext: &mut dyn Externalities, method: &str, data: &[u8]) -> Result<Vec<u8>> {
+		self.executor
+			.call(
+				ext,
+				&RuntimeCode { heap_pages: None, code_fetcher: self, hash: self.code_hash.clone() },
+				method,
+				data,
+				false,
+				CallContext::Offchain,
+			)
+			.0
+	}
+
+	/// Returns the default `GenesisConfig` provided by the `runtime`.
+	///
+	/// Calls [`GenesisBuilder::create_default_config`](sp_genesis_builder::GenesisBuilder::create_default_config) in the `runtime`.
+	pub fn get_default_config(&self) -> core::result::Result<Value, String> {
+		let mut t = BasicExternalities::new_empty();
+		let call_result = self
+			.call(&mut t, "GenesisBuilder_create_default_config", &[])
+			.map_err(|e| format!("wasm call error {e}"))?;
+		let default_config = Vec::<u8>::decode(&mut &call_result[..])
+			.map_err(|e| format!("scale codec error: {e}"))?;
+		Ok(from_slice(&default_config[..]).expect("returned value is json. qed."))
+	}
+
+	/// Builds the given `GenesisConfig` and returns the genesis state.
+	///
+	/// Calls [`GenesisBuilder::build_config`](sp_genesis_builder::GenesisBuilder::build_config)
+	/// provided by the `runtime`.
+	pub fn get_storage_for_config(&self, config: Value) -> core::result::Result<Storage, String> {
+		let mut ext = BasicExternalities::new_empty();
+
+		let call_result = self
+			.call(&mut ext, "GenesisBuilder_build_config", &config.to_string().encode())
+			.map_err(|e| format!("wasm call error {e}"))?;
+
+		BuildResult::decode(&mut &call_result[..])
+			.map_err(|e| format!("scale codec error: {e}"))??;
+
+		Ok(ext.into_storages())
+	}
+
+	/// Creates the genesis state by patching the default `GenesisConfig` and applying it.
+	///
+	/// This function generates the `GenesisConfig` for the runtime by applying a provided JSON
+	/// patch. The patch modifies the default `GenesisConfig`, allowing customization of specific
+	/// keys. The resulting `GenesisConfig` is then deserialized from the patched JSON
+	/// representation and stored in the storage.
+ /// + /// If the provided JSON patch is incorrect or the deserialization fails the error will be + /// returned. + /// + /// The patching process modifies the default `GenesisConfig` according to the following rules: + /// 1. Existing keys in the default configuration will be overridden by the corresponding values + /// in the patch. + /// 2. If a key exists in the patch but not in the default configuration, it will be added to + /// the resulting `GenesisConfig`. + /// 3. Keys in the default configuration that have null values in the patch will be removed from + /// the resulting `GenesisConfig`. This is helpful for changing enum variant value. + /// + /// Please note that the patch may contain full `GenesisConfig`. + pub fn get_storage_for_patch(&self, patch: Value) -> core::result::Result { + let mut config = self.get_default_config()?; + crate::json_patch::merge(&mut config, patch); + self.get_storage_for_config(config) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use serde_json::{from_str, json}; + pub use sp_consensus_babe::{AllowedSlots, BabeEpochConfiguration, Slot}; + + #[test] + fn get_default_config_works() { + let config = + ::new(substrate_test_runtime::wasm_binary_unwrap()) + .get_default_config() + .unwrap(); + let expected = r#"{"system":{},"babe":{"authorities":[],"epochConfig":null},"substrateTest":{"authorities":[]},"balances":{"balances":[]}}"#; + assert_eq!(from_str::(expected).unwrap(), config); + } + + #[test] + fn get_storage_for_patch_works() { + let patch = json!({ + "babe": { + "epochConfig": { + "c": [ + 69, + 696 + ], + "allowed_slots": "PrimaryAndSecondaryPlainSlots" + } + }, + }); + + let storage = + ::new(substrate_test_runtime::wasm_binary_unwrap()) + .get_storage_for_patch(patch) + .unwrap(); + + //Babe|Authorities + let value: Vec = storage + .top + .get( + &array_bytes::hex2bytes( + "1cb6f36e027abb2091cfb5110ab5087fdc6b171b77304263c292cc3ea5ed31ef", + ) + .unwrap(), + ) + .unwrap() + .clone(); + + assert_eq!( + BabeEpochConfiguration::decode(&mut &value[..]).unwrap(), + BabeEpochConfiguration { + c: (69, 696), + allowed_slots: AllowedSlots::PrimaryAndSecondaryPlainSlots + } + ); + } +} diff --git a/substrate/client/chain-spec/src/json_patch.rs b/substrate/client/chain-spec/src/json_patch.rs new file mode 100644 index 0000000000000000000000000000000000000000..c3930069a60d029114c42fedaaeb1cffa7aa3319 --- /dev/null +++ b/substrate/client/chain-spec/src/json_patch.rs @@ -0,0 +1,191 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! A helper module providing json patching functions. + +use serde_json::Value; + +/// Recursively merges two JSON objects, `a` and `b`, into a single object. +/// +/// If a key exists in both objects, the value from `b` will override the value from `a`. 
+/// If a key exists in `b` with a `null` value, it will be removed from `a`. +/// If a key exists only in `b` and not in `a`, it will be added to `a`. +/// +/// # Arguments +/// +/// * `a` - A mutable reference to the target JSON object to merge into. +/// * `b` - The JSON object to merge with `a`. +pub fn merge(a: &mut Value, b: Value) { + match (a, b) { + (Value::Object(a), Value::Object(b)) => + for (k, v) in b { + if v.is_null() { + a.remove(&k); + } else { + merge(a.entry(k).or_insert(Value::Null), v); + } + }, + (a, b) => *a = b, + }; +} + +#[cfg(test)] +mod tests { + use super::*; + use serde_json::json; + + #[test] + fn test1_simple_merge() { + let mut j1 = json!({ "a":123 }); + merge(&mut j1, json!({ "b":256 })); + assert_eq!(j1, json!({ "a":123, "b":256 })); + } + + #[test] + fn test2_patch_simple_merge_nested() { + let mut j1 = json!({ + "a": { + "name": "xxx", + "value": 123 + }, + "b": { "c" : { "inner_name": "yyy" } } + }); + + let j2 = json!({ + "a": { + "keys": ["a", "b", "c" ] + } + }); + + merge(&mut j1, j2); + assert_eq!( + j1, + json!({"a":{"keys":["a","b","c"],"name":"xxx","value":123}, "b": { "c" : { "inner_name": "yyy" } }}) + ); + } + + #[test] + fn test3_patch_overrides_existing_keys() { + let mut j1 = json!({ + "a": { + "name": "xxx", + "value": 123, + "keys": ["d"] + + } + }); + + let j2 = json!({ + "a": { + "keys": ["a", "b", "c" ] + } + }); + + merge(&mut j1, j2); + assert_eq!(j1, json!({"a":{"keys":["a","b","c"],"name":"xxx","value":123}})); + } + + #[test] + fn test4_patch_overrides_existing_keys() { + let mut j1 = json!({ + "a": { + "name": "xxx", + "value": 123, + "b" : { + "inner_name": "yyy" + } + } + }); + + let j2 = json!({ + "a": { + "name": "new_name", + "b" : { + "inner_name": "inner_new_name" + } + } + }); + + merge(&mut j1, j2); + assert_eq!( + j1, + json!({ "a": {"name":"new_name", "value":123, "b" : { "inner_name": "inner_new_name" }} }) + ); + } + + #[test] + fn test5_patch_overrides_existing_nested_keys() { + let mut j1 = json!({ + "a": { + "name": "xxx", + "value": 123, + "b": { + "c": { + "d": { + "name": "yyy", + "value": 256 + } + } + } + }, + }); + + let j2 = json!({ + "a": { + "value": 456, + "b": { + "c": { + "d": { + "name": "new_name" + } + } + } + } + }); + + merge(&mut j1, j2); + assert_eq!( + j1, + json!({ "a": {"name":"xxx", "value":456, "b": { "c": { "d": { "name": "new_name", "value": 256 }}}}}) + ); + } + + #[test] + fn test6_patch_removes_keys_if_null() { + let mut j1 = json!({ + "a": { + "name": "xxx", + "value": 123, + "enum_variant_1": { + "name": "yyy", + } + }, + }); + + let j2 = json!({ + "a": { + "value": 456, + "enum_variant_1": null, + "enum_variant_2": 32, + } + }); + + merge(&mut j1, j2); + assert_eq!(j1, json!({ "a": {"name":"xxx", "value":456, "enum_variant_2": 32 }})); + } +} diff --git a/substrate/client/chain-spec/src/lib.rs b/substrate/client/chain-spec/src/lib.rs index 341c5f28e4a443129986042013366007346d7f58..6a922e7b40b264505f76f4553fcaa0ae211e069a 100644 --- a/substrate/client/chain-spec/src/lib.rs +++ b/substrate/client/chain-spec/src/lib.rs @@ -16,38 +16,253 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! Substrate chain configurations. +//! This crate includes structs and utilities for defining configuration files (known as chain +//! specification) for both runtime and node. +//! +//! # Intro: Chain Specification +//! +//! The chain specification comprises parameters and settings that define the properties and an +//! 
initial state of a chain. Users typically interact with the JSON representation of the chain
+//! spec. Internally, the chain spec is embodied by the [`GenericChainSpec`] struct, and specific
+//! properties can be accessed using the [`ChainSpec`] trait.
+//!
+//! In summary, although not restricted to these, the primary role of the chain spec is to provide
+//! the list of well-known boot nodes for the blockchain network and the means for initializing the
+//! genesis storage. This initialization is necessary for creating a genesis block upon which
+//! subsequent blocks are built. When the node is launched for the first time, it reads the chain
+//! spec, initializes the genesis block, and establishes connections with the boot nodes.
+//!
+//! The JSON chain spec is divided into two main logical sections:
+//! - one section details general chain properties,
+//! - the second explicitly or indirectly defines the genesis storage, which, in turn, determines
+//!   the genesis hash of the chain.
+//!
+//! The chain specification consists of the following fields:
+//!
+//! <table>
+//!   <tr><th>Chain spec key</th><th>Description</th></tr>
+//!   <tr><td><code>name</code></td><td>The human readable name of the chain.</td></tr>
+//!   <tr><td><code>id</code></td><td>The id of the chain.</td></tr>
+//!   <tr><td><code>chainType</code></td><td>The chain type of this chain (refer to
+//!   <code>ChainType</code>).</td></tr>
+//!   <tr><td><code>bootNodes</code></td><td>A list of multi addresses that belong to boot nodes
+//!   of the chain.</td></tr>
+//!   <tr><td><code>telemetryEndpoints</code></td><td>Optional list of "multi address, verbosity"
+//!   telemetry endpoints. The verbosity goes from 0 to 9, with 0 being the mode with the lowest
+//!   verbosity.</td></tr>
+//!   <tr><td><code>protocolId</code></td><td>Optional networking protocol id that identifies the
+//!   chain.</td></tr>
+//!   <tr><td><code>forkId</code></td><td>Optional fork id. Should most likely be left empty. Can
+//!   be used to signal a fork on the network level when two chains have the same genesis
+//!   hash.</td></tr>
+//!   <tr><td><code>properties</code></td><td>Custom properties, provided as a key-value JSON
+//!   object.</td></tr>
+//!   <tr><td><code>consensusEngine</code></td><td>Deprecated field. Should be ignored.</td></tr>
+//!   <tr><td><code>codeSubstitutes</code></td><td>Optional map of <code>block_number</code> to
+//!   <code>wasm_code</code>. More details in the material to follow.</td></tr>
+//!   <tr><td><code>genesis</code></td><td>Defines the initial state of the runtime. More details
+//!   in the material to follow.</td></tr>
+//! </table>
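For readers who prefer a concrete shape over the table, the following hedged sketch assembles the general (non-genesis) fields with `serde_json`; every value is an invented placeholder and is not taken from any real network.

```rust
// Hedged sketch: all field values below are made-up placeholders that only
// illustrate the shape of the non-genesis part of a chain spec JSON file.
use serde_json::{json, Value};

fn sample_chain_spec_header() -> Value {
    json!({
        "name": "Example Chain",
        "id": "example_chain",
        "chainType": "Local",
        "bootNodes": [],
        "telemetryEndpoints": null,
        "protocolId": null,
        "properties": { "tokenDecimals": 12, "tokenSymbol": "UNIT" },
        "codeSubstitutes": {},
        // The `genesis` field is discussed in the next section.
        "genesis": {}
    })
}
```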
+//!
+//! # `genesis`: Initial Runtime State
+//!
+//! All nodes in the network must build subsequent blocks upon exactly the same genesis block.
+//!
+//! The information configured in the `genesis` section of a chain specification is used to build
+//! the genesis storage, which is essential for creating the genesis block, since the block header
+//! includes the storage root hash.
+//!
+//! The `genesis` key of the chain specification definition describes the initial state of the
+//! runtime. For example, it may contain:
+//! - an initial list of funded accounts,
+//! - the administrative account that controls the sudo key,
+//! - an initial authorities set for consensus, etc.
+//!
+//! As the compiled WASM blob of the runtime code is stored in the chain's state, the initial
+//! runtime must also be provided within the chain specification.
+//!
+//! In essence, the most important formats of genesis initial state are:
+//!
+//! <table>
+//!   <tr><th>Format</th><th>Description</th></tr>
+//!   <tr><td><code>runtime / full config</code></td><td>A JSON object that provides an explicit
+//!   and comprehensive representation of the <code>RuntimeGenesisConfig</code> struct, which is
+//!   generated by the <code>frame::runtime::prelude::construct_runtime</code> macro (example of
+//!   the generated struct). Must contain all the keys of the genesis config; no defaults will be
+//!   used. This format explicitly provides the code of the runtime.</td></tr>
+//!   <tr><td><code>patch</code></td><td>A JSON object that offers a partial representation of the
+//!   <code>RuntimeGenesisConfig</code> provided by the runtime. It contains a patch, which is
+//!   essentially a list of key-value pairs to customize in the default runtime's
+//!   <code>RuntimeGenesisConfig</code>. This format explicitly provides the code of the
+//!   runtime.</td></tr>
+//!   <tr><td><code>raw</code></td><td>A JSON object with two fields: <code>top</code> and
+//!   <code>children_default</code>. Each field is a map of <code>key => value</code> pairs
+//!   representing entries in a genesis storage trie. The runtime code is one of such
+//!   entries.</td></tr>
+//! </table>
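To make the distinction tangible, here is a hedged sketch of what the `genesis` field could look like in the `patch` and `raw` cases. The pallet name and the code value are placeholders; the `0x3a636f6465` key (the well-known `:code` storage key) and the `genesis.runtimeGenesis` / `genesis.raw.top` nesting are taken from the tests added in this diff.

```rust
// Hedged sketch: pallet names, storage values and the code blob are placeholders.
use serde_json::{json, Value};

fn genesis_shapes() -> (Value, Value) {
    // `patch` form: only keys that deviate from the runtime defaults are given;
    // the node asks the runtime to turn this into storage items.
    let with_patch = json!({
        "genesis": {
            "runtimeGenesis": {
                "code": "0x0102",
                "patch": { "balances": { "balances": [] } }
            }
        }
    });
    // `raw` form: storage entries are already materialized;
    // "0x3a636f6465" is the well-known `:code` storage key.
    let raw = json!({
        "genesis": {
            "raw": {
                "top": { "0x3a636f6465": "0x0102" },
                "children_default": {}
            }
        }
    });
    (with_patch, raw)
}
```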
+//! +//! For production or long-lasting blockchains, using the `raw` format in the chain specification is +//! recommended. Only the `raw` format guarantees that storage root hash will remain unchanged when +//! the `RuntimeGenesisConfig` format changes due to software upgrade. +//! +//! JSON examples in the [following section](#json-chain-specification-example) illustrate the `raw` +//! `patch` and full genesis fields. +//! +//! # From Initial State to Raw Genesis. +//! +//! To generate a raw genesis storage from the JSON representation of the runtime genesis config, +//! the node needs to interact with the runtime. +//! +//! This interaction involves passing the runtime genesis config JSON blob to the runtime using the +//! [`sp_genesis_builder::GenesisBuilder::build_config`] function. During this operation, the +//! runtime converts the JSON representation of the genesis config into [`sp_io::storage`] items. It +//! is a crucial step for computing the storage root hash, which is a key component in determining +//! the genesis hash. +//! +//! Consequently, the runtime must support the [`sp_genesis_builder::GenesisBuilder`] API to +//! utilize either `patch` or `full` formats. +//! +//! This entire process is encapsulated within the implementation of the [`BuildStorage`] trait, +//! which can be accessed through the [`ChainSpec::as_storage_builder`] method. There is an +//! intermediate internal helper that facilitates this interaction, +//! [`GenesisConfigBuilderRuntimeCaller`], which serves as a straightforward wrapper for +//! [`sc_executor::WasmExecutor`]. +//! +//! In case of `raw` genesis state the node does not interact with the runtime regarding the +//! computation of initial state. +//! +//! The plain and `raw` chain specification JSON blobs can be found in +//! [JSON examples](#json-chain-specification-example) section. +//! +//! # Optional Code Mapping +//! +//! Optional map of `block_number` to `wasm_code`. +//! +//! The given `wasm_code` will be used to substitute the on-chain wasm code starting with the +//! given block number until the `spec_version` on-chain changes. The given `wasm_code` should +//! be as close as possible to the on-chain wasm code. A substitute should be used to fix a bug +//! that cannot be fixed with a runtime upgrade, if for example the runtime is constantly +//! panicking. Introducing new runtime APIs isn't supported, because the node +//! will read the runtime version from the on-chain wasm code. +//! +//! Use this functionality only when there is no other way around it, and only patch the problematic +//! bug; the rest should be done with an on-chain runtime upgrade. +//! +//! # Building a Chain Specification +//! +//! The [`ChainSpecBuilder`] should be used to create an instance of a chain specification. Its API +//! allows configuration of all fields of the chain spec. To generate a JSON representation of the +//! specification, use [`ChainSpec::as_json`]. +//! +//! The sample code to generate a chain spec is as follows: +#![doc = docify::embed!("src/chain_spec.rs", build_chain_spec_with_patch_works)] +//! # JSON chain specification example +//! +//! The following are the plain and `raw` versions of the chain specification JSON files, resulting +//! from executing of the above [example](#building-a-chain-specification): +//! ```ignore +#![doc = include_str!("../res/substrate_test_runtime_from_patch.json")] +//! ``` +//! ```ignore +#![doc = include_str!("../res/substrate_test_runtime_from_patch_raw.json")] +//! ``` +//! 
The following example shows the plain full config version of chain spec: +//! ```ignore +#![doc = include_str!("../res/substrate_test_runtime_from_config.json")] +//! ``` +//! The [`ChainSpec`] trait represents the API to access values defined in the JSON chain specification. +//! //! -//! This crate contains structs and utilities to declare -//! a runtime-specific configuration file (a.k.a chain spec). +//! # Custom Chain Spec Extensions //! -//! Basic chain spec type containing all required parameters is -//! [`GenericChainSpec`]. It can be extended with -//! additional options that contain configuration specific to your chain. -//! Usually the extension is going to be an amalgamate of types exposed -//! by Substrate core modules. To allow the core modules to retrieve -//! their configuration from your extension you should use `ChainSpecExtension` -//! macro exposed by this crate. +//! The basic chain spec type containing all required parameters is [`GenericChainSpec`]. It can be +//! extended with additional options containing configuration specific to your chain. Usually, the +//! extension will be a combination of types exposed by Substrate core modules. //! +//! To allow the core modules to retrieve their configuration from your extension, you should use +//! `ChainSpecExtension` macro exposed by this crate. //! ```rust //! use std::collections::HashMap; //! use sc_chain_spec::{GenericChainSpec, ChainSpecExtension}; //! //! #[derive(Clone, Debug, serde::Serialize, serde::Deserialize, ChainSpecExtension)] //! pub struct MyExtension { -//! pub known_blocks: HashMap, +//! pub known_blocks: HashMap, //! } //! //! pub type MyChainSpec = GenericChainSpec; //! ``` -//! -//! Some parameters may require different values depending on the -//! current blockchain height (a.k.a. forks). You can use `ChainSpecGroup` -//! macro and provided [`Forks`](./struct.Forks.html) structure to put -//! such parameters to your chain spec. -//! This will allow to override a single parameter starting at specific -//! block number. -//! +//! Some parameters may require different values depending on the current blockchain height (a.k.a. +//! forks). You can use the [`ChainSpecGroup`](macro@ChainSpecGroup) macro and the provided [`Forks`] +//! structure to add such parameters to your chain spec. This will allow overriding a single +//! parameter starting at a specific block number. //! ```rust //! use sc_chain_spec::{Forks, ChainSpecGroup, ChainSpecExtension, GenericChainSpec}; //! @@ -76,12 +291,9 @@ //! /// A chain spec supporting forkable `Extension`. //! pub type MyChainSpec2 = GenericChainSpec>; //! ``` -//! -//! It's also possible to have a set of parameters that is allowed to change -//! with block numbers (i.e. is forkable), and another set that is not subject to changes. -//! This is also possible by declaring an extension that contains `Forks` within it. -//! -//! +//! It's also possible to have a set of parameters that are allowed to change with block numbers +//! (i.e., they are forkable), and another set that is not subject to changes. This can also be +//! achieved by declaring an extension that contains [`Forks`] within it. //! ```rust //! use serde::{Serialize, Deserialize}; //! use sc_chain_spec::{Forks, GenericChainSpec, ChainSpecGroup, ChainSpecExtension}; @@ -106,98 +318,26 @@ //! //! pub type MyChainSpec = GenericChainSpec; //! ``` -//! -//! # Substrate chain specification format -//! -//! The Substrate chain specification is a `json` file that describes the basics of a chain. Most -//! 
importantly it lays out the genesis storage which leads to the genesis hash. The default -//! Substrate chain specification format is the following: -//! -//! ```json -//! // The human readable name of the chain. -//! "name": "Flaming Fir", -//! -//! // The id of the chain. -//! "id": "flamingfir9", -//! -//! // The chain type of this chain. -//! // Possible values are `Live`, `Development`, `Local`. -//! "chainType": "Live", -//! -//! // A list of multi addresses that belong to boot nodes of the chain. -//! "bootNodes": [ -//! "/dns/0.flamingfir.paritytech.net/tcp/30333/p2p/12D3KooWLK2gMLhWsYJzjW3q35zAs9FDDVqfqVfVuskiGZGRSMvR", -//! ], -//! -//! // Optional list of "multi address, verbosity" of telemetry endpoints. -//! // The verbosity goes from `0` to `9`. With `0` being the mode with the lowest verbosity. -//! "telemetryEndpoints": [ -//! [ -//! "/dns/telemetry.polkadot.io/tcp/443/x-parity-wss/%2Fsubmit%2F", -//! 0 -//! ] -//! ], -//! -//! // Optional networking protocol id that identifies the chain. -//! "protocolId": "fir9", -//! -//! // Optional fork id. Should most likely be left empty. -//! // Can be used to signal a fork on the network level when two chains have the -//! // same genesis hash. -//! "forkId": "random_fork", -//! -//! // Custom properties. -//! "properties": { -//! "tokenDecimals": 15, -//! "tokenSymbol": "FIR" -//! }, -//! -//! // Deprecated field. Should be ignored. -//! "consensusEngine": null, -//! -//! // The genesis declaration of the chain. -//! // -//! // `runtime`, `raw`, `stateRootHash` denote the type of the genesis declaration. -//! // -//! // These declarations are in the following formats: -//! // - `runtime` is a `json` object that can be parsed by a compatible `GenesisConfig`. This -//! // `GenesisConfig` is declared by a runtime and opaque to the node. -//! // - `raw` is a `json` object with two fields `top` and `children_default`. Each of these -//! // fields is a map of `key => value`. These key/value pairs represent the genesis storage. -//! // - `stateRootHash` is a single hex encoded hash that represents the genesis hash. The hash -//! // type depends on the hash used by the chain. -//! // -//! "genesis": { "runtime": {} }, -//! -//! /// Optional map of `block_number` to `wasm_code`. -//! /// -//! /// The given `wasm_code` will be used to substitute the on-chain wasm code starting with the -//! /// given block number until the `spec_version` on-chain changes. The given `wasm_code` should -//! /// be as close as possible to the on-chain wasm code. A substitute should be used to fix a bug -//! /// that can not be fixed with a runtime upgrade, if for example the runtime is constantly -//! /// panicking. Introducing new runtime apis isn't supported, because the node -//! /// will read the runtime version from the on-chain wasm code. Use this functionality only when -//! /// there is no other way around it and only patch the problematic bug, the rest should be done -//! /// with a on-chain runtime upgrade. -//! "codeSubstitutes": [], -//! ``` -//! -//! See [`ChainSpec`] for a trait representation of the above. -//! //! The chain spec can be extended with other fields that are opaque to the default chain spec. //! Specific node implementations will need to be able to deserialize these extensions. 
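As a consumer-side counterpart to the documentation above, a hedged sketch that loads a raw chain spec from JSON bytes and materializes its genesis storage through `BuildStorage`, mirroring the `from_json_bytes` and `build_storage` calls used by the tests in this diff; the byte source and error handling are simplified assumptions.

```rust
// Hedged sketch: `raw_spec_bytes` stands in for the contents of any raw-format
// chain spec file; error handling is reduced to `String`s.
use sc_chain_spec::GenericChainSpec;
use sp_runtime::BuildStorage;
use std::borrow::Cow;

fn genesis_storage_from_spec(
	raw_spec_bytes: Vec<u8>,
) -> Result<sp_core::storage::Storage, String> {
	// Parse the chain spec; `()` means no custom extensions are expected.
	let spec = GenericChainSpec::<()>::from_json_bytes(Cow::Owned(raw_spec_bytes))?;

	// For a `raw` genesis this does not call into the runtime at all; for the
	// `patch`/`full` formats the embedded wasm is executed via the GenesisBuilder API.
	spec.build_storage()
}
```

The same call path is what a node uses when it builds its genesis block on first start, so the sketch is mainly useful for tooling that wants to inspect the resulting storage directly.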
mod chain_spec; mod extension; -mod genesis; +mod genesis_block; +mod genesis_config_builder; +mod json_patch; pub use self::{ - chain_spec::{ChainSpec as GenericChainSpec, NoExtension}, + chain_spec::{ + update_code_in_json_chain_spec, ChainSpec as GenericChainSpec, ChainSpecBuilder, + NoExtension, + }, extension::{get_extension, get_extension_mut, Extension, Fork, Forks, GetExtension, Group}, - genesis::{ + genesis_block::{ construct_genesis_block, resolve_state_version_from_wasm, BuildGenesisBlock, GenesisBlockBuilder, }, + genesis_config_builder::GenesisConfigBuilderRuntimeCaller, }; pub use sc_chain_spec_derive::{ChainSpecExtension, ChainSpecGroup}; diff --git a/substrate/client/cli/Cargo.toml b/substrate/client/cli/Cargo.toml index 98928700328fc9837fb6beed721faee473b3f1ba..c4464c5f787d651e66706ad23ad73b86bcc70879 100644 --- a/substrate/client/cli/Cargo.toml +++ b/substrate/client/cli/Cargo.toml @@ -15,9 +15,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] array-bytes = "6.1" chrono = "0.4.27" -clap = { version = "4.4.6", features = ["derive", "string"] } -fdlimit = "0.2.1" +clap = { version = "4.4.6", features = ["derive", "string", "wrap_help"] } +fdlimit = "0.3.0" futures = "0.3.21" +itertools = "0.10.3" libp2p-identity = { version = "0.1.3", features = ["peerid", "ed25519"]} log = "0.4.17" names = { version = "0.13.0", default-features = false } @@ -26,9 +27,9 @@ rand = "0.8.5" regex = "1.6.0" rpassword = "7.0.0" serde = "1.0.188" -serde_json = "1.0.107" +serde_json = "1.0.108" thiserror = "1.0.48" -tiny-bip39 = "1.0.0" +bip39 = "2.0.0" tokio = { version = "1.22.0", features = ["signal", "rt-multi-thread", "parking_lot"] } sc-client-api = { path = "../api" } sc-client-db = { path = "../db", default-features = false} diff --git a/substrate/client/cli/src/arg_enums.rs b/substrate/client/cli/src/arg_enums.rs index c55b97675da25d871736d517081280508ce5ed1e..d4a4b7cfdf6d13a5bf4006d6365a6e56de48c848 100644 --- a/substrate/client/cli/src/arg_enums.rs +++ b/substrate/client/cli/src/arg_enums.rs @@ -225,7 +225,7 @@ pub enum OffchainWorkerEnabled { #[derive(Debug, Clone, Copy, ValueEnum, PartialEq)] #[value(rename_all = "kebab-case")] pub enum SyncMode { - /// Full sync. Download end verify all blocks. + /// Full sync. Download and verify all blocks. Full, /// Download blocks without executing them. Download latest state with proofs. 
Fast, diff --git a/substrate/client/cli/src/commands/generate.rs b/substrate/client/cli/src/commands/generate.rs index 93b83fcbef51eefe44a6f4e66e3d46cca3ccd689..c465bcc85a47be16a5217677311499ce42573895 100644 --- a/substrate/client/cli/src/commands/generate.rs +++ b/substrate/client/cli/src/commands/generate.rs @@ -20,8 +20,9 @@ use crate::{ utils::print_from_uri, with_crypto_scheme, CryptoSchemeFlag, Error, KeystoreParams, NetworkSchemeFlag, OutputTypeFlag, }; -use bip39::{Language, Mnemonic, MnemonicType}; +use bip39::Mnemonic; use clap::Parser; +use itertools::Itertools; /// The `generate` command #[derive(Debug, Clone, Parser)] @@ -52,20 +53,22 @@ impl GenerateCmd { /// Run the command pub fn run(&self) -> Result<(), Error> { let words = match self.words { - Some(words) => MnemonicType::for_word_count(words).map_err(|_| { - Error::Input( - "Invalid number of words given for phrase: must be 12/15/18/21/24".into(), - ) - })?, - None => MnemonicType::Words12, - }; - let mnemonic = Mnemonic::new(words, Language::English); + Some(words_count) if [12, 15, 18, 21, 24].contains(&words_count) => Ok(words_count), + Some(_) => Err(Error::Input( + "Invalid number of words given for phrase: must be 12/15/18/21/24".into(), + )), + None => Ok(12), + }?; + let mnemonic = Mnemonic::generate(words) + .map_err(|e| Error::Input(format!("Mnemonic generation failed: {e}").into()))?; let password = self.keystore_params.read_password()?; let output = self.output_scheme.output_type; + let phrase = mnemonic.word_iter().join(" "); + with_crypto_scheme!( self.crypto_scheme.scheme, - print_from_uri(mnemonic.phrase(), password, self.network_scheme.network, output) + print_from_uri(&phrase, password, self.network_scheme.network, output) ); Ok(()) } diff --git a/substrate/client/cli/src/commands/insert_key.rs b/substrate/client/cli/src/commands/insert_key.rs index 732d874319a83e51477354e7ba8addbad3e1acb2..3d89610b28b1bd0a031ffad6e94fef7fd604e20d 100644 --- a/substrate/client/cli/src/commands/insert_key.rs +++ b/substrate/client/cli/src/commands/insert_key.rs @@ -126,18 +126,14 @@ mod tests { } fn load_spec(&self, _: &str) -> std::result::Result, String> { - Ok(Box::new(GenericChainSpec::from_genesis( - "test", - "test_id", - ChainType::Development, - || unimplemented!("Not required in tests"), - Vec::new(), - None, - None, - None, - None, - NoExtension::None, - ))) + Ok(Box::new( + GenericChainSpec::<()>::builder(Default::default(), NoExtension::None) + .with_name("test") + .with_id("test_id") + .with_chain_type(ChainType::Development) + .with_genesis_config_patch(Default::default()) + .build(), + )) } } diff --git a/substrate/client/cli/src/commands/inspect_node_key.rs b/substrate/client/cli/src/commands/inspect_node_key.rs index 19b5a31ca12c046ae112e91d1669f796c29909f5..6cf025a2d115066ed18277e4036a028965e3c32f 100644 --- a/substrate/client/cli/src/commands/inspect_node_key.rs +++ b/substrate/client/cli/src/commands/inspect_node_key.rs @@ -85,7 +85,7 @@ mod tests { fn inspect_node_key() { let path = tempfile::tempdir().unwrap().into_path().join("node-id").into_os_string(); let path = path.to_str().unwrap(); - let cmd = GenerateNodeKeyCmd::parse_from(&["generate-node-key", "--file", path.clone()]); + let cmd = GenerateNodeKeyCmd::parse_from(&["generate-node-key", "--file", path]); assert!(cmd.run().is_ok()); diff --git a/substrate/client/cli/src/commands/run_cmd.rs b/substrate/client/cli/src/commands/run_cmd.rs index 5dda488b1333052078d104ddc8371719219784f6..bc62dc3324e3256cfd2d17fcd88ed996e8cd6acd 100644 --- 
a/substrate/client/cli/src/commands/run_cmd.rs +++ b/substrate/client/cli/src/commands/run_cmd.rs @@ -40,35 +40,38 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; #[derive(Debug, Clone, Parser)] pub struct RunCmd { /// Enable validator mode. + /// /// The node will be started with the authority role and actively /// participate in any consensus task that it can (e.g. depending on /// availability of local keys). #[arg(long)] pub validator: bool, - /// Disable GRANDPA voter when running in validator mode, otherwise disable the GRANDPA + /// Disable GRANDPA. + /// + /// Disables voter when running in validator mode, otherwise disable the GRANDPA /// observer. #[arg(long)] pub no_grandpa: bool, - /// Listen to all RPC interfaces. - /// Default is local. Note: not all RPC methods are safe to be exposed publicly. Use an RPC - /// proxy server to filter out dangerous methods. More details: + /// Listen to all RPC interfaces (default: local). + /// + /// Not all RPC methods are safe to be exposed publicly. + /// + /// Use an RPC proxy server to filter out dangerous methods. More details: /// . + /// /// Use `--unsafe-rpc-external` to suppress the warning if you understand the risks. #[arg(long)] pub rpc_external: bool, /// Listen to all RPC interfaces. + /// /// Same as `--rpc-external`. #[arg(long)] pub unsafe_rpc_external: bool, /// RPC methods to expose. - /// - `unsafe`: Exposes every RPC method. - /// - `safe`: Exposes only a safe subset of RPC methods, denying unsafe RPC methods. - /// - `auto`: Acts as `safe` if RPC is served externally, e.g. when `--rpc--external` is - /// passed, otherwise acts as `unsafe`. #[arg( long, value_name = "METHOD SET", @@ -79,15 +82,15 @@ pub struct RunCmd { )] pub rpc_methods: RpcMethods, - /// Set the the maximum RPC request payload size for both HTTP and WS in megabytes. + /// Set the maximum RPC request payload size for both HTTP and WS in megabytes. #[arg(long, default_value_t = RPC_DEFAULT_MAX_REQUEST_SIZE_MB)] pub rpc_max_request_size: u32, - /// Set the the maximum RPC response payload size for both HTTP and WS in megabytes. + /// Set the maximum RPC response payload size for both HTTP and WS in megabytes. #[arg(long, default_value_t = RPC_DEFAULT_MAX_RESPONSE_SIZE_MB)] pub rpc_max_response_size: u32, - /// Set the the maximum concurrent subscriptions per connection. + /// Set the maximum concurrent subscriptions per connection. #[arg(long, default_value_t = RPC_DEFAULT_MAX_SUBS_PER_CONN)] pub rpc_max_subscriptions_per_connection: u32, @@ -99,15 +102,17 @@ pub struct RunCmd { #[arg(long, value_name = "COUNT", default_value_t = RPC_DEFAULT_MAX_CONNECTIONS)] pub rpc_max_connections: u32, - /// Specify browser Origins allowed to access the HTTP & WS RPC servers. - /// A comma-separated list of origins (protocol://domain or special `null` + /// Specify browser *origins* allowed to access the HTTP and WS RPC servers. + /// + /// A comma-separated list of origins (`protocol://domain` or special `null` /// value). Value of `all` will disable origin validation. Default is to /// allow localhost and origins. When running in - /// --dev mode the default is to allow all origins. + /// `--dev` mode the default is to allow all origins. #[arg(long, value_name = "ORIGINS", value_parser = parse_cors)] pub rpc_cors: Option, /// The human-readable name for this node. + /// /// It's used as network node name. 
#[arg(long, value_name = "NAME")] pub name: Option, @@ -148,36 +153,51 @@ pub struct RunCmd { #[clap(flatten)] pub keystore_params: KeystoreParams, - /// Shortcut for `--name Alice --validator` with session keys for `Alice` added to keystore. + /// Shortcut for `--name Alice --validator`. + /// + /// Session keys for `Alice` are added to keystore. #[arg(long, conflicts_with_all = &["bob", "charlie", "dave", "eve", "ferdie", "one", "two"])] pub alice: bool, - /// Shortcut for `--name Bob --validator` with session keys for `Bob` added to keystore. + /// Shortcut for `--name Bob --validator`. + /// + /// Session keys for `Bob` are added to keystore. #[arg(long, conflicts_with_all = &["alice", "charlie", "dave", "eve", "ferdie", "one", "two"])] pub bob: bool, - /// Shortcut for `--name Charlie --validator` with session keys for `Charlie` added to - /// keystore. + /// Shortcut for `--name Charlie --validator`. + /// + /// Session keys for `Charlie` are added to keystore. #[arg(long, conflicts_with_all = &["alice", "bob", "dave", "eve", "ferdie", "one", "two"])] pub charlie: bool, - /// Shortcut for `--name Dave --validator` with session keys for `Dave` added to keystore. + /// Shortcut for `--name Dave --validator`. + /// + /// Session keys for `Dave` are added to keystore. #[arg(long, conflicts_with_all = &["alice", "bob", "charlie", "eve", "ferdie", "one", "two"])] pub dave: bool, - /// Shortcut for `--name Eve --validator` with session keys for `Eve` added to keystore. + /// Shortcut for `--name Eve --validator`. + /// + /// Session keys for `Eve` are added to keystore. #[arg(long, conflicts_with_all = &["alice", "bob", "charlie", "dave", "ferdie", "one", "two"])] pub eve: bool, - /// Shortcut for `--name Ferdie --validator` with session keys for `Ferdie` added to keystore. + /// Shortcut for `--name Ferdie --validator`. + /// + /// Session keys for `Ferdie` are added to keystore. #[arg(long, conflicts_with_all = &["alice", "bob", "charlie", "dave", "eve", "one", "two"])] pub ferdie: bool, - /// Shortcut for `--name One --validator` with session keys for `One` added to keystore. + /// Shortcut for `--name One --validator`. + /// + /// Session keys for `One` are added to keystore. #[arg(long, conflicts_with_all = &["alice", "bob", "charlie", "dave", "eve", "ferdie", "two"])] pub one: bool, - /// Shortcut for `--name Two --validator` with session keys for `Two` added to keystore. + /// Shortcut for `--name Two --validator`. + /// + /// Session keys for `Two` are added to keystore. #[arg(long, conflicts_with_all = &["alice", "bob", "charlie", "dave", "eve", "ferdie", "one"])] pub two: bool, @@ -186,10 +206,13 @@ pub struct RunCmd { pub force_authoring: bool, /// Run a temporary node. + /// /// A temporary directory will be created to store the configuration and will be deleted /// at the end of the process. + /// /// Note: the directory is random per process execution. This directory is used as base path /// which includes: database, node key and keystore. + /// /// When `--dev` is given and no explicit `--base-path`, this option is implied. 
#[arg(long, conflicts_with = "base_path")] pub tmp: bool, diff --git a/substrate/client/cli/src/config.rs b/substrate/client/cli/src/config.rs index 4d218da6aa89891d3d51c28882978c0b232a8d53..b842df5a690a2a72c51d2fd69bd1324051a07a24 100644 --- a/substrate/client/cli/src/config.rs +++ b/substrate/client/cli/src/config.rs @@ -605,14 +605,25 @@ pub trait CliConfiguration: Sized { logger.init()?; - if let Some(new_limit) = fdlimit::raise_fd_limit() { - if new_limit < RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT { + match fdlimit::raise_fd_limit() { + Ok(fdlimit::Outcome::LimitRaised { to, .. }) => + if to < RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT { + warn!( + "Low open file descriptor limit configured for the process. \ + Current value: {:?}, recommended value: {:?}.", + to, RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT, + ); + }, + Ok(fdlimit::Outcome::Unsupported) => { + // Unsupported platform (non-Linux) + }, + Err(error) => { warn!( - "Low open file descriptor limit configured for the process. \ - Current value: {:?}, recommended value: {:?}.", - new_limit, RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT, + "Failed to configure file descriptor limit for the process: \ + {}, recommended value: {:?}.", + error, RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT, ); - } + }, } Ok(()) diff --git a/substrate/client/cli/src/params/import_params.rs b/substrate/client/cli/src/params/import_params.rs index bfa54a35058f6214fc3da604af15e5b32abf45a8..add7cb4f850500bbd0d8762d2695b0b4062cee01 100644 --- a/substrate/client/cli/src/params/import_params.rs +++ b/substrate/client/cli/src/params/import_params.rs @@ -48,6 +48,7 @@ pub struct ImportParams { pub wasm_method: WasmExecutionMethod, /// The WASM instantiation method to use. + /// /// Only has an effect when `wasm-execution` is set to `compiled`. /// The copy-on-write strategies are only supported on Linux. /// If the copy-on-write variant of a strategy is unsupported @@ -65,6 +66,7 @@ pub struct ImportParams { pub wasmtime_instantiation_strategy: WasmtimeInstantiationStrategy, /// Specify the path where local WASM runtimes are stored. + /// /// These runtimes will override on-chain runtimes when the version matches. #[arg(long, value_name = "PATH")] pub wasm_runtime_overrides: Option, @@ -74,12 +76,12 @@ pub struct ImportParams { pub execution_strategies: ExecutionStrategiesParams, /// Specify the state cache size. + /// /// Providing `0` will disable the cache. #[arg(long, value_name = "Bytes", default_value_t = 67108864)] pub trie_cache_size: usize, - /// DEPRECATED - /// Switch to `--trie-cache-size`. + /// DEPRECATED: switch to `--trie-cache-size`. #[arg(long)] state_cache_size: Option, } @@ -115,26 +117,23 @@ impl ImportParams { /// Execution strategies parameters. #[derive(Debug, Clone, Args)] pub struct ExecutionStrategiesParams { - /// The means of execution used when calling into the runtime for importing blocks as - /// part of an initial sync. + /// Runtime execution strategy for importing blocks during initial sync. #[arg(long, value_name = "STRATEGY", value_enum, ignore_case = true)] pub execution_syncing: Option, - /// The means of execution used when calling into the runtime for general block import - /// (including locally authored blocks). + /// Runtime execution strategy for general block import (including locally authored blocks). #[arg(long, value_name = "STRATEGY", value_enum, ignore_case = true)] pub execution_import_block: Option, - /// The means of execution used when calling into the runtime while constructing blocks. 
+ /// Runtime execution strategy for constructing blocks. #[arg(long, value_name = "STRATEGY", value_enum, ignore_case = true)] pub execution_block_construction: Option, - /// The means of execution used when calling into the runtime while using an off-chain worker. + /// Runtime execution strategy for offchain workers. #[arg(long, value_name = "STRATEGY", value_enum, ignore_case = true)] pub execution_offchain_worker: Option, - /// The means of execution used when calling into the runtime while not syncing, importing or - /// constructing blocks. + /// Runtime execution strategy when not syncing, importing or constructing blocks. #[arg(long, value_name = "STRATEGY", value_enum, ignore_case = true)] pub execution_other: Option, diff --git a/substrate/client/cli/src/params/keystore_params.rs b/substrate/client/cli/src/params/keystore_params.rs index 87210c3390cae3ee3860398ce74f041465f76caf..5a5d0f94999596b86a8b0a38dc2048399fcfe9b8 100644 --- a/substrate/client/cli/src/params/keystore_params.rs +++ b/substrate/client/cli/src/params/keystore_params.rs @@ -39,8 +39,9 @@ pub struct KeystoreParams { #[arg(long, conflicts_with_all = &["password", "password_filename"])] pub password_interactive: bool, - /// Password used by the keystore. This allows appending an extra user-defined secret to the - /// seed. + /// Password used by the keystore. + /// + /// This allows appending an extra user-defined secret to the seed. #[arg( long, value_parser = secret_string_from_str, diff --git a/substrate/client/cli/src/params/network_params.rs b/substrate/client/cli/src/params/network_params.rs index 84db218cc51da04fd382fa69c6e5c3bf6ad91ded..12f19df2a68549be318bedf1cc86875a292ee86b 100644 --- a/substrate/client/cli/src/params/network_params.rs +++ b/substrate/client/cli/src/params/network_params.rs @@ -42,6 +42,7 @@ pub struct NetworkParams { pub reserved_nodes: Vec, /// Whether to only synchronize the chain with reserved nodes. + /// /// Also disables automatic peer discovery. /// TCP connections might still be established with non-reserved nodes. /// In particular, if you are a validator your node might still connect to other @@ -50,7 +51,8 @@ pub struct NetworkParams { #[arg(long)] pub reserved_only: bool, - /// The public address that other nodes will use to connect to it. + /// Public address that other nodes will use to connect to this node. + /// /// This can be used if there's a proxy in front of this node. #[arg(long, value_name = "PUBLIC_ADDR", num_args = 1..)] pub public_addr: Vec, @@ -67,20 +69,28 @@ pub struct NetworkParams { #[arg(long, value_name = "PORT", conflicts_with_all = &[ "listen_addr" ])] pub port: Option, - /// Always forbid connecting to private IPv4/IPv6 addresses (as specified in - /// [RFC1918](https://tools.ietf.org/html/rfc1918)), unless the address was passed with - /// `--reserved-nodes` or `--bootnodes`. Enabled by default for chains marked as "live" in - /// their chain specifications. + /// Always forbid connecting to private IPv4/IPv6 addresses. + /// + /// The option doesn't apply to addresses passed with `--reserved-nodes` or + /// `--bootnodes`. Enabled by default for chains marked as "live" in their chain + /// specifications. + /// + /// Address allocation for private networks is specified by + /// [RFC1918](https://tools.ietf.org/html/rfc1918)). 
#[arg(long, alias = "no-private-ipv4", conflicts_with_all = &["allow_private_ip"])] pub no_private_ip: bool, - /// Always accept connecting to private IPv4/IPv6 addresses (as specified in - /// [RFC1918](https://tools.ietf.org/html/rfc1918)). Enabled by default for chains marked as - /// "local" in their chain specifications, or when `--dev` is passed. + /// Always accept connecting to private IPv4/IPv6 addresses. + /// + /// Enabled by default for chains marked as "local" in their chain specifications, + /// or when `--dev` is passed. + /// + /// Address allocation for private networks is specified by + /// [RFC1918](https://tools.ietf.org/html/rfc1918)). #[arg(long, alias = "allow-private-ipv4", conflicts_with_all = &["no_private_ip"])] pub allow_private_ip: bool, - /// Specify the number of outgoing connections we're trying to maintain. + /// Number of outgoing connections we're trying to maintain. #[arg(long, value_name = "COUNT", default_value_t = 8)] pub out_peers: u32, @@ -92,15 +102,17 @@ pub struct NetworkParams { #[arg(long, value_name = "COUNT", default_value_t = 100)] pub in_peers_light: u32, - /// Disable mDNS discovery. + /// Disable mDNS discovery (default: true). + /// /// By default, the network will use mDNS to discover other nodes on the /// local network. This disables it. Automatically implied when using --dev. #[arg(long)] pub no_mdns: bool, /// Maximum number of peers from which to ask for the same blocks in parallel. - /// This allows downloading announced blocks from multiple peers. Decrease to save - /// traffic and risk increased latency. + /// + /// This allows downloading announced blocks from multiple peers. + /// Decrease to save traffic and risk increased latency. #[arg(long, value_name = "COUNT", default_value_t = 5)] pub max_parallel_downloads: u32, @@ -109,19 +121,24 @@ pub struct NetworkParams { pub node_key_params: NodeKeyParams, /// Enable peer discovery on local networks. + /// /// By default this option is `true` for `--dev` or when the chain type is /// `Local`/`Development` and false otherwise. #[arg(long)] pub discover_local: bool, - /// Require iterative Kademlia DHT queries to use disjoint paths for increased resiliency in - /// the presence of potentially adversarial nodes. + /// Require iterative Kademlia DHT queries to use disjoint paths. + /// + /// Disjoint paths increase resiliency in the presence of potentially adversarial nodes. + /// /// See the S/Kademlia paper for more information on the high level design as well as its /// security improvements. #[arg(long)] pub kademlia_disjoint_query_paths: bool, - /// Kademlia replication factor determines to how many closest peers a record is replicated to. + /// Kademlia replication factor. + /// + /// Determines to how many closest peers a record is replicated to. /// /// Discovery mechanism requires successful replication to all /// `kademlia_replication_factor` peers to consider record successfully put. diff --git a/substrate/client/cli/src/params/node_key_params.rs b/substrate/client/cli/src/params/node_key_params.rs index 8c5579eaec4947d0ae18581e77b577186b3d7b9c..53f19f58e1fb0d657d387fd50a8a2346c238ead4 100644 --- a/substrate/client/cli/src/params/node_key_params.rs +++ b/substrate/client/cli/src/params/node_key_params.rs @@ -32,39 +32,49 @@ const NODE_KEY_ED25519_FILE: &str = "secret_ed25519"; /// used for libp2p networking. #[derive(Debug, Clone, Args)] pub struct NodeKeyParams { - /// The secret key to use for libp2p networking. + /// Secret key to use for p2p networking. 
+ /// /// The value is a string that is parsed according to the choice of /// `--node-key-type` as follows: - /// `ed25519`: - /// The value is parsed as a hex-encoded Ed25519 32 byte secret key, - /// i.e. 64 hex characters. + /// + /// - `ed25519`: the value is parsed as a hex-encoded Ed25519 32 byte secret key (64 hex + /// chars) + /// /// The value of this option takes precedence over `--node-key-file`. + /// /// WARNING: Secrets provided as command-line arguments are easily exposed. /// Use of this option should be limited to development and testing. To use /// an externally managed secret key, use `--node-key-file` instead. #[arg(long, value_name = "KEY")] pub node_key: Option, - /// The type of secret key to use for libp2p networking. + /// Crypto primitive to use for p2p networking. + /// /// The secret key of the node is obtained as follows: - /// * If the `--node-key` option is given, the value is parsed as a secret key according to - /// the type. See the documentation for `--node-key`. - /// * If the `--node-key-file` option is given, the secret key is read from the specified - /// file. See the documentation for `--node-key-file`. - /// * Otherwise, the secret key is read from a file with a predetermined, type-specific name - /// from the chain-specific network config directory inside the base directory specified by - /// `--base-dir`. If this file does not exist, it is created with a newly generated secret - /// key of the chosen type. + /// + /// - If the `--node-key` option is given, the value is parsed as a secret key according to the + /// type. See the documentation for `--node-key`. + /// + /// - If the `--node-key-file` option is given, the secret key is read from the specified file. + /// See the documentation for `--node-key-file`. + /// + /// - Otherwise, the secret key is read from a file with a predetermined, type-specific name + /// from the chain-specific network config directory inside the base directory specified by + /// `--base-dir`. If this file does not exist, it is created with a newly generated secret + /// key of the chosen type. + /// /// The node's secret key determines the corresponding public key and hence the /// node's peer ID in the context of libp2p. #[arg(long, value_name = "TYPE", value_enum, ignore_case = true, default_value_t = NodeKeyType::Ed25519)] pub node_key_type: NodeKeyType, - /// The file from which to read the node's secret key to use for libp2p networking. + /// File from which to read the node's secret key to use for p2p networking. + /// /// The contents of the file are parsed according to the choice of `--node-key-type` /// as follows: - /// `ed25519`: - /// The file must contain an unencoded 32 byte or hex encoded Ed25519 secret key. + /// + /// - `ed25519`: the file must contain an unencoded 32 byte or hex encoded Ed25519 secret key. + /// /// If the file does not exist, it is created with a newly generated secret key of /// the chosen type. #[arg(long, value_name = "FILE")] diff --git a/substrate/client/cli/src/params/offchain_worker_params.rs b/substrate/client/cli/src/params/offchain_worker_params.rs index d1fedab4cb2eba46e8c56c2043224e90a1dc597b..3583d85c00a7a2d73c9b392201a38dc5c6fadef3 100644 --- a/substrate/client/cli/src/params/offchain_worker_params.rs +++ b/substrate/client/cli/src/params/offchain_worker_params.rs @@ -32,8 +32,7 @@ use crate::{error, OffchainWorkerEnabled}; /// Offchain worker related parameters. 
#[derive(Debug, Clone, Args)] pub struct OffchainWorkerParams { - /// Should execute offchain workers on every block. - /// By default it's only enabled for nodes that are authoring new blocks. + /// Execute offchain workers on every block. #[arg( long = "offchain-worker", value_name = "ENABLED", @@ -43,8 +42,9 @@ pub struct OffchainWorkerParams { )] pub enabled: OffchainWorkerEnabled, - /// Enable Offchain Indexing API, which allows block import to write to Offchain DB. - /// Enables a runtime to write directly to a offchain workers DB during block import. + /// Enable offchain indexing API. + /// + /// Allows the runtime to write directly to offchain workers DB during block import. #[arg(long = "enable-offchain-indexing", value_name = "ENABLE_OFFCHAIN_INDEXING", default_value_t = false, action = ArgAction::Set)] pub indexing_enabled: bool, } diff --git a/substrate/client/cli/src/params/prometheus_params.rs b/substrate/client/cli/src/params/prometheus_params.rs index 4d234ea33c20d0a1ea523636474581638852c63d..69199ad5b260329bb49f4b8e8f0781daa17e15fb 100644 --- a/substrate/client/cli/src/params/prometheus_params.rs +++ b/substrate/client/cli/src/params/prometheus_params.rs @@ -27,10 +27,12 @@ pub struct PrometheusParams { #[arg(long, value_name = "PORT")] pub prometheus_port: Option, /// Expose Prometheus exporter on all interfaces. + /// /// Default is local. #[arg(long)] pub prometheus_external: bool, /// Do not expose a Prometheus exporter endpoint. + /// /// Prometheus metric endpoint is enabled by default. #[arg(long)] pub no_prometheus: bool, diff --git a/substrate/client/cli/src/params/pruning_params.rs b/substrate/client/cli/src/params/pruning_params.rs index 1b5bf247d942e66fa521aa867aace58db53cca1e..25b17b53289818ba02e839e98b9738f380fc1067 100644 --- a/substrate/client/cli/src/params/pruning_params.rs +++ b/substrate/client/cli/src/params/pruning_params.rs @@ -24,6 +24,7 @@ use sc_service::{BlocksPruning, PruningMode}; #[derive(Debug, Clone, Args)] pub struct PruningParams { /// Specify the state pruning mode. + /// /// This mode specifies when the block's state (ie, storage) /// should be pruned (ie, removed) from the database. /// This setting can only be set on the first creation of the database. Every subsequent run @@ -38,6 +39,7 @@ pub struct PruningParams { pub state_pruning: Option, /// Specify the blocks pruning mode. + /// /// This mode specifies when the block's body (including justifications) /// should be pruned (ie, removed) from the database. /// Possible values: diff --git a/substrate/client/cli/src/params/runtime_params.rs b/substrate/client/cli/src/params/runtime_params.rs index 79035fc7d4c5d7cf082969a4d709b8181bb176eb..a130d808418e629a1e222f790dd2977172814f96 100644 --- a/substrate/client/cli/src/params/runtime_params.rs +++ b/substrate/client/cli/src/params/runtime_params.rs @@ -22,7 +22,9 @@ use std::str::FromStr; /// Parameters used to config runtime. #[derive(Debug, Clone, Args)] pub struct RuntimeParams { - /// The size of the instances cache for each runtime. The values higher than 256 are illegal. + /// The size of the instances cache for each runtime [max: 32]. + /// + /// Values higher than 32 are illegal. 
#[arg(long, default_value_t = 8, value_parser = parse_max_runtime_instances)] pub max_runtime_instances: usize, @@ -35,8 +37,8 @@ fn parse_max_runtime_instances(s: &str) -> Result { let max_runtime_instances = usize::from_str(s) .map_err(|_err| format!("Illegal `--max-runtime-instances` value: {s}"))?; - if max_runtime_instances > 256 { - Err(format!("Illegal `--max-runtime-instances` value: {max_runtime_instances} is more than the allowed maximum of `256` ")) + if max_runtime_instances > 32 { + Err(format!("Illegal `--max-runtime-instances` value: {max_runtime_instances} is more than the allowed maximum of `32` ")) } else { Ok(max_runtime_instances) } diff --git a/substrate/client/cli/src/params/shared_params.rs b/substrate/client/cli/src/params/shared_params.rs index 3d20ca504a6915207f37bda71ac8116fba3dd025..465372fba17d4bb8036477d587e37f12cdd414d6 100644 --- a/substrate/client/cli/src/params/shared_params.rs +++ b/substrate/client/cli/src/params/shared_params.rs @@ -25,14 +25,17 @@ use std::path::PathBuf; #[derive(Debug, Clone, Args)] pub struct SharedParams { /// Specify the chain specification. - /// It can be one of the predefined ones (dev, local, or staging) or it can be a path to a file - /// with the chainspec (such as one exported by the `build-spec` subcommand). + /// + /// It can be one of the predefined ones (dev, local, or staging) or it can be a path to + /// a file with the chainspec (such as one exported by the `build-spec` subcommand). #[arg(long, value_name = "CHAIN_SPEC")] pub chain: Option, /// Specify the development chain. + /// /// This flag sets `--chain=dev`, `--force-authoring`, `--rpc-cors=all`, /// `--alice`, and `--tmp` flags, unless explicitly overridden. + /// It also disables local peer discovery (see --no-mdns and --discover-local) #[arg(long, conflicts_with_all = &["chain"])] pub dev: bool, @@ -40,14 +43,23 @@ pub struct SharedParams { #[arg(long, short = 'd', value_name = "PATH")] pub base_path: Option, - /// Sets a custom logging filter. Syntax is `=`, e.g. -lsync=debug. - /// Log levels (least to most verbose) are error, warn, info, debug, and trace. + /// Sets a custom logging filter (syntax: `=`). + /// + /// Log levels (least to most verbose) are `error`, `warn`, `info`, `debug`, and `trace`. + /// /// By default, all targets log `info`. The global log level can be set with `-l`. + /// + /// Multiple `=` entries can be specified and separated by a comma. + /// + /// *Example*: `--log error,sync=debug,grandpa=warn`. + /// Sets Global log level to `error`, sets `sync` target to debug and grandpa target to `warn`. #[arg(short = 'l', long, value_name = "LOG_PATTERN", num_args = 1..)] pub log: Vec, /// Enable detailed log output. - /// This includes displaying the log target, log level and thread name. + /// + /// Includes displaying the log target, log level and thread name. + /// /// This is automatically enabled when something is logged with any higher level than `info`. #[arg(long)] pub detailed_log_output: bool, @@ -57,14 +69,18 @@ pub struct SharedParams { pub disable_log_color: bool, /// Enable feature to dynamically update and reload the log filter. + /// /// Be aware that enabling this feature can lead to a performance decrease up to factor six or /// more. Depending on the global logging level the performance decrease changes. + /// /// The `system_addLogFilter` and `system_resetLogFilter` RPCs will have no effect with this /// option not being set. #[arg(long)] pub enable_log_reloading: bool, - /// Sets a custom profiling filter. 
Syntax is the same as for logging: `=`. + /// Sets a custom profiling filter. + /// + /// Syntax is the same as for logging (`--log`). #[arg(long, value_name = "TARGETS")] pub tracing_targets: Option, diff --git a/substrate/client/cli/src/params/telemetry_params.rs b/substrate/client/cli/src/params/telemetry_params.rs index 67f441071410cae7c6f78be6e18944d3201b8f0d..3b3d91e6b04ed1c6891d56be47ec4da27e8a633f 100644 --- a/substrate/client/cli/src/params/telemetry_params.rs +++ b/substrate/client/cli/src/params/telemetry_params.rs @@ -22,14 +22,17 @@ use clap::Args; #[derive(Debug, Clone, Args)] pub struct TelemetryParams { /// Disable connecting to the Substrate telemetry server. + /// /// Telemetry is on by default on global chains. #[arg(long)] pub no_telemetry: bool, /// The URL of the telemetry server to connect to. + /// /// This flag can be passed multiple times as a means to specify multiple /// telemetry endpoints. Verbosity levels range from 0-9, with 0 denoting /// the least verbosity. + /// /// Expected format is 'URL VERBOSITY', e.g. `--telemetry-url 'wss://foo/bar 0'`. #[arg(long = "telemetry-url", value_name = "URL VERBOSITY", value_parser = parse_telemetry_endpoints)] pub telemetry_endpoints: Vec<(String, u8)>, diff --git a/substrate/client/cli/src/params/transaction_pool_params.rs b/substrate/client/cli/src/params/transaction_pool_params.rs index b2bf0b9b364c801c12d6aee8ee1d28007b24cc4a..48b2e5b1572baa0942e695993d149531cc7e4358 100644 --- a/substrate/client/cli/src/params/transaction_pool_params.rs +++ b/substrate/client/cli/src/params/transaction_pool_params.rs @@ -30,7 +30,9 @@ pub struct TransactionPoolParams { #[arg(long, value_name = "COUNT", default_value_t = 20480)] pub pool_kbytes: usize, - /// How long a transaction is banned for, if it is considered invalid. Defaults to 1800s. + /// How long a transaction is banned for, if it is considered invalid. + /// + /// Defaults to 1800s.
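Referring back to the `--log` syntax documented for `SharedParams` above (comma-separated `target=level` directives, with a bare level setting the global default), a hypothetical parser for just that syntax could look as follows; the real CLI hands the string to the logging/tracing filter machinery instead.

// Parses `--log error,sync=debug,grandpa=warn`-style values; a directive without
// `=` is treated as the global default level.
fn parse_log_directives(value: &str) -> Vec<(Option<String>, String)> {
    value
        .split(',')
        .filter(|d| !d.is_empty())
        .map(|directive| match directive.split_once('=') {
            Some((target, level)) => (Some(target.to_string()), level.to_string()),
            None => (None, directive.to_string()),
        })
        .collect()
}

// parse_log_directives("error,sync=debug,grandpa=warn")
//   => [(None, "error"), (Some("sync"), "debug"), (Some("grandpa"), "warn")]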
#[arg(long, value_name = "SECONDS")] pub tx_ban_seconds: Option, } diff --git a/substrate/client/cli/src/runner.rs b/substrate/client/cli/src/runner.rs index 59f53200a192bce80a0e5b151ce78db26f87c195..1707a76cbe78955ff6fb641866db07e6324c06e8 100644 --- a/substrate/client/cli/src/runner.rs +++ b/substrate/client/cli/src/runner.rs @@ -251,18 +251,14 @@ mod tests { trie_cache_maximum_size: None, state_pruning: None, blocks_pruning: sc_client_db::BlocksPruning::KeepAll, - chain_spec: Box::new(GenericChainSpec::from_genesis( - "test", - "test_id", - ChainType::Development, - || unimplemented!("Not required in tests"), - Vec::new(), - None, - None, - None, - None, - NoExtension::None, - )), + chain_spec: Box::new( + GenericChainSpec::<()>::builder(Default::default(), NoExtension::None) + .with_name("test") + .with_id("test_id") + .with_chain_type(ChainType::Development) + .with_genesis_config_patch(Default::default()) + .build(), + ), wasm_method: Default::default(), wasm_runtime_overrides: None, rpc_addr: None, diff --git a/substrate/client/consensus/aura/src/lib.rs b/substrate/client/consensus/aura/src/lib.rs index 2ed451ef663ed87b6c61efbd6e1714118e0235ac..1be7be8eeeaa60a75ae8b6ef95fd8cbbd5b3998e 100644 --- a/substrate/client/consensus/aura/src/lib.rs +++ b/substrate/client/consensus/aura/src/lib.rs @@ -548,7 +548,7 @@ where mod tests { use super::*; use parking_lot::Mutex; - use sc_block_builder::BlockBuilderProvider; + use sc_block_builder::BlockBuilderBuilder; use sc_client_api::BlockchainEvents; use sc_consensus::BoxJustificationImport; use sc_consensus_slots::{BackoffAuthoringOnFinalizedHeadLagging, SimpleSlotWorker}; @@ -604,7 +604,14 @@ mod tests { _: Duration, _: Option, ) -> Self::Proposal { - let r = self.1.new_block(digests).unwrap().build().map_err(|e| e.into()); + let r = BlockBuilderBuilder::new(&*self.1) + .on_parent_block(self.1.chain_info().best_hash) + .fetch_parent_block_number(&*self.1) + .unwrap() + .with_inherent_digests(digests) + .build() + .unwrap() + .build(); future::ready(r.map(|b| Proposal { block: b.block, diff --git a/substrate/client/consensus/babe/rpc/Cargo.toml b/substrate/client/consensus/babe/rpc/Cargo.toml index f54edb296842ddf9b9f4390aa8e307c0bef69398..c95d95ae427a4a96a358c1cc772dc5ff76437f6b 100644 --- a/substrate/client/consensus/babe/rpc/Cargo.toml +++ b/substrate/client/consensus/babe/rpc/Cargo.toml @@ -30,7 +30,7 @@ sp-keystore = { path = "../../../../primitives/keystore" } sp-runtime = { path = "../../../../primitives/runtime" } [dev-dependencies] -serde_json = "1.0.107" +serde_json = "1.0.108" tokio = "1.22.0" sc-consensus = { path = "../../common" } sc-keystore = { path = "../../../keystore" } diff --git a/substrate/client/consensus/babe/src/tests.rs b/substrate/client/consensus/babe/src/tests.rs index 02882d8baaeda2d8cf8ed2fdf2682c31989d1ab3..d097f37c325790f134c879d7a38e7a9ba12c559a 100644 --- a/substrate/client/consensus/babe/src/tests.rs +++ b/substrate/client/consensus/babe/src/tests.rs @@ -20,7 +20,7 @@ use super::*; use authorship::claim_slot; -use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; +use sc_block_builder::{BlockBuilder, BlockBuilderBuilder}; use sc_client_api::{BlockchainEvents, Finalizer}; use sc_consensus::{BoxBlockImport, BoxJustificationImport}; use sc_consensus_epochs::{EpochIdentifier, EpochIdentifierPosition}; @@ -98,8 +98,13 @@ impl DummyProposer { &mut self, pre_digests: Digest, ) -> future::Ready, Error>> { - let block_builder = - self.factory.client.new_block_at(self.parent_hash, pre_digests, false).unwrap(); + 
let block_builder = BlockBuilderBuilder::new(&*self.factory.client) + .on_parent_block(self.parent_hash) + .fetch_parent_block_number(&*self.factory.client) + .unwrap() + .with_inherent_digests(pre_digests) + .build() + .unwrap(); let mut block = match block_builder.build().map_err(|e| e.into()) { Ok(b) => b.block, @@ -297,7 +302,7 @@ impl TestNetFactory for BabeTestNet { async fn rejects_empty_block() { sp_tracing::try_init_simple(); let mut net = BabeTestNet::new(3); - let block_builder = |builder: BlockBuilder<_, _, _>| builder.build().unwrap().block; + let block_builder = |builder: BlockBuilder<_, _>| builder.build().unwrap().block; net.mut_peers(|peer| { peer[0].generate_blocks(1, BlockOrigin::NetworkInitialSync, block_builder); }) diff --git a/substrate/client/consensus/beefy/README.md b/substrate/client/consensus/beefy/README.md index 1a5a9667fdbb74d9dbd872629b19974ed6b08497..13f88303a972ac35f2bbcb11a443619b603149a7 100644 --- a/substrate/client/consensus/beefy/README.md +++ b/substrate/client/consensus/beefy/README.md @@ -104,7 +104,7 @@ shortcuts: ## Mental Model BEEFY should be considered as an extra voting round done by GRANDPA validators for the current -best finalized block. Similarily to how GRANDPA is lagging behind best produced (non-finalized) +best finalized block. Similarly to how GRANDPA is lagging behind best produced (non-finalized) block, BEEFY is going to lag behind best GRANDPA (finalized) block. ``` @@ -302,7 +302,7 @@ periodically on the global topic. Let's now dive into description of the message ## Misbehavior -Similarily to other PoS protocols, BEEFY considers casting two different votes in the same round a +Similarly to other PoS protocols, BEEFY considers casting two different votes in the same round a misbehavior. I.e. for a particular `round_number`, the validator produces signatures for 2 different `Commitment`s and broadcasts them. This is called **equivocation**. diff --git a/substrate/client/consensus/beefy/rpc/Cargo.toml b/substrate/client/consensus/beefy/rpc/Cargo.toml index 74733ea9edd9d274a9578248f607b68fe1aacee4..c7464fdc653230100a937408b518c9638b82a976 100644 --- a/substrate/client/consensus/beefy/rpc/Cargo.toml +++ b/substrate/client/consensus/beefy/rpc/Cargo.toml @@ -23,7 +23,7 @@ sp-core = { path = "../../../../primitives/core" } sp-runtime = { path = "../../../../primitives/runtime" } [dev-dependencies] -serde_json = "1.0.107" +serde_json = "1.0.108" sc-rpc = { path = "../../../rpc", features = ["test-helpers"]} substrate-test-runtime-client = { path = "../../../../test-utils/runtime/client" } tokio = { version = "1.22.0", features = ["macros"] } diff --git a/substrate/client/consensus/beefy/src/communication/gossip.rs b/substrate/client/consensus/beefy/src/communication/gossip.rs index 8c025ca0676197203035d652646a727e89354085..342cd0511a511bc0fc520c438fc23abbbd3a9897 100644 --- a/substrate/client/consensus/beefy/src/communication/gossip.rs +++ b/substrate/client/consensus/beefy/src/communication/gossip.rs @@ -16,11 +16,10 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
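The test migrations above (and several more below) all replace `new_block`/`new_block_at` with the same `BlockBuilderBuilder` chain; a minimal sketch of that pattern, assuming a test `client` as in the surrounding tests:

// Build an empty block on top of the current best block with the new builder API.
let block = BlockBuilderBuilder::new(&*client)
    .on_parent_block(client.chain_info().best_hash)
    .with_parent_block_number(client.chain_info().best_number)
    .build() // finish configuring the block builder
    .unwrap()
    .build() // produce the block itself
    .unwrap()
    .block;

Where the parent number is not known up front, `.fetch_parent_block_number(&*client)` is used instead of `.with_parent_block_number(..)`, as in the hunks above.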
-use std::{collections::BTreeMap, sync::Arc, time::Duration}; +use std::{collections::BTreeSet, sync::Arc, time::Duration}; use sc_network::{PeerId, ReputationChange}; use sc_network_gossip::{MessageIntent, ValidationResult, Validator, ValidatorContext}; -use sp_core::hashing::twox_64; use sp_runtime::traits::{Block, Hash, Header, NumberFor}; use codec::{Decode, DecodeAll, Encode}; @@ -115,9 +114,6 @@ where <::Hashing as Hash>::hash(b"beefy-justifications") } -/// A type that represents hash of the message. -pub type MessageHash = [u8; 8]; - #[derive(Clone, Debug)] pub(crate) struct GossipFilterCfg<'a, B: Block> { pub start: NumberFor, @@ -133,18 +129,21 @@ struct FilterInner { } struct Filter { + // specifies live rounds inner: Option>, - live_votes: BTreeMap, fnv::FnvHashSet>, + // cache of seen valid justifications in active rounds + rounds_with_valid_proofs: BTreeSet>, } impl Filter { pub fn new() -> Self { - Self { inner: None, live_votes: BTreeMap::new() } + Self { inner: None, rounds_with_valid_proofs: BTreeSet::new() } } /// Update filter to new `start` and `set_id`. fn update(&mut self, cfg: GossipFilterCfg) { - self.live_votes.retain(|&round, _| round >= cfg.start && round <= cfg.end); + self.rounds_with_valid_proofs + .retain(|&round| round >= cfg.start && round <= cfg.end); // only clone+overwrite big validator_set if set_id changed match self.inner.as_mut() { Some(f) if f.validator_set.id() == cfg.validator_set.id() => { @@ -203,14 +202,14 @@ impl Filter { .unwrap_or(Consider::RejectOutOfScope) } - /// Add new _known_ `hash` to the round's known votes. - fn add_known_vote(&mut self, round: NumberFor, hash: MessageHash) { - self.live_votes.entry(round).or_default().insert(hash); + /// Add new _known_ `round` to the set of seen valid justifications. + fn mark_round_as_proven(&mut self, round: NumberFor) { + self.rounds_with_valid_proofs.insert(round); } - /// Check if `hash` is already part of round's known votes. - fn is_known_vote(&self, round: NumberFor, hash: &MessageHash) -> bool { - self.live_votes.get(&round).map(|known| known.contains(hash)).unwrap_or(false) + /// Check if `round` is already part of seen valid justifications. + fn is_already_proven(&self, round: NumberFor) -> bool { + self.rounds_with_valid_proofs.contains(&round) } fn validator_set(&self) -> Option<&ValidatorSet> { @@ -273,16 +272,13 @@ where &self, vote: VoteMessage, AuthorityId, Signature>, sender: &PeerId, - data: &[u8], ) -> Action { - let msg_hash = twox_64(data); let round = vote.commitment.block_number; let set_id = vote.commitment.validator_set_id; self.known_peers.lock().note_vote_for(*sender, round); // Verify general usefulness of the message. - // We are going to discard old votes right away (without verification) - // Also we keep track of already received votes to avoid verifying duplicates. + // We are going to discard old votes right away (without verification). { let filter = self.gossip_filter.read(); @@ -293,10 +289,6 @@ where Consider::Accept => {}, } - if filter.is_known_vote(round, &msg_hash) { - return Action::Keep(self.votes_topic, benefit::KNOWN_VOTE_MESSAGE) - } - // ensure authority is part of the set. 
if !filter .validator_set() @@ -309,7 +301,6 @@ where } if BeefyKeystore::verify(&vote.id, &vote.signature, &vote.commitment.encode()) { - self.gossip_filter.write().add_known_vote(round, msg_hash); Action::Keep(self.votes_topic, benefit::VOTE_MESSAGE) } else { debug!( @@ -328,34 +319,46 @@ where let (round, set_id) = proof_block_num_and_set_id::(&proof); self.known_peers.lock().note_vote_for(*sender, round); - let guard = self.gossip_filter.read(); - // Verify general usefulness of the justification. - match guard.consider_finality_proof(round, set_id) { - Consider::RejectPast => return Action::Discard(cost::OUTDATED_MESSAGE), - Consider::RejectFuture => return Action::Discard(cost::FUTURE_MESSAGE), - Consider::RejectOutOfScope => return Action::Discard(cost::OUT_OF_SCOPE_MESSAGE), - Consider::Accept => {}, + let action = { + let guard = self.gossip_filter.read(); + + // Verify general usefulness of the justification. + match guard.consider_finality_proof(round, set_id) { + Consider::RejectPast => return Action::Discard(cost::OUTDATED_MESSAGE), + Consider::RejectFuture => return Action::Discard(cost::FUTURE_MESSAGE), + Consider::RejectOutOfScope => return Action::Discard(cost::OUT_OF_SCOPE_MESSAGE), + Consider::Accept => {}, + } + + if guard.is_already_proven(round) { + return Action::Discard(benefit::NOT_INTERESTED) + } + + // Verify justification signatures. + guard + .validator_set() + .map(|validator_set| { + if let Err((_, signatures_checked)) = + verify_with_validator_set::(round, validator_set, &proof) + { + debug!( + target: LOG_TARGET, + "🥩 Bad signatures on message: {:?}, from: {:?}", proof, sender + ); + let mut cost = cost::INVALID_PROOF; + cost.value += + cost::PER_SIGNATURE_CHECKED.saturating_mul(signatures_checked as i32); + Action::Discard(cost) + } else { + Action::Keep(self.justifs_topic, benefit::VALIDATED_PROOF) + } + }) + .unwrap_or(Action::Discard(cost::OUT_OF_SCOPE_MESSAGE)) + }; + if matches!(action, Action::Keep(_, _)) { + self.gossip_filter.write().mark_round_as_proven(round); } - // Verify justification signatures. 
- guard - .validator_set() - .map(|validator_set| { - if let Err((_, signatures_checked)) = - verify_with_validator_set::(round, validator_set, &proof) - { - debug!( - target: LOG_TARGET, - "🥩 Bad signatures on message: {:?}, from: {:?}", proof, sender - ); - let mut cost = cost::INVALID_PROOF; - cost.value += - cost::PER_SIGNATURE_CHECKED.saturating_mul(signatures_checked as i32); - Action::Discard(cost) - } else { - Action::Keep(self.justifs_topic, benefit::VALIDATED_PROOF) - } - }) - .unwrap_or(Action::Discard(cost::OUT_OF_SCOPE_MESSAGE)) + action } } @@ -375,7 +378,7 @@ where ) -> ValidationResult { let raw = data; let action = match GossipMessage::::decode_all(&mut data) { - Ok(GossipMessage::Vote(msg)) => self.validate_vote(msg, sender, raw), + Ok(GossipMessage::Vote(msg)) => self.validate_vote(msg, sender), Ok(GossipMessage::FinalityProof(proof)) => self.validate_finality_proof(proof, sender), Err(e) => { debug!(target: LOG_TARGET, "Error decoding message: {}", e); @@ -483,41 +486,6 @@ pub(crate) mod tests { }; use sp_keystore::{testing::MemoryKeystore, Keystore}; - #[test] - fn known_votes_insert_remove() { - let mut filter = Filter::::new(); - let msg_hash = twox_64(b"data"); - let keys = vec![Keyring::Alice.public()]; - let validator_set = ValidatorSet::::new(keys.clone(), 1).unwrap(); - - filter.add_known_vote(1, msg_hash); - filter.add_known_vote(1, msg_hash); - filter.add_known_vote(2, msg_hash); - assert_eq!(filter.live_votes.len(), 2); - - filter.add_known_vote(3, msg_hash); - assert!(filter.is_known_vote(3, &msg_hash)); - assert!(!filter.is_known_vote(3, &twox_64(b"other"))); - assert!(!filter.is_known_vote(4, &msg_hash)); - assert_eq!(filter.live_votes.len(), 3); - - assert!(filter.inner.is_none()); - assert_eq!(filter.consider_vote(1, 1), Consider::RejectOutOfScope); - - filter.update(GossipFilterCfg { start: 3, end: 10, validator_set: &validator_set }); - assert_eq!(filter.live_votes.len(), 1); - assert!(filter.live_votes.contains_key(&3)); - assert_eq!(filter.consider_vote(2, 1), Consider::RejectPast); - assert_eq!(filter.consider_vote(3, 1), Consider::Accept); - assert_eq!(filter.consider_vote(4, 1), Consider::Accept); - assert_eq!(filter.consider_vote(20, 1), Consider::RejectFuture); - assert_eq!(filter.consider_vote(4, 2), Consider::RejectFuture); - - let validator_set = ValidatorSet::::new(keys, 2).unwrap(); - filter.update(GossipFilterCfg { start: 5, end: 10, validator_set: &validator_set }); - assert!(filter.live_votes.is_empty()); - } - struct TestContext; impl ValidatorContext for TestContext { fn broadcast_topic(&mut self, _topic: B::Hash, _force: bool) { @@ -610,20 +578,6 @@ pub(crate) mod tests { assert!(matches!(res, ValidationResult::ProcessAndKeep(_))); expected_report.cost_benefit = benefit::VOTE_MESSAGE; assert_eq!(report_stream.try_recv().unwrap(), expected_report); - assert_eq!( - gv.gossip_filter - .read() - .live_votes - .get(&vote.commitment.block_number) - .map(|x| x.len()), - Some(1) - ); - - // second time we should hit the cache - let res = gv.validate(&mut context, &sender, &encoded); - assert!(matches!(res, ValidationResult::ProcessAndKeep(_))); - expected_report.cost_benefit = benefit::KNOWN_VOTE_MESSAGE; - assert_eq!(report_stream.try_recv().unwrap(), expected_report); // reject vote, voter not in validator set let mut bad_vote = vote.clone(); @@ -692,7 +646,7 @@ pub(crate) mod tests { // reject proof, bad signatures (Bob instead of Alice) let bad_validator_set = ValidatorSet::::new(vec![Keyring::Bob.public()], 0).unwrap(); - let proof = 
dummy_proof(20, &bad_validator_set); + let proof = dummy_proof(21, &bad_validator_set); let encoded_proof = GossipMessage::::FinalityProof(proof).encode(); let res = gv.validate(&mut context, &sender, &encoded_proof); assert!(matches!(res, ValidationResult::Discard)); diff --git a/substrate/client/consensus/beefy/src/communication/mod.rs b/substrate/client/consensus/beefy/src/communication/mod.rs index 7f9535bfc23f1322fbe4995f596e83360c85fc68..10a6071aae658d2988874092dcaade873870ca37 100644 --- a/substrate/client/consensus/beefy/src/communication/mod.rs +++ b/substrate/client/consensus/beefy/src/communication/mod.rs @@ -102,7 +102,7 @@ mod cost { mod benefit { use sc_network::ReputationChange as Rep; pub(super) const VOTE_MESSAGE: Rep = Rep::new(100, "BEEFY: Round vote message"); - pub(super) const KNOWN_VOTE_MESSAGE: Rep = Rep::new(50, "BEEFY: Known vote"); + pub(super) const NOT_INTERESTED: Rep = Rep::new(10, "BEEFY: Not interested in round"); pub(super) const VALIDATED_PROOF: Rep = Rep::new(100, "BEEFY: Justification"); } diff --git a/substrate/client/consensus/beefy/src/lib.rs b/substrate/client/consensus/beefy/src/lib.rs index 72df3cab85503960936f161165a8fadb3d7742a4..89a5d51c88702df76f7e3f0e7bb4f65076531066 100644 --- a/substrate/client/consensus/beefy/src/lib.rs +++ b/substrate/client/consensus/beefy/src/lib.rs @@ -33,7 +33,7 @@ use crate::{ worker::PersistedState, }; use futures::{stream::Fuse, StreamExt}; -use log::{error, info}; +use log::{debug, error, info}; use parking_lot::Mutex; use prometheus::Registry; use sc_client_api::{Backend, BlockBackend, BlockchainEvents, FinalityNotifications, Finalizer}; @@ -428,7 +428,7 @@ where let best_beefy = *header.number(); // If no session boundaries detected so far, just initialize new rounds here. if sessions.is_empty() { - let active_set = expect_validator_set(runtime, backend, &header, beefy_genesis)?; + let active_set = expect_validator_set(runtime, backend, &header)?; let mut rounds = Rounds::new(best_beefy, active_set); // Mark the round as already finalized. rounds.conclude(best_beefy); @@ -447,7 +447,7 @@ where if *header.number() == beefy_genesis { // We've reached BEEFY genesis, initialize voter here. - let genesis_set = expect_validator_set(runtime, backend, &header, beefy_genesis)?; + let genesis_set = expect_validator_set(runtime, backend, &header)?; info!( target: LOG_TARGET, "🥩 Loading BEEFY voter state from genesis on what appears to be first startup. \ @@ -532,7 +532,6 @@ fn expect_validator_set( runtime: &R, backend: &BE, at_header: &B::Header, - beefy_genesis: NumberFor, ) -> ClientResult> where B: Block, @@ -540,6 +539,7 @@ where R: ProvideRuntimeApi, R::Api: BeefyApi, { + debug!(target: LOG_TARGET, "🥩 Try to find validator set active at header: {:?}", at_header); runtime .runtime_api() .validator_set(at_header.hash()) @@ -550,14 +550,14 @@ where // Digest emitted when validator set active 'at_header' was enacted. let blockchain = backend.blockchain(); let mut header = at_header.clone(); - while *header.number() >= beefy_genesis { + loop { + debug!(target: LOG_TARGET, "🥩 look for auth set change digest in header number: {:?}", *header.number()); match worker::find_authorities_change::(&header) { Some(active) => return Some(active), // Move up the chain. 
None => header = blockchain.expect_header(*header.parent_hash()).ok()?, } } - None }) .ok_or_else(|| ClientError::Backend("Could not find initial validator set".into())) } diff --git a/substrate/client/consensus/beefy/src/tests.rs b/substrate/client/consensus/beefy/src/tests.rs index 3bb65e9d57f435b78b9faccffb687bc4ab64757d..3aaa59cbfa1c1c99fdb19c4628b1df19448e0f75 100644 --- a/substrate/client/consensus/beefy/src/tests.rs +++ b/substrate/client/consensus/beefy/src/tests.rs @@ -35,6 +35,7 @@ use crate::{ }; use futures::{future, stream::FuturesUnordered, Future, FutureExt, StreamExt}; use parking_lot::Mutex; +use sc_block_builder::BlockBuilderBuilder; use sc_client_api::{Backend as BackendT, BlockchainEvents, FinalityNotifications, HeaderBackend}; use sc_consensus::{ BlockImport, BlockImportParams, BoxJustificationImport, ForkChoiceStrategy, ImportResult, @@ -247,7 +248,7 @@ impl TestNetFactory for BeefyTestNet { #[derive(Clone)] pub(crate) struct TestApi { pub beefy_genesis: u64, - pub validator_set: BeefyValidatorSet, + pub validator_set: Option, pub mmr_root_hash: MmrRootHash, pub reported_equivocations: Option, AuthorityId, Signature>>>>>, @@ -261,7 +262,7 @@ impl TestApi { ) -> Self { TestApi { beefy_genesis, - validator_set: validator_set.clone(), + validator_set: Some(validator_set.clone()), mmr_root_hash, reported_equivocations: None, } @@ -270,7 +271,7 @@ impl TestApi { pub fn with_validator_set(validator_set: &BeefyValidatorSet) -> Self { TestApi { beefy_genesis: 1, - validator_set: validator_set.clone(), + validator_set: Some(validator_set.clone()), mmr_root_hash: GOOD_MMR_ROOT, reported_equivocations: None, } @@ -300,7 +301,7 @@ sp_api::mock_impl_runtime_apis! { } fn validator_set() -> Option { - Some(self.inner.validator_set.clone()) + self.inner.validator_set.clone() } fn submit_report_equivocation_unsigned_extrinsic( @@ -741,7 +742,6 @@ async fn correct_beefy_payload() { #[tokio::test] async fn beefy_importing_justifications() { use futures::{future::poll_fn, task::Poll}; - use sc_block_builder::BlockBuilderProvider; use sc_client_api::BlockBackend; sp_tracing::try_init_simple(); @@ -774,8 +774,10 @@ async fn beefy_importing_justifications() { .and_then(|j| j.get(BEEFY_ENGINE_ID).cloned()) }; - let builder = full_client - .new_block_at(full_client.chain_info().genesis_hash, Default::default(), false) + let builder = BlockBuilderBuilder::new(&*full_client) + .on_parent_block(full_client.genesis_hash()) + .with_parent_block_number(0) + .build() .unwrap(); let block = builder.build().unwrap().block; let hashof1 = block.header.hash(); @@ -792,7 +794,11 @@ async fn beefy_importing_justifications() { // Import block 2 with "valid" justification (beefy pallet genesis block not yet reached). let block_num = 2; - let builder = full_client.new_block_at(hashof1, Default::default(), false).unwrap(); + let builder = BlockBuilderBuilder::new(&*full_client) + .on_parent_block(hashof1) + .with_parent_block_number(1) + .build() + .unwrap(); let block = builder.build().unwrap().block; let hashof2 = block.header.hash(); @@ -824,7 +830,11 @@ async fn beefy_importing_justifications() { // Import block 3 with valid justification. 
let block_num = 3; - let builder = full_client.new_block_at(hashof2, Default::default(), false).unwrap(); + let builder = BlockBuilderBuilder::new(&*full_client) + .on_parent_block(hashof2) + .with_parent_block_number(2) + .build() + .unwrap(); let block = builder.build().unwrap().block; let hashof3 = block.header.hash(); let proof = crate::justification::tests::new_finality_proof(block_num, &good_set, keys); @@ -858,7 +868,11 @@ async fn beefy_importing_justifications() { // Import block 4 with invalid justification (incorrect validator set). let block_num = 4; - let builder = full_client.new_block_at(hashof3, Default::default(), false).unwrap(); + let builder = BlockBuilderBuilder::new(&*full_client) + .on_parent_block(hashof3) + .with_parent_block_number(3) + .build() + .unwrap(); let block = builder.build().unwrap().block; let hashof4 = block.header.hash(); let keys = &[BeefyKeyring::Alice]; @@ -1188,6 +1202,54 @@ async fn should_initialize_voter_at_latest_finalized() { assert_eq!(state, persisted_state); } +#[tokio::test] +async fn should_initialize_voter_at_custom_genesis_when_state_unavailable() { + let keys = &[BeefyKeyring::Alice]; + let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); + let mut net = BeefyTestNet::new(1); + let backend = net.peer(0).client().as_backend(); + // custom pallet genesis is block number 7 + let custom_pallet_genesis = 7; + let mut api = TestApi::new(custom_pallet_genesis, &validator_set, GOOD_MMR_ROOT); + // remove validator set from `TestApi`, practically simulating unavailable/pruned runtime state + api.validator_set = None; + + // push 30 blocks with `AuthorityChange` digests every 5 blocks + let hashes = net.generate_blocks_and_sync(30, 5, &validator_set, false).await; + let mut finality = net.peer(0).client().as_client().finality_notification_stream().fuse(); + // finalize 30 without justifications + net.peer(0).client().as_client().finalize_block(hashes[30], None).unwrap(); + + // load persistent state - nothing in DB, should init at genesis + let persisted_state = voter_init_setup(&mut net, &mut finality, &api).await.unwrap(); + + // Test initialization at session boundary. + // verify voter initialized with all sessions pending, first one starting at block 5 (start of + // session containing `custom_pallet_genesis`). 
+ let sessions = persisted_state.voting_oracle().sessions(); + // should have enqueued 6 sessions (every 5 blocks from 5 to 30) + assert_eq!(sessions.len(), 6); + assert_eq!(sessions[0].session_start(), 7); + assert_eq!(sessions[1].session_start(), 10); + assert_eq!(sessions[2].session_start(), 15); + assert_eq!(sessions[3].session_start(), 20); + assert_eq!(sessions[4].session_start(), 25); + assert_eq!(sessions[5].session_start(), 30); + let rounds = persisted_state.active_round().unwrap(); + assert_eq!(rounds.session_start(), custom_pallet_genesis); + assert_eq!(rounds.validator_set_id(), validator_set.id()); + + // verify next vote target is mandatory block 7 (genesis) + assert_eq!(persisted_state.best_beefy_block(), 0); + assert_eq!(persisted_state.best_grandpa_number(), 30); + assert_eq!(persisted_state.voting_oracle().voting_target(), Some(custom_pallet_genesis)); + + // verify state also saved to db + assert!(verify_persisted_version(&*backend)); + let state = load_persistent(&*backend).unwrap().unwrap(); + assert_eq!(state, persisted_state); +} + #[tokio::test] async fn beefy_finalizing_after_pallet_genesis() { sp_tracing::try_init_simple(); @@ -1302,7 +1364,7 @@ async fn gossipped_finality_proofs() { // Only Alice and Bob are running the voter -> finality threshold not reached let peers = [BeefyKeyring::Alice, BeefyKeyring::Bob]; let validator_set = ValidatorSet::new(make_beefy_ids(&validators), 0).unwrap(); - let session_len = 30; + let session_len = 10; let min_block_delta = 1; let mut net = BeefyTestNet::new(3); @@ -1332,14 +1394,8 @@ async fn gossipped_finality_proofs() { let net = Arc::new(Mutex::new(net)); - // Pump net + Charlie gossip to see peers. - let timeout = Box::pin(tokio::time::sleep(Duration::from_millis(200))); - let gossip_engine_pump = &mut charlie_gossip_engine; - let pump_with_timeout = future::select(gossip_engine_pump, timeout); - run_until(pump_with_timeout, &net).await; - - // push 10 blocks - let hashes = net.lock().generate_blocks_and_sync(10, session_len, &validator_set, true).await; + // push 42 blocks + let hashes = net.lock().generate_blocks_and_sync(42, session_len, &validator_set, true).await; let peers = peers.into_iter().enumerate(); diff --git a/substrate/client/consensus/beefy/src/worker.rs b/substrate/client/consensus/beefy/src/worker.rs index a239e34030c27f79ff6c8468e0e847851ef8792a..966c74103657b37b5f51234aae5b8c94b80400d2 100644 --- a/substrate/client/consensus/beefy/src/worker.rs +++ b/substrate/client/consensus/beefy/src/worker.rs @@ -456,6 +456,7 @@ where .filter(|genesis| *genesis == self.persisted_state.pallet_genesis) .ok_or(Error::ConsensusReset)?; + let mut new_session_added = false; if *header.number() > self.best_grandpa_block() { // update best GRANDPA finalized block we have seen self.persisted_state.set_best_grandpa(header.clone()); @@ -475,9 +476,15 @@ where { if let Some(new_validator_set) = find_authorities_change::(&header) { self.init_session_at(new_validator_set, *header.number()); + new_session_added = true; } } + if new_session_added { + crate::aux_schema::write_voter_state(&*self.backend, &self.persisted_state) + .map_err(|e| Error::Backend(e.to_string()))?; + } + // Update gossip validator votes filter. if let Err(e) = self .persisted_state @@ -604,6 +611,11 @@ where VersionedFinalityProof::V1(ref sc) => sc.commitment.block_number, }; + if block_num <= self.persisted_state.voting_oracle.best_beefy_block { + // we've already finalized this round before, short-circuit. 
+ return Ok(()) + } + // Finalize inner round and update voting_oracle state. self.persisted_state.voting_oracle.finalize(block_num)?; @@ -629,7 +641,7 @@ where self.backend .append_justification(hash, (BEEFY_ENGINE_ID, finality_proof.encode())) }) { - error!( + debug!( target: LOG_TARGET, "🥩 Error {:?} on appending justification: {:?}", e, finality_proof ); @@ -648,7 +660,7 @@ where } /// Handle previously buffered justifications, that now land in the voting interval. - fn try_pending_justififactions(&mut self) -> Result<(), Error> { + fn try_pending_justifications(&mut self) -> Result<(), Error> { // Interval of blocks for which we can process justifications and votes right now. let (start, end) = self.voting_oracle().accepted_interval()?; // Process pending justifications. @@ -782,7 +794,7 @@ where fn process_new_state(&mut self) { // Handle pending justifications and/or votes for now GRANDPA finalized blocks. - if let Err(err) = self.try_pending_justififactions() { + if let Err(err) = self.try_pending_justifications() { debug!(target: LOG_TARGET, "🥩 {}", err); } @@ -843,15 +855,10 @@ where .fuse(), ); + self.process_new_state(); let error = loop { - // Act on changed 'state'. - self.process_new_state(); - // Mutable reference used to drive the gossip engine. let mut gossip_engine = &mut self.comms.gossip_engine; - // Use temp val and report after async section, - // to avoid having to Mutex-wrap `gossip_engine`. - let mut gossip_report: Option = None; // Wait for, and handle external events. // The branches below only change 'state', actual voting happens afterwards, @@ -879,10 +886,15 @@ where if let Err(err) = self.triage_incoming_justif(justif) { debug!(target: LOG_TARGET, "🥩 {}", err); } - gossip_report = Some(peer_report); + self.comms.gossip_engine.report(peer_report.who, peer_report.cost_benefit); + }, + ResponseInfo::PeerReport(peer_report) => { + self.comms.gossip_engine.report(peer_report.who, peer_report.cost_benefit); + continue; + }, + ResponseInfo::Pending => { + continue; }, - ResponseInfo::PeerReport(peer_report) => gossip_report = Some(peer_report), - ResponseInfo::Pending => (), } }, justif = block_import_justif.next() => { @@ -919,12 +931,15 @@ where }, // Process peer reports. report = self.comms.gossip_report_stream.next() => { - gossip_report = report; + if let Some(PeerReport { who, cost_benefit }) = report { + self.comms.gossip_engine.report(who, cost_benefit); + } + continue; }, } - if let Some(PeerReport { who, cost_benefit }) = gossip_report { - self.comms.gossip_engine.report(who, cost_benefit); - } + + // Act on changed 'state'. 
+ self.process_new_state(); }; // return error _and_ `comms` that can be reused diff --git a/substrate/client/consensus/grandpa/Cargo.toml b/substrate/client/consensus/grandpa/Cargo.toml index 472bdd1c5b820e5590d6d38c1e15add9c94e6fd6..85f98e7546e038fffa55802e8355105a93b1f64a 100644 --- a/substrate/client/consensus/grandpa/Cargo.toml +++ b/substrate/client/consensus/grandpa/Cargo.toml @@ -25,7 +25,7 @@ log = "0.4.17" parity-scale-codec = { version = "3.6.1", features = ["derive"] } parking_lot = "0.12.1" rand = "0.8.5" -serde_json = "1.0.107" +serde_json = "1.0.108" thiserror = "1.0" fork-tree = { path = "../../../utils/fork-tree" } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } @@ -37,6 +37,7 @@ sc-consensus = { path = "../common" } sc-network = { path = "../../network" } sc-network-gossip = { path = "../../network-gossip" } sc-network-common = { path = "../../network/common" } +sc-network-sync = { path = "../../network/sync" } sc-telemetry = { path = "../../telemetry" } sc-utils = { path = "../../utils" } sp-api = { path = "../../../primitives/api" } diff --git a/substrate/client/consensus/grandpa/rpc/src/lib.rs b/substrate/client/consensus/grandpa/rpc/src/lib.rs index c6298bff969bd8db2818a255436e8993954addf0..a7daefaab8eb6d94583852621e5d114ee5d5fa4b 100644 --- a/substrate/client/consensus/grandpa/rpc/src/lib.rs +++ b/substrate/client/consensus/grandpa/rpc/src/lib.rs @@ -142,7 +142,7 @@ mod tests { RpcModule, }; use parity_scale_codec::{Decode, Encode}; - use sc_block_builder::{BlockBuilder, RecordProof}; + use sc_block_builder::BlockBuilderBuilder; use sc_consensus_grandpa::{ report, AuthorityId, FinalityProof, GrandpaJustification, GrandpaJustificationSender, }; @@ -335,21 +335,16 @@ mod tests { let peers = &[Ed25519Keyring::Alice]; let builder = TestClientBuilder::new(); - let backend = builder.backend(); let client = builder.build(); let client = Arc::new(client); - let built_block = BlockBuilder::new( - &*client, - client.info().best_hash, - client.info().best_number, - RecordProof::No, - Default::default(), - &*backend, - ) - .unwrap() - .build() - .unwrap(); + let built_block = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.info().best_hash) + .with_parent_block_number(client.info().best_number) + .build() + .unwrap() + .build() + .unwrap(); let block = built_block.block; let block_hash = block.hash(); diff --git a/substrate/client/consensus/grandpa/src/communication/mod.rs b/substrate/client/consensus/grandpa/src/communication/mod.rs index c0749858568f5a09bb25243de7c0c983fcecbd13..6d9e956b41beedf777d994ea6cb21ca8bdd2856c 100644 --- a/substrate/client/consensus/grandpa/src/communication/mod.rs +++ b/substrate/client/consensus/grandpa/src/communication/mod.rs @@ -59,7 +59,7 @@ use crate::{ use gossip::{ FullCatchUpMessage, FullCommitMessage, GossipMessage, GossipValidator, PeerReport, VoteMessage, }; -use sc_network_common::sync::SyncEventStream; +use sc_network_sync::SyncEventStream; use sc_utils::mpsc::TracingUnboundedReceiver; use sp_consensus_grandpa::{AuthorityId, AuthoritySignature, RoundNumber, SetId as SetIdNumber}; diff --git a/substrate/client/consensus/grandpa/src/communication/tests.rs b/substrate/client/consensus/grandpa/src/communication/tests.rs index 10c4772fc76d64078c1f0d5ac413d09c0da91bac..4a869d0f51520387a75ddb7be58c92b64e6b02f1 100644 --- a/substrate/client/consensus/grandpa/src/communication/tests.rs +++ b/substrate/client/consensus/grandpa/src/communication/tests.rs @@ -33,11 +33,9 @@ use 
sc_network::{ NetworkSyncForkRequest, NotificationSenderError, NotificationSenderT as NotificationSender, PeerId, ReputationChange, }; -use sc_network_common::{ - role::ObservedRole, - sync::{SyncEvent as SyncStreamEvent, SyncEventStream}, -}; +use sc_network_common::role::ObservedRole; use sc_network_gossip::Validator; +use sc_network_sync::{SyncEvent as SyncStreamEvent, SyncEventStream}; use sc_network_test::{Block, Hash}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_consensus_grandpa::AuthorityList; diff --git a/substrate/client/consensus/grandpa/src/finality_proof.rs b/substrate/client/consensus/grandpa/src/finality_proof.rs index 8a8a688583e34740b4bf49ca500ff15ee7239b1c..80b6249ade86ca9d44e9ed1284c0e6189731c86c 100644 --- a/substrate/client/consensus/grandpa/src/finality_proof.rs +++ b/substrate/client/consensus/grandpa/src/finality_proof.rs @@ -261,7 +261,7 @@ mod tests { use super::*; use crate::{authorities::AuthoritySetChanges, BlockNumberOps, ClientError, SetId}; use futures::executor::block_on; - use sc_block_builder::BlockBuilderProvider; + use sc_block_builder::BlockBuilderBuilder; use sc_client_api::{apply_aux, LockImportRun}; use sp_consensus::BlockOrigin; use sp_consensus_grandpa::GRANDPA_ENGINE_ID as ID; @@ -323,7 +323,14 @@ mod tests { let mut blocks = Vec::new(); for _ in 0..number_of_blocks { - let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().best_hash) + .with_parent_block_number(client.chain_info().best_number) + .build() + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); blocks.push(block); } diff --git a/substrate/client/consensus/grandpa/src/lib.rs b/substrate/client/consensus/grandpa/src/lib.rs index da621abd254caf90abf5dfab0511d5e868218168..a4584e6fc80791ae0de66f45bbfe49e155e1f5f9 100644 --- a/substrate/client/consensus/grandpa/src/lib.rs +++ b/substrate/client/consensus/grandpa/src/lib.rs @@ -471,9 +471,6 @@ where Client: ExecutorProvider + HeaderBackend, { fn get(&self) -> Result { - // This implementation uses the Grandpa runtime API instead of reading directly from the - // `GRANDPA_AUTHORITIES_KEY` as the data may have been migrated since the genesis block of - // the chain, whereas the runtime API is backwards compatible. self.executor() .call( self.expect_block_hash_from_id(&BlockId::Number(Zero::zero()))?, diff --git a/substrate/client/consensus/grandpa/src/tests.rs b/substrate/client/consensus/grandpa/src/tests.rs index 0175f7d1b473cb8be88da088df4792099f015a31..644befe98853e260ffa5f31ebad5bb839d312248 100644 --- a/substrate/client/consensus/grandpa/src/tests.rs +++ b/substrate/client/consensus/grandpa/src/tests.rs @@ -54,7 +54,7 @@ use tokio::runtime::Handle; use authorities::AuthoritySet; use communication::grandpa_protocol_name; -use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; +use sc_block_builder::{BlockBuilder, BlockBuilderBuilder}; use sc_consensus::LongestChain; use sp_application_crypto::key_types::GRANDPA; @@ -897,8 +897,11 @@ async fn allows_reimporting_change_blocks() { let (mut block_import, ..) 
= net.make_block_import(client.clone()); let full_client = client.as_client(); - let mut builder = full_client - .new_block_at(full_client.chain_info().genesis_hash, Default::default(), false) + let mut builder = BlockBuilderBuilder::new(&*full_client) + .on_parent_block(full_client.chain_info().best_hash) + .fetch_parent_block_number(&*full_client) + .unwrap() + .build() .unwrap(); add_scheduled_change( @@ -942,8 +945,11 @@ async fn test_bad_justification() { let (mut block_import, ..) = net.make_block_import(client.clone()); let full_client = client.as_client(); - let mut builder = full_client - .new_block_at(full_client.chain_info().genesis_hash, Default::default(), false) + let mut builder = BlockBuilderBuilder::new(&*full_client) + .on_parent_block(full_client.chain_info().best_hash) + .fetch_parent_block_number(&*full_client) + .unwrap() + .build() .unwrap(); add_scheduled_change( @@ -1913,7 +1919,12 @@ async fn imports_justification_for_regular_blocks_on_import() { // create a new block (without importing it) let generate_block = |parent| { - let builder = full_client.new_block_at(parent, Default::default(), false).unwrap(); + let builder = BlockBuilderBuilder::new(&*full_client) + .on_parent_block(parent) + .fetch_parent_block_number(&*full_client) + .unwrap() + .build() + .unwrap(); builder.build().unwrap().block }; @@ -2042,8 +2053,7 @@ async fn revert_prunes_authority_changes() { let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; - type TestBlockBuilder<'a> = - BlockBuilder<'a, Block, PeersFullClient, substrate_test_runtime_client::Backend>; + type TestBlockBuilder<'a> = BlockBuilder<'a, Block, PeersFullClient>; let edit_block = |mut builder: TestBlockBuilder| { add_scheduled_change( &mut builder, diff --git a/substrate/client/consensus/grandpa/src/voting_rule.rs b/substrate/client/consensus/grandpa/src/voting_rule.rs index 27a91d54783705e1ff2beeac72bfa1621062a04e..e09780739c7353418a81488a25060eba0231ebfc 100644 --- a/substrate/client/consensus/grandpa/src/voting_rule.rs +++ b/substrate/client/consensus/grandpa/src/voting_rule.rs @@ -330,7 +330,7 @@ where #[cfg(test)] mod tests { use super::*; - use sc_block_builder::BlockBuilderProvider; + use sc_block_builder::BlockBuilderBuilder; use sp_consensus::BlockOrigin; use sp_runtime::traits::Header as _; @@ -371,7 +371,14 @@ mod tests { let mut hashes = Vec::with_capacity(200); for _ in 0..200 { - let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().best_hash) + .with_parent_block_number(client.chain_info().best_number) + .build() + .unwrap() + .build() + .unwrap() + .block; hashes.push(block.hash()); futures::executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); @@ -414,7 +421,14 @@ mod tests { let n = 5; let mut hashes = Vec::with_capacity(n); for _ in 0..n { - let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().best_hash) + .with_parent_block_number(client.chain_info().best_number) + .build() + .unwrap() + .build() + .unwrap() + .block; hashes.push(block.hash()); futures::executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); diff --git a/substrate/client/consensus/grandpa/src/warp_proof.rs b/substrate/client/consensus/grandpa/src/warp_proof.rs index ea8114eafd789ec00a909b5535f0659ceea1fbfe..a0fae6998f5a78fc300df1d2ec1d0d1121e5f15e 100644 
--- a/substrate/client/consensus/grandpa/src/warp_proof.rs +++ b/substrate/client/consensus/grandpa/src/warp_proof.rs @@ -23,7 +23,7 @@ use crate::{ BlockNumberOps, GrandpaJustification, SharedAuthoritySet, }; use sc_client_api::Backend as ClientBackend; -use sc_network_common::sync::warp::{EncodedProof, VerificationResult, WarpSyncProvider}; +use sc_network_sync::warp::{EncodedProof, VerificationResult, WarpSyncProvider}; use sp_blockchain::{Backend as BlockchainBackend, HeaderBackend}; use sp_consensus_grandpa::{AuthorityList, SetId, GRANDPA_ENGINE_ID}; use sp_runtime::{ @@ -322,7 +322,7 @@ mod tests { use crate::{AuthoritySetChanges, GrandpaJustification}; use parity_scale_codec::Encode; use rand::prelude::*; - use sc_block_builder::BlockBuilderProvider; + use sc_block_builder::BlockBuilderBuilder; use sp_blockchain::HeaderBackend; use sp_consensus::BlockOrigin; use sp_consensus_grandpa::GRANDPA_ENGINE_ID; @@ -348,7 +348,11 @@ mod tests { let mut authority_set_changes = Vec::new(); for n in 1..=100 { - let mut builder = client.new_block(Default::default()).unwrap(); + let mut builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().best_hash) + .with_parent_block_number(client.chain_info().best_number) + .build() + .unwrap(); let mut new_authorities = None; // we will trigger an authority set change every 10 blocks diff --git a/substrate/client/consensus/manual-seal/src/lib.rs b/substrate/client/consensus/manual-seal/src/lib.rs index 41cd5f3127e8ead7508bb2a7c375aab9bc355892..e3608f6716c2631e5079c08e59ef991903b12d73 100644 --- a/substrate/client/consensus/manual-seal/src/lib.rs +++ b/substrate/client/consensus/manual-seal/src/lib.rs @@ -236,7 +236,7 @@ pub async fn run_instant_seal( // instant-seal creates blocks as soon as transactions are imported // into the transaction pool. let commands_stream = pool.import_notification_stream().map(|_| EngineCommand::SealNewBlock { - create_empty: false, + create_empty: true, finalize: false, parent_hash: None, sender: None, diff --git a/substrate/client/executor/wasmtime/Cargo.toml b/substrate/client/executor/wasmtime/Cargo.toml index fee1afc96666bc55b11351bc200ca213ded7f346..261d52c0ede3292292cff4f47b1196f7ea55d8ff 100644 --- a/substrate/client/executor/wasmtime/Cargo.toml +++ b/substrate/client/executor/wasmtime/Cargo.toml @@ -16,6 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] log = "0.4.17" cfg-if = "1.0" libc = "0.2.121" +parking_lot = "0.12.1" # When bumping wasmtime do not forget to also bump rustix # to exactly the same version as used by wasmtime! diff --git a/substrate/client/executor/wasmtime/src/instance_wrapper.rs b/substrate/client/executor/wasmtime/src/instance_wrapper.rs index acc799061c27ca9ab7b96bde403f7e092d127bd6..8852532adbcaa40293f2d7dfd0a326572d3c1ca4 100644 --- a/substrate/client/executor/wasmtime/src/instance_wrapper.rs +++ b/substrate/client/executor/wasmtime/src/instance_wrapper.rs @@ -19,7 +19,9 @@ //! Defines data and logic needed for interaction with an WebAssembly instance of a substrate //! runtime module. 
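The instant-seal change above (sealing even when the pool import delivers nothing, via `create_empty: true`) uses the same command type that callers can push to a manual-seal node themselves; a hedged sketch, assuming `commands_sender` is the futures channel handed to the seal task:

// Ask the manual-seal engine to seal one block right now, fire-and-forget.
commands_sender
    .unbounded_send(EngineCommand::SealNewBlock {
        create_empty: true, // seal even if the transaction pool is empty
        finalize: false,    // do not finalize the sealed block immediately
        parent_hash: None,  // build on the current best block
        sender: None,       // no result channel, we do not wait for the outcome
    })
    .unwrap();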
-use crate::runtime::{Store, StoreData}; +use std::sync::Arc; + +use crate::runtime::{InstanceCounter, ReleaseInstanceHandle, Store, StoreData}; use sc_executor_common::{ error::{Backtrace, Error, MessageWithBacktrace, Result, WasmError}, wasm_runtime::InvokeMethod, @@ -154,10 +156,19 @@ impl sc_allocator::Memory for MemoryWrapper<'_, C> { pub struct InstanceWrapper { instance: Instance, store: Store, + // NOTE: We want to decrement the instance counter *after* the store has been dropped + // to avoid a potential race condition, so this field must always be kept + // as the last field in the struct! + _release_instance_handle: ReleaseInstanceHandle, } impl InstanceWrapper { - pub(crate) fn new(engine: &Engine, instance_pre: &InstancePre) -> Result { + pub(crate) fn new( + engine: &Engine, + instance_pre: &InstancePre, + instance_counter: Arc, + ) -> Result { + let _release_instance_handle = instance_counter.acquire_instance(); let mut store = Store::new(engine, Default::default()); let instance = instance_pre.instantiate(&mut store).map_err(|error| { WasmError::Other(format!( @@ -172,7 +183,7 @@ impl InstanceWrapper { store.data_mut().memory = Some(memory); store.data_mut().table = table; - Ok(InstanceWrapper { instance, store }) + Ok(InstanceWrapper { instance, store, _release_instance_handle }) } /// Resolves a substrate entrypoint by the given name. diff --git a/substrate/client/executor/wasmtime/src/runtime.rs b/substrate/client/executor/wasmtime/src/runtime.rs index ae78137959be1a4799bce16d23d2654931be4c47..ac88663f4e7915e49262ee8186bfaf7d92f6064d 100644 --- a/substrate/client/executor/wasmtime/src/runtime.rs +++ b/substrate/client/executor/wasmtime/src/runtime.rs @@ -24,6 +24,7 @@ use crate::{ util::{self, replace_strategy_if_broken}, }; +use parking_lot::Mutex; use sc_allocator::{AllocationStats, FreeingBumpHeapAllocator}; use sc_executor_common::{ error::{Error, Result, WasmError}, @@ -42,6 +43,8 @@ use std::{ }; use wasmtime::{AsContext, Engine, Memory, Table}; +const MAX_INSTANCE_COUNT: u32 = 64; + #[derive(Default)] pub(crate) struct StoreData { /// This will only be set when we call into the runtime. @@ -73,11 +76,59 @@ enum Strategy { struct InstanceCreator { engine: Engine, instance_pre: Arc>, + instance_counter: Arc, } impl InstanceCreator { fn instantiate(&mut self) -> Result { - InstanceWrapper::new(&self.engine, &self.instance_pre) + InstanceWrapper::new(&self.engine, &self.instance_pre, self.instance_counter.clone()) + } +} + +/// A handle for releasing an instance acquired by [`InstanceCounter::acquire_instance`]. +pub(crate) struct ReleaseInstanceHandle { + counter: Arc, +} + +impl Drop for ReleaseInstanceHandle { + fn drop(&mut self) { + { + let mut counter = self.counter.counter.lock(); + *counter = counter.saturating_sub(1); + } + + self.counter.wait_for_instance.notify_one(); + } +} + +/// Keeps track on the number of parallel instances. +/// +/// The runtime cache keeps track on the number of parallel instances. The maximum number in the +/// cache is less than what we have configured as [`MAX_INSTANCE_COUNT`] for wasmtime. However, the +/// cache will create on demand instances if required. This instance counter will ensure that we are +/// blocking when we are trying to create too many instances. +#[derive(Default)] +pub(crate) struct InstanceCounter { + counter: Mutex, + wait_for_instance: parking_lot::Condvar, +} + +impl InstanceCounter { + /// Acquire an instance. + /// + /// Blocks if there is no free instance available. 
+ /// + /// The returned [`ReleaseInstanceHandle`] should be dropped when the instance isn't used + /// anymore. + pub fn acquire_instance(self: Arc) -> ReleaseInstanceHandle { + let mut counter = self.counter.lock(); + + while *counter >= MAX_INSTANCE_COUNT { + self.wait_for_instance.wait(&mut counter); + } + *counter += 1; + + ReleaseInstanceHandle { counter: self.clone() } } } @@ -87,6 +138,7 @@ pub struct WasmtimeRuntime { engine: Engine, instance_pre: Arc>, instantiation_strategy: InternalInstantiationStrategy, + instance_counter: Arc, } impl WasmModule for WasmtimeRuntime { @@ -95,6 +147,7 @@ impl WasmModule for WasmtimeRuntime { InternalInstantiationStrategy::Builtin => Strategy::RecreateInstance(InstanceCreator { engine: self.engine.clone(), instance_pre: self.instance_pre.clone(), + instance_counter: self.instance_counter.clone(), }), }; @@ -277,7 +330,7 @@ fn common_config(semantics: &Semantics) -> std::result::Result std::result::Result, TraitError> { self.sign::(key_type, public, msg) } + + fn ecdsa_bls377_public_keys(&self, key_type: KeyTypeId) -> Vec { + self.public_keys::(key_type) + } + + /// Generate a new pair of paired-keys compatible with the '(ecdsa,bls377)' signature scheme. + /// + /// If `[seed]` is `Some` then the key will be ephemeral and stored in memory. + fn ecdsa_bls377_generate_new( + &self, + key_type: KeyTypeId, + seed: Option<&str>, + ) -> std::result::Result { + self.generate_new::(key_type, seed) + } + + fn ecdsa_bls377_sign( + &self, + key_type: KeyTypeId, + public: &ecdsa_bls377::Public, + msg: &[u8], + ) -> std::result::Result, TraitError> { + self.sign::(key_type, public, msg) + } + } } diff --git a/substrate/client/merkle-mountain-range/rpc/Cargo.toml b/substrate/client/merkle-mountain-range/rpc/Cargo.toml index 05d8547d243ffa35f74783accddb7a0deac8e5db..e75c5f1baa86f641bdcf839563c5a995d19adb35 100644 --- a/substrate/client/merkle-mountain-range/rpc/Cargo.toml +++ b/substrate/client/merkle-mountain-range/rpc/Cargo.toml @@ -23,4 +23,4 @@ sp-runtime = { path = "../../../primitives/runtime" } anyhow = "1" [dev-dependencies] -serde_json = "1.0.107" +serde_json = "1.0.108" diff --git a/substrate/client/merkle-mountain-range/src/test_utils.rs b/substrate/client/merkle-mountain-range/src/test_utils.rs index 010b48bb3d7da36914f40d106f08bb1938974e7c..5775b4cfe67cd49878508521ec72ba9af413a187 100644 --- a/substrate/client/merkle-mountain-range/src/test_utils.rs +++ b/substrate/client/merkle-mountain-range/src/test_utils.rs @@ -18,7 +18,7 @@ use crate::MmrGadget; use parking_lot::Mutex; -use sc_block_builder::BlockBuilderProvider; +use sc_block_builder::BlockBuilderBuilder; use sc_client_api::{ Backend as BackendT, BlockchainEvents, FinalityNotifications, ImportNotifications, StorageEventStream, StorageKey, @@ -125,7 +125,12 @@ impl MockClient { let mut client = self.client.lock(); let hash = client.expect_block_hash_from_id(&at).unwrap(); - let mut block_builder = client.new_block_at(hash, Default::default(), false).unwrap(); + let mut block_builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(hash) + .fetch_parent_block_number(&*client) + .unwrap() + .build() + .unwrap(); // Make sure the block has a different hash than its siblings block_builder .push_storage_change(b"name".to_vec(), Some(name.to_vec())) diff --git a/substrate/client/network-gossip/Cargo.toml b/substrate/client/network-gossip/Cargo.toml index 73d2d3fa051e72725688b69469f431cfa9b90776..95e26a232c1dcf58d792c649db882722407077bd 100644 --- a/substrate/client/network-gossip/Cargo.toml 
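Back to the wasmtime `InstanceCounter` introduced in runtime.rs above: the blocking it describes is a plain mutex-plus-condvar counting gate. The stripped-down sketch below shows only that pattern (names and the cap are illustrative; the real type hands out `ReleaseInstanceHandle`s instead of the `Permit` used here).

use parking_lot::{Condvar, Mutex};
use std::sync::Arc;

const CAP: u32 = 64;

#[derive(Default)]
struct Gate {
    count: Mutex<u32>,
    freed: Condvar,
}

// Dropping a `Permit` frees a slot and wakes one waiter.
struct Permit(Arc<Gate>);

impl Gate {
    // Blocks while the number of outstanding permits is at the cap.
    fn acquire(self: Arc<Self>) -> Permit {
        let mut count = self.count.lock();
        while *count >= CAP {
            self.freed.wait(&mut count);
        }
        *count += 1;
        drop(count);
        Permit(self)
    }
}

impl Drop for Permit {
    fn drop(&mut self) {
        *self.0.count.lock() -= 1;
        self.0.freed.notify_one();
    }
}

A slot is taken with `let _permit = gate.clone().acquire();` and released automatically when `_permit` goes out of scope, mirroring how `ReleaseInstanceHandle` is stored in `InstanceWrapper` above.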
+++ b/substrate/client/network-gossip/Cargo.toml @@ -24,6 +24,7 @@ tracing = "0.1.29" prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" } sc-network = { path = "../network" } sc-network-common = { path = "../network/common" } +sc-network-sync = { path = "../network/sync" } sp-runtime = { path = "../../primitives/runtime" } [dev-dependencies] diff --git a/substrate/client/network-gossip/src/bridge.rs b/substrate/client/network-gossip/src/bridge.rs index 6a3790ee2b2b2b098dd0f039a67c281b9da0bf95..8f7d490757b3ec1400d0dce44a4d70e642ef45be 100644 --- a/substrate/client/network-gossip/src/bridge.rs +++ b/substrate/client/network-gossip/src/bridge.rs @@ -22,7 +22,7 @@ use crate::{ }; use sc_network::{event::Event, types::ProtocolName, ReputationChange}; -use sc_network_common::sync::SyncEvent; +use sc_network_sync::SyncEvent; use futures::{ channel::mpsc::{channel, Receiver, Sender}, @@ -338,7 +338,8 @@ mod tests { config::MultiaddrWithPeerId, NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers, NotificationSenderError, NotificationSenderT as NotificationSender, }; - use sc_network_common::{role::ObservedRole, sync::SyncEventStream}; + use sc_network_common::role::ObservedRole; + use sc_network_sync::SyncEventStream; use sp_runtime::{ testing::H256, traits::{Block as BlockT, NumberFor}, diff --git a/substrate/client/network-gossip/src/lib.rs b/substrate/client/network-gossip/src/lib.rs index 5b02be5c23f69f630957434190d2da6a8a654309..a77141ec6f63b52cd04e8dd60a00243b49ec0570 100644 --- a/substrate/client/network-gossip/src/lib.rs +++ b/substrate/client/network-gossip/src/lib.rs @@ -71,7 +71,7 @@ use libp2p::{multiaddr, PeerId}; use sc_network::{ types::ProtocolName, NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers, }; -use sc_network_common::sync::SyncEventStream; +use sc_network_sync::SyncEventStream; use sp_runtime::traits::{Block as BlockT, NumberFor}; use std::iter; diff --git a/substrate/client/network/Cargo.toml b/substrate/client/network/Cargo.toml index 8b188176fc5e57f487ac74aaa7f9b3c179183846..7b0536addda4337eacfd9a116ced47253e1d7089 100644 --- a/substrate/client/network/Cargo.toml +++ b/substrate/client/network/Cargo.toml @@ -34,7 +34,7 @@ partial_sort = "0.2.0" pin-project = "1.0.12" rand = "0.8.5" serde = { version = "1.0.188", features = ["derive"] } -serde_json = "1.0.107" +serde_json = "1.0.108" smallvec = "1.11.0" thiserror = "1.0" unsigned-varint = { version = "0.7.1", features = ["futures", "asynchronous_codec"] } diff --git a/substrate/client/network/bitswap/src/lib.rs b/substrate/client/network/bitswap/src/lib.rs index 7bb8b00306534c991a5e7fb317d9255952f24bc6..9200c2fa7726bbae679a884bdd119955364a585e 100644 --- a/substrate/client/network/bitswap/src/lib.rs +++ b/substrate/client/network/bitswap/src/lib.rs @@ -290,7 +290,7 @@ pub enum BitswapError { mod tests { use super::*; use futures::channel::oneshot; - use sc_block_builder::BlockBuilderProvider; + use sc_block_builder::BlockBuilderBuilder; use schema::bitswap::{ message::{wantlist::Entry, Wantlist}, Message as BitswapMessage, @@ -468,7 +468,11 @@ mod tests { #[tokio::test] async fn transaction_found() { let mut client = TestClientBuilder::with_tx_storage(u32::MAX).build(); - let mut block_builder = client.new_block(Default::default()).unwrap(); + let mut block_builder = BlockBuilderBuilder::new(&client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap(); // encoded extrinsic: [161, .. 
, 2, 6, 16, 19, 55, 19, 56] let ext = ExtrinsicBuilder::new_indexed_call(vec![0x13, 0x37, 0x13, 0x38]).build(); diff --git a/substrate/client/network/common/src/role.rs b/substrate/client/network/common/src/role.rs index cd43f6655b72cc2d5f1b35c0187ebcfb7a2a9b8c..fd02c00e2324afb59e10c441b5018ea292149fad 100644 --- a/substrate/client/network/common/src/role.rs +++ b/substrate/client/network/common/src/role.rs @@ -16,6 +16,10 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +// file-level lint whitelist to avoid problem with bitflags macro below +// TODO: can be dropped after an update to bitflags 2.4 +#![allow(clippy::bad_bit_mask)] + use codec::{self, Encode, EncodeLike, Input, Output}; /// Role that the peer sent to us during the handshake, with the addition of what our local node diff --git a/substrate/client/network/common/src/sync.rs b/substrate/client/network/common/src/sync.rs index 4ca21221f87febc84231b34af73433a0e29d33c5..a910740aef64b0614f8eb61fa4c502ac0b2aec6f 100644 --- a/substrate/client/network/common/src/sync.rs +++ b/substrate/client/network/common/src/sync.rs @@ -19,150 +19,6 @@ //! Abstract interfaces and data structures related to network sync. pub mod message; -pub mod metrics; -pub mod warp; - -use crate::{role::Roles, types::ReputationChange}; -use futures::Stream; - -use libp2p_identity::PeerId; - -use message::{BlockAnnounce, BlockRequest, BlockResponse}; -use sc_consensus::{import_queue::RuntimeOrigin, IncomingBlock}; -use sp_consensus::BlockOrigin; -use sp_runtime::{ - traits::{Block as BlockT, NumberFor}, - Justifications, -}; -use warp::WarpSyncProgress; - -use std::{any::Any, fmt, fmt::Formatter, pin::Pin, sync::Arc}; - -/// The sync status of a peer we are trying to sync with -#[derive(Debug)] -pub struct PeerInfo { - /// Their best block hash. - pub best_hash: Block::Hash, - /// Their best block number. - pub best_number: NumberFor, -} - -/// Info about a peer's known state (both full and light). -#[derive(Clone, Debug)] -pub struct ExtendedPeerInfo { - /// Roles - pub roles: Roles, - /// Peer best block hash - pub best_hash: B::Hash, - /// Peer best block number - pub best_number: NumberFor, -} - -/// Reported sync state. -#[derive(Clone, Eq, PartialEq, Debug)] -pub enum SyncState { - /// Initial sync is complete, keep-up sync is active. - Idle, - /// Actively catching up with the chain. - Downloading { target: BlockNumber }, - /// All blocks are downloaded and are being imported. - Importing { target: BlockNumber }, -} - -impl SyncState { - /// Are we actively catching up with the chain? - pub fn is_major_syncing(&self) -> bool { - !matches!(self, SyncState::Idle) - } -} - -/// Reported state download progress. -#[derive(Clone, Eq, PartialEq, Debug)] -pub struct StateDownloadProgress { - /// Estimated download percentage. - pub percentage: u32, - /// Total state size in bytes downloaded so far. - pub size: u64, -} - -/// Syncing status and statistics. -#[derive(Debug, Clone)] -pub struct SyncStatus { - /// Current global sync state. - pub state: SyncState>, - /// Target sync block number. - pub best_seen_block: Option>, - /// Number of peers participating in syncing. - pub num_peers: u32, - /// Number of peers known to `SyncingEngine` (both full and light). - pub num_connected_peers: u32, - /// Number of blocks queued for import - pub queued_blocks: u32, - /// State sync status in progress, if any. - pub state_sync: Option, - /// Warp sync in progress, if any. 
- pub warp_sync: Option>, -} - -/// A peer did not behave as expected and should be reported. -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct BadPeer(pub PeerId, pub ReputationChange); - -impl fmt::Display for BadPeer { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "Bad peer {}; Reputation change: {:?}", self.0, self.1) - } -} - -impl std::error::Error for BadPeer {} - -/// Action that the parent of [`ChainSync`] should perform if we want to import blocks. -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct ImportBlocksAction { - pub origin: BlockOrigin, - pub blocks: Vec>, -} - -/// Result of [`ChainSync::on_block_data`]. -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum OnBlockData { - /// The block should be imported. - Import(ImportBlocksAction), - /// A new block request needs to be made to the given peer. - Request(PeerId, BlockRequest), - /// Continue processing events. - Continue, -} - -/// Result of [`ChainSync::on_block_justification`]. -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum OnBlockJustification { - /// The justification needs no further handling. - Nothing, - /// The justification should be imported. - Import { - peer_id: PeerId, - hash: Block::Hash, - number: NumberFor, - justifications: Justifications, - }, -} - -/// Result of `ChainSync::on_state_data`. -#[derive(Debug)] -pub enum OnStateData { - /// The block and state that should be imported. - Import(BlockOrigin, IncomingBlock), - /// A new state request needs to be made to the given peer. - Continue, -} - -/// Block or justification request polled from `ChainSync` -#[derive(Debug)] -pub enum ImportResult { - BlockImport(BlockOrigin, Vec>), - JustificationImport(RuntimeOrigin, B::Hash, NumberFor, Justifications), -} /// Sync operation mode. #[derive(Copy, Clone, Debug, Eq, PartialEq)] @@ -197,201 +53,3 @@ impl Default for SyncMode { Self::Full } } -#[derive(Debug)] -pub struct Metrics { - pub queued_blocks: u32, - pub fork_targets: u32, - pub justifications: metrics::Metrics, -} - -#[derive(Debug)] -pub enum PeerRequest { - Block(BlockRequest), - State, - WarpProof, -} - -#[derive(Debug)] -pub enum PeerRequestType { - Block, - State, - WarpProof, -} - -impl PeerRequest { - pub fn get_type(&self) -> PeerRequestType { - match self { - PeerRequest::Block(_) => PeerRequestType::Block, - PeerRequest::State => PeerRequestType::State, - PeerRequest::WarpProof => PeerRequestType::WarpProof, - } - } -} - -/// Wrapper for implementation-specific state request. -/// -/// NOTE: Implementation must be able to encode and decode it for network purposes. -pub struct OpaqueStateRequest(pub Box); - -impl fmt::Debug for OpaqueStateRequest { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - f.debug_struct("OpaqueStateRequest").finish() - } -} - -/// Wrapper for implementation-specific state response. -/// -/// NOTE: Implementation must be able to encode and decode it for network purposes. -pub struct OpaqueStateResponse(pub Box); - -impl fmt::Debug for OpaqueStateResponse { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - f.debug_struct("OpaqueStateResponse").finish() - } -} - -/// Provides high-level status of syncing. -#[async_trait::async_trait] -pub trait SyncStatusProvider: Send + Sync { - /// Get high-level view of the syncing status. 
- async fn status(&self) -> Result, ()>; -} - -#[async_trait::async_trait] -impl SyncStatusProvider for Arc -where - T: ?Sized, - T: SyncStatusProvider, - Block: BlockT, -{ - async fn status(&self) -> Result, ()> { - T::status(self).await - } -} - -/// Syncing-related events that other protocols can subscribe to. -pub enum SyncEvent { - /// Peer that the syncing implementation is tracking connected. - PeerConnected(PeerId), - - /// Peer that the syncing implementation was tracking disconnected. - PeerDisconnected(PeerId), -} - -pub trait SyncEventStream: Send + Sync { - /// Subscribe to syncing-related events. - fn event_stream(&self, name: &'static str) -> Pin + Send>>; -} - -impl SyncEventStream for Arc -where - T: ?Sized, - T: SyncEventStream, -{ - fn event_stream(&self, name: &'static str) -> Pin + Send>> { - T::event_stream(self, name) - } -} - -/// Something that represents the syncing strategy to download past and future blocks of the chain. -pub trait ChainSync: Send { - /// Returns the state of the sync of the given peer. - /// - /// Returns `None` if the peer is unknown. - fn peer_info(&self, who: &PeerId) -> Option>; - - /// Returns the current sync status. - fn status(&self) -> SyncStatus; - - /// Number of active forks requests. This includes - /// requests that are pending or could be issued right away. - fn num_sync_requests(&self) -> usize; - - /// Number of downloaded blocks. - fn num_downloaded_blocks(&self) -> usize; - - /// Returns the current number of peers stored within this state machine. - fn num_peers(&self) -> usize; - - /// Handle a new connected peer. - /// - /// Call this method whenever we connect to a new peer. - #[must_use] - fn new_peer( - &mut self, - who: PeerId, - best_hash: Block::Hash, - best_number: NumberFor, - ) -> Result>, BadPeer>; - - /// Signal that a new best block has been imported. - fn update_chain_info(&mut self, best_hash: &Block::Hash, best_number: NumberFor); - - /// Schedule a justification request for the given block. - fn request_justification(&mut self, hash: &Block::Hash, number: NumberFor); - - /// Clear all pending justification requests. - fn clear_justification_requests(&mut self); - - /// Request syncing for the given block from given set of peers. - fn set_sync_fork_request( - &mut self, - peers: Vec, - hash: &Block::Hash, - number: NumberFor, - ); - - /// Handle a response from the remote to a block request that we made. - /// - /// `request` must be the original request that triggered `response`. - /// or `None` if data comes from the block announcement. - /// - /// If this corresponds to a valid block, this outputs the block that - /// must be imported in the import queue. - #[must_use] - fn on_block_data( - &mut self, - who: &PeerId, - request: Option>, - response: BlockResponse, - ) -> Result, BadPeer>; - - /// Handle a response from the remote to a justification request that we made. - /// - /// `request` must be the original request that triggered `response`. - #[must_use] - fn on_block_justification( - &mut self, - who: PeerId, - response: BlockResponse, - ) -> Result, BadPeer>; - - /// Call this when a justification has been processed by the import queue, - /// with or without errors. - fn on_justification_import( - &mut self, - hash: Block::Hash, - number: NumberFor, - success: bool, - ); - - /// Notify about finalization of the given block. - fn on_block_finalized(&mut self, hash: &Block::Hash, number: NumberFor); - - /// Notify about pre-validated block announcement. 
- fn on_validated_block_announce( - &mut self, - is_best: bool, - who: PeerId, - announce: &BlockAnnounce, - ); - - /// Call when a peer has disconnected. - /// Canceled obsolete block request may result in some blocks being ready for - /// import, so this functions checks for such blocks and returns them. - #[must_use] - fn peer_disconnected(&mut self, who: &PeerId) -> Option>; - - /// Return some key metrics. - fn metrics(&self) -> Metrics; -} diff --git a/substrate/client/network/common/src/sync/warp.rs b/substrate/client/network/common/src/sync/warp.rs deleted file mode 100644 index f4e39f438512889742c13f3081fe66554f2688ae..0000000000000000000000000000000000000000 --- a/substrate/client/network/common/src/sync/warp.rs +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -use codec::{Decode, Encode}; -pub use sp_consensus_grandpa::{AuthorityList, SetId}; -use sp_runtime::traits::{Block as BlockT, NumberFor}; -use std::fmt; - -/// Scale-encoded warp sync proof response. -pub struct EncodedProof(pub Vec); - -/// Warp sync request -#[derive(Encode, Decode, Debug)] -pub struct WarpProofRequest { - /// Start collecting proofs from this block. - pub begin: B::Hash, -} - -/// Proof verification result. -pub enum VerificationResult { - /// Proof is valid, but the target was not reached. - Partial(SetId, AuthorityList, Block::Hash), - /// Target finality is proved. - Complete(SetId, AuthorityList, Block::Header), -} - -/// Warp sync backend. Handles retrieving and verifying warp sync proofs. -pub trait WarpSyncProvider: Send + Sync { - /// Generate proof starting at given block hash. The proof is accumulated until maximum proof - /// size is reached. - fn generate( - &self, - start: Block::Hash, - ) -> Result>; - /// Verify warp proof against current set of authorities. - fn verify( - &self, - proof: &EncodedProof, - set_id: SetId, - authorities: AuthorityList, - ) -> Result, Box>; - /// Get current list of authorities. This is supposed to be genesis authorities when starting - /// sync. - fn current_authorities(&self) -> AuthorityList; -} - -/// Reported warp sync phase. -#[derive(Clone, Eq, PartialEq, Debug)] -pub enum WarpSyncPhase { - /// Waiting for peers to connect. - AwaitingPeers { required_peers: usize }, - /// Waiting for target block to be received. - AwaitingTargetBlock, - /// Downloading and verifying grandpa warp proofs. - DownloadingWarpProofs, - /// Downloading target block. - DownloadingTargetBlock, - /// Downloading state data. - DownloadingState, - /// Importing state. - ImportingState, - /// Downloading block history. 
- DownloadingBlocks(NumberFor), -} - -impl fmt::Display for WarpSyncPhase { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Self::AwaitingPeers { required_peers } => - write!(f, "Waiting for {required_peers} peers to be connected"), - Self::AwaitingTargetBlock => write!(f, "Waiting for target block to be received"), - Self::DownloadingWarpProofs => write!(f, "Downloading finality proofs"), - Self::DownloadingTargetBlock => write!(f, "Downloading target block"), - Self::DownloadingState => write!(f, "Downloading state"), - Self::ImportingState => write!(f, "Importing state"), - Self::DownloadingBlocks(n) => write!(f, "Downloading block history (#{})", n), - } - } -} - -/// Reported warp sync progress. -#[derive(Clone, Eq, PartialEq, Debug)] -pub struct WarpSyncProgress { - /// Estimated download percentage. - pub phase: WarpSyncPhase, - /// Total bytes downloaded so far. - pub total_bytes: u64, -} diff --git a/substrate/client/network/src/config.rs b/substrate/client/network/src/config.rs index d069c3f458ff5d0391286687f10a08aefc5793fc..124d73a74dbce43b4c4f004c93ef0c43d31c7262 100644 --- a/substrate/client/network/src/config.rs +++ b/substrate/client/network/src/config.rs @@ -30,7 +30,11 @@ pub use crate::{ types::ProtocolName, }; -pub use libp2p::{identity::Keypair, multiaddr, Multiaddr, PeerId}; +pub use libp2p::{ + build_multiaddr, + identity::{self, ed25519, Keypair}, + multiaddr, Multiaddr, PeerId, +}; use crate::peer_store::PeerStoreHandle; use codec::Encode; @@ -39,9 +43,10 @@ use zeroize::Zeroize; pub use sc_network_common::{ role::{Role, Roles}, - sync::{warp::WarpSyncProvider, SyncMode}, + sync::SyncMode, ExHashT, }; + use sc_utils::mpsc::TracingUnboundedSender; use sp_runtime::traits::Block as BlockT; @@ -58,11 +63,6 @@ use std::{ str::{self, FromStr}, }; -pub use libp2p::{ - build_multiaddr, - identity::{self, ed25519}, -}; - /// Protocol name prefix, transmitted on the wire for legacy protocol names. /// I.e., `dot` in `/dot/sync/2`. Should be unique for each chain. Always UTF-8. /// Deprecated in favour of genesis hash & fork ID based protocol names. 
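// A minimal sketch (not part of the patch) of how a downstream protocol consumes syncing
// events after this refactoring: `SyncEvent`/`SyncEventStream` are no longer re-exported
// from `sc-network-common`/`sc-network`, but imported from `sc-network-sync` directly, as
// the `sc-network-gossip` and `sc-network-statement` hunks in this diff do. The trait
// signature below follows the version removed from `sc_network_common::sync`; the function
// and closure names are illustrative only.

use futures::StreamExt;
use libp2p::PeerId;
use sc_network_sync::{SyncEvent, SyncEventStream};

/// Forward peer connect/disconnect notifications from the syncing engine to a callback.
async fn follow_sync_events(
    sync: &dyn SyncEventStream,
    mut on_peer_change: impl FnMut(PeerId, bool),
) {
    // `event_stream` returns a boxed stream of `SyncEvent`s for the named subscriber.
    let mut events = sync.event_stream("illustration");
    while let Some(event) = events.next().await {
        match event {
            SyncEvent::PeerConnected(peer_id) => on_peer_change(peer_id, true),
            SyncEvent::PeerDisconnected(peer_id) => on_peer_change(peer_id, false),
        }
    }
}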
diff --git a/substrate/client/network/src/lib.rs b/substrate/client/network/src/lib.rs index ee307596878417c0de2e58305ce73f49c2744944..4dc9bdb4cc1c7244fd562acc01c18aa3f6c43f21 100644 --- a/substrate/client/network/src/lib.rs +++ b/substrate/client/network/src/lib.rs @@ -266,14 +266,7 @@ pub use event::{DhtEvent, Event, SyncEvent}; #[doc(inline)] pub use libp2p::{multiaddr, Multiaddr, PeerId}; pub use request_responses::{Config, IfDisconnected, RequestFailure}; -pub use sc_network_common::{ - role::ObservedRole, - sync::{ - warp::{WarpSyncPhase, WarpSyncProgress}, - ExtendedPeerInfo, StateDownloadProgress, SyncEventStream, SyncState, SyncStatusProvider, - }, - types::ReputationChange, -}; +pub use sc_network_common::{role::ObservedRole, types::ReputationChange}; pub use service::{ signature::Signature, traits::{ diff --git a/substrate/client/network/src/protocol/notifications/behaviour.rs b/substrate/client/network/src/protocol/notifications/behaviour.rs index 89513e004c6df701d6bfc93f023282629abac782..b78f15f8529c6e1ef7bb338a2d41811c47be8700 100644 --- a/substrate/client/network/src/protocol/notifications/behaviour.rs +++ b/substrate/client/network/src/protocol/notifications/behaviour.rs @@ -1423,7 +1423,6 @@ impl NetworkBehaviour for Notifications { let delay_id = self.next_delay_id; self.next_delay_id.0 += 1; let delay = futures_timer::Delay::new(ban_duration); - let peer_id = peer_id; self.delays.push( async move { delay.await; diff --git a/substrate/client/network/statement/Cargo.toml b/substrate/client/network/statement/Cargo.toml index adfb2d6a05fb97c87b7bf1ff653b641ee1acc52c..ef974b4f33f1931b632f8760cd79f975b3970941 100644 --- a/substrate/client/network/statement/Cargo.toml +++ b/substrate/client/network/statement/Cargo.toml @@ -21,6 +21,7 @@ libp2p = "0.51.3" log = "0.4.17" prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } sc-network-common = { path = "../common" } +sc-network-sync = { path = "../sync" } sc-network = { path = ".." 
} sp-consensus = { path = "../../../primitives/consensus/common" } sp-statement-store = { path = "../../../primitives/statement-store" } diff --git a/substrate/client/network/statement/src/lib.rs b/substrate/client/network/statement/src/lib.rs index c5d83b59b260ae7edb1bd1d93b8e7002cf97cb8b..69d4faa13ef287ac41427cd87e2c1e846e728bab 100644 --- a/substrate/client/network/statement/src/lib.rs +++ b/substrate/client/network/statement/src/lib.rs @@ -39,10 +39,8 @@ use sc_network::{ utils::{interval, LruHashSet}, NetworkEventStream, NetworkNotification, NetworkPeers, }; -use sc_network_common::{ - role::ObservedRole, - sync::{SyncEvent, SyncEventStream}, -}; +use sc_network_common::role::ObservedRole; +use sc_network_sync::{SyncEvent, SyncEventStream}; use sp_statement_store::{ Hash, NetworkPriority, Statement, StatementSource, StatementStore, SubmitResult, }; diff --git a/substrate/client/network/sync/Cargo.toml b/substrate/client/network/sync/Cargo.toml index 39312cc4b327dbc024a8cc3d3a9b934ae6bee315..a1ea39a852fc18e087234cdffa4dcad7af663eb4 100644 --- a/substrate/client/network/sync/Cargo.toml +++ b/substrate/client/network/sync/Cargo.toml @@ -30,6 +30,7 @@ schnellru = "0.2.1" smallvec = "1.11.0" thiserror = "1.0" tokio-stream = "0.1.14" +tokio = { version = "1.32.0", features = ["time", "macros"] } fork-tree = { path = "../../../utils/fork-tree" } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } sc-client-api = { path = "../../api" } diff --git a/substrate/client/network/sync/src/block_announce_validator.rs b/substrate/client/network/sync/src/block_announce_validator.rs index f083f9e29e44f46c038efd849c292aff9ad720c7..961b581cddcefa2d2f641ca53c37d9fedbde52c1 100644 --- a/substrate/client/network/sync/src/block_announce_validator.rs +++ b/substrate/client/network/sync/src/block_announce_validator.rs @@ -16,10 +16,11 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! `BlockAnnounceValidator` is responsible for async validation of block announcements. +//! [`BlockAnnounceValidator`] is responsible for async validation of block announcements. +//! [`Stream`] implemented by [`BlockAnnounceValidator`] never terminates. use crate::futures_stream::FuturesStream; -use futures::{Future, FutureExt, Stream, StreamExt}; +use futures::{stream::FusedStream, Future, FutureExt, Stream, StreamExt}; use libp2p::PeerId; use log::{debug, error, trace, warn}; use sc_network_common::sync::message::BlockAnnounce; @@ -300,6 +301,13 @@ impl Stream for BlockAnnounceValidator { } } +// As [`BlockAnnounceValidator`] never terminates, we can easily implement [`FusedStream`] for it. +impl FusedStream for BlockAnnounceValidator { + fn is_terminated(&self) -> bool { + false + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/substrate/client/network/sync/src/block_request_handler.rs b/substrate/client/network/sync/src/block_request_handler.rs index c24083f6328783ef51f81a6806eca91772992820..f363dda3a2d18b3f98d00011e32ac0c3b15c80c0 100644 --- a/substrate/client/network/sync/src/block_request_handler.rs +++ b/substrate/client/network/sync/src/block_request_handler.rs @@ -24,7 +24,6 @@ use crate::{ BlockResponse as BlockResponseSchema, BlockResponse, Direction, }, service::network::NetworkServiceHandle, - MAX_BLOCKS_IN_RESPONSE, }; use codec::{Decode, DecodeAll, Encode}; @@ -54,6 +53,9 @@ use std::{ time::Duration, }; +/// Maximum blocks per response. 
+pub(crate) const MAX_BLOCKS_IN_RESPONSE: usize = 128; + const LOG_TARGET: &str = "sync"; const MAX_BODY_BYTES: usize = 8 * 1024 * 1024; const MAX_NUMBER_OF_SAME_REQUESTS_PER_PEER: usize = 2; diff --git a/substrate/client/network/sync/src/blocks.rs b/substrate/client/network/sync/src/blocks.rs index 240c1ca1f8b26aba92f2d8db53be9684d69fee5d..539a8a5d612cb7f210aa7349660e9b39ac6f570c 100644 --- a/substrate/client/network/sync/src/blocks.rs +++ b/substrate/client/network/sync/src/blocks.rs @@ -123,20 +123,36 @@ impl BlockCollection { let first_different = common + >::one(); let count = (count as u32).into(); let (mut range, downloading) = { + // Iterate through the ranges in `self.blocks` looking for a range to download let mut downloading_iter = self.blocks.iter().peekable(); let mut prev: Option<(&NumberFor, &BlockRangeState)> = None; loop { let next = downloading_iter.next(); break match (prev, next) { + // If we are already downloading this range, request it from `max_parallel` + // peers (`max_parallel = 5` by default). + // Do not request already downloading range from peers with common number above + // the range start. (Some((start, &BlockRangeState::Downloading { ref len, downloading })), _) - if downloading < max_parallel => + if downloading < max_parallel && *start >= first_different => (*start..*start + *len, downloading), - (Some((start, r)), Some((next_start, _))) if *start + r.len() < *next_start => - (*start + r.len()..cmp::min(*next_start, *start + r.len() + count), 0), // gap - (Some((start, r)), None) => (*start + r.len()..*start + r.len() + count, 0), /* last range */ - (None, None) => (first_different..first_different + count, 0), /* empty */ + // If there is a gap between ranges requested, download this gap unless the peer + // has common number above the gap start + (Some((start, r)), Some((next_start, _))) + if *start + r.len() < *next_start && + *start + r.len() >= first_different => + (*start + r.len()..cmp::min(*next_start, *start + r.len() + count), 0), + // Download `count` blocks after the last range requested unless the peer + // has common number above this new range + (Some((start, r)), None) if *start + r.len() >= first_different => + (*start + r.len()..*start + r.len() + count, 0), + // If there are no ranges currently requested, download `count` blocks after + // `common` number + (None, None) => (first_different..first_different + count, 0), + // If the first range starts above `common + 1`, download the gap at the start (None, Some((start, _))) if *start > first_different => - (first_different..cmp::min(first_different + count, *start), 0), /* gap at the start */ + (first_different..cmp::min(first_different + count, *start), 0), + // Move on to the next range pair _ => { prev = next; continue @@ -365,10 +381,10 @@ mod test { bc.blocks.insert(114305, BlockRangeState::Complete(blocks)); let peer0 = PeerId::random(); - assert_eq!(bc.needed_blocks(peer0, 128, 10000, 000, 1, 200), Some(1..100)); - assert_eq!(bc.needed_blocks(peer0, 128, 10000, 600, 1, 200), None); // too far ahead + assert_eq!(bc.needed_blocks(peer0, 128, 10000, 0, 1, 200), Some(1..100)); + assert_eq!(bc.needed_blocks(peer0, 128, 10000, 0, 1, 200), None); // too far ahead assert_eq!( - bc.needed_blocks(peer0, 128, 10000, 600, 1, 200000), + bc.needed_blocks(peer0, 128, 10000, 0, 1, 200000), Some(100 + 128..100 + 128 + 128) ); } @@ -431,4 +447,202 @@ mod test { assert!(bc.blocks.is_empty()); assert!(bc.queued_blocks.is_empty()); } + + #[test] + fn 
downloaded_range_is_requested_from_max_parallel_peers() { + let mut bc = BlockCollection::new(); + assert!(is_empty(&bc)); + + let count = 5; + // identical ranges requested from 2 peers + let max_parallel = 2; + let max_ahead = 200; + + let peer1 = PeerId::random(); + let peer2 = PeerId::random(); + let peer3 = PeerId::random(); + + // common for all peers + let best = 100; + let common = 10; + + assert_eq!( + bc.needed_blocks(peer1, count, best, common, max_parallel, max_ahead), + Some(11..16) + ); + assert_eq!( + bc.needed_blocks(peer2, count, best, common, max_parallel, max_ahead), + Some(11..16) + ); + assert_eq!( + bc.needed_blocks(peer3, count, best, common, max_parallel, max_ahead), + Some(16..21) + ); + } + #[test] + fn downloaded_range_not_requested_from_peers_with_higher_common_number() { + // A peer connects with a common number falling behind our best number + // (either a fork or lagging behind). + // We request a range from this peer starting at its common number + 1. + // Even though we have less than `max_parallel` downloads, we do not request + // this range from peers with a common number above the start of this range. + + let mut bc = BlockCollection::new(); + assert!(is_empty(&bc)); + + let count = 5; + let max_parallel = 2; + let max_ahead = 200; + + let peer1 = PeerId::random(); + let peer1_best = 20; + let peer1_common = 10; + + // `peer2` has first different above the start of the range downloaded from `peer1` + let peer2 = PeerId::random(); + let peer2_best = 20; + let peer2_common = 11; // first_different = 12 + + assert_eq!( + bc.needed_blocks(peer1, count, peer1_best, peer1_common, max_parallel, max_ahead), + Some(11..16), + ); + assert_eq!( + bc.needed_blocks(peer2, count, peer2_best, peer2_common, max_parallel, max_ahead), + Some(16..21), + ); + } + + #[test] + fn gap_above_common_number_requested() { + let mut bc = BlockCollection::new(); + assert!(is_empty(&bc)); + + let count = 5; + let best = 30; + // We need at least 3 ranges requested to have a gap, so to minimize the number of peers + // set `max_parallel = 1` + let max_parallel = 1; + let max_ahead = 200; + + let peer1 = PeerId::random(); + let peer2 = PeerId::random(); + let peer3 = PeerId::random(); + + let common = 10; + assert_eq!( + bc.needed_blocks(peer1, count, best, common, max_parallel, max_ahead), + Some(11..16), + ); + assert_eq!( + bc.needed_blocks(peer2, count, best, common, max_parallel, max_ahead), + Some(16..21), + ); + assert_eq!( + bc.needed_blocks(peer3, count, best, common, max_parallel, max_ahead), + Some(21..26), + ); + + // For some reason there is now a gap at 16..21. We just disconnect `peer2`, but it might + // also happen that 16..21 received first and got imported if our best is actually >= 15. + bc.clear_peer_download(&peer2); + + // Some peer connects with common number below the gap. The gap is requested from it. 
+ assert_eq!( + bc.needed_blocks(peer2, count, best, common, max_parallel, max_ahead), + Some(16..21), + ); + } + + #[test] + fn gap_below_common_number_not_requested() { + let mut bc = BlockCollection::new(); + assert!(is_empty(&bc)); + + let count = 5; + let best = 30; + // We need at least 3 ranges requested to have a gap, so to minimize the number of peers + // set `max_parallel = 1` + let max_parallel = 1; + let max_ahead = 200; + + let peer1 = PeerId::random(); + let peer2 = PeerId::random(); + let peer3 = PeerId::random(); + + let common = 10; + assert_eq!( + bc.needed_blocks(peer1, count, best, common, max_parallel, max_ahead), + Some(11..16), + ); + assert_eq!( + bc.needed_blocks(peer2, count, best, common, max_parallel, max_ahead), + Some(16..21), + ); + assert_eq!( + bc.needed_blocks(peer3, count, best, common, max_parallel, max_ahead), + Some(21..26), + ); + + // For some reason there is now a gap at 16..21. We just disconnect `peer2`, but it might + // also happen that 16..21 received first and got imported if our best is actually >= 15. + bc.clear_peer_download(&peer2); + + // Some peer connects with common number above the gap. The gap is not requested from it. + let common = 23; + assert_eq!( + bc.needed_blocks(peer2, count, best, common, max_parallel, max_ahead), + Some(26..31), // not 16..21 + ); + } + + #[test] + fn range_at_the_end_above_common_number_requested() { + let mut bc = BlockCollection::new(); + assert!(is_empty(&bc)); + + let count = 5; + let best = 30; + let max_parallel = 1; + let max_ahead = 200; + + let peer1 = PeerId::random(); + let peer2 = PeerId::random(); + + let common = 10; + assert_eq!( + bc.needed_blocks(peer1, count, best, common, max_parallel, max_ahead), + Some(11..16), + ); + assert_eq!( + bc.needed_blocks(peer2, count, best, common, max_parallel, max_ahead), + Some(16..21), + ); + } + + #[test] + fn range_at_the_end_below_common_number_not_requested() { + let mut bc = BlockCollection::new(); + assert!(is_empty(&bc)); + + let count = 5; + let best = 30; + let max_parallel = 1; + let max_ahead = 200; + + let peer1 = PeerId::random(); + let peer2 = PeerId::random(); + + let common = 10; + assert_eq!( + bc.needed_blocks(peer1, count, best, common, max_parallel, max_ahead), + Some(11..16), + ); + + let common = 20; + assert_eq!( + bc.needed_blocks(peer2, count, best, common, max_parallel, max_ahead), + Some(21..26), // not 16..21 + ); + } } diff --git a/substrate/client/network/sync/src/chain_sync.rs b/substrate/client/network/sync/src/chain_sync.rs new file mode 100644 index 0000000000000000000000000000000000000000..3825cfa33f73bd4a77f51521d048ae4d512e621e --- /dev/null +++ b/substrate/client/network/sync/src/chain_sync.rs @@ -0,0 +1,2422 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! 
Contains the state of the chain synchronization process +//! +//! At any given point in time, a running node tries as much as possible to be at the head of the +//! chain. This module handles the logic of which blocks to request from remotes, and processing +//! responses. It yields blocks to check and potentially move to the database. +//! +//! # Usage +//! +//! The `ChainSync` struct maintains the state of the block requests. Whenever something happens on +//! the network, or whenever a block has been successfully verified, call the appropriate method in +//! order to update it. + +use crate::{ + blocks::BlockCollection, + extra_requests::ExtraRequests, + schema::v1::StateResponse, + state::{ImportResult, StateSync}, + types::{ + BadPeer, Metrics, OpaqueStateRequest, OpaqueStateResponse, PeerInfo, SyncMode, SyncState, + SyncStatus, + }, + warp::{ + self, EncodedProof, WarpProofImportResult, WarpProofRequest, WarpSync, WarpSyncConfig, + WarpSyncPhase, WarpSyncProgress, + }, +}; + +use codec::Encode; +use libp2p::PeerId; +use log::{debug, error, info, trace, warn}; + +use sc_client_api::{BlockBackend, ProofProvider}; +use sc_consensus::{BlockImportError, BlockImportStatus, IncomingBlock}; +use sc_network_common::sync::message::{ + BlockAnnounce, BlockAttributes, BlockData, BlockRequest, BlockResponse, Direction, FromBlock, +}; +use sp_arithmetic::traits::Saturating; +use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata}; +use sp_consensus::{BlockOrigin, BlockStatus}; +use sp_runtime::{ + traits::{ + Block as BlockT, CheckedSub, Hash, HashingFor, Header as HeaderT, NumberFor, One, + SaturatedConversion, Zero, + }, + EncodedJustification, Justifications, +}; + +use std::{ + collections::{HashMap, HashSet}, + ops::Range, + sync::Arc, +}; + +#[cfg(test)] +mod test; + +/// Log target for this file. +const LOG_TARGET: &'static str = "sync"; + +/// Maximum blocks to store in the import queue. +const MAX_IMPORTING_BLOCKS: usize = 2048; + +/// Maximum blocks to download ahead of any gap. +const MAX_DOWNLOAD_AHEAD: u32 = 2048; + +/// Maximum blocks to look backwards. The gap is the difference between the highest block and the +/// common block of a node. +const MAX_BLOCKS_TO_LOOK_BACKWARDS: u32 = MAX_DOWNLOAD_AHEAD / 2; + +/// Pick the state to sync as the latest finalized number minus this. +const STATE_SYNC_FINALITY_THRESHOLD: u32 = 8; + +/// We use a heuristic that with a high likelihood, by the time +/// `MAJOR_SYNC_BLOCKS` have been imported we'll be on the same +/// chain as (or at least closer to) the peer so we want to delay +/// the ancestor search to not waste time doing that when we are +/// so far behind. +const MAJOR_SYNC_BLOCKS: u8 = 5; + +/// Number of peers that need to be connected before warp sync is started. +const MIN_PEERS_TO_START_WARP_SYNC: usize = 3; + +mod rep { + use sc_network::ReputationChange as Rep; + /// Reputation change when a peer sent us a message that led to a + /// database read error. + pub const BLOCKCHAIN_READ_ERROR: Rep = Rep::new(-(1 << 16), "DB Error"); + + /// Reputation change when a peer sent us a status message with a different + /// genesis than us. + pub const GENESIS_MISMATCH: Rep = Rep::new(i32::MIN, "Genesis mismatch"); + + /// Reputation change for peers which send us a block with an incomplete header. + pub const INCOMPLETE_HEADER: Rep = Rep::new(-(1 << 20), "Incomplete header"); + + /// Reputation change for peers which send us a block which we fail to verify. 
+ pub const VERIFICATION_FAIL: Rep = Rep::new(-(1 << 29), "Block verification failed"); + + /// Reputation change for peers which send us a known bad block. + pub const BAD_BLOCK: Rep = Rep::new(-(1 << 29), "Bad block"); + + /// Peer did not provide us with advertised block data. + pub const NO_BLOCK: Rep = Rep::new(-(1 << 29), "No requested block data"); + + /// Reputation change for peers which send us non-requested block data. + pub const NOT_REQUESTED: Rep = Rep::new(-(1 << 29), "Not requested block data"); + + /// Reputation change for peers which send us a block with bad justifications. + pub const BAD_JUSTIFICATION: Rep = Rep::new(-(1 << 16), "Bad justification"); + + /// Reputation change when a peer sent us invlid ancestry result. + pub const UNKNOWN_ANCESTOR: Rep = Rep::new(-(1 << 16), "DB Error"); + + /// Peer response data does not have requested bits. + pub const BAD_RESPONSE: Rep = Rep::new(-(1 << 12), "Incomplete response"); +} + +enum AllowedRequests { + Some(HashSet), + All, +} + +impl AllowedRequests { + fn add(&mut self, id: &PeerId) { + if let Self::Some(ref mut set) = self { + set.insert(*id); + } + } + + fn take(&mut self) -> Self { + std::mem::take(self) + } + + fn set_all(&mut self) { + *self = Self::All; + } + + fn contains(&self, id: &PeerId) -> bool { + match self { + Self::Some(set) => set.contains(id), + Self::All => true, + } + } + + fn is_empty(&self) -> bool { + match self { + Self::Some(set) => set.is_empty(), + Self::All => false, + } + } + + fn clear(&mut self) { + std::mem::take(self); + } +} + +impl Default for AllowedRequests { + fn default() -> Self { + Self::Some(HashSet::default()) + } +} + +struct GapSync { + blocks: BlockCollection, + best_queued_number: NumberFor, + target: NumberFor, +} + +/// Action that the parent of [`ChainSync`] should perform after reporting a network or block event. +#[derive(Debug)] +pub enum ChainSyncAction { + /// Send block request to peer. Always implies dropping a stale block request to the same peer. + SendBlockRequest { peer_id: PeerId, request: BlockRequest }, + /// Drop stale block request. + CancelBlockRequest { peer_id: PeerId }, + /// Send state request to peer. + SendStateRequest { peer_id: PeerId, request: OpaqueStateRequest }, + /// Send warp proof request to peer. + SendWarpProofRequest { peer_id: PeerId, request: WarpProofRequest }, + /// Peer misbehaved. Disconnect, report it and cancel the block request to it. + DropPeer(BadPeer), + /// Import blocks. + ImportBlocks { origin: BlockOrigin, blocks: Vec> }, + /// Import justifications. + ImportJustifications { + peer_id: PeerId, + hash: B::Hash, + number: NumberFor, + justifications: Justifications, + }, +} + +/// The main data structure which contains all the state for a chains +/// active syncing strategy. +pub struct ChainSync { + /// Chain client. + client: Arc, + /// The active peers that we are using to sync and their PeerSync status + peers: HashMap>, + /// A `BlockCollection` of blocks that are being downloaded from peers + blocks: BlockCollection, + /// The best block number in our queue of blocks to import + best_queued_number: NumberFor, + /// The best block hash in our queue of blocks to import + best_queued_hash: B::Hash, + /// Current mode (full/light) + mode: SyncMode, + /// Any extra justification requests. + extra_justifications: ExtraRequests, + /// A set of hashes of blocks that are being downloaded or have been + /// downloaded and are queued for import. + queue_blocks: HashSet, + /// Fork sync targets. 
+ fork_targets: HashMap>, + /// A set of peers for which there might be potential block requests + allowed_requests: AllowedRequests, + /// Maximum number of peers to ask the same blocks in parallel. + max_parallel_downloads: u32, + /// Maximum blocks per request. + max_blocks_per_request: u32, + /// Total number of downloaded blocks. + downloaded_blocks: usize, + /// State sync in progress, if any. + state_sync: Option>, + /// Warp sync in progress, if any. + warp_sync: Option>, + /// Warp sync configuration. + /// + /// Will be `None` after `self.warp_sync` is `Some(_)`. + warp_sync_config: Option>, + /// A temporary storage for warp sync target block until warp sync is initialized. + warp_sync_target_block_header: Option, + /// Enable importing existing blocks. This is used used after the state download to + /// catch up to the latest state while re-importing blocks. + import_existing: bool, + /// Gap download process. + gap_sync: Option>, + /// Pending actions. + actions: Vec>, +} + +/// All the data we have about a Peer that we are trying to sync with +#[derive(Debug, Clone)] +pub(crate) struct PeerSync { + /// Peer id of this peer. + pub peer_id: PeerId, + /// The common number is the block number that is a common point of + /// ancestry for both our chains (as far as we know). + pub common_number: NumberFor, + /// The hash of the best block that we've seen for this peer. + pub best_hash: B::Hash, + /// The number of the best block that we've seen for this peer. + pub best_number: NumberFor, + /// The state of syncing this peer is in for us, generally categories + /// into `Available` or "busy" with something as defined by `PeerSyncState`. + pub state: PeerSyncState, +} + +impl PeerSync { + /// Update the `common_number` iff `new_common > common_number`. + fn update_common_number(&mut self, new_common: NumberFor) { + if self.common_number < new_common { + trace!( + target: LOG_TARGET, + "Updating peer {} common number from={} => to={}.", + self.peer_id, + self.common_number, + new_common, + ); + self.common_number = new_common; + } + } +} + +struct ForkTarget { + number: NumberFor, + parent_hash: Option, + peers: HashSet, +} + +/// The state of syncing between a Peer and ourselves. +/// +/// Generally two categories, "busy" or `Available`. If busy, the enum +/// defines what we are busy with. +#[derive(Copy, Clone, Eq, PartialEq, Debug)] +pub(crate) enum PeerSyncState { + /// Available for sync requests. + Available, + /// Searching for ancestors the Peer has in common with us. + AncestorSearch { start: NumberFor, current: NumberFor, state: AncestorSearchState }, + /// Actively downloading new blocks, starting from the given Number. + DownloadingNew(NumberFor), + /// Downloading a stale block with given Hash. Stale means that it is a + /// block with a number that is lower than our best number. It might be + /// from a fork and not necessarily already imported. + DownloadingStale(B::Hash), + /// Downloading justification for given block hash. + DownloadingJustification(B::Hash), + /// Downloading state. + DownloadingState, + /// Downloading warp proof. + DownloadingWarpProof, + /// Downloading warp sync target block. + DownloadingWarpTargetBlock, + /// Actively downloading block history after warp sync. 
+ DownloadingGap(NumberFor), +} + +impl PeerSyncState { + pub fn is_available(&self) -> bool { + matches!(self, Self::Available) + } +} + +impl ChainSync +where + B: BlockT, + Client: HeaderBackend + + BlockBackend + + HeaderMetadata + + ProofProvider + + Send + + Sync + + 'static, +{ + /// Create a new instance. + pub fn new( + mode: SyncMode, + client: Arc, + max_parallel_downloads: u32, + max_blocks_per_request: u32, + warp_sync_config: Option>, + ) -> Result { + let mut sync = Self { + client, + peers: HashMap::new(), + blocks: BlockCollection::new(), + best_queued_hash: Default::default(), + best_queued_number: Zero::zero(), + extra_justifications: ExtraRequests::new("justification"), + mode, + queue_blocks: Default::default(), + fork_targets: Default::default(), + allowed_requests: Default::default(), + max_parallel_downloads, + max_blocks_per_request, + downloaded_blocks: 0, + state_sync: None, + warp_sync: None, + import_existing: false, + gap_sync: None, + warp_sync_config, + warp_sync_target_block_header: None, + actions: Vec::new(), + }; + + sync.reset_sync_start_point()?; + Ok(sync) + } + + /// Get peer's best hash & number. + pub fn peer_info(&self, peer_id: &PeerId) -> Option> { + self.peers + .get(peer_id) + .map(|p| PeerInfo { best_hash: p.best_hash, best_number: p.best_number }) + } + + /// Returns the current sync status. + pub fn status(&self) -> SyncStatus { + let median_seen = self.median_seen(); + let best_seen_block = + median_seen.and_then(|median| (median > self.best_queued_number).then_some(median)); + let sync_state = if let Some(target) = median_seen { + // A chain is classified as downloading if the provided best block is + // more than `MAJOR_SYNC_BLOCKS` behind the best block or as importing + // if the same can be said about queued blocks. + let best_block = self.client.info().best_number; + if target > best_block && target - best_block > MAJOR_SYNC_BLOCKS.into() { + // If target is not queued, we're downloading, otherwise importing. + if target > self.best_queued_number { + SyncState::Downloading { target } + } else { + SyncState::Importing { target } + } + } else { + SyncState::Idle + } + } else { + SyncState::Idle + }; + + let warp_sync_progress = match (&self.warp_sync, &self.mode, &self.gap_sync) { + (_, _, Some(gap_sync)) => Some(WarpSyncProgress { + phase: WarpSyncPhase::DownloadingBlocks(gap_sync.best_queued_number), + total_bytes: 0, + }), + (None, SyncMode::Warp, _) => Some(WarpSyncProgress { + phase: WarpSyncPhase::AwaitingPeers { + required_peers: MIN_PEERS_TO_START_WARP_SYNC, + }, + total_bytes: 0, + }), + (Some(sync), _, _) => Some(sync.progress()), + _ => None, + }; + + SyncStatus { + state: sync_state, + best_seen_block, + num_peers: self.peers.len() as u32, + num_connected_peers: 0u32, + queued_blocks: self.queue_blocks.len() as u32, + state_sync: self.state_sync.as_ref().map(|s| s.progress()), + warp_sync: warp_sync_progress, + } + } + + /// Get an estimate of the number of parallel sync requests. + pub fn num_sync_requests(&self) -> usize { + self.fork_targets + .values() + .filter(|f| f.number <= self.best_queued_number) + .count() + } + + /// Get the total number of downloaded blocks. + pub fn num_downloaded_blocks(&self) -> usize { + self.downloaded_blocks + } + + /// Get the number of peers known to the syncing state machine. + pub fn num_peers(&self) -> usize { + self.peers.len() + } + + /// Notify syncing state machine that a new sync peer has connected. 
+ pub fn new_peer(&mut self, peer_id: PeerId, best_hash: B::Hash, best_number: NumberFor) { + match self.new_peer_inner(peer_id, best_hash, best_number) { + Ok(Some(request)) => + self.actions.push(ChainSyncAction::SendBlockRequest { peer_id, request }), + Ok(None) => {}, + Err(bad_peer) => self.actions.push(ChainSyncAction::DropPeer(bad_peer)), + } + } + + #[must_use] + fn new_peer_inner( + &mut self, + peer_id: PeerId, + best_hash: B::Hash, + best_number: NumberFor, + ) -> Result>, BadPeer> { + // There is nothing sync can get from the node that has no blockchain data. + match self.block_status(&best_hash) { + Err(e) => { + debug!(target:LOG_TARGET, "Error reading blockchain: {e}"); + Err(BadPeer(peer_id, rep::BLOCKCHAIN_READ_ERROR)) + }, + Ok(BlockStatus::KnownBad) => { + info!( + "💔 New peer {peer_id} with known bad best block {best_hash} ({best_number})." + ); + Err(BadPeer(peer_id, rep::BAD_BLOCK)) + }, + Ok(BlockStatus::Unknown) => { + if best_number.is_zero() { + info!( + "💔 New peer {} with unknown genesis hash {} ({}).", + peer_id, best_hash, best_number, + ); + return Err(BadPeer(peer_id, rep::GENESIS_MISMATCH)) + } + + // If there are more than `MAJOR_SYNC_BLOCKS` in the import queue then we have + // enough to do in the import queue that it's not worth kicking off + // an ancestor search, which is what we do in the next match case below. + if self.queue_blocks.len() > MAJOR_SYNC_BLOCKS.into() { + debug!( + target:LOG_TARGET, + "New peer {} with unknown best hash {} ({}), assuming common block.", + peer_id, + self.best_queued_hash, + self.best_queued_number + ); + self.peers.insert( + peer_id, + PeerSync { + peer_id, + common_number: self.best_queued_number, + best_hash, + best_number, + state: PeerSyncState::Available, + }, + ); + return Ok(None) + } + + // If we are at genesis, just start downloading. 
+ let (state, req) = if self.best_queued_number.is_zero() { + debug!( + target:LOG_TARGET, + "New peer {peer_id} with best hash {best_hash} ({best_number}).", + ); + + (PeerSyncState::Available, None) + } else { + let common_best = std::cmp::min(self.best_queued_number, best_number); + + debug!( + target:LOG_TARGET, + "New peer {} with unknown best hash {} ({}), searching for common ancestor.", + peer_id, + best_hash, + best_number + ); + + ( + PeerSyncState::AncestorSearch { + current: common_best, + start: self.best_queued_number, + state: AncestorSearchState::ExponentialBackoff(One::one()), + }, + Some(ancestry_request::(common_best)), + ) + }; + + self.allowed_requests.add(&peer_id); + self.peers.insert( + peer_id, + PeerSync { + peer_id, + common_number: Zero::zero(), + best_hash, + best_number, + state, + }, + ); + + if let SyncMode::Warp = self.mode { + if self.peers.len() >= MIN_PEERS_TO_START_WARP_SYNC && self.warp_sync.is_none() + { + log::debug!(target: LOG_TARGET, "Starting warp state sync."); + + if let Some(config) = self.warp_sync_config.take() { + let mut warp_sync = WarpSync::new(self.client.clone(), config); + if let Some(header) = self.warp_sync_target_block_header.take() { + warp_sync.set_target_block(header); + } + self.warp_sync = Some(warp_sync); + } + } + } + Ok(req) + }, + Ok(BlockStatus::Queued) | + Ok(BlockStatus::InChainWithState) | + Ok(BlockStatus::InChainPruned) => { + debug!( + target: LOG_TARGET, + "New peer {peer_id} with known best hash {best_hash} ({best_number}).", + ); + self.peers.insert( + peer_id, + PeerSync { + peer_id, + common_number: std::cmp::min(self.best_queued_number, best_number), + best_hash, + best_number, + state: PeerSyncState::Available, + }, + ); + self.allowed_requests.add(&peer_id); + Ok(None) + }, + } + } + + /// Inform sync about a new best imported block. + pub fn update_chain_info(&mut self, best_hash: &B::Hash, best_number: NumberFor) { + self.on_block_queued(best_hash, best_number); + } + + /// Request extra justification. + pub fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { + let client = &self.client; + self.extra_justifications + .schedule((*hash, number), |base, block| is_descendent_of(&**client, base, block)) + } + + /// Clear extra justification requests. + pub fn clear_justification_requests(&mut self) { + self.extra_justifications.reset(); + } + + /// Configure an explicit fork sync request in case external code has detected that there is a + /// stale fork missing. + /// + /// Note that this function should not be used for recent blocks. + /// Sync should be able to download all the recent forks normally. + /// + /// Passing empty `peers` set effectively removes the sync request. + // The implementation is similar to `on_validated_block_announce` with unknown parent hash. + pub fn set_sync_fork_request( + &mut self, + mut peers: Vec, + hash: &B::Hash, + number: NumberFor, + ) { + if peers.is_empty() { + peers = self + .peers + .iter() + // Only request blocks from peers who are ahead or on a par. + .filter(|(_, peer)| peer.best_number >= number) + .map(|(id, _)| *id) + .collect(); + + debug!( + target: LOG_TARGET, + "Explicit sync request for block {hash:?} with no peers specified. 
\ + Syncing from these peers {peers:?} instead.", + ); + } else { + debug!( + target: LOG_TARGET, + "Explicit sync request for block {hash:?} with {peers:?}", + ); + } + + if self.is_known(hash) { + debug!(target: LOG_TARGET, "Refusing to sync known hash {hash:?}"); + return + } + + trace!(target: LOG_TARGET, "Downloading requested old fork {hash:?}"); + for peer_id in &peers { + if let Some(peer) = self.peers.get_mut(peer_id) { + if let PeerSyncState::AncestorSearch { .. } = peer.state { + continue + } + + if number > peer.best_number { + peer.best_number = number; + peer.best_hash = *hash; + } + self.allowed_requests.add(peer_id); + } + } + + self.fork_targets + .entry(*hash) + .or_insert_with(|| ForkTarget { number, peers: Default::default(), parent_hash: None }) + .peers + .extend(peers); + } + + /// Submit a block response for processing. + #[must_use] + fn on_block_data( + &mut self, + peer_id: &PeerId, + request: Option>, + response: BlockResponse, + ) -> Result<(), BadPeer> { + self.downloaded_blocks += response.blocks.len(); + let mut gap = false; + let new_blocks: Vec> = if let Some(peer) = self.peers.get_mut(peer_id) { + let mut blocks = response.blocks; + if request.as_ref().map_or(false, |r| r.direction == Direction::Descending) { + trace!(target: LOG_TARGET, "Reversing incoming block list"); + blocks.reverse() + } + self.allowed_requests.add(peer_id); + if let Some(request) = request { + match &mut peer.state { + PeerSyncState::DownloadingNew(_) => { + self.blocks.clear_peer_download(peer_id); + peer.state = PeerSyncState::Available; + if let Some(start_block) = + validate_blocks::(&blocks, peer_id, Some(request))? + { + self.blocks.insert(start_block, blocks, *peer_id); + } + self.ready_blocks() + }, + PeerSyncState::DownloadingGap(_) => { + peer.state = PeerSyncState::Available; + if let Some(gap_sync) = &mut self.gap_sync { + gap_sync.blocks.clear_peer_download(peer_id); + if let Some(start_block) = + validate_blocks::(&blocks, peer_id, Some(request))? 
+ { + gap_sync.blocks.insert(start_block, blocks, *peer_id); + } + gap = true; + let blocks: Vec<_> = gap_sync + .blocks + .ready_blocks(gap_sync.best_queued_number + One::one()) + .into_iter() + .map(|block_data| { + let justifications = + block_data.block.justifications.or_else(|| { + legacy_justification_mapping( + block_data.block.justification, + ) + }); + IncomingBlock { + hash: block_data.block.hash, + header: block_data.block.header, + body: block_data.block.body, + indexed_body: block_data.block.indexed_body, + justifications, + origin: block_data.origin, + allow_missing_state: true, + import_existing: self.import_existing, + skip_execution: true, + state: None, + } + }) + .collect(); + debug!( + target: LOG_TARGET, + "Drained {} gap blocks from {}", + blocks.len(), + gap_sync.best_queued_number, + ); + blocks + } else { + debug!(target: LOG_TARGET, "Unexpected gap block response from {peer_id}"); + return Err(BadPeer(*peer_id, rep::NO_BLOCK)) + } + }, + PeerSyncState::DownloadingStale(_) => { + peer.state = PeerSyncState::Available; + if blocks.is_empty() { + debug!(target: LOG_TARGET, "Empty block response from {peer_id}"); + return Err(BadPeer(*peer_id, rep::NO_BLOCK)) + } + validate_blocks::(&blocks, peer_id, Some(request))?; + blocks + .into_iter() + .map(|b| { + let justifications = b + .justifications + .or_else(|| legacy_justification_mapping(b.justification)); + IncomingBlock { + hash: b.hash, + header: b.header, + body: b.body, + indexed_body: None, + justifications, + origin: Some(*peer_id), + allow_missing_state: true, + import_existing: self.import_existing, + skip_execution: self.skip_execution(), + state: None, + } + }) + .collect() + }, + PeerSyncState::AncestorSearch { current, start, state } => { + let matching_hash = match (blocks.get(0), self.client.hash(*current)) { + (Some(block), Ok(maybe_our_block_hash)) => { + trace!( + target: LOG_TARGET, + "Got ancestry block #{} ({}) from peer {}", + current, + block.hash, + peer_id, + ); + maybe_our_block_hash.filter(|x| x == &block.hash) + }, + (None, _) => { + debug!( + target: LOG_TARGET, + "Invalid response when searching for ancestor from {peer_id}", + ); + return Err(BadPeer(*peer_id, rep::UNKNOWN_ANCESTOR)) + }, + (_, Err(e)) => { + info!( + target: LOG_TARGET, + "❌ Error answering legitimate blockchain query: {e}", + ); + return Err(BadPeer(*peer_id, rep::BLOCKCHAIN_READ_ERROR)) + }, + }; + if matching_hash.is_some() { + if *start < self.best_queued_number && + self.best_queued_number <= peer.best_number + { + // We've made progress on this chain since the search was started. + // Opportunistically set common number to updated number + // instead of the one that started the search. 
+ trace!( + target: LOG_TARGET, + "Ancestry search: opportunistically updating peer {} common number from={} => to={}.", + *peer_id, + peer.common_number, + self.best_queued_number, + ); + peer.common_number = self.best_queued_number; + } else if peer.common_number < *current { + trace!( + target: LOG_TARGET, + "Ancestry search: updating peer {} common number from={} => to={}.", + *peer_id, + peer.common_number, + *current, + ); + peer.common_number = *current; + } + } + if matching_hash.is_none() && current.is_zero() { + trace!( + target:LOG_TARGET, + "Ancestry search: genesis mismatch for peer {peer_id}", + ); + return Err(BadPeer(*peer_id, rep::GENESIS_MISMATCH)) + } + if let Some((next_state, next_num)) = + handle_ancestor_search_state(state, *current, matching_hash.is_some()) + { + peer.state = PeerSyncState::AncestorSearch { + current: next_num, + start: *start, + state: next_state, + }; + let request = ancestry_request::(next_num); + self.actions.push(ChainSyncAction::SendBlockRequest { + peer_id: *peer_id, + request, + }); + return Ok(()) + } else { + // Ancestry search is complete. Check if peer is on a stale fork unknown + // to us and add it to sync targets if necessary. + trace!( + target: LOG_TARGET, + "Ancestry search complete. Ours={} ({}), Theirs={} ({}), Common={:?} ({})", + self.best_queued_hash, + self.best_queued_number, + peer.best_hash, + peer.best_number, + matching_hash, + peer.common_number, + ); + if peer.common_number < peer.best_number && + peer.best_number < self.best_queued_number + { + trace!( + target: LOG_TARGET, + "Added fork target {} for {}", + peer.best_hash, + peer_id, + ); + self.fork_targets + .entry(peer.best_hash) + .or_insert_with(|| ForkTarget { + number: peer.best_number, + parent_hash: None, + peers: Default::default(), + }) + .peers + .insert(*peer_id); + } + peer.state = PeerSyncState::Available; + return Ok(()) + } + }, + PeerSyncState::DownloadingWarpTargetBlock => { + peer.state = PeerSyncState::Available; + if let Some(warp_sync) = &mut self.warp_sync { + if blocks.len() == 1 { + validate_blocks::(&blocks, peer_id, Some(request))?; + match warp_sync.import_target_block( + blocks.pop().expect("`blocks` len checked above."), + ) { + warp::TargetBlockImportResult::Success => return Ok(()), + warp::TargetBlockImportResult::BadResponse => + return Err(BadPeer(*peer_id, rep::VERIFICATION_FAIL)), + } + } else if blocks.is_empty() { + debug!(target: LOG_TARGET, "Empty block response from {peer_id}"); + return Err(BadPeer(*peer_id, rep::NO_BLOCK)) + } else { + debug!( + target: LOG_TARGET, + "Too many blocks ({}) in warp target block response from {}", + blocks.len(), + peer_id, + ); + return Err(BadPeer(*peer_id, rep::NOT_REQUESTED)) + } + } else { + debug!( + target: LOG_TARGET, + "Logic error: we think we are downloading warp target block from {}, but no warp sync is happening.", + peer_id, + ); + return Ok(()) + } + }, + PeerSyncState::Available | + PeerSyncState::DownloadingJustification(..) | + PeerSyncState::DownloadingState | + PeerSyncState::DownloadingWarpProof => Vec::new(), + } + } else { + // When request.is_none() this is a block announcement. Just accept blocks. 
+ validate_blocks::(&blocks, peer_id, None)?; + blocks + .into_iter() + .map(|b| { + let justifications = b + .justifications + .or_else(|| legacy_justification_mapping(b.justification)); + IncomingBlock { + hash: b.hash, + header: b.header, + body: b.body, + indexed_body: None, + justifications, + origin: Some(*peer_id), + allow_missing_state: true, + import_existing: false, + skip_execution: true, + state: None, + } + }) + .collect() + } + } else { + // We don't know of this peer, so we also did not request anything from it. + return Err(BadPeer(*peer_id, rep::NOT_REQUESTED)) + }; + + self.validate_and_queue_blocks(new_blocks, gap); + + Ok(()) + } + + /// Submit a justification response for processing. + #[must_use] + fn on_block_justification( + &mut self, + peer_id: PeerId, + response: BlockResponse, + ) -> Result<(), BadPeer> { + let peer = if let Some(peer) = self.peers.get_mut(&peer_id) { + peer + } else { + error!( + target: LOG_TARGET, + "💔 Called on_block_justification with a peer ID of an unknown peer", + ); + return Ok(()) + }; + + self.allowed_requests.add(&peer_id); + if let PeerSyncState::DownloadingJustification(hash) = peer.state { + peer.state = PeerSyncState::Available; + + // We only request one justification at a time + let justification = if let Some(block) = response.blocks.into_iter().next() { + if hash != block.hash { + warn!( + target: LOG_TARGET, + "💔 Invalid block justification provided by {}: requested: {:?} got: {:?}", + peer_id, + hash, + block.hash, + ); + return Err(BadPeer(peer_id, rep::BAD_JUSTIFICATION)) + } + + block + .justifications + .or_else(|| legacy_justification_mapping(block.justification)) + } else { + // we might have asked the peer for a justification on a block that we assumed it + // had but didn't (regardless of whether it had a justification for it or not). + trace!( + target: LOG_TARGET, + "Peer {peer_id:?} provided empty response for justification request {hash:?}", + ); + + None + }; + + if let Some((peer_id, hash, number, justifications)) = + self.extra_justifications.on_response(peer_id, justification) + { + self.actions.push(ChainSyncAction::ImportJustifications { + peer_id, + hash, + number, + justifications, + }); + return Ok(()) + } + } + + Ok(()) + } + + /// Report a justification import (successful or not). + pub fn on_justification_import(&mut self, hash: B::Hash, number: NumberFor, success: bool) { + let finalization_result = if success { Ok((hash, number)) } else { Err(()) }; + self.extra_justifications + .try_finalize_root((hash, number), finalization_result, true); + self.allowed_requests.set_all(); + } + + /// Notify sync that a block has been finalized. + pub fn on_block_finalized(&mut self, hash: &B::Hash, number: NumberFor) { + let client = &self.client; + let r = self.extra_justifications.on_block_finalized(hash, number, |base, block| { + is_descendent_of(&**client, base, block) + }); + + if let SyncMode::LightState { skip_proofs, .. } = &self.mode { + if self.state_sync.is_none() && !self.peers.is_empty() && self.queue_blocks.is_empty() { + // Finalized a recent block. 
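+ // If the finalized block is within `STATE_SYNC_FINALITY_THRESHOLD` blocks
+ // of the median best block reported by our peers, start a state sync
+ // targeting that finalized block.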
+ let mut heads: Vec<_> = self.peers.values().map(|peer| peer.best_number).collect(); + heads.sort(); + let median = heads[heads.len() / 2]; + if number + STATE_SYNC_FINALITY_THRESHOLD.saturated_into() >= median { + if let Ok(Some(header)) = self.client.header(*hash) { + log::debug!( + target: LOG_TARGET, + "Starting state sync for #{number} ({hash})", + ); + self.state_sync = Some(StateSync::new( + self.client.clone(), + header, + None, + None, + *skip_proofs, + )); + self.allowed_requests.set_all(); + } + } + } + } + + if let Err(err) = r { + warn!( + target: LOG_TARGET, + "💔 Error cleaning up pending extra justification data requests: {err}", + ); + } + } + + /// Submit a validated block announcement. + pub fn on_validated_block_announce( + &mut self, + is_best: bool, + peer_id: PeerId, + announce: &BlockAnnounce, + ) { + let number = *announce.header.number(); + let hash = announce.header.hash(); + let parent_status = + self.block_status(announce.header.parent_hash()).unwrap_or(BlockStatus::Unknown); + let known_parent = parent_status != BlockStatus::Unknown; + let ancient_parent = parent_status == BlockStatus::InChainPruned; + + let known = self.is_known(&hash); + let peer = if let Some(peer) = self.peers.get_mut(&peer_id) { + peer + } else { + error!(target: LOG_TARGET, "💔 Called `on_validated_block_announce` with a bad peer ID"); + return + }; + + if let PeerSyncState::AncestorSearch { .. } = peer.state { + trace!(target: LOG_TARGET, "Peer {} is in the ancestor search state.", peer_id); + return + } + + if is_best { + // update their best block + peer.best_number = number; + peer.best_hash = hash; + } + + // If the announced block is the best they have and is not ahead of us, our common number + // is either one further ahead or it's the one they just announced, if we know about it. + if is_best { + if known && self.best_queued_number >= number { + self.update_peer_common_number(&peer_id, number); + } else if announce.header.parent_hash() == &self.best_queued_hash || + known_parent && self.best_queued_number >= number + { + self.update_peer_common_number(&peer_id, number.saturating_sub(One::one())); + } + } + self.allowed_requests.add(&peer_id); + + // known block case + if known || self.is_already_downloading(&hash) { + trace!(target: "sync", "Known block announce from {}: {}", peer_id, hash); + if let Some(target) = self.fork_targets.get_mut(&hash) { + target.peers.insert(peer_id); + } + return + } + + if ancient_parent { + trace!( + target: "sync", + "Ignored ancient block announced from {}: {} {:?}", + peer_id, + hash, + announce.header, + ); + return + } + + if self.status().state == SyncState::Idle { + trace!( + target: "sync", + "Added sync target for block announced from {}: {} {:?}", + peer_id, + hash, + announce.summary(), + ); + self.fork_targets + .entry(hash) + .or_insert_with(|| ForkTarget { + number, + parent_hash: Some(*announce.header.parent_hash()), + peers: Default::default(), + }) + .peers + .insert(peer_id); + } + } + + /// Notify that a sync peer has disconnected. 
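+ ///
+ /// Clears the peer's pending block downloads, forgets it in fork targets and
+ /// extra-justification tracking, and queues any blocks that became ready for
+ /// import as a result.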
+ pub fn peer_disconnected(&mut self, peer_id: &PeerId) { + self.blocks.clear_peer_download(peer_id); + if let Some(gap_sync) = &mut self.gap_sync { + gap_sync.blocks.clear_peer_download(peer_id) + } + self.peers.remove(peer_id); + self.extra_justifications.peer_disconnected(peer_id); + self.allowed_requests.set_all(); + self.fork_targets.retain(|_, target| { + target.peers.remove(peer_id); + !target.peers.is_empty() + }); + + let blocks = self.ready_blocks(); + + if !blocks.is_empty() { + self.validate_and_queue_blocks(blocks, false); + } + } + + /// Get prometheus metrics. + pub fn metrics(&self) -> Metrics { + Metrics { + queued_blocks: self.queue_blocks.len().try_into().unwrap_or(std::u32::MAX), + fork_targets: self.fork_targets.len().try_into().unwrap_or(std::u32::MAX), + justifications: self.extra_justifications.metrics(), + } + } + + /// Returns the median seen block number. + fn median_seen(&self) -> Option> { + let mut best_seens = self.peers.values().map(|p| p.best_number).collect::>(); + + if best_seens.is_empty() { + None + } else { + let middle = best_seens.len() / 2; + + // Not the "perfect median" when we have an even number of peers. + Some(*best_seens.select_nth_unstable(middle).1) + } + } + + fn required_block_attributes(&self) -> BlockAttributes { + match self.mode { + SyncMode::Full => + BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION | BlockAttributes::BODY, + SyncMode::LightState { storage_chain_mode: false, .. } | SyncMode::Warp => + BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION | BlockAttributes::BODY, + SyncMode::LightState { storage_chain_mode: true, .. } => + BlockAttributes::HEADER | + BlockAttributes::JUSTIFICATION | + BlockAttributes::INDEXED_BODY, + } + } + + fn skip_execution(&self) -> bool { + match self.mode { + SyncMode::Full => false, + SyncMode::LightState { .. } => true, + SyncMode::Warp => true, + } + } + + fn validate_and_queue_blocks(&mut self, mut new_blocks: Vec>, gap: bool) { + let orig_len = new_blocks.len(); + new_blocks.retain(|b| !self.queue_blocks.contains(&b.hash)); + if new_blocks.len() != orig_len { + debug!( + target: LOG_TARGET, + "Ignoring {} blocks that are already queued", + orig_len - new_blocks.len(), + ); + } + + let origin = if !gap && !self.status().state.is_major_syncing() { + BlockOrigin::NetworkBroadcast + } else { + BlockOrigin::NetworkInitialSync + }; + + if let Some((h, n)) = new_blocks + .last() + .and_then(|b| b.header.as_ref().map(|h| (&b.hash, *h.number()))) + { + trace!( + target:LOG_TARGET, + "Accepted {} blocks ({:?}) with origin {:?}", + new_blocks.len(), + h, + origin, + ); + self.on_block_queued(h, n) + } + self.queue_blocks.extend(new_blocks.iter().map(|b| b.hash)); + + self.actions.push(ChainSyncAction::ImportBlocks { origin, blocks: new_blocks }) + } + + fn update_peer_common_number(&mut self, peer_id: &PeerId, new_common: NumberFor) { + if let Some(peer) = self.peers.get_mut(peer_id) { + peer.update_common_number(new_common); + } + } + + /// Called when a block has been queued for import. + /// + /// Updates our internal state for best queued block and then goes + /// through all peers to update our view of their state as well. 
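+ ///
+ /// Also removes the corresponding fork target (if any) and advances the gap sync
+ /// progress when the queued block falls inside the gap.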
+ fn on_block_queued(&mut self, hash: &B::Hash, number: NumberFor) { + if self.fork_targets.remove(hash).is_some() { + trace!(target: LOG_TARGET, "Completed fork sync {hash:?}"); + } + if let Some(gap_sync) = &mut self.gap_sync { + if number > gap_sync.best_queued_number && number <= gap_sync.target { + gap_sync.best_queued_number = number; + } + } + if number > self.best_queued_number { + self.best_queued_number = number; + self.best_queued_hash = *hash; + // Update common blocks + for (n, peer) in self.peers.iter_mut() { + if let PeerSyncState::AncestorSearch { .. } = peer.state { + // Wait for ancestry search to complete first. + continue + } + let new_common_number = + if peer.best_number >= number { number } else { peer.best_number }; + trace!( + target: LOG_TARGET, + "Updating peer {} info, ours={}, common={}->{}, their best={}", + n, + number, + peer.common_number, + new_common_number, + peer.best_number, + ); + peer.common_number = new_common_number; + } + } + self.allowed_requests.set_all(); + } + + /// Restart the sync process. This will reset all pending block requests and return an iterator + /// of new block requests to make to peers. Peers that were downloading finality data (i.e. + /// their state was `DownloadingJustification`) are unaffected and will stay in the same state. + fn restart(&mut self) { + self.blocks.clear(); + if let Err(e) = self.reset_sync_start_point() { + warn!(target: LOG_TARGET, "💔 Unable to restart sync: {e}"); + } + self.allowed_requests.set_all(); + debug!( + target: LOG_TARGET, + "Restarted with {} ({})", + self.best_queued_number, + self.best_queued_hash, + ); + let old_peers = std::mem::take(&mut self.peers); + + old_peers.into_iter().for_each(|(peer_id, mut p)| { + // peers that were downloading justifications + // should be kept in that state. + if let PeerSyncState::DownloadingJustification(_) = p.state { + // We make sure our commmon number is at least something we have. + trace!( + target: LOG_TARGET, + "Keeping peer {} after restart, updating common number from={} => to={} (our best).", + peer_id, + p.common_number, + self.best_queued_number, + ); + p.common_number = self.best_queued_number; + self.peers.insert(peer_id, p); + return + } + + // handle peers that were in other states. + let action = match self.new_peer_inner(peer_id, p.best_hash, p.best_number) { + // since the request is not a justification, remove it from pending responses + Ok(None) => ChainSyncAction::CancelBlockRequest { peer_id }, + // update the request if the new one is available + Ok(Some(request)) => ChainSyncAction::SendBlockRequest { peer_id, request }, + // this implies that we need to drop pending response from the peer + Err(bad_peer) => ChainSyncAction::DropPeer(bad_peer), + }; + + self.actions.push(action); + }); + } + + /// Find a block to start sync from. If we sync with state, that's the latest block we have + /// state for. + fn reset_sync_start_point(&mut self) -> Result<(), ClientError> { + let info = self.client.info(); + if matches!(self.mode, SyncMode::LightState { .. }) && info.finalized_state.is_some() { + warn!( + target: LOG_TARGET, + "Can't use fast sync mode with a partially synced database. Reverting to full sync mode." + ); + self.mode = SyncMode::Full; + } + if matches!(self.mode, SyncMode::Warp) && info.finalized_state.is_some() { + warn!( + target: LOG_TARGET, + "Can't use warp sync mode with a partially synced database. Reverting to full sync mode." 
+ ); + self.mode = SyncMode::Full; + } + self.import_existing = false; + self.best_queued_hash = info.best_hash; + self.best_queued_number = info.best_number; + + if self.mode == SyncMode::Full && + self.client.block_status(info.best_hash)? != BlockStatus::InChainWithState + { + self.import_existing = true; + // Latest state is missing, start with the last finalized state or genesis instead. + if let Some((hash, number)) = info.finalized_state { + debug!(target: LOG_TARGET, "Starting from finalized state #{number}"); + self.best_queued_hash = hash; + self.best_queued_number = number; + } else { + debug!(target: LOG_TARGET, "Restarting from genesis"); + self.best_queued_hash = Default::default(); + self.best_queued_number = Zero::zero(); + } + } + + if let Some((start, end)) = info.block_gap { + debug!(target: LOG_TARGET, "Starting gap sync #{start} - #{end}"); + self.gap_sync = Some(GapSync { + best_queued_number: start - One::one(), + target: end, + blocks: BlockCollection::new(), + }); + } + trace!( + target: LOG_TARGET, + "Restarted sync at #{} ({:?})", + self.best_queued_number, + self.best_queued_hash, + ); + Ok(()) + } + + /// What is the status of the block corresponding to the given hash? + fn block_status(&self, hash: &B::Hash) -> Result { + if self.queue_blocks.contains(hash) { + return Ok(BlockStatus::Queued) + } + self.client.block_status(*hash) + } + + /// Is the block corresponding to the given hash known? + fn is_known(&self, hash: &B::Hash) -> bool { + self.block_status(hash).ok().map_or(false, |s| s != BlockStatus::Unknown) + } + + /// Is any peer downloading the given hash? + fn is_already_downloading(&self, hash: &B::Hash) -> bool { + self.peers + .iter() + .any(|(_, p)| p.state == PeerSyncState::DownloadingStale(*hash)) + } + + /// Get the set of downloaded blocks that are ready to be queued for import. + fn ready_blocks(&mut self) -> Vec> { + self.blocks + .ready_blocks(self.best_queued_number + One::one()) + .into_iter() + .map(|block_data| { + let justifications = block_data + .block + .justifications + .or_else(|| legacy_justification_mapping(block_data.block.justification)); + IncomingBlock { + hash: block_data.block.hash, + header: block_data.block.header, + body: block_data.block.body, + indexed_body: block_data.block.indexed_body, + justifications, + origin: block_data.origin, + allow_missing_state: true, + import_existing: self.import_existing, + skip_execution: self.skip_execution(), + state: None, + } + }) + .collect() + } + + /// Set the warp sync target block externally in case we skip warp proofs downloading. + pub fn set_warp_sync_target_block(&mut self, header: B::Header) { + if let Some(ref mut warp_sync) = self.warp_sync { + warp_sync.set_target_block(header); + } else { + self.warp_sync_target_block_header = Some(header); + } + } + + /// Generate block request for downloading of the target block body during warp sync. + fn warp_target_block_request(&mut self) -> Option<(PeerId, BlockRequest)> { + let sync = &self.warp_sync.as_ref()?; + + if self.allowed_requests.is_empty() || + sync.is_complete() || + self.peers + .iter() + .any(|(_, peer)| peer.state == PeerSyncState::DownloadingWarpTargetBlock) + { + // Only one pending warp target block request is allowed. + return None + } + + if let Some((target_number, request)) = sync.next_target_block_request() { + // Find a random peer that has a block with the target number. 
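+ // (In practice this takes the first available peer, in map iteration order,
+ // whose best block is at or above the target number.)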
+ for (id, peer) in self.peers.iter_mut() { + if peer.state.is_available() && peer.best_number >= target_number { + trace!(target: LOG_TARGET, "New warp target block request for {id}"); + peer.state = PeerSyncState::DownloadingWarpTargetBlock; + self.allowed_requests.clear(); + return Some((*id, request)) + } + } + } + + None + } + + /// Submit blocks received in a response. + pub fn on_block_response( + &mut self, + peer_id: PeerId, + request: BlockRequest, + blocks: Vec>, + ) { + let block_response = BlockResponse:: { id: request.id, blocks }; + + let blocks_range = || match ( + block_response + .blocks + .first() + .and_then(|b| b.header.as_ref().map(|h| h.number())), + block_response.blocks.last().and_then(|b| b.header.as_ref().map(|h| h.number())), + ) { + (Some(first), Some(last)) if first != last => format!(" ({}..{})", first, last), + (Some(first), Some(_)) => format!(" ({})", first), + _ => Default::default(), + }; + trace!( + target: LOG_TARGET, + "BlockResponse {} from {} with {} blocks {}", + block_response.id, + peer_id, + block_response.blocks.len(), + blocks_range(), + ); + + let res = if request.fields == BlockAttributes::JUSTIFICATION { + self.on_block_justification(peer_id, block_response) + } else { + self.on_block_data(&peer_id, Some(request), block_response) + }; + + if let Err(bad_peer) = res { + self.actions.push(ChainSyncAction::DropPeer(bad_peer)); + } + } + + /// Submit a state received in a response. + pub fn on_state_response(&mut self, peer_id: PeerId, response: OpaqueStateResponse) { + if let Err(bad_peer) = self.on_state_data(&peer_id, response) { + self.actions.push(ChainSyncAction::DropPeer(bad_peer)); + } + } + + /// Get justification requests scheduled by sync to be sent out. + fn justification_requests(&mut self) -> Vec<(PeerId, BlockRequest)> { + let peers = &mut self.peers; + let mut matcher = self.extra_justifications.matcher(); + std::iter::from_fn(move || { + if let Some((peer, request)) = matcher.next(peers) { + peers + .get_mut(&peer) + .expect( + "`Matcher::next` guarantees the `PeerId` comes from the given peers; qed", + ) + .state = PeerSyncState::DownloadingJustification(request.0); + let req = BlockRequest:: { + id: 0, + fields: BlockAttributes::JUSTIFICATION, + from: FromBlock::Hash(request.0), + direction: Direction::Ascending, + max: Some(1), + }; + Some((peer, req)) + } else { + None + } + }) + .collect() + } + + /// Get block requests scheduled by sync to be sent out. 
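+ ///
+ /// In warp sync mode only the warp target block request is considered. Otherwise,
+ /// for each available peer this prefers, in order: an ancestor search (when the
+ /// peer's common number lags too far behind our best queued block), a new block
+ /// range request, a fork target request, and finally a gap sync request.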
+ fn block_requests(&mut self) -> Vec<(PeerId, BlockRequest)> { + if self.mode == SyncMode::Warp { + return self + .warp_target_block_request() + .map_or_else(|| Vec::new(), |req| Vec::from([req])) + } + + if self.allowed_requests.is_empty() || self.state_sync.is_some() { + return Vec::new() + } + + if self.queue_blocks.len() > MAX_IMPORTING_BLOCKS { + trace!(target: LOG_TARGET, "Too many blocks in the queue."); + return Vec::new() + } + let is_major_syncing = self.status().state.is_major_syncing(); + let attrs = self.required_block_attributes(); + let blocks = &mut self.blocks; + let fork_targets = &mut self.fork_targets; + let last_finalized = + std::cmp::min(self.best_queued_number, self.client.info().finalized_number); + let best_queued = self.best_queued_number; + let client = &self.client; + let queue = &self.queue_blocks; + let allowed_requests = self.allowed_requests.take(); + let max_parallel = if is_major_syncing { 1 } else { self.max_parallel_downloads }; + let max_blocks_per_request = self.max_blocks_per_request; + let gap_sync = &mut self.gap_sync; + self.peers + .iter_mut() + .filter_map(move |(&id, peer)| { + if !peer.state.is_available() || !allowed_requests.contains(&id) { + return None + } + + // If our best queued is more than `MAX_BLOCKS_TO_LOOK_BACKWARDS` blocks away from + // the common number, the peer best number is higher than our best queued and the + // common number is smaller than the last finalized block number, we should do an + // ancestor search to find a better common block. If the queue is full we wait till + // all blocks are imported though. + if best_queued.saturating_sub(peer.common_number) > + MAX_BLOCKS_TO_LOOK_BACKWARDS.into() && + best_queued < peer.best_number && + peer.common_number < last_finalized && + queue.len() <= MAJOR_SYNC_BLOCKS.into() + { + trace!( + target: LOG_TARGET, + "Peer {:?} common block {} too far behind of our best {}. 
Starting ancestry search.", + id, + peer.common_number, + best_queued, + ); + let current = std::cmp::min(peer.best_number, best_queued); + peer.state = PeerSyncState::AncestorSearch { + current, + start: best_queued, + state: AncestorSearchState::ExponentialBackoff(One::one()), + }; + Some((id, ancestry_request::(current))) + } else if let Some((range, req)) = peer_block_request( + &id, + peer, + blocks, + attrs, + max_parallel, + max_blocks_per_request, + last_finalized, + best_queued, + ) { + peer.state = PeerSyncState::DownloadingNew(range.start); + trace!( + target: LOG_TARGET, + "New block request for {}, (best:{}, common:{}) {:?}", + id, + peer.best_number, + peer.common_number, + req, + ); + Some((id, req)) + } else if let Some((hash, req)) = fork_sync_request( + &id, + fork_targets, + best_queued, + last_finalized, + attrs, + |hash| { + if queue.contains(hash) { + BlockStatus::Queued + } else { + client.block_status(*hash).unwrap_or(BlockStatus::Unknown) + } + }, + max_blocks_per_request, + ) { + trace!(target: LOG_TARGET, "Downloading fork {hash:?} from {id}"); + peer.state = PeerSyncState::DownloadingStale(hash); + Some((id, req)) + } else if let Some((range, req)) = gap_sync.as_mut().and_then(|sync| { + peer_gap_block_request( + &id, + peer, + &mut sync.blocks, + attrs, + sync.target, + sync.best_queued_number, + max_blocks_per_request, + ) + }) { + peer.state = PeerSyncState::DownloadingGap(range.start); + trace!( + target: LOG_TARGET, + "New gap block request for {}, (best:{}, common:{}) {:?}", + id, + peer.best_number, + peer.common_number, + req, + ); + Some((id, req)) + } else { + None + } + }) + .collect() + } + + /// Get a state request scheduled by sync to be sent out (if any). + fn state_request(&mut self) -> Option<(PeerId, OpaqueStateRequest)> { + if self.allowed_requests.is_empty() { + return None + } + if (self.state_sync.is_some() || self.warp_sync.is_some()) && + self.peers.iter().any(|(_, peer)| peer.state == PeerSyncState::DownloadingState) + { + // Only one pending state request is allowed. + return None + } + if let Some(sync) = &self.state_sync { + if sync.is_complete() { + return None + } + + for (id, peer) in self.peers.iter_mut() { + if peer.state.is_available() && peer.common_number >= sync.target_block_num() { + peer.state = PeerSyncState::DownloadingState; + let request = sync.next_request(); + trace!(target: LOG_TARGET, "New StateRequest for {}: {:?}", id, request); + self.allowed_requests.clear(); + return Some((*id, OpaqueStateRequest(Box::new(request)))) + } + } + } + if let Some(sync) = &self.warp_sync { + if sync.is_complete() { + return None + } + if let (Some(request), Some(target)) = + (sync.next_state_request(), sync.target_block_number()) + { + for (id, peer) in self.peers.iter_mut() { + if peer.state.is_available() && peer.best_number >= target { + trace!(target: LOG_TARGET, "New StateRequest for {id}: {request:?}"); + peer.state = PeerSyncState::DownloadingState; + self.allowed_requests.clear(); + return Some((*id, OpaqueStateRequest(Box::new(request)))) + } + } + } + } + None + } + + /// Get a warp proof request scheduled by sync to be sent out (if any). + fn warp_sync_request(&mut self) -> Option<(PeerId, WarpProofRequest)> { + if let Some(sync) = &self.warp_sync { + if self.allowed_requests.is_empty() || + sync.is_complete() || + self.peers + .iter() + .any(|(_, peer)| peer.state == PeerSyncState::DownloadingWarpProof) + { + // Only one pending state request is allowed. 
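+ // (More precisely: at most one warp proof request may be in flight at a time.)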
+ return None + } + if let Some(request) = sync.next_warp_proof_request() { + let mut targets: Vec<_> = self.peers.values().map(|p| p.best_number).collect(); + if !targets.is_empty() { + targets.sort(); + let median = targets[targets.len() / 2]; + // Find a random peer that is synced as much as peer majority. + for (id, peer) in self.peers.iter_mut() { + if peer.state.is_available() && peer.best_number >= median { + trace!(target: LOG_TARGET, "New WarpProofRequest for {id}"); + peer.state = PeerSyncState::DownloadingWarpProof; + self.allowed_requests.clear(); + return Some((*id, request)) + } + } + } + } + } + None + } + + #[must_use] + fn on_state_data( + &mut self, + peer_id: &PeerId, + response: OpaqueStateResponse, + ) -> Result<(), BadPeer> { + let response: Box = response.0.downcast().map_err(|_error| { + error!( + target: LOG_TARGET, + "Failed to downcast opaque state response, this is an implementation bug." + ); + + BadPeer(*peer_id, rep::BAD_RESPONSE) + })?; + + if let Some(peer) = self.peers.get_mut(peer_id) { + if let PeerSyncState::DownloadingState = peer.state { + peer.state = PeerSyncState::Available; + self.allowed_requests.set_all(); + } + } + let import_result = if let Some(sync) = &mut self.state_sync { + debug!( + target: LOG_TARGET, + "Importing state data from {} with {} keys, {} proof nodes.", + peer_id, + response.entries.len(), + response.proof.len(), + ); + sync.import(*response) + } else if let Some(sync) = &mut self.warp_sync { + debug!( + target: LOG_TARGET, + "Importing state data from {} with {} keys, {} proof nodes.", + peer_id, + response.entries.len(), + response.proof.len(), + ); + sync.import_state(*response) + } else { + debug!(target: LOG_TARGET, "Ignored obsolete state response from {peer_id}"); + return Err(BadPeer(*peer_id, rep::NOT_REQUESTED)) + }; + + match import_result { + ImportResult::Import(hash, header, state, body, justifications) => { + let origin = BlockOrigin::NetworkInitialSync; + let block = IncomingBlock { + hash, + header: Some(header), + body, + indexed_body: None, + justifications, + origin: None, + allow_missing_state: true, + import_existing: true, + skip_execution: self.skip_execution(), + state: Some(state), + }; + debug!(target: LOG_TARGET, "State download is complete. Import is queued"); + self.actions.push(ChainSyncAction::ImportBlocks { origin, blocks: vec![block] }); + Ok(()) + }, + ImportResult::Continue => Ok(()), + ImportResult::BadResponse => { + debug!(target: LOG_TARGET, "Bad state data received from {peer_id}"); + Err(BadPeer(*peer_id, rep::BAD_BLOCK)) + }, + } + } + + /// Submit a warp proof response received. 
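+ ///
+ /// If no warp sync is in progress, the response is treated as unsolicited and the
+ /// peer is scheduled to be dropped.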
+ pub fn on_warp_sync_response(&mut self, peer_id: &PeerId, response: EncodedProof) { + if let Some(peer) = self.peers.get_mut(peer_id) { + if let PeerSyncState::DownloadingWarpProof = peer.state { + peer.state = PeerSyncState::Available; + self.allowed_requests.set_all(); + } + } + let import_result = if let Some(sync) = &mut self.warp_sync { + debug!( + target: LOG_TARGET, + "Importing warp proof data from {}, {} bytes.", + peer_id, + response.0.len(), + ); + sync.import_warp_proof(response) + } else { + debug!(target: LOG_TARGET, "Ignored obsolete warp sync response from {peer_id}"); + self.actions + .push(ChainSyncAction::DropPeer(BadPeer(*peer_id, rep::NOT_REQUESTED))); + return + }; + + match import_result { + WarpProofImportResult::Success => {}, + WarpProofImportResult::BadResponse => { + debug!(target: LOG_TARGET, "Bad proof data received from {peer_id}"); + self.actions.push(ChainSyncAction::DropPeer(BadPeer(*peer_id, rep::BAD_BLOCK))); + }, + } + } + + /// A batch of blocks have been processed, with or without errors. + /// + /// Call this when a batch of blocks have been processed by the import + /// queue, with or without errors. If an error is returned, the pending response + /// from the peer must be dropped. + pub fn on_blocks_processed( + &mut self, + imported: usize, + count: usize, + results: Vec<(Result>, BlockImportError>, B::Hash)>, + ) { + trace!(target: LOG_TARGET, "Imported {imported} of {count}"); + + let mut has_error = false; + for (_, hash) in &results { + self.queue_blocks.remove(hash); + self.blocks.clear_queued(hash); + if let Some(gap_sync) = &mut self.gap_sync { + gap_sync.blocks.clear_queued(hash); + } + } + for (result, hash) in results { + if has_error { + break + } + + has_error |= result.is_err(); + + match result { + Ok(BlockImportStatus::ImportedKnown(number, peer_id)) => + if let Some(peer) = peer_id { + self.update_peer_common_number(&peer, number); + }, + Ok(BlockImportStatus::ImportedUnknown(number, aux, peer_id)) => { + if aux.clear_justification_requests { + trace!( + target: LOG_TARGET, + "Block imported clears all pending justification requests {number}: {hash:?}", + ); + self.clear_justification_requests(); + } + + if aux.needs_justification { + trace!( + target: LOG_TARGET, + "Block imported but requires justification {number}: {hash:?}", + ); + self.request_justification(&hash, number); + } + + if aux.bad_justification { + if let Some(ref peer) = peer_id { + warn!("💔 Sent block with bad justification to import"); + self.actions.push(ChainSyncAction::DropPeer(BadPeer( + *peer, + rep::BAD_JUSTIFICATION, + ))); + } + } + + if let Some(peer) = peer_id { + self.update_peer_common_number(&peer, number); + } + let state_sync_complete = + self.state_sync.as_ref().map_or(false, |s| s.target() == hash); + if state_sync_complete { + info!( + target: LOG_TARGET, + "State sync is complete ({} MiB), restarting block sync.", + self.state_sync.as_ref().map_or(0, |s| s.progress().size / (1024 * 1024)), + ); + self.state_sync = None; + self.mode = SyncMode::Full; + self.restart(); + } + let warp_sync_complete = self + .warp_sync + .as_ref() + .map_or(false, |s| s.target_block_hash() == Some(hash)); + if warp_sync_complete { + info!( + target: LOG_TARGET, + "Warp sync is complete ({} MiB), restarting block sync.", + self.warp_sync.as_ref().map_or(0, |s| s.progress().total_bytes / (1024 * 1024)), + ); + self.warp_sync = None; + self.mode = SyncMode::Full; + self.restart(); + } + let gap_sync_complete = + self.gap_sync.as_ref().map_or(false, |s| s.target == 
number); + if gap_sync_complete { + info!( + target: LOG_TARGET, + "Block history download is complete." + ); + self.gap_sync = None; + } + }, + Err(BlockImportError::IncompleteHeader(peer_id)) => + if let Some(peer) = peer_id { + warn!( + target: LOG_TARGET, + "💔 Peer sent block with incomplete header to import", + ); + self.actions + .push(ChainSyncAction::DropPeer(BadPeer(peer, rep::INCOMPLETE_HEADER))); + self.restart(); + }, + Err(BlockImportError::VerificationFailed(peer_id, e)) => { + let extra_message = peer_id + .map_or_else(|| "".into(), |peer| format!(" received from ({peer})")); + + warn!( + target: LOG_TARGET, + "💔 Verification failed for block {hash:?}{extra_message}: {e:?}", + ); + + if let Some(peer) = peer_id { + self.actions + .push(ChainSyncAction::DropPeer(BadPeer(peer, rep::VERIFICATION_FAIL))); + } + + self.restart(); + }, + Err(BlockImportError::BadBlock(peer_id)) => + if let Some(peer) = peer_id { + warn!( + target: LOG_TARGET, + "💔 Block {hash:?} received from peer {peer} has been blacklisted", + ); + self.actions.push(ChainSyncAction::DropPeer(BadPeer(peer, rep::BAD_BLOCK))); + }, + Err(BlockImportError::MissingState) => { + // This may happen if the chain we were requesting upon has been discarded + // in the meantime because other chain has been finalized. + // Don't mark it as bad as it still may be synced if explicitly requested. + trace!(target: LOG_TARGET, "Obsolete block {hash:?}"); + }, + e @ Err(BlockImportError::UnknownParent) | e @ Err(BlockImportError::Other(_)) => { + warn!(target: LOG_TARGET, "💔 Error importing block {hash:?}: {}", e.unwrap_err()); + self.state_sync = None; + self.warp_sync = None; + self.restart(); + }, + Err(BlockImportError::Cancelled) => {}, + }; + } + + self.allowed_requests.set_all(); + } + + /// Get pending actions to perform. + #[must_use] + pub fn actions(&mut self) -> impl Iterator> { + let block_requests = self + .block_requests() + .into_iter() + .map(|(peer_id, request)| ChainSyncAction::SendBlockRequest { peer_id, request }); + self.actions.extend(block_requests); + + let justification_requests = self + .justification_requests() + .into_iter() + .map(|(peer_id, request)| ChainSyncAction::SendBlockRequest { peer_id, request }); + self.actions.extend(justification_requests); + + let state_request = self + .state_request() + .into_iter() + .map(|(peer_id, request)| ChainSyncAction::SendStateRequest { peer_id, request }); + self.actions.extend(state_request); + + let warp_proof_request = self + .warp_sync_request() + .into_iter() + .map(|(peer_id, request)| ChainSyncAction::SendWarpProofRequest { peer_id, request }); + self.actions.extend(warp_proof_request); + + std::mem::take(&mut self.actions).into_iter() + } + + /// A version of `actions()` that doesn't schedule extra requests. For testing only. + #[cfg(test)] + #[must_use] + fn take_actions(&mut self) -> impl Iterator> { + std::mem::take(&mut self.actions).into_iter() + } +} + +// This is purely during a backwards compatible transitionary period and should be removed +// once we can assume all nodes can send and receive multiple Justifications +// The ID tag is hardcoded here to avoid depending on the GRANDPA crate. +// See: https://github.com/paritytech/substrate/issues/8172 +fn legacy_justification_mapping( + justification: Option, +) -> Option { + justification.map(|just| (*b"FRNK", just).into()) +} + +/// Request the ancestry for a block. Sends a request for header and justification for the given +/// block number. Used during ancestry search. 
+fn ancestry_request(block: NumberFor) -> BlockRequest { + BlockRequest:: { + id: 0, + fields: BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION, + from: FromBlock::Number(block), + direction: Direction::Ascending, + max: Some(1), + } +} + +/// The ancestor search state expresses which algorithm, and its stateful parameters, we are using +/// to try to find an ancestor block +#[derive(Copy, Clone, Eq, PartialEq, Debug)] +pub(crate) enum AncestorSearchState { + /// Use exponential backoff to find an ancestor, then switch to binary search. + /// We keep track of the exponent. + ExponentialBackoff(NumberFor), + /// Using binary search to find the best ancestor. + /// We keep track of left and right bounds. + BinarySearch(NumberFor, NumberFor), +} + +/// This function handles the ancestor search strategy used. The goal is to find a common point +/// that both our chains agree on that is as close to the tip as possible. +/// The way this works is we first have an exponential backoff strategy, where we try to step +/// forward until we find a block hash mismatch. The size of the step doubles each step we take. +/// +/// When we've found a block hash mismatch we then fall back to a binary search between the two +/// last known points to find the common block closest to the tip. +fn handle_ancestor_search_state( + state: &AncestorSearchState, + curr_block_num: NumberFor, + block_hash_match: bool, +) -> Option<(AncestorSearchState, NumberFor)> { + let two = >::one() + >::one(); + match state { + AncestorSearchState::ExponentialBackoff(next_distance_to_tip) => { + let next_distance_to_tip = *next_distance_to_tip; + if block_hash_match && next_distance_to_tip == One::one() { + // We found the ancestor in the first step so there is no need to execute binary + // search. + return None + } + if block_hash_match { + let left = curr_block_num; + let right = left + next_distance_to_tip / two; + let middle = left + (right - left) / two; + Some((AncestorSearchState::BinarySearch(left, right), middle)) + } else { + let next_block_num = + curr_block_num.checked_sub(&next_distance_to_tip).unwrap_or_else(Zero::zero); + let next_distance_to_tip = next_distance_to_tip * two; + Some(( + AncestorSearchState::ExponentialBackoff(next_distance_to_tip), + next_block_num, + )) + } + }, + AncestorSearchState::BinarySearch(mut left, mut right) => { + if left >= curr_block_num { + return None + } + if block_hash_match { + left = curr_block_num; + } else { + right = curr_block_num; + } + assert!(right >= left); + let middle = left + (right - left) / two; + if middle == curr_block_num { + None + } else { + Some((AncestorSearchState::BinarySearch(left, right), middle)) + } + }, + } +} + +/// Get a new block request for the peer if any. +fn peer_block_request( + id: &PeerId, + peer: &PeerSync, + blocks: &mut BlockCollection, + attrs: BlockAttributes, + max_parallel_downloads: u32, + max_blocks_per_request: u32, + finalized: NumberFor, + best_num: NumberFor, +) -> Option<(Range>, BlockRequest)> { + if best_num >= peer.best_number { + // Will be downloaded as alternative fork instead. 
+ return None + } else if peer.common_number < finalized { + trace!( + target: LOG_TARGET, + "Requesting pre-finalized chain from {:?}, common={}, finalized={}, peer best={}, our best={}", + id, peer.common_number, finalized, peer.best_number, best_num, + ); + } + let range = blocks.needed_blocks( + *id, + max_blocks_per_request, + peer.best_number, + peer.common_number, + max_parallel_downloads, + MAX_DOWNLOAD_AHEAD, + )?; + + // The end is not part of the range. + let last = range.end.saturating_sub(One::one()); + + let from = if peer.best_number == last { + FromBlock::Hash(peer.best_hash) + } else { + FromBlock::Number(last) + }; + + let request = BlockRequest:: { + id: 0, + fields: attrs, + from, + direction: Direction::Descending, + max: Some((range.end - range.start).saturated_into::()), + }; + + Some((range, request)) +} + +/// Get a new block request for the peer if any. +fn peer_gap_block_request( + id: &PeerId, + peer: &PeerSync, + blocks: &mut BlockCollection, + attrs: BlockAttributes, + target: NumberFor, + common_number: NumberFor, + max_blocks_per_request: u32, +) -> Option<(Range>, BlockRequest)> { + let range = blocks.needed_blocks( + *id, + max_blocks_per_request, + std::cmp::min(peer.best_number, target), + common_number, + 1, + MAX_DOWNLOAD_AHEAD, + )?; + + // The end is not part of the range. + let last = range.end.saturating_sub(One::one()); + let from = FromBlock::Number(last); + + let request = BlockRequest:: { + id: 0, + fields: attrs, + from, + direction: Direction::Descending, + max: Some((range.end - range.start).saturated_into::()), + }; + Some((range, request)) +} + +/// Get pending fork sync targets for a peer. +fn fork_sync_request( + id: &PeerId, + targets: &mut HashMap>, + best_num: NumberFor, + finalized: NumberFor, + attributes: BlockAttributes, + check_block: impl Fn(&B::Hash) -> BlockStatus, + max_blocks_per_request: u32, +) -> Option<(B::Hash, BlockRequest)> { + targets.retain(|hash, r| { + if r.number <= finalized { + trace!( + target: LOG_TARGET, + "Removed expired fork sync request {:?} (#{})", + hash, + r.number, + ); + return false + } + if check_block(hash) != BlockStatus::Unknown { + trace!( + target: LOG_TARGET, + "Removed obsolete fork sync request {:?} (#{})", + hash, + r.number, + ); + return false + } + true + }); + for (hash, r) in targets { + if !r.peers.contains(&id) { + continue + } + // Download the fork only if it is behind or not too far ahead our tip of the chain + // Otherwise it should be downloaded in full sync mode. + if r.number <= best_num || + (r.number - best_num).saturated_into::() < max_blocks_per_request as u32 + { + let parent_status = r.parent_hash.as_ref().map_or(BlockStatus::Unknown, check_block); + let count = if parent_status == BlockStatus::Unknown { + (r.number - finalized).saturated_into::() // up to the last finalized block + } else { + // request only single block + 1 + }; + trace!( + target: LOG_TARGET, + "Downloading requested fork {hash:?} from {id}, {count} blocks", + ); + return Some(( + *hash, + BlockRequest:: { + id: 0, + fields: attributes, + from: FromBlock::Hash(*hash), + direction: Direction::Descending, + max: Some(count), + }, + )) + } else { + trace!(target: LOG_TARGET, "Fork too far in the future: {:?} (#{})", hash, r.number); + } + } + None +} + +/// Returns `true` if the given `block` is a descendent of `base`. 
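+///
+/// Note that a block is not considered a descendent of itself: when `base == block`
+/// this returns `Ok(false)`.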
+fn is_descendent_of( + client: &T, + base: &Block::Hash, + block: &Block::Hash, +) -> sp_blockchain::Result +where + Block: BlockT, + T: HeaderMetadata + ?Sized, +{ + if base == block { + return Ok(false) + } + + let ancestor = sp_blockchain::lowest_common_ancestor(client, *block, *base)?; + + Ok(ancestor.hash == *base) +} + +/// Validate that the given `blocks` are correct. +/// Returns the number of the first block in the sequence. +/// +/// It is expected that `blocks` are in ascending order. +fn validate_blocks( + blocks: &Vec>, + peer_id: &PeerId, + request: Option>, +) -> Result>, BadPeer> { + if let Some(request) = request { + if Some(blocks.len() as _) > request.max { + debug!( + target: LOG_TARGET, + "Received more blocks than requested from {}. Expected in maximum {:?}, got {}.", + peer_id, + request.max, + blocks.len(), + ); + + return Err(BadPeer(*peer_id, rep::NOT_REQUESTED)) + } + + let block_header = + if request.direction == Direction::Descending { blocks.last() } else { blocks.first() } + .and_then(|b| b.header.as_ref()); + + let expected_block = block_header.as_ref().map_or(false, |h| match request.from { + FromBlock::Hash(hash) => h.hash() == hash, + FromBlock::Number(n) => h.number() == &n, + }); + + if !expected_block { + debug!( + target: LOG_TARGET, + "Received block that was not requested. Requested {:?}, got {:?}.", + request.from, + block_header, + ); + + return Err(BadPeer(*peer_id, rep::NOT_REQUESTED)) + } + + if request.fields.contains(BlockAttributes::HEADER) && + blocks.iter().any(|b| b.header.is_none()) + { + trace!( + target: LOG_TARGET, + "Missing requested header for a block in response from {peer_id}.", + ); + + return Err(BadPeer(*peer_id, rep::BAD_RESPONSE)) + } + + if request.fields.contains(BlockAttributes::BODY) && blocks.iter().any(|b| b.body.is_none()) + { + trace!( + target: LOG_TARGET, + "Missing requested body for a block in response from {peer_id}.", + ); + + return Err(BadPeer(*peer_id, rep::BAD_RESPONSE)) + } + } + + for b in blocks { + if let Some(header) = &b.header { + let hash = header.hash(); + if hash != b.hash { + debug!( + target:LOG_TARGET, + "Bad header received from {}. Expected hash {:?}, got {:?}", + peer_id, + b.hash, + hash, + ); + return Err(BadPeer(*peer_id, rep::BAD_BLOCK)) + } + } + if let (Some(header), Some(body)) = (&b.header, &b.body) { + let expected = *header.extrinsics_root(); + let got = HashingFor::::ordered_trie_root( + body.iter().map(Encode::encode).collect(), + sp_runtime::StateVersion::V0, + ); + if expected != got { + debug!( + target:LOG_TARGET, + "Bad extrinsic root for a block {} received from {}. Expected {:?}, got {:?}", + b.hash, + peer_id, + expected, + got, + ); + return Err(BadPeer(*peer_id, rep::BAD_BLOCK)) + } + } + } + + Ok(blocks.first().and_then(|b| b.header.as_ref()).map(|h| *h.number())) +} diff --git a/substrate/client/network/sync/src/chain_sync/test.rs b/substrate/client/network/sync/src/chain_sync/test.rs new file mode 100644 index 0000000000000000000000000000000000000000..15b2a95a07c8739b3dca5d2e5c2e9eb89ccf50ef --- /dev/null +++ b/substrate/client/network/sync/src/chain_sync/test.rs @@ -0,0 +1,967 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Tests of [`ChainSync`]. + +use super::*; +use futures::executor::block_on; +use sc_block_builder::BlockBuilderBuilder; +use sc_network_common::sync::message::{BlockAnnounce, BlockData, BlockState, FromBlock}; +use sp_blockchain::HeaderBackend; +use substrate_test_runtime_client::{ + runtime::{Block, Hash, Header}, + BlockBuilderExt, ClientBlockImportExt, ClientExt, DefaultTestClientBuilderExt, TestClient, + TestClientBuilder, TestClientBuilderExt, +}; + +#[test] +fn processes_empty_response_on_justification_request_for_unknown_block() { + // if we ask for a justification for a given block to a peer that doesn't know that block + // (different from not having a justification), the peer will reply with an empty response. + // internally we should process the response as the justification not being available. + + let client = Arc::new(TestClientBuilder::new().build()); + let peer_id = PeerId::random(); + + let mut sync = ChainSync::new(SyncMode::Full, client.clone(), 1, 64, None).unwrap(); + + let (a1_hash, a1_number) = { + let a1 = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().best_hash) + .with_parent_block_number(client.chain_info().best_number) + .build() + .unwrap() + .build() + .unwrap() + .block; + (a1.hash(), *a1.header.number()) + }; + + // add a new peer with the same best block + sync.new_peer(peer_id, a1_hash, a1_number); + + // and request a justification for the block + sync.request_justification(&a1_hash, a1_number); + + // the justification request should be scheduled to that peer + assert!(sync + .justification_requests() + .iter() + .any(|(who, request)| { *who == peer_id && request.from == FromBlock::Hash(a1_hash) })); + + // there are no extra pending requests + assert_eq!(sync.extra_justifications.pending_requests().count(), 0); + + // there's one in-flight extra request to the expected peer + assert!(sync.extra_justifications.active_requests().any(|(who, (hash, number))| { + *who == peer_id && *hash == a1_hash && *number == a1_number + })); + + // if the peer replies with an empty response (i.e. it doesn't know the block), + // the active request should be cleared. 
+ sync.on_block_justification(peer_id, BlockResponse:: { id: 0, blocks: vec![] }) + .unwrap(); + + // there should be no in-flight requests + assert_eq!(sync.extra_justifications.active_requests().count(), 0); + + // and the request should now be pending again, waiting for reschedule + assert!(sync + .extra_justifications + .pending_requests() + .any(|(hash, number)| { *hash == a1_hash && *number == a1_number })); +} + +#[test] +fn restart_doesnt_affect_peers_downloading_finality_data() { + let mut client = Arc::new(TestClientBuilder::new().build()); + + let mut sync = ChainSync::new(SyncMode::Full, client.clone(), 1, 64, None).unwrap(); + + let peer_id1 = PeerId::random(); + let peer_id2 = PeerId::random(); + let peer_id3 = PeerId::random(); + + let mut new_blocks = |n| { + for _ in 0..n { + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().best_hash) + .with_parent_block_number(client.chain_info().best_number) + .build() + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); + } + + let info = client.info(); + (info.best_hash, info.best_number) + }; + + let (b1_hash, b1_number) = new_blocks(50); + + // add 2 peers at blocks that we don't have locally + sync.new_peer(peer_id1, Hash::random(), 42); + sync.new_peer(peer_id2, Hash::random(), 10); + + // we wil send block requests to these peers + // for these blocks we don't know about + assert!(sync + .block_requests() + .into_iter() + .all(|(p, _)| { p == peer_id1 || p == peer_id2 })); + + // add a new peer at a known block + sync.new_peer(peer_id3, b1_hash, b1_number); + + // we request a justification for a block we have locally + sync.request_justification(&b1_hash, b1_number); + + // the justification request should be scheduled to the + // new peer which is at the given block + assert!(sync.justification_requests().iter().any(|(p, r)| { + *p == peer_id3 && + r.fields == BlockAttributes::JUSTIFICATION && + r.from == FromBlock::Hash(b1_hash) + })); + + assert_eq!( + sync.peers.get(&peer_id3).unwrap().state, + PeerSyncState::DownloadingJustification(b1_hash), + ); + + // clear old actions + let _ = sync.take_actions(); + + // we restart the sync state + sync.restart(); + let actions = sync.take_actions().collect::>(); + + // which should make us send out block requests to the first two peers + assert_eq!(actions.len(), 2); + assert!(actions.iter().all(|action| match action { + ChainSyncAction::SendBlockRequest { peer_id, .. } => + peer_id == &peer_id1 || peer_id == &peer_id2, + _ => false, + })); + + // peer 3 should be unaffected it was downloading finality data + assert_eq!( + sync.peers.get(&peer_id3).unwrap().state, + PeerSyncState::DownloadingJustification(b1_hash), + ); + + // Set common block to something that we don't have (e.g. failed import) + sync.peers.get_mut(&peer_id3).unwrap().common_number = 100; + sync.restart(); + assert_eq!(sync.peers.get(&peer_id3).unwrap().common_number, 50); +} + +/// Send a block annoucnement for the given `header`. +fn send_block_announce(header: Header, peer_id: PeerId, sync: &mut ChainSync) { + let announce = BlockAnnounce { + header: header.clone(), + state: Some(BlockState::Best), + data: Some(Vec::new()), + }; + + sync.on_validated_block_announce(true, peer_id, &announce); +} + +/// Create a block response from the given `blocks`. 
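+///
+/// The produced response carries headers and bodies only; justifications and indexed
+/// bodies are left empty.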
+fn create_block_response(blocks: Vec) -> BlockResponse { + BlockResponse:: { + id: 0, + blocks: blocks + .into_iter() + .map(|b| BlockData:: { + hash: b.hash(), + header: Some(b.header().clone()), + body: Some(b.deconstruct().1), + indexed_body: None, + receipt: None, + message_queue: None, + justification: None, + justifications: None, + }) + .collect(), + } +} + +/// Get a block request from `sync` and check that is matches the expected request. +fn get_block_request( + sync: &mut ChainSync, + from: FromBlock, + max: u32, + peer: &PeerId, +) -> BlockRequest { + let requests = sync.block_requests(); + + log::trace!(target: LOG_TARGET, "Requests: {requests:?}"); + + assert_eq!(1, requests.len()); + assert_eq!(*peer, requests[0].0); + + let request = requests[0].1.clone(); + + assert_eq!(from, request.from); + assert_eq!(Some(max), request.max); + request +} + +/// Build and import a new best block. +fn build_block(client: &mut Arc, at: Option, fork: bool) -> Block { + let at = at.unwrap_or_else(|| client.info().best_hash); + + let mut block_builder = BlockBuilderBuilder::new(&**client) + .on_parent_block(at) + .fetch_parent_block_number(&**client) + .unwrap() + .build() + .unwrap(); + + if fork { + block_builder.push_storage_change(vec![1, 2, 3], Some(vec![4, 5, 6])).unwrap(); + } + + let block = block_builder.build().unwrap().block; + + block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); + block +} + +fn unwrap_from_block_number(from: FromBlock) -> u64 { + if let FromBlock::Number(from) = from { + from + } else { + panic!("Expected a number!"); + } +} + +/// A regression test for a behavior we have seen on a live network. +/// +/// The scenario is that the node is doing a full resync and is connected to some node that is +/// doing a major sync as well. This other node that is doing a major sync will finish before +/// our node and send a block announcement message, but we don't have seen any block +/// announcement from this node in its sync process. Meaning our common number didn't change. It +/// is now expected that we start an ancestor search to find the common number. 
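+///
+/// Concretely, once the gap between our best queued block and the peer's common number
+/// exceeds `MAX_BLOCKS_TO_LOOK_BACKWARDS` (and the other conditions checked in
+/// `block_requests` hold), the peer is switched into the `AncestorSearch` state instead
+/// of being asked for new blocks.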
+#[test] +fn do_ancestor_search_when_common_block_to_best_qeued_gap_is_to_big() { + sp_tracing::try_init_simple(); + + let blocks = { + let mut client = Arc::new(TestClientBuilder::new().build()); + (0..MAX_DOWNLOAD_AHEAD * 2) + .map(|_| build_block(&mut client, None, false)) + .collect::>() + }; + + let mut client = Arc::new(TestClientBuilder::new().build()); + let info = client.info(); + + let mut sync = ChainSync::new(SyncMode::Full, client.clone(), 5, 64, None).unwrap(); + + let peer_id1 = PeerId::random(); + let peer_id2 = PeerId::random(); + + let best_block = blocks.last().unwrap().clone(); + let max_blocks_to_request = sync.max_blocks_per_request; + // Connect the node we will sync from + sync.new_peer(peer_id1, best_block.hash(), *best_block.header().number()); + sync.new_peer(peer_id2, info.best_hash, 0); + + let mut best_block_num = 0; + while best_block_num < MAX_DOWNLOAD_AHEAD { + let request = get_block_request( + &mut sync, + FromBlock::Number(max_blocks_to_request as u64 + best_block_num as u64), + max_blocks_to_request as u32, + &peer_id1, + ); + + let from = unwrap_from_block_number(request.from.clone()); + + let mut resp_blocks = blocks[best_block_num as usize..from as usize].to_vec(); + resp_blocks.reverse(); + + let response = create_block_response(resp_blocks.clone()); + + // Clear old actions to not deal with them + let _ = sync.take_actions(); + + sync.on_block_data(&peer_id1, Some(request), response).unwrap(); + + let actions = sync.take_actions().collect::>(); + assert_eq!(actions.len(), 1); + assert!(matches!( + &actions[0], + ChainSyncAction::ImportBlocks{ origin: _, blocks } if blocks.len() == max_blocks_to_request as usize, + )); + + best_block_num += max_blocks_to_request as u32; + + let _ = sync.on_blocks_processed( + max_blocks_to_request as usize, + max_blocks_to_request as usize, + resp_blocks + .iter() + .rev() + .map(|b| { + ( + Ok(BlockImportStatus::ImportedUnknown( + *b.header().number(), + Default::default(), + Some(peer_id1), + )), + b.hash(), + ) + }) + .collect(), + ); + + resp_blocks + .into_iter() + .rev() + .for_each(|b| block_on(client.import_as_final(BlockOrigin::Own, b)).unwrap()); + } + + // "Wait" for the queue to clear + sync.queue_blocks.clear(); + + // Let peer2 announce that it finished syncing + send_block_announce(best_block.header().clone(), peer_id2, &mut sync); + + let (peer1_req, peer2_req) = + sync.block_requests().into_iter().fold((None, None), |res, req| { + if req.0 == peer_id1 { + (Some(req.1), res.1) + } else if req.0 == peer_id2 { + (res.0, Some(req.1)) + } else { + panic!("Unexpected req: {:?}", req) + } + }); + + // We should now do an ancestor search to find the correct common block. + let peer2_req = peer2_req.unwrap(); + assert_eq!(Some(1), peer2_req.max); + assert_eq!(FromBlock::Number(best_block_num as u64), peer2_req.from); + + let response = create_block_response(vec![blocks[(best_block_num - 1) as usize].clone()]); + + // Clear old actions to not deal with them + let _ = sync.take_actions(); + + sync.on_block_data(&peer_id2, Some(peer2_req), response).unwrap(); + + let actions = sync.take_actions().collect::>(); + assert!(actions.is_empty()); + + let peer1_from = unwrap_from_block_number(peer1_req.unwrap().from); + + // As we are on the same chain, we should directly continue with requesting blocks from + // peer 2 as well. 
+ get_block_request( + &mut sync, + FromBlock::Number(peer1_from + max_blocks_to_request as u64), + max_blocks_to_request as u32, + &peer_id2, + ); +} + +/// A test that ensures that we can sync a huge fork. +/// +/// The following scenario: +/// A peer connects to us and we both have the common block 512. The last finalized is 2048. +/// Our best block is 4096. The peer send us a block announcement with 4097 from a fork. +/// +/// We will first do an ancestor search to find the common block. After that we start to sync +/// the fork and finish it ;) +#[test] +fn can_sync_huge_fork() { + sp_tracing::try_init_simple(); + + let mut client = Arc::new(TestClientBuilder::new().build()); + let blocks = (0..MAX_BLOCKS_TO_LOOK_BACKWARDS * 4) + .map(|_| build_block(&mut client, None, false)) + .collect::>(); + + let fork_blocks = { + let mut client = Arc::new(TestClientBuilder::new().build()); + let fork_blocks = blocks[..MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2] + .into_iter() + .inspect(|b| block_on(client.import(BlockOrigin::Own, (*b).clone())).unwrap()) + .cloned() + .collect::>(); + + fork_blocks + .into_iter() + .chain( + (0..MAX_BLOCKS_TO_LOOK_BACKWARDS * 2 + 1) + .map(|_| build_block(&mut client, None, true)), + ) + .collect::>() + }; + + let info = client.info(); + + let mut sync = ChainSync::new(SyncMode::Full, client.clone(), 5, 64, None).unwrap(); + + let finalized_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2 - 1].clone(); + let just = (*b"TEST", Vec::new()); + client.finalize_block(finalized_block.hash(), Some(just)).unwrap(); + sync.update_chain_info(&info.best_hash, info.best_number); + + let peer_id1 = PeerId::random(); + + let common_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize / 2].clone(); + // Connect the node we will sync from + sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()); + + send_block_announce(fork_blocks.last().unwrap().header().clone(), peer_id1, &mut sync); + + let mut request = + get_block_request(&mut sync, FromBlock::Number(info.best_number), 1, &peer_id1); + + // Discard old actions we are not interested in + let _ = sync.take_actions(); + + // Do the ancestor search + loop { + let block = &fork_blocks[unwrap_from_block_number(request.from.clone()) as usize - 1]; + let response = create_block_response(vec![block.clone()]); + + sync.on_block_data(&peer_id1, Some(request), response).unwrap(); + + let actions = sync.take_actions().collect::>(); + + request = if actions.is_empty() { + // We found the ancenstor + break + } else { + assert_eq!(actions.len(), 1); + match &actions[0] { + ChainSyncAction::SendBlockRequest { peer_id: _, request } => request.clone(), + action @ _ => panic!("Unexpected action: {action:?}"), + } + }; + + log::trace!(target: LOG_TARGET, "Request: {request:?}"); + } + + // Now request and import the fork. 
+ let mut best_block_num = *finalized_block.header().number() as u32; + let max_blocks_to_request = sync.max_blocks_per_request; + while best_block_num < *fork_blocks.last().unwrap().header().number() as u32 - 1 { + let request = get_block_request( + &mut sync, + FromBlock::Number(max_blocks_to_request as u64 + best_block_num as u64), + max_blocks_to_request as u32, + &peer_id1, + ); + + let from = unwrap_from_block_number(request.from.clone()); + + let mut resp_blocks = fork_blocks[best_block_num as usize..from as usize].to_vec(); + resp_blocks.reverse(); + + let response = create_block_response(resp_blocks.clone()); + + sync.on_block_data(&peer_id1, Some(request), response).unwrap(); + + let actions = sync.take_actions().collect::>(); + assert_eq!(actions.len(), 1); + assert!(matches!( + &actions[0], + ChainSyncAction::ImportBlocks{ origin: _, blocks } if blocks.len() == sync.max_blocks_per_request as usize + )); + + best_block_num += sync.max_blocks_per_request as u32; + + sync.on_blocks_processed( + max_blocks_to_request as usize, + max_blocks_to_request as usize, + resp_blocks + .iter() + .rev() + .map(|b| { + ( + Ok(BlockImportStatus::ImportedUnknown( + *b.header().number(), + Default::default(), + Some(peer_id1), + )), + b.hash(), + ) + }) + .collect(), + ); + + // Discard pending actions + let _ = sync.take_actions(); + + resp_blocks + .into_iter() + .rev() + .for_each(|b| block_on(client.import(BlockOrigin::Own, b)).unwrap()); + } + + // Request the tip + get_block_request(&mut sync, FromBlock::Hash(fork_blocks.last().unwrap().hash()), 1, &peer_id1); +} + +#[test] +fn syncs_fork_without_duplicate_requests() { + sp_tracing::try_init_simple(); + + let mut client = Arc::new(TestClientBuilder::new().build()); + let blocks = (0..MAX_BLOCKS_TO_LOOK_BACKWARDS * 4) + .map(|_| build_block(&mut client, None, false)) + .collect::>(); + + let fork_blocks = { + let mut client = Arc::new(TestClientBuilder::new().build()); + let fork_blocks = blocks[..MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2] + .into_iter() + .inspect(|b| block_on(client.import(BlockOrigin::Own, (*b).clone())).unwrap()) + .cloned() + .collect::>(); + + fork_blocks + .into_iter() + .chain( + (0..MAX_BLOCKS_TO_LOOK_BACKWARDS * 2 + 1) + .map(|_| build_block(&mut client, None, true)), + ) + .collect::>() + }; + + let info = client.info(); + + let mut sync = ChainSync::new(SyncMode::Full, client.clone(), 5, 64, None).unwrap(); + + let finalized_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2 - 1].clone(); + let just = (*b"TEST", Vec::new()); + client.finalize_block(finalized_block.hash(), Some(just)).unwrap(); + sync.update_chain_info(&info.best_hash, info.best_number); + + let peer_id1 = PeerId::random(); + + let common_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize / 2].clone(); + // Connect the node we will sync from + sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()); + + send_block_announce(fork_blocks.last().unwrap().header().clone(), peer_id1, &mut sync); + + let mut request = + get_block_request(&mut sync, FromBlock::Number(info.best_number), 1, &peer_id1); + + // Discard pending actions + let _ = sync.take_actions(); + + // Do the ancestor search + loop { + let block = &fork_blocks[unwrap_from_block_number(request.from.clone()) as usize - 1]; + let response = create_block_response(vec![block.clone()]); + + sync.on_block_data(&peer_id1, Some(request), response).unwrap(); + + let actions = sync.take_actions().collect::>(); + + request = if actions.is_empty() { + // We found the 
ancenstor + break + } else { + assert_eq!(actions.len(), 1); + match &actions[0] { + ChainSyncAction::SendBlockRequest { peer_id: _, request } => request.clone(), + action @ _ => panic!("Unexpected action: {action:?}"), + } + }; + + log::trace!(target: LOG_TARGET, "Request: {request:?}"); + } + + // Now request and import the fork. + let mut best_block_num = *finalized_block.header().number() as u32; + let max_blocks_to_request = sync.max_blocks_per_request; + + let mut request = get_block_request( + &mut sync, + FromBlock::Number(max_blocks_to_request as u64 + best_block_num as u64), + max_blocks_to_request as u32, + &peer_id1, + ); + let last_block_num = *fork_blocks.last().unwrap().header().number() as u32 - 1; + while best_block_num < last_block_num { + let from = unwrap_from_block_number(request.from.clone()); + + let mut resp_blocks = fork_blocks[best_block_num as usize..from as usize].to_vec(); + resp_blocks.reverse(); + + let response = create_block_response(resp_blocks.clone()); + + // Discard old actions + let _ = sync.take_actions(); + + sync.on_block_data(&peer_id1, Some(request.clone()), response).unwrap(); + + let actions = sync.take_actions().collect::>(); + assert_eq!(actions.len(), 1); + assert!(matches!( + &actions[0], + ChainSyncAction::ImportBlocks{ origin: _, blocks } if blocks.len() == max_blocks_to_request as usize + )); + + best_block_num += max_blocks_to_request as u32; + + if best_block_num < last_block_num { + // make sure we're not getting a duplicate request in the time before the blocks are + // processed + request = get_block_request( + &mut sync, + FromBlock::Number(max_blocks_to_request as u64 + best_block_num as u64), + max_blocks_to_request as u32, + &peer_id1, + ); + } + + let mut notify_imported: Vec<_> = resp_blocks + .iter() + .rev() + .map(|b| { + ( + Ok(BlockImportStatus::ImportedUnknown( + *b.header().number(), + Default::default(), + Some(peer_id1), + )), + b.hash(), + ) + }) + .collect(); + + // The import queue may send notifications in batches of varying size. So we simulate + // this here by splitting the batch into 2 notifications. 
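+    // Each half of the batch is reported back to `ChainSync` through its own
+    // `on_blocks_processed` call below.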
+ let max_blocks_to_request = sync.max_blocks_per_request; + let second_batch = notify_imported.split_off(notify_imported.len() / 2); + let _ = sync.on_blocks_processed( + max_blocks_to_request as usize, + max_blocks_to_request as usize, + notify_imported, + ); + + let _ = sync.on_blocks_processed( + max_blocks_to_request as usize, + max_blocks_to_request as usize, + second_batch, + ); + + resp_blocks + .into_iter() + .rev() + .for_each(|b| block_on(client.import(BlockOrigin::Own, b)).unwrap()); + } + + // Request the tip + get_block_request(&mut sync, FromBlock::Hash(fork_blocks.last().unwrap().hash()), 1, &peer_id1); +} + +#[test] +fn removes_target_fork_on_disconnect() { + sp_tracing::try_init_simple(); + let mut client = Arc::new(TestClientBuilder::new().build()); + let blocks = (0..3).map(|_| build_block(&mut client, None, false)).collect::>(); + + let mut sync = ChainSync::new(SyncMode::Full, client.clone(), 1, 64, None).unwrap(); + + let peer_id1 = PeerId::random(); + let common_block = blocks[1].clone(); + // Connect the node we will sync from + sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()); + + // Create a "new" header and announce it + let mut header = blocks[0].header().clone(); + header.number = 4; + send_block_announce(header, peer_id1, &mut sync); + assert!(sync.fork_targets.len() == 1); + + let _ = sync.peer_disconnected(&peer_id1); + assert!(sync.fork_targets.len() == 0); +} + +#[test] +fn can_import_response_with_missing_blocks() { + sp_tracing::try_init_simple(); + let mut client2 = Arc::new(TestClientBuilder::new().build()); + let blocks = (0..4).map(|_| build_block(&mut client2, None, false)).collect::>(); + + let empty_client = Arc::new(TestClientBuilder::new().build()); + + let mut sync = ChainSync::new(SyncMode::Full, empty_client.clone(), 1, 64, None).unwrap(); + + let peer_id1 = PeerId::random(); + let best_block = blocks[3].clone(); + sync.new_peer(peer_id1, best_block.hash(), *best_block.header().number()); + + sync.peers.get_mut(&peer_id1).unwrap().state = PeerSyncState::Available; + sync.peers.get_mut(&peer_id1).unwrap().common_number = 0; + + // Request all missing blocks and respond only with some. + let request = get_block_request(&mut sync, FromBlock::Hash(best_block.hash()), 4, &peer_id1); + let response = + create_block_response(vec![blocks[3].clone(), blocks[2].clone(), blocks[1].clone()]); + sync.on_block_data(&peer_id1, Some(request.clone()), response).unwrap(); + assert_eq!(sync.best_queued_number, 0); + + // Request should only contain the missing block. 
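+    // Only block number 1 is still missing at this point, so the follow-up request asks for
+    // exactly one block.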
+    let request = get_block_request(&mut sync, FromBlock::Number(1), 1, &peer_id1);
+    let response = create_block_response(vec![blocks[0].clone()]);
+    sync.on_block_data(&peer_id1, Some(request), response).unwrap();
+    assert_eq!(sync.best_queued_number, 4);
+}
+
+#[test]
+fn ancestor_search_repeat() {
+    let state = AncestorSearchState::<Block>::BinarySearch(1, 3);
+    assert!(handle_ancestor_search_state(&state, 2, true).is_none());
+}
+
+#[test]
+fn sync_restart_removes_block_but_not_justification_requests() {
+    let mut client = Arc::new(TestClientBuilder::new().build());
+    let mut sync = ChainSync::new(SyncMode::Full, client.clone(), 1, 64, None).unwrap();
+
+    let peers = vec![PeerId::random(), PeerId::random()];
+
+    let mut new_blocks = |n| {
+        for _ in 0..n {
+            let block = BlockBuilderBuilder::new(&*client)
+                .on_parent_block(client.chain_info().best_hash)
+                .with_parent_block_number(client.chain_info().best_number)
+                .build()
+                .unwrap()
+                .build()
+                .unwrap()
+                .block;
+            block_on(client.import(BlockOrigin::Own, block.clone())).unwrap();
+        }
+
+        let info = client.info();
+        (info.best_hash, info.best_number)
+    };
+
+    let (b1_hash, b1_number) = new_blocks(50);
+
+    // Add a new peer and request blocks from it.
+    sync.new_peer(peers[0], Hash::random(), 42);
+
+    // We don't actually perform any requests, just keep track of peers waiting for a response.
+    let mut pending_responses = HashSet::new();
+
+    // We will send block requests to these peers
+    // for the blocks we don't know about.
+    for (peer, _request) in sync.block_requests() {
+        // "send" request
+        pending_responses.insert(peer);
+    }
+
+    // Add a new peer at a known block.
+    sync.new_peer(peers[1], b1_hash, b1_number);
+
+    // We request a justification for a block we have locally.
+    sync.request_justification(&b1_hash, b1_number);
+
+    // The justification request should be scheduled to the
+    // new peer which is at the given block.
+    let mut requests = sync.justification_requests();
+    assert_eq!(requests.len(), 1);
+    let (peer, _request) = requests.remove(0);
+    // "send" request
+    assert!(pending_responses.insert(peer));
+
+    assert!(!std::matches!(
+        sync.peers.get(&peers[0]).unwrap().state,
+        PeerSyncState::DownloadingJustification(_),
+    ));
+    assert_eq!(
+        sync.peers.get(&peers[1]).unwrap().state,
+        PeerSyncState::DownloadingJustification(b1_hash),
+    );
+    assert_eq!(pending_responses.len(), 2);
+
+    // Discard old actions.
+    let _ = sync.take_actions();
+
+    // Restart sync.
+    sync.restart();
+    let actions = sync.take_actions().collect::<Vec<_>>();
+    for action in actions.iter() {
+        match action {
+            ChainSyncAction::CancelBlockRequest { peer_id } => {
+                pending_responses.remove(&peer_id);
+            },
+            ChainSyncAction::SendBlockRequest { peer_id, .. } => {
+                // We drop the obsolete response but don't register a new request; that is
+                // checked in the `assert!` below.
+                pending_responses.remove(&peer_id);
+            },
+            action @ _ => panic!("Unexpected action: {action:?}"),
+        }
+    }
+    assert!(actions.iter().any(|action| {
+        match action {
+            ChainSyncAction::SendBlockRequest { peer_id, .. } => peer_id == &peers[0],
+            _ => false,
+        }
+    }));
+
+    assert_eq!(pending_responses.len(), 1);
+    assert!(pending_responses.contains(&peers[1]));
+    assert_eq!(
+        sync.peers.get(&peers[1]).unwrap().state,
+        PeerSyncState::DownloadingJustification(b1_hash),
+    );
+    let _ = sync.peer_disconnected(&peers[1]);
+    pending_responses.remove(&peers[1]);
+    assert_eq!(pending_responses.len(), 0);
+}
+
+/// The test demonstrates https://github.com/paritytech/polkadot-sdk/issues/2094.
+/// TODO: convert it into desired behavior test once the issue is fixed (see inline comments). +/// The issue: we currently rely on block numbers instead of block hash +/// to download blocks from peers. As a result, we can end up with blocks +/// from different forks as shown by the test. +#[test] +#[should_panic] +fn request_across_forks() { + sp_tracing::try_init_simple(); + + let mut client = Arc::new(TestClientBuilder::new().build()); + let blocks = (0..100).map(|_| build_block(&mut client, None, false)).collect::>(); + + let fork_a_blocks = { + let mut client = Arc::new(TestClientBuilder::new().build()); + let mut fork_blocks = blocks[..] + .into_iter() + .inspect(|b| { + assert!(matches!(client.block(*b.header.parent_hash()), Ok(Some(_)))); + block_on(client.import(BlockOrigin::Own, (*b).clone())).unwrap() + }) + .cloned() + .collect::>(); + for _ in 0..10 { + fork_blocks.push(build_block(&mut client, None, false)); + } + fork_blocks + }; + + let fork_b_blocks = { + let mut client = Arc::new(TestClientBuilder::new().build()); + let mut fork_blocks = blocks[..] + .into_iter() + .inspect(|b| { + assert!(matches!(client.block(*b.header.parent_hash()), Ok(Some(_)))); + block_on(client.import(BlockOrigin::Own, (*b).clone())).unwrap() + }) + .cloned() + .collect::>(); + for _ in 0..10 { + fork_blocks.push(build_block(&mut client, None, true)); + } + fork_blocks + }; + + let mut sync = ChainSync::new(SyncMode::Full, client.clone(), 5, 64, None).unwrap(); + + // Add the peers, all at the common ancestor 100. + let common_block = blocks.last().unwrap(); + let peer_id1 = PeerId::random(); + sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()); + let peer_id2 = PeerId::random(); + sync.new_peer(peer_id2, common_block.hash(), *common_block.header().number()); + + // Peer 1 announces 107 from fork 1, 100-107 get downloaded. + { + let block = (&fork_a_blocks[106]).clone(); + let peer = peer_id1; + log::trace!(target: LOG_TARGET, "<1> {peer} announces from fork 1"); + send_block_announce(block.header().clone(), peer, &mut sync); + let request = get_block_request(&mut sync, FromBlock::Hash(block.hash()), 7, &peer); + let mut resp_blocks = fork_a_blocks[100_usize..107_usize].to_vec(); + resp_blocks.reverse(); + let response = create_block_response(resp_blocks.clone()); + + // Drop old actions + let _ = sync.take_actions(); + + sync.on_block_data(&peer, Some(request), response).unwrap(); + let actions = sync.take_actions().collect::>(); + assert_eq!(actions.len(), 1); + assert!(matches!( + &actions[0], + ChainSyncAction::ImportBlocks{ origin: _, blocks } if blocks.len() == 7_usize + )); + assert_eq!(sync.best_queued_number, 107); + assert_eq!(sync.best_queued_hash, block.hash()); + assert!(sync.is_known(&block.header.parent_hash())); + } + + // Peer 2 also announces 107 from fork 1. + { + let prev_best_number = sync.best_queued_number; + let prev_best_hash = sync.best_queued_hash; + let peer = peer_id2; + log::trace!(target: LOG_TARGET, "<2> {peer} announces from fork 1"); + for i in 100..107 { + let block = (&fork_a_blocks[i]).clone(); + send_block_announce(block.header().clone(), peer, &mut sync); + assert!(sync.block_requests().is_empty()); + } + assert_eq!(sync.best_queued_number, prev_best_number); + assert_eq!(sync.best_queued_hash, prev_best_hash); + } + + // Peer 2 undergoes reorg, announces 108 from fork 2, gets downloaded even though we + // don't have the parent from fork 2. 
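+    // This documents the current behaviour from the issue above: requests are keyed by block
+    // number rather than hash, so the block from fork 2 is requested and imported even though
+    // its fork 2 parent is not known locally.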
+ { + let block = (&fork_b_blocks[107]).clone(); + let peer = peer_id2; + log::trace!(target: LOG_TARGET, "<3> {peer} announces from fork 2"); + send_block_announce(block.header().clone(), peer, &mut sync); + // TODO: when the issue is fixed, this test can be changed to test the + // expected behavior instead. The needed changes would be: + // 1. Remove the `#[should_panic]` directive + // 2. These should be changed to check that sync.block_requests().is_empty(), after the + // block is announced. + let request = get_block_request(&mut sync, FromBlock::Hash(block.hash()), 1, &peer); + let response = create_block_response(vec![block.clone()]); + + // Drop old actions we are not going to check + let _ = sync.take_actions(); + + sync.on_block_data(&peer, Some(request), response).unwrap(); + let actions = sync.take_actions().collect::>(); + assert_eq!(actions.len(), 1); + assert!(matches!( + &actions[0], + ChainSyncAction::ImportBlocks{ origin: _, blocks } if blocks.len() == 1_usize + )); + assert!(sync.is_known(&block.header.parent_hash())); + } +} diff --git a/substrate/client/network/sync/src/engine.rs b/substrate/client/network/sync/src/engine.rs index 1a383cdde4794231e13126c69d125005340b78ce..2cb8eab22f7acc08efb437ca40922acb0678fe8c 100644 --- a/substrate/client/network/sync/src/engine.rs +++ b/substrate/client/network/sync/src/engine.rs @@ -24,12 +24,18 @@ use crate::{ BlockAnnounceValidationResult, BlockAnnounceValidator as BlockAnnounceValidatorStream, }, block_relay_protocol::{BlockDownloader, BlockResponseError}, + block_request_handler::MAX_BLOCKS_IN_RESPONSE, + chain_sync::{ChainSync, ChainSyncAction}, pending_responses::{PendingResponses, ResponseEvent}, schema::v1::{StateRequest, StateResponse}, - service::{self, chain_sync::ToServiceCommand}, - warp::WarpSyncParams, - BlockRequestAction, ChainSync, ClientError, ImportBlocksAction, ImportJustificationsAction, - OnBlockResponse, SyncingService, + service::{ + self, + syncing_service::{SyncingService, ToServiceCommand}, + }, + types::{ + BadPeer, ExtendedPeerInfo, OpaqueStateRequest, OpaqueStateResponse, PeerRequest, SyncEvent, + }, + warp::{EncodedProof, WarpProofRequest, WarpSyncParams}, }; use codec::{Decode, Encode}; @@ -38,7 +44,6 @@ use futures::{ future::{BoxFuture, Fuse}, FutureExt, StreamExt, }; -use futures_timer::Delay; use libp2p::{request_response::OutboundFailure, PeerId}; use log::{debug, trace}; use prometheus_endpoint::{ @@ -47,9 +52,10 @@ use prometheus_endpoint::{ }; use prost::Message; use schnellru::{ByLength, LruMap}; +use tokio::time::{Interval, MissedTickBehavior}; use sc_client_api::{BlockBackend, HeaderBackend, ProofProvider}; -use sc_consensus::import_queue::ImportQueueService; +use sc_consensus::{import_queue::ImportQueueService, IncomingBlock}; use sc_network::{ config::{ FullNetworkConfiguration, NonDefaultSetConfig, NonReservedPeerMode, NotificationHandshake, @@ -61,17 +67,15 @@ use sc_network::{ }; use sc_network_common::{ role::Roles, - sync::{ - message::{BlockAnnounce, BlockAnnouncesHandshake, BlockRequest, BlockState}, - warp::{EncodedProof, WarpProofRequest}, - BadPeer, ChainSync as ChainSyncT, ExtendedPeerInfo, OpaqueStateRequest, - OpaqueStateResponse, PeerRequest, SyncEvent, - }, + sync::message::{BlockAnnounce, BlockAnnouncesHandshake, BlockRequest, BlockState}, }; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; -use sp_blockchain::HeaderMetadata; -use sp_consensus::block_validation::BlockAnnounceValidator; -use sp_runtime::traits::{Block as BlockT, 
Header, NumberFor, Zero}; +use sp_blockchain::{Error as ClientError, HeaderMetadata}; +use sp_consensus::{block_validation::BlockAnnounceValidator, BlockOrigin}; +use sp_runtime::{ + traits::{Block as BlockT, Header, NumberFor, Zero}, + Justifications, +}; use std::{ collections::{HashMap, HashSet}, @@ -81,7 +85,6 @@ use std::{ atomic::{AtomicBool, AtomicUsize, Ordering}, Arc, }, - task::Poll, time::{Duration, Instant}, }; @@ -250,7 +253,7 @@ pub struct SyncingEngine { service_rx: TracingUnboundedReceiver>, /// Channel for receiving inbound connections from `Protocol`. - rx: sc_utils::mpsc::TracingUnboundedReceiver>, + sync_events_rx: sc_utils::mpsc::TracingUnboundedReceiver>, /// Assigned roles. roles: Roles, @@ -262,7 +265,7 @@ pub struct SyncingEngine { event_streams: Vec>, /// Interval at which we call `tick`. - tick_timeout: Delay, + tick_timeout: Interval, /// All connected peers. Contains both full and light node peers. peers: HashMap>, @@ -300,7 +303,7 @@ pub struct SyncingEngine { boot_node_ids: HashSet, /// A channel to get target block header if we skip over proofs downloading during warp sync. - warp_sync_target_block_header_rx: + warp_sync_target_block_header_rx_fused: Fuse>>, /// Protocol name used for block announcements @@ -359,22 +362,21 @@ where block_downloader: Arc>, state_request_protocol_name: ProtocolName, warp_sync_protocol_name: Option, - rx: sc_utils::mpsc::TracingUnboundedReceiver>, + sync_events_rx: sc_utils::mpsc::TracingUnboundedReceiver>, ) -> Result<(Self, SyncingService, NonDefaultSetConfig), ClientError> { let mode = net_config.network_config.sync_mode; let max_parallel_downloads = net_config.network_config.max_parallel_downloads; - let max_blocks_per_request = if net_config.network_config.max_blocks_per_request > - crate::MAX_BLOCKS_IN_RESPONSE as u32 - { - log::info!( - target: LOG_TARGET, - "clamping maximum blocks per request to {}", - crate::MAX_BLOCKS_IN_RESPONSE, - ); - crate::MAX_BLOCKS_IN_RESPONSE as u32 - } else { - net_config.network_config.max_blocks_per_request - }; + let max_blocks_per_request = + if net_config.network_config.max_blocks_per_request > MAX_BLOCKS_IN_RESPONSE as u32 { + log::info!( + target: LOG_TARGET, + "clamping maximum blocks per request to {}", + MAX_BLOCKS_IN_RESPONSE, + ); + MAX_BLOCKS_IN_RESPONSE as u32 + } else { + net_config.network_config.max_blocks_per_request + }; let cache_capacity = (net_config.network_config.default_peers_set.in_peers + net_config.network_config.default_peers_set.out_peers) .max(1); @@ -433,7 +435,7 @@ where // Make sure polling of the target block channel is a no-op if there is no block to // retrieve. 
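+        // (`futures::future::pending()` never resolves, so when no receiver is provided the
+        // fused future stays pending forever and the corresponding `select!` arm never fires.)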
- let warp_sync_target_block_header_rx = warp_sync_target_block_header_rx + let warp_sync_target_block_header_rx_fused = warp_sync_target_block_header_rx .map_or(futures::future::pending().boxed().fuse(), |rx| rx.boxed().fuse()); let block_announce_config = Self::get_block_announce_proto_config( @@ -453,11 +455,9 @@ where let chain_sync = ChainSync::new( mode, client.clone(), - block_announce_protocol_name.clone(), max_parallel_downloads, max_blocks_per_request, warp_sync_config, - network_service.clone(), )?; let (tx, service_rx) = tracing_unbounded("mpsc_chain_sync", 100_000); @@ -475,6 +475,12 @@ where let max_out_peers = net_config.network_config.default_peers_set.out_peers; let max_in_peers = (max_full_peers - max_out_peers) as usize; + let tick_timeout = { + let mut interval = tokio::time::interval(TICK_TIMEOUT); + interval.set_missed_tick_behavior(MissedTickBehavior::Delay); + interval + }; + Ok(( Self { roles, @@ -490,11 +496,11 @@ where num_connected: num_connected.clone(), is_major_syncing: is_major_syncing.clone(), service_rx, - rx, + sync_events_rx, genesis_hash, important_peers, default_peers_set_no_slot_connected_peers: HashSet::new(), - warp_sync_target_block_header_rx, + warp_sync_target_block_header_rx_fused, boot_node_ids, default_peers_set_no_slot_peers, default_peers_set_num_full, @@ -502,7 +508,7 @@ where num_in_peers: 0usize, max_in_peers, event_streams: Vec::new(), - tick_timeout: Delay::new(TICK_TIMEOUT), + tick_timeout, syncing_started: None, last_notification_io: Instant::now(), metrics: if let Some(r) = metrics_registry { @@ -688,236 +694,278 @@ where self.syncing_started = Some(Instant::now()); loop { - futures::future::poll_fn(|cx| self.poll(cx)).await; + tokio::select! { + _ = self.tick_timeout.tick() => self.perform_periodic_actions(), + command = self.service_rx.select_next_some() => + self.process_service_command(command), + sync_event = self.sync_events_rx.select_next_some() => + self.process_sync_event(sync_event), + warp_target_block_header = &mut self.warp_sync_target_block_header_rx_fused => + self.pass_warp_sync_target_block_header(warp_target_block_header), + response_event = self.pending_responses.select_next_some() => + self.process_response_event(response_event), + validation_result = self.block_announce_validator.select_next_some() => + self.process_block_announce_validation_result(validation_result), + } + + // Update atomic variables + self.num_connected.store(self.peers.len(), Ordering::Relaxed); + self.is_major_syncing + .store(self.chain_sync.status().state.is_major_syncing(), Ordering::Relaxed); + + // Process actions requested by `ChainSync`. + self.process_chain_sync_actions(); } } - pub fn poll(&mut self, cx: &mut std::task::Context) -> Poll<()> { - self.num_connected.store(self.peers.len(), Ordering::Relaxed); - self.is_major_syncing - .store(self.chain_sync.status().state.is_major_syncing(), Ordering::Relaxed); - - while let Poll::Ready(()) = self.tick_timeout.poll_unpin(cx) { - self.report_metrics(); - self.tick_timeout.reset(TICK_TIMEOUT); - - // if `SyncingEngine` has just started, don't evict seemingly inactive peers right away - // as they may not have produced blocks not because they've disconnected but because - // they're still waiting to receive enough relaychain blocks to start producing blocks. 
- if let Some(started) = self.syncing_started { - if started.elapsed() < INITIAL_EVICTION_WAIT_PERIOD { - continue - } + fn process_chain_sync_actions(&mut self) { + self.chain_sync.actions().for_each(|action| match action { + ChainSyncAction::SendBlockRequest { peer_id, request } => { + // Sending block request implies dropping obsolete pending response as we are not + // interested in it anymore (see [`ChainSyncAction::SendBlockRequest`]). + // Furthermore, only one request at a time is allowed to any peer. + let removed = self.pending_responses.remove(&peer_id); + self.send_block_request(peer_id, request.clone()); - self.syncing_started = None; - self.last_notification_io = Instant::now(); - } + trace!( + target: LOG_TARGET, + "Processed `ChainSyncAction::SendBlockRequest` to {} with {:?}, stale response removed: {}.", + peer_id, + request, + removed, + ) + }, + ChainSyncAction::CancelBlockRequest { peer_id } => { + let removed = self.pending_responses.remove(&peer_id); - // if syncing hasn't sent or received any blocks within `INACTIVITY_EVICT_THRESHOLD`, - // it means the local node has stalled and is connected to peers who either don't - // consider it connected or are also all stalled. In order to unstall the node, - // disconnect all peers and allow `ProtocolController` to establish new connections. - if self.last_notification_io.elapsed() > INACTIVITY_EVICT_THRESHOLD { - log::debug!( + trace!(target: LOG_TARGET, "Processed {action:?}, response removed: {removed}."); + }, + ChainSyncAction::SendStateRequest { peer_id, request } => { + self.send_state_request(peer_id, request); + + trace!( target: LOG_TARGET, - "syncing has halted due to inactivity, evicting all peers", + "Processed `ChainSyncAction::SendBlockRequest` to {peer_id}.", ); + }, + ChainSyncAction::SendWarpProofRequest { peer_id, request } => { + self.send_warp_proof_request(peer_id, request.clone()); - for peer in self.peers.keys() { - self.network_service.report_peer(*peer, rep::INACTIVE_SUBSTREAM); - self.network_service - .disconnect_peer(*peer, self.block_announce_protocol_name.clone()); - } + trace!( + target: LOG_TARGET, + "Processed `ChainSyncAction::SendWarpProofRequest` to {}, request: {:?}.", + peer_id, + request, + ); + }, + ChainSyncAction::DropPeer(BadPeer(peer_id, rep)) => { + self.pending_responses.remove(&peer_id); + self.network_service + .disconnect_peer(peer_id, self.block_announce_protocol_name.clone()); + self.network_service.report_peer(peer_id, rep); - // after all the peers have been evicted, start timer again to prevent evicting - // new peers that join after the old peer have been evicted - self.last_notification_io = Instant::now(); + trace!(target: LOG_TARGET, "Processed {action:?}."); + }, + ChainSyncAction::ImportBlocks { origin, blocks } => { + let count = blocks.len(); + self.import_blocks(origin, blocks); + + trace!( + target: LOG_TARGET, + "Processed `ChainSyncAction::ImportBlocks` with {count} blocks.", + ); + }, + ChainSyncAction::ImportJustifications { peer_id, hash, number, justifications } => { + self.import_justifications(peer_id, hash, number, justifications); + + trace!( + target: LOG_TARGET, + "Processed `ChainSyncAction::ImportJustifications` from peer {} for block {} ({}).", + peer_id, + hash, + number, + ) + }, + }); + } + + fn perform_periodic_actions(&mut self) { + self.report_metrics(); + + // if `SyncingEngine` has just started, don't evict seemingly inactive peers right away + // as they may not have produced blocks not because they've disconnected but because + // 
they're still waiting to receive enough relaychain blocks to start producing blocks. + if let Some(started) = self.syncing_started { + if started.elapsed() < INITIAL_EVICTION_WAIT_PERIOD { + return } + + self.syncing_started = None; + self.last_notification_io = Instant::now(); } - while let Poll::Ready(Some(event)) = self.service_rx.poll_next_unpin(cx) { - match event { - ToServiceCommand::SetSyncForkRequest(peers, hash, number) => { - self.chain_sync.set_sync_fork_request(peers, &hash, number); - }, - ToServiceCommand::EventStream(tx) => self.event_streams.push(tx), - ToServiceCommand::RequestJustification(hash, number) => - self.chain_sync.request_justification(&hash, number), - ToServiceCommand::ClearJustificationRequests => - self.chain_sync.clear_justification_requests(), - ToServiceCommand::BlocksProcessed(imported, count, results) => { - for result in self.chain_sync.on_blocks_processed(imported, count, results) { - match result { - Ok(action) => match action { - BlockRequestAction::SendRequest { peer_id, request } => { - // drop obsolete pending response first - self.pending_responses.remove(&peer_id); - self.send_block_request(peer_id, request); - }, - BlockRequestAction::RemoveStale { peer_id } => { - self.pending_responses.remove(&peer_id); - }, - }, - Err(BadPeer(peer_id, repu)) => { - self.pending_responses.remove(&peer_id); - self.network_service.disconnect_peer( - peer_id, - self.block_announce_protocol_name.clone(), - ); - self.network_service.report_peer(peer_id, repu) - }, - } - } - }, - ToServiceCommand::JustificationImported(peer_id, hash, number, success) => { - self.chain_sync.on_justification_import(hash, number, success); - if !success { - log::info!( - target: LOG_TARGET, - "💔 Invalid justification provided by {peer_id} for #{hash}", - ); - self.network_service - .disconnect_peer(peer_id, self.block_announce_protocol_name.clone()); - self.network_service.report_peer( - peer_id, - ReputationChange::new_fatal("Invalid justification"), - ); - } - }, - ToServiceCommand::AnnounceBlock(hash, data) => self.announce_block(hash, data), - ToServiceCommand::NewBestBlockImported(hash, number) => - self.new_best_block_imported(hash, number), - ToServiceCommand::Status(tx) => { - let mut status = self.chain_sync.status(); - status.num_connected_peers = self.peers.len() as u32; - let _ = tx.send(status); - }, - ToServiceCommand::NumActivePeers(tx) => { - let _ = tx.send(self.num_active_peers()); - }, - ToServiceCommand::SyncState(tx) => { - let _ = tx.send(self.chain_sync.status()); - }, - ToServiceCommand::BestSeenBlock(tx) => { - let _ = tx.send(self.chain_sync.status().best_seen_block); - }, - ToServiceCommand::NumSyncPeers(tx) => { - let _ = tx.send(self.chain_sync.status().num_peers); - }, - ToServiceCommand::NumQueuedBlocks(tx) => { - let _ = tx.send(self.chain_sync.status().queued_blocks); - }, - ToServiceCommand::NumDownloadedBlocks(tx) => { - let _ = tx.send(self.chain_sync.num_downloaded_blocks()); - }, - ToServiceCommand::NumSyncRequests(tx) => { - let _ = tx.send(self.chain_sync.num_sync_requests()); - }, - ToServiceCommand::PeersInfo(tx) => { - let peers_info = self - .peers - .iter() - .map(|(peer_id, peer)| (*peer_id, peer.info.clone())) - .collect(); - let _ = tx.send(peers_info); - }, - ToServiceCommand::OnBlockFinalized(hash, header) => - self.chain_sync.on_block_finalized(&hash, *header.number()), + // if syncing hasn't sent or received any blocks within `INACTIVITY_EVICT_THRESHOLD`, + // it means the local node has stalled and is connected to peers who either 
don't + // consider it connected or are also all stalled. In order to unstall the node, + // disconnect all peers and allow `ProtocolController` to establish new connections. + if self.last_notification_io.elapsed() > INACTIVITY_EVICT_THRESHOLD { + log::debug!( + target: LOG_TARGET, + "syncing has halted due to inactivity, evicting all peers", + ); + + for peer in self.peers.keys() { + self.network_service.report_peer(*peer, rep::INACTIVE_SUBSTREAM); + self.network_service + .disconnect_peer(*peer, self.block_announce_protocol_name.clone()); } + + // after all the peers have been evicted, start timer again to prevent evicting + // new peers that join after the old peer have been evicted + self.last_notification_io = Instant::now(); } + } - while let Poll::Ready(Some(event)) = self.rx.poll_next_unpin(cx) { - match event { - sc_network::SyncEvent::NotificationStreamOpened { - remote, - received_handshake, - sink, - inbound, - tx, - } => match self.on_sync_peer_connected(remote, &received_handshake, sink, inbound) { - Ok(()) => { - let _ = tx.send(true); - }, - Err(()) => { - log::debug!( - target: LOG_TARGET, - "Failed to register peer {remote:?}: {received_handshake:?}", - ); - let _ = tx.send(false); - }, + fn process_service_command(&mut self, command: ToServiceCommand) { + match command { + ToServiceCommand::SetSyncForkRequest(peers, hash, number) => { + self.chain_sync.set_sync_fork_request(peers, &hash, number); + }, + ToServiceCommand::EventStream(tx) => self.event_streams.push(tx), + ToServiceCommand::RequestJustification(hash, number) => + self.chain_sync.request_justification(&hash, number), + ToServiceCommand::ClearJustificationRequests => + self.chain_sync.clear_justification_requests(), + ToServiceCommand::BlocksProcessed(imported, count, results) => { + self.chain_sync.on_blocks_processed(imported, count, results); + }, + ToServiceCommand::JustificationImported(peer_id, hash, number, success) => { + self.chain_sync.on_justification_import(hash, number, success); + if !success { + log::info!( + target: LOG_TARGET, + "💔 Invalid justification provided by {peer_id} for #{hash}", + ); + self.network_service + .disconnect_peer(peer_id, self.block_announce_protocol_name.clone()); + self.network_service + .report_peer(peer_id, ReputationChange::new_fatal("Invalid justification")); + } + }, + ToServiceCommand::AnnounceBlock(hash, data) => self.announce_block(hash, data), + ToServiceCommand::NewBestBlockImported(hash, number) => + self.new_best_block_imported(hash, number), + ToServiceCommand::Status(tx) => { + let mut status = self.chain_sync.status(); + status.num_connected_peers = self.peers.len() as u32; + let _ = tx.send(status); + }, + ToServiceCommand::NumActivePeers(tx) => { + let _ = tx.send(self.num_active_peers()); + }, + ToServiceCommand::SyncState(tx) => { + let _ = tx.send(self.chain_sync.status()); + }, + ToServiceCommand::BestSeenBlock(tx) => { + let _ = tx.send(self.chain_sync.status().best_seen_block); + }, + ToServiceCommand::NumSyncPeers(tx) => { + let _ = tx.send(self.chain_sync.status().num_peers); + }, + ToServiceCommand::NumQueuedBlocks(tx) => { + let _ = tx.send(self.chain_sync.status().queued_blocks); + }, + ToServiceCommand::NumDownloadedBlocks(tx) => { + let _ = tx.send(self.chain_sync.num_downloaded_blocks()); + }, + ToServiceCommand::NumSyncRequests(tx) => { + let _ = tx.send(self.chain_sync.num_sync_requests()); + }, + ToServiceCommand::PeersInfo(tx) => { + let peers_info = self + .peers + .iter() + .map(|(peer_id, peer)| (*peer_id, peer.info.clone())) + 
.collect(); + let _ = tx.send(peers_info); + }, + ToServiceCommand::OnBlockFinalized(hash, header) => + self.chain_sync.on_block_finalized(&hash, *header.number()), + } + } + + fn process_sync_event(&mut self, event: sc_network::SyncEvent) { + match event { + sc_network::SyncEvent::NotificationStreamOpened { + remote, + received_handshake, + sink, + inbound, + tx, + } => match self.on_sync_peer_connected(remote, &received_handshake, sink, inbound) { + Ok(()) => { + let _ = tx.send(true); }, - sc_network::SyncEvent::NotificationStreamClosed { remote } => { - if self.on_sync_peer_disconnected(remote).is_err() { - log::trace!( - target: LOG_TARGET, - "Disconnected peer which had earlier been refused by on_sync_peer_connected {}", - remote - ); - } + Err(()) => { + log::debug!( + target: LOG_TARGET, + "Failed to register peer {remote:?}: {received_handshake:?}", + ); + let _ = tx.send(false); }, - sc_network::SyncEvent::NotificationsReceived { remote, messages } => { - for message in messages { - if self.peers.contains_key(&remote) { - if let Ok(announce) = BlockAnnounce::decode(&mut message.as_ref()) { - self.last_notification_io = Instant::now(); - self.push_block_announce_validation(remote, announce); - } else { - log::warn!(target: "sub-libp2p", "Failed to decode block announce"); - } + }, + sc_network::SyncEvent::NotificationStreamClosed { remote } => { + if self.on_sync_peer_disconnected(remote).is_err() { + log::trace!( + target: LOG_TARGET, + "Disconnected peer which had earlier been refused by on_sync_peer_connected {}", + remote + ); + } + }, + sc_network::SyncEvent::NotificationsReceived { remote, messages } => { + for message in messages { + if self.peers.contains_key(&remote) { + if let Ok(announce) = BlockAnnounce::decode(&mut message.as_ref()) { + self.last_notification_io = Instant::now(); + self.push_block_announce_validation(remote, announce); } else { - log::trace!( - target: LOG_TARGET, - "Received sync for peer earlier refused by sync layer: {remote}", - ); + log::warn!(target: "sub-libp2p", "Failed to decode block announce"); } + } else { + log::trace!( + target: LOG_TARGET, + "Received sync for peer earlier refused by sync layer: {remote}", + ); } - }, - sc_network::SyncEvent::NotificationSinkReplaced { remote, sink } => { - if let Some(peer) = self.peers.get_mut(&remote) { - peer.sink = sink; - } - }, - } + } + }, + sc_network::SyncEvent::NotificationSinkReplaced { remote, sink } => { + if let Some(peer) = self.peers.get_mut(&remote) { + peer.sink = sink; + } + }, } + } - // Retreive warp sync target block header just before polling `ChainSync` - // to make progress as soon as we receive it. - match self.warp_sync_target_block_header_rx.poll_unpin(cx) { - Poll::Ready(Ok(target)) => { - self.chain_sync.set_warp_sync_target_block(target); + fn pass_warp_sync_target_block_header(&mut self, header: Result) { + match header { + Ok(header) => { + self.chain_sync.set_warp_sync_target_block(header); }, - Poll::Ready(Err(err)) => { + Err(err) => { log::error!( target: LOG_TARGET, "Failed to get target block for warp sync. Error: {err:?}", ); }, - Poll::Pending => {}, - } - - // Send outbound requests on `ChanSync`'s behalf. - self.send_chain_sync_requests(); - - // Poll & process pending responses. 
- while let Poll::Ready(Some(event)) = self.pending_responses.poll_next_unpin(cx) { - self.process_response_event(event); - } - - // Poll block announce validations last, because if a block announcement was received - // through the event stream between `SyncingEngine` and `Protocol` and the validation - // finished right after it is queued, the resulting block request (if any) can be sent - // right away. - while let Poll::Ready(Some(result)) = self.block_announce_validator.poll_next_unpin(cx) { - self.process_block_announce_validation_result(result); } - - Poll::Pending } /// Called by peer when it is disconnecting. /// /// Returns a result if the handshake of this peer was indeed accepted. - pub fn on_sync_peer_disconnected(&mut self, peer_id: PeerId) -> Result<(), ()> { + fn on_sync_peer_disconnected(&mut self, peer_id: PeerId) -> Result<(), ()> { if let Some(info) = self.peers.remove(&peer_id) { if self.important_peers.contains(&peer_id) { log::warn!(target: LOG_TARGET, "Reserved peer {peer_id} disconnected"); @@ -942,9 +990,7 @@ where } } - if let Some(import_blocks_action) = self.chain_sync.peer_disconnected(&peer_id) { - self.import_blocks(import_blocks_action) - } + self.chain_sync.peer_disconnected(&peer_id); self.pending_responses.remove(&peer_id); self.event_streams.retain(|stream| { @@ -961,7 +1007,7 @@ where /// /// Returns `Ok` if the handshake is accepted and the peer added to the list of peers we sync /// from. - pub fn on_sync_peer_connected( + fn on_sync_peer_connected( &mut self, peer_id: PeerId, status: &BlockAnnouncesHandshake, @@ -1055,17 +1101,7 @@ where inbound, }; - let req = if peer.info.roles.is_full() { - match self.chain_sync.new_peer(peer_id, peer.info.best_hash, peer.info.best_number) { - Ok(req) => req, - Err(BadPeer(id, repu)) => { - self.network_service.report_peer(id, repu); - return Err(()) - }, - } - } else { - None - }; + self.chain_sync.new_peer(peer_id, peer.info.best_hash, peer.info.best_number); log::debug!(target: LOG_TARGET, "Connected {peer_id}"); @@ -1077,36 +1113,14 @@ where self.num_in_peers += 1; } - if let Some(req) = req { - self.send_block_request(peer_id, req); - } - self.event_streams .retain(|stream| stream.unbounded_send(SyncEvent::PeerConnected(peer_id)).is_ok()); Ok(()) } - fn send_chain_sync_requests(&mut self) { - for (peer_id, request) in self.chain_sync.block_requests() { - self.send_block_request(peer_id, request); - } - - if let Some((peer_id, request)) = self.chain_sync.state_request() { - self.send_state_request(peer_id, request); - } - - for (peer_id, request) in self.chain_sync.justification_requests() { - self.send_block_request(peer_id, request); - } - - if let Some((peer_id, request)) = self.chain_sync.warp_sync_request() { - self.send_warp_sync_request(peer_id, request); - } - } - fn send_block_request(&mut self, peer_id: PeerId, request: BlockRequest) { - if !self.chain_sync.is_peer_known(&peer_id) { + if !self.peers.contains_key(&peer_id) { trace!(target: LOG_TARGET, "Cannot send block request to unknown peer {peer_id}"); debug_assert!(false); return @@ -1122,7 +1136,7 @@ where } fn send_state_request(&mut self, peer_id: PeerId, request: OpaqueStateRequest) { - if !self.chain_sync.is_peer_known(&peer_id) { + if !self.peers.contains_key(&peer_id) { trace!(target: LOG_TARGET, "Cannot send state request to unknown peer {peer_id}"); debug_assert!(false); return @@ -1151,8 +1165,8 @@ where } } - fn send_warp_sync_request(&mut self, peer_id: PeerId, request: WarpProofRequest) { - if 
!self.chain_sync.is_peer_known(&peer_id) { + fn send_warp_proof_request(&mut self, peer_id: PeerId, request: WarpProofRequest) { + if !self.peers.contains_key(&peer_id) { trace!(target: LOG_TARGET, "Cannot send warp proof request to unknown peer {peer_id}"); debug_assert!(false); return @@ -1204,15 +1218,7 @@ where PeerRequest::Block(req) => { match self.block_downloader.block_response_into_blocks(&req, resp) { Ok(blocks) => { - match self.chain_sync.on_block_response(peer_id, req, blocks) { - OnBlockResponse::SendBlockRequest { peer_id, request } => - self.send_block_request(peer_id, request), - OnBlockResponse::ImportBlocks(import_blocks_action) => - self.import_blocks(import_blocks_action), - OnBlockResponse::ImportJustifications(action) => - self.import_justifications(action), - OnBlockResponse::Nothing => {}, - } + self.chain_sync.on_block_response(peer_id, req, blocks); }, Err(BlockResponseError::DecodeFailed(e)) => { debug!( @@ -1257,14 +1263,10 @@ where }, }; - if let Some(import_blocks_action) = - self.chain_sync.on_state_response(peer_id, response) - { - self.import_blocks(import_blocks_action); - } + self.chain_sync.on_state_response(peer_id, response); }, PeerRequest::WarpProof => { - self.chain_sync.on_warp_sync_response(peer_id, EncodedProof(resp)); + self.chain_sync.on_warp_sync_response(&peer_id, EncodedProof(resp)); }, }, Ok(Err(e)) => { @@ -1370,7 +1372,7 @@ where } /// Import blocks. - fn import_blocks(&mut self, ImportBlocksAction { origin, blocks }: ImportBlocksAction) { + fn import_blocks(&mut self, origin: BlockOrigin, blocks: Vec>) { if let Some(metrics) = &self.metrics { metrics.import_queue_blocks_submitted.inc(); } @@ -1379,13 +1381,17 @@ where } /// Import justifications. - fn import_justifications(&mut self, action: ImportJustificationsAction) { + fn import_justifications( + &mut self, + peer_id: PeerId, + hash: B::Hash, + number: NumberFor, + justifications: Justifications, + ) { if let Some(metrics) = &self.metrics { metrics.import_queue_justifications_submitted.inc(); } - let ImportJustificationsAction { peer_id, hash, number, justifications } = action; - self.import_queue.import_justifications(peer_id, hash, number, justifications); } } diff --git a/substrate/client/network/sync/src/extra_requests.rs b/substrate/client/network/sync/src/extra_requests.rs index 09e6bdb57399f3dbd7bd30aec8da15662f853179..8edd1a772e26b20e0ff8afb3f76432ae195ee075 100644 --- a/substrate/client/network/sync/src/extra_requests.rs +++ b/substrate/client/network/sync/src/extra_requests.rs @@ -16,11 +16,13 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use crate::{PeerSync, PeerSyncState}; +use crate::{ + chain_sync::{PeerSync, PeerSyncState}, + request_metrics::Metrics, +}; use fork_tree::ForkTree; use libp2p::PeerId; use log::{debug, trace, warn}; -use sc_network_common::sync::metrics::Metrics; use sp_blockchain::Error as ClientError; use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; use std::{ @@ -343,7 +345,7 @@ impl<'a, B: BlockT> Matcher<'a, B> { #[cfg(test)] mod tests { use super::*; - use crate::PeerSync; + use crate::chain_sync::PeerSync; use quickcheck::{Arbitrary, Gen, QuickCheck}; use sp_blockchain::Error as ClientError; use sp_test_primitives::{Block, BlockNumber, Hash}; diff --git a/substrate/client/network/sync/src/lib.rs b/substrate/client/network/sync/src/lib.rs index 10eaa2450518e4b0d06d14a7431e3a9de2b24676..1a7e773c95f7ad68f0831b4aeaf489f685996d7f 100644 --- a/substrate/client/network/sync/src/lib.rs +++ b/substrate/client/network/sync/src/lib.rs @@ -16,67 +16,19 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! Contains the state of the chain synchronization process -//! -//! At any given point in time, a running node tries as much as possible to be at the head of the -//! chain. This module handles the logic of which blocks to request from remotes, and processing -//! responses. It yields blocks to check and potentially move to the database. -//! -//! # Usage -//! -//! The `ChainSync` struct maintains the state of the block requests. Whenever something happens on -//! the network, or whenever a block has been successfully verified, call the appropriate method in -//! order to update it. +//! Blockchain syncing implementation in Substrate. -use crate::{ - blocks::BlockCollection, - schema::v1::StateResponse, - state::StateSync, - warp::{WarpProofImportResult, WarpSync, WarpSyncConfig}, -}; - -use codec::Encode; -use extra_requests::ExtraRequests; -use libp2p::PeerId; -use log::{debug, error, info, trace, warn}; - -use sc_client_api::{BlockBackend, ProofProvider}; -use sc_consensus::{BlockImportError, BlockImportStatus, IncomingBlock}; -use sc_network::types::ProtocolName; -use sc_network_common::sync::{ - message::{ - BlockAnnounce, BlockAttributes, BlockData, BlockRequest, BlockResponse, Direction, - FromBlock, - }, - warp::{EncodedProof, WarpProofRequest, WarpSyncPhase, WarpSyncProgress}, - BadPeer, ChainSync as ChainSyncT, ImportBlocksAction, Metrics, OnBlockData, - OnBlockJustification, OnStateData, OpaqueStateRequest, OpaqueStateResponse, PeerInfo, SyncMode, - SyncState, SyncStatus, -}; -use sp_arithmetic::traits::Saturating; -use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata}; -use sp_consensus::{BlockOrigin, BlockStatus}; -use sp_runtime::{ - traits::{ - Block as BlockT, CheckedSub, Hash, HashingFor, Header as HeaderT, NumberFor, One, - SaturatedConversion, Zero, - }, - EncodedJustification, Justifications, -}; - -use std::{ - collections::{HashMap, HashSet}, - ops::Range, - sync::Arc, -}; - -pub use service::chain_sync::SyncingService; +pub use service::syncing_service::SyncingService; +pub use types::{SyncEvent, SyncEventStream, SyncState, SyncStatus, SyncStatusProvider}; mod block_announce_validator; +mod chain_sync; mod extra_requests; mod futures_stream; mod pending_responses; +mod request_metrics; mod schema; +mod types; pub mod block_relay_protocol; pub mod block_request_handler; @@ -88,3280 +40,3 @@ pub mod state; pub mod state_request_handler; pub mod warp; pub mod warp_request_handler; - -/// Log 
target for this file. -const LOG_TARGET: &'static str = "sync"; - -/// Maximum blocks to store in the import queue. -const MAX_IMPORTING_BLOCKS: usize = 2048; - -/// Maximum blocks to download ahead of any gap. -const MAX_DOWNLOAD_AHEAD: u32 = 2048; - -/// Maximum blocks to look backwards. The gap is the difference between the highest block and the -/// common block of a node. -const MAX_BLOCKS_TO_LOOK_BACKWARDS: u32 = MAX_DOWNLOAD_AHEAD / 2; - -/// Pick the state to sync as the latest finalized number minus this. -const STATE_SYNC_FINALITY_THRESHOLD: u32 = 8; - -/// We use a heuristic that with a high likelihood, by the time -/// `MAJOR_SYNC_BLOCKS` have been imported we'll be on the same -/// chain as (or at least closer to) the peer so we want to delay -/// the ancestor search to not waste time doing that when we are -/// so far behind. -const MAJOR_SYNC_BLOCKS: u8 = 5; - -/// Number of peers that need to be connected before warp sync is started. -const MIN_PEERS_TO_START_WARP_SYNC: usize = 3; - -/// Maximum blocks per response. -pub(crate) const MAX_BLOCKS_IN_RESPONSE: usize = 128; - -mod rep { - use sc_network::ReputationChange as Rep; - /// Reputation change when a peer sent us a message that led to a - /// database read error. - pub const BLOCKCHAIN_READ_ERROR: Rep = Rep::new(-(1 << 16), "DB Error"); - - /// Reputation change when a peer sent us a status message with a different - /// genesis than us. - pub const GENESIS_MISMATCH: Rep = Rep::new(i32::MIN, "Genesis mismatch"); - - /// Reputation change for peers which send us a block with an incomplete header. - pub const INCOMPLETE_HEADER: Rep = Rep::new(-(1 << 20), "Incomplete header"); - - /// Reputation change for peers which send us a block which we fail to verify. - pub const VERIFICATION_FAIL: Rep = Rep::new(-(1 << 29), "Block verification failed"); - - /// Reputation change for peers which send us a known bad block. - pub const BAD_BLOCK: Rep = Rep::new(-(1 << 29), "Bad block"); - - /// Peer did not provide us with advertised block data. - pub const NO_BLOCK: Rep = Rep::new(-(1 << 29), "No requested block data"); - - /// Reputation change for peers which send us non-requested block data. - pub const NOT_REQUESTED: Rep = Rep::new(-(1 << 29), "Not requested block data"); - - /// Reputation change for peers which send us a block with bad justifications. - pub const BAD_JUSTIFICATION: Rep = Rep::new(-(1 << 16), "Bad justification"); - - /// Reputation change when a peer sent us invlid ancestry result. - pub const UNKNOWN_ANCESTOR: Rep = Rep::new(-(1 << 16), "DB Error"); - - /// Peer response data does not have requested bits. 
- pub const BAD_RESPONSE: Rep = Rep::new(-(1 << 12), "Incomplete response"); -} - -enum AllowedRequests { - Some(HashSet), - All, -} - -impl AllowedRequests { - fn add(&mut self, id: &PeerId) { - if let Self::Some(ref mut set) = self { - set.insert(*id); - } - } - - fn take(&mut self) -> Self { - std::mem::take(self) - } - - fn set_all(&mut self) { - *self = Self::All; - } - - fn contains(&self, id: &PeerId) -> bool { - match self { - Self::Some(set) => set.contains(id), - Self::All => true, - } - } - - fn is_empty(&self) -> bool { - match self { - Self::Some(set) => set.is_empty(), - Self::All => false, - } - } - - fn clear(&mut self) { - std::mem::take(self); - } -} - -impl Default for AllowedRequests { - fn default() -> Self { - Self::Some(HashSet::default()) - } -} - -struct GapSync { - blocks: BlockCollection, - best_queued_number: NumberFor, - target: NumberFor, -} - -/// Action that [`engine::SyncingEngine`] should perform after reporting imported blocks with -/// [`ChainSync::on_blocks_processed`]. -enum BlockRequestAction { - /// Send block request to peer. Always implies dropping a stale block request to the same peer. - SendRequest { peer_id: PeerId, request: BlockRequest }, - /// Drop stale block request. - RemoveStale { peer_id: PeerId }, -} - -/// Action that [`engine::SyncingEngine`] should perform if we want to import justifications. -struct ImportJustificationsAction { - peer_id: PeerId, - hash: B::Hash, - number: NumberFor, - justifications: Justifications, -} - -/// Action that [`engine::SyncingEngine`] should perform on behalf of [`ChainSync`] -/// after reporting block response with [`ChainSync::on_block_response`]. -enum OnBlockResponse { - /// Nothing to do - Nothing, - /// Perform block request. - SendBlockRequest { peer_id: PeerId, request: BlockRequest }, - /// Import blocks. - ImportBlocks(ImportBlocksAction), - /// Import justifications. - ImportJustifications(ImportJustificationsAction), -} - -/// The main data structure which contains all the state for a chains -/// active syncing strategy. -pub struct ChainSync { - /// Chain client. - client: Arc, - /// The active peers that we are using to sync and their PeerSync status - peers: HashMap>, - /// A `BlockCollection` of blocks that are being downloaded from peers - blocks: BlockCollection, - /// The best block number in our queue of blocks to import - best_queued_number: NumberFor, - /// The best block hash in our queue of blocks to import - best_queued_hash: B::Hash, - /// Current mode (full/light) - mode: SyncMode, - /// Any extra justification requests. - extra_justifications: ExtraRequests, - /// A set of hashes of blocks that are being downloaded or have been - /// downloaded and are queued for import. - queue_blocks: HashSet, - /// Fork sync targets. - fork_targets: HashMap>, - /// A set of peers for which there might be potential block requests - allowed_requests: AllowedRequests, - /// Maximum number of peers to ask the same blocks in parallel. - max_parallel_downloads: u32, - /// Maximum blocks per request. - max_blocks_per_request: u32, - /// Total number of downloaded blocks. - downloaded_blocks: usize, - /// State sync in progress, if any. - state_sync: Option>, - /// Warp sync in progress, if any. - warp_sync: Option>, - /// Warp sync configuration. - /// - /// Will be `None` after `self.warp_sync` is `Some(_)`. - warp_sync_config: Option>, - /// A temporary storage for warp sync target block until warp sync is initialized. 
- warp_sync_target_block_header: Option, - /// Enable importing existing blocks. This is used used after the state download to - /// catch up to the latest state while re-importing blocks. - import_existing: bool, - /// Gap download process. - gap_sync: Option>, - /// Handle for communicating with `NetworkService` - network_service: service::network::NetworkServiceHandle, - /// Protocol name used for block announcements - block_announce_protocol_name: ProtocolName, -} - -/// All the data we have about a Peer that we are trying to sync with -#[derive(Debug, Clone)] -pub struct PeerSync { - /// Peer id of this peer. - pub peer_id: PeerId, - /// The common number is the block number that is a common point of - /// ancestry for both our chains (as far as we know). - pub common_number: NumberFor, - /// The hash of the best block that we've seen for this peer. - pub best_hash: B::Hash, - /// The number of the best block that we've seen for this peer. - pub best_number: NumberFor, - /// The state of syncing this peer is in for us, generally categories - /// into `Available` or "busy" with something as defined by `PeerSyncState`. - pub state: PeerSyncState, -} - -impl PeerSync { - /// Update the `common_number` iff `new_common > common_number`. - fn update_common_number(&mut self, new_common: NumberFor) { - if self.common_number < new_common { - trace!( - target: LOG_TARGET, - "Updating peer {} common number from={} => to={}.", - self.peer_id, - self.common_number, - new_common, - ); - self.common_number = new_common; - } - } -} - -struct ForkTarget { - number: NumberFor, - parent_hash: Option, - peers: HashSet, -} - -/// The state of syncing between a Peer and ourselves. -/// -/// Generally two categories, "busy" or `Available`. If busy, the enum -/// defines what we are busy with. -#[derive(Copy, Clone, Eq, PartialEq, Debug)] -pub enum PeerSyncState { - /// Available for sync requests. - Available, - /// Searching for ancestors the Peer has in common with us. - AncestorSearch { start: NumberFor, current: NumberFor, state: AncestorSearchState }, - /// Actively downloading new blocks, starting from the given Number. - DownloadingNew(NumberFor), - /// Downloading a stale block with given Hash. Stale means that it is a - /// block with a number that is lower than our best number. It might be - /// from a fork and not necessarily already imported. - DownloadingStale(B::Hash), - /// Downloading justification for given block hash. - DownloadingJustification(B::Hash), - /// Downloading state. - DownloadingState, - /// Downloading warp proof. - DownloadingWarpProof, - /// Downloading warp sync target block. - DownloadingWarpTargetBlock, - /// Actively downloading block history after warp sync. - DownloadingGap(NumberFor), -} - -impl PeerSyncState { - pub fn is_available(&self) -> bool { - matches!(self, Self::Available) - } -} - -impl ChainSyncT for ChainSync -where - B: BlockT, - Client: HeaderBackend - + BlockBackend - + HeaderMetadata - + ProofProvider - + Send - + Sync - + 'static, -{ - fn peer_info(&self, who: &PeerId) -> Option> { - self.peers - .get(who) - .map(|p| PeerInfo { best_hash: p.best_hash, best_number: p.best_number }) - } - - /// Returns the current sync status. 
- fn status(&self) -> SyncStatus { - let median_seen = self.median_seen(); - let best_seen_block = - median_seen.and_then(|median| (median > self.best_queued_number).then_some(median)); - let sync_state = if let Some(target) = median_seen { - // A chain is classified as downloading if the provided best block is - // more than `MAJOR_SYNC_BLOCKS` behind the best block or as importing - // if the same can be said about queued blocks. - let best_block = self.client.info().best_number; - if target > best_block && target - best_block > MAJOR_SYNC_BLOCKS.into() { - // If target is not queued, we're downloading, otherwise importing. - if target > self.best_queued_number { - SyncState::Downloading { target } - } else { - SyncState::Importing { target } - } - } else { - SyncState::Idle - } - } else { - SyncState::Idle - }; - - let warp_sync_progress = match (&self.warp_sync, &self.mode, &self.gap_sync) { - (_, _, Some(gap_sync)) => Some(WarpSyncProgress { - phase: WarpSyncPhase::DownloadingBlocks(gap_sync.best_queued_number), - total_bytes: 0, - }), - (None, SyncMode::Warp, _) => Some(WarpSyncProgress { - phase: WarpSyncPhase::AwaitingPeers { - required_peers: MIN_PEERS_TO_START_WARP_SYNC, - }, - total_bytes: 0, - }), - (Some(sync), _, _) => Some(sync.progress()), - _ => None, - }; - - SyncStatus { - state: sync_state, - best_seen_block, - num_peers: self.peers.len() as u32, - num_connected_peers: 0u32, - queued_blocks: self.queue_blocks.len() as u32, - state_sync: self.state_sync.as_ref().map(|s| s.progress()), - warp_sync: warp_sync_progress, - } - } - - fn num_sync_requests(&self) -> usize { - self.fork_targets - .values() - .filter(|f| f.number <= self.best_queued_number) - .count() - } - - fn num_downloaded_blocks(&self) -> usize { - self.downloaded_blocks - } - - fn num_peers(&self) -> usize { - self.peers.len() - } - - #[must_use] - fn new_peer( - &mut self, - who: PeerId, - best_hash: B::Hash, - best_number: NumberFor, - ) -> Result>, BadPeer> { - // There is nothing sync can get from the node that has no blockchain data. - match self.block_status(&best_hash) { - Err(e) => { - debug!(target:LOG_TARGET, "Error reading blockchain: {e}"); - Err(BadPeer(who, rep::BLOCKCHAIN_READ_ERROR)) - }, - Ok(BlockStatus::KnownBad) => { - info!("💔 New peer with known bad best block {} ({}).", best_hash, best_number); - Err(BadPeer(who, rep::BAD_BLOCK)) - }, - Ok(BlockStatus::Unknown) => { - if best_number.is_zero() { - info!("💔 New peer with unknown genesis hash {} ({}).", best_hash, best_number); - return Err(BadPeer(who, rep::GENESIS_MISMATCH)) - } - - // If there are more than `MAJOR_SYNC_BLOCKS` in the import queue then we have - // enough to do in the import queue that it's not worth kicking off - // an ancestor search, which is what we do in the next match case below. - if self.queue_blocks.len() > MAJOR_SYNC_BLOCKS.into() { - debug!( - target:LOG_TARGET, - "New peer with unknown best hash {} ({}), assuming common block.", - self.best_queued_hash, - self.best_queued_number - ); - self.peers.insert( - who, - PeerSync { - peer_id: who, - common_number: self.best_queued_number, - best_hash, - best_number, - state: PeerSyncState::Available, - }, - ); - return Ok(None) - } - - // If we are at genesis, just start downloading. 
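// A rough standalone sketch of the classification done in `status()` above: a
// node counts as major syncing (downloading or importing) when the median best
// block reported by peers is more than `MAJOR_SYNC_BLOCKS` ahead of the local
// best block; otherwise it is idle. Plain `u64` numbers stand in for
// `NumberFor<B>` and the threshold is passed in as a parameter instead of the
// crate constant. Illustrative only, not part of the diff.
#[derive(Debug, PartialEq)]
enum State {
    Downloading { target: u64 },
    Importing { target: u64 },
    Idle,
}

fn classify(median_seen: Option<u64>, best_block: u64, best_queued: u64, major_sync_blocks: u64) -> State {
    match median_seen {
        Some(target) if target > best_block && target - best_block > major_sync_blocks =>
            // If the target is already queued for import we are importing,
            // otherwise we are still downloading.
            if target > best_queued {
                State::Downloading { target }
            } else {
                State::Importing { target }
            },
        _ => State::Idle,
    }
}

fn main() {
    // Peers are far ahead and nothing is queued yet: downloading.
    assert_eq!(classify(Some(1_000), 10, 10, 5), State::Downloading { target: 1_000 });
    // Everything up to the target is already queued: importing.
    assert_eq!(classify(Some(1_000), 10, 1_000, 5), State::Importing { target: 1_000 });
    // Close enough to the median best block: idle.
    assert_eq!(classify(Some(12), 10, 12, 5), State::Idle);
}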
- let (state, req) = if self.best_queued_number.is_zero() { - debug!( - target:LOG_TARGET, - "New peer with best hash {best_hash} ({best_number}).", - ); - - (PeerSyncState::Available, None) - } else { - let common_best = std::cmp::min(self.best_queued_number, best_number); - - debug!( - target:LOG_TARGET, - "New peer with unknown best hash {} ({}), searching for common ancestor.", - best_hash, - best_number - ); - - ( - PeerSyncState::AncestorSearch { - current: common_best, - start: self.best_queued_number, - state: AncestorSearchState::ExponentialBackoff(One::one()), - }, - Some(ancestry_request::(common_best)), - ) - }; - - self.allowed_requests.add(&who); - self.peers.insert( - who, - PeerSync { - peer_id: who, - common_number: Zero::zero(), - best_hash, - best_number, - state, - }, - ); - - if let SyncMode::Warp = self.mode { - if self.peers.len() >= MIN_PEERS_TO_START_WARP_SYNC && self.warp_sync.is_none() - { - log::debug!(target: LOG_TARGET, "Starting warp state sync."); - - if let Some(config) = self.warp_sync_config.take() { - let mut warp_sync = WarpSync::new(self.client.clone(), config); - if let Some(header) = self.warp_sync_target_block_header.take() { - warp_sync.set_target_block(header); - } - self.warp_sync = Some(warp_sync); - } - } - } - Ok(req) - }, - Ok(BlockStatus::Queued) | - Ok(BlockStatus::InChainWithState) | - Ok(BlockStatus::InChainPruned) => { - debug!( - target: LOG_TARGET, - "New peer with known best hash {best_hash} ({best_number}).", - ); - self.peers.insert( - who, - PeerSync { - peer_id: who, - common_number: std::cmp::min(self.best_queued_number, best_number), - best_hash, - best_number, - state: PeerSyncState::Available, - }, - ); - self.allowed_requests.add(&who); - Ok(None) - }, - } - } - - fn update_chain_info(&mut self, best_hash: &B::Hash, best_number: NumberFor) { - self.on_block_queued(best_hash, best_number); - } - - fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { - let client = &self.client; - self.extra_justifications - .schedule((*hash, number), |base, block| is_descendent_of(&**client, base, block)) - } - - fn clear_justification_requests(&mut self) { - self.extra_justifications.reset(); - } - - // The implementation is similar to `on_validated_block_announce` with unknown parent hash. - fn set_sync_fork_request( - &mut self, - mut peers: Vec, - hash: &B::Hash, - number: NumberFor, - ) { - if peers.is_empty() { - peers = self - .peers - .iter() - // Only request blocks from peers who are ahead or on a par. - .filter(|(_, peer)| peer.best_number >= number) - .map(|(id, _)| *id) - .collect(); - - debug!( - target: LOG_TARGET, - "Explicit sync request for block {hash:?} with no peers specified. \ - Syncing from these peers {peers:?} instead.", - ); - } else { - debug!( - target: LOG_TARGET, - "Explicit sync request for block {hash:?} with {peers:?}", - ); - } - - if self.is_known(hash) { - debug!(target: LOG_TARGET, "Refusing to sync known hash {hash:?}"); - return - } - - trace!(target: LOG_TARGET, "Downloading requested old fork {hash:?}"); - for peer_id in &peers { - if let Some(peer) = self.peers.get_mut(peer_id) { - if let PeerSyncState::AncestorSearch { .. 
} = peer.state { - continue - } - - if number > peer.best_number { - peer.best_number = number; - peer.best_hash = *hash; - } - self.allowed_requests.add(peer_id); - } - } - - self.fork_targets - .entry(*hash) - .or_insert_with(|| ForkTarget { number, peers: Default::default(), parent_hash: None }) - .peers - .extend(peers); - } - - #[must_use] - fn on_block_data( - &mut self, - who: &PeerId, - request: Option>, - response: BlockResponse, - ) -> Result, BadPeer> { - self.downloaded_blocks += response.blocks.len(); - let mut gap = false; - let new_blocks: Vec> = if let Some(peer) = self.peers.get_mut(who) { - let mut blocks = response.blocks; - if request.as_ref().map_or(false, |r| r.direction == Direction::Descending) { - trace!(target: LOG_TARGET, "Reversing incoming block list"); - blocks.reverse() - } - self.allowed_requests.add(who); - if let Some(request) = request { - match &mut peer.state { - PeerSyncState::DownloadingNew(_) => { - self.blocks.clear_peer_download(who); - peer.state = PeerSyncState::Available; - if let Some(start_block) = - validate_blocks::(&blocks, who, Some(request))? - { - self.blocks.insert(start_block, blocks, *who); - } - self.ready_blocks() - }, - PeerSyncState::DownloadingGap(_) => { - peer.state = PeerSyncState::Available; - if let Some(gap_sync) = &mut self.gap_sync { - gap_sync.blocks.clear_peer_download(who); - if let Some(start_block) = - validate_blocks::(&blocks, who, Some(request))? - { - gap_sync.blocks.insert(start_block, blocks, *who); - } - gap = true; - let blocks: Vec<_> = gap_sync - .blocks - .ready_blocks(gap_sync.best_queued_number + One::one()) - .into_iter() - .map(|block_data| { - let justifications = - block_data.block.justifications.or_else(|| { - legacy_justification_mapping( - block_data.block.justification, - ) - }); - IncomingBlock { - hash: block_data.block.hash, - header: block_data.block.header, - body: block_data.block.body, - indexed_body: block_data.block.indexed_body, - justifications, - origin: block_data.origin, - allow_missing_state: true, - import_existing: self.import_existing, - skip_execution: true, - state: None, - } - }) - .collect(); - debug!( - target: LOG_TARGET, - "Drained {} gap blocks from {}", - blocks.len(), - gap_sync.best_queued_number, - ); - blocks - } else { - debug!(target: LOG_TARGET, "Unexpected gap block response from {who}"); - return Err(BadPeer(*who, rep::NO_BLOCK)) - } - }, - PeerSyncState::DownloadingStale(_) => { - peer.state = PeerSyncState::Available; - if blocks.is_empty() { - debug!(target: LOG_TARGET, "Empty block response from {who}"); - return Err(BadPeer(*who, rep::NO_BLOCK)) - } - validate_blocks::(&blocks, who, Some(request))?; - blocks - .into_iter() - .map(|b| { - let justifications = b - .justifications - .or_else(|| legacy_justification_mapping(b.justification)); - IncomingBlock { - hash: b.hash, - header: b.header, - body: b.body, - indexed_body: None, - justifications, - origin: Some(*who), - allow_missing_state: true, - import_existing: self.import_existing, - skip_execution: self.skip_execution(), - state: None, - } - }) - .collect() - }, - PeerSyncState::AncestorSearch { current, start, state } => { - let matching_hash = match (blocks.get(0), self.client.hash(*current)) { - (Some(block), Ok(maybe_our_block_hash)) => { - trace!( - target: LOG_TARGET, - "Got ancestry block #{} ({}) from peer {}", - current, - block.hash, - who, - ); - maybe_our_block_hash.filter(|x| x == &block.hash) - }, - (None, _) => { - debug!( - target: LOG_TARGET, - "Invalid response when searching for 
ancestor from {who}", - ); - return Err(BadPeer(*who, rep::UNKNOWN_ANCESTOR)) - }, - (_, Err(e)) => { - info!( - target: LOG_TARGET, - "❌ Error answering legitimate blockchain query: {e}", - ); - return Err(BadPeer(*who, rep::BLOCKCHAIN_READ_ERROR)) - }, - }; - if matching_hash.is_some() { - if *start < self.best_queued_number && - self.best_queued_number <= peer.best_number - { - // We've made progress on this chain since the search was started. - // Opportunistically set common number to updated number - // instead of the one that started the search. - peer.common_number = self.best_queued_number; - } else if peer.common_number < *current { - peer.common_number = *current; - } - } - if matching_hash.is_none() && current.is_zero() { - trace!( - target:LOG_TARGET, - "Ancestry search: genesis mismatch for peer {who}", - ); - return Err(BadPeer(*who, rep::GENESIS_MISMATCH)) - } - if let Some((next_state, next_num)) = - handle_ancestor_search_state(state, *current, matching_hash.is_some()) - { - peer.state = PeerSyncState::AncestorSearch { - current: next_num, - start: *start, - state: next_state, - }; - return Ok(OnBlockData::Request(*who, ancestry_request::(next_num))) - } else { - // Ancestry search is complete. Check if peer is on a stale fork unknown - // to us and add it to sync targets if necessary. - trace!( - target: LOG_TARGET, - "Ancestry search complete. Ours={} ({}), Theirs={} ({}), Common={:?} ({})", - self.best_queued_hash, - self.best_queued_number, - peer.best_hash, - peer.best_number, - matching_hash, - peer.common_number, - ); - if peer.common_number < peer.best_number && - peer.best_number < self.best_queued_number - { - trace!( - target: LOG_TARGET, - "Added fork target {} for {}", - peer.best_hash, - who, - ); - self.fork_targets - .entry(peer.best_hash) - .or_insert_with(|| ForkTarget { - number: peer.best_number, - parent_hash: None, - peers: Default::default(), - }) - .peers - .insert(*who); - } - peer.state = PeerSyncState::Available; - Vec::new() - } - }, - PeerSyncState::DownloadingWarpTargetBlock => { - peer.state = PeerSyncState::Available; - if let Some(warp_sync) = &mut self.warp_sync { - if blocks.len() == 1 { - validate_blocks::(&blocks, who, Some(request))?; - match warp_sync.import_target_block( - blocks.pop().expect("`blocks` len checked above."), - ) { - warp::TargetBlockImportResult::Success => - return Ok(OnBlockData::Continue), - warp::TargetBlockImportResult::BadResponse => - return Err(BadPeer(*who, rep::VERIFICATION_FAIL)), - } - } else if blocks.is_empty() { - debug!(target: LOG_TARGET, "Empty block response from {who}"); - return Err(BadPeer(*who, rep::NO_BLOCK)) - } else { - debug!( - target: LOG_TARGET, - "Too many blocks ({}) in warp target block response from {}", - blocks.len(), - who, - ); - return Err(BadPeer(*who, rep::NOT_REQUESTED)) - } - } else { - debug!( - target: LOG_TARGET, - "Logic error: we think we are downloading warp target block from {}, but no warp sync is happening.", - who, - ); - return Ok(OnBlockData::Continue) - } - }, - PeerSyncState::Available | - PeerSyncState::DownloadingJustification(..) | - PeerSyncState::DownloadingState | - PeerSyncState::DownloadingWarpProof => Vec::new(), - } - } else { - // When request.is_none() this is a block announcement. Just accept blocks. 
- validate_blocks::(&blocks, who, None)?; - blocks - .into_iter() - .map(|b| { - let justifications = b - .justifications - .or_else(|| legacy_justification_mapping(b.justification)); - IncomingBlock { - hash: b.hash, - header: b.header, - body: b.body, - indexed_body: None, - justifications, - origin: Some(*who), - allow_missing_state: true, - import_existing: false, - skip_execution: true, - state: None, - } - }) - .collect() - } - } else { - // We don't know of this peer, so we also did not request anything from it. - return Err(BadPeer(*who, rep::NOT_REQUESTED)) - }; - - Ok(OnBlockData::Import(self.validate_and_queue_blocks(new_blocks, gap))) - } - - #[must_use] - fn on_block_justification( - &mut self, - who: PeerId, - response: BlockResponse, - ) -> Result, BadPeer> { - let peer = if let Some(peer) = self.peers.get_mut(&who) { - peer - } else { - error!( - target: LOG_TARGET, - "💔 Called on_block_justification with a peer ID of an unknown peer", - ); - return Ok(OnBlockJustification::Nothing) - }; - - self.allowed_requests.add(&who); - if let PeerSyncState::DownloadingJustification(hash) = peer.state { - peer.state = PeerSyncState::Available; - - // We only request one justification at a time - let justification = if let Some(block) = response.blocks.into_iter().next() { - if hash != block.hash { - warn!( - target: LOG_TARGET, - "💔 Invalid block justification provided by {}: requested: {:?} got: {:?}", - who, - hash, - block.hash, - ); - return Err(BadPeer(who, rep::BAD_JUSTIFICATION)) - } - - block - .justifications - .or_else(|| legacy_justification_mapping(block.justification)) - } else { - // we might have asked the peer for a justification on a block that we assumed it - // had but didn't (regardless of whether it had a justification for it or not). - trace!( - target: LOG_TARGET, - "Peer {who:?} provided empty response for justification request {hash:?}", - ); - - None - }; - - if let Some((peer_id, hash, number, justifications)) = - self.extra_justifications.on_response(who, justification) - { - return Ok(OnBlockJustification::Import { peer_id, hash, number, justifications }) - } - } - - Ok(OnBlockJustification::Nothing) - } - - fn on_justification_import(&mut self, hash: B::Hash, number: NumberFor, success: bool) { - let finalization_result = if success { Ok((hash, number)) } else { Err(()) }; - self.extra_justifications - .try_finalize_root((hash, number), finalization_result, true); - self.allowed_requests.set_all(); - } - - fn on_block_finalized(&mut self, hash: &B::Hash, number: NumberFor) { - let client = &self.client; - let r = self.extra_justifications.on_block_finalized(hash, number, |base, block| { - is_descendent_of(&**client, base, block) - }); - - if let SyncMode::LightState { skip_proofs, .. } = &self.mode { - if self.state_sync.is_none() && !self.peers.is_empty() && self.queue_blocks.is_empty() { - // Finalized a recent block. 
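// The check that follows decides whether this finalized block is recent enough
// to anchor a state sync: take the median of the peers' best numbers and
// require the finalized number to be within `STATE_SYNC_FINALITY_THRESHOLD` of
// it. A simplified standalone predicate (plain `u64` numbers, threshold passed
// in as a parameter, purely illustrative) would be:
//
//     fn recent_enough(finalized: u64, mut peer_bests: Vec<u64>, threshold: u64) -> bool {
//         if peer_bests.is_empty() {
//             return false;
//         }
//         peer_bests.sort();
//         let median = peer_bests[peer_bests.len() / 2];
//         finalized + threshold >= median
//     }
//
//     // e.g. finalized #995 against peer heads 990/1000/1005 with threshold 10: start state sync.
//     assert!(recent_enough(995, vec![990, 1_000, 1_005], 10));
//     assert!(!recent_enough(500, vec![990, 1_000, 1_005], 10));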
- let mut heads: Vec<_> = self.peers.values().map(|peer| peer.best_number).collect(); - heads.sort(); - let median = heads[heads.len() / 2]; - if number + STATE_SYNC_FINALITY_THRESHOLD.saturated_into() >= median { - if let Ok(Some(header)) = self.client.header(*hash) { - log::debug!( - target: LOG_TARGET, - "Starting state sync for #{number} ({hash})", - ); - self.state_sync = Some(StateSync::new( - self.client.clone(), - header, - None, - None, - *skip_proofs, - )); - self.allowed_requests.set_all(); - } - } - } - } - - if let Err(err) = r { - warn!( - target: LOG_TARGET, - "💔 Error cleaning up pending extra justification data requests: {err}", - ); - } - } - - fn on_validated_block_announce( - &mut self, - is_best: bool, - who: PeerId, - announce: &BlockAnnounce, - ) { - let number = *announce.header.number(); - let hash = announce.header.hash(); - let parent_status = - self.block_status(announce.header.parent_hash()).unwrap_or(BlockStatus::Unknown); - let known_parent = parent_status != BlockStatus::Unknown; - let ancient_parent = parent_status == BlockStatus::InChainPruned; - - let known = self.is_known(&hash); - let peer = if let Some(peer) = self.peers.get_mut(&who) { - peer - } else { - error!(target: LOG_TARGET, "💔 Called `on_validated_block_announce` with a bad peer ID"); - return - }; - - if let PeerSyncState::AncestorSearch { .. } = peer.state { - trace!(target: LOG_TARGET, "Peer {} is in the ancestor search state.", who); - return - } - - if is_best { - // update their best block - peer.best_number = number; - peer.best_hash = hash; - } - - // If the announced block is the best they have and is not ahead of us, our common number - // is either one further ahead or it's the one they just announced, if we know about it. - if is_best { - if known && self.best_queued_number >= number { - self.update_peer_common_number(&who, number); - } else if announce.header.parent_hash() == &self.best_queued_hash || - known_parent && self.best_queued_number >= number - { - self.update_peer_common_number(&who, number.saturating_sub(One::one())); - } - } - self.allowed_requests.add(&who); - - // known block case - if known || self.is_already_downloading(&hash) { - trace!(target: "sync", "Known block announce from {}: {}", who, hash); - if let Some(target) = self.fork_targets.get_mut(&hash) { - target.peers.insert(who); - } - return - } - - if ancient_parent { - trace!( - target: "sync", - "Ignored ancient block announced from {}: {} {:?}", - who, - hash, - announce.header, - ); - return - } - - if self.status().state == SyncState::Idle { - trace!( - target: "sync", - "Added sync target for block announced from {}: {} {:?}", - who, - hash, - announce.summary(), - ); - self.fork_targets - .entry(hash) - .or_insert_with(|| ForkTarget { - number, - parent_hash: Some(*announce.header.parent_hash()), - peers: Default::default(), - }) - .peers - .insert(who); - } - } - - #[must_use] - fn peer_disconnected(&mut self, who: &PeerId) -> Option> { - self.blocks.clear_peer_download(who); - if let Some(gap_sync) = &mut self.gap_sync { - gap_sync.blocks.clear_peer_download(who) - } - self.peers.remove(who); - self.extra_justifications.peer_disconnected(who); - self.allowed_requests.set_all(); - self.fork_targets.retain(|_, target| { - target.peers.remove(who); - !target.peers.is_empty() - }); - - let blocks = self.ready_blocks(); - - (!blocks.is_empty()).then(|| self.validate_and_queue_blocks(blocks, false)) - } - - fn metrics(&self) -> Metrics { - Metrics { - queued_blocks: 
self.queue_blocks.len().try_into().unwrap_or(std::u32::MAX), - fork_targets: self.fork_targets.len().try_into().unwrap_or(std::u32::MAX), - justifications: self.extra_justifications.metrics(), - } - } -} - -impl ChainSync -where - Self: ChainSyncT, - B: BlockT, - Client: HeaderBackend - + BlockBackend - + HeaderMetadata - + ProofProvider - + Send - + Sync - + 'static, -{ - /// Create a new instance. - pub fn new( - mode: SyncMode, - client: Arc, - block_announce_protocol_name: ProtocolName, - max_parallel_downloads: u32, - max_blocks_per_request: u32, - warp_sync_config: Option>, - network_service: service::network::NetworkServiceHandle, - ) -> Result { - let mut sync = Self { - client, - peers: HashMap::new(), - blocks: BlockCollection::new(), - best_queued_hash: Default::default(), - best_queued_number: Zero::zero(), - extra_justifications: ExtraRequests::new("justification"), - mode, - queue_blocks: Default::default(), - fork_targets: Default::default(), - allowed_requests: Default::default(), - max_parallel_downloads, - max_blocks_per_request, - downloaded_blocks: 0, - state_sync: None, - warp_sync: None, - import_existing: false, - gap_sync: None, - network_service, - warp_sync_config, - warp_sync_target_block_header: None, - block_announce_protocol_name, - }; - - sync.reset_sync_start_point()?; - Ok(sync) - } - - /// Returns the median seen block number. - fn median_seen(&self) -> Option> { - let mut best_seens = self.peers.values().map(|p| p.best_number).collect::>(); - - if best_seens.is_empty() { - None - } else { - let middle = best_seens.len() / 2; - - // Not the "perfect median" when we have an even number of peers. - Some(*best_seens.select_nth_unstable(middle).1) - } - } - - fn required_block_attributes(&self) -> BlockAttributes { - match self.mode { - SyncMode::Full => - BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION | BlockAttributes::BODY, - SyncMode::LightState { storage_chain_mode: false, .. } | SyncMode::Warp => - BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION | BlockAttributes::BODY, - SyncMode::LightState { storage_chain_mode: true, .. } => - BlockAttributes::HEADER | - BlockAttributes::JUSTIFICATION | - BlockAttributes::INDEXED_BODY, - } - } - - fn skip_execution(&self) -> bool { - match self.mode { - SyncMode::Full => false, - SyncMode::LightState { .. } => true, - SyncMode::Warp => true, - } - } - - fn validate_and_queue_blocks( - &mut self, - mut new_blocks: Vec>, - gap: bool, - ) -> ImportBlocksAction { - let orig_len = new_blocks.len(); - new_blocks.retain(|b| !self.queue_blocks.contains(&b.hash)); - if new_blocks.len() != orig_len { - debug!( - target: LOG_TARGET, - "Ignoring {} blocks that are already queued", - orig_len - new_blocks.len(), - ); - } - - let origin = if !gap && !self.status().state.is_major_syncing() { - BlockOrigin::NetworkBroadcast - } else { - BlockOrigin::NetworkInitialSync - }; - - if let Some((h, n)) = new_blocks - .last() - .and_then(|b| b.header.as_ref().map(|h| (&b.hash, *h.number()))) - { - trace!( - target:LOG_TARGET, - "Accepted {} blocks ({:?}) with origin {:?}", - new_blocks.len(), - h, - origin, - ); - self.on_block_queued(h, n) - } - self.queue_blocks.extend(new_blocks.iter().map(|b| b.hash)); - - ImportBlocksAction { origin, blocks: new_blocks } - } - - fn update_peer_common_number(&mut self, peer_id: &PeerId, new_common: NumberFor) { - if let Some(peer) = self.peers.get_mut(peer_id) { - peer.update_common_number(new_common); - } - } - - /// Called when a block has been queued for import. 
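// `median_seen()` above picks the middle element of the peers' best-block
// numbers with `select_nth_unstable`, i.e. the upper median when the peer
// count is even. A standalone sketch over plain `u64` values, illustrative
// only and not part of the diff:
fn median_seen(mut best_seens: Vec<u64>) -> Option<u64> {
    if best_seens.is_empty() {
        None
    } else {
        let middle = best_seens.len() / 2;
        // O(n) selection; the slice ends up partitioned around the middle element.
        Some(*best_seens.select_nth_unstable(middle).1)
    }
}

fn main() {
    assert_eq!(median_seen(vec![]), None);
    assert_eq!(median_seen(vec![7, 3, 9]), Some(7));
    // Even count: the upper of the two middle values is returned.
    assert_eq!(median_seen(vec![1, 2, 3, 4]), Some(3));
}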
- /// - /// Updates our internal state for best queued block and then goes - /// through all peers to update our view of their state as well. - fn on_block_queued(&mut self, hash: &B::Hash, number: NumberFor) { - if self.fork_targets.remove(hash).is_some() { - trace!(target: LOG_TARGET, "Completed fork sync {hash:?}"); - } - if let Some(gap_sync) = &mut self.gap_sync { - if number > gap_sync.best_queued_number && number <= gap_sync.target { - gap_sync.best_queued_number = number; - } - } - if number > self.best_queued_number { - self.best_queued_number = number; - self.best_queued_hash = *hash; - // Update common blocks - for (n, peer) in self.peers.iter_mut() { - if let PeerSyncState::AncestorSearch { .. } = peer.state { - // Wait for ancestry search to complete first. - continue - } - let new_common_number = - if peer.best_number >= number { number } else { peer.best_number }; - trace!( - target: LOG_TARGET, - "Updating peer {} info, ours={}, common={}->{}, their best={}", - n, - number, - peer.common_number, - new_common_number, - peer.best_number, - ); - peer.common_number = new_common_number; - } - } - self.allowed_requests.set_all(); - } - - /// Restart the sync process. This will reset all pending block requests and return an iterator - /// of new block requests to make to peers. Peers that were downloading finality data (i.e. - /// their state was `DownloadingJustification`) are unaffected and will stay in the same state. - fn restart(&mut self) -> impl Iterator, BadPeer>> + '_ { - self.blocks.clear(); - if let Err(e) = self.reset_sync_start_point() { - warn!(target: LOG_TARGET, "💔 Unable to restart sync: {e}"); - } - self.allowed_requests.set_all(); - debug!( - target: LOG_TARGET, - "Restarted with {} ({})", - self.best_queued_number, - self.best_queued_hash, - ); - let old_peers = std::mem::take(&mut self.peers); - - old_peers.into_iter().filter_map(move |(peer_id, mut p)| { - // peers that were downloading justifications - // should be kept in that state. - if let PeerSyncState::DownloadingJustification(_) = p.state { - // We make sure our commmon number is at least something we have. - p.common_number = self.best_queued_number; - self.peers.insert(peer_id, p); - return None - } - - // handle peers that were in other states. - match self.new_peer(peer_id, p.best_hash, p.best_number) { - // since the request is not a justification, remove it from pending responses - Ok(None) => Some(Ok(BlockRequestAction::RemoveStale { peer_id })), - // update the request if the new one is available - Ok(Some(request)) => Some(Ok(BlockRequestAction::SendRequest { peer_id, request })), - // this implies that we need to drop pending response from the peer - Err(e) => Some(Err(e)), - } - }) - } - - /// Find a block to start sync from. If we sync with state, that's the latest block we have - /// state for. - fn reset_sync_start_point(&mut self) -> Result<(), ClientError> { - let info = self.client.info(); - if matches!(self.mode, SyncMode::LightState { .. }) && info.finalized_state.is_some() { - warn!( - target: LOG_TARGET, - "Can't use fast sync mode with a partially synced database. Reverting to full sync mode." - ); - self.mode = SyncMode::Full; - } - if matches!(self.mode, SyncMode::Warp) && info.finalized_state.is_some() { - warn!( - target: LOG_TARGET, - "Can't use warp sync mode with a partially synced database. Reverting to full sync mode." 
- ); - self.mode = SyncMode::Full; - } - self.import_existing = false; - self.best_queued_hash = info.best_hash; - self.best_queued_number = info.best_number; - - if self.mode == SyncMode::Full && - self.client.block_status(info.best_hash)? != BlockStatus::InChainWithState - { - self.import_existing = true; - // Latest state is missing, start with the last finalized state or genesis instead. - if let Some((hash, number)) = info.finalized_state { - debug!(target: LOG_TARGET, "Starting from finalized state #{number}"); - self.best_queued_hash = hash; - self.best_queued_number = number; - } else { - debug!(target: LOG_TARGET, "Restarting from genesis"); - self.best_queued_hash = Default::default(); - self.best_queued_number = Zero::zero(); - } - } - - if let Some((start, end)) = info.block_gap { - debug!(target: LOG_TARGET, "Starting gap sync #{start} - #{end}"); - self.gap_sync = Some(GapSync { - best_queued_number: start - One::one(), - target: end, - blocks: BlockCollection::new(), - }); - } - trace!( - target: LOG_TARGET, - "Restarted sync at #{} ({:?})", - self.best_queued_number, - self.best_queued_hash, - ); - Ok(()) - } - - /// What is the status of the block corresponding to the given hash? - fn block_status(&self, hash: &B::Hash) -> Result { - if self.queue_blocks.contains(hash) { - return Ok(BlockStatus::Queued) - } - self.client.block_status(*hash) - } - - /// Is the block corresponding to the given hash known? - fn is_known(&self, hash: &B::Hash) -> bool { - self.block_status(hash).ok().map_or(false, |s| s != BlockStatus::Unknown) - } - - /// Is any peer downloading the given hash? - fn is_already_downloading(&self, hash: &B::Hash) -> bool { - self.peers - .iter() - .any(|(_, p)| p.state == PeerSyncState::DownloadingStale(*hash)) - } - - /// Is the peer know to the sync state machine? - pub fn is_peer_known(&self, peer_id: &PeerId) -> bool { - self.peers.contains_key(peer_id) - } - - /// Get the set of downloaded blocks that are ready to be queued for import. - fn ready_blocks(&mut self) -> Vec> { - self.blocks - .ready_blocks(self.best_queued_number + One::one()) - .into_iter() - .map(|block_data| { - let justifications = block_data - .block - .justifications - .or_else(|| legacy_justification_mapping(block_data.block.justification)); - IncomingBlock { - hash: block_data.block.hash, - header: block_data.block.header, - body: block_data.block.body, - indexed_body: block_data.block.indexed_body, - justifications, - origin: block_data.origin, - allow_missing_state: true, - import_existing: self.import_existing, - skip_execution: self.skip_execution(), - state: None, - } - }) - .collect() - } - - /// Set warp sync target block externally in case we skip warp proof downloading. - pub fn set_warp_sync_target_block(&mut self, header: B::Header) { - if let Some(ref mut warp_sync) = self.warp_sync { - warp_sync.set_target_block(header); - } else { - self.warp_sync_target_block_header = Some(header); - } - } - - /// Generate block request for downloading of the target block body during warp sync. - fn warp_target_block_request(&mut self) -> Option<(PeerId, BlockRequest)> { - let sync = &self.warp_sync.as_ref()?; - - if self.allowed_requests.is_empty() || - sync.is_complete() || - self.peers - .iter() - .any(|(_, peer)| peer.state == PeerSyncState::DownloadingWarpTargetBlock) - { - // Only one pending warp target block request is allowed. 
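// A toy illustration of the "ready blocks" idea used in `ready_blocks` above:
// downloaded blocks are only handed to the import queue once they form a
// contiguous range immediately following the best queued block. A
// `BTreeMap<u64, &str>` stands in for the real `BlockCollection`; this is not
// the actual API, just an illustrative sketch.
use std::collections::BTreeMap;

fn drain_ready(downloaded: &mut BTreeMap<u64, &'static str>, best_queued: u64) -> Vec<(u64, &'static str)> {
    let mut next = best_queued + 1;
    let mut ready = Vec::new();
    // Drain blocks starting at `best_queued + 1` until the first gap.
    while let Some(block) = downloaded.remove(&next) {
        ready.push((next, block));
        next += 1;
    }
    ready
}

fn main() {
    let mut downloaded: BTreeMap<u64, &'static str> =
        [(6, "f"), (7, "g"), (9, "i")].into_iter().collect();
    // Best queued is #5, so #6 and #7 are ready; #9 must wait for #8.
    assert_eq!(drain_ready(&mut downloaded, 5), vec![(6, "f"), (7, "g")]);
    assert!(downloaded.contains_key(&9));
}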
- return None - } - - if let Some((target_number, request)) = sync.next_target_block_request() { - // Find a random peer that has a block with the target number. - for (id, peer) in self.peers.iter_mut() { - if peer.state.is_available() && peer.best_number >= target_number { - trace!(target: LOG_TARGET, "New warp target block request for {id}"); - peer.state = PeerSyncState::DownloadingWarpTargetBlock; - self.allowed_requests.clear(); - return Some((*id, request)) - } - } - } - - None - } - - /// Process blocks received in a response. - #[must_use] - pub(crate) fn on_block_response( - &mut self, - peer_id: PeerId, - request: BlockRequest, - blocks: Vec>, - ) -> OnBlockResponse { - let block_response = BlockResponse:: { id: request.id, blocks }; - - let blocks_range = || match ( - block_response - .blocks - .first() - .and_then(|b| b.header.as_ref().map(|h| h.number())), - block_response.blocks.last().and_then(|b| b.header.as_ref().map(|h| h.number())), - ) { - (Some(first), Some(last)) if first != last => format!(" ({}..{})", first, last), - (Some(first), Some(_)) => format!(" ({})", first), - _ => Default::default(), - }; - trace!( - target: LOG_TARGET, - "BlockResponse {} from {} with {} blocks {}", - block_response.id, - peer_id, - block_response.blocks.len(), - blocks_range(), - ); - - if request.fields == BlockAttributes::JUSTIFICATION { - match self.on_block_justification(peer_id, block_response) { - Ok(OnBlockJustification::Nothing) => OnBlockResponse::Nothing, - Ok(OnBlockJustification::Import { peer_id, hash, number, justifications }) => - OnBlockResponse::ImportJustifications(ImportJustificationsAction { - peer_id, - hash, - number, - justifications, - }), - Err(BadPeer(id, repu)) => { - self.network_service - .disconnect_peer(id, self.block_announce_protocol_name.clone()); - self.network_service.report_peer(id, repu); - OnBlockResponse::Nothing - }, - } - } else { - match self.on_block_data(&peer_id, Some(request), block_response) { - Ok(OnBlockData::Import(action)) => OnBlockResponse::ImportBlocks(action), - Ok(OnBlockData::Request(peer_id, request)) => - OnBlockResponse::SendBlockRequest { peer_id, request }, - Ok(OnBlockData::Continue) => OnBlockResponse::Nothing, - Err(BadPeer(id, repu)) => { - self.network_service - .disconnect_peer(id, self.block_announce_protocol_name.clone()); - self.network_service.report_peer(id, repu); - OnBlockResponse::Nothing - }, - } - } - } - - /// Process state received in a response. 
- #[must_use] - pub fn on_state_response( - &mut self, - peer_id: PeerId, - response: OpaqueStateResponse, - ) -> Option> { - match self.on_state_data(&peer_id, response) { - Ok(OnStateData::Import(origin, block)) => - Some(ImportBlocksAction { origin, blocks: vec![block] }), - Ok(OnStateData::Continue) => None, - Err(BadPeer(id, repu)) => { - self.network_service - .disconnect_peer(id, self.block_announce_protocol_name.clone()); - self.network_service.report_peer(id, repu); - None - }, - } - } - - pub fn on_warp_sync_response(&mut self, peer_id: PeerId, response: EncodedProof) { - if let Err(BadPeer(id, repu)) = self.on_warp_sync_data(&peer_id, response) { - self.network_service - .disconnect_peer(id, self.block_announce_protocol_name.clone()); - self.network_service.report_peer(id, repu); - } - } - - fn justification_requests(&mut self) -> Vec<(PeerId, BlockRequest)> { - let peers = &mut self.peers; - let mut matcher = self.extra_justifications.matcher(); - std::iter::from_fn(move || { - if let Some((peer, request)) = matcher.next(peers) { - peers - .get_mut(&peer) - .expect( - "`Matcher::next` guarantees the `PeerId` comes from the given peers; qed", - ) - .state = PeerSyncState::DownloadingJustification(request.0); - let req = BlockRequest:: { - id: 0, - fields: BlockAttributes::JUSTIFICATION, - from: FromBlock::Hash(request.0), - direction: Direction::Ascending, - max: Some(1), - }; - Some((peer, req)) - } else { - None - } - }) - .collect() - } - - fn block_requests(&mut self) -> Vec<(PeerId, BlockRequest)> { - if self.mode == SyncMode::Warp { - return self - .warp_target_block_request() - .map_or_else(|| Vec::new(), |req| Vec::from([req])) - } - - if self.allowed_requests.is_empty() || self.state_sync.is_some() { - return Vec::new() - } - - if self.queue_blocks.len() > MAX_IMPORTING_BLOCKS { - trace!(target: LOG_TARGET, "Too many blocks in the queue."); - return Vec::new() - } - let is_major_syncing = self.status().state.is_major_syncing(); - let attrs = self.required_block_attributes(); - let blocks = &mut self.blocks; - let fork_targets = &mut self.fork_targets; - let last_finalized = - std::cmp::min(self.best_queued_number, self.client.info().finalized_number); - let best_queued = self.best_queued_number; - let client = &self.client; - let queue = &self.queue_blocks; - let allowed_requests = self.allowed_requests.take(); - let max_parallel = if is_major_syncing { 1 } else { self.max_parallel_downloads }; - let max_blocks_per_request = self.max_blocks_per_request; - let gap_sync = &mut self.gap_sync; - self.peers - .iter_mut() - .filter_map(move |(&id, peer)| { - if !peer.state.is_available() || !allowed_requests.contains(&id) { - return None - } - - // If our best queued is more than `MAX_BLOCKS_TO_LOOK_BACKWARDS` blocks away from - // the common number, the peer best number is higher than our best queued and the - // common number is smaller than the last finalized block number, we should do an - // ancestor search to find a better common block. If the queue is full we wait till - // all blocks are imported though. - if best_queued.saturating_sub(peer.common_number) > - MAX_BLOCKS_TO_LOOK_BACKWARDS.into() && - best_queued < peer.best_number && - peer.common_number < last_finalized && - queue.len() <= MAJOR_SYNC_BLOCKS.into() - { - trace!( - target: LOG_TARGET, - "Peer {:?} common block {} too far behind of our best {}. 
Starting ancestry search.", - id, - peer.common_number, - best_queued, - ); - let current = std::cmp::min(peer.best_number, best_queued); - peer.state = PeerSyncState::AncestorSearch { - current, - start: best_queued, - state: AncestorSearchState::ExponentialBackoff(One::one()), - }; - Some((id, ancestry_request::(current))) - } else if let Some((range, req)) = peer_block_request( - &id, - peer, - blocks, - attrs, - max_parallel, - max_blocks_per_request, - last_finalized, - best_queued, - ) { - peer.state = PeerSyncState::DownloadingNew(range.start); - trace!( - target: LOG_TARGET, - "New block request for {}, (best:{}, common:{}) {:?}", - id, - peer.best_number, - peer.common_number, - req, - ); - Some((id, req)) - } else if let Some((hash, req)) = fork_sync_request( - &id, - fork_targets, - best_queued, - last_finalized, - attrs, - |hash| { - if queue.contains(hash) { - BlockStatus::Queued - } else { - client.block_status(*hash).unwrap_or(BlockStatus::Unknown) - } - }, - max_blocks_per_request, - ) { - trace!(target: LOG_TARGET, "Downloading fork {hash:?} from {id}"); - peer.state = PeerSyncState::DownloadingStale(hash); - Some((id, req)) - } else if let Some((range, req)) = gap_sync.as_mut().and_then(|sync| { - peer_gap_block_request( - &id, - peer, - &mut sync.blocks, - attrs, - sync.target, - sync.best_queued_number, - max_blocks_per_request, - ) - }) { - peer.state = PeerSyncState::DownloadingGap(range.start); - trace!( - target: LOG_TARGET, - "New gap block request for {}, (best:{}, common:{}) {:?}", - id, - peer.best_number, - peer.common_number, - req, - ); - Some((id, req)) - } else { - None - } - }) - .collect() - } - - fn state_request(&mut self) -> Option<(PeerId, OpaqueStateRequest)> { - if self.allowed_requests.is_empty() { - return None - } - if (self.state_sync.is_some() || self.warp_sync.is_some()) && - self.peers.iter().any(|(_, peer)| peer.state == PeerSyncState::DownloadingState) - { - // Only one pending state request is allowed. - return None - } - if let Some(sync) = &self.state_sync { - if sync.is_complete() { - return None - } - - for (id, peer) in self.peers.iter_mut() { - if peer.state.is_available() && peer.common_number >= sync.target_block_num() { - peer.state = PeerSyncState::DownloadingState; - let request = sync.next_request(); - trace!(target: LOG_TARGET, "New StateRequest for {}: {:?}", id, request); - self.allowed_requests.clear(); - return Some((*id, OpaqueStateRequest(Box::new(request)))) - } - } - } - if let Some(sync) = &self.warp_sync { - if sync.is_complete() { - return None - } - if let (Some(request), Some(target)) = - (sync.next_state_request(), sync.target_block_number()) - { - for (id, peer) in self.peers.iter_mut() { - if peer.state.is_available() && peer.best_number >= target { - trace!(target: LOG_TARGET, "New StateRequest for {id}: {request:?}"); - peer.state = PeerSyncState::DownloadingState; - self.allowed_requests.clear(); - return Some((*id, OpaqueStateRequest(Box::new(request)))) - } - } - } - } - None - } - - fn warp_sync_request(&mut self) -> Option<(PeerId, WarpProofRequest)> { - if let Some(sync) = &self.warp_sync { - if self.allowed_requests.is_empty() || - sync.is_complete() || - self.peers - .iter() - .any(|(_, peer)| peer.state == PeerSyncState::DownloadingWarpProof) - { - // Only one pending state request is allowed. 
- return None - } - if let Some(request) = sync.next_warp_proof_request() { - let mut targets: Vec<_> = self.peers.values().map(|p| p.best_number).collect(); - if !targets.is_empty() { - targets.sort(); - let median = targets[targets.len() / 2]; - // Find a random peer that is synced as much as peer majority. - for (id, peer) in self.peers.iter_mut() { - if peer.state.is_available() && peer.best_number >= median { - trace!(target: LOG_TARGET, "New WarpProofRequest for {id}"); - peer.state = PeerSyncState::DownloadingWarpProof; - self.allowed_requests.clear(); - return Some((*id, request)) - } - } - } - } - } - None - } - - fn on_state_data( - &mut self, - who: &PeerId, - response: OpaqueStateResponse, - ) -> Result, BadPeer> { - let response: Box = response.0.downcast().map_err(|_error| { - error!( - target: LOG_TARGET, - "Failed to downcast opaque state response, this is an implementation bug." - ); - - BadPeer(*who, rep::BAD_RESPONSE) - })?; - - if let Some(peer) = self.peers.get_mut(who) { - if let PeerSyncState::DownloadingState = peer.state { - peer.state = PeerSyncState::Available; - self.allowed_requests.set_all(); - } - } - let import_result = if let Some(sync) = &mut self.state_sync { - debug!( - target: LOG_TARGET, - "Importing state data from {} with {} keys, {} proof nodes.", - who, - response.entries.len(), - response.proof.len(), - ); - sync.import(*response) - } else if let Some(sync) = &mut self.warp_sync { - debug!( - target: LOG_TARGET, - "Importing state data from {} with {} keys, {} proof nodes.", - who, - response.entries.len(), - response.proof.len(), - ); - sync.import_state(*response) - } else { - debug!(target: LOG_TARGET, "Ignored obsolete state response from {who}"); - return Err(BadPeer(*who, rep::NOT_REQUESTED)) - }; - - match import_result { - state::ImportResult::Import(hash, header, state, body, justifications) => { - let origin = BlockOrigin::NetworkInitialSync; - let block = IncomingBlock { - hash, - header: Some(header), - body, - indexed_body: None, - justifications, - origin: None, - allow_missing_state: true, - import_existing: true, - skip_execution: self.skip_execution(), - state: Some(state), - }; - debug!(target: LOG_TARGET, "State download is complete. Import is queued"); - Ok(OnStateData::Import(origin, block)) - }, - state::ImportResult::Continue => Ok(OnStateData::Continue), - state::ImportResult::BadResponse => { - debug!(target: LOG_TARGET, "Bad state data received from {who}"); - Err(BadPeer(*who, rep::BAD_BLOCK)) - }, - } - } - - fn on_warp_sync_data(&mut self, who: &PeerId, response: EncodedProof) -> Result<(), BadPeer> { - if let Some(peer) = self.peers.get_mut(who) { - if let PeerSyncState::DownloadingWarpProof = peer.state { - peer.state = PeerSyncState::Available; - self.allowed_requests.set_all(); - } - } - let import_result = if let Some(sync) = &mut self.warp_sync { - debug!( - target: LOG_TARGET, - "Importing warp proof data from {}, {} bytes.", - who, - response.0.len(), - ); - sync.import_warp_proof(response) - } else { - debug!(target: LOG_TARGET, "Ignored obsolete warp sync response from {who}"); - return Err(BadPeer(*who, rep::NOT_REQUESTED)) - }; - - match import_result { - WarpProofImportResult::Success => Ok(()), - WarpProofImportResult::BadResponse => { - debug!(target: LOG_TARGET, "Bad proof data received from {who}"); - Err(BadPeer(*who, rep::BAD_BLOCK)) - }, - } - } - - /// A batch of blocks have been processed, with or without errors. 
- /// - /// Call this when a batch of blocks have been processed by the import - /// queue, with or without errors. If an error is returned, the pending response - /// from the peer must be dropped. - #[must_use] - fn on_blocks_processed( - &mut self, - imported: usize, - count: usize, - results: Vec<(Result>, BlockImportError>, B::Hash)>, - ) -> Box, BadPeer>>> { - trace!(target: LOG_TARGET, "Imported {imported} of {count}"); - - let mut output = Vec::new(); - - let mut has_error = false; - for (_, hash) in &results { - self.queue_blocks.remove(hash); - self.blocks.clear_queued(hash); - if let Some(gap_sync) = &mut self.gap_sync { - gap_sync.blocks.clear_queued(hash); - } - } - for (result, hash) in results { - if has_error { - break - } - - has_error |= result.is_err(); - - match result { - Ok(BlockImportStatus::ImportedKnown(number, who)) => - if let Some(peer) = who { - self.update_peer_common_number(&peer, number); - }, - Ok(BlockImportStatus::ImportedUnknown(number, aux, who)) => { - if aux.clear_justification_requests { - trace!( - target: LOG_TARGET, - "Block imported clears all pending justification requests {number}: {hash:?}", - ); - self.clear_justification_requests(); - } - - if aux.needs_justification { - trace!( - target: LOG_TARGET, - "Block imported but requires justification {number}: {hash:?}", - ); - self.request_justification(&hash, number); - } - - if aux.bad_justification { - if let Some(ref peer) = who { - warn!("💔 Sent block with bad justification to import"); - output.push(Err(BadPeer(*peer, rep::BAD_JUSTIFICATION))); - } - } - - if let Some(peer) = who { - self.update_peer_common_number(&peer, number); - } - let state_sync_complete = - self.state_sync.as_ref().map_or(false, |s| s.target() == hash); - if state_sync_complete { - info!( - target: LOG_TARGET, - "State sync is complete ({} MiB), restarting block sync.", - self.state_sync.as_ref().map_or(0, |s| s.progress().size / (1024 * 1024)), - ); - self.state_sync = None; - self.mode = SyncMode::Full; - output.extend(self.restart()); - } - let warp_sync_complete = self - .warp_sync - .as_ref() - .map_or(false, |s| s.target_block_hash() == Some(hash)); - if warp_sync_complete { - info!( - target: LOG_TARGET, - "Warp sync is complete ({} MiB), restarting block sync.", - self.warp_sync.as_ref().map_or(0, |s| s.progress().total_bytes / (1024 * 1024)), - ); - self.warp_sync = None; - self.mode = SyncMode::Full; - output.extend(self.restart()); - } - let gap_sync_complete = - self.gap_sync.as_ref().map_or(false, |s| s.target == number); - if gap_sync_complete { - info!( - target: LOG_TARGET, - "Block history download is complete." 
- ); - self.gap_sync = None; - } - }, - Err(BlockImportError::IncompleteHeader(who)) => - if let Some(peer) = who { - warn!( - target: LOG_TARGET, - "💔 Peer sent block with incomplete header to import", - ); - output.push(Err(BadPeer(peer, rep::INCOMPLETE_HEADER))); - output.extend(self.restart()); - }, - Err(BlockImportError::VerificationFailed(who, e)) => { - let extra_message = - who.map_or_else(|| "".into(), |peer| format!(" received from ({peer})")); - - warn!( - target: LOG_TARGET, - "💔 Verification failed for block {hash:?}{extra_message}: {e:?}", - ); - - if let Some(peer) = who { - output.push(Err(BadPeer(peer, rep::VERIFICATION_FAIL))); - } - - output.extend(self.restart()); - }, - Err(BlockImportError::BadBlock(who)) => - if let Some(peer) = who { - warn!( - target: LOG_TARGET, - "💔 Block {hash:?} received from peer {peer} has been blacklisted", - ); - output.push(Err(BadPeer(peer, rep::BAD_BLOCK))); - }, - Err(BlockImportError::MissingState) => { - // This may happen if the chain we were requesting upon has been discarded - // in the meantime because other chain has been finalized. - // Don't mark it as bad as it still may be synced if explicitly requested. - trace!(target: LOG_TARGET, "Obsolete block {hash:?}"); - }, - e @ Err(BlockImportError::UnknownParent) | e @ Err(BlockImportError::Other(_)) => { - warn!(target: LOG_TARGET, "💔 Error importing block {hash:?}: {}", e.unwrap_err()); - self.state_sync = None; - self.warp_sync = None; - output.extend(self.restart()); - }, - Err(BlockImportError::Cancelled) => {}, - }; - } - - self.allowed_requests.set_all(); - Box::new(output.into_iter()) - } -} - -// This is purely during a backwards compatible transitionary period and should be removed -// once we can assume all nodes can send and receive multiple Justifications -// The ID tag is hardcoded here to avoid depending on the GRANDPA crate. -// See: https://github.com/paritytech/substrate/issues/8172 -fn legacy_justification_mapping( - justification: Option, -) -> Option { - justification.map(|just| (*b"FRNK", just).into()) -} - -/// Request the ancestry for a block. Sends a request for header and justification for the given -/// block number. Used during ancestry search. -fn ancestry_request(block: NumberFor) -> BlockRequest { - BlockRequest:: { - id: 0, - fields: BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION, - from: FromBlock::Number(block), - direction: Direction::Ascending, - max: Some(1), - } -} - -/// The ancestor search state expresses which algorithm, and its stateful parameters, we are using -/// to try to find an ancestor block -#[derive(Copy, Clone, Eq, PartialEq, Debug)] -pub enum AncestorSearchState { - /// Use exponential backoff to find an ancestor, then switch to binary search. - /// We keep track of the exponent. - ExponentialBackoff(NumberFor), - /// Using binary search to find the best ancestor. - /// We keep track of left and right bounds. - BinarySearch(NumberFor, NumberFor), -} - -/// This function handles the ancestor search strategy used. The goal is to find a common point -/// that both our chains agree on that is as close to the tip as possible. -/// The way this works is we first have an exponential backoff strategy, where we try to step -/// forward until we find a block hash mismatch. The size of the step doubles each step we take. -/// -/// When we've found a block hash mismatch we then fall back to a binary search between the two -/// last known points to find the common block closest to the tip. 
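// A worked example of the search described above, assuming our best queued
// block is #100, the peer's best is #120 and the true common ancestor is #95
// (all numbers hypothetical). The search starts in `ExponentialBackoff(1)`
// with `current = min(100, 120) = 100`:
//
//   #100 mismatch -> step back 1,  ExponentialBackoff(2),  probe #99
//   #99  mismatch -> step back 2,  ExponentialBackoff(4),  probe #97
//   #97  mismatch -> step back 4,  ExponentialBackoff(8),  probe #93
//   #93  match    -> switch to     BinarySearch(93, 97),   probe #95
//   #95  match    -> narrow to     BinarySearch(95, 97),   probe #96
//   #96  mismatch -> narrow to     BinarySearch(95, 96),   probe #95
//   #95: left == current, the search terminates and the peer's common number is #95.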
-fn handle_ancestor_search_state( - state: &AncestorSearchState, - curr_block_num: NumberFor, - block_hash_match: bool, -) -> Option<(AncestorSearchState, NumberFor)> { - let two = >::one() + >::one(); - match state { - AncestorSearchState::ExponentialBackoff(next_distance_to_tip) => { - let next_distance_to_tip = *next_distance_to_tip; - if block_hash_match && next_distance_to_tip == One::one() { - // We found the ancestor in the first step so there is no need to execute binary - // search. - return None - } - if block_hash_match { - let left = curr_block_num; - let right = left + next_distance_to_tip / two; - let middle = left + (right - left) / two; - Some((AncestorSearchState::BinarySearch(left, right), middle)) - } else { - let next_block_num = - curr_block_num.checked_sub(&next_distance_to_tip).unwrap_or_else(Zero::zero); - let next_distance_to_tip = next_distance_to_tip * two; - Some(( - AncestorSearchState::ExponentialBackoff(next_distance_to_tip), - next_block_num, - )) - } - }, - AncestorSearchState::BinarySearch(mut left, mut right) => { - if left >= curr_block_num { - return None - } - if block_hash_match { - left = curr_block_num; - } else { - right = curr_block_num; - } - assert!(right >= left); - let middle = left + (right - left) / two; - if middle == curr_block_num { - None - } else { - Some((AncestorSearchState::BinarySearch(left, right), middle)) - } - }, - } -} - -/// Get a new block request for the peer if any. -fn peer_block_request( - id: &PeerId, - peer: &PeerSync, - blocks: &mut BlockCollection, - attrs: BlockAttributes, - max_parallel_downloads: u32, - max_blocks_per_request: u32, - finalized: NumberFor, - best_num: NumberFor, -) -> Option<(Range>, BlockRequest)> { - if best_num >= peer.best_number { - // Will be downloaded as alternative fork instead. - return None - } else if peer.common_number < finalized { - trace!( - target: LOG_TARGET, - "Requesting pre-finalized chain from {:?}, common={}, finalized={}, peer best={}, our best={}", - id, peer.common_number, finalized, peer.best_number, best_num, - ); - } - let range = blocks.needed_blocks( - *id, - max_blocks_per_request, - peer.best_number, - peer.common_number, - max_parallel_downloads, - MAX_DOWNLOAD_AHEAD, - )?; - - // The end is not part of the range. - let last = range.end.saturating_sub(One::one()); - - let from = if peer.best_number == last { - FromBlock::Hash(peer.best_hash) - } else { - FromBlock::Number(last) - }; - - let request = BlockRequest:: { - id: 0, - fields: attrs, - from, - direction: Direction::Descending, - max: Some((range.end - range.start).saturated_into::()), - }; - - Some((range, request)) -} - -/// Get a new block request for the peer if any. -fn peer_gap_block_request( - id: &PeerId, - peer: &PeerSync, - blocks: &mut BlockCollection, - attrs: BlockAttributes, - target: NumberFor, - common_number: NumberFor, - max_blocks_per_request: u32, -) -> Option<(Range>, BlockRequest)> { - let range = blocks.needed_blocks( - *id, - max_blocks_per_request, - std::cmp::min(peer.best_number, target), - common_number, - 1, - MAX_DOWNLOAD_AHEAD, - )?; - - // The end is not part of the range. - let last = range.end.saturating_sub(One::one()); - let from = FromBlock::Number(last); - - let request = BlockRequest:: { - id: 0, - fields: attrs, - from, - direction: Direction::Descending, - max: Some((range.end - range.start).saturated_into::()), - }; - Some((range, request)) -} - -/// Get pending fork sync targets for a peer. 
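// A sketch of how `peer_block_request` above turns a needed block range into a
// request: blocks are asked for in descending order starting from the last
// block of the range (`range.end` is exclusive), and `max` is the length of
// the range. Simplified standalone types with plain `u64` numbers and no
// hashes (the real helper switches to `FromBlock::Hash` when the range ends at
// the peer's best block). Illustrative only, not part of the diff.
use std::ops::Range;

#[derive(Debug, PartialEq)]
struct Request {
    /// Number of the newest block requested.
    from: u64,
    descending: bool,
    max: usize,
}

fn request_for(range: &Range<u64>) -> Request {
    Request {
        // The end of the range is exclusive, so the newest requested block is `end - 1`.
        from: range.end.saturating_sub(1),
        descending: true,
        max: (range.end - range.start) as usize,
    }
}

fn main() {
    // A needed range #100..#164 becomes "up to 64 blocks, newest first, starting at #163".
    assert_eq!(request_for(&(100..164)), Request { from: 163, descending: true, max: 64 });
}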
-fn fork_sync_request( - id: &PeerId, - targets: &mut HashMap>, - best_num: NumberFor, - finalized: NumberFor, - attributes: BlockAttributes, - check_block: impl Fn(&B::Hash) -> BlockStatus, - max_blocks_per_request: u32, -) -> Option<(B::Hash, BlockRequest)> { - targets.retain(|hash, r| { - if r.number <= finalized { - trace!( - target: LOG_TARGET, - "Removed expired fork sync request {:?} (#{})", - hash, - r.number, - ); - return false - } - if check_block(hash) != BlockStatus::Unknown { - trace!( - target: LOG_TARGET, - "Removed obsolete fork sync request {:?} (#{})", - hash, - r.number, - ); - return false - } - true - }); - for (hash, r) in targets { - if !r.peers.contains(&id) { - continue - } - // Download the fork only if it is behind or not too far ahead our tip of the chain - // Otherwise it should be downloaded in full sync mode. - if r.number <= best_num || - (r.number - best_num).saturated_into::() < max_blocks_per_request as u32 - { - let parent_status = r.parent_hash.as_ref().map_or(BlockStatus::Unknown, check_block); - let count = if parent_status == BlockStatus::Unknown { - (r.number - finalized).saturated_into::() // up to the last finalized block - } else { - // request only single block - 1 - }; - trace!( - target: LOG_TARGET, - "Downloading requested fork {hash:?} from {id}, {count} blocks", - ); - return Some(( - *hash, - BlockRequest:: { - id: 0, - fields: attributes, - from: FromBlock::Hash(*hash), - direction: Direction::Descending, - max: Some(count), - }, - )) - } else { - trace!(target: LOG_TARGET, "Fork too far in the future: {:?} (#{})", hash, r.number); - } - } - None -} - -/// Returns `true` if the given `block` is a descendent of `base`. -fn is_descendent_of( - client: &T, - base: &Block::Hash, - block: &Block::Hash, -) -> sp_blockchain::Result -where - Block: BlockT, - T: HeaderMetadata + ?Sized, -{ - if base == block { - return Ok(false) - } - - let ancestor = sp_blockchain::lowest_common_ancestor(client, *block, *base)?; - - Ok(ancestor.hash == *base) -} - -/// Validate that the given `blocks` are correct. -/// Returns the number of the first block in the sequence. -/// -/// It is expected that `blocks` are in ascending order. -fn validate_blocks( - blocks: &Vec>, - who: &PeerId, - request: Option>, -) -> Result>, BadPeer> { - if let Some(request) = request { - if Some(blocks.len() as _) > request.max { - debug!( - target: LOG_TARGET, - "Received more blocks than requested from {}. Expected in maximum {:?}, got {}.", - who, - request.max, - blocks.len(), - ); - - return Err(BadPeer(*who, rep::NOT_REQUESTED)) - } - - let block_header = - if request.direction == Direction::Descending { blocks.last() } else { blocks.first() } - .and_then(|b| b.header.as_ref()); - - let expected_block = block_header.as_ref().map_or(false, |h| match request.from { - FromBlock::Hash(hash) => h.hash() == hash, - FromBlock::Number(n) => h.number() == &n, - }); - - if !expected_block { - debug!( - target: LOG_TARGET, - "Received block that was not requested. 
Requested {:?}, got {:?}.", - request.from, - block_header, - ); - - return Err(BadPeer(*who, rep::NOT_REQUESTED)) - } - - if request.fields.contains(BlockAttributes::HEADER) && - blocks.iter().any(|b| b.header.is_none()) - { - trace!( - target: LOG_TARGET, - "Missing requested header for a block in response from {who}.", - ); - - return Err(BadPeer(*who, rep::BAD_RESPONSE)) - } - - if request.fields.contains(BlockAttributes::BODY) && blocks.iter().any(|b| b.body.is_none()) - { - trace!( - target: LOG_TARGET, - "Missing requested body for a block in response from {who}.", - ); - - return Err(BadPeer(*who, rep::BAD_RESPONSE)) - } - } - - for b in blocks { - if let Some(header) = &b.header { - let hash = header.hash(); - if hash != b.hash { - debug!( - target:LOG_TARGET, - "Bad header received from {}. Expected hash {:?}, got {:?}", - who, - b.hash, - hash, - ); - return Err(BadPeer(*who, rep::BAD_BLOCK)) - } - } - if let (Some(header), Some(body)) = (&b.header, &b.body) { - let expected = *header.extrinsics_root(); - let got = HashingFor::::ordered_trie_root( - body.iter().map(Encode::encode).collect(), - sp_runtime::StateVersion::V0, - ); - if expected != got { - debug!( - target:LOG_TARGET, - "Bad extrinsic root for a block {} received from {}. Expected {:?}, got {:?}", - b.hash, - who, - expected, - got, - ); - return Err(BadPeer(*who, rep::BAD_BLOCK)) - } - } - } - - Ok(blocks.first().and_then(|b| b.header.as_ref()).map(|h| *h.number())) -} - -#[cfg(test)] -mod test { - use super::*; - use crate::service::network::NetworkServiceProvider; - use futures::executor::block_on; - use sc_block_builder::BlockBuilderProvider; - use sc_network_common::sync::message::{BlockAnnounce, BlockData, BlockState, FromBlock}; - use sp_blockchain::HeaderBackend; - use substrate_test_runtime_client::{ - runtime::{Block, Hash, Header}, - BlockBuilderExt, ClientBlockImportExt, ClientExt, DefaultTestClientBuilderExt, TestClient, - TestClientBuilder, TestClientBuilderExt, - }; - - #[test] - fn processes_empty_response_on_justification_request_for_unknown_block() { - // if we ask for a justification for a given block to a peer that doesn't know that block - // (different from not having a justification), the peer will reply with an empty response. - // internally we should process the response as the justification not being available. 
- - let client = Arc::new(TestClientBuilder::new().build()); - let peer_id = PeerId::random(); - - let (_chain_sync_network_provider, chain_sync_network_handle) = - NetworkServiceProvider::new(); - let mut sync = ChainSync::new( - SyncMode::Full, - client.clone(), - ProtocolName::from("test-block-announce-protocol"), - 1, - 64, - None, - chain_sync_network_handle, - ) - .unwrap(); - - let (a1_hash, a1_number) = { - let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - (a1.hash(), *a1.header.number()) - }; - - // add a new peer with the same best block - sync.new_peer(peer_id, a1_hash, a1_number).unwrap(); - - // and request a justification for the block - sync.request_justification(&a1_hash, a1_number); - - // the justification request should be scheduled to that peer - assert!(sync - .justification_requests() - .iter() - .any(|(who, request)| { *who == peer_id && request.from == FromBlock::Hash(a1_hash) })); - - // there are no extra pending requests - assert_eq!(sync.extra_justifications.pending_requests().count(), 0); - - // there's one in-flight extra request to the expected peer - assert!(sync.extra_justifications.active_requests().any(|(who, (hash, number))| { - *who == peer_id && *hash == a1_hash && *number == a1_number - })); - - // if the peer replies with an empty response (i.e. it doesn't know the block), - // the active request should be cleared. - assert_eq!( - sync.on_block_justification(peer_id, BlockResponse:: { id: 0, blocks: vec![] }), - Ok(OnBlockJustification::Nothing), - ); - - // there should be no in-flight requests - assert_eq!(sync.extra_justifications.active_requests().count(), 0); - - // and the request should now be pending again, waiting for reschedule - assert!(sync - .extra_justifications - .pending_requests() - .any(|(hash, number)| { *hash == a1_hash && *number == a1_number })); - } - - #[test] - fn restart_doesnt_affect_peers_downloading_finality_data() { - let mut client = Arc::new(TestClientBuilder::new().build()); - let (_chain_sync_network_provider, chain_sync_network_handle) = - NetworkServiceProvider::new(); - - let mut sync = ChainSync::new( - SyncMode::Full, - client.clone(), - ProtocolName::from("test-block-announce-protocol"), - 1, - 64, - None, - chain_sync_network_handle, - ) - .unwrap(); - - let peer_id1 = PeerId::random(); - let peer_id2 = PeerId::random(); - let peer_id3 = PeerId::random(); - - let mut new_blocks = |n| { - for _ in 0..n { - let block = client.new_block(Default::default()).unwrap().build().unwrap().block; - block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); - } - - let info = client.info(); - (info.best_hash, info.best_number) - }; - - let (b1_hash, b1_number) = new_blocks(50); - - // add 2 peers at blocks that we don't have locally - sync.new_peer(peer_id1, Hash::random(), 42).unwrap(); - sync.new_peer(peer_id2, Hash::random(), 10).unwrap(); - - // we wil send block requests to these peers - // for these blocks we don't know about - assert!(sync - .block_requests() - .into_iter() - .all(|(p, _)| { p == peer_id1 || p == peer_id2 })); - - // add a new peer at a known block - sync.new_peer(peer_id3, b1_hash, b1_number).unwrap(); - - // we request a justification for a block we have locally - sync.request_justification(&b1_hash, b1_number); - - // the justification request should be scheduled to the - // new peer which is at the given block - assert!(sync.justification_requests().iter().any(|(p, r)| { - *p == peer_id3 && - r.fields == BlockAttributes::JUSTIFICATION && - r.from == 
FromBlock::Hash(b1_hash) - })); - - assert_eq!( - sync.peers.get(&peer_id3).unwrap().state, - PeerSyncState::DownloadingJustification(b1_hash), - ); - - // we restart the sync state - let block_requests = sync.restart(); - - // which should make us send out block requests to the first two peers - assert!(block_requests.map(|r| r.unwrap()).all(|event| match event { - BlockRequestAction::SendRequest { peer_id, .. } => - peer_id == peer_id1 || peer_id == peer_id2, - BlockRequestAction::RemoveStale { .. } => false, - })); - - // peer 3 should be unaffected it was downloading finality data - assert_eq!( - sync.peers.get(&peer_id3).unwrap().state, - PeerSyncState::DownloadingJustification(b1_hash), - ); - - // Set common block to something that we don't have (e.g. failed import) - sync.peers.get_mut(&peer_id3).unwrap().common_number = 100; - let _ = sync.restart().count(); - assert_eq!(sync.peers.get(&peer_id3).unwrap().common_number, 50); - } - - /// Send a block annoucnement for the given `header`. - fn send_block_announce( - header: Header, - peer_id: PeerId, - sync: &mut ChainSync, - ) { - let announce = BlockAnnounce { - header: header.clone(), - state: Some(BlockState::Best), - data: Some(Vec::new()), - }; - - sync.on_validated_block_announce(true, peer_id, &announce); - } - - /// Create a block response from the given `blocks`. - fn create_block_response(blocks: Vec) -> BlockResponse { - BlockResponse:: { - id: 0, - blocks: blocks - .into_iter() - .map(|b| BlockData:: { - hash: b.hash(), - header: Some(b.header().clone()), - body: Some(b.deconstruct().1), - indexed_body: None, - receipt: None, - message_queue: None, - justification: None, - justifications: None, - }) - .collect(), - } - } - - /// Get a block request from `sync` and check that is matches the expected request. - fn get_block_request( - sync: &mut ChainSync, - from: FromBlock, - max: u32, - peer: &PeerId, - ) -> BlockRequest { - let requests = sync.block_requests(); - - log::trace!(target: LOG_TARGET, "Requests: {requests:?}"); - - assert_eq!(1, requests.len()); - assert_eq!(*peer, requests[0].0); - - let request = requests[0].1.clone(); - - assert_eq!(from, request.from); - assert_eq!(Some(max), request.max); - request - } - - /// Build and import a new best block. - fn build_block(client: &mut Arc, at: Option, fork: bool) -> Block { - let at = at.unwrap_or_else(|| client.info().best_hash); - - let mut block_builder = client.new_block_at(at, Default::default(), false).unwrap(); - - if fork { - block_builder.push_storage_change(vec![1, 2, 3], Some(vec![4, 5, 6])).unwrap(); - } - - let block = block_builder.build().unwrap().block; - - block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); - block - } - - /// This test is a regression test as observed on a real network. - /// - /// The node is connected to multiple peers. Both of these peers are having a best block (1) - /// that is below our best block (3). Now peer 2 announces a fork of block 3 that we will - /// request from peer 2. After importing the fork, peer 2 and then peer 1 will announce block 4. - /// But as peer 1 in our view is still at block 1, we will request block 2 (which we already - /// have) from it. In the meanwhile peer 2 sends us block 4 and 3 and we send another request - /// for block 2 to peer 2. Peer 1 answers with block 2 and then peer 2. This will need to - /// succeed, as we have requested block 2 from both peers. 
- #[test] - fn do_not_report_peer_on_block_response_for_block_request() { - sp_tracing::try_init_simple(); - - let mut client = Arc::new(TestClientBuilder::new().build()); - let (_chain_sync_network_provider, chain_sync_network_handle) = - NetworkServiceProvider::new(); - - let mut sync = ChainSync::new( - SyncMode::Full, - client.clone(), - ProtocolName::from("test-block-announce-protocol"), - 5, - 64, - None, - chain_sync_network_handle, - ) - .unwrap(); - - let peer_id1 = PeerId::random(); - let peer_id2 = PeerId::random(); - - let mut client2 = client.clone(); - let mut build_block_at = |at, import| { - let mut block_builder = client2.new_block_at(at, Default::default(), false).unwrap(); - // Make sure we generate a different block as fork - block_builder.push_storage_change(vec![1, 2, 3], Some(vec![4, 5, 6])).unwrap(); - - let block = block_builder.build().unwrap().block; - - if import { - block_on(client2.import(BlockOrigin::Own, block.clone())).unwrap(); - } - - block - }; - - let block1 = build_block(&mut client, None, false); - let block2 = build_block(&mut client, None, false); - let block3 = build_block(&mut client, None, false); - let block3_fork = build_block_at(block2.hash(), false); - - // Add two peers which are on block 1. - sync.new_peer(peer_id1, block1.hash(), 1).unwrap(); - sync.new_peer(peer_id2, block1.hash(), 1).unwrap(); - - // Tell sync that our best block is 3. - sync.update_chain_info(&block3.hash(), 3); - - // There should be no requests. - assert!(sync.block_requests().is_empty()); - - // Let peer2 announce a fork of block 3 - send_block_announce(block3_fork.header().clone(), peer_id2, &mut sync); - - // Import and tell sync that we now have the fork. - block_on(client.import(BlockOrigin::Own, block3_fork.clone())).unwrap(); - sync.update_chain_info(&block3_fork.hash(), 3); - - let block4 = build_block_at(block3_fork.hash(), false); - - // Let peer2 announce block 4 and check that sync wants to get the block. - send_block_announce(block4.header().clone(), peer_id2, &mut sync); - - let request = get_block_request(&mut sync, FromBlock::Hash(block4.hash()), 2, &peer_id2); - - // Peer1 announces the same block, but as the common block is still `1`, sync will request - // block 2 again. - send_block_announce(block4.header().clone(), peer_id1, &mut sync); - - let request2 = get_block_request(&mut sync, FromBlock::Number(2), 1, &peer_id1); - - let response = create_block_response(vec![block4.clone(), block3_fork.clone()]); - let res = sync.on_block_data(&peer_id2, Some(request), response).unwrap(); - - // We should not yet import the blocks, because there is still an open request for fetching - // block `2` which blocks the import. 
- assert!( - matches!(res, OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.is_empty()) - ); - - let request3 = get_block_request(&mut sync, FromBlock::Number(2), 1, &peer_id2); - - let response = create_block_response(vec![block2.clone()]); - let res = sync.on_block_data(&peer_id1, Some(request2), response).unwrap(); - assert!(matches!( - res, - OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) - if blocks.iter().all(|b| [2, 3, 4].contains(b.header.as_ref().unwrap().number())) - )); - - let response = create_block_response(vec![block2.clone()]); - let res = sync.on_block_data(&peer_id2, Some(request3), response).unwrap(); - // Nothing to import - assert!( - matches!(res, OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.is_empty()) - ); - } - - fn unwrap_from_block_number(from: FromBlock) -> u64 { - if let FromBlock::Number(from) = from { - from - } else { - panic!("Expected a number!"); - } - } - - /// A regression test for a behavior we have seen on a live network. - /// - /// The scenario is that the node is doing a full resync and is connected to some node that is - /// doing a major sync as well. This other node that is doing a major sync will finish before - /// our node and send a block announcement message, but we don't have seen any block - /// announcement from this node in its sync process. Meaning our common number didn't change. It - /// is now expected that we start an ancestor search to find the common number. - #[test] - fn do_ancestor_search_when_common_block_to_best_qeued_gap_is_to_big() { - sp_tracing::try_init_simple(); - - let blocks = { - let mut client = Arc::new(TestClientBuilder::new().build()); - (0..MAX_DOWNLOAD_AHEAD * 2) - .map(|_| build_block(&mut client, None, false)) - .collect::>() - }; - - let mut client = Arc::new(TestClientBuilder::new().build()); - let (_chain_sync_network_provider, chain_sync_network_handle) = - NetworkServiceProvider::new(); - let info = client.info(); - - let mut sync = ChainSync::new( - SyncMode::Full, - client.clone(), - ProtocolName::from("test-block-announce-protocol"), - 5, - 64, - None, - chain_sync_network_handle, - ) - .unwrap(); - - let peer_id1 = PeerId::random(); - let peer_id2 = PeerId::random(); - - let best_block = blocks.last().unwrap().clone(); - let max_blocks_to_request = sync.max_blocks_per_request; - // Connect the node we will sync from - sync.new_peer(peer_id1, best_block.hash(), *best_block.header().number()) - .unwrap(); - sync.new_peer(peer_id2, info.best_hash, 0).unwrap(); - - let mut best_block_num = 0; - while best_block_num < MAX_DOWNLOAD_AHEAD { - let request = get_block_request( - &mut sync, - FromBlock::Number(max_blocks_to_request as u64 + best_block_num as u64), - max_blocks_to_request as u32, - &peer_id1, - ); - - let from = unwrap_from_block_number(request.from.clone()); - - let mut resp_blocks = blocks[best_block_num as usize..from as usize].to_vec(); - resp_blocks.reverse(); - - let response = create_block_response(resp_blocks.clone()); - - let res = sync.on_block_data(&peer_id1, Some(request), response).unwrap(); - assert!(matches!( - res, - OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.len() == max_blocks_to_request as usize - ),); - - best_block_num += max_blocks_to_request as u32; - - let _ = sync.on_blocks_processed( - max_blocks_to_request as usize, - max_blocks_to_request as usize, - resp_blocks - .iter() - .rev() - .map(|b| { - ( - Ok(BlockImportStatus::ImportedUnknown( - *b.header().number(), - 
Default::default(), - Some(peer_id1), - )), - b.hash(), - ) - }) - .collect(), - ); - - resp_blocks - .into_iter() - .rev() - .for_each(|b| block_on(client.import_as_final(BlockOrigin::Own, b)).unwrap()); - } - - // "Wait" for the queue to clear - sync.queue_blocks.clear(); - - // Let peer2 announce that it finished syncing - send_block_announce(best_block.header().clone(), peer_id2, &mut sync); - - let (peer1_req, peer2_req) = - sync.block_requests().into_iter().fold((None, None), |res, req| { - if req.0 == peer_id1 { - (Some(req.1), res.1) - } else if req.0 == peer_id2 { - (res.0, Some(req.1)) - } else { - panic!("Unexpected req: {:?}", req) - } - }); - - // We should now do an ancestor search to find the correct common block. - let peer2_req = peer2_req.unwrap(); - assert_eq!(Some(1), peer2_req.max); - assert_eq!(FromBlock::Number(best_block_num as u64), peer2_req.from); - - let response = create_block_response(vec![blocks[(best_block_num - 1) as usize].clone()]); - let res = sync.on_block_data(&peer_id2, Some(peer2_req), response).unwrap(); - assert!(matches!( - res, - OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.is_empty() - ),); - - let peer1_from = unwrap_from_block_number(peer1_req.unwrap().from); - - // As we are on the same chain, we should directly continue with requesting blocks from - // peer 2 as well. - get_block_request( - &mut sync, - FromBlock::Number(peer1_from + max_blocks_to_request as u64), - max_blocks_to_request as u32, - &peer_id2, - ); - } - - /// A test that ensures that we can sync a huge fork. - /// - /// The following scenario: - /// A peer connects to us and we both have the common block 512. The last finalized is 2048. - /// Our best block is 4096. The peer send us a block announcement with 4097 from a fork. - /// - /// We will first do an ancestor search to find the common block. 
After that we start to sync - /// the fork and finish it ;) - #[test] - fn can_sync_huge_fork() { - sp_tracing::try_init_simple(); - - let (_chain_sync_network_provider, chain_sync_network_handle) = - NetworkServiceProvider::new(); - let mut client = Arc::new(TestClientBuilder::new().build()); - let blocks = (0..MAX_BLOCKS_TO_LOOK_BACKWARDS * 4) - .map(|_| build_block(&mut client, None, false)) - .collect::>(); - - let fork_blocks = { - let mut client = Arc::new(TestClientBuilder::new().build()); - let fork_blocks = blocks[..MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2] - .into_iter() - .inspect(|b| block_on(client.import(BlockOrigin::Own, (*b).clone())).unwrap()) - .cloned() - .collect::>(); - - fork_blocks - .into_iter() - .chain( - (0..MAX_BLOCKS_TO_LOOK_BACKWARDS * 2 + 1) - .map(|_| build_block(&mut client, None, true)), - ) - .collect::>() - }; - - let info = client.info(); - - let mut sync = ChainSync::new( - SyncMode::Full, - client.clone(), - ProtocolName::from("test-block-announce-protocol"), - 5, - 64, - None, - chain_sync_network_handle, - ) - .unwrap(); - - let finalized_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2 - 1].clone(); - let just = (*b"TEST", Vec::new()); - client.finalize_block(finalized_block.hash(), Some(just)).unwrap(); - sync.update_chain_info(&info.best_hash, info.best_number); - - let peer_id1 = PeerId::random(); - - let common_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize / 2].clone(); - // Connect the node we will sync from - sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()) - .unwrap(); - - send_block_announce(fork_blocks.last().unwrap().header().clone(), peer_id1, &mut sync); - - let mut request = - get_block_request(&mut sync, FromBlock::Number(info.best_number), 1, &peer_id1); - - // Do the ancestor search - loop { - let block = &fork_blocks[unwrap_from_block_number(request.from.clone()) as usize - 1]; - let response = create_block_response(vec![block.clone()]); - - let on_block_data = sync.on_block_data(&peer_id1, Some(request), response).unwrap(); - request = if let OnBlockData::Request(_peer, request) = on_block_data { - request - } else { - // We found the ancenstor - break - }; - - log::trace!(target: LOG_TARGET, "Request: {request:?}"); - } - - // Now request and import the fork. 
- let mut best_block_num = *finalized_block.header().number() as u32; - let max_blocks_to_request = sync.max_blocks_per_request; - while best_block_num < *fork_blocks.last().unwrap().header().number() as u32 - 1 { - let request = get_block_request( - &mut sync, - FromBlock::Number(max_blocks_to_request as u64 + best_block_num as u64), - max_blocks_to_request as u32, - &peer_id1, - ); - - let from = unwrap_from_block_number(request.from.clone()); - - let mut resp_blocks = fork_blocks[best_block_num as usize..from as usize].to_vec(); - resp_blocks.reverse(); - - let response = create_block_response(resp_blocks.clone()); - - let res = sync.on_block_data(&peer_id1, Some(request), response).unwrap(); - assert!(matches!( - res, - OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.len() == sync.max_blocks_per_request as usize - ),); - - best_block_num += sync.max_blocks_per_request as u32; - - let _ = sync.on_blocks_processed( - max_blocks_to_request as usize, - max_blocks_to_request as usize, - resp_blocks - .iter() - .rev() - .map(|b| { - ( - Ok(BlockImportStatus::ImportedUnknown( - *b.header().number(), - Default::default(), - Some(peer_id1), - )), - b.hash(), - ) - }) - .collect(), - ); - - resp_blocks - .into_iter() - .rev() - .for_each(|b| block_on(client.import(BlockOrigin::Own, b)).unwrap()); - } - - // Request the tip - get_block_request( - &mut sync, - FromBlock::Hash(fork_blocks.last().unwrap().hash()), - 1, - &peer_id1, - ); - } - - #[test] - fn syncs_fork_without_duplicate_requests() { - sp_tracing::try_init_simple(); - - let (_chain_sync_network_provider, chain_sync_network_handle) = - NetworkServiceProvider::new(); - let mut client = Arc::new(TestClientBuilder::new().build()); - let blocks = (0..MAX_BLOCKS_TO_LOOK_BACKWARDS * 4) - .map(|_| build_block(&mut client, None, false)) - .collect::>(); - - let fork_blocks = { - let mut client = Arc::new(TestClientBuilder::new().build()); - let fork_blocks = blocks[..MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2] - .into_iter() - .inspect(|b| block_on(client.import(BlockOrigin::Own, (*b).clone())).unwrap()) - .cloned() - .collect::>(); - - fork_blocks - .into_iter() - .chain( - (0..MAX_BLOCKS_TO_LOOK_BACKWARDS * 2 + 1) - .map(|_| build_block(&mut client, None, true)), - ) - .collect::>() - }; - - let info = client.info(); - - let mut sync = ChainSync::new( - SyncMode::Full, - client.clone(), - ProtocolName::from("test-block-announce-protocol"), - 5, - 64, - None, - chain_sync_network_handle, - ) - .unwrap(); - - let finalized_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2 - 1].clone(); - let just = (*b"TEST", Vec::new()); - client.finalize_block(finalized_block.hash(), Some(just)).unwrap(); - sync.update_chain_info(&info.best_hash, info.best_number); - - let peer_id1 = PeerId::random(); - - let common_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize / 2].clone(); - // Connect the node we will sync from - sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()) - .unwrap(); - - send_block_announce(fork_blocks.last().unwrap().header().clone(), peer_id1, &mut sync); - - let mut request = - get_block_request(&mut sync, FromBlock::Number(info.best_number), 1, &peer_id1); - - // Do the ancestor search - loop { - let block = &fork_blocks[unwrap_from_block_number(request.from.clone()) as usize - 1]; - let response = create_block_response(vec![block.clone()]); - - let on_block_data = sync.on_block_data(&peer_id1, Some(request), response).unwrap(); - request = if let OnBlockData::Request(_peer, 
request) = on_block_data { - request - } else { - // We found the ancenstor - break - }; - - log::trace!(target: LOG_TARGET, "Request: {request:?}"); - } - - // Now request and import the fork. - let mut best_block_num = *finalized_block.header().number() as u32; - let max_blocks_to_request = sync.max_blocks_per_request; - - let mut request = get_block_request( - &mut sync, - FromBlock::Number(max_blocks_to_request as u64 + best_block_num as u64), - max_blocks_to_request as u32, - &peer_id1, - ); - let last_block_num = *fork_blocks.last().unwrap().header().number() as u32 - 1; - while best_block_num < last_block_num { - let from = unwrap_from_block_number(request.from.clone()); - - let mut resp_blocks = fork_blocks[best_block_num as usize..from as usize].to_vec(); - resp_blocks.reverse(); - - let response = create_block_response(resp_blocks.clone()); - - let res = sync.on_block_data(&peer_id1, Some(request.clone()), response).unwrap(); - assert!(matches!( - res, - OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.len() == max_blocks_to_request as usize - ),); - - best_block_num += max_blocks_to_request as u32; - - if best_block_num < last_block_num { - // make sure we're not getting a duplicate request in the time before the blocks are - // processed - request = get_block_request( - &mut sync, - FromBlock::Number(max_blocks_to_request as u64 + best_block_num as u64), - max_blocks_to_request as u32, - &peer_id1, - ); - } - - let mut notify_imported: Vec<_> = resp_blocks - .iter() - .rev() - .map(|b| { - ( - Ok(BlockImportStatus::ImportedUnknown( - *b.header().number(), - Default::default(), - Some(peer_id1), - )), - b.hash(), - ) - }) - .collect(); - - // The import queue may send notifications in batches of varying size. So we simulate - // this here by splitting the batch into 2 notifications. 
- let max_blocks_to_request = sync.max_blocks_per_request; - let second_batch = notify_imported.split_off(notify_imported.len() / 2); - let _ = sync.on_blocks_processed( - max_blocks_to_request as usize, - max_blocks_to_request as usize, - notify_imported, - ); - - let _ = sync.on_blocks_processed( - max_blocks_to_request as usize, - max_blocks_to_request as usize, - second_batch, - ); - - resp_blocks - .into_iter() - .rev() - .for_each(|b| block_on(client.import(BlockOrigin::Own, b)).unwrap()); - } - - // Request the tip - get_block_request( - &mut sync, - FromBlock::Hash(fork_blocks.last().unwrap().hash()), - 1, - &peer_id1, - ); - } - - #[test] - fn removes_target_fork_on_disconnect() { - sp_tracing::try_init_simple(); - let (_chain_sync_network_provider, chain_sync_network_handle) = - NetworkServiceProvider::new(); - let mut client = Arc::new(TestClientBuilder::new().build()); - let blocks = (0..3).map(|_| build_block(&mut client, None, false)).collect::>(); - - let mut sync = ChainSync::new( - SyncMode::Full, - client.clone(), - ProtocolName::from("test-block-announce-protocol"), - 1, - 64, - None, - chain_sync_network_handle, - ) - .unwrap(); - - let peer_id1 = PeerId::random(); - let common_block = blocks[1].clone(); - // Connect the node we will sync from - sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()) - .unwrap(); - - // Create a "new" header and announce it - let mut header = blocks[0].header().clone(); - header.number = 4; - send_block_announce(header, peer_id1, &mut sync); - assert!(sync.fork_targets.len() == 1); - - let _ = sync.peer_disconnected(&peer_id1); - assert!(sync.fork_targets.len() == 0); - } - - #[test] - fn can_import_response_with_missing_blocks() { - sp_tracing::try_init_simple(); - let (_chain_sync_network_provider, chain_sync_network_handle) = - NetworkServiceProvider::new(); - let mut client2 = Arc::new(TestClientBuilder::new().build()); - let blocks = (0..4).map(|_| build_block(&mut client2, None, false)).collect::>(); - - let empty_client = Arc::new(TestClientBuilder::new().build()); - - let mut sync = ChainSync::new( - SyncMode::Full, - empty_client.clone(), - ProtocolName::from("test-block-announce-protocol"), - 1, - 64, - None, - chain_sync_network_handle, - ) - .unwrap(); - - let peer_id1 = PeerId::random(); - let best_block = blocks[3].clone(); - sync.new_peer(peer_id1, best_block.hash(), *best_block.header().number()) - .unwrap(); - - sync.peers.get_mut(&peer_id1).unwrap().state = PeerSyncState::Available; - sync.peers.get_mut(&peer_id1).unwrap().common_number = 0; - - // Request all missing blocks and respond only with some. - let request = - get_block_request(&mut sync, FromBlock::Hash(best_block.hash()), 4, &peer_id1); - let response = - create_block_response(vec![blocks[3].clone(), blocks[2].clone(), blocks[1].clone()]); - sync.on_block_data(&peer_id1, Some(request.clone()), response).unwrap(); - assert_eq!(sync.best_queued_number, 0); - - // Request should only contain the missing block. 
- let request = get_block_request(&mut sync, FromBlock::Number(1), 1, &peer_id1); - let response = create_block_response(vec![blocks[0].clone()]); - sync.on_block_data(&peer_id1, Some(request), response).unwrap(); - assert_eq!(sync.best_queued_number, 4); - } - #[test] - fn ancestor_search_repeat() { - let state = AncestorSearchState::::BinarySearch(1, 3); - assert!(handle_ancestor_search_state(&state, 2, true).is_none()); - } - - #[test] - fn sync_restart_removes_block_but_not_justification_requests() { - let mut client = Arc::new(TestClientBuilder::new().build()); - let (_chain_sync_network_provider, chain_sync_network_handle) = - NetworkServiceProvider::new(); - let mut sync = ChainSync::new( - SyncMode::Full, - client.clone(), - ProtocolName::from("test-block-announce-protocol"), - 1, - 64, - None, - chain_sync_network_handle, - ) - .unwrap(); - - let peers = vec![PeerId::random(), PeerId::random()]; - - let mut new_blocks = |n| { - for _ in 0..n { - let block = client.new_block(Default::default()).unwrap().build().unwrap().block; - block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); - } - - let info = client.info(); - (info.best_hash, info.best_number) - }; - - let (b1_hash, b1_number) = new_blocks(50); - - // add new peer and request blocks from them - sync.new_peer(peers[0], Hash::random(), 42).unwrap(); - - // we don't actually perform any requests, just keep track of peers waiting for a response - let mut pending_responses = HashSet::new(); - - // we wil send block requests to these peers - // for these blocks we don't know about - for (peer, _request) in sync.block_requests() { - // "send" request - pending_responses.insert(peer); - } - - // add a new peer at a known block - sync.new_peer(peers[1], b1_hash, b1_number).unwrap(); - - // we request a justification for a block we have locally - sync.request_justification(&b1_hash, b1_number); - - // the justification request should be scheduled to the - // new peer which is at the given block - let mut requests = sync.justification_requests(); - assert_eq!(requests.len(), 1); - let (peer, _request) = requests.remove(0); - // "send" request - assert!(pending_responses.insert(peer)); - - assert!(!std::matches!( - sync.peers.get(&peers[0]).unwrap().state, - PeerSyncState::DownloadingJustification(_), - )); - assert_eq!( - sync.peers.get(&peers[1]).unwrap().state, - PeerSyncState::DownloadingJustification(b1_hash), - ); - assert_eq!(pending_responses.len(), 2); - - // restart sync - let request_events = sync.restart().collect::>(); - for event in request_events.iter() { - match event.as_ref().unwrap() { - BlockRequestAction::RemoveStale { peer_id } => { - pending_responses.remove(&peer_id); - }, - BlockRequestAction::SendRequest { peer_id, .. } => { - // we drop obsolete response, but don't register a new request, it's checked in - // the `assert!` below - pending_responses.remove(&peer_id); - }, - } - } - assert!(request_events.iter().any(|event| { - match event.as_ref().unwrap() { - BlockRequestAction::RemoveStale { .. } => false, - BlockRequestAction::SendRequest { peer_id, .. 
} => peer_id == &peers[0], - } - })); - - assert_eq!(pending_responses.len(), 1); - assert!(pending_responses.contains(&peers[1])); - assert_eq!( - sync.peers.get(&peers[1]).unwrap().state, - PeerSyncState::DownloadingJustification(b1_hash), - ); - let _ = sync.peer_disconnected(&peers[1]); - pending_responses.remove(&peers[1]); - assert_eq!(pending_responses.len(), 0); - } -} diff --git a/substrate/client/network/sync/src/mock.rs b/substrate/client/network/sync/src/mock.rs index ed7c647c797732f0ff0c39828b52a9ec470f292e..42220096e0695b29c26c3652e810514b8d0044d5 100644 --- a/substrate/client/network/sync/src/mock.rs +++ b/substrate/client/network/sync/src/mock.rs @@ -23,65 +23,8 @@ use crate::block_relay_protocol::{BlockDownloader as BlockDownloaderT, BlockResp use futures::channel::oneshot; use libp2p::PeerId; use sc_network::RequestFailure; -use sc_network_common::sync::{ - message::{BlockAnnounce, BlockData, BlockRequest, BlockResponse}, - BadPeer, ChainSync as ChainSyncT, ImportBlocksAction, Metrics, OnBlockData, - OnBlockJustification, PeerInfo, SyncStatus, -}; -use sp_runtime::traits::{Block as BlockT, NumberFor}; - -mockall::mock! { - pub ChainSync {} - - impl ChainSyncT for ChainSync { - fn peer_info(&self, who: &PeerId) -> Option>; - fn status(&self) -> SyncStatus; - fn num_sync_requests(&self) -> usize; - fn num_downloaded_blocks(&self) -> usize; - fn num_peers(&self) -> usize; - fn new_peer( - &mut self, - who: PeerId, - best_hash: Block::Hash, - best_number: NumberFor, - ) -> Result>, BadPeer>; - fn update_chain_info(&mut self, best_hash: &Block::Hash, best_number: NumberFor); - fn request_justification(&mut self, hash: &Block::Hash, number: NumberFor); - fn clear_justification_requests(&mut self); - fn set_sync_fork_request( - &mut self, - peers: Vec, - hash: &Block::Hash, - number: NumberFor, - ); - fn on_block_data( - &mut self, - who: &PeerId, - request: Option>, - response: BlockResponse, - ) -> Result, BadPeer>; - fn on_block_justification( - &mut self, - who: PeerId, - response: BlockResponse, - ) -> Result, BadPeer>; - fn on_justification_import( - &mut self, - hash: Block::Hash, - number: NumberFor, - success: bool, - ); - fn on_block_finalized(&mut self, hash: &Block::Hash, number: NumberFor); - fn on_validated_block_announce( - &mut self, - is_best: bool, - who: PeerId, - announce: &BlockAnnounce, - ); - fn peer_disconnected(&mut self, who: &PeerId) -> Option>; - fn metrics(&self) -> Metrics; - } -} +use sc_network_common::sync::message::{BlockData, BlockRequest}; +use sp_runtime::traits::Block as BlockT; mockall::mock! { pub BlockDownloader {} diff --git a/substrate/client/network/sync/src/pending_responses.rs b/substrate/client/network/sync/src/pending_responses.rs index c863267e7808f13104eacd3d30f39987946443bf..55308dfc1ea907a745eda17474f31292d11eea8b 100644 --- a/substrate/client/network/sync/src/pending_responses.rs +++ b/substrate/client/network/sync/src/pending_responses.rs @@ -17,22 +17,25 @@ // along with this program. If not, see . //! [`PendingResponses`] is responsible for keeping track of pending responses and -//! polling them. +//! polling them. [`Stream`] implemented by [`PendingResponses`] never terminates. 
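The `pending_responses.rs` hunk below turns `PendingResponses` into a stream that never terminates: when the inner `StreamMap` has nothing to yield it stores the caller's `Waker` and returns `Poll::Pending`, `insert` wakes that task again, and a trivial `FusedStream` implementation becomes possible. A minimal sketch of the same pattern, with a hypothetical `NeverEndingQueue` standing in for `PendingResponses`:

```rust
use std::{
    collections::VecDeque,
    pin::Pin,
    task::{Context, Poll, Waker},
};

use futures::stream::{FusedStream, Stream};

/// Sketch of a stream that never yields `None`: when empty it parks the caller's
/// waker and stays pending; pushing a new item wakes the task again.
struct NeverEndingQueue<T> {
    items: VecDeque<T>,
    waker: Option<Waker>,
}

impl<T> NeverEndingQueue<T> {
    fn new() -> Self {
        Self { items: VecDeque::new(), waker: None }
    }

    fn push(&mut self, item: T) {
        self.items.push_back(item);
        // Wake the task that last polled us, so it polls again and sees the new item.
        if let Some(waker) = self.waker.take() {
            waker.wake();
        }
    }
}

impl<T: Unpin> Stream for NeverEndingQueue<T> {
    type Item = T;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<T>> {
        match self.items.pop_front() {
            Some(item) => Poll::Ready(Some(item)),
            // Instead of terminating with `None`, remember the waker and stay pending.
            None => {
                self.waker = Some(cx.waker().clone());
                Poll::Pending
            },
        }
    }
}

// A stream that can never terminate can implement `FusedStream` trivially.
impl<T: Unpin> FusedStream for NeverEndingQueue<T> {
    fn is_terminated(&self) -> bool {
        false
    }
}
```

Because `is_terminated()` is always `false`, such a stream can be polled from `futures::select!` loops without ever being disabled.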
+use crate::types::PeerRequest; use futures::{ channel::oneshot, future::BoxFuture, - stream::{BoxStream, Stream}, + stream::{BoxStream, FusedStream, Stream}, FutureExt, StreamExt, }; use libp2p::PeerId; use log::error; use sc_network::request_responses::RequestFailure; -use sc_network_common::sync::PeerRequest; use sp_runtime::traits::Block as BlockT; -use std::task::{Context, Poll}; +use std::task::{Context, Poll, Waker}; use tokio_stream::StreamMap; +/// Log target for this file. +const LOG_TARGET: &'static str = "sync"; + /// Response result. type ResponseResult = Result, RequestFailure>, oneshot::Canceled>; @@ -50,11 +53,13 @@ pub(crate) struct ResponseEvent { pub(crate) struct PendingResponses { /// Pending responses pending_responses: StreamMap, ResponseResult)>>, + /// Waker to implement never terminating stream + waker: Option, } impl PendingResponses { pub fn new() -> Self { - Self { pending_responses: StreamMap::new() } + Self { pending_responses: StreamMap::new(), waker: None } } pub fn insert( @@ -74,11 +79,15 @@ impl PendingResponses { .is_some() { error!( - target: crate::LOG_TARGET, + target: LOG_TARGET, "Discarded pending response from peer {peer_id}, request type: {request_type:?}.", ); debug_assert!(false); } + + if let Some(waker) = self.waker.take() { + waker.wake(); + } } pub fn remove(&mut self, peer_id: &PeerId) -> bool { @@ -90,8 +99,6 @@ impl PendingResponses { } } -impl Unpin for PendingResponses {} - impl Stream for PendingResponses { type Item = ResponseEvent; @@ -99,8 +106,8 @@ impl Stream for PendingResponses { mut self: std::pin::Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll> { - match futures::ready!(self.pending_responses.poll_next_unpin(cx)) { - Some((peer_id, (request, response))) => { + match self.pending_responses.poll_next_unpin(cx) { + Poll::Ready(Some((peer_id, (request, response)))) => { // We need to manually remove the stream, because `StreamMap` doesn't know yet that // it's going to yield `None`, so may not remove it before the next request is made // to the same peer. @@ -108,7 +115,18 @@ impl Stream for PendingResponses { Poll::Ready(Some(ResponseEvent { peer_id, request, response })) }, - None => Poll::Ready(None), + Poll::Ready(None) | Poll::Pending => { + self.waker = Some(cx.waker().clone()); + + Poll::Pending + }, } } } + +// As [`PendingResponses`] never terminates, we can easily implement [`FusedStream`] for it. +impl FusedStream for PendingResponses { + fn is_terminated(&self) -> bool { + false + } +} diff --git a/substrate/client/network/common/src/sync/metrics.rs b/substrate/client/network/sync/src/request_metrics.rs similarity index 100% rename from substrate/client/network/common/src/sync/metrics.rs rename to substrate/client/network/sync/src/request_metrics.rs diff --git a/substrate/client/network/sync/src/service/mod.rs b/substrate/client/network/sync/src/service/mod.rs index 18331d63ed29f76f383c928107b6753d7ca25d16..d045af26e70dea6da2d33ee6dcf10a6e50d44b55 100644 --- a/substrate/client/network/sync/src/service/mod.rs +++ b/substrate/client/network/sync/src/service/mod.rs @@ -16,8 +16,8 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! `ChainSync`-related service code +//! 
`SyncingEngine`-related service code -pub mod chain_sync; pub mod mock; pub mod network; +pub mod syncing_service; diff --git a/substrate/client/network/sync/src/service/chain_sync.rs b/substrate/client/network/sync/src/service/syncing_service.rs similarity index 96% rename from substrate/client/network/sync/src/service/chain_sync.rs rename to substrate/client/network/sync/src/service/syncing_service.rs index f9e0e401fdf8fad5054cd1b8bb0dfe806e4ee3ea..92d649d65dc3a5e5fac70c5a1cc16ed4c825d400 100644 --- a/substrate/client/network/sync/src/service/chain_sync.rs +++ b/substrate/client/network/sync/src/service/syncing_service.rs @@ -16,14 +16,13 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use crate::types::{ExtendedPeerInfo, SyncEvent, SyncEventStream, SyncStatus, SyncStatusProvider}; + use futures::{channel::oneshot, Stream}; use libp2p::PeerId; use sc_consensus::{BlockImportError, BlockImportStatus, JustificationSyncLink, Link}; use sc_network::{NetworkBlock, NetworkSyncForkRequest}; -use sc_network_common::sync::{ - ExtendedPeerInfo, SyncEvent, SyncEventStream, SyncStatus, SyncStatusProvider, -}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; use sp_runtime::traits::{Block as BlockT, NumberFor}; @@ -35,7 +34,7 @@ use std::{ }, }; -/// Commands send to `ChainSync` +/// Commands send to `SyncingEngine` pub enum ToServiceCommand { SetSyncForkRequest(Vec, B::Hash, NumberFor), RequestJustification(B::Hash, NumberFor), @@ -64,7 +63,7 @@ pub enum ToServiceCommand { // }, } -/// Handle for communicating with `ChainSync` asynchronously +/// Handle for communicating with `SyncingEngine` asynchronously #[derive(Clone)] pub struct SyncingService { tx: TracingUnboundedSender>, @@ -149,7 +148,7 @@ impl SyncingService { /// Get sync status /// - /// Returns an error if `ChainSync` has terminated. + /// Returns an error if `SyncingEngine` has terminated. pub async fn status(&self) -> Result, ()> { let (tx, rx) = oneshot::channel(); let _ = self.tx.unbounded_send(ToServiceCommand::Status(tx)); diff --git a/substrate/client/network/sync/src/state.rs b/substrate/client/network/sync/src/state.rs index 305f0ee6838a296b5e6f73902360069318232c88..5d34613d1c5e3e236a46396d340c6bd79709ddcc 100644 --- a/substrate/client/network/sync/src/state.rs +++ b/substrate/client/network/sync/src/state.rs @@ -18,12 +18,14 @@ //! State sync support. -use crate::schema::v1::{StateEntry, StateRequest, StateResponse}; +use crate::{ + schema::v1::{StateEntry, StateRequest, StateResponse}, + types::StateDownloadProgress, +}; use codec::{Decode, Encode}; use log::debug; use sc_client_api::{CompactProof, ProofProvider}; use sc_consensus::ImportedState; -use sc_network_common::sync::StateDownloadProgress; use smallvec::SmallVec; use sp_core::storage::well_known_keys; use sp_runtime::{ diff --git a/substrate/client/network/sync/src/types.rs b/substrate/client/network/sync/src/types.rs new file mode 100644 index 0000000000000000000000000000000000000000..5931cf47b28a4511699906e2a41f0d02959a3fd4 --- /dev/null +++ b/substrate/client/network/sync/src/types.rs @@ -0,0 +1,206 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Common syncing types. + +use futures::Stream; +use sc_network_common::{role::Roles, types::ReputationChange}; + +use libp2p::PeerId; + +use crate::warp::WarpSyncProgress; +use sc_network_common::sync::message::BlockRequest; +use sp_runtime::traits::{Block as BlockT, NumberFor}; + +use std::{any::Any, fmt, fmt::Formatter, pin::Pin, sync::Arc}; + +pub use sc_network_common::sync::SyncMode; + +/// The sync status of a peer we are trying to sync with +#[derive(Debug)] +pub struct PeerInfo { + /// Their best block hash. + pub best_hash: Block::Hash, + /// Their best block number. + pub best_number: NumberFor, +} + +/// Info about a peer's known state (both full and light). +#[derive(Clone, Debug)] +pub struct ExtendedPeerInfo { + /// Roles + pub roles: Roles, + /// Peer best block hash + pub best_hash: B::Hash, + /// Peer best block number + pub best_number: NumberFor, +} + +/// Reported sync state. +#[derive(Clone, Eq, PartialEq, Debug)] +pub enum SyncState { + /// Initial sync is complete, keep-up sync is active. + Idle, + /// Actively catching up with the chain. + Downloading { target: BlockNumber }, + /// All blocks are downloaded and are being imported. + Importing { target: BlockNumber }, +} + +impl SyncState { + /// Are we actively catching up with the chain? + pub fn is_major_syncing(&self) -> bool { + !matches!(self, SyncState::Idle) + } +} + +/// Reported state download progress. +#[derive(Clone, Eq, PartialEq, Debug)] +pub struct StateDownloadProgress { + /// Estimated download percentage. + pub percentage: u32, + /// Total state size in bytes downloaded so far. + pub size: u64, +} + +/// Syncing status and statistics. +#[derive(Debug, Clone)] +pub struct SyncStatus { + /// Current global sync state. + pub state: SyncState>, + /// Target sync block number. + pub best_seen_block: Option>, + /// Number of peers participating in syncing. + pub num_peers: u32, + /// Number of peers known to `SyncingEngine` (both full and light). + pub num_connected_peers: u32, + /// Number of blocks queued for import + pub queued_blocks: u32, + /// State sync status in progress, if any. + pub state_sync: Option, + /// Warp sync in progress, if any. + pub warp_sync: Option>, +} + +/// A peer did not behave as expected and should be reported. 
+#[derive(Debug, Clone, PartialEq, Eq)] +pub struct BadPeer(pub PeerId, pub ReputationChange); + +impl fmt::Display for BadPeer { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "Bad peer {}; Reputation change: {:?}", self.0, self.1) + } +} + +impl std::error::Error for BadPeer {} + +#[derive(Debug)] +pub struct Metrics { + pub queued_blocks: u32, + pub fork_targets: u32, + pub justifications: crate::request_metrics::Metrics, +} + +#[derive(Debug)] +pub enum PeerRequest { + Block(BlockRequest), + State, + WarpProof, +} + +#[derive(Debug)] +pub enum PeerRequestType { + Block, + State, + WarpProof, +} + +impl PeerRequest { + pub fn get_type(&self) -> PeerRequestType { + match self { + PeerRequest::Block(_) => PeerRequestType::Block, + PeerRequest::State => PeerRequestType::State, + PeerRequest::WarpProof => PeerRequestType::WarpProof, + } + } +} + +/// Wrapper for implementation-specific state request. +/// +/// NOTE: Implementation must be able to encode and decode it for network purposes. +pub struct OpaqueStateRequest(pub Box); + +impl fmt::Debug for OpaqueStateRequest { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_struct("OpaqueStateRequest").finish() + } +} + +/// Wrapper for implementation-specific state response. +/// +/// NOTE: Implementation must be able to encode and decode it for network purposes. +pub struct OpaqueStateResponse(pub Box); + +impl fmt::Debug for OpaqueStateResponse { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_struct("OpaqueStateResponse").finish() + } +} + +/// Provides high-level status of syncing. +#[async_trait::async_trait] +pub trait SyncStatusProvider: Send + Sync { + /// Get high-level view of the syncing status. + async fn status(&self) -> Result, ()>; +} + +#[async_trait::async_trait] +impl SyncStatusProvider for Arc +where + T: ?Sized, + T: SyncStatusProvider, + Block: BlockT, +{ + async fn status(&self) -> Result, ()> { + T::status(self).await + } +} + +/// Syncing-related events that other protocols can subscribe to. +pub enum SyncEvent { + /// Peer that the syncing implementation is tracking connected. + PeerConnected(PeerId), + + /// Peer that the syncing implementation was tracking disconnected. + PeerDisconnected(PeerId), +} + +pub trait SyncEventStream: Send + Sync { + /// Subscribe to syncing-related events. + fn event_stream(&self, name: &'static str) -> Pin + Send>>; +} + +impl SyncEventStream for Arc +where + T: ?Sized, + T: SyncEventStream, +{ + fn event_stream(&self, name: &'static str) -> Pin + Send>> { + T::event_stream(self, name) + } +} diff --git a/substrate/client/network/sync/src/warp.rs b/substrate/client/network/sync/src/warp.rs index 74835a6e015e3e6d8506950a794a1659ae3fdae0..169b3de35aa1bfe5f11098680b9243bc3213d263 100644 --- a/substrate/client/network/sync/src/warp.rs +++ b/substrate/client/network/sync/src/warp.rs @@ -18,28 +18,107 @@ //! Warp sync support. 
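The `types.rs` additions above are meant to be consumed by other crates through the `SyncStatusProvider` and `SyncEventStream` traits (the `sc-network-transactions` hunk later in this diff imports `SyncEvent` and `SyncEventStream` from the crate root). Below is a hedged sketch of such a consumer; `log_sync_activity` and the `"doc-example"` stream name are hypothetical, and the `sc_network_sync::types` path for `SyncStatusProvider` is an assumption for the example.

```rust
use futures::StreamExt;
use sc_network_sync::{types::SyncStatusProvider, SyncEvent, SyncEventStream};
use sp_runtime::traits::Block as BlockT;

/// Hypothetical consumer of the syncing APIs defined above.
async fn log_sync_activity<Block, S>(sync: &S)
where
    Block: BlockT,
    S: SyncStatusProvider<Block> + SyncEventStream,
{
    // `status()` resolves to `Err(())` once the syncing engine has terminated.
    if let Ok(status) = sync.status().await {
        log::info!(
            "major syncing: {}, peers connected: {}",
            status.state.is_major_syncing(),
            status.num_connected_peers,
        );
    }

    // Other protocols (e.g. transaction propagation) subscribe to peer events like this.
    let mut events = sync.event_stream("doc-example");
    while let Some(event) = events.next().await {
        match event {
            SyncEvent::PeerConnected(peer) => log::debug!("syncing now tracks {peer}"),
            SyncEvent::PeerDisconnected(peer) => log::debug!("syncing dropped {peer}"),
        }
    }
}
```

The blanket `Arc<T>` implementations above exist precisely so that handles like `SyncingService` can be shared and still passed wherever these trait bounds are required.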
+pub use sp_consensus_grandpa::{AuthorityList, SetId};
+
 use crate::{
     schema::v1::{StateRequest, StateResponse},
     state::{ImportResult, StateSync},
 };
+use codec::{Decode, Encode};
 use futures::channel::oneshot;
 use log::error;
 use sc_client_api::ProofProvider;
-use sc_network_common::sync::{
-    message::{BlockAttributes, BlockData, BlockRequest, Direction, FromBlock},
-    warp::{
-        EncodedProof, VerificationResult, WarpProofRequest, WarpSyncPhase, WarpSyncProgress,
-        WarpSyncProvider,
-    },
+use sc_network_common::sync::message::{
+    BlockAttributes, BlockData, BlockRequest, Direction, FromBlock,
 };
 use sp_blockchain::HeaderBackend;
-use sp_consensus_grandpa::{AuthorityList, SetId};
 use sp_runtime::traits::{Block as BlockT, Header, NumberFor, Zero};
-use std::sync::Arc;
+use std::{fmt, sync::Arc};
 
 /// Log target for this file.
 const LOG_TARGET: &'static str = "sync";
 
+/// Scale-encoded warp sync proof response.
+pub struct EncodedProof(pub Vec<u8>);
+
+/// Warp sync request
+#[derive(Encode, Decode, Debug, Clone)]
+pub struct WarpProofRequest<B: BlockT> {
+    /// Start collecting proofs from this block.
+    pub begin: B::Hash,
+}
+
+/// Proof verification result.
+pub enum VerificationResult<Block: BlockT> {
+    /// Proof is valid, but the target was not reached.
+    Partial(SetId, AuthorityList, Block::Hash),
+    /// Target finality is proved.
+    Complete(SetId, AuthorityList, Block::Header),
+}
+
+/// Warp sync backend. Handles retrieving and verifying warp sync proofs.
+pub trait WarpSyncProvider<Block: BlockT>: Send + Sync {
+    /// Generate proof starting at given block hash. The proof is accumulated until maximum proof
+    /// size is reached.
+    fn generate(
+        &self,
+        start: Block::Hash,
+    ) -> Result<EncodedProof, Box<dyn std::error::Error + Send + Sync>>;
+    /// Verify warp proof against current set of authorities.
+    fn verify(
+        &self,
+        proof: &EncodedProof,
+        set_id: SetId,
+        authorities: AuthorityList,
+    ) -> Result<VerificationResult<Block>, Box<dyn std::error::Error + Send + Sync>>;
+    /// Get current list of authorities. This is supposed to be genesis authorities when starting
+    /// sync.
+    fn current_authorities(&self) -> AuthorityList;
+}
+
+/// Reported warp sync phase.
+#[derive(Clone, Eq, PartialEq, Debug)]
+pub enum WarpSyncPhase<Block: BlockT> {
+    /// Waiting for peers to connect.
+    AwaitingPeers { required_peers: usize },
+    /// Waiting for target block to be received.
+    AwaitingTargetBlock,
+    /// Downloading and verifying grandpa warp proofs.
+    DownloadingWarpProofs,
+    /// Downloading target block.
+    DownloadingTargetBlock,
+    /// Downloading state data.
+    DownloadingState,
+    /// Importing state.
+    ImportingState,
+    /// Downloading block history.
+    DownloadingBlocks(NumberFor<Block>),
+}
+
+impl<Block: BlockT> fmt::Display for WarpSyncPhase<Block> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self {
+            Self::AwaitingPeers { required_peers } =>
+                write!(f, "Waiting for {required_peers} peers to be connected"),
+            Self::AwaitingTargetBlock => write!(f, "Waiting for target block to be received"),
+            Self::DownloadingWarpProofs => write!(f, "Downloading finality proofs"),
+            Self::DownloadingTargetBlock => write!(f, "Downloading target block"),
+            Self::DownloadingState => write!(f, "Downloading state"),
+            Self::ImportingState => write!(f, "Importing state"),
+            Self::DownloadingBlocks(n) => write!(f, "Downloading block history (#{})", n),
+        }
+    }
+}
+
+/// Reported warp sync progress.
+#[derive(Clone, Eq, PartialEq, Debug)]
+pub struct WarpSyncProgress<Block: BlockT> {
+    /// Current warp sync phase.
+    pub phase: WarpSyncPhase<Block>,
+    /// Total bytes downloaded so far.
+    pub total_bytes: u64,
+}
+
 /// The different types of warp syncing, passed to `build_network`.
pub enum WarpSyncParams { /// Standard warp sync for the chain. diff --git a/substrate/client/network/sync/src/warp_request_handler.rs b/substrate/client/network/sync/src/warp_request_handler.rs index 0e502a6dba59e55ea619ab890a0fc7b3ea34af35..b23f30c50dd24b9305f32790403bccc10668e631 100644 --- a/substrate/client/network/sync/src/warp_request_handler.rs +++ b/substrate/client/network/sync/src/warp_request_handler.rs @@ -20,13 +20,13 @@ use codec::Decode; use futures::{channel::oneshot, stream::StreamExt}; use log::debug; +use crate::warp::{EncodedProof, WarpProofRequest, WarpSyncProvider}; use sc_network::{ config::ProtocolId, request_responses::{ IncomingRequest, OutgoingResponse, ProtocolConfig as RequestResponseConfig, }, }; -use sc_network_common::sync::warp::{EncodedProof, WarpProofRequest, WarpSyncProvider}; use sp_runtime::traits::Block as BlockT; use std::{sync::Arc, time::Duration}; diff --git a/substrate/client/network/test/src/block_import.rs b/substrate/client/network/test/src/block_import.rs index 25e3b9bee87f1dc472f5af68d1dc0da393fff90b..35795432b37f4d47904c50f6507a7e555ca798a4 100644 --- a/substrate/client/network/test/src/block_import.rs +++ b/substrate/client/network/test/src/block_import.rs @@ -20,7 +20,6 @@ use super::*; use futures::executor::block_on; -use sc_block_builder::BlockBuilderProvider; use sc_consensus::{ import_single_block, BasicQueue, BlockImportError, BlockImportStatus, ImportedAux, IncomingBlock, @@ -34,7 +33,14 @@ use substrate_test_runtime_client::{ fn prepare_good_block() -> (TestClient, Hash, u64, PeerId, IncomingBlock) { let mut client = substrate_test_runtime_client::new(); - let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block = BlockBuilderBuilder::new(&client) + .on_parent_block(client.chain_info().best_hash) + .with_parent_block_number(client.chain_info().best_number) + .build() + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::File, block)).unwrap(); let (hash, number) = (client.block_hash(1).unwrap().unwrap(), 1); diff --git a/substrate/client/network/test/src/lib.rs b/substrate/client/network/test/src/lib.rs index 11505903e35d70a04c5dd04d2a0935db8cfd50a3..cfc3cb7af3fcb9580d08ce6a6762e68485bb8ae7 100644 --- a/substrate/client/network/test/src/lib.rs +++ b/substrate/client/network/test/src/lib.rs @@ -38,7 +38,7 @@ use futures::{channel::oneshot, future::BoxFuture, pin_mut, prelude::*}; use libp2p::{build_multiaddr, PeerId}; use log::trace; use parking_lot::Mutex; -use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; +use sc_block_builder::{BlockBuilder, BlockBuilderBuilder}; use sc_client_api::{ backend::{AuxStore, Backend, Finalizer}, BlockBackend, BlockImportNotification, BlockchainEvents, FinalityNotification, @@ -60,16 +60,15 @@ use sc_network::{ Multiaddr, NetworkBlock, NetworkService, NetworkStateInfo, NetworkSyncForkRequest, NetworkWorker, }; -use sc_network_common::{ - role::Roles, - sync::warp::{AuthorityList, EncodedProof, SetId, VerificationResult, WarpSyncProvider}, -}; +use sc_network_common::role::Roles; use sc_network_light::light_client_requests::handler::LightClientRequestHandler; use sc_network_sync::{ block_request_handler::BlockRequestHandler, - service::{chain_sync::SyncingService, network::NetworkServiceProvider}, + service::{network::NetworkServiceProvider, syncing_service::SyncingService}, state_request_handler::StateRequestHandler, - warp::WarpSyncParams, + warp::{ + AuthorityList, EncodedProof, SetId, VerificationResult, WarpSyncParams, 
WarpSyncProvider, + }, warp_request_handler, }; use sc_service::client::Client; @@ -306,9 +305,7 @@ where edit_block: F, ) -> Vec where - F: FnMut( - BlockBuilder, - ) -> Block, + F: FnMut(BlockBuilder) -> Block, { let best_hash = self.client.info().best_hash; self.generate_blocks_at( @@ -332,9 +329,7 @@ where fork_choice: ForkChoiceStrategy, ) -> Vec where - F: FnMut( - BlockBuilder, - ) -> Block, + F: FnMut(BlockBuilder) -> Block, { let best_hash = self.client.info().best_hash; self.generate_blocks_at( @@ -363,15 +358,18 @@ where fork_choice: ForkChoiceStrategy, ) -> Vec where - F: FnMut( - BlockBuilder, - ) -> Block, + F: FnMut(BlockBuilder) -> Block, { let mut hashes = Vec::with_capacity(count); let full_client = self.client.as_client(); let mut at = full_client.block_hash_from_id(&at).unwrap().unwrap(); for _ in 0..count { - let builder = full_client.new_block_at(at, Default::default(), false).unwrap(); + let builder = BlockBuilderBuilder::new(&*full_client) + .on_parent_block(at) + .fetch_parent_block_number(&*full_client) + .unwrap() + .build() + .unwrap(); let block = edit_block(builder); let hash = block.header.hash(); trace!( diff --git a/substrate/client/network/transactions/Cargo.toml b/substrate/client/network/transactions/Cargo.toml index 5e42465974bb33439d7c15a5fe9dd5176f78d600..2a6aa4b3a40aee615ea1cce248b2681b33339610 100644 --- a/substrate/client/network/transactions/Cargo.toml +++ b/substrate/client/network/transactions/Cargo.toml @@ -21,6 +21,7 @@ log = "0.4.17" prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } sc-network = { path = ".." } sc-network-common = { path = "../common" } +sc-network-sync = { path = "../sync" } sc-utils = { path = "../../utils" } sp-runtime = { path = "../../../primitives/runtime" } sp-consensus = { path = "../../../primitives/consensus/common" } diff --git a/substrate/client/network/transactions/src/lib.rs b/substrate/client/network/transactions/src/lib.rs index b46733d427230e3a8e061ab427dd1a28114e247f..1b97d4b96c9764f6fc18d2de2dfaeee7c530a2c6 100644 --- a/substrate/client/network/transactions/src/lib.rs +++ b/substrate/client/network/transactions/src/lib.rs @@ -42,11 +42,8 @@ use sc_network::{ utils::{interval, LruHashSet}, NetworkEventStream, NetworkNotification, NetworkPeers, }; -use sc_network_common::{ - role::ObservedRole, - sync::{SyncEvent, SyncEventStream}, - ExHashT, -}; +use sc_network_common::{role::ObservedRole, ExHashT}; +use sc_network_sync::{SyncEvent, SyncEventStream}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_runtime::traits::Block as BlockT; diff --git a/substrate/client/offchain/src/lib.rs b/substrate/client/offchain/src/lib.rs index a11ac7d86ecb8200502f4375c13a5f850096ee68..756ab77ff94eb2cfd85765ecb40d5ed68ea5438b 100644 --- a/substrate/client/offchain/src/lib.rs +++ b/substrate/client/offchain/src/lib.rs @@ -328,12 +328,13 @@ mod tests { use super::*; use futures::executor::block_on; use libp2p::{Multiaddr, PeerId}; - use sc_block_builder::BlockBuilderProvider as _; + use sc_block_builder::BlockBuilderBuilder; use sc_client_api::Backend as _; use sc_network::{config::MultiaddrWithPeerId, types::ProtocolName, ReputationChange}; use sc_transaction_pool::BasicPool; use sc_transaction_pool_api::{InPoolTransaction, TransactionPool}; use sp_consensus::BlockOrigin; + use sp_runtime::traits::Block as BlockT; use std::{collections::HashSet, sync::Arc}; use substrate_test_runtime_client::{ runtime::{ @@ -470,16 +471,24 @@ 
mod tests { let key = &b"hello"[..]; let value = &b"world"[..]; - let mut block_builder = client.new_block(Default::default()).unwrap(); + let mut block_builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap(); let ext = ExtrinsicBuilder::new_offchain_index_set(key.to_vec(), value.to_vec()).build(); block_builder.push(ext).unwrap(); let block = block_builder.build().unwrap().block; - block_on(client.import(BlockOrigin::Own, block)).unwrap(); + block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); assert_eq!(value, &offchain_db.get(sp_offchain::STORAGE_PREFIX, &key).unwrap()); - let mut block_builder = client.new_block(Default::default()).unwrap(); + let mut block_builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(block.hash()) + .with_parent_block_number(1) + .build() + .unwrap(); let ext = ExtrinsicBuilder::new_offchain_index_clear(key.to_vec()).nonce(1).build(); block_builder.push(ext).unwrap(); diff --git a/substrate/client/rpc-api/Cargo.toml b/substrate/client/rpc-api/Cargo.toml index 9dca2e72fcdd5d5d3ce6deecbd794e5b2e1c02e4..e72bbe48ee3e01349796b57c558a0765665068e9 100644 --- a/substrate/client/rpc-api/Cargo.toml +++ b/substrate/client/rpc-api/Cargo.toml @@ -14,9 +14,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.188", features = ["derive"] } -serde_json = "1.0.107" +serde_json = "1.0.108" thiserror = "1.0" sc-chain-spec = { path = "../chain-spec" } sc-mixnet = { path = "../mixnet" } diff --git a/substrate/client/rpc-servers/Cargo.toml b/substrate/client/rpc-servers/Cargo.toml index 674a8db39cb576f6a0461d43c2fe8c7e0acb2ff2..a7cc374f97a1e011b26f58d16850eabfde57fd65 100644 --- a/substrate/client/rpc-servers/Cargo.toml +++ b/substrate/client/rpc-servers/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] jsonrpsee = { version = "0.16.2", features = ["server"] } log = "0.4.17" -serde_json = "1.0.107" +serde_json = "1.0.108" tokio = { version = "1.22.0", features = ["parking_lot"] } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" } tower-http = { version = "0.4.0", features = ["cors"] } diff --git a/substrate/client/rpc-spec-v2/Cargo.toml b/substrate/client/rpc-spec-v2/Cargo.toml index 1eaed65706e049f0e1e4765334f76b26e723e088..ca61286ddfa074db5cfc4bf2ba916fc057d2f831 100644 --- a/substrate/client/rpc-spec-v2/Cargo.toml +++ b/substrate/client/rpc-spec-v2/Cargo.toml @@ -21,6 +21,7 @@ sc-transaction-pool-api = { path = "../transaction-pool/api" } sp-core = { path = "../../primitives/core" } sp-runtime = { path = "../../primitives/runtime" } sp-api = { path = "../../primitives/api" } +sp-rpc = { path = "../../primitives/rpc" } sp-blockchain = { path = "../../primitives/blockchain" } sp-version = { path = "../../primitives/version" } sc-client-api = { path = "../api" } @@ -37,7 +38,7 @@ array-bytes = "6.1" log = "0.4.17" futures-util = { version = "0.3.19", default-features = false } [dev-dependencies] -serde_json = "1.0" +serde_json = "1.0.108" tokio = { version = "1.22.0", features = ["macros"] } substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } substrate-test-runtime = { path = 
"../../test-utils/runtime" } diff --git a/substrate/client/rpc-spec-v2/src/archive/tests.rs b/substrate/client/rpc-spec-v2/src/archive/tests.rs index 36f7716e393cee5f46d68f63e5b495ef1f98b5f7..6b288c2c95423d15f822f96c7c1fa8e5c479568a 100644 --- a/substrate/client/rpc-spec-v2/src/archive/tests.rs +++ b/substrate/client/rpc-spec-v2/src/archive/tests.rs @@ -27,10 +27,13 @@ use jsonrpsee::{ types::{error::CallError, EmptyServerParams as EmptyParams}, RpcModule, }; -use sc_block_builder::BlockBuilderProvider; +use sc_block_builder::BlockBuilderBuilder; use sp_blockchain::HeaderBackend; use sp_consensus::BlockOrigin; -use sp_runtime::SaturatedConversion; +use sp_runtime::{ + traits::{Block as BlockT, Header as HeaderT}, + SaturatedConversion, +}; use std::sync::Arc; use substrate_test_runtime::Transfer; use substrate_test_runtime_client::{ @@ -72,7 +75,12 @@ async fn archive_body() { assert!(res.is_none()); // Import a new block with an extrinsic. - let mut builder = client.new_block(Default::default()).unwrap(); + let mut builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap(); + builder .push_transfer(runtime::Transfer { from: AccountKeyring::Alice.into(), @@ -101,7 +109,12 @@ async fn archive_header() { assert!(res.is_none()); // Import a new block with an extrinsic. - let mut builder = client.new_block(Default::default()).unwrap(); + let mut builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap(); + builder .push_transfer(runtime::Transfer { from: AccountKeyring::Alice.into(), @@ -147,24 +160,57 @@ async fn archive_hash_by_height() { // ^^^ h = N // ^^^ h = N + 1 // ^^^ h = N + 2 - let finalized = client.new_block(Default::default()).unwrap().build().unwrap().block; + let finalized = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap() + .build() + .unwrap() + .block; let finalized_hash = finalized.header.hash(); client.import(BlockOrigin::Own, finalized.clone()).await.unwrap(); client.finalize_block(finalized_hash, None).unwrap(); - let block_1 = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block_1 = BlockBuilderBuilder::new(&*client) + .on_parent_block(finalized.hash()) + .with_parent_block_number(*finalized.header().number()) + .build() + .unwrap() + .build() + .unwrap() + .block; let block_1_hash = block_1.header.hash(); client.import(BlockOrigin::Own, block_1.clone()).await.unwrap(); - let block_2 = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block_2 = BlockBuilderBuilder::new(&*client) + .on_parent_block(block_1.hash()) + .with_parent_block_number(*block_1.header().number()) + .build() + .unwrap() + .build() + .unwrap() + .block; let block_2_hash = block_2.header.hash(); client.import(BlockOrigin::Own, block_2.clone()).await.unwrap(); - let block_3 = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block_3 = BlockBuilderBuilder::new(&*client) + .on_parent_block(block_2.hash()) + .with_parent_block_number(*block_2.header().number()) + .build() + .unwrap() + .build() + .unwrap() + .block; let block_3_hash = block_3.header.hash(); client.import(BlockOrigin::Own, block_3.clone()).await.unwrap(); // Import block 4 fork. 
- let mut block_builder = client.new_block_at(block_1_hash, Default::default(), false).unwrap(); + let mut block_builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(block_1_hash) + .with_parent_block_number(*block_1.header().number()) + .build() + .unwrap(); + // This push is required as otherwise block 3 has the same hash as block 1 and won't get // imported block_builder @@ -238,7 +284,14 @@ async fn archive_call() { .unwrap(); assert_matches!(result, MethodResult::Err(_)); - let block_1 = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block_1 = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap() + .build() + .unwrap() + .block; let block_1_hash = block_1.header.hash(); client.import(BlockOrigin::Own, block_1.clone()).await.unwrap(); diff --git a/substrate/client/rpc-spec-v2/src/chain_head/api.rs b/substrate/client/rpc-spec-v2/src/chain_head/api.rs index d93c4018b60faee0928204f1a6ecdfb75dab845c..9ae801379559409ea8ad72e1a29c3a95f0e9919a 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/api.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/api.rs @@ -21,6 +21,7 @@ //! API trait of the chain head. use crate::chain_head::event::{FollowEvent, MethodResponse, StorageQuery}; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; +use sp_rpc::list::ListOrValue; #[rpc(client, server)] pub trait ChainHeadApi { @@ -73,14 +74,6 @@ pub trait ChainHeadApi { hash: Hash, ) -> RpcResult>; - /// Get the chain's genesis hash. - /// - /// # Unstable - /// - /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_unstable_genesisHash", blocking)] - fn chain_head_unstable_genesis_hash(&self) -> RpcResult; - /// Returns storage entries at a specific block's state. /// /// # Unstable @@ -109,16 +102,22 @@ pub trait ChainHeadApi { call_parameters: String, ) -> RpcResult; - /// Unpin a block reported by the `follow` method. + /// Unpin a block or multiple blocks reported by the `follow` method. /// /// Ongoing operations that require the provided block /// will continue normally. /// + /// When this method returns an error, it is guaranteed that no blocks have been unpinned. + /// /// # Unstable /// /// This method is unstable and subject to change in the future. #[method(name = "chainHead_unstable_unpin", blocking)] - fn chain_head_unstable_unpin(&self, follow_subscription: String, hash: Hash) -> RpcResult<()>; + fn chain_head_unstable_unpin( + &self, + follow_subscription: String, + hash_or_hashes: ListOrValue, + ) -> RpcResult<()>; /// Resumes a storage fetch started with `chainHead_storage` after it has generated an /// `operationWaitingForContinue` event. 
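Not part of the patch: a minimal sketch of calling the reworked `chainHead_unstable_unpin`, which now takes a `ListOrValue<Hash>` and therefore accepts either a single hash or an array of hashes. It mirrors the test-style `Methods::call` invocations used later in this diff; `sub_id`, `hash_a` and `hash_b` are hypothetical values taken from an active `chainHead_unstable_follow` subscription.

use jsonrpsee::{core::error::Error, core::server::rpc_module::Methods, rpc_params};

// Unpin one block, then several blocks in a single call, over the updated API.
async fn unpin_examples(
	api: &Methods,
	sub_id: &str,
	hash_a: &str,
	hash_b: &str,
) -> Result<(), Error> {
	// A bare hash is decoded as `ListOrValue::Value`.
	let _: () = api.call("chainHead_unstable_unpin", rpc_params![sub_id, hash_a]).await?;

	// A JSON array is decoded as `ListOrValue::List`. If any hash is not pinned for
	// `sub_id`, the call errors and no block is unpinned.
	let _: () = api
		.call("chainHead_unstable_unpin", rpc_params![sub_id, vec![hash_a, hash_b]])
		.await?;

	Ok(())
}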
diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs index a8c1c4f7e083d86b19906d8845a5ee84b3b5ca6e..866701a7dbf899924c22eb1bc926b56b9934d110 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs @@ -48,6 +48,7 @@ use sc_client_api::{ use sp_api::CallApiAt; use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; use sp_core::{traits::CallContext, Bytes}; +use sp_rpc::list::ListOrValue; use sp_runtime::traits::Block as BlockT; use std::{marker::PhantomData, sync::Arc, time::Duration}; @@ -106,8 +107,6 @@ pub struct ChainHead, Block: BlockT, Client> { executor: SubscriptionTaskExecutor, /// Keep track of the pinned blocks for each subscription. subscriptions: Arc>, - /// The hexadecimal encoded hash of the genesis block. - genesis_hash: String, /// The maximum number of items reported by the `chainHead_storage` before /// pagination is required. operation_max_storage_items: usize, @@ -117,14 +116,12 @@ pub struct ChainHead, Block: BlockT, Client> { impl, Block: BlockT, Client> ChainHead { /// Create a new [`ChainHead`]. - pub fn new>( + pub fn new( client: Arc, backend: Arc, executor: SubscriptionTaskExecutor, - genesis_hash: GenesisHash, config: ChainHeadConfig, ) -> Self { - let genesis_hash = hex_string(&genesis_hash.as_ref()); Self { client, backend: backend.clone(), @@ -136,7 +133,6 @@ impl, Block: BlockT, Client> ChainHead { backend, )), operation_max_storage_items: config.operation_max_storage_items, - genesis_hash, _phantom: PhantomData, } } @@ -314,10 +310,6 @@ where .map_err(Into::into) } - fn chain_head_unstable_genesis_hash(&self) -> RpcResult { - Ok(self.genesis_hash.clone()) - } - fn chain_head_unstable_storage( &self, follow_subscription: String, @@ -432,9 +424,16 @@ where fn chain_head_unstable_unpin( &self, follow_subscription: String, - hash: Block::Hash, + hash_or_hashes: ListOrValue, ) -> RpcResult<()> { - match self.subscriptions.unpin_block(&follow_subscription, hash) { + let result = match hash_or_hashes { + ListOrValue::Value(hash) => + self.subscriptions.unpin_blocks(&follow_subscription, [hash]), + ListOrValue::List(hashes) => + self.subscriptions.unpin_blocks(&follow_subscription, hashes), + }; + + match result { Ok(()) => Ok(()), Err(SubscriptionManagementError::SubscriptionAbsent) => { // Invalid invalid subscription ID. diff --git a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs index d6f64acd63f5f2f22584e965f5ba5f5abd3e2f33..abd42ad96785d22a6ba7d70e998c760cc1296d62 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs @@ -750,22 +750,36 @@ impl> SubscriptionsInner { } } - pub fn unpin_block( + pub fn unpin_blocks( &mut self, sub_id: &str, - hash: Block::Hash, + hashes: impl IntoIterator + Clone, ) -> Result<(), SubscriptionManagementError> { let Some(sub) = self.subs.get_mut(sub_id) else { return Err(SubscriptionManagementError::SubscriptionAbsent) }; - // Check that unpin was not called before and the block was pinned - // for this subscription. - if !sub.unregister_block(hash) { - return Err(SubscriptionManagementError::BlockHashAbsent) + // Ensure that all blocks are part of the subscription before removing individual + // blocks. 
+ for hash in hashes.clone() { + if !sub.contains_block(hash) { + return Err(SubscriptionManagementError::BlockHashAbsent); + } + } + + // Note: this needs to be separate from the global mappings to avoid the borrow checker + // thinking we borrow `&mut self` twice: once from `self.subs.get_mut` and once from + // `self.global_unregister_block`. The borrowing is actually correct, since different + // fields of the structure are borrowed, one at a time. + for hash in hashes.clone() { + sub.unregister_block(hash); + } + + // Blocks have been removed from the subscription. Remove them from the global tracking. + for hash in hashes { + self.global_unregister_block(hash); } - self.global_unregister_block(hash); Ok(()) } @@ -806,7 +820,7 @@ impl> SubscriptionsInner { #[cfg(test)] mod tests { use super::*; - use sc_block_builder::BlockBuilderProvider; + use sc_block_builder::BlockBuilderBuilder; use sc_service::client::new_in_mem; use sp_consensus::BlockOrigin; use sp_core::{testing::TaskExecutor, H256}; @@ -1004,8 +1018,15 @@ mod tests { fn subscription_check_block() { let (backend, mut client) = init_backend(); - let block = client.new_block(Default::default()).unwrap().build().unwrap().block; - let hash = block.header.hash(); + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap() + .build() + .unwrap() + .block; + let hash = block.hash(); futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); let mut subs = @@ -1022,11 +1043,11 @@ mod tests { assert_eq!(block.has_runtime(), true); let invalid_id = "abc-invalid".to_string(); - let err = subs.unpin_block(&invalid_id, hash).unwrap_err(); + let err = subs.unpin_blocks(&invalid_id, vec![hash]).unwrap_err(); assert_eq!(err, SubscriptionManagementError::SubscriptionAbsent); // Unpin the block. - subs.unpin_block(&id, hash).unwrap(); + subs.unpin_blocks(&id, vec![hash]).unwrap(); let err = subs.lock_block(&id, hash, 1).unwrap_err(); assert_eq!(err, SubscriptionManagementError::BlockHashAbsent); } @@ -1034,7 +1055,14 @@ mod tests { #[test] fn subscription_ref_count() { let (backend, mut client) = init_backend(); - let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap() + .build() + .unwrap() + .block; let hash = block.header.hash(); futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); @@ -1063,13 +1091,13 @@ mod tests { // Ensure the block propagated to the subscription. subs.subs.get(&id_second).unwrap().blocks.get(&hash).unwrap(); - subs.unpin_block(&id, hash).unwrap(); + subs.unpin_blocks(&id, vec![hash]).unwrap(); assert_eq!(*subs.global_blocks.get(&hash).unwrap(), 1); // Cannot unpin a block twice for the same subscription. - let err = subs.unpin_block(&id, hash).unwrap_err(); + let err = subs.unpin_blocks(&id, vec![hash]).unwrap_err(); assert_eq!(err, SubscriptionManagementError::BlockHashAbsent); - subs.unpin_block(&id_second, hash).unwrap(); + subs.unpin_blocks(&id_second, vec![hash]).unwrap(); // Block unregistered from the memory. 
assert!(subs.global_blocks.get(&hash).is_none()); } @@ -1077,13 +1105,34 @@ mod tests { #[test] fn subscription_remove_subscription() { let (backend, mut client) = init_backend(); - let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap() + .build() + .unwrap() + .block; let hash_1 = block.header.hash(); futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); - let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(hash_1) + .with_parent_block_number(1) + .build() + .unwrap() + .build() + .unwrap() + .block; let hash_2 = block.header.hash(); futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); - let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(hash_2) + .with_parent_block_number(2) + .build() + .unwrap() + .build() + .unwrap() + .block; let hash_3 = block.header.hash(); futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); @@ -1122,13 +1171,34 @@ mod tests { #[test] fn subscription_check_limits() { let (backend, mut client) = init_backend(); - let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap() + .build() + .unwrap() + .block; let hash_1 = block.header.hash(); futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); - let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(hash_1) + .with_parent_block_number(1) + .build() + .unwrap() + .build() + .unwrap() + .block; let hash_2 = block.header.hash(); futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); - let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(hash_2) + .with_parent_block_number(2) + .build() + .unwrap() + .build() + .unwrap() + .block; let hash_3 = block.header.hash(); futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); @@ -1173,13 +1243,34 @@ mod tests { #[test] fn subscription_check_limits_with_duration() { let (backend, mut client) = init_backend(); - let block = client.new_block(Default::default()).unwrap().build().unwrap().block; - let hash_1 = block.header.hash(); + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap() + .build() + .unwrap() + .block; + let hash_1 = block.hash(); futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); - let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(hash_1) + .with_parent_block_number(1) + .build() + .unwrap() + .build() + .unwrap() + .block; let hash_2 = block.header.hash(); futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); - let block = 
client.new_block(Default::default()).unwrap().build().unwrap().block; + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(hash_2) + .with_parent_block_number(2) + .build() + .unwrap() + .build() + .unwrap() + .block; let hash_3 = block.header.hash(); futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); diff --git a/substrate/client/rpc-spec-v2/src/chain_head/subscription/mod.rs b/substrate/client/rpc-spec-v2/src/chain_head/subscription/mod.rs index b25b1a4913b49f52035b17190f77df99f701c504..c830e662da2e5c3499f86ae1ddc40bd3b6f4e40a 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/subscription/mod.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/subscription/mod.rs @@ -94,22 +94,23 @@ impl> SubscriptionManagement { inner.pin_block(sub_id, hash) } - /// Unpin the block from the subscription. + /// Unpin the blocks from the subscription. /// - /// The last subscription that unpins the block is also unpinning the block - /// from the backend. + /// Blocks are reference counted, and when the last subscription unpins a given block, the block + /// is also unpinned from the backend. /// /// This method is called only once per subscription. /// - /// Returns an error if the block is not pinned for the subscription or - /// the subscription ID is invalid. - pub fn unpin_block( + /// Returns an error if the subscription ID is invalid, or any of the blocks are not pinned + /// for the subscription. When an error is returned, it is guaranteed that no blocks have + /// been unpinned. + pub fn unpin_blocks( &self, sub_id: &str, - hash: Block::Hash, + hashes: impl IntoIterator + Clone, ) -> Result<(), SubscriptionManagementError> { let mut inner = self.inner.write(); - inner.unpin_block(sub_id, hash) + inner.unpin_blocks(sub_id, hashes) } /// Ensure the block remains pinned until the return object is dropped. 
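Not part of the patch: a self-contained sketch of the all-or-nothing contract documented above for `unpin_blocks`, modelled with a plain `HashSet` standing in for the real pinned-block state. `UnpinError` and `unpin_blocks_all_or_nothing` are made-up names used only for illustration.

use std::collections::HashSet;

#[derive(Debug, PartialEq)]
enum UnpinError {
	BlockHashAbsent,
}

// Validate every hash first and only then mutate, so an error leaves the state untouched.
fn unpin_blocks_all_or_nothing(
	pinned: &mut HashSet<u64>,
	hashes: &[u64],
) -> Result<(), UnpinError> {
	if !hashes.iter().all(|hash| pinned.contains(hash)) {
		return Err(UnpinError::BlockHashAbsent)
	}
	for hash in hashes {
		pinned.remove(hash);
	}
	Ok(())
}

fn main() {
	let mut pinned: HashSet<u64> = HashSet::from([1, 2, 3]);
	// One unknown hash fails the whole call and nothing is unpinned.
	assert_eq!(unpin_blocks_all_or_nothing(&mut pinned, &[2, 9]), Err(UnpinError::BlockHashAbsent));
	assert_eq!(pinned.len(), 3);
	// A fully valid batch unpins every requested block.
	assert!(unpin_blocks_all_or_nothing(&mut pinned, &[2, 3]).is_ok());
	assert_eq!(pinned, HashSet::from([1]));
}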
diff --git a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs index 8aaeb413cdff9a6c797bc7ea4e975edb6808dd09..4e6775fe2809a8b800586b8c40f23cf627e27831 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs @@ -28,10 +28,10 @@ use futures::Future; use jsonrpsee::{ core::{error::Error, server::rpc_module::Subscription as RpcSubscription}, rpc_params, - types::{error::CallError, EmptyServerParams as EmptyParams}, + types::error::CallError, RpcModule, }; -use sc_block_builder::BlockBuilderProvider; +use sc_block_builder::BlockBuilderBuilder; use sc_client_api::ChildInfo; use sc_service::client::new_in_mem; use sp_api::BlockT; @@ -61,7 +61,6 @@ const MAX_PINNED_BLOCKS: usize = 32; const MAX_PINNED_SECS: u64 = 60; const MAX_OPERATIONS: usize = 16; const MAX_PAGINATION_LIMIT: usize = 5; -const CHAIN_GENESIS: [u8; 32] = [0; 32]; const INVALID_HASH: [u8; 32] = [1; 32]; const KEY: &[u8] = b":mock"; const VALUE: &[u8] = b"hello world"; @@ -111,7 +110,6 @@ async fn setup_api() -> ( client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -125,7 +123,14 @@ async fn setup_api() -> ( let sub_id = sub.subscription_id(); let sub_id = serde_json::to_string(&sub_id).unwrap(); - let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap() + .build() + .unwrap() + .block; client.import(BlockOrigin::Own, block.clone()).await.unwrap(); // Ensure the imported block is propagated and pinned for this subscription. @@ -155,7 +160,6 @@ async fn follow_subscription_produces_blocks() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -177,7 +181,14 @@ async fn follow_subscription_produces_blocks() { }); assert_eq!(event, expected); - let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap() + .build() + .unwrap() + .block; let best_hash = block.header.hash(); client.import(BlockOrigin::Own, block.clone()).await.unwrap(); @@ -217,7 +228,6 @@ async fn follow_with_runtime() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -255,8 +265,15 @@ async fn follow_with_runtime() { // Import a new block without runtime changes. // The runtime field must be None in this case. 
- let block = client.new_block(Default::default()).unwrap().build().unwrap().block; - let best_hash = block.header.hash(); + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap() + .build() + .unwrap() + .block; + let best_hash = block.hash(); client.import(BlockOrigin::Own, block.clone()).await.unwrap(); let event: FollowEvent = get_next_event(&mut sub).await; @@ -302,7 +319,11 @@ async fn follow_with_runtime() { ) .unwrap(); - let mut builder = client.new_block(Default::default()).unwrap(); + let mut builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(best_hash) + .with_parent_block_number(1) + .build() + .unwrap(); builder.push_storage_change(CODE.to_vec(), Some(wasm)).unwrap(); let block = builder.build().unwrap().block; let best_hash = block.header.hash(); @@ -320,31 +341,6 @@ async fn follow_with_runtime() { assert_eq!(event, expected); } -#[tokio::test] -async fn get_genesis() { - let builder = TestClientBuilder::new(); - let backend = builder.backend(); - let client = Arc::new(builder.build()); - - let api = ChainHead::new( - client.clone(), - backend, - Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, - ChainHeadConfig { - global_max_pinned_blocks: MAX_PINNED_BLOCKS, - subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), - subscription_max_ongoing_operations: MAX_OPERATIONS, - operation_max_storage_items: MAX_PAGINATION_LIMIT, - }, - ) - .into_rpc(); - - let genesis: String = - api.call("chainHead_unstable_genesisHash", EmptyParams::new()).await.unwrap(); - assert_eq!(genesis, hex_string(&CHAIN_GENESIS)); -} - #[tokio::test] async fn get_header() { let (_client, api, _sub, sub_id, block) = setup_api().await; @@ -411,7 +407,11 @@ async fn get_body() { ); // Import a block with extrinsics. - let mut builder = client.new_block(Default::default()).unwrap(); + let mut builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(block.hash()) + .with_parent_block_number(1) + .build() + .unwrap(); builder .push_transfer(runtime::Transfer { from: AccountKeyring::Alice.into(), @@ -540,7 +540,6 @@ async fn call_runtime_without_flag() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -554,7 +553,14 @@ async fn call_runtime_without_flag() { let sub_id = sub.subscription_id(); let sub_id = serde_json::to_string(&sub_id).unwrap(); - let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap() + .build() + .unwrap() + .block; let block_hash = format!("{:?}", block.header.hash()); client.import(BlockOrigin::Own, block.clone()).await.unwrap(); @@ -648,7 +654,11 @@ async fn get_storage_hash() { ); // Import a new block with storage changes. 
- let mut builder = client.new_block(Default::default()).unwrap(); + let mut builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(block.hash()) + .with_parent_block_number(1) + .build() + .unwrap(); builder.push_storage_change(KEY.to_vec(), Some(VALUE.to_vec())).unwrap(); let block = builder.build().unwrap().block; let block_hash = format!("{:?}", block.header.hash()); @@ -730,11 +740,15 @@ async fn get_storage_hash() { #[tokio::test] async fn get_storage_multi_query_iter() { - let (mut client, api, mut block_sub, sub_id, _) = setup_api().await; + let (mut client, api, mut block_sub, sub_id, block) = setup_api().await; let key = hex_string(&KEY); // Import a new block with storage changes. - let mut builder = client.new_block(Default::default()).unwrap(); + let mut builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(block.hash()) + .with_parent_block_number(1) + .build() + .unwrap(); builder.push_storage_change(KEY.to_vec(), Some(VALUE.to_vec())).unwrap(); let block = builder.build().unwrap().block; let block_hash = format!("{:?}", block.header.hash()); @@ -851,7 +865,7 @@ async fn get_storage_multi_query_iter() { #[tokio::test] async fn get_storage_value() { let (mut client, api, mut block_sub, sub_id, block) = setup_api().await; - let block_hash = format!("{:?}", block.header.hash()); + let block_hash = format!("{:?}", block.hash()); let invalid_hash = hex_string(&INVALID_HASH); let key = hex_string(&KEY); @@ -908,10 +922,14 @@ async fn get_storage_value() { ); // Import a new block with storage changes. - let mut builder = client.new_block(Default::default()).unwrap(); + let mut builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(block.hash()) + .with_parent_block_number(1) + .build() + .unwrap(); builder.push_storage_change(KEY.to_vec(), Some(VALUE.to_vec())).unwrap(); let block = builder.build().unwrap().block; - let block_hash = format!("{:?}", block.header.hash()); + let block_hash = format!("{:?}", block.hash()); client.import(BlockOrigin::Own, block.clone()).await.unwrap(); // Ensure the imported block is propagated and pinned for this subscription. 
@@ -1180,7 +1198,6 @@ async fn separate_operation_ids_for_subscriptions() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -1199,7 +1216,14 @@ async fn separate_operation_ids_for_subscriptions() { let sub_id_second = sub_second.subscription_id(); let sub_id_second = serde_json::to_string(&sub_id_second).unwrap(); - let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap() + .build() + .unwrap() + .block; client.import(BlockOrigin::Own, block.clone()).await.unwrap(); let block_hash = format!("{:?}", block.header.hash()); @@ -1261,7 +1285,6 @@ async fn follow_generates_initial_blocks() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -1274,18 +1297,35 @@ async fn follow_generates_initial_blocks() { let finalized_hash = client.info().finalized_hash; // Block tree: - // finalized -> block 1 -> block 2 -> block 4 - // -> block 1 -> block 3 - let block_1 = client.new_block(Default::default()).unwrap().build().unwrap().block; + // finalized -> block 1 -> block 2 -> block 3 + // -> block 1 -> block 2_f + let block_1 = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap() + .build() + .unwrap() + .block; let block_1_hash = block_1.header.hash(); client.import(BlockOrigin::Own, block_1.clone()).await.unwrap(); - let block_2 = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block_2 = BlockBuilderBuilder::new(&*client) + .on_parent_block(block_1_hash) + .with_parent_block_number(1) + .build() + .unwrap() + .build() + .unwrap() + .block; let block_2_hash = block_2.header.hash(); client.import(BlockOrigin::Own, block_2.clone()).await.unwrap(); - let mut block_builder = - client.new_block_at(block_1.header.hash(), Default::default(), false).unwrap(); + let mut block_builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(block_1_hash) + .with_parent_block_number(1) + .build() + .unwrap(); // This push is required as otherwise block 3 has the same hash as block 2 and won't get // imported block_builder @@ -1296,9 +1336,9 @@ async fn follow_generates_initial_blocks() { nonce: 0, }) .unwrap(); - let block_3 = block_builder.build().unwrap().block; - let block_3_hash = block_3.header.hash(); - client.import(BlockOrigin::Own, block_3.clone()).await.unwrap(); + let block_2_f = block_builder.build().unwrap().block; + let block_2_f_hash = block_2_f.header.hash(); + client.import(BlockOrigin::Own, block_2_f.clone()).await.unwrap(); let mut sub = api.subscribe("chainHead_unstable_follow", [false]).await.unwrap(); @@ -1333,7 +1373,7 @@ async fn follow_generates_initial_blocks() { // Check block 3. 
let event: FollowEvent = get_next_event(&mut sub).await; let expected = FollowEvent::NewBlock(NewBlock { - block_hash: format!("{:?}", block_3_hash), + block_hash: format!("{:?}", block_2_f_hash), parent_block_hash: format!("{:?}", block_1_hash), new_runtime: None, with_runtime: false, @@ -1346,14 +1386,21 @@ async fn follow_generates_initial_blocks() { }); assert_eq!(event, expected); - // Import block 4. - let block_4 = client.new_block(Default::default()).unwrap().build().unwrap().block; - let block_4_hash = block_4.header.hash(); - client.import(BlockOrigin::Own, block_4.clone()).await.unwrap(); + // Import block 3. + let block_3 = BlockBuilderBuilder::new(&*client) + .on_parent_block(block_2_hash) + .with_parent_block_number(2) + .build() + .unwrap() + .build() + .unwrap() + .block; + let block_3_hash = block_3.hash(); + client.import(BlockOrigin::Own, block_3.clone()).await.unwrap(); let event: FollowEvent = get_next_event(&mut sub).await; let expected = FollowEvent::NewBlock(NewBlock { - block_hash: format!("{:?}", block_4_hash), + block_hash: format!("{:?}", block_3_hash), parent_block_hash: format!("{:?}", block_2_hash), new_runtime: None, with_runtime: false, @@ -1362,23 +1409,23 @@ async fn follow_generates_initial_blocks() { let event: FollowEvent = get_next_event(&mut sub).await; let expected = FollowEvent::BestBlockChanged(BestBlockChanged { - best_block_hash: format!("{:?}", block_4_hash), + best_block_hash: format!("{:?}", block_3_hash), }); assert_eq!(event, expected); // Check the finalized event: // - blocks 1, 2, 4 from canonical chain are finalized // - block 3 from the fork is pruned - client.finalize_block(block_4_hash, None).unwrap(); + client.finalize_block(block_3_hash, None).unwrap(); let event: FollowEvent = get_next_event(&mut sub).await; let expected = FollowEvent::Finalized(Finalized { finalized_block_hashes: vec![ format!("{:?}", block_1_hash), format!("{:?}", block_2_hash), - format!("{:?}", block_4_hash), + format!("{:?}", block_3_hash), ], - pruned_block_hashes: vec![format!("{:?}", block_3_hash)], + pruned_block_hashes: vec![format!("{:?}", block_2_f_hash)], }); assert_eq!(event, expected); } @@ -1393,7 +1440,6 @@ async fn follow_exceeding_pinned_blocks() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: 2, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -1405,7 +1451,15 @@ async fn follow_exceeding_pinned_blocks() { let mut sub = api.subscribe("chainHead_unstable_follow", [false]).await.unwrap(); - let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, block.clone()).await.unwrap(); // Ensure the imported block is propagated and pinned for this subscription. @@ -1426,13 +1480,27 @@ async fn follow_exceeding_pinned_blocks() { // finalized_block -> block -> block2 // The first 2 blocks are pinned into the subscription, but the block2 will exceed the limit (2 // blocks). 
- let block2 = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block2 = BlockBuilderBuilder::new(&*client) + .on_parent_block(block.hash()) + .with_parent_block_number(1) + .build() + .unwrap() + .build() + .unwrap() + .block; client.import(BlockOrigin::Own, block2.clone()).await.unwrap(); assert_matches!(get_next_event::>(&mut sub).await, FollowEvent::Stop); // Subscription will not produce any more event for further blocks. - let block3 = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block3 = BlockBuilderBuilder::new(&*client) + .on_parent_block(block2.hash()) + .with_parent_block_number(2) + .build() + .unwrap() + .build() + .unwrap() + .block; client.import(BlockOrigin::Own, block3.clone()).await.unwrap(); assert!(sub.next::>().await.is_none()); @@ -1448,7 +1516,6 @@ async fn follow_with_unpin() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: 2, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -1462,7 +1529,14 @@ async fn follow_with_unpin() { let sub_id = sub.subscription_id(); let sub_id = serde_json::to_string(&sub_id).unwrap(); - let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap() + .build() + .unwrap() + .block; let block_hash = format!("{:?}", block.header.hash()); client.import(BlockOrigin::Own, block.clone()).await.unwrap(); @@ -1483,14 +1557,17 @@ async fn follow_with_unpin() { // Unpin an invalid subscription ID must return Ok(()). let invalid_hash = hex_string(&INVALID_HASH); let _res: () = api - .call("chainHead_unstable_unpin", ["invalid_sub_id", &invalid_hash]) + .call("chainHead_unstable_unpin", rpc_params!["invalid_sub_id", &invalid_hash]) .await .unwrap(); // Valid subscription with invalid block hash. let invalid_hash = hex_string(&INVALID_HASH); let err = api - .call::<_, serde_json::Value>("chainHead_unstable_unpin", [&sub_id, &invalid_hash]) + .call::<_, serde_json::Value>( + "chainHead_unstable_unpin", + rpc_params![&sub_id, &invalid_hash], + ) .await .unwrap_err(); assert_matches!(err, @@ -1498,12 +1575,22 @@ async fn follow_with_unpin() { ); // To not exceed the number of pinned blocks, we need to unpin before the next import. 
- let _res: () = api.call("chainHead_unstable_unpin", [&sub_id, &block_hash]).await.unwrap(); + let _res: () = api + .call("chainHead_unstable_unpin", rpc_params![&sub_id, &block_hash]) + .await + .unwrap(); // Block tree: // finalized_block -> block -> block2 // ^ has been unpinned - let block2 = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block2 = BlockBuilderBuilder::new(&*client) + .on_parent_block(block.hash()) + .with_parent_block_number(1) + .build() + .unwrap() + .build() + .unwrap() + .block; client.import(BlockOrigin::Own, block2.clone()).await.unwrap(); assert_matches!( @@ -1516,13 +1603,173 @@ async fn follow_with_unpin() { FollowEvent::BestBlockChanged(_) ); - let block3 = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block3 = BlockBuilderBuilder::new(&*client) + .on_parent_block(block2.hash()) + .with_parent_block_number(2) + .build() + .unwrap() + .build() + .unwrap() + .block; client.import(BlockOrigin::Own, block3.clone()).await.unwrap(); assert_matches!(get_next_event::>(&mut sub).await, FollowEvent::Stop); assert!(sub.next::>().await.is_none()); } +#[tokio::test] +async fn follow_with_multiple_unpin_hashes() { + let builder = TestClientBuilder::new(); + let backend = builder.backend(); + let mut client = Arc::new(builder.build()); + + let api = ChainHead::new( + client.clone(), + backend, + Arc::new(TaskExecutor::default()), + ChainHeadConfig { + global_max_pinned_blocks: MAX_PINNED_BLOCKS, + subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), + subscription_max_ongoing_operations: MAX_OPERATIONS, + operation_max_storage_items: MAX_PAGINATION_LIMIT, + }, + ) + .into_rpc(); + + let mut sub = api.subscribe("chainHead_unstable_follow", [false]).await.unwrap(); + let sub_id = sub.subscription_id(); + let sub_id = serde_json::to_string(&sub_id).unwrap(); + + // Import 3 blocks. + let block_1 = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap() + .build() + .unwrap() + .block; + let block_1_hash = block_1.header.hash(); + client.import(BlockOrigin::Own, block_1.clone()).await.unwrap(); + + let block_2 = BlockBuilderBuilder::new(&*client) + .on_parent_block(block_1.hash()) + .with_parent_block_number(1) + .build() + .unwrap() + .build() + .unwrap() + .block; + let block_2_hash = block_2.header.hash(); + client.import(BlockOrigin::Own, block_2.clone()).await.unwrap(); + + let block_3 = BlockBuilderBuilder::new(&*client) + .on_parent_block(block_2.hash()) + .with_parent_block_number(2) + .build() + .unwrap() + .build() + .unwrap() + .block; + let block_3_hash = block_3.header.hash(); + client.import(BlockOrigin::Own, block_3.clone()).await.unwrap(); + + // Ensure the imported block is propagated and pinned for this subscription. 
+ assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::Initialized(_) + ); + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::NewBlock(_) + ); + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::BestBlockChanged(_) + ); + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::NewBlock(_) + ); + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::BestBlockChanged(_) + ); + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::NewBlock(_) + ); + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::BestBlockChanged(_) + ); + + // Unpin an invalid subscription ID must return Ok(()). + let invalid_hash = hex_string(&INVALID_HASH); + let _res: () = api + .call("chainHead_unstable_unpin", rpc_params!["invalid_sub_id", &invalid_hash]) + .await + .unwrap(); + + // Valid subscription with invalid block hash. + let err = api + .call::<_, serde_json::Value>( + "chainHead_unstable_unpin", + rpc_params![&sub_id, &invalid_hash], + ) + .await + .unwrap_err(); + assert_matches!(err, + Error::Call(CallError::Custom(ref err)) if err.code() == 2001 && err.message() == "Invalid block hash" + ); + + let _res: () = api + .call("chainHead_unstable_unpin", rpc_params![&sub_id, &block_1_hash]) + .await + .unwrap(); + + // One block hash is invalid. Block 1 is already unpinned. + let err = api + .call::<_, serde_json::Value>( + "chainHead_unstable_unpin", + rpc_params![&sub_id, vec![&block_1_hash, &block_2_hash, &block_3_hash]], + ) + .await + .unwrap_err(); + assert_matches!(err, + Error::Call(CallError::Custom(ref err)) if err.code() == 2001 && err.message() == "Invalid block hash" + ); + + // Unpin multiple blocks. + let _res: () = api + .call("chainHead_unstable_unpin", rpc_params![&sub_id, vec![&block_2_hash, &block_3_hash]]) + .await + .unwrap(); + + // Check block 2 and 3 are unpinned. + let err = api + .call::<_, serde_json::Value>( + "chainHead_unstable_unpin", + rpc_params![&sub_id, &block_2_hash], + ) + .await + .unwrap_err(); + assert_matches!(err, + Error::Call(CallError::Custom(ref err)) if err.code() == 2001 && err.message() == "Invalid block hash" + ); + + let err = api + .call::<_, serde_json::Value>( + "chainHead_unstable_unpin", + rpc_params![&sub_id, &block_3_hash], + ) + .await + .unwrap_err(); + assert_matches!(err, + Error::Call(CallError::Custom(ref err)) if err.code() == 2001 && err.message() == "Invalid block hash" + ); +} + #[tokio::test] async fn follow_prune_best_block() { let builder = TestClientBuilder::new(); @@ -1533,7 +1780,6 @@ async fn follow_prune_best_block() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -1560,27 +1806,52 @@ async fn follow_prune_best_block() { // finalized -> block 1 -> block 2 // ^^^ best block reported // - // -> block 1 -> block 3 -> block 4 + // -> block 1 -> block 2_f -> block 4 // ^^^ finalized // // The block 4 is needed on the longest chain because we want the // best block 2 to be reported as pruned. Pruning is happening at // height (N - 1), where N is the finalized block number. 
- let block_1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - let block_1_hash = block_1.header.hash(); + let block_1 = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap() + .build() + .unwrap() + .block; + let block_1_hash = block_1.hash(); client.import(BlockOrigin::Own, block_1.clone()).await.unwrap(); - let block_3 = client.new_block(Default::default()).unwrap().build().unwrap().block; - let block_3_hash = block_3.header.hash(); - client.import(BlockOrigin::Own, block_3.clone()).await.unwrap(); - - let block_4 = client.new_block(Default::default()).unwrap().build().unwrap().block; - let block_4_hash = block_4.header.hash(); + let block_2_f = BlockBuilderBuilder::new(&*client) + .on_parent_block(block_1_hash) + .with_parent_block_number(1) + .build() + .unwrap() + .build() + .unwrap() + .block; + let block_2_f_hash = block_2_f.hash(); + client.import(BlockOrigin::Own, block_2_f.clone()).await.unwrap(); + + let block_4 = BlockBuilderBuilder::new(&*client) + .on_parent_block(block_2_f_hash) + .with_parent_block_number(2) + .build() + .unwrap() + .build() + .unwrap() + .block; + let block_4_hash = block_4.hash(); client.import(BlockOrigin::Own, block_4.clone()).await.unwrap(); // Import block 2 as best on the fork. - let mut block_builder = client.new_block_at(block_1_hash, Default::default(), false).unwrap(); + let mut block_builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(block_1_hash) + .with_parent_block_number(1) + .build() + .unwrap(); // This push is required as otherwise block 3 has the same hash as block 2 and won't get // imported block_builder @@ -1613,7 +1884,7 @@ async fn follow_prune_best_block() { // Check block 3. 
let event: FollowEvent = get_next_event(&mut sub).await; let expected = FollowEvent::NewBlock(NewBlock { - block_hash: format!("{:?}", block_3_hash), + block_hash: format!("{:?}", block_2_f_hash), parent_block_hash: format!("{:?}", block_1_hash), new_runtime: None, with_runtime: false, @@ -1621,7 +1892,7 @@ async fn follow_prune_best_block() { assert_eq!(event, expected); let event: FollowEvent = get_next_event(&mut sub).await; let expected = FollowEvent::BestBlockChanged(BestBlockChanged { - best_block_hash: format!("{:?}", block_3_hash), + best_block_hash: format!("{:?}", block_2_f_hash), }); assert_eq!(event, expected); @@ -1629,7 +1900,7 @@ async fn follow_prune_best_block() { let event: FollowEvent = get_next_event(&mut sub).await; let expected = FollowEvent::NewBlock(NewBlock { block_hash: format!("{:?}", block_4_hash), - parent_block_hash: format!("{:?}", block_3_hash), + parent_block_hash: format!("{:?}", block_2_f_hash), new_runtime: None, with_runtime: false, }); @@ -1670,7 +1941,7 @@ async fn follow_prune_best_block() { let expected = FollowEvent::Finalized(Finalized { finalized_block_hashes: vec![ format!("{:?}", block_1_hash), - format!("{:?}", block_3_hash), + format!("{:?}", block_2_f_hash), format!("{:?}", block_4_hash), ], pruned_block_hashes: vec![format!("{:?}", block_2_hash)], @@ -1681,7 +1952,7 @@ async fn follow_prune_best_block() { let sub_id = sub.subscription_id(); let sub_id = serde_json::to_string(&sub_id).unwrap(); let hash = format!("{:?}", block_2_hash); - let _res: () = api.call("chainHead_unstable_unpin", [&sub_id, &hash]).await.unwrap(); + let _res: () = api.call("chainHead_unstable_unpin", rpc_params![&sub_id, &hash]).await.unwrap(); } #[tokio::test] @@ -1694,7 +1965,6 @@ async fn follow_forks_pruned_block() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -1708,22 +1978,46 @@ async fn follow_forks_pruned_block() { // // finalized -> block 1 -> block 2 -> block 3 // ^^^ finalized - // -> block 1 -> block 4 -> block 5 + // -> block 1 -> block 2_f -> block 3_f // - let block_1 = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block_1 = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap() + .build() + .unwrap() + .block; client.import(BlockOrigin::Own, block_1.clone()).await.unwrap(); - let block_2 = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block_2 = BlockBuilderBuilder::new(&*client) + .on_parent_block(block_1.hash()) + .with_parent_block_number(1) + .build() + .unwrap() + .build() + .unwrap() + .block; client.import(BlockOrigin::Own, block_2.clone()).await.unwrap(); - let block_3 = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block_3 = BlockBuilderBuilder::new(&*client) + .on_parent_block(block_2.hash()) + .with_parent_block_number(2) + .build() + .unwrap() + .build() + .unwrap() + .block; let block_3_hash = block_3.header.hash(); client.import(BlockOrigin::Own, block_3.clone()).await.unwrap(); // Block 4 with parent Block 1 is not the best imported. 
- let mut block_builder = - client.new_block_at(block_1.header.hash(), Default::default(), false).unwrap(); + let mut block_builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(block_1.hash()) + .with_parent_block_number(1) + .build() + .unwrap(); // This push is required as otherwise block 4 has the same hash as block 2 and won't get // imported block_builder @@ -1734,11 +2028,14 @@ async fn follow_forks_pruned_block() { nonce: 0, }) .unwrap(); - let block_4 = block_builder.build().unwrap().block; - client.import(BlockOrigin::Own, block_4.clone()).await.unwrap(); + let block_2_f = block_builder.build().unwrap().block; + client.import(BlockOrigin::Own, block_2_f.clone()).await.unwrap(); - let mut block_builder = - client.new_block_at(block_4.header.hash(), Default::default(), false).unwrap(); + let mut block_builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(block_2_f.hash()) + .with_parent_block_number(2) + .build() + .unwrap(); block_builder .push_transfer(Transfer { from: AccountKeyring::Bob.into(), @@ -1747,10 +2044,10 @@ async fn follow_forks_pruned_block() { nonce: 0, }) .unwrap(); - let block_5 = block_builder.build().unwrap().block; - client.import(BlockOrigin::Own, block_5.clone()).await.unwrap(); + let block_3_f = block_builder.build().unwrap().block; + client.import(BlockOrigin::Own, block_3_f.clone()).await.unwrap(); - // Block 4 and 5 are not pruned, pruning happens at height (N - 1). + // Block 2_f and 3_f are not pruned, pruning happens at height (N - 1). client.finalize_block(block_3_hash, None).unwrap(); let mut sub = api.subscribe("chainHead_unstable_follow", [false]).await.unwrap(); @@ -1766,22 +2063,29 @@ async fn follow_forks_pruned_block() { // Block tree: // - // finalized -> block 1 -> block 2 -> block 3 -> block 6 + // finalized -> block 1 -> block 2 -> block 3 -> block 4 // ^^^ finalized - // -> block 1 -> block 4 -> block 5 + // -> block 1 -> block 2_f -> block 3_f // - // Mark block 6 as finalized to force block 4 and 5 to get pruned. + // Mark block 4 as finalized to force block 2_f and 3_f to get pruned. - let block_6 = client.new_block(Default::default()).unwrap().build().unwrap().block; - let block_6_hash = block_6.header.hash(); - client.import(BlockOrigin::Own, block_6.clone()).await.unwrap(); + let block_4 = BlockBuilderBuilder::new(&*client) + .on_parent_block(block_3.hash()) + .with_parent_block_number(3) + .build() + .unwrap() + .build() + .unwrap() + .block; + let block_4_hash = block_4.hash(); + client.import(BlockOrigin::Own, block_4.clone()).await.unwrap(); - client.finalize_block(block_6_hash, None).unwrap(); + client.finalize_block(block_4_hash, None).unwrap(); // Check block 6. let event: FollowEvent = get_next_event(&mut sub).await; let expected = FollowEvent::NewBlock(NewBlock { - block_hash: format!("{:?}", block_6_hash), + block_hash: format!("{:?}", block_4_hash), parent_block_hash: format!("{:?}", block_3_hash), new_runtime: None, with_runtime: false, @@ -1789,14 +2093,14 @@ async fn follow_forks_pruned_block() { assert_eq!(event, expected); let event: FollowEvent = get_next_event(&mut sub).await; let expected = FollowEvent::BestBlockChanged(BestBlockChanged { - best_block_hash: format!("{:?}", block_6_hash), + best_block_hash: format!("{:?}", block_4_hash), }); assert_eq!(event, expected); // Block 4 and 5 must not be reported as pruned. 
let event: FollowEvent = get_next_event(&mut sub).await; let expected = FollowEvent::Finalized(Finalized { - finalized_block_hashes: vec![format!("{:?}", block_6_hash)], + finalized_block_hashes: vec![format!("{:?}", block_4_hash)], pruned_block_hashes: vec![], }); assert_eq!(event, expected); @@ -1812,7 +2116,6 @@ async fn follow_report_multiple_pruned_block() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -1826,26 +2129,51 @@ async fn follow_report_multiple_pruned_block() { // // finalized -> block 1 -> block 2 -> block 3 // ^^^ finalized after subscription - // -> block 1 -> block 4 -> block 5 + // -> block 1 -> block 2_f -> block 3_f let finalized_hash = client.info().finalized_hash; - let block_1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - let block_1_hash = block_1.header.hash(); + let block_1 = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap() + .build() + .unwrap() + .block; + let block_1_hash = block_1.hash(); client.import(BlockOrigin::Own, block_1.clone()).await.unwrap(); - let block_2 = client.new_block(Default::default()).unwrap().build().unwrap().block; - let block_2_hash = block_2.header.hash(); + let block_2 = BlockBuilderBuilder::new(&*client) + .on_parent_block(block_1.hash()) + .with_parent_block_number(1) + .build() + .unwrap() + .build() + .unwrap() + .block; + let block_2_hash = block_2.hash(); client.import(BlockOrigin::Own, block_2.clone()).await.unwrap(); - let block_3 = client.new_block(Default::default()).unwrap().build().unwrap().block; - let block_3_hash = block_3.header.hash(); + let block_3 = BlockBuilderBuilder::new(&*client) + .on_parent_block(block_2.hash()) + .with_parent_block_number(2) + .build() + .unwrap() + .build() + .unwrap() + .block; + let block_3_hash = block_3.hash(); client.import(BlockOrigin::Own, block_3.clone()).await.unwrap(); - // Block 4 with parent Block 1 is not the best imported. - let mut block_builder = - client.new_block_at(block_1.header.hash(), Default::default(), false).unwrap(); - // This push is required as otherwise block 4 has the same hash as block 2 and won't get + // Block 2_f with parent Block 1 is not the best imported. 
+ let mut block_builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(block_1.hash()) + .with_parent_block_number(1) + .build() + .unwrap(); + + // This push is required as otherwise block 2_f has the same hash as block 2 and won't get // imported block_builder .push_transfer(Transfer { @@ -1855,12 +2183,16 @@ async fn follow_report_multiple_pruned_block() { nonce: 0, }) .unwrap(); - let block_4 = block_builder.build().unwrap().block; - let block_4_hash = block_4.header.hash(); - client.import(BlockOrigin::Own, block_4.clone()).await.unwrap(); + let block_2_f = block_builder.build().unwrap().block; + let block_2_f_hash = block_2_f.hash(); + client.import(BlockOrigin::Own, block_2_f.clone()).await.unwrap(); + + let mut block_builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(block_2_f.hash()) + .with_parent_block_number(2) + .build() + .unwrap(); - let mut block_builder = - client.new_block_at(block_4.header.hash(), Default::default(), false).unwrap(); block_builder .push_transfer(Transfer { from: AccountKeyring::Bob.into(), @@ -1869,9 +2201,9 @@ async fn follow_report_multiple_pruned_block() { nonce: 0, }) .unwrap(); - let block_5 = block_builder.build().unwrap().block; - let block_5_hash = block_5.header.hash(); - client.import(BlockOrigin::Own, block_5.clone()).await.unwrap(); + let block_3_f = block_builder.build().unwrap().block; + let block_3_f_hash = block_3_f.hash(); + client.import(BlockOrigin::Own, block_3_f.clone()).await.unwrap(); let mut sub = api.subscribe("chainHead_unstable_follow", [false]).await.unwrap(); // Initialized must always be reported first. @@ -1913,7 +2245,7 @@ async fn follow_report_multiple_pruned_block() { // The fork must also be reported. let event: FollowEvent = get_next_event(&mut sub).await; let expected = FollowEvent::NewBlock(NewBlock { - block_hash: format!("{:?}", block_4_hash), + block_hash: format!("{:?}", block_2_f_hash), parent_block_hash: format!("{:?}", block_1_hash), new_runtime: None, with_runtime: false, @@ -1922,8 +2254,8 @@ async fn follow_report_multiple_pruned_block() { let event: FollowEvent = get_next_event(&mut sub).await; let expected = FollowEvent::NewBlock(NewBlock { - block_hash: format!("{:?}", block_5_hash), - parent_block_hash: format!("{:?}", block_4_hash), + block_hash: format!("{:?}", block_3_f_hash), + parent_block_hash: format!("{:?}", block_2_f_hash), new_runtime: None, with_runtime: false, }); @@ -1953,22 +2285,30 @@ async fn follow_report_multiple_pruned_block() { // Block tree: // - // finalized -> block 1 -> block 2 -> block 3 -> block 6 + // finalized -> block 1 -> block 2 -> block 3 -> block 4 // ^^^ finalized - // -> block 1 -> block 4 -> block 5 + // -> block 1 -> block 2_f -> block 3_f // - // Mark block 6 as finalized to force block 4 and 5 to get pruned. + // Mark block 4 as finalized to force block 2_f and 3_f to get pruned. - let block_6 = client.new_block(Default::default()).unwrap().build().unwrap().block; - let block_6_hash = block_6.header.hash(); - client.import(BlockOrigin::Own, block_6.clone()).await.unwrap(); + let block_4 = BlockBuilderBuilder::new(&*client) + .on_parent_block(block_3.hash()) + .with_parent_block_number(3) + .build() + .unwrap() + .build() + .unwrap() + .block; + + let block_4_hash = block_4.hash(); + client.import(BlockOrigin::Own, block_4.clone()).await.unwrap(); - client.finalize_block(block_6_hash, None).unwrap(); + client.finalize_block(block_4_hash, None).unwrap(); // Check block 6. 
let event: FollowEvent = get_next_event(&mut sub).await; let expected = FollowEvent::NewBlock(NewBlock { - block_hash: format!("{:?}", block_6_hash), + block_hash: format!("{:?}", block_4_hash), parent_block_hash: format!("{:?}", block_3_hash), new_runtime: None, with_runtime: false, @@ -1976,15 +2316,15 @@ async fn follow_report_multiple_pruned_block() { assert_eq!(event, expected); let event: FollowEvent = get_next_event(&mut sub).await; let expected = FollowEvent::BestBlockChanged(BestBlockChanged { - best_block_hash: format!("{:?}", block_6_hash), + best_block_hash: format!("{:?}", block_4_hash), }); assert_eq!(event, expected); // Block 4 and 5 be reported as pruned, not just the stale head (block 5). let event: FollowEvent = get_next_event(&mut sub).await; let expected = FollowEvent::Finalized(Finalized { - finalized_block_hashes: vec![format!("{:?}", block_6_hash)], - pruned_block_hashes: vec![format!("{:?}", block_4_hash), format!("{:?}", block_5_hash)], + finalized_block_hashes: vec![format!("{:?}", block_4_hash)], + pruned_block_hashes: vec![format!("{:?}", block_2_f_hash), format!("{:?}", block_3_f_hash)], }); assert_eq!(event, expected); } @@ -2021,7 +2361,6 @@ async fn pin_block_references() { client.clone(), backend.clone(), Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: 3, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -2052,7 +2391,14 @@ async fn pin_block_references() { let sub_id = sub.subscription_id(); let sub_id = serde_json::to_string(&sub_id).unwrap(); - let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap() + .build() + .unwrap() + .block; let hash = block.header.hash(); let block_hash = format!("{:?}", hash); client.import(BlockOrigin::Own, block.clone()).await.unwrap(); @@ -2080,7 +2426,10 @@ async fn pin_block_references() { wait_pinned_references(&backend, &hash, 1).await; // To not exceed the number of pinned blocks, we need to unpin before the next import. - let _res: () = api.call("chainHead_unstable_unpin", [&sub_id, &block_hash]).await.unwrap(); + let _res: () = api + .call("chainHead_unstable_unpin", rpc_params![&sub_id, &block_hash]) + .await + .unwrap(); // Make sure unpin clears out the reference. let refs = backend.pin_refs(&hash).unwrap(); @@ -2089,8 +2438,15 @@ async fn pin_block_references() { // Add another 2 blocks and make sure we drop the subscription with the blocks pinned. let mut hashes = Vec::new(); for _ in 0..2 { - let block = client.new_block(Default::default()).unwrap().build().unwrap().block; - let hash = block.header.hash(); + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().best_hash) + .with_parent_block_number(client.chain_info().best_number) + .build() + .unwrap() + .build() + .unwrap() + .block; + let hash = block.hash(); client.import(BlockOrigin::Own, block.clone()).await.unwrap(); // Ensure the imported block is propagated for this subscription. @@ -2115,7 +2471,14 @@ async fn pin_block_references() { drop(sub); // The `chainHead` detects the subscription was terminated when it tries // to send another block. 
- let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().best_hash) + .with_parent_block_number(client.chain_info().best_number) + .build() + .unwrap() + .build() + .unwrap() + .block; client.import(BlockOrigin::Own, block.clone()).await.unwrap(); for hash in &hashes { @@ -2135,7 +2498,6 @@ async fn follow_finalized_before_new_block() { client_mock.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -2146,7 +2508,14 @@ async fn follow_finalized_before_new_block() { .into_rpc(); // Make sure the block is imported for it to be pinned. - let block_1 = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block_1 = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap() + .build() + .unwrap() + .block; let block_1_hash = block_1.header.hash(); client.import(BlockOrigin::Own, block_1.clone()).await.unwrap(); @@ -2191,8 +2560,15 @@ async fn follow_finalized_before_new_block() { }); assert_eq!(event, expected); - let block_2 = client.new_block(Default::default()).unwrap().build().unwrap().block; - let block_2_hash = block_2.header.hash(); + let block_2 = BlockBuilderBuilder::new(&*client) + .on_parent_block(block_1.hash()) + .with_parent_block_number(1) + .build() + .unwrap() + .build() + .unwrap() + .block; + let block_2_hash = block_2.hash(); client.import(BlockOrigin::Own, block_2.clone()).await.unwrap(); // Triggering the `BlockImportNotification` notification for block 1 should have no effect @@ -2236,7 +2612,6 @@ async fn ensure_operation_limits_works() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -2250,7 +2625,14 @@ async fn ensure_operation_limits_works() { let sub_id = sub.subscription_id(); let sub_id = serde_json::to_string(&sub_id).unwrap(); - let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap() + .build() + .unwrap() + .block; client.import(BlockOrigin::Own, block.clone()).await.unwrap(); // Ensure the imported block is propagated and pinned for this subscription. @@ -2334,7 +2716,6 @@ async fn check_continue_operation() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -2349,7 +2730,11 @@ async fn check_continue_operation() { let sub_id = serde_json::to_string(&sub_id).unwrap(); // Import a new block with storage changes. 
- let mut builder = client.new_block(Default::default()).unwrap(); + let mut builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap(); builder.push_storage_change(b":m".to_vec(), Some(b"a".to_vec())).unwrap(); builder.push_storage_change(b":mo".to_vec(), Some(b"ab".to_vec())).unwrap(); builder.push_storage_change(b":moc".to_vec(), Some(b"abc".to_vec())).unwrap(); @@ -2513,7 +2898,6 @@ async fn stop_storage_operation() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -2528,7 +2912,11 @@ async fn stop_storage_operation() { let sub_id = serde_json::to_string(&sub_id).unwrap(); // Import a new block with storage changes. - let mut builder = client.new_block(Default::default()).unwrap(); + let mut builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap(); builder.push_storage_change(b":m".to_vec(), Some(b"a".to_vec())).unwrap(); builder.push_storage_change(b":mo".to_vec(), Some(b"ab".to_vec())).unwrap(); let block = builder.build().unwrap().block; @@ -2612,7 +3000,7 @@ async fn stop_storage_operation() { #[tokio::test] async fn storage_closest_merkle_value() { - let (mut client, api, mut sub, sub_id, _) = setup_api().await; + let (mut client, api, mut sub, sub_id, block) = setup_api().await; /// The core of this test. /// @@ -2738,7 +3126,11 @@ async fn storage_closest_merkle_value() { } // Import a new block with storage changes. - let mut builder = client.new_block(Default::default()).unwrap(); + let mut builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(block.hash()) + .with_parent_block_number(1) + .build() + .unwrap(); builder.push_storage_change(b":AAAA".to_vec(), Some(vec![1; 64])).unwrap(); builder.push_storage_change(b":AAAB".to_vec(), Some(vec![2; 64])).unwrap(); let block = builder.build().unwrap().block; @@ -2758,7 +3150,11 @@ async fn storage_closest_merkle_value() { let merkle_values_lhs = expect_merkle_request(&api, &mut sub, sub_id.clone(), block_hash).await; // Import a new block with and change AAAB value. 
- let mut builder = client.new_block(Default::default()).unwrap(); + let mut builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(block.hash()) + .with_parent_block_number(2) + .build() + .unwrap(); builder.push_storage_change(b":AAAA".to_vec(), Some(vec![1; 64])).unwrap(); builder.push_storage_change(b":AAAB".to_vec(), Some(vec![3; 64])).unwrap(); let block = builder.build().unwrap().block; diff --git a/substrate/client/rpc/Cargo.toml b/substrate/client/rpc/Cargo.toml index dd1120e5b0f86638ec0881716c0b7391097775ad..1cedcb3a6d08c1650919550d648fe63fefc3d5c7 100644 --- a/substrate/client/rpc/Cargo.toml +++ b/substrate/client/rpc/Cargo.toml @@ -18,7 +18,7 @@ futures = "0.3.21" jsonrpsee = { version = "0.16.2", features = ["server"] } log = "0.4.17" parking_lot = "0.12.1" -serde_json = "1.0.107" +serde_json = "1.0.108" sc-block-builder = { path = "../block-builder" } sc-chain-spec = { path = "../chain-spec" } sc-client-api = { path = "../api" } diff --git a/substrate/client/rpc/src/chain/tests.rs b/substrate/client/rpc/src/chain/tests.rs index 75211a43bd9f1f5a5aa5e5cbb6bbd6e54e205728..cff5bf39811c7dd20d553b60af59d9fef5270f36 100644 --- a/substrate/client/rpc/src/chain/tests.rs +++ b/substrate/client/rpc/src/chain/tests.rs @@ -20,7 +20,7 @@ use super::*; use crate::testing::{test_executor, timeout_secs}; use assert_matches::assert_matches; use jsonrpsee::types::EmptyServerParams as EmptyParams; -use sc_block_builder::BlockBuilderProvider; +use sc_block_builder::BlockBuilderBuilder; use sp_consensus::BlockOrigin; use sp_rpc::list::ListOrValue; use substrate_test_runtime_client::{ @@ -75,7 +75,14 @@ async fn should_return_a_block() { let mut client = Arc::new(substrate_test_runtime_client::new()); let api = new_full(client.clone(), test_executor()).into_rpc(); - let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().best_hash) + .with_parent_block_number(client.chain_info().best_number) + .build() + .unwrap() + .build() + .unwrap() + .block; let block_hash = block.hash(); client.import(BlockOrigin::Own, block).await.unwrap(); @@ -152,7 +159,14 @@ async fn should_return_block_hash() { api.call("chain_getBlockHash", [ListOrValue::from(1_u64)]).await.unwrap(); assert_matches!(res, None); - let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().best_hash) + .with_parent_block_number(client.chain_info().best_number) + .build() + .unwrap() + .build() + .unwrap() + .block; client.import(BlockOrigin::Own, block.clone()).await.unwrap(); let res: ListOrValue> = @@ -197,7 +211,14 @@ async fn should_return_finalized_hash() { assert_eq!(res, client.genesis_hash()); // import new block - let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().best_hash) + .with_parent_block_number(client.chain_info().best_number) + .build() + .unwrap() + .build() + .unwrap() + .block; let block_hash = block.hash(); client.import(BlockOrigin::Own, block).await.unwrap(); @@ -232,7 +253,14 @@ async fn test_head_subscription(method: &str) { let mut sub = { let api = new_full(client.clone(), test_executor()).into_rpc(); let sub = api.subscribe(method, EmptyParams::new()).await.unwrap(); - let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block = 
BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().best_hash) + .with_parent_block_number(client.chain_info().best_number) + .build() + .unwrap() + .build() + .unwrap() + .block; let block_hash = block.hash(); client.import(BlockOrigin::Own, block).await.unwrap(); client.finalize_block(block_hash, None).unwrap(); diff --git a/substrate/client/rpc/src/dev/tests.rs b/substrate/client/rpc/src/dev/tests.rs index db6a9a119da0a152e9c338c994e1fb724f621776..448d16274f27e27e5a23b68a0770bcfe8414b7fa 100644 --- a/substrate/client/rpc/src/dev/tests.rs +++ b/substrate/client/rpc/src/dev/tests.rs @@ -17,7 +17,7 @@ // along with this program. If not, see . use super::*; -use sc_block_builder::BlockBuilderProvider; +use sc_block_builder::BlockBuilderBuilder; use sp_blockchain::HeaderBackend; use sp_consensus::BlockOrigin; use substrate_test_runtime_client::{prelude::*, runtime::Block}; @@ -27,7 +27,14 @@ async fn block_stats_work() { let mut client = Arc::new(substrate_test_runtime_client::new()); let api = >::new(client.clone(), DenyUnsafe::No).into_rpc(); - let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap() + .build() + .unwrap() + .block; let (expected_witness_len, expected_witness_compact_len, expected_block_len) = { let genesis_hash = client.chain_info().genesis_hash; @@ -72,7 +79,14 @@ async fn deny_unsafe_works() { let mut client = Arc::new(substrate_test_runtime_client::new()); let api = >::new(client.clone(), DenyUnsafe::Yes).into_rpc(); - let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap() + .build() + .unwrap() + .block; client.import(BlockOrigin::Own, block).await.unwrap(); let best_hash = client.info().best_hash; diff --git a/substrate/client/rpc/src/state/tests.rs b/substrate/client/rpc/src/state/tests.rs index 35352f6d890ed0a14fb0e74d529347f17753844a..594f51efba686695b6f99b412dbc8caa62ce6750 100644 --- a/substrate/client/rpc/src/state/tests.rs +++ b/substrate/client/rpc/src/state/tests.rs @@ -25,7 +25,7 @@ use jsonrpsee::{ core::Error as RpcError, types::{error::CallError as RpcCallError, EmptyServerParams as EmptyParams, ErrorObject}, }; -use sc_block_builder::BlockBuilderProvider; +use sc_block_builder::BlockBuilderBuilder; use sc_rpc_api::DenyUnsafe; use sp_consensus::BlockOrigin; use sp_core::{hash::H256, storage::ChildInfo}; @@ -218,7 +218,11 @@ async fn should_notify_about_storage_changes() { let sub = api_rpc.subscribe("state_subscribeStorage", EmptyParams::new()).await.unwrap(); // Cause a change: - let mut builder = client.new_block(Default::default()).unwrap(); + let mut builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().best_hash) + .with_parent_block_number(client.chain_info().best_number) + .build() + .unwrap(); builder .push_transfer(Transfer { from: AccountKeyring::Alice.into(), @@ -263,7 +267,11 @@ async fn should_send_initial_storage_changes_and_notifications() { .await .unwrap(); - let mut builder = client.new_block(Default::default()).unwrap(); + let mut builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().best_hash) + .with_parent_block_number(client.chain_info().best_number) + .build() + .unwrap(); builder 
.push_transfer(Transfer { from: AccountKeyring::Alice.into(), @@ -291,7 +299,11 @@ async fn should_query_storage() { let (api, _child) = new_full(client.clone(), test_executor(), DenyUnsafe::No); let mut add_block = |index| { - let mut builder = client.new_block(Default::default()).unwrap(); + let mut builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().best_hash) + .with_parent_block_number(client.chain_info().best_number) + .build() + .unwrap(); // fake change: None -> None -> None builder .push(ExtrinsicBuilder::new_storage_change(vec![1], None).build()) diff --git a/substrate/client/service/Cargo.toml b/substrate/client/service/Cargo.toml index 2386bebf24d26d9ec98ab51b1bdad7d078efc734..de69c50702ac4810f8e5fa9184d7077ccc432d48 100644 --- a/substrate/client/service/Cargo.toml +++ b/substrate/client/service/Cargo.toml @@ -35,7 +35,7 @@ futures-timer = "3.0.1" exit-future = "0.2.0" pin-project = "1.0.12" serde = "1.0.188" -serde_json = "1.0.107" +serde_json = "1.0.108" sc-keystore = { path = "../keystore" } sp-runtime = { path = "../../primitives/runtime" } sp-trie = { path = "../../primitives/trie" } @@ -69,7 +69,6 @@ sp-transaction-storage-proof = { path = "../../primitives/transaction-storage-pr sc-rpc-server = { path = "../rpc-servers" } sc-rpc = { path = "../rpc" } sc-rpc-spec-v2 = { path = "../rpc-spec-v2" } -sc-block-builder = { path = "../block-builder" } sc-informant = { path = "../informant" } sc-telemetry = { path = "../telemetry" } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" } diff --git a/substrate/client/service/src/builder.rs b/substrate/client/service/src/builder.rs index 3838accde02393b053943e944e86f1f02ba8944e..25f998385ba1ac64d943f7b35a4e1c5f3d66a50a 100644 --- a/substrate/client/service/src/builder.rs +++ b/substrate/client/service/src/builder.rs @@ -637,7 +637,6 @@ where client.clone(), backend.clone(), task_executor.clone(), - client.info().genesis_hash, // Defaults to sensible limits for the `ChainHead`. 
sc_rpc_spec_v2::chain_head::ChainHeadConfig::default(), ) diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs index da4a4f66e2af16f85bad5f5b526956152f63d7ae..9d51aae55b20d131841b16cabde6c4f3bc6c4e5a 100644 --- a/substrate/client/service/src/client/client.rs +++ b/substrate/client/service/src/client/client.rs @@ -24,7 +24,6 @@ use log::{error, info, trace, warn}; use parking_lot::{Mutex, RwLock}; use prometheus_endpoint::Registry; use rand::Rng; -use sc_block_builder::{BlockBuilderApi, BlockBuilderProvider, RecordProof}; use sc_chain_spec::{resolve_state_version_from_wasm, BuildGenesisBlock}; use sc_client_api::{ backend::{ @@ -70,7 +69,7 @@ use sp_runtime::{ Block as BlockT, BlockIdTo, HashingFor, Header as HeaderT, NumberFor, One, SaturatedConversion, Zero, }, - Digest, Justification, Justifications, StateVersion, + Justification, Justifications, StateVersion, }; use sp_state_machine::{ prove_child_read, prove_range_read_with_child_with_size, prove_read, @@ -1402,46 +1401,6 @@ where } } -impl BlockBuilderProvider for Client -where - B: backend::Backend + Send + Sync + 'static, - E: CallExecutor + Send + Sync + 'static, - Block: BlockT, - Self: ChainHeaderBackend + ProvideRuntimeApi, - >::Api: ApiExt + BlockBuilderApi, -{ - fn new_block_at>( - &self, - parent: Block::Hash, - inherent_digests: Digest, - record_proof: R, - ) -> sp_blockchain::Result> { - sc_block_builder::BlockBuilder::new( - self, - parent, - self.expect_block_number_from_id(&BlockId::Hash(parent))?, - record_proof.into(), - inherent_digests, - &self.backend, - ) - } - - fn new_block( - &self, - inherent_digests: Digest, - ) -> sp_blockchain::Result> { - let info = self.chain_info(); - sc_block_builder::BlockBuilder::new( - self, - info.best_hash, - info.best_number, - RecordProof::No, - inherent_digests, - &self.backend, - ) - } -} - impl ExecutorProvider for Client where B: backend::Backend, diff --git a/substrate/client/service/src/metrics.rs b/substrate/client/service/src/metrics.rs index ece5758be771881bbac5940375ec6d7b5a8180de..a411a83a784e92f5fa424fd7e68e1d410d948ada 100644 --- a/substrate/client/service/src/metrics.rs +++ b/substrate/client/service/src/metrics.rs @@ -23,7 +23,7 @@ use futures_timer::Delay; use prometheus_endpoint::{register, Gauge, GaugeVec, Opts, PrometheusError, Registry, U64}; use sc_client_api::{ClientInfo, UsageProvider}; use sc_network::{config::Role, NetworkStatus, NetworkStatusProvider}; -use sc_network_common::sync::{SyncStatus, SyncStatusProvider}; +use sc_network_sync::{SyncStatus, SyncStatusProvider}; use sc_telemetry::{telemetry, TelemetryHandle, SUBSTRATE_INFO}; use sc_transaction_pool_api::{MaintainedTransactionPool, PoolStatus}; use sc_utils::metrics::register_globals; diff --git a/substrate/client/service/test/Cargo.toml b/substrate/client/service/test/Cargo.toml index 670312e4161aa18e56597997b7e13c65fc2d209c..c6091f97d6378b139fcafde9a419e92a4d8ad988 100644 --- a/substrate/client/service/test/Cargo.toml +++ b/substrate/client/service/test/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-channel = "1.8.0" array-bytes = "6.1" -fdlimit = "0.2.1" +fdlimit = "0.3.0" futures = "0.3.21" log = "0.4.17" parity-scale-codec = "3.6.1" diff --git a/substrate/client/service/test/src/client/mod.rs b/substrate/client/service/test/src/client/mod.rs index c40ac33da4bb9a7fe7f8f1dc512bd87426fa5ac7..ba83bec8276eb3b4f7b09ff9a5717343b3f5e59e 100644 --- a/substrate/client/service/test/src/client/mod.rs +++ 
b/substrate/client/service/test/src/client/mod.rs @@ -19,7 +19,7 @@ use async_channel::TryRecvError; use futures::executor::block_on; use parity_scale_codec::{Decode, Encode, Joiner}; -use sc_block_builder::BlockBuilderProvider; +use sc_block_builder::BlockBuilderBuilder; use sc_client_api::{ in_mem, BlockBackend, BlockchainEvents, ExecutorProvider, FinalityNotifications, HeaderBackend, StorageProvider, @@ -39,7 +39,6 @@ use sp_runtime::{ }; use sp_state_machine::{backend::Backend as _, InMemoryBackend, OverlayedChanges, StateMachine}; use sp_storage::{ChildInfo, StorageKey}; -use sp_trie::{LayoutV0, TrieConfiguration}; use std::{collections::HashSet, sync::Arc}; use substrate_test_runtime::TestAPI; use substrate_test_runtime_client::{ @@ -62,22 +61,17 @@ fn construct_block( backend: &InMemoryBackend, number: BlockNumber, parent_hash: Hash, - state_root: Hash, txs: Vec, -) -> (Vec, Hash) { +) -> Vec { let transactions = txs.into_iter().map(|tx| tx.into_unchecked_extrinsic()).collect::>(); - let iter = transactions.iter().map(Encode::encode); - let extrinsics_root = LayoutV0::::ordered_trie_root(iter).into(); - let mut header = Header { parent_hash, number, - state_root, - extrinsics_root, + state_root: Default::default(), + extrinsics_root: Default::default(), digest: Digest { logs: vec![] }, }; - let hash = header.hash(); let mut overlay = OverlayedChanges::default(); let backend_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(backend); let runtime_code = backend_runtime_code.runtime_code().expect("Code is part of the backend"); @@ -124,19 +118,16 @@ fn construct_block( .unwrap(); header = Header::decode(&mut &ret_data[..]).unwrap(); - (vec![].and(&Block { header, extrinsics: transactions }), hash) + vec![].and(&Block { header, extrinsics: transactions }) } -fn block1(genesis_hash: Hash, backend: &InMemoryBackend) -> (Vec, Hash) { +fn block1(genesis_hash: Hash, backend: &InMemoryBackend) -> Vec { construct_block( backend, 1, genesis_hash, - array_bytes::hex_n_into_unchecked( - "25e5b37074063ab75c889326246640729b40d0c86932edc527bc80db0e04fe5c", - ), vec![Transfer { - from: AccountKeyring::Alice.into(), + from: AccountKeyring::One.into(), to: AccountKeyring::Two.into(), amount: 69 * DOLLARS, nonce: 0, @@ -175,7 +166,7 @@ fn construct_genesis_should_work_with_native() { let genesis_hash = insert_genesis_block(&mut storage); let backend = InMemoryBackend::from((storage, StateVersion::default())); - let (b1data, _b1hash) = block1(genesis_hash, &backend); + let b1data = block1(genesis_hash, &backend); let backend_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&backend); let runtime_code = backend_runtime_code.runtime_code().expect("Code is part of the backend"); @@ -206,7 +197,7 @@ fn construct_genesis_should_work_with_wasm() { let genesis_hash = insert_genesis_block(&mut storage); let backend = InMemoryBackend::from((storage, StateVersion::default())); - let (b1data, _b1hash) = block1(genesis_hash, &backend); + let b1data = block1(genesis_hash, &backend); let backend_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&backend); let runtime_code = backend_runtime_code.runtime_code().expect("Code is part of the backend"); @@ -250,7 +241,14 @@ fn client_initializes_from_genesis_ok() { fn block_builder_works_with_no_transactions() { let mut client = substrate_test_runtime_client::new(); - let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block = BlockBuilderBuilder::new(&client) + 
.on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, block)).unwrap(); @@ -261,7 +259,11 @@ fn block_builder_works_with_no_transactions() { fn block_builder_works_with_transactions() { let mut client = substrate_test_runtime_client::new(); - let mut builder = client.new_block(Default::default()).unwrap(); + let mut builder = BlockBuilderBuilder::new(&client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap(); builder .push_transfer(Transfer { @@ -316,7 +318,11 @@ fn block_builder_works_with_transactions() { #[test] fn block_builder_does_not_include_invalid() { let mut client = substrate_test_runtime_client::new(); - let mut builder = client.new_block(Default::default()).unwrap(); + let mut builder = BlockBuilderBuilder::new(&client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap(); builder .push_transfer(Transfer { @@ -388,11 +394,25 @@ fn uncles_with_only_ancestors() { let mut client = substrate_test_runtime_client::new(); // G -> A1 - let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; + let a1 = BlockBuilderBuilder::new(&client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); // A1 -> A2 - let a2 = client.new_block(Default::default()).unwrap().build().unwrap().block; + let a2 = BlockBuilderBuilder::new(&client) + .on_parent_block(a1.hash()) + .with_parent_block_number(*a1.header().number()) + .build() + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); let v: Vec = Vec::new(); assert_eq!(v, client.uncles(a2.hash(), 3).unwrap()); @@ -408,12 +428,21 @@ fn uncles_with_multiple_forks() { let mut client = substrate_test_runtime_client::new(); // G -> A1 - let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; + let a1 = BlockBuilderBuilder::new(&client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); // A1 -> A2 - let a2 = client - .new_block_at(a1.hash(), Default::default(), false) + let a2 = BlockBuilderBuilder::new(&client) + .on_parent_block(a1.hash()) + .with_parent_block_number(1) + .build() .unwrap() .build() .unwrap() @@ -421,8 +450,10 @@ fn uncles_with_multiple_forks() { block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); // A2 -> A3 - let a3 = client - .new_block_at(a2.hash(), Default::default(), false) + let a3 = BlockBuilderBuilder::new(&client) + .on_parent_block(a2.hash()) + .with_parent_block_number(2) + .build() .unwrap() .build() .unwrap() @@ -430,8 +461,10 @@ fn uncles_with_multiple_forks() { block_on(client.import(BlockOrigin::Own, a3.clone())).unwrap(); // A3 -> A4 - let a4 = client - .new_block_at(a3.hash(), Default::default(), false) + let a4 = BlockBuilderBuilder::new(&client) + .on_parent_block(a3.hash()) + .with_parent_block_number(3) + .build() .unwrap() .build() .unwrap() @@ -439,8 +472,10 @@ fn uncles_with_multiple_forks() { block_on(client.import(BlockOrigin::Own, a4.clone())).unwrap(); // A4 -> A5 - let a5 = client - .new_block_at(a4.hash(), Default::default(), false) + let a5 = 
BlockBuilderBuilder::new(&client) + .on_parent_block(a4.hash()) + .with_parent_block_number(4) + .build() .unwrap() .build() .unwrap() @@ -448,7 +483,12 @@ fn uncles_with_multiple_forks() { block_on(client.import(BlockOrigin::Own, a5.clone())).unwrap(); // A1 -> B2 - let mut builder = client.new_block_at(a1.hash(), Default::default(), false).unwrap(); + let mut builder = BlockBuilderBuilder::new(&client) + .on_parent_block(a1.hash()) + .with_parent_block_number(1) + .build() + .unwrap(); + // this push is required as otherwise B2 has the same hash as A2 and won't get imported builder .push_transfer(Transfer { @@ -462,8 +502,10 @@ fn uncles_with_multiple_forks() { block_on(client.import(BlockOrigin::Own, b2.clone())).unwrap(); // B2 -> B3 - let b3 = client - .new_block_at(b2.hash(), Default::default(), false) + let b3 = BlockBuilderBuilder::new(&client) + .on_parent_block(b2.hash()) + .with_parent_block_number(2) + .build() .unwrap() .build() .unwrap() @@ -471,8 +513,10 @@ fn uncles_with_multiple_forks() { block_on(client.import(BlockOrigin::Own, b3.clone())).unwrap(); // B3 -> B4 - let b4 = client - .new_block_at(b3.hash(), Default::default(), false) + let b4 = BlockBuilderBuilder::new(&client) + .on_parent_block(b3.hash()) + .with_parent_block_number(3) + .build() .unwrap() .build() .unwrap() @@ -480,7 +524,11 @@ fn uncles_with_multiple_forks() { block_on(client.import(BlockOrigin::Own, b4.clone())).unwrap(); // // B2 -> C3 - let mut builder = client.new_block_at(b2.hash(), Default::default(), false).unwrap(); + let mut builder = BlockBuilderBuilder::new(&client) + .on_parent_block(b2.hash()) + .with_parent_block_number(2) + .build() + .unwrap(); // this push is required as otherwise C3 has the same hash as B3 and won't get imported builder .push_transfer(Transfer { @@ -494,7 +542,11 @@ fn uncles_with_multiple_forks() { block_on(client.import(BlockOrigin::Own, c3.clone())).unwrap(); // A1 -> D2 - let mut builder = client.new_block_at(a1.hash(), Default::default(), false).unwrap(); + let mut builder = BlockBuilderBuilder::new(&client) + .on_parent_block(a1.hash()) + .with_parent_block_number(1) + .build() + .unwrap(); // this push is required as otherwise D2 has the same hash as B2 and won't get imported builder .push_transfer(Transfer { @@ -536,11 +588,25 @@ fn finality_target_on_longest_chain_with_single_chain_3_blocks() { let (mut client, longest_chain_select) = TestClientBuilder::new().build_with_longest_chain(); // G -> A1 - let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; + let a1 = BlockBuilderBuilder::new(&client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); // A1 -> A2 - let a2 = client.new_block(Default::default()).unwrap().build().unwrap().block; + let a2 = BlockBuilderBuilder::new(&client) + .on_parent_block(a1.hash()) + .with_parent_block_number(*a1.header().number()) + .build() + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); let genesis_hash = client.chain_info().genesis_hash; @@ -563,12 +629,21 @@ fn finality_target_on_longest_chain_with_multiple_forks() { let (mut client, longest_chain_select) = TestClientBuilder::new().build_with_longest_chain(); // G -> A1 - let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; + let a1 = BlockBuilderBuilder::new(&client) + 
.on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); // A1 -> A2 - let a2 = client - .new_block_at(a1.hash(), Default::default(), false) + let a2 = BlockBuilderBuilder::new(&client) + .on_parent_block(a1.hash()) + .with_parent_block_number(*a1.header().number()) + .build() .unwrap() .build() .unwrap() @@ -576,8 +651,10 @@ fn finality_target_on_longest_chain_with_multiple_forks() { block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); // A2 -> A3 - let a3 = client - .new_block_at(a2.hash(), Default::default(), false) + let a3 = BlockBuilderBuilder::new(&client) + .on_parent_block(a2.hash()) + .with_parent_block_number(*a2.header().number()) + .build() .unwrap() .build() .unwrap() @@ -585,8 +662,10 @@ fn finality_target_on_longest_chain_with_multiple_forks() { block_on(client.import(BlockOrigin::Own, a3.clone())).unwrap(); // A3 -> A4 - let a4 = client - .new_block_at(a3.hash(), Default::default(), false) + let a4 = BlockBuilderBuilder::new(&client) + .on_parent_block(a3.hash()) + .with_parent_block_number(*a3.header().number()) + .build() .unwrap() .build() .unwrap() @@ -594,8 +673,10 @@ fn finality_target_on_longest_chain_with_multiple_forks() { block_on(client.import(BlockOrigin::Own, a4.clone())).unwrap(); // A4 -> A5 - let a5 = client - .new_block_at(a4.hash(), Default::default(), false) + let a5 = BlockBuilderBuilder::new(&client) + .on_parent_block(a4.hash()) + .with_parent_block_number(4) + .build() .unwrap() .build() .unwrap() @@ -603,7 +684,11 @@ fn finality_target_on_longest_chain_with_multiple_forks() { block_on(client.import(BlockOrigin::Own, a5.clone())).unwrap(); // A1 -> B2 - let mut builder = client.new_block_at(a1.hash(), Default::default(), false).unwrap(); + let mut builder = BlockBuilderBuilder::new(&client) + .on_parent_block(a1.hash()) + .with_parent_block_number(1) + .build() + .unwrap(); // this push is required as otherwise B2 has the same hash as A2 and won't get imported builder .push_transfer(Transfer { @@ -617,8 +702,10 @@ fn finality_target_on_longest_chain_with_multiple_forks() { block_on(client.import(BlockOrigin::Own, b2.clone())).unwrap(); // B2 -> B3 - let b3 = client - .new_block_at(b2.hash(), Default::default(), false) + let b3 = BlockBuilderBuilder::new(&client) + .on_parent_block(b2.hash()) + .with_parent_block_number(2) + .build() .unwrap() .build() .unwrap() @@ -626,8 +713,10 @@ fn finality_target_on_longest_chain_with_multiple_forks() { block_on(client.import(BlockOrigin::Own, b3.clone())).unwrap(); // B3 -> B4 - let b4 = client - .new_block_at(b3.hash(), Default::default(), false) + let b4 = BlockBuilderBuilder::new(&client) + .on_parent_block(b3.hash()) + .with_parent_block_number(3) + .build() .unwrap() .build() .unwrap() @@ -635,7 +724,12 @@ fn finality_target_on_longest_chain_with_multiple_forks() { block_on(client.import(BlockOrigin::Own, b4.clone())).unwrap(); // B2 -> C3 - let mut builder = client.new_block_at(b2.hash(), Default::default(), false).unwrap(); + let mut builder = BlockBuilderBuilder::new(&client) + .on_parent_block(b2.hash()) + .with_parent_block_number(2) + .build() + .unwrap(); + // this push is required as otherwise C3 has the same hash as B3 and won't get imported builder .push_transfer(Transfer { @@ -649,7 +743,12 @@ fn finality_target_on_longest_chain_with_multiple_forks() { block_on(client.import(BlockOrigin::Own, c3.clone())).unwrap(); // A1 -> D2 - let mut 
builder = client.new_block_at(a1.hash(), Default::default(), false).unwrap(); + let mut builder = BlockBuilderBuilder::new(&client) + .on_parent_block(a1.hash()) + .with_parent_block_number(1) + .build() + .unwrap(); + // this push is required as otherwise D2 has the same hash as B2 and won't get imported builder .push_transfer(Transfer { @@ -780,13 +879,28 @@ fn finality_target_on_longest_chain_with_max_depth_higher_than_best() { // G -> A1 -> A2 let (mut client, chain_select) = TestClientBuilder::new().build_with_longest_chain(); + let chain = client.chain_info(); // G -> A1 - let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; + let a1 = BlockBuilderBuilder::new(&client) + .on_parent_block(chain.best_hash) + .with_parent_block_number(chain.best_number) + .build() + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); // A1 -> A2 - let a2 = client.new_block(Default::default()).unwrap().build().unwrap().block; + let a2 = BlockBuilderBuilder::new(&client) + .on_parent_block(a1.hash()) + .with_parent_block_number(1) + .build() + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); let genesis_hash = client.chain_info().genesis_hash; @@ -802,30 +916,70 @@ fn finality_target_with_best_not_on_longest_chain() { // ^best let (mut client, chain_select) = TestClientBuilder::new().build_with_longest_chain(); - let genesis_hash = client.chain_info().genesis_hash; + let chain = client.chain_info(); // G -> A1 - let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; + let a1 = BlockBuilderBuilder::new(&client) + .on_parent_block(chain.best_hash) + .with_parent_block_number(chain.best_number) + .build() + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); // A1 -> A2 - let a2 = client.new_block(Default::default()).unwrap().build().unwrap().block; + let a2 = BlockBuilderBuilder::new(&client) + .on_parent_block(a1.hash()) + .with_parent_block_number(1) + .build() + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); // A2 -> A3 - let a3 = client.new_block(Default::default()).unwrap().build().unwrap().block; + let a3 = BlockBuilderBuilder::new(&client) + .on_parent_block(a2.hash()) + .with_parent_block_number(2) + .build() + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a3.clone())).unwrap(); // A3 -> A4 - let a4 = client.new_block(Default::default()).unwrap().build().unwrap().block; + let a4 = BlockBuilderBuilder::new(&client) + .on_parent_block(a3.hash()) + .with_parent_block_number(3) + .build() + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a4.clone())).unwrap(); // A3 -> A5 - let a5 = client.new_block(Default::default()).unwrap().build().unwrap().block; + let a5 = BlockBuilderBuilder::new(&client) + .on_parent_block(a4.hash()) + .with_parent_block_number(4) + .build() + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a5.clone())).unwrap(); // A1 -> B2 - let mut builder = client.new_block_at(a1.hash(), Default::default(), false).unwrap(); + let mut builder = BlockBuilderBuilder::new(&client) + .on_parent_block(a1.hash()) + .with_parent_block_number(1) + .build() + .unwrap(); + // this push is required as otherwise B2 has the same hash as A2 and won't get imported builder .push_transfer(Transfer { @@ -838,7 +992,10 @@ fn 
finality_target_with_best_not_on_longest_chain() { let b2 = builder.build().unwrap().block; block_on(client.import(BlockOrigin::Own, b2.clone())).unwrap(); - assert_eq!(a5.hash(), block_on(chain_select.finality_target(genesis_hash, None)).unwrap()); + assert_eq!( + a5.hash(), + block_on(chain_select.finality_target(client.chain_info().genesis_hash, None)).unwrap() + ); assert_eq!(a5.hash(), block_on(chain_select.finality_target(a1.hash(), None)).unwrap()); assert_eq!(a5.hash(), block_on(chain_select.finality_target(a2.hash(), None)).unwrap()); assert_eq!(a5.hash(), block_on(chain_select.finality_target(a3.hash(), None)).unwrap()); @@ -846,8 +1003,10 @@ fn finality_target_with_best_not_on_longest_chain() { assert_eq!(a5.hash(), block_on(chain_select.finality_target(a5.hash(), None)).unwrap()); // B2 -> B3 - let b3 = client - .new_block_at(b2.hash(), Default::default(), false) + let b3 = BlockBuilderBuilder::new(&client) + .on_parent_block(b2.hash()) + .with_parent_block_number(2) + .build() .unwrap() .build() .unwrap() @@ -855,8 +1014,10 @@ fn finality_target_with_best_not_on_longest_chain() { block_on(client.import_as_best(BlockOrigin::Own, b3.clone())).unwrap(); // B3 -> B4 - let b4 = client - .new_block_at(b3.hash(), Default::default(), false) + let b4 = BlockBuilderBuilder::new(&client) + .on_parent_block(b3.hash()) + .with_parent_block_number(3) + .build() .unwrap() .build() .unwrap() @@ -870,7 +1031,10 @@ fn finality_target_with_best_not_on_longest_chain() { // double check that B3 is still the best... assert_eq!(client.info().best_hash, b3.hash()); - assert_eq!(b4.hash(), block_on(chain_select.finality_target(genesis_hash, None)).unwrap()); + assert_eq!( + b4.hash(), + block_on(chain_select.finality_target(client.chain_info().genesis_hash, None)).unwrap() + ); assert_eq!(b4.hash(), block_on(chain_select.finality_target(a1.hash(), None)).unwrap()); assert!(block_on(chain_select.finality_target(a2.hash(), None)).is_err()); assert_eq!(b4.hash(), block_on(chain_select.finality_target(b2.hash(), None)).unwrap()); @@ -887,12 +1051,21 @@ fn import_with_justification() { let mut finality_notifications = client.finality_notification_stream(); // G -> A1 - let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; + let a1 = BlockBuilderBuilder::new(&client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); // A1 -> A2 - let a2 = client - .new_block_at(a1.hash(), Default::default(), false) + let a2 = BlockBuilderBuilder::new(&client) + .on_parent_block(a1.hash()) + .with_parent_block_number(1) + .build() .unwrap() .build() .unwrap() @@ -902,8 +1075,10 @@ fn import_with_justification() { // A2 -> A3 let justification = Justifications::from((TEST_ENGINE_ID, vec![1, 2, 3])); - let a3 = client - .new_block_at(a2.hash(), Default::default(), false) + let a3 = BlockBuilderBuilder::new(&client) + .on_parent_block(a2.hash()) + .with_parent_block_number(2) + .build() .unwrap() .build() .unwrap() @@ -933,24 +1108,30 @@ fn importing_diverged_finalized_block_should_trigger_reorg() { let mut finality_notifications = client.finality_notification_stream(); - let a1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false) + let a1 = BlockBuilderBuilder::new(&client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() .unwrap() .build() .unwrap() .block; 
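The recurring change in the test hunks above and below is the replacement of the removed `BlockBuilderProvider::new_block`/`new_block_at` helpers with `sc_block_builder::BlockBuilderBuilder`, which takes the parent hash and parent number explicitly. A rough sketch of the new pattern, assuming the `substrate-test-runtime-client` types used in these tests (the helper name is hypothetical):

use sc_block_builder::BlockBuilderBuilder;
use sp_blockchain::HeaderBackend;
use sp_consensus::BlockOrigin;
use substrate_test_runtime_client::{prelude::*, runtime::Block, TestClient};

// Build and import an empty block on top of the current best block.
// Previously: client.new_block(Default::default()).unwrap().build().unwrap().block
async fn import_empty_child_of_best(client: &mut TestClient) -> Block {
    let chain = client.chain_info();
    let block = BlockBuilderBuilder::new(&*client)
        .on_parent_block(chain.best_hash)
        .with_parent_block_number(chain.best_number)
        .build() // configures and returns the `BlockBuilder`
        .unwrap()
        .build() // builds the block itself (no extrinsics pushed here)
        .unwrap()
        .block;
    client.import(BlockOrigin::Own, block.clone()).await.unwrap();
    block
}

Passing the parent number up front also removes the header lookup the old `new_block_at` performed through the blockchain backend, as the `BlockBuilderProvider` impl deleted from `client.rs` above shows.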
block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); - let a2 = client - .new_block_at(a1.hash(), Default::default(), false) + let a2 = BlockBuilderBuilder::new(&client) + .on_parent_block(a1.hash()) + .with_parent_block_number(1) + .build() .unwrap() .build() .unwrap() .block; block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); - let mut b1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false) + let mut b1 = BlockBuilderBuilder::new(&client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() .unwrap(); // needed to make sure B1 gets a different hash from A1 b1.push_transfer(Transfer { @@ -988,24 +1169,30 @@ fn finalizing_diverged_block_should_trigger_reorg() { let mut finality_notifications = client.finality_notification_stream(); - let a1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false) + let a1 = BlockBuilderBuilder::new(&client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() .unwrap() .build() .unwrap() .block; block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); - let a2 = client - .new_block_at(a1.hash(), Default::default(), false) + let a2 = BlockBuilderBuilder::new(&client) + .on_parent_block(a1.hash()) + .with_parent_block_number(1) + .build() .unwrap() .build() .unwrap() .block; block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); - let mut b1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false) + let mut b1 = BlockBuilderBuilder::new(&client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() .unwrap(); // needed to make sure B1 gets a different hash from A1 b1.push_transfer(Transfer { @@ -1018,8 +1205,10 @@ fn finalizing_diverged_block_should_trigger_reorg() { let b1 = b1.build().unwrap().block; block_on(client.import(BlockOrigin::Own, b1.clone())).unwrap(); - let b2 = client - .new_block_at(b1.hash(), Default::default(), false) + let b2 = BlockBuilderBuilder::new(&client) + .on_parent_block(b1.hash()) + .with_parent_block_number(1) + .build() .unwrap() .build() .unwrap() @@ -1044,8 +1233,10 @@ fn finalizing_diverged_block_should_trigger_reorg() { assert_eq!(block_on(select_chain.best_chain()).unwrap().hash(), b2.hash()); // after we build B3 on top of B2 and import it, it should be the new best block - let b3 = client - .new_block_at(b2.hash(), Default::default(), false) + let b3 = BlockBuilderBuilder::new(&client) + .on_parent_block(b2.hash()) + .with_parent_block_number(2) + .build() .unwrap() .build() .unwrap() @@ -1076,32 +1267,40 @@ fn finality_notifications_content() { let mut finality_notifications = client.finality_notification_stream(); - let a1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false) + let a1 = BlockBuilderBuilder::new(&client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() .unwrap() .build() .unwrap() .block; block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); - let a2 = client - .new_block_at(a1.hash(), Default::default(), false) + let a2 = BlockBuilderBuilder::new(&client) + .on_parent_block(a1.hash()) + .with_parent_block_number(1) + .build() .unwrap() .build() .unwrap() .block; block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); - let a3 = client - .new_block_at(a2.hash(), Default::default(), false) + let a3 = BlockBuilderBuilder::new(&client) + 
.on_parent_block(a2.hash()) + .with_parent_block_number(2) + .build() .unwrap() .build() .unwrap() .block; block_on(client.import(BlockOrigin::Own, a3.clone())).unwrap(); - let mut b1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false) + let mut b1 = BlockBuilderBuilder::new(&client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() .unwrap(); // needed to make sure B1 gets a different hash from A1 b1.push_transfer(Transfer { @@ -1114,16 +1313,20 @@ fn finality_notifications_content() { let b1 = b1.build().unwrap().block; block_on(client.import(BlockOrigin::Own, b1.clone())).unwrap(); - let b2 = client - .new_block_at(b1.hash(), Default::default(), false) + let b2 = BlockBuilderBuilder::new(&client) + .on_parent_block(b1.hash()) + .with_parent_block_number(1) + .build() .unwrap() .build() .unwrap() .block; block_on(client.import(BlockOrigin::Own, b2.clone())).unwrap(); - let mut c1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false) + let mut c1 = BlockBuilderBuilder::new(&client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() .unwrap(); // needed to make sure B1 gets a different hash from A1 c1.push_transfer(Transfer { @@ -1136,7 +1339,12 @@ fn finality_notifications_content() { let c1 = c1.build().unwrap().block; block_on(client.import(BlockOrigin::Own, c1.clone())).unwrap(); - let mut d3 = client.new_block_at(a2.hash(), Default::default(), false).unwrap(); + let mut d3 = BlockBuilderBuilder::new(&client) + .on_parent_block(a2.hash()) + .with_parent_block_number(2) + .build() + .unwrap(); + // needed to make sure D3 gets a different hash from A3 d3.push_transfer(Transfer { from: AccountKeyring::Alice.into(), @@ -1148,8 +1356,10 @@ fn finality_notifications_content() { let d3 = d3.build().unwrap().block; block_on(client.import(BlockOrigin::Own, d3.clone())).unwrap(); - let d4 = client - .new_block_at(d3.hash(), Default::default(), false) + let d4 = BlockBuilderBuilder::new(&client) + .on_parent_block(d3.hash()) + .with_parent_block_number(3) + .build() .unwrap() .build() .unwrap() @@ -1209,8 +1419,10 @@ fn state_reverted_on_reorg() { // G -> A1 -> A2 // \ // -> B1 - let mut a1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false) + let mut a1 = BlockBuilderBuilder::new(&client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() .unwrap(); a1.push_transfer(Transfer { from: AccountKeyring::Alice.into(), @@ -1222,8 +1434,10 @@ fn state_reverted_on_reorg() { let a1 = a1.build().unwrap().block; block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); - let mut b1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false) + let mut b1 = BlockBuilderBuilder::new(&client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() .unwrap(); b1.push_transfer(Transfer { from: AccountKeyring::Alice.into(), @@ -1237,7 +1451,11 @@ fn state_reverted_on_reorg() { block_on(client.import_as_best(BlockOrigin::Own, b1.clone())).unwrap(); assert_eq!(950 * DOLLARS, current_balance(&client)); - let mut a2 = client.new_block_at(a1.hash(), Default::default(), false).unwrap(); + let mut a2 = BlockBuilderBuilder::new(&client) + .on_parent_block(a1.hash()) + .with_parent_block_number(1) + .build() + .unwrap(); a2.push_transfer(Transfer { from: AccountKeyring::Alice.into(), to: AccountKeyring::Charlie.into(), @@ 
-1281,24 +1499,30 @@ fn doesnt_import_blocks_that_revert_finality() { // \ // -> B1 -> B2 -> B3 - let a1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false) + let a1 = BlockBuilderBuilder::new(&client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() .unwrap() .build() .unwrap() .block; block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); - let a2 = client - .new_block_at(a1.hash(), Default::default(), false) + let a2 = BlockBuilderBuilder::new(&client) + .on_parent_block(a1.hash()) + .with_parent_block_number(1) + .build() .unwrap() .build() .unwrap() .block; block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); - let mut b1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false) + let mut b1 = BlockBuilderBuilder::new(&client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() .unwrap(); // needed to make sure B1 gets a different hash from A1 @@ -1312,8 +1536,10 @@ fn doesnt_import_blocks_that_revert_finality() { let b1 = b1.build().unwrap().block; block_on(client.import(BlockOrigin::Own, b1.clone())).unwrap(); - let b2 = client - .new_block_at(b1.hash(), Default::default(), false) + let b2 = BlockBuilderBuilder::new(&client) + .on_parent_block(b1.hash()) + .with_parent_block_number(1) + .build() .unwrap() .build() .unwrap() @@ -1322,8 +1548,10 @@ fn doesnt_import_blocks_that_revert_finality() { // prepare B3 before we finalize A2, because otherwise we won't be able to // read changes trie configuration after A2 is finalized - let b3 = client - .new_block_at(b2.hash(), Default::default(), false) + let b3 = BlockBuilderBuilder::new(&client) + .on_parent_block(b2.hash()) + .with_parent_block_number(2) + .build() .unwrap() .build() .unwrap() @@ -1341,8 +1569,10 @@ fn doesnt_import_blocks_that_revert_finality() { // adding a C1 block which is lower than the last finalized should also // fail (with a cheaper check that doesn't require checking ancestry). 
- let mut c1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false) + let mut c1 = BlockBuilderBuilder::new(&client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() .unwrap(); // needed to make sure C1 gets a different hash from A1 and B1 @@ -1361,8 +1591,10 @@ fn doesnt_import_blocks_that_revert_finality() { assert_eq!(import_err.to_string(), expected_err.to_string()); - let a3 = client - .new_block_at(a2.hash(), Default::default(), false) + let a3 = BlockBuilderBuilder::new(&client) + .on_parent_block(a2.hash()) + .with_parent_block_number(2) + .build() .unwrap() .build() .unwrap() @@ -1409,8 +1641,10 @@ fn respects_block_rules() { // B'[2] - block not ok, (incorrect fork) // build B[1] - let block_ok = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false) + let block_ok = BlockBuilderBuilder::new(&client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() .unwrap() .build() .unwrap() @@ -1428,8 +1662,10 @@ fn respects_block_rules() { assert_eq!(block_on(client.check_block(params)).unwrap(), ImportResult::imported(false)); // build B'[1] - let mut block_not_ok = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false) + let mut block_not_ok = BlockBuilderBuilder::new(&client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() .unwrap(); block_not_ok.push_storage_change(vec![0], Some(vec![1])).unwrap(); let block_not_ok = block_not_ok.build().unwrap().block; @@ -1452,7 +1688,11 @@ fn respects_block_rules() { block_on(client.import_as_final(BlockOrigin::Own, block_ok)).unwrap(); // And check good fork (build B[2]) - let mut block_ok = client.new_block_at(block_ok_1_hash, Default::default(), false).unwrap(); + let mut block_ok = BlockBuilderBuilder::new(&client) + .on_parent_block(block_ok_1_hash) + .with_parent_block_number(1) + .build() + .unwrap(); block_ok.push_storage_change(vec![0], Some(vec![2])).unwrap(); let block_ok = block_ok.build().unwrap().block; assert_eq!(*block_ok.header().number(), 2); @@ -1471,8 +1711,11 @@ fn respects_block_rules() { assert_eq!(block_on(client.check_block(params)).unwrap(), ImportResult::imported(false)); // And now try bad fork (build B'[2]) - let mut block_not_ok = - client.new_block_at(block_ok_1_hash, Default::default(), false).unwrap(); + let mut block_not_ok = BlockBuilderBuilder::new(&client) + .on_parent_block(block_ok_1_hash) + .with_parent_block_number(1) + .build() + .unwrap(); block_not_ok.push_storage_change(vec![0], Some(vec![3])).unwrap(); let block_not_ok = block_not_ok.build().unwrap().block; assert_eq!(*block_not_ok.header().number(), 2); @@ -1527,15 +1770,19 @@ fn returns_status_for_pruned_blocks() { let mut client = TestClientBuilder::with_backend(backend).build(); - let a1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false) + let a1 = BlockBuilderBuilder::new(&client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() .unwrap() .build() .unwrap() .block; - let mut b1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false) + let mut b1 = BlockBuilderBuilder::new(&client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() .unwrap(); // b1 is created, but not imported @@ -1571,8 +1818,10 @@ fn returns_status_for_pruned_blocks() { ); 
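Where these tests build a second block on the same parent, an otherwise empty block would hash identically to its sibling and be rejected as already imported, hence the `push_transfer`/`push_storage_change` calls flagged in the comments. A sketch of that fork-building step, under the same assumptions as the sketch above (names and amounts are illustrative):

use sc_block_builder::BlockBuilderBuilder;
use sp_keyring::AccountKeyring;
use substrate_test_runtime_client::{prelude::*, runtime::{Block, Hash, Transfer}, TestClient};

// Build a fork block on `parent` that is guaranteed to differ from any
// empty sibling already imported on the same parent.
fn build_fork_block(client: &TestClient, parent: Hash, parent_number: u64) -> Block {
    let mut builder = BlockBuilderBuilder::new(client)
        .on_parent_block(parent)
        .with_parent_block_number(parent_number)
        .build()
        .unwrap();
    // The dummy transfer changes the extrinsics root, and therefore the block hash.
    builder
        .push_transfer(Transfer {
            from: AccountKeyring::Alice.into(),
            to: AccountKeyring::Ferdie.into(),
            amount: 1,
            nonce: 0,
        })
        .unwrap();
    builder.build().unwrap().block
}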
assert_eq!(client.block_status(check_block_a1.hash).unwrap(), BlockStatus::InChainWithState); - let a2 = client - .new_block_at(a1.hash(), Default::default(), false) + let a2 = BlockBuilderBuilder::new(&client) + .on_parent_block(a1.hash()) + .with_parent_block_number(1) + .build() .unwrap() .build() .unwrap() @@ -1599,8 +1848,10 @@ fn returns_status_for_pruned_blocks() { ); assert_eq!(client.block_status(check_block_a2.hash).unwrap(), BlockStatus::InChainWithState); - let a3 = client - .new_block_at(a2.hash(), Default::default(), false) + let a3 = BlockBuilderBuilder::new(&client) + .on_parent_block(a2.hash()) + .with_parent_block_number(2) + .build() .unwrap() .build() .unwrap() @@ -1864,7 +2115,15 @@ fn cleans_up_closed_notification_sinks_on_block_import() { // for some reason I can't seem to use `ClientBlockImportExt` let bake_and_import_block = |client: &mut TestClient, origin| { - let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + let chain = client.chain_info(); + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(chain.best_hash) + .with_parent_block_number(chain.best_number) + .build() + .unwrap() + .build() + .unwrap() + .block; let (header, extrinsics) = block.deconstruct(); let mut import = BlockImportParams::new(origin, header); @@ -1907,25 +2166,32 @@ fn reorg_triggers_a_notification_even_for_sources_that_should_not_trigger_notifi let mut notification_stream = futures::executor::block_on_stream(client.import_notification_stream()); - let a1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false) + let a1 = BlockBuilderBuilder::new(&client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() .unwrap() .build() .unwrap() .block; block_on(client.import(BlockOrigin::NetworkInitialSync, a1.clone())).unwrap(); - let a2 = client - .new_block_at(a1.hash(), Default::default(), false) + let a2 = BlockBuilderBuilder::new(&client) + .on_parent_block(a1.hash()) + .with_parent_block_number(1) + .build() .unwrap() .build() .unwrap() .block; block_on(client.import(BlockOrigin::NetworkInitialSync, a2.clone())).unwrap(); - let mut b1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false) + let mut b1 = BlockBuilderBuilder::new(&client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() .unwrap(); + // needed to make sure B1 gets a different hash from A1 b1.push_transfer(Transfer { from: AccountKeyring::Alice.into(), @@ -1937,8 +2203,10 @@ fn reorg_triggers_a_notification_even_for_sources_that_should_not_trigger_notifi let b1 = b1.build().unwrap().block; block_on(client.import(BlockOrigin::NetworkInitialSync, b1.clone())).unwrap(); - let b2 = client - .new_block_at(b1.hash(), Default::default(), false) + let b2 = BlockBuilderBuilder::new(&client) + .on_parent_block(b1.hash()) + .with_parent_block_number(1) + .build() .unwrap() .build() .unwrap() @@ -1973,8 +2241,10 @@ fn use_dalek_ext_works() { ), ); - let a1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false) + let a1 = BlockBuilderBuilder::new(&client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() .unwrap() .build() .unwrap() @@ -1997,12 +2267,21 @@ fn finalize_after_best_block_updates_best() { let mut client = substrate_test_runtime_client::new(); // G -> A1 - let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; + let a1 = 
BlockBuilderBuilder::new(&client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); // A1 -> A2 - let a2 = client - .new_block_at(a1.hash(), Default::default(), false) + let a2 = BlockBuilderBuilder::new(&client) + .on_parent_block(a1.hash()) + .with_parent_block_number(1) + .build() .unwrap() .build() .unwrap() @@ -2010,8 +2289,10 @@ fn finalize_after_best_block_updates_best() { block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); // A2 -> A3 - let a3 = client - .new_block_at(a2.hash(), Default::default(), false) + let a3 = BlockBuilderBuilder::new(&client) + .on_parent_block(a2.hash()) + .with_parent_block_number(2) + .build() .unwrap() .build() .unwrap() diff --git a/substrate/client/service/test/src/lib.rs b/substrate/client/service/test/src/lib.rs index 9700c7643c48514de6189fe619255516636a737a..456df73459a31f2a92561b8ee0e57761eb03e5ae 100644 --- a/substrate/client/service/test/src/lib.rs +++ b/substrate/client/service/test/src/lib.rs @@ -285,7 +285,7 @@ where base_port: u16, ) -> TestNet { sp_tracing::try_init_simple(); - fdlimit::raise_fd_limit(); + fdlimit::raise_fd_limit().unwrap(); let runtime = Runtime::new().expect("Error creating tokio runtime"); let mut net = TestNet { runtime, diff --git a/substrate/client/state-db/src/pruning.rs b/substrate/client/state-db/src/pruning.rs index 7bee2b1d99a34edd9516b924c54d56897117a053..623d30b098b62a41545d40b7c7f63096051378ba 100644 --- a/substrate/client/state-db/src/pruning.rs +++ b/substrate/client/state-db/src/pruning.rs @@ -406,12 +406,24 @@ impl RefWindow { commit: &mut CommitSet, ) -> Result<(), Error> { if self.base == 0 && self.is_empty() && number > 0 { - // assume that parent was canonicalized + // This branch is taken if the node imports the target block of a warp sync. + // assume that the block was canonicalized self.base = number; + // The parent of the block was the last block that got pruned. + commit + .meta + .inserted + .push((to_meta_key(LAST_PRUNED, &()), (number - 1).encode())); } else if (self.base + self.window_size()) != number { return Err(Error::StateDb(StateDbError::InvalidBlockNumber)) } - trace!(target: "state-db", "Adding to pruning window: {:?} ({} inserted, {} deleted)", hash, commit.data.inserted.len(), commit.data.deleted.len()); + trace!( + target: "state-db", + "Adding to pruning window: {:?} ({} inserted, {} deleted)", + hash, + commit.data.inserted.len(), + commit.data.deleted.len(), + ); let inserted = if matches!(self.queue, DeathRowQueue::Mem { .. }) { commit.data.inserted.iter().map(|(k, _)| k.clone()).collect() } else { @@ -869,4 +881,30 @@ mod tests { pruning.prune_one(&mut commit).unwrap(); db.commit(&commit); } + + /// Ensure that after warp syncing the state is stored correctly in the db. The warp sync target + /// block is imported with all its state at once. This test ensures that after a restart + /// `pruning` still knows that this block was imported. 
+ #[test] + fn store_correct_state_after_warp_syncing() { + for count_insertions in [true, false] { + let mut db = make_db(&[]); + let mut pruning: RefWindow<u64, H256, TestDb> = + RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, count_insertions).unwrap(); + let block = 10000; + + // import blocks + let mut commit = make_commit(&[], &[]); + pruning.note_canonical(&block, block, &mut commit).unwrap(); + push_last_canonicalized(block, &mut commit); + db.commit(&commit); + + // load a new queue from db + // `cache` should be the same + let pruning: RefWindow<u64, H256, TestDb> = + RefWindow::new(db, DEFAULT_MAX_BLOCK_CONSTRAINT, count_insertions).unwrap(); + + assert_eq!(HaveBlock::Yes, pruning.have_block(&block, block)); + } + } } diff --git a/substrate/client/storage-monitor/src/lib.rs b/substrate/client/storage-monitor/src/lib.rs index 655b940e8bedc0175c44464fd024a1b4ea0b37b4..b88b66d2d60d45badcfa60458194a37648bd1d82 100644 --- a/substrate/client/storage-monitor/src/lib.rs +++ b/substrate/client/storage-monitor/src/lib.rs @@ -42,9 +42,12 @@ pub enum Error { /// Parameters used to create the storage monitor. #[derive(Default, Debug, Clone, Args)] pub struct StorageMonitorParams { - /// Required available space on database storage. If available space for DB storage drops below - /// the given threshold, node will be gracefully terminated. If `0` is given monitoring will be - /// disabled. + /// Required available space on database storage. + /// + /// If available space for DB storage drops below the given threshold, the node will + /// be gracefully terminated. + /// + /// If `0` is given, monitoring will be disabled. #[arg(long = "db-storage-threshold", value_name = "MiB", default_value_t = 1024)] pub threshold: u64, diff --git a/substrate/client/sync-state-rpc/Cargo.toml b/substrate/client/sync-state-rpc/Cargo.toml index e582d0b7e4ffc1159be31ffaf6ecf5c347db38c7..569cd067f27d7f33122ac58b06d05f0b547a10cc 100644 --- a/substrate/client/sync-state-rpc/Cargo.toml +++ b/substrate/client/sync-state-rpc/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1" } jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } serde = { version = "1.0.188", features = ["derive"] } -serde_json = "1.0.107" +serde_json = "1.0.108" thiserror = "1.0.48" sc-chain-spec = { path = "../chain-spec" } sc-client-api = { path = "../api" } diff --git a/substrate/client/sysinfo/Cargo.toml b/substrate/client/sysinfo/Cargo.toml index fdf987ed45f265f9bd54ec0060a23e2ffac1d585..86f03050c44e9caf9f4acdd92b0666e4dbec4eed 100644 --- a/substrate/client/sysinfo/Cargo.toml +++ b/substrate/client/sysinfo/Cargo.toml @@ -19,9 +19,10 @@ libc = "0.2" log = "0.4.17" rand = "0.8.5" rand_pcg = "0.3.1" +derive_more = "0.99" regex = "1" serde = { version = "1.0.188", features = ["derive"] } -serde_json = "1.0.107" +serde_json = "1.0.108" sc-telemetry = { path = "../telemetry" } sp-core = { path = "../../primitives/core" } sp-io = { path = "../../primitives/io" } diff --git a/substrate/client/sysinfo/src/sysinfo.rs b/substrate/client/sysinfo/src/sysinfo.rs index 41161fc685d3196d691e8cff2976415007fe8344..bef87a83e46f6774ca112e4a9ba83b927d8eef4d 100644 --- a/substrate/client/sysinfo/src/sysinfo.rs +++ b/substrate/client/sysinfo/src/sysinfo.rs @@ -23,9 +23,11 @@ use sp_core::{sr25519, Pair}; use sp_io::crypto::sr25519_verify; use sp_std::{fmt, fmt::Formatter, prelude::*}; +use derive_more::From; use rand::{seq::SliceRandom, Rng, RngCore}; use serde::{de::Visitor, Deserialize, 
Deserializer, Serialize, Serializer}; use std::{ + fmt::Display, fs::File, io::{Seek, SeekFrom, Write}, ops::{Deref, DerefMut}, @@ -48,6 +50,37 @@ pub enum Metric { DiskRndWrite, } +/// Describes a checking failure for the hardware requirements. +#[derive(Debug, Clone, Copy, PartialEq)] +pub struct CheckFailure { + /// The metric that failed the check. + pub metric: Metric, + /// The expected minimum value. + pub expected: Throughput, + /// The measured value. + pub found: Throughput, +} + +/// A list of metrics that failed to meet the minimum hardware requirements. +#[derive(Debug, Clone, PartialEq, From)] +pub struct CheckFailures(pub Vec<CheckFailure>); + +impl Display for CheckFailures { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!(formatter, "Failed checks: ")?; + for failure in &self.0 { + write!( + formatter, + "{}(expected: {}, found: {}), ", + failure.metric.name(), + failure.expected, + failure.found + )? + } + Ok(()) + } +} + impl Metric { /// The category of the metric. pub fn category(&self) -> &'static str { @@ -626,33 +659,54 @@ pub fn gather_hwbench(scratch_directory: Option<&Path>) -> HwBench { impl Requirements { /// Whether the hardware requirements are met by the provided benchmark results. - pub fn check_hardware(&self, hwbench: &HwBench) -> bool { + pub fn check_hardware(&self, hwbench: &HwBench) -> Result<(), CheckFailures> { + let mut failures = Vec::new(); for requirement in self.0.iter() { match requirement.metric { Metric::Blake2256 => if requirement.minimum > hwbench.cpu_hashrate_score { - return false + failures.push(CheckFailure { + metric: requirement.metric, + expected: requirement.minimum, + found: hwbench.cpu_hashrate_score, + }); }, Metric::MemCopy => if requirement.minimum > hwbench.memory_memcpy_score { - return false + failures.push(CheckFailure { + metric: requirement.metric, + expected: requirement.minimum, + found: hwbench.memory_memcpy_score, + }); }, Metric::DiskSeqWrite => if let Some(score) = hwbench.disk_sequential_write_score { if requirement.minimum > score { - return false + failures.push(CheckFailure { + metric: requirement.metric, + expected: requirement.minimum, + found: score, + }); } }, Metric::DiskRndWrite => if let Some(score) = hwbench.disk_random_write_score { if requirement.minimum > score { - return false + failures.push(CheckFailure { + metric: requirement.metric, + expected: requirement.minimum, + found: score, + }); } }, Metric::Sr25519Verify => {}, } } - true + if failures.is_empty() { + Ok(()) + } else { + Err(failures.into()) + } } } diff --git a/substrate/client/telemetry/Cargo.toml b/substrate/client/telemetry/Cargo.toml index 6d59b9dfc1a6980fa08d1ade2cd12ec25713f358..a693a2884b547dec53f6aa1f04341774e256bbeb 100644 --- a/substrate/client/telemetry/Cargo.toml +++ b/substrate/client/telemetry/Cargo.toml @@ -23,6 +23,6 @@ pin-project = "1.0.12" sc-utils = { path = "../utils" } rand = "0.8.5" serde = { version = "1.0.188", features = ["derive"] } -serde_json = "1.0.107" +serde_json = "1.0.108" thiserror = "1.0.48" wasm-timer = "0.2.5" diff --git a/substrate/client/transaction-pool/README.md b/substrate/client/transaction-pool/README.md index b34b623b26e016c444458c66641e68adb795b45a..b55dc6482d64eb5342e684a82989f0ab2732b784 100644 --- a/substrate/client/transaction-pool/README.md +++ b/substrate/client/transaction-pool/README.md @@ -227,7 +227,7 @@ transactions that are prepared for block inclusion. Propagation is best effort, especially for block authors and is not directly incentivised. 
However the networking protocol might penalise peers that send invalid or useless transactions so we should be nice to others. Also see below a proposal -to instead of gossiping everyting have other peers request transactions they +to instead of gossiping everything have other peers request transactions they are interested in. Since the pool is expected to store more transactions than what can fit diff --git a/substrate/client/transaction-pool/api/Cargo.toml b/substrate/client/transaction-pool/api/Cargo.toml index 5ff5a4149ca983a36d7b940b15bf10bf8c802b0f..f5760c271ad7de8a9924759bb78568290559c77e 100644 --- a/substrate/client/transaction-pool/api/Cargo.toml +++ b/substrate/client/transaction-pool/api/Cargo.toml @@ -20,4 +20,4 @@ sp-core = { path = "../../../primitives/core", default-features = false} sp-runtime = { path = "../../../primitives/runtime", default-features = false} [dev-dependencies] -serde_json = "1.0" +serde_json = "1.0.108" diff --git a/substrate/client/transaction-pool/tests/pool.rs b/substrate/client/transaction-pool/tests/pool.rs index e5ab01ffbf09f73bb4f86632aeada334ec6dafc0..6b1a197440c11e69805465ed4c7195f07eaafc0f 100644 --- a/substrate/client/transaction-pool/tests/pool.rs +++ b/substrate/client/transaction-pool/tests/pool.rs @@ -24,7 +24,7 @@ use futures::{ prelude::*, task::Poll, }; -use sc_block_builder::BlockBuilderProvider; +use sc_block_builder::BlockBuilderBuilder; use sc_client_api::client::BlockchainEvents; use sc_transaction_pool::*; use sc_transaction_pool_api::{ @@ -1011,7 +1011,11 @@ fn import_notification_to_pool_maintain_works() { let mut import_stream = block_on_stream(client.import_notification_stream()); // Build the block with the transaction included - let mut block_builder = client.new_block(Default::default()).unwrap(); + let mut block_builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(best_hash) + .with_parent_block_number(0) + .build() + .unwrap(); block_builder.push(xt).unwrap(); let block = block_builder.build().unwrap().block; block_on(client.import(BlockOrigin::Own, block)).unwrap(); diff --git a/substrate/docs/CHANGELOG.md b/substrate/docs/CHANGELOG.md index 8a1b245a5b0cc1cbf81b11a03897b60bb701e283..042b7b8a2f012d6ae023d6a4a71653f0d926e4a5 100644 --- a/substrate/docs/CHANGELOG.md +++ b/substrate/docs/CHANGELOG.md @@ -45,7 +45,7 @@ board](https://github.com/paritytech/substrate/discussions). * Allow validators to block and kick their nominator set. (#7930) * Decouple Staking and Election - Part1: Support traits (#7908) * Introduces account existence providers reference counting (#7363) -* contracts: Cap the surcharge reward by the amount of rent that way payed by a contract (#7870) +* contracts: Cap the surcharge reward by the amount of rent that way paid by a contract (#7870) * Use checked math when calculating storage size (#7885) * Fix clear prefix check to avoid erasing child trie roots. (#7848) * contracts: Collect rent for the first block during deployment (#7847) diff --git a/substrate/docs/Upgrading-2.0-to-3.0.md b/substrate/docs/Upgrading-2.0-to-3.0.md index 3f2a3e7c5be22ee738321c6589bb4ce644a6c9d4..1be41a34ef340e4d07b397b0964b83b66011da0e 100644 --- a/substrate/docs/Upgrading-2.0-to-3.0.md +++ b/substrate/docs/Upgrading-2.0-to-3.0.md @@ -113,7 +113,7 @@ And update the overall definition for weights on frame and a few related types a ```diff= -const AVERAGE_ON_INITIALIZE_WEIGHT: Perbill = Perbill::from_percent(10); -+/// We assume that ~10% of the block weight is consumed by `on_initalize` handlers. 
++/// We assume that ~10% of the block weight is consumed by `on_initialize` handlers. +/// This is used to limit the maximal weight of a single extrinsic. +const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); +/// We allow `Normal` extrinsics to fill up the block up to 75%, the rest can be used diff --git a/substrate/frame/Cargo.toml b/substrate/frame/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..9f2f73ffb151ad8e5936dd23d743f4a93fd37594 --- /dev/null +++ b/substrate/frame/Cargo.toml @@ -0,0 +1,92 @@ +[package] +name = "frame" +version = "0.0.1-dev" +authors = ["Parity Technologies "] +edition = "2021" +license = "Apache-2.0" +homepage = "paritytech.github.io" +repository.workspace = true +description = "The single package to get you started with building frame pallets and runtimes" +publish = false + +[package.metadata.docs.rs] +# enable `experimental` feature for docs +features = ["experimental"] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +# external deps +parity-scale-codec = { version = "3.2.2", default-features = false, features = ["derive"] } +scale-info = { version = "2.6.0", default-features = false, features = ["derive"] } + +# primitive deps, used for developing FRAME pallets. +sp-runtime = { default-features = false, path = "../primitives/runtime" } +sp-std = { default-features = false, path = "../primitives/std" } +sp-io = { default-features = false, path = "../primitives/io" } +sp-core = { default-features = false, path = "../primitives/core" } +sp-arithmetic = { default-features = false, path = "../primitives/arithmetic" } + +# frame deps, for developing FRAME pallets. +frame-support = { default-features = false, path = "support" } +frame-system = { default-features = false, path = "system" } + +# primitive types used for developing FRAME runtimes. 
+sp-version = { default-features = false, path = "../primitives/version", optional = true } +sp-api = { default-features = false, path = "../primitives/api", optional = true } +sp-block-builder = { default-features = false, path = "../primitives/block-builder", optional = true } +sp-transaction-pool = { default-features = false, path = "../primitives/transaction-pool", optional = true } +sp-offchain = { default-features = false, path = "../primitives/offchain", optional = true } +sp-session = { default-features = false, path = "../primitives/session", optional = true } +sp-consensus-aura = { default-features = false, path = "../primitives/consensus/aura", optional = true } +sp-consensus-grandpa = { default-features = false, path = "../primitives/consensus/grandpa", optional = true } +sp-inherents = { default-features = false, path = "../primitives/inherents", optional = true } + +frame-executive = { default-features = false, path = "../frame/executive", optional = true } +frame-system-rpc-runtime-api = { default-features = false, path = "../frame/system/rpc/runtime-api", optional = true } + +docify = "0.2.0" +simple-mermaid = { git = "https://github.com/kianenigma/simple-mermaid.git", rev = "e48b187bcfd5cc75111acd9d241f1bd36604344b" } +log = { version = "0.4.20", default-features = false } + +[dev-dependencies] +pallet-examples = { path = "./examples" } + +[features] +default = [ "runtime", "std" ] +experimental = [ "frame-support/experimental", "frame-system/experimental" ] +runtime = [ + "frame-executive", + "frame-system-rpc-runtime-api", + "sp-api", + "sp-block-builder", + "sp-consensus-aura", + "sp-consensus-grandpa", + "sp-inherents", + "sp-offchain", + "sp-session", + "sp-transaction-pool", + "sp-version", +] +std = [ + "frame-executive?/std", + "frame-support/std", + "frame-system-rpc-runtime-api?/std", + "frame-system/std", + "log/std", + "parity-scale-codec/std", + "scale-info/std", + "sp-api?/std", + "sp-arithmetic/std", + "sp-block-builder?/std", + "sp-consensus-aura?/std", + "sp-consensus-grandpa?/std", + "sp-core/std", + "sp-inherents?/std", + "sp-io/std", + "sp-offchain?/std", + "sp-runtime/std", + "sp-session?/std", + "sp-std/std", + "sp-transaction-pool?/std", + "sp-version?/std", +] diff --git a/substrate/frame/alliance/Cargo.toml b/substrate/frame/alliance/Cargo.toml index f1ad9af50ef729ca6047b49df496d11f42c2bea3..d7d7352975aeafbbb78b72e93c861a0d9df5dbbe 100644 --- a/substrate/frame/alliance/Cargo.toml +++ b/substrate/frame/alliance/Cargo.toml @@ -17,7 +17,7 @@ array-bytes = { version = "6.1", optional = true } log = { version = "0.4.14", default-features = false } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.0.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } sp-std = { path = "../../primitives/std", default-features = false} sp-core = { path = "../../primitives/core", default-features = false} diff --git a/substrate/frame/alliance/src/benchmarking.rs b/substrate/frame/alliance/src/benchmarking.rs index eb32c6c466c91a9ae1ef5b9b380025af2c6731ce..37cc3314037e5175039802fc2c39b225910ac912 100644 --- a/substrate/frame/alliance/src/benchmarking.rs +++ b/substrate/frame/alliance/src/benchmarking.rs @@ -17,6 +17,8 @@ //! Alliance pallet benchmarking. 
+#![cfg(feature = "runtime-benchmarks")] + use sp_runtime::traits::{Bounded, Hash, StaticLookup}; use sp_std::{ cmp, @@ -25,7 +27,7 @@ use sp_std::{ prelude::*, }; -use frame_benchmarking::v1::{account, benchmarks_instance_pallet, BenchmarkError}; +use frame_benchmarking::{account, impl_benchmark_test_suite, v2::*, BenchmarkError}; use frame_support::traits::{EnsureOrigin, Get, UnfilteredDispatchable}; use frame_system::{pallet_prelude::BlockNumberFor, Pallet as System, RawOrigin as SystemOrigin}; @@ -94,32 +96,31 @@ fn set_members, I: 'static>() { T::InitializeMembers::initialize_members(&[fellows.as_slice()].concat()); } -benchmarks_instance_pallet! { - // This tests when proposal is created and queued as "proposed" - propose_proposed { - let b in 1 .. MAX_BYTES; - let m in 2 .. T::MaxFellows::get(); - let p in 1 .. T::MaxProposals::get(); +#[instance_benchmarks] +mod benchmarks { + use super::*; + // This tests when proposal is created and queued as "proposed" + #[benchmark] + fn propose_proposed( + b: Linear<1, MAX_BYTES>, + m: Linear<2, { T::MaxFellows::get() }>, + p: Linear<1, { T::MaxProposals::get() }>, + ) -> Result<(), BenchmarkError> { let bytes_in_storage = b + size_of::() as u32 + 32; // Construct `members`. - let fellows = (0 .. m).map(fellow::).collect::>(); + let fellows = (0..m).map(fellow::).collect::>(); let proposer = fellows[0].clone(); - Alliance::::init_members( - SystemOrigin::Root.into(), - fellows, - vec![], - )?; + Alliance::::init_members(SystemOrigin::Root.into(), fellows, vec![])?; let threshold = m; // Add previous proposals. - for i in 0 .. p - 1 { + for i in 0..p - 1 { // Proposals should be different so that different proposal hashes are generated - let proposal: T::Proposal = AllianceCall::::set_rule { - rule: rule(vec![i as u8; b as usize]) - }.into(); + let proposal: T::Proposal = + AllianceCall::::set_rule { rule: rule(vec![i as u8; b as usize]) }.into(); Alliance::::propose( SystemOrigin::Signed(proposer.clone()).into(), threshold, @@ -128,45 +129,45 @@ benchmarks_instance_pallet! { )?; } - let proposal: T::Proposal = AllianceCall::::set_rule { rule: rule(vec![p as u8; b as usize]) }.into(); + let proposal: T::Proposal = + AllianceCall::::set_rule { rule: rule(vec![p as u8; b as usize]) }.into(); + + #[extrinsic_call] + propose( + SystemOrigin::Signed(proposer.clone()), + threshold, + Box::new(proposal.clone()), + bytes_in_storage, + ); - }: propose(SystemOrigin::Signed(proposer.clone()), threshold, Box::new(proposal.clone()), bytes_in_storage) - verify { - // New proposal is recorded let proposal_hash = T::Hashing::hash_of(&proposal); assert_eq!(T::ProposalProvider::proposal_of(proposal_hash), Some(proposal)); + Ok(()) } - vote { - // We choose 5 as a minimum so we always trigger a vote in the voting loop (`for j in ...`) - let m in 5 .. T::MaxFellows::get(); - + #[benchmark] + fn vote(m: Linear<5, { T::MaxFellows::get() }>) -> Result<(), BenchmarkError> { let p = T::MaxProposals::get(); let b = MAX_BYTES; - let bytes_in_storage = b + size_of::() as u32 + 32; + let _bytes_in_storage = b + size_of::() as u32 + 32; // Construct `members`. - let fellows = (0 .. 
m).map(fellow::).collect::>(); + let fellows = (0..m).map(fellow::).collect::>(); let proposer = fellows[0].clone(); let members = fellows.clone(); - Alliance::::init_members( - SystemOrigin::Root.into(), - fellows, - vec![], - )?; + Alliance::::init_members(SystemOrigin::Root.into(), fellows, vec![])?; // Threshold is 1 less than the number of members so that one person can vote nay let threshold = m - 1; // Add previous proposals let mut last_hash = T::Hash::default(); - for i in 0 .. p { + for i in 0..p { // Proposals should be different so that different proposal hashes are generated - let proposal: T::Proposal = AllianceCall::::set_rule { - rule: rule(vec![i as u8; b as usize]) - }.into(); + let proposal: T::Proposal = + AllianceCall::::set_rule { rule: rule(vec![i as u8; b as usize]) }.into(); Alliance::::propose( SystemOrigin::Signed(proposer.clone()).into(), threshold, @@ -178,7 +179,7 @@ benchmarks_instance_pallet! { let index = p - 1; // Have almost everyone vote aye on last proposal, while keeping it from passing. - for j in 0 .. m - 3 { + for j in 0..m - 3 { let voter = &members[j as usize]; Alliance::::vote( SystemOrigin::Signed(voter.clone()).into(), @@ -203,28 +204,28 @@ benchmarks_instance_pallet! { // Whitelist voter account from further DB operations. let voter_key = frame_system::Account::::hashed_key_for(&voter); frame_benchmarking::benchmarking::add_to_whitelist(voter_key.into()); - }: _(SystemOrigin::Signed(voter), last_hash.clone(), index, approve) - verify { - } - close_early_disapproved { - // We choose 4 as a minimum so we always trigger a vote in the voting loop (`for j in ...`) - let m in 4 .. T::MaxFellows::get(); - let p in 1 .. T::MaxProposals::get(); + #[extrinsic_call] + _(SystemOrigin::Signed(voter), last_hash.clone(), index, approve); + //nothing to verify + Ok(()) + } + + #[benchmark] + fn close_early_disapproved( + m: Linear<4, { T::MaxFellows::get() }>, + p: Linear<1, { T::MaxProposals::get() }>, + ) -> Result<(), BenchmarkError> { let bytes = 100; let bytes_in_storage = bytes + size_of::() as u32 + 32; // Construct `members`. - let fellows = (0 .. m).map(fellow::).collect::>(); + let fellows = (0..m).map(fellow::).collect::>(); let members = fellows.clone(); - Alliance::::init_members( - SystemOrigin::Root.into(), - fellows, - vec![], - )?; + Alliance::::init_members(SystemOrigin::Root.into(), fellows, vec![])?; let proposer = members[0].clone(); let voter = members[1].clone(); @@ -234,11 +235,10 @@ benchmarks_instance_pallet! { // Add previous proposals let mut last_hash = T::Hash::default(); - for i in 0 .. p { + for i in 0..p { // Proposals should be different so that different proposal hashes are generated - let proposal: T::Proposal = AllianceCall::::set_rule { - rule: rule(vec![i as u8; bytes as usize]) - }.into(); + let proposal: T::Proposal = + AllianceCall::::set_rule { rule: rule(vec![i as u8; bytes as usize]) }.into(); Alliance::::propose( SystemOrigin::Signed(proposer.clone()).into(), threshold, @@ -251,7 +251,7 @@ benchmarks_instance_pallet! { let index = p - 1; // Have most everyone vote aye on last proposal, while keeping it from passing. - for j in 2 .. m - 1 { + for j in 2..m - 1 { let voter = &members[j as usize]; Alliance::::vote( SystemOrigin::Signed(voter.clone()).into(), @@ -280,44 +280,41 @@ benchmarks_instance_pallet! { // Whitelist voter account from further DB operations. 
let voter_key = frame_system::Account::::hashed_key_for(&voter); frame_benchmarking::benchmarking::add_to_whitelist(voter_key.into()); - }: close(SystemOrigin::Signed(voter), last_hash.clone(), index, Weight::MAX, bytes_in_storage) - verify { - // The last proposal is removed. + + #[extrinsic_call] + close(SystemOrigin::Signed(voter), last_hash.clone(), index, Weight::MAX, bytes_in_storage); + assert_eq!(T::ProposalProvider::proposal_of(last_hash), None); + Ok(()) } - close_early_approved { - let b in 1 .. MAX_BYTES; - // We choose 4 as a minimum so we always trigger a vote in the voting loop (`for j in ...`) - let m in 4 .. T::MaxFellows::get(); - let p in 1 .. T::MaxProposals::get(); - + // We choose 4 as a minimum for param m, so we always trigger a vote in the voting loop (`for j + // in ...`) + #[benchmark] + fn close_early_approved( + b: Linear<1, MAX_BYTES>, + m: Linear<4, { T::MaxFellows::get() }>, + p: Linear<1, { T::MaxProposals::get() }>, + ) -> Result<(), BenchmarkError> { let bytes_in_storage = b + size_of::() as u32 + 32; // Construct `members`. - let fellows = (0 .. m).map(fellow::).collect::>(); + let fellows = (0..m).map(fellow::).collect::>(); let members = fellows.clone(); - Alliance::::init_members( - SystemOrigin::Root.into(), - fellows, - vec![], - )?; + Alliance::::init_members(SystemOrigin::Root.into(), fellows, vec![])?; let proposer = members[0].clone(); - let voter = members[1].clone(); - // Threshold is 2 so any two ayes will approve the vote let threshold = 2; // Add previous proposals let mut last_hash = T::Hash::default(); - for i in 0 .. p { + for i in 0..p { // Proposals should be different so that different proposal hashes are generated - let proposal: T::Proposal = AllianceCall::::set_rule { - rule: rule(vec![i as u8; b as usize]) - }.into(); + let proposal: T::Proposal = + AllianceCall::::set_rule { rule: rule(vec![i as u8; b as usize]) }.into(); Alliance::::propose( SystemOrigin::Signed(proposer.clone()).into(), threshold, @@ -329,7 +326,8 @@ benchmarks_instance_pallet! { } let index = p - 1; - // Caller switches vote to nay on their own proposal, allowing them to be the deciding approval vote + // Caller switches vote to nay on their own proposal, allowing them to be the deciding + // approval vote Alliance::::vote( SystemOrigin::Signed(proposer.clone()).into(), last_hash.clone(), @@ -338,7 +336,7 @@ benchmarks_instance_pallet! { )?; // Have almost everyone vote nay on last proposal, while keeping it from failing. - for j in 2 .. m - 1 { + for j in 2..m - 1 { let voter = &members[j as usize]; Alliance::::vote( SystemOrigin::Signed(voter.clone()).into(), @@ -364,30 +362,28 @@ benchmarks_instance_pallet! { index, true, )?; - }: close(SystemOrigin::Signed(voter), last_hash.clone(), index, Weight::MAX, bytes_in_storage) - verify { - // The last proposal is removed. + + #[extrinsic_call] + close(SystemOrigin::Signed(voter), last_hash.clone(), index, Weight::MAX, bytes_in_storage); + assert_eq!(T::ProposalProvider::proposal_of(last_hash), None); + Ok(()) } - close_disapproved { - // We choose 4 as a minimum so we always trigger a vote in the voting loop (`for j in ...`) - let m in 2 .. T::MaxFellows::get(); - let p in 1 .. T::MaxProposals::get(); - + #[benchmark] + fn close_disapproved( + m: Linear<2, { T::MaxFellows::get() }>, + p: Linear<1, { T::MaxProposals::get() }>, + ) -> Result<(), BenchmarkError> { let bytes = 100; let bytes_in_storage = bytes + size_of::() as u32 + 32; // Construct `members`. - let fellows = (0 .. 
m).map(fellow::).collect::>(); + let fellows = (0..m).map(fellow::).collect::>(); let members = fellows.clone(); - Alliance::::init_members( - SystemOrigin::Root.into(), - fellows, - vec![], - )?; + Alliance::::init_members(SystemOrigin::Root.into(), fellows, vec![])?; let proposer = members[0].clone(); let voter = members[1].clone(); @@ -397,11 +393,10 @@ benchmarks_instance_pallet! { // Add proposals let mut last_hash = T::Hash::default(); - for i in 0 .. p { + for i in 0..p { // Proposals should be different so that different proposal hashes are generated - let proposal: T::Proposal = AllianceCall::::set_rule { - rule: rule(vec![i as u8; bytes as usize]) - }.into(); + let proposal: T::Proposal = + AllianceCall::::set_rule { rule: rule(vec![i as u8; bytes as usize]) }.into(); Alliance::::propose( SystemOrigin::Signed(proposer.clone()).into(), threshold, @@ -415,7 +410,7 @@ benchmarks_instance_pallet! { let index = p - 1; // Have almost everyone vote aye on last proposal, while keeping it from passing. // A few abstainers will be the nay votes needed to fail the vote. - for j in 2 .. m - 1 { + for j in 2..m - 1 { let voter = &members[j as usize]; Alliance::::vote( SystemOrigin::Signed(voter.clone()).into(), @@ -434,44 +429,41 @@ benchmarks_instance_pallet! { System::::set_block_number(BlockNumberFor::::max_value()); - }: close(SystemOrigin::Signed(voter), last_hash.clone(), index, Weight::MAX, bytes_in_storage) - verify { + #[extrinsic_call] + close(SystemOrigin::Signed(voter), last_hash.clone(), index, Weight::MAX, bytes_in_storage); + // The last proposal is removed. assert_eq!(T::ProposalProvider::proposal_of(last_hash), None); + Ok(()) } - close_approved { - let b in 1 .. MAX_BYTES; - // We choose 4 fellows as a minimum so we always trigger a vote in the voting loop (`for j in ...`) - let m in 5 .. T::MaxFellows::get(); - let p in 1 .. T::MaxProposals::get(); - + // We choose 5 fellows as a minimum so we always trigger a vote in the voting loop (`for j in + // ...`) + #[benchmark] + fn close_approved( + b: Linear<1, MAX_BYTES>, + m: Linear<5, { T::MaxFellows::get() }>, + p: Linear<1, { T::MaxProposals::get() }>, + ) -> Result<(), BenchmarkError> { let bytes_in_storage = b + size_of::() as u32 + 32; // Construct `members`. - let fellows = (0 .. m).map(fellow::).collect::>(); + let fellows = (0..m).map(fellow::).collect::>(); let members = fellows.clone(); - Alliance::::init_members( - SystemOrigin::Root.into(), - fellows, - vec![], - )?; + Alliance::::init_members(SystemOrigin::Root.into(), fellows, vec![])?; let proposer = members[0].clone(); - let voter = members[1].clone(); - // Threshold is two, so any two ayes will pass the vote let threshold = 2; // Add proposals let mut last_hash = T::Hash::default(); - for i in 0 .. p { + for i in 0..p { // Proposals should be different so that different proposal hashes are generated - let proposal: T::Proposal = AllianceCall::::set_rule { - rule: rule(vec![i as u8; b as usize]) - }.into(); + let proposal: T::Proposal = + AllianceCall::::set_rule { rule: rule(vec![i as u8; b as usize]) }.into(); Alliance::::propose( SystemOrigin::Signed(proposer.clone()).into(), threshold, @@ -487,70 +479,71 @@ benchmarks_instance_pallet! { SystemOrigin::Signed(proposer.clone()).into(), last_hash.clone(), p - 1, - true // Vote aye. + true, // Vote aye. )?; let index = p - 1; // Have almost everyone vote nay on last proposal, while keeping it from failing. // A few abstainers will be the aye votes needed to pass the vote. - for j in 2 .. 
m - 1 { + for j in 2..m - 1 { let voter = &members[j as usize]; Alliance::::vote( SystemOrigin::Signed(voter.clone()).into(), last_hash.clone(), index, - false + false, )?; } // caller is prime, prime already votes aye by creating the proposal System::::set_block_number(BlockNumberFor::::max_value()); - }: close(SystemOrigin::Signed(voter), last_hash.clone(), index, Weight::MAX, bytes_in_storage) - verify { - // The last proposal is removed. + #[extrinsic_call] + close( + SystemOrigin::Signed(proposer), + last_hash.clone(), + index, + Weight::MAX, + bytes_in_storage, + ); + assert_eq!(T::ProposalProvider::proposal_of(last_hash), None); + Ok(()) } - init_members { - // at least 1 fellow - let m in 1 .. T::MaxFellows::get(); - let z in 0 .. T::MaxAllies::get(); + #[benchmark] + fn init_members( + m: Linear<1, { T::MaxFellows::get() }>, + z: Linear<0, { T::MaxAllies::get() }>, + ) -> Result<(), BenchmarkError> { + let mut fellows = (0..m).map(fellow::).collect::>(); + let mut allies = (0..z).map(ally::).collect::>(); - let mut fellows = (0 .. m).map(fellow::).collect::>(); - let mut allies = (0 .. z).map(ally::).collect::>(); + #[extrinsic_call] + _(SystemOrigin::Root, fellows.clone(), allies.clone()); - }: _(SystemOrigin::Root, fellows.clone(), allies.clone()) - verify { fellows.sort(); allies.sort(); - assert_last_event::(Event::MembersInitialized { - fellows: fellows.clone(), - allies: allies.clone(), - }.into()); + assert_last_event::( + Event::MembersInitialized { fellows: fellows.clone(), allies: allies.clone() }.into(), + ); assert_eq!(Alliance::::members(MemberRole::Fellow), fellows); assert_eq!(Alliance::::members(MemberRole::Ally), allies); + Ok(()) } - disband { - // at least 1 founders - let x in 1 .. T::MaxFellows::get(); - let y in 0 .. T::MaxAllies::get(); - let z in 0 .. T::MaxMembersCount::get() / 2; - - let fellows = (0 .. x).map(fellow::).collect::>(); - let allies = (0 .. y).map(ally::).collect::>(); - let witness = DisbandWitness{ - fellow_members: x, - ally_members: y, - }; + #[benchmark] + fn disband( + x: Linear<1, { T::MaxFellows::get() }>, + y: Linear<0, { T::MaxAllies::get() }>, + z: Linear<0, { T::MaxMembersCount::get() / 2 }>, + ) -> Result<(), BenchmarkError> { + let fellows = (0..x).map(fellow::).collect::>(); + let allies = (0..y).map(ally::).collect::>(); + let witness = DisbandWitness { fellow_members: x, ally_members: y }; // setting the Alliance to disband on the benchmark call - Alliance::::init_members( - SystemOrigin::Root.into(), - fellows.clone(), - allies.clone(), - )?; + Alliance::::init_members(SystemOrigin::Root.into(), fellows.clone(), allies.clone())?; // reserve deposits let deposit = T::AllyDeposit::get(); @@ -561,18 +554,25 @@ benchmarks_instance_pallet! { assert_eq!(Alliance::::voting_members_count(), x); assert_eq!(Alliance::::ally_members_count(), y); - }: _(SystemOrigin::Root, witness) - verify { - assert_last_event::(Event::AllianceDisbanded { - fellow_members: x, - ally_members: y, - unreserved: cmp::min(z, x + y), - }.into()); + + #[extrinsic_call] + _(SystemOrigin::Root, witness); + + assert_last_event::( + Event::AllianceDisbanded { + fellow_members: x, + ally_members: y, + unreserved: cmp::min(z, x + y), + } + .into(), + ); assert!(!Alliance::::is_initialized()); + Ok(()) } - set_rule { + #[benchmark] + fn set_rule() -> Result<(), BenchmarkError> { set_members::(); let rule = rule(b"hello world"); @@ -580,61 +580,86 @@ benchmarks_instance_pallet! 
{ let call = Call::::set_rule { rule: rule.clone() }; let origin = T::AdminOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - }: { call.dispatch_bypass_filter(origin)? } - verify { + + #[block] + { + call.dispatch_bypass_filter(origin)?; + } assert_eq!(Alliance::::rule(), Some(rule.clone())); assert_last_event::(Event::NewRuleSet { rule }.into()); + Ok(()) } - announce { + #[benchmark] + fn announce() -> Result<(), BenchmarkError> { set_members::(); let announcement = announcement(b"hello world"); let call = Call::::announce { announcement: announcement.clone() }; - let origin = - T::AnnouncementOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - }: { call.dispatch_bypass_filter(origin)? } - verify { + let origin = T::AnnouncementOrigin::try_successful_origin() + .map_err(|_| BenchmarkError::Weightless)?; + + #[block] + { + call.dispatch_bypass_filter(origin)?; + } + assert!(Alliance::::announcements().contains(&announcement)); assert_last_event::(Event::Announced { announcement }.into()); + Ok(()) } - remove_announcement { + #[benchmark] + fn remove_announcement() -> Result<(), BenchmarkError> { set_members::(); let announcement = announcement(b"hello world"); - let announcements: BoundedVec<_, T::MaxAnnouncementsCount> = BoundedVec::try_from(vec![announcement.clone()]).unwrap(); + let announcements: BoundedVec<_, T::MaxAnnouncementsCount> = + BoundedVec::try_from(vec![announcement.clone()]).unwrap(); Announcements::::put(announcements); let call = Call::::remove_announcement { announcement: announcement.clone() }; - let origin = - T::AnnouncementOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - }: { call.dispatch_bypass_filter(origin)? } - verify { - assert!(Alliance::::announcements().is_empty()); + let origin = T::AnnouncementOrigin::try_successful_origin() + .map_err(|_| BenchmarkError::Weightless)?; + + #[block] + { + call.dispatch_bypass_filter(origin)?; + } + + assert!(!Alliance::::announcements().contains(&announcement)); assert_last_event::(Event::AnnouncementRemoved { announcement }.into()); + Ok(()) } - join_alliance { + #[benchmark] + fn join_alliance() -> Result<(), BenchmarkError> { set_members::(); let outsider = outsider::(1); assert!(!Alliance::::is_member(&outsider)); assert_eq!(DepositOf::::get(&outsider), None); - }: _(SystemOrigin::Signed(outsider.clone())) - verify { + + #[extrinsic_call] + _(SystemOrigin::Signed(outsider.clone())); + assert!(Alliance::::is_member_of(&outsider, MemberRole::Ally)); // outsider is now an ally assert_eq!(DepositOf::::get(&outsider), Some(T::AllyDeposit::get())); // with a deposit assert!(!Alliance::::has_voting_rights(&outsider)); // allies don't have voting rights - assert_last_event::(Event::NewAllyJoined { - ally: outsider, - nominator: None, - reserved: Some(T::AllyDeposit::get()) - }.into()); + assert_last_event::( + Event::NewAllyJoined { + ally: outsider, + nominator: None, + reserved: Some(T::AllyDeposit::get()), + } + .into(), + ); + Ok(()) } - nominate_ally { + #[benchmark] + fn nominate_ally() -> Result<(), BenchmarkError> { set_members::(); let fellow1 = fellow::(1); @@ -645,19 +670,23 @@ benchmarks_instance_pallet! 
{ assert_eq!(DepositOf::::get(&outsider), None); let outsider_lookup = T::Lookup::unlookup(outsider.clone()); - }: _(SystemOrigin::Signed(fellow1.clone()), outsider_lookup) - verify { + + #[extrinsic_call] + _(SystemOrigin::Signed(fellow1.clone()), outsider_lookup); + assert!(Alliance::::is_member_of(&outsider, MemberRole::Ally)); // outsider is now an ally assert_eq!(DepositOf::::get(&outsider), None); // without a deposit assert!(!Alliance::::has_voting_rights(&outsider)); // allies don't have voting rights - assert_last_event::(Event::NewAllyJoined { - ally: outsider, - nominator: Some(fellow1), - reserved: None - }.into()); + assert_last_event::( + Event::NewAllyJoined { ally: outsider, nominator: Some(fellow1), reserved: None } + .into(), + ); + + Ok(()) } - elevate_ally { + #[benchmark] + fn elevate_ally() -> Result<(), BenchmarkError> { set_members::(); let ally1 = ally::(1); @@ -665,59 +694,69 @@ benchmarks_instance_pallet! { let ally1_lookup = T::Lookup::unlookup(ally1.clone()); let call = Call::::elevate_ally { ally: ally1_lookup }; - let origin = - T::MembershipManager::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - }: { call.dispatch_bypass_filter(origin)? } - verify { + let origin = T::MembershipManager::try_successful_origin() + .map_err(|_| BenchmarkError::Weightless)?; + + #[block] + { + call.dispatch_bypass_filter(origin)?; + } + assert!(!Alliance::::is_ally(&ally1)); assert!(Alliance::::has_voting_rights(&ally1)); assert_last_event::(Event::AllyElevated { ally: ally1 }.into()); + Ok(()) } - give_retirement_notice { + #[benchmark] + fn give_retirement_notice() -> Result<(), BenchmarkError> { set_members::(); let fellow2 = fellow::(2); assert!(Alliance::::has_voting_rights(&fellow2)); - }: _(SystemOrigin::Signed(fellow2.clone())) - verify { + + #[extrinsic_call] + _(SystemOrigin::Signed(fellow2.clone())); + assert!(Alliance::::is_member_of(&fellow2, MemberRole::Retiring)); assert_eq!( RetiringMembers::::get(&fellow2), Some(System::::block_number() + T::RetirementPeriod::get()) ); - assert_last_event::( - Event::MemberRetirementPeriodStarted {member: fellow2}.into() - ); + assert_last_event::(Event::MemberRetirementPeriodStarted { member: fellow2 }.into()); + Ok(()) } - retire { + #[benchmark] + fn retire() -> Result<(), BenchmarkError> { set_members::(); let fellow2 = fellow::(2); assert!(Alliance::::has_voting_rights(&fellow2)); assert_eq!( - Alliance::::give_retirement_notice( - SystemOrigin::Signed(fellow2.clone()).into() - ), + Alliance::::give_retirement_notice(SystemOrigin::Signed(fellow2.clone()).into()), Ok(()) ); System::::set_block_number(System::::block_number() + T::RetirementPeriod::get()); assert_eq!(DepositOf::::get(&fellow2), Some(T::AllyDeposit::get())); - }: _(SystemOrigin::Signed(fellow2.clone())) - verify { + + #[extrinsic_call] + _(SystemOrigin::Signed(fellow2.clone())); + assert!(!Alliance::::is_member(&fellow2)); assert_eq!(DepositOf::::get(&fellow2), None); - assert_last_event::(Event::MemberRetired { - member: fellow2, - unreserved: Some(T::AllyDeposit::get()) - }.into()); + assert_last_event::( + Event::MemberRetired { member: fellow2, unreserved: Some(T::AllyDeposit::get()) } + .into(), + ); + Ok(()) } - kick_member { + #[benchmark] + fn kick_member() -> Result<(), BenchmarkError> { set_members::(); let fellow2 = fellow::(2); @@ -726,58 +765,71 @@ benchmarks_instance_pallet! 
{ let fellow2_lookup = T::Lookup::unlookup(fellow2.clone()); let call = Call::::kick_member { who: fellow2_lookup }; - let origin = - T::MembershipManager::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - }: { call.dispatch_bypass_filter(origin)? } - verify { + let origin = T::MembershipManager::try_successful_origin() + .map_err(|_| BenchmarkError::Weightless)?; + + #[block] + { + call.dispatch_bypass_filter(origin)?; + } + assert!(!Alliance::::is_member(&fellow2)); assert_eq!(DepositOf::::get(&fellow2), None); - assert_last_event::(Event::MemberKicked { - member: fellow2, - slashed: Some(T::AllyDeposit::get()) - }.into()); + assert_last_event::( + Event::MemberKicked { member: fellow2, slashed: Some(T::AllyDeposit::get()) }.into(), + ); + Ok(()) } - add_unscrupulous_items { - let n in 0 .. T::MaxUnscrupulousItems::get(); - let l in 0 .. T::MaxWebsiteUrlLength::get(); - + #[benchmark] + fn add_unscrupulous_items( + n: Linear<0, { T::MaxUnscrupulousItems::get() }>, + l: Linear<0, { T::MaxWebsiteUrlLength::get() }>, + ) -> Result<(), BenchmarkError> { set_members::(); - let accounts = (0 .. n) - .map(|i| generate_unscrupulous_account::(i)) + let accounts = (0..n).map(|i| generate_unscrupulous_account::(i)).collect::>(); + let websites = (0..n) + .map(|i| -> BoundedVec { + BoundedVec::try_from(vec![i as u8; l as usize]).unwrap() + }) .collect::>(); - let websites = (0 .. n).map(|i| -> BoundedVec { - BoundedVec::try_from(vec![i as u8; l as usize]).unwrap() - }).collect::>(); let mut unscrupulous_list = Vec::with_capacity(accounts.len() + websites.len()); unscrupulous_list.extend(accounts.into_iter().map(UnscrupulousItem::AccountId)); unscrupulous_list.extend(websites.into_iter().map(UnscrupulousItem::Website)); let call = Call::::add_unscrupulous_items { items: unscrupulous_list.clone() }; - let origin = - T::AnnouncementOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - }: { call.dispatch_bypass_filter(origin)? } - verify { + let origin = T::AnnouncementOrigin::try_successful_origin() + .map_err(|_| BenchmarkError::Weightless)?; + + #[block] + { + call.dispatch_bypass_filter(origin)?; + } + assert_last_event::(Event::UnscrupulousItemAdded { items: unscrupulous_list }.into()); + Ok(()) } - remove_unscrupulous_items { - let n in 0 .. T::MaxUnscrupulousItems::get(); - let l in 0 .. T::MaxWebsiteUrlLength::get(); - + #[benchmark] + fn remove_unscrupulous_items( + n: Linear<0, { T::MaxUnscrupulousItems::get() }>, + l: Linear<0, { T::MaxWebsiteUrlLength::get() }>, + ) -> Result<(), BenchmarkError> { set_members::(); - let mut accounts = (0 .. n) - .map(|i| generate_unscrupulous_account::(i)) - .collect::>(); + let mut accounts = + (0..n).map(|i| generate_unscrupulous_account::(i)).collect::>(); accounts.sort(); let accounts: BoundedVec<_, T::MaxUnscrupulousItems> = accounts.try_into().unwrap(); UnscrupulousAccounts::::put(accounts.clone()); - let mut websites = (0 .. n).map(|i| -> BoundedVec<_, T::MaxWebsiteUrlLength> - { BoundedVec::try_from(vec![i as u8; l as usize]).unwrap() }).collect::>(); + let mut websites = (0..n) + .map(|i| -> BoundedVec<_, T::MaxWebsiteUrlLength> { + BoundedVec::try_from(vec![i as u8; l as usize]).unwrap() + }) + .collect::>(); websites.sort(); let websites: BoundedVec<_, T::MaxUnscrupulousItems> = websites.try_into().unwrap(); UnscrupulousWebsites::::put(websites.clone()); @@ -787,24 +839,31 @@ benchmarks_instance_pallet! 
{ unscrupulous_list.extend(websites.into_iter().map(UnscrupulousItem::Website)); let call = Call::::remove_unscrupulous_items { items: unscrupulous_list.clone() }; - let origin = - T::AnnouncementOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - }: { call.dispatch_bypass_filter(origin)? } - verify { - assert_last_event::(Event::UnscrupulousItemRemoved { items: unscrupulous_list }.into()); + let origin = T::AnnouncementOrigin::try_successful_origin() + .map_err(|_| BenchmarkError::Weightless)?; + + #[block] + { + call.dispatch_bypass_filter(origin)?; + } + + assert_last_event::( + Event::UnscrupulousItemRemoved { items: unscrupulous_list }.into(), + ); + Ok(()) } - abdicate_fellow_status { + #[benchmark] + fn abdicate_fellow_status() -> Result<(), BenchmarkError> { set_members::(); let fellow2 = fellow::(2); assert!(Alliance::::has_voting_rights(&fellow2)); - }: _(SystemOrigin::Signed(fellow2.clone())) - verify { - assert!(Alliance::::is_member_of(&fellow2, MemberRole::Ally)); - assert_last_event::( - Event::FellowAbdicated {fellow: fellow2}.into() - ); + #[extrinsic_call] + _(SystemOrigin::Signed(fellow2.clone())); + + assert_last_event::(Event::FellowAbdicated { fellow: fellow2 }.into()); + Ok(()) } impl_benchmark_test_suite!(Alliance, crate::mock::new_bench_ext(), crate::mock::Test); diff --git a/substrate/frame/alliance/src/lib.rs b/substrate/frame/alliance/src/lib.rs index 627399f805b3115617e03dd501f2d747e27c9c77..d4703db68dbba34988516ba08b5efcebcb79e5e9 100644 --- a/substrate/frame/alliance/src/lib.rs +++ b/substrate/frame/alliance/src/lib.rs @@ -112,7 +112,6 @@ use frame_support::{ }, weights::Weight, }; -use pallet_identity::IdentityField; use scale_info::TypeInfo; pub use pallet::*; @@ -135,9 +134,9 @@ type NegativeImbalanceOf = <>::Currency as Currency< /// Interface required for identity verification. pub trait IdentityVerifier { - /// Function that returns whether an account has an identity registered with the identity - /// provider. - fn has_identity(who: &AccountId, fields: u64) -> bool; + /// Function that returns whether an account has the required identities registered with the + /// identity provider. + fn has_required_identities(who: &AccountId) -> bool; /// Whether an account has been deemed "good" by the provider. fn has_good_judgement(who: &AccountId) -> bool; @@ -149,7 +148,7 @@ pub trait IdentityVerifier { /// The non-provider. Imposes no restrictions on account identity. impl IdentityVerifier for () { - fn has_identity(_who: &AccountId, _fields: u64) -> bool { + fn has_required_identities(_who: &AccountId) -> bool { true } @@ -339,7 +338,7 @@ pub mod pallet { /// Balance is insufficient for the required deposit. InsufficientFunds, /// The account's identity does not have display field and website field. - WithoutIdentityDisplayAndWebsite, + WithoutRequiredIdentityFields, /// The account's identity has no good judgement. WithoutGoodIdentityJudgement, /// The proposal hash is not found. 
@@ -1082,13 +1081,10 @@ impl, I: 'static> Pallet { } fn has_identity(who: &T::AccountId) -> DispatchResult { - const IDENTITY_FIELD_DISPLAY: u64 = IdentityField::Display as u64; - const IDENTITY_FIELD_WEB: u64 = IdentityField::Web as u64; - let judgement = |who: &T::AccountId| -> DispatchResult { ensure!( - T::IdentityVerifier::has_identity(who, IDENTITY_FIELD_DISPLAY | IDENTITY_FIELD_WEB), - Error::::WithoutIdentityDisplayAndWebsite + T::IdentityVerifier::has_required_identities(who), + Error::::WithoutRequiredIdentityFields ); ensure!( T::IdentityVerifier::has_good_judgement(who), diff --git a/substrate/frame/alliance/src/mock.rs b/substrate/frame/alliance/src/mock.rs index 82dbbe9c0bb9102e1301001abfe9bc53e59a3b5e..ace5214f145f7683b22aa9a1ec8f9689cd97221c 100644 --- a/substrate/frame/alliance/src/mock.rs +++ b/substrate/frame/alliance/src/mock.rs @@ -31,7 +31,10 @@ pub use frame_support::{ BoundedVec, }; use frame_system::{EnsureRoot, EnsureSignedBy}; -use pallet_identity::{Data, IdentityInfo, Judgement}; +use pallet_identity::{ + legacy::{IdentityField, IdentityInfo}, + Data, Judgement, +}; pub use crate as pallet_alliance; @@ -69,6 +72,7 @@ impl pallet_balances::Config for Test { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = (); } @@ -95,9 +99,9 @@ impl pallet_collective::Config for Test { } parameter_types! { - pub const BasicDeposit: u64 = 10; - pub const FieldDeposit: u64 = 10; - pub const SubAccountDeposit: u64 = 10; + pub const BasicDeposit: u64 = 100; + pub const ByteDeposit: u64 = 10; + pub const SubAccountDeposit: u64 = 100; pub const MaxSubAccounts: u32 = 2; pub const MaxAdditionalFields: u32 = 2; pub const MaxRegistrars: u32 = 20; @@ -116,10 +120,10 @@ impl pallet_identity::Config for Test { type RuntimeEvent = RuntimeEvent; type Currency = Balances; type BasicDeposit = BasicDeposit; - type FieldDeposit = FieldDeposit; + type ByteDeposit = ByteDeposit; type SubAccountDeposit = SubAccountDeposit; type MaxSubAccounts = MaxSubAccounts; - type MaxAdditionalFields = MaxAdditionalFields; + type IdentityInformation = IdentityInfo; type MaxRegistrars = MaxRegistrars; type Slashed = (); type RegistrarOrigin = EnsureOneOrRoot; @@ -129,8 +133,8 @@ impl pallet_identity::Config for Test { pub struct AllianceIdentityVerifier; impl IdentityVerifier for AllianceIdentityVerifier { - fn has_identity(who: &AccountId, fields: u64) -> bool { - Identity::has_identity(who, fields) + fn has_required_identities(who: &AccountId) -> bool { + Identity::has_identity(who, (IdentityField::Display | IdentityField::Web).bits()) } fn has_good_judgement(who: &AccountId) -> bool { @@ -230,20 +234,40 @@ frame_support::construct_runtime!( } ); +fn test_identity_info() -> IdentityInfo { + IdentityInfo { + additional: BoundedVec::default(), + display: Data::Raw(b"name".to_vec().try_into().unwrap()), + legal: Data::default(), + web: Data::Raw(b"website".to_vec().try_into().unwrap()), + riot: Data::default(), + email: Data::default(), + pgp_fingerprint: None, + image: Data::default(), + twitter: Data::default(), + } +} + +pub(super) fn test_identity_info_deposit() -> ::Balance { + let basic_deposit: u64 = ::BasicDeposit::get(); + let byte_deposit: u64 = ::ByteDeposit::get(); + byte_deposit * test_identity_info().encoded_size() as u64 + basic_deposit +} + pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); pallet_balances::GenesisConfig:: { balances: vec![ - 
(1, 50), - (2, 50), - (3, 50), - (4, 50), - (5, 30), - (6, 50), - (7, 50), - (8, 50), - (9, 50), + (1, 1000), + (2, 1000), + (3, 1000), + (4, 1000), + (5, test_identity_info_deposit() + 10), + (6, 1000), + (7, 1000), + (8, 1000), + (9, 1000), ], } .assimilate_storage(&mut t) @@ -261,17 +285,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities { ext.execute_with(|| { assert_ok!(Identity::add_registrar(RuntimeOrigin::signed(1), 1)); - let info = IdentityInfo { - additional: BoundedVec::default(), - display: Data::Raw(b"name".to_vec().try_into().unwrap()), - legal: Data::default(), - web: Data::Raw(b"website".to_vec().try_into().unwrap()), - riot: Data::default(), - email: Data::default(), - pgp_fingerprint: None, - image: Data::default(), - twitter: Data::default(), - }; + let info = test_identity_info(); assert_ok!(Identity::set_identity(RuntimeOrigin::signed(1), Box::new(info.clone()))); assert_ok!(Identity::provide_judgement( RuntimeOrigin::signed(1), diff --git a/substrate/frame/alliance/src/tests.rs b/substrate/frame/alliance/src/tests.rs index 098fd86bbae1ec6ed6d2abbd2c24430e690a6e93..8011627b237af15e8d30379bf14afc2564421050 100644 --- a/substrate/frame/alliance/src/tests.rs +++ b/substrate/frame/alliance/src/tests.rs @@ -105,6 +105,9 @@ fn init_members_works() { #[test] fn disband_works() { new_test_ext().execute_with(|| { + let id_deposit = test_identity_info_deposit(); + let expected_join_deposit = ::AllyDeposit::get(); + assert_eq!(Balances::free_balance(9), 1000 - id_deposit); // ensure alliance is set assert_eq!(Alliance::voting_members(), vec![1, 2, 3]); @@ -113,10 +116,10 @@ fn disband_works() { assert!(Alliance::is_member_of(&2, MemberRole::Retiring)); // join alliance and reserve funds - assert_eq!(Balances::free_balance(9), 40); + assert_eq!(Balances::free_balance(9), 1000 - id_deposit); assert_ok!(Alliance::join_alliance(RuntimeOrigin::signed(9))); - assert_eq!(Alliance::deposit_of(9), Some(25)); - assert_eq!(Balances::free_balance(9), 15); + assert_eq!(Alliance::deposit_of(9), Some(expected_join_deposit)); + assert_eq!(Balances::free_balance(9), 1000 - id_deposit - expected_join_deposit); assert!(Alliance::is_member_of(&9, MemberRole::Ally)); // fails without root @@ -146,7 +149,7 @@ fn disband_works() { // assert a retiring member from the previous Alliance not removed assert!(Alliance::is_member_of(&2, MemberRole::Retiring)); // deposit unreserved - assert_eq!(Balances::free_balance(9), 40); + assert_eq!(Balances::free_balance(9), 1000 - id_deposit); System::assert_last_event(mock::RuntimeEvent::Alliance(crate::Event::AllianceDisbanded { fellow_members: 2, @@ -358,6 +361,9 @@ fn remove_announcement_works() { #[test] fn join_alliance_works() { new_test_ext().execute_with(|| { + let id_deposit = test_identity_info_deposit(); + let join_deposit = ::AllyDeposit::get(); + assert_eq!(Balances::free_balance(9), 1000 - id_deposit); // check already member assert_noop!( Alliance::join_alliance(RuntimeOrigin::signed(1)), @@ -384,8 +390,10 @@ fn join_alliance_works() { Error::::InsufficientFunds ); + assert_eq!(Balances::free_balance(4), 1000 - id_deposit); // success to submit assert_ok!(Alliance::join_alliance(RuntimeOrigin::signed(4))); + assert_eq!(Balances::free_balance(4), 1000 - id_deposit - join_deposit); assert_eq!(Alliance::deposit_of(4), Some(25)); assert_eq!(Alliance::members(MemberRole::Ally), vec![4]); @@ -405,7 +413,7 @@ fn join_alliance_works() { #[cfg(not(feature = "runtime-benchmarks"))] assert_noop!( Alliance::join_alliance(RuntimeOrigin::signed(7)), - 
Error::::WithoutIdentityDisplayAndWebsite + Error::::WithoutRequiredIdentityFields ); }); } @@ -460,7 +468,7 @@ fn nominate_ally_works() { #[cfg(not(feature = "runtime-benchmarks"))] assert_noop!( Alliance::join_alliance(RuntimeOrigin::signed(7)), - Error::::WithoutIdentityDisplayAndWebsite + Error::::WithoutRequiredIdentityFields ); }); } diff --git a/substrate/frame/asset-conversion/Cargo.toml b/substrate/frame/asset-conversion/Cargo.toml index 62e71663c5b37abe0d9125e8517d02547b92cc5a..de898d4ccde14c1c9fa783cf98dabf4d63e756bf 100644 --- a/substrate/frame/asset-conversion/Cargo.toml +++ b/substrate/frame/asset-conversion/Cargo.toml @@ -17,7 +17,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } sp-api = { path = "../../primitives/api", default-features = false} sp-core = { path = "../../primitives/core", default-features = false} sp-io = { path = "../../primitives/io", default-features = false} @@ -39,6 +39,7 @@ std = [ "frame-system/std", "pallet-assets/std", "pallet-balances/std", + "primitive-types/std", "scale-info/std", "sp-api/std", "sp-arithmetic/std", diff --git a/substrate/frame/asset-conversion/src/mock.rs b/substrate/frame/asset-conversion/src/mock.rs index 3a19f39e7ca62d38a18b77ff143b24bde25f5ede..4eee701f193e11e077e805593cd67ed1a1d043fd 100644 --- a/substrate/frame/asset-conversion/src/mock.rs +++ b/substrate/frame/asset-conversion/src/mock.rs @@ -87,6 +87,7 @@ impl pallet_balances::Config for Test { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = (); } diff --git a/substrate/frame/asset-rate/Cargo.toml b/substrate/frame/asset-rate/Cargo.toml index 2338e8711eda8cfa63de6cd1ac65e9f1b5574c7e..734bc5ef43f5780ad023748f6e7720d9b6a1322b 100644 --- a/substrate/frame/asset-rate/Cargo.toml +++ b/substrate/frame/asset-rate/Cargo.toml @@ -7,7 +7,6 @@ homepage = "https://substrate.io" edition.workspace = true license = "Apache-2.0" repository.workspace = true -publish = false [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} diff --git a/substrate/frame/asset-rate/src/benchmarking.rs b/substrate/frame/asset-rate/src/benchmarking.rs index 21d53a89e3976987a54c3ce03e8fdc91260f2330..6fcc7c7fadb24d0a367ba15520d2822abe08d89f 100644 --- a/substrate/frame/asset-rate/src/benchmarking.rs +++ b/substrate/frame/asset-rate/src/benchmarking.rs @@ -25,7 +25,6 @@ use frame_benchmarking::v2::*; use frame_support::assert_ok; use frame_system::RawOrigin; use sp_core::crypto::FromEntropy; -use sp_std::vec; /// Trait describing the factory 
function for the `AssetKind` parameter. pub trait AssetKindFactory { diff --git a/substrate/frame/asset-rate/src/mock.rs b/substrate/frame/asset-rate/src/mock.rs index 5fe0d4240af585b0383aea47a47fed5ae92afc34..9ca0f0f3cc388641eb27a148d405268e4b294552 100644 --- a/substrate/frame/asset-rate/src/mock.rs +++ b/substrate/frame/asset-rate/src/mock.rs @@ -73,6 +73,7 @@ impl pallet_balances::Config for Test { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); type MaxHolds = (); type MaxFreezes = (); diff --git a/substrate/frame/assets/Cargo.toml b/substrate/frame/assets/Cargo.toml index 24c7a3b32b8f1f8c9e0e3ae33ecbcda7cbf2da4f..a48964f13668005aa6db14f82026adc603645de5 100644 --- a/substrate/frame/assets/Cargo.toml +++ b/substrate/frame/assets/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } sp-std = { path = "../../primitives/std", default-features = false} # Needed for various traits. In our case, `OnFinalize`. sp-runtime = { path = "../../primitives/runtime", default-features = false} diff --git a/substrate/frame/assets/src/mock.rs b/substrate/frame/assets/src/mock.rs index 32ad02da9041207cce23227f3fb6212790e95601..2c2203bcdada253106a2df6cbc361a513148f521 100644 --- a/substrate/frame/assets/src/mock.rs +++ b/substrate/frame/assets/src/mock.rs @@ -83,6 +83,7 @@ impl pallet_balances::Config for Test { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type FreezeIdentifier = (); type MaxHolds = (); type MaxFreezes = (); diff --git a/substrate/frame/assets/src/tests.rs b/substrate/frame/assets/src/tests.rs index 06d4ec1211737f368937a27141c21c5b562c1c0e..f1b116a0f4a0d4ec612aed393290f5f18adb8ddd 100644 --- a/substrate/frame/assets/src/tests.rs +++ b/substrate/frame/assets/src/tests.rs @@ -1328,7 +1328,7 @@ fn freezer_should_work() { #[test] fn imbalances_should_work() { - use frame_support::traits::tokens::fungibles::Balanced; + use frame_support::traits::fungibles::Balanced; new_test_ext().execute_with(|| { assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); @@ -1591,7 +1591,7 @@ fn assets_from_genesis_should_exist() { #[test] fn querying_name_symbol_and_decimals_should_work() { new_test_ext().execute_with(|| { - use frame_support::traits::tokens::fungibles::metadata::Inspect; + use frame_support::traits::fungibles::metadata::Inspect; assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); assert_ok!(Assets::force_set_metadata( RuntimeOrigin::root(), @@ -1610,7 +1610,7 @@ fn querying_name_symbol_and_decimals_should_work() { #[test] fn querying_allowance_should_work() { new_test_ext().execute_with(|| { - use frame_support::traits::tokens::fungibles::approvals::{Inspect, Mutate}; + use frame_support::traits::fungibles::approvals::{Inspect, Mutate}; assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); Balances::make_free_balance_be(&1, 2); @@ -1635,7 +1635,7 @@ fn transfer_large_asset() { #[test] fn querying_roles_should_work() { 
new_test_ext().execute_with(|| { - use frame_support::traits::tokens::fungibles::roles::Inspect; + use frame_support::traits::fungibles::roles::Inspect; assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); assert_ok!(Assets::set_team( RuntimeOrigin::signed(1), diff --git a/substrate/frame/atomic-swap/Cargo.toml b/substrate/frame/atomic-swap/Cargo.toml index 1b4eabaf0cff5e69bbe65a118b36b4c41e740cc1..8315330d7fee7333512e8233b26a4f526e073d69 100644 --- a/substrate/frame/atomic-swap/Cargo.toml +++ b/substrate/frame/atomic-swap/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} sp-core = { path = "../../primitives/core", default-features = false} diff --git a/substrate/frame/atomic-swap/src/tests.rs b/substrate/frame/atomic-swap/src/tests.rs index e20e1df564c7363c5a500906e35e9cdffb1ef2a3..92eb9a04458575521ae25d2b76a1383f89c3a50a 100644 --- a/substrate/frame/atomic-swap/src/tests.rs +++ b/substrate/frame/atomic-swap/src/tests.rs @@ -77,6 +77,7 @@ impl pallet_balances::Config for Test { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = (); } diff --git a/substrate/frame/aura/Cargo.toml b/substrate/frame/aura/Cargo.toml index 3d2879bb89f55294f8264bb31eba2ab20648e57b..bfe9193e9b528fa17da86cf85ab4e3c43a093621 100644 --- a/substrate/frame/aura/Cargo.toml +++ b/substrate/frame/aura/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} pallet-timestamp = { path = "../timestamp", default-features = false} diff --git a/substrate/frame/aura/src/lib.rs b/substrate/frame/aura/src/lib.rs index b314a3601e15f123c162d50230172c59f04c6fc5..f7506db05d1d82f32a4ec27a20096bb2aaf7548a 100644 --- a/substrate/frame/aura/src/lib.rs +++ b/substrate/frame/aura/src/lib.rs @@ -230,6 +230,11 @@ impl Pallet { } } + /// Return current authorities length. + pub fn authorities_len() -> usize { + Authorities::::decode_len().unwrap_or(0) + } + /// Get the current slot from the pre-runtime digests. 
fn current_slot_from_digests() -> Option { let digest = frame_system::Pallet::::digest(); @@ -363,7 +368,7 @@ impl FindAuthor for Pallet { for (id, mut data) in digests.into_iter() { if id == AURA_ENGINE_ID { let slot = Slot::decode(&mut data).ok()?; - let author_index = *slot % Self::authorities().len() as u64; + let author_index = *slot % Self::authorities_len() as u64; return Some(author_index as u32) } } @@ -408,8 +413,9 @@ impl OnTimestampSet for Pallet { let timestamp_slot = moment / slot_duration; let timestamp_slot = Slot::from(timestamp_slot.saturated_into::()); - assert!( - CurrentSlot::::get() == timestamp_slot, + assert_eq!( + CurrentSlot::::get(), + timestamp_slot, "Timestamp slot must match `CurrentSlot`" ); } diff --git a/substrate/frame/aura/src/tests.rs b/substrate/frame/aura/src/tests.rs index d3ce877d3e60dd6fe67fcf4ab67b747c5db8f719..b3a5e144fad8e300059159e61ad8412e84e38d2d 100644 --- a/substrate/frame/aura/src/tests.rs +++ b/substrate/frame/aura/src/tests.rs @@ -29,7 +29,8 @@ use sp_runtime::{Digest, DigestItem}; fn initial_values() { build_ext_and_execute_test(vec![0, 1, 2, 3], || { assert_eq!(Aura::current_slot(), 0u64); - assert_eq!(Aura::authorities().len(), 4); + assert_eq!(Aura::authorities().len(), Aura::authorities_len()); + assert_eq!(Aura::authorities_len(), 4); }); } diff --git a/substrate/frame/authority-discovery/Cargo.toml b/substrate/frame/authority-discovery/Cargo.toml index d1e37777adad8aac65e639d0b6a64f71108a3ca3..eb30ed3007c79e04be5e447e4458bdf867c1e671 100644 --- a/substrate/frame/authority-discovery/Cargo.toml +++ b/substrate/frame/authority-discovery/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} pallet-session = { path = "../session", default-features = false, features = [ diff --git a/substrate/frame/authorship/Cargo.toml b/substrate/frame/authorship/Cargo.toml index ff089a8e7ad2f6760edf31a50fa37e983d4cfc5d..bc1e6221a4589615e52be6fa63892b4841373d1f 100644 --- a/substrate/frame/authorship/Cargo.toml +++ b/substrate/frame/authorship/Cargo.toml @@ -17,7 +17,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", ] } impl-trait-for-tuples = "0.2.2" -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} sp-runtime = { path = "../../primitives/runtime", default-features = false} diff --git a/substrate/frame/babe/Cargo.toml b/substrate/frame/babe/Cargo.toml index e610d34197b9978b3313df69143dd0fb33be8a40..2dc414a784d308d03cdd28d86111754ef33748ab 100644 --- a/substrate/frame/babe/Cargo.toml +++ b/substrate/frame/babe/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive", "serde"] 
} +scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} diff --git a/substrate/frame/babe/src/lib.rs b/substrate/frame/babe/src/lib.rs index 4b99cd517968d8ddcf336cb5ccb022b611d57a5a..57e1dbb6b53c4a543e93f955d1d71bd04f144ae3 100644 --- a/substrate/frame/babe/src/lib.rs +++ b/substrate/frame/babe/src/lib.rs @@ -900,8 +900,9 @@ impl OnTimestampSet for Pallet { let timestamp_slot = moment / slot_duration; let timestamp_slot = Slot::from(timestamp_slot.saturated_into::()); - assert!( - CurrentSlot::::get() == timestamp_slot, + assert_eq!( + CurrentSlot::::get(), + timestamp_slot, "Timestamp slot must match `CurrentSlot`" ); } diff --git a/substrate/frame/babe/src/mock.rs b/substrate/frame/babe/src/mock.rs index a3f755902b598b7a55b005efcd675295991ad2a0..0003c6f9f11a1e3e1636cff00769cf114bad3d38 100644 --- a/substrate/frame/babe/src/mock.rs +++ b/substrate/frame/babe/src/mock.rs @@ -125,6 +125,7 @@ impl pallet_balances::Config for Test { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = (); } @@ -173,7 +174,7 @@ impl pallet_staking::Config for Test { type SessionInterface = Self; type UnixTime = pallet_timestamp::Pallet; type EraPayout = pallet_staking::ConvertCurve; - type MaxNominatorRewardedPerValidator = ConstU32<64>; + type MaxExposurePageSize = ConstU32<64>; type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type NextNewSession = Session; type ElectionProvider = onchain::OnChainExecution; diff --git a/substrate/frame/babe/src/tests.rs b/substrate/frame/babe/src/tests.rs index ec4e6fd972708a643d8daccbcf811866966fd37b..e65f1844f88f9ffd9dc4e0a5c0f81e4f5e1c9459 100644 --- a/substrate/frame/babe/src/tests.rs +++ b/substrate/frame/babe/src/tests.rs @@ -440,7 +440,7 @@ fn report_equivocation_current_session_works() { assert_eq!(Staking::slashable_balance_of(validator), 10_000); assert_eq!( - Staking::eras_stakers(1, validator), + Staking::eras_stakers(1, &validator), pallet_staking::Exposure { total: 10_000, own: 10_000, others: vec![] }, ); } @@ -481,7 +481,7 @@ fn report_equivocation_current_session_works() { assert_eq!(Balances::total_balance(&offending_validator_id), 10_000_000 - 10_000); assert_eq!(Staking::slashable_balance_of(&offending_validator_id), 0); assert_eq!( - Staking::eras_stakers(2, offending_validator_id), + Staking::eras_stakers(2, &offending_validator_id), pallet_staking::Exposure { total: 0, own: 0, others: vec![] }, ); @@ -494,7 +494,7 @@ fn report_equivocation_current_session_works() { assert_eq!(Balances::total_balance(validator), 10_000_000); assert_eq!(Staking::slashable_balance_of(validator), 10_000); assert_eq!( - Staking::eras_stakers(2, validator), + Staking::eras_stakers(2, &validator), pallet_staking::Exposure { total: 10_000, own: 10_000, others: vec![] }, ); } @@ -553,7 +553,7 @@ fn report_equivocation_old_session_works() { assert_eq!(Balances::total_balance(&offending_validator_id), 10_000_000 - 10_000); assert_eq!(Staking::slashable_balance_of(&offending_validator_id), 0); assert_eq!( - Staking::eras_stakers(3, offending_validator_id), + Staking::eras_stakers(3, &offending_validator_id), pallet_staking::Exposure { total: 0, own: 0, others: vec![] }, ); }) diff --git a/substrate/frame/bags-list/Cargo.toml 
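Illustrative note (not part of the changeset): the Aura hunks above add an `authorities_len()` helper built on `decode_len()`, so `FindAuthor` can compute `*slot % authorities_len` without decoding the whole authority set, and the updated `initial_values` test checks that the helper agrees with `authorities().len()`. The following is a minimal, self-contained sketch of that modulo-based author selection; the function and names are illustrative stand-ins, not FRAME APIs.

```rust
/// Minimal sketch (plain Rust, not FRAME) of the author selection kept by the
/// `FindAuthor` hunk above: the slot number is reduced modulo the number of
/// authorities. The pallet change only swaps `Self::authorities().len()` for
/// the cheaper `authorities_len()` helper, which reads the stored length via
/// `decode_len()` instead of decoding every authority.
fn author_index(slot: u64, authorities_len: usize) -> Option<u32> {
    if authorities_len == 0 {
        // Guard against an empty set in this sketch; the pallet itself
        // assumes a non-empty authority set.
        return None;
    }
    Some((slot % authorities_len as u64) as u32)
}

fn main() {
    // With 4 authorities (as in the updated `initial_values` test),
    // slot 6 maps to authority index 2 and slot 8 wraps back to index 0.
    assert_eq!(author_index(6, 4), Some(2));
    assert_eq!(author_index(8, 4), Some(0));
    assert_eq!(author_index(6, 0), None);
}
```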
b/substrate/frame/bags-list/Cargo.toml index f4644890e2bafd4b9a25ec25dc81dddb020fae2e..cb07ef94ff59699e6755811c4fd12c1d769a104f 100644 --- a/substrate/frame/bags-list/Cargo.toml +++ b/substrate/frame/bags-list/Cargo.toml @@ -13,29 +13,33 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # parity -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ + "derive", +] } +scale-info = { version = "2.10.0", default-features = false, features = [ + "derive", +] } # primitives -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } # FRAME -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -frame-election-provider-support = { path = "../election-provider-support", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +frame-election-provider-support = { path = "../election-provider-support", default-features = false } # third party log = { version = "0.4.17", default-features = false } -docify = "0.2.4" +docify = "0.2.6" aquamarine = { version = "0.3.2" } # Optional imports for benchmarking -frame-benchmarking = { path = "../benchmarking", default-features = false , optional = true} -pallet-balances = { path = "../balances", default-features = false , optional = true} -sp-core = { path = "../../primitives/core", default-features = false , optional = true} -sp-io = { path = "../../primitives/io", default-features = false , optional = true} -sp-tracing = { path = "../../primitives/tracing", default-features = false , optional = true} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +pallet-balances = { path = "../balances", default-features = false, optional = true } +sp-core = { path = "../../primitives/core", default-features = false, optional = true } +sp-io = { path = "../../primitives/io", default-features = false, optional = true } +sp-tracing = { path = "../../primitives/tracing", default-features = false, optional = true } [dev-dependencies] sp-core = { path = "../../primitives/core" } diff --git a/substrate/frame/bags-list/src/list/mod.rs b/substrate/frame/bags-list/src/list/mod.rs index d8626080e25239197b60c952aa35a51cf20f5cb1..e90530341a1552b61e8d0caca39b1e5c021c4367 100644 --- a/substrate/frame/bags-list/src/list/mod.rs +++ b/substrate/frame/bags-list/src/list/mod.rs @@ -543,7 +543,16 @@ impl, I: 'static> List { thresholds.into_iter().filter_map(|t| Bag::::get(t)) }; - let _ = active_bags.clone().try_for_each(|b| b.do_try_state())?; + // build map of bags and the corresponding nodes to avoid multiple lookups + let mut bags_map = BTreeMap::>::new(); + + let _ = active_bags.clone().try_for_each(|b| { + bags_map.insert( + b.bag_upper, + b.iter().map(|n: Node| n.id().clone()).collect::>(), + ); + b.do_try_state() + })?; let nodes_in_bags_count = active_bags.clone().fold(0u32, |acc, cur| acc + cur.iter().count() as u32); @@ -554,6 +563,13 @@ impl, I: 'static> List { // check that all nodes 
are sane. We check the `ListNodes` storage item directly in case we // have some "stale" nodes that are not in a bag. for (_id, node) in crate::ListNodes::::iter() { + // check that the node is in the correct bag + let expected_bag = bags_map + .get(&node.bag_upper) + .ok_or("bag not found for the node in active bags")?; + frame_support::ensure!(expected_bag.contains(node.id()), "node not found in the bag"); + + // verify node state node.do_try_state()? } @@ -790,12 +806,6 @@ impl, I: 'static> Bag { pub fn std_iter(&self) -> impl Iterator> { sp_std::iter::successors(self.head(), |prev| prev.next()) } - - /// Check if the bag contains a node with `id`. - #[cfg(any(test, feature = "try-runtime", feature = "fuzz"))] - fn contains(&self, id: &T::AccountId) -> bool { - self.iter().any(|n| n.id() == id) - } } /// A Node is the fundamental element comprising the doubly-linked list described by `Bag`. @@ -900,11 +910,8 @@ impl, I: 'static> Node { #[cfg(any(test, feature = "try-runtime", feature = "fuzz"))] fn do_try_state(&self) -> Result<(), TryRuntimeError> { let expected_bag = Bag::::get(self.bag_upper).ok_or("bag not found for node")?; - let id = self.id(); - frame_support::ensure!(expected_bag.contains(id), "node does not exist in the bag"); - let non_terminal_check = !self.is_terminal() && expected_bag.head.as_ref() != Some(id) && expected_bag.tail.as_ref() != Some(id); diff --git a/substrate/frame/balances/Cargo.toml b/substrate/frame/balances/Cargo.toml index 8d0fc96fe5900aa0d1fe21565376d0ef11971596..b91257df7b20d319bd0aa7d29fd6f2190ec144fe 100644 --- a/substrate/frame/balances/Cargo.toml +++ b/substrate/frame/balances/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} diff --git a/substrate/frame/balances/src/impl_currency.rs b/substrate/frame/balances/src/impl_currency.rs index c64a4929dd568d2fda1a941cd05e3dabd36efaab..1ac882ade70dfb92df8ae5bbe3897fa7799ae53f 100644 --- a/substrate/frame/balances/src/impl_currency.rs +++ b/substrate/frame/balances/src/impl_currency.rs @@ -100,6 +100,11 @@ mod imbalances { mem::forget(self); (Self(first), Self(second)) } + fn extract(&mut self, amount: T::Balance) -> Self { + let new = self.0.min(amount); + self.0 = self.0 - new; + Self(new) + } fn merge(mut self, other: Self) -> Self { self.0 = self.0.saturating_add(other.0); mem::forget(other); @@ -159,6 +164,11 @@ mod imbalances { mem::forget(self); (Self(first), Self(second)) } + fn extract(&mut self, amount: T::Balance) -> Self { + let new = self.0.min(amount); + self.0 = self.0 - new; + Self(new) + } fn merge(mut self, other: Self) -> Self { self.0 = self.0.saturating_add(other.0); mem::forget(other); diff --git a/substrate/frame/balances/src/lib.rs b/substrate/frame/balances/src/lib.rs index a2cacc45369a206a88b2fcddb1d441df28a0e07f..d518f933df8dba7c0d8855be69052e6b98e8c46b 100644 --- a/substrate/frame/balances/src/lib.rs +++ b/substrate/frame/balances/src/lib.rs @@ -207,7 +207,7 @@ pub mod pallet { use 
super::*; use frame_support::{ pallet_prelude::*, - traits::{fungible::Credit, tokens::Precision}, + traits::{fungible::Credit, tokens::Precision, VariantCount}, }; use frame_system::pallet_prelude::*; @@ -229,6 +229,8 @@ pub mod pallet { type RuntimeEvent = (); #[inject_runtime_type] type RuntimeHoldReason = (); + #[inject_runtime_type] + type RuntimeFreezeReason = (); type Balance = u64; type ExistentialDeposit = ConstU64<1>; @@ -238,10 +240,10 @@ pub mod pallet { type DustRemoval = (); - type MaxLocks = (); - type MaxReserves = (); - type MaxFreezes = (); - type MaxHolds = (); + type MaxLocks = ConstU32<100>; + type MaxReserves = ConstU32<100>; + type MaxFreezes = ConstU32<100>; + type MaxHolds = ConstU32<100>; type WeightInfo = (); } @@ -256,7 +258,11 @@ pub mod pallet { /// The overarching hold reason. #[pallet::no_default_bounds] - type RuntimeHoldReason: Parameter + Member + MaxEncodedLen + Ord + Copy; + type RuntimeHoldReason: Parameter + Member + MaxEncodedLen + Copy + VariantCount; + + /// The overarching freeze reason. + #[pallet::no_default_bounds] + type RuntimeFreezeReason: VariantCount; /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; @@ -300,7 +306,7 @@ pub mod pallet { type ReserveIdentifier: Parameter + Member + MaxEncodedLen + Ord + Copy; /// The ID type for freezes. - type FreezeIdentifier: Parameter + Member + MaxEncodedLen + Ord + Copy; + type FreezeIdentifier: Parameter + Member + MaxEncodedLen + Copy; /// The maximum number of locks that should exist on an account. /// Not strictly enforced, but used for weight estimation. @@ -544,6 +550,19 @@ pub mod pallet { !>::ExistentialDeposit::get().is_zero(), "The existential deposit must be greater than zero!" ); + + assert!( + T::MaxHolds::get() >= ::VARIANT_COUNT, + "MaxHolds should be greater than or equal to the number of hold reasons: {} < {}", + T::MaxHolds::get(), + ::VARIANT_COUNT + ); + + assert!( + T::MaxFreezes::get() >= ::VARIANT_COUNT, + "MaxFreezes should be greater than or equal to the number of freeze reasons: {} < {}", + T::MaxFreezes::get(), ::VARIANT_COUNT, + ); } } @@ -916,8 +935,8 @@ pub mod pallet { if did_provide && !does_provide { // This could reap the account so must go last. frame_system::Pallet::::dec_providers(who).map_err(|r| { + // best-effort revert consumer change. if did_consume && !does_consume { - // best-effort revert consumer change. let _ = frame_system::Pallet::::inc_consumers(who).defensive(); } if !did_consume && does_consume { @@ -987,8 +1006,8 @@ pub mod pallet { let freezes = Freezes::::get(who); let mut prev_frozen = Zero::zero(); let mut after_frozen = Zero::zero(); - // TODO: Revisit this assumption. We no manipulate consumer/provider refs. // No way this can fail since we do not alter the existential balances. + // TODO: Revisit this assumption. let res = Self::mutate_account(who, |b| { prev_frozen = b.frozen; b.frozen = Zero::zero(); @@ -1005,26 +1024,9 @@ pub mod pallet { debug_assert!(maybe_dust.is_none(), "Not altering main balance; qed"); } - let existed = Locks::::contains_key(who); - if locks.is_empty() { - Locks::::remove(who); - if existed { - // TODO: use Locks::::hashed_key - // https://github.com/paritytech/substrate/issues/4969 - system::Pallet::::dec_consumers(who); - } - } else { - Locks::::insert(who, bounded_locks); - if !existed && system::Pallet::::inc_consumers_without_limit(who).is_err() { - // No providers for the locks. 
This is impossible under normal circumstances - // since the funds that are under the lock will themselves be stored in the - // account and therefore will need a reference. - log::warn!( - target: LOG_TARGET, - "Warning: Attempt to introduce lock consumer reference, yet no providers. \ - This is unexpected but should be safe." - ); - } + match locks.is_empty() { + true => Locks::::remove(who), + false => Locks::::insert(who, bounded_locks), } if prev_frozen > after_frozen { diff --git a/substrate/frame/balances/src/tests/currency_tests.rs b/substrate/frame/balances/src/tests/currency_tests.rs index 2449638788ddbfc8ea42a6d5b638a41f47ff1874..46a4c4caefc3d72ad45adf58243f3430344d4813 100644 --- a/substrate/frame/balances/src/tests/currency_tests.rs +++ b/substrate/frame/balances/src/tests/currency_tests.rs @@ -18,14 +18,18 @@ //! Tests regarding the functionality of the `Currency` trait set implementations. use super::*; -use crate::NegativeImbalance; -use frame_support::traits::{ - BalanceStatus::{Free, Reserved}, - Currency, - ExistenceRequirement::{self, AllowDeath, KeepAlive}, - Hooks, LockIdentifier, LockableCurrency, NamedReservableCurrency, ReservableCurrency, - WithdrawReasons, +use crate::{Event, NegativeImbalance}; +use frame_support::{ + traits::{ + BalanceStatus::{Free, Reserved}, + Currency, + ExistenceRequirement::{self, AllowDeath, KeepAlive}, + Hooks, LockIdentifier, LockableCurrency, NamedReservableCurrency, ReservableCurrency, + WithdrawReasons, + }, + StorageNoopGuard, }; +use frame_system::Event as SysEvent; const ID_1: LockIdentifier = *b"1 "; const ID_2: LockIdentifier = *b"2 "; @@ -144,7 +148,9 @@ fn lock_removal_should_work() { .monied(true) .build_and_execute_with(|| { Balances::set_lock(ID_1, &1, u64::MAX, WithdrawReasons::all()); + assert_eq!(System::consumers(&1), 1); Balances::remove_lock(ID_1, &1); + assert_eq!(System::consumers(&1), 0); assert_ok!(>::transfer(&1, &2, 1, AllowDeath)); }); } @@ -156,7 +162,9 @@ fn lock_replacement_should_work() { .monied(true) .build_and_execute_with(|| { Balances::set_lock(ID_1, &1, u64::MAX, WithdrawReasons::all()); + assert_eq!(System::consumers(&1), 1); Balances::set_lock(ID_1, &1, 5, WithdrawReasons::all()); + assert_eq!(System::consumers(&1), 1); assert_ok!(>::transfer(&1, &2, 1, AllowDeath)); }); } @@ -168,7 +176,9 @@ fn double_locking_should_work() { .monied(true) .build_and_execute_with(|| { Balances::set_lock(ID_1, &1, 5, WithdrawReasons::all()); + assert_eq!(System::consumers(&1), 1); Balances::set_lock(ID_2, &1, 5, WithdrawReasons::all()); + assert_eq!(System::consumers(&1), 1); assert_ok!(>::transfer(&1, &2, 1, AllowDeath)); }); } @@ -179,8 +189,11 @@ fn combination_locking_should_work() { .existential_deposit(1) .monied(true) .build_and_execute_with(|| { + assert_eq!(System::consumers(&1), 0); Balances::set_lock(ID_1, &1, u64::MAX, WithdrawReasons::empty()); + assert_eq!(System::consumers(&1), 0); Balances::set_lock(ID_2, &1, 0, WithdrawReasons::all()); + assert_eq!(System::consumers(&1), 0); assert_ok!(>::transfer(&1, &2, 1, AllowDeath)); }); } @@ -192,16 +205,19 @@ fn lock_value_extension_should_work() { .monied(true) .build_and_execute_with(|| { Balances::set_lock(ID_1, &1, 5, WithdrawReasons::all()); + assert_eq!(System::consumers(&1), 1); assert_noop!( >::transfer(&1, &2, 6, AllowDeath), TokenError::Frozen ); Balances::extend_lock(ID_1, &1, 2, WithdrawReasons::all()); + assert_eq!(System::consumers(&1), 1); assert_noop!( >::transfer(&1, &2, 6, AllowDeath), TokenError::Frozen ); Balances::extend_lock(ID_1, &1, 8, 
WithdrawReasons::all()); + assert_eq!(System::consumers(&1), 1); assert_noop!( >::transfer(&1, &2, 3, AllowDeath), TokenError::Frozen @@ -1324,9 +1340,14 @@ fn freezing_and_locking_should_work() { .existential_deposit(1) .monied(true) .build_and_execute_with(|| { + // Consumer is shared between freezing and locking. + assert_eq!(System::consumers(&1), 0); assert_ok!(>::set_freeze(&TestId::Foo, &1, 4)); + assert_eq!(System::consumers(&1), 1); Balances::set_lock(ID_1, &1, 5, WithdrawReasons::all()); - assert_eq!(System::consumers(&1), 2); + assert_eq!(System::consumers(&1), 1); + + // Frozen and locked balances update correctly. assert_eq!(Balances::account(&1).frozen, 5); assert_ok!(>::set_freeze(&TestId::Foo, &1, 6)); assert_eq!(Balances::account(&1).frozen, 6); @@ -1336,8 +1357,49 @@ fn freezing_and_locking_should_work() { assert_eq!(Balances::account(&1).frozen, 4); Balances::set_lock(ID_1, &1, 5, WithdrawReasons::all()); assert_eq!(Balances::account(&1).frozen, 5); + + // Locks update correctly. Balances::remove_lock(ID_1, &1); assert_eq!(Balances::account(&1).frozen, 4); assert_eq!(System::consumers(&1), 1); + assert_ok!(>::set_freeze(&TestId::Foo, &1, 0)); + assert_eq!(Balances::account(&1).frozen, 0); + assert_eq!(System::consumers(&1), 0); }); } + +#[test] +fn self_transfer_noop() { + ExtBuilder::default().existential_deposit(100).build_and_execute_with(|| { + assert_eq!(Balances::total_issuance(), 0); + let _ = Balances::deposit_creating(&1, 100); + + // The account is set up properly: + assert_eq!( + events(), + [ + Event::Deposit { who: 1, amount: 100 }.into(), + SysEvent::NewAccount { account: 1 }.into(), + Event::Endowed { account: 1, free_balance: 100 }.into(), + ] + ); + assert_eq!(Balances::free_balance(1), 100); + assert_eq!(Balances::total_issuance(), 100); + + // Transfers to self are No-OPs: + let _g = StorageNoopGuard::new(); + for i in 0..200 { + let r = Balances::transfer_allow_death(Some(1).into(), 1, i); + + if i <= 100 { + assert_ok!(r); + } else { + assert!(r.is_err()); + } + + assert!(events().is_empty()); + assert_eq!(Balances::free_balance(1), 100, "Balance unchanged by self transfer"); + assert_eq!(Balances::total_issuance(), 100, "TI unchanged by self transfers"); + } + }); +} diff --git a/substrate/frame/balances/src/tests/mod.rs b/substrate/frame/balances/src/tests/mod.rs index d15f8e89118cc336cad226bfe1e16958cb653496..dd3e5b7a85a2eeeb470822612dabdcb548888489 100644 --- a/substrate/frame/balances/src/tests/mod.rs +++ b/substrate/frame/balances/src/tests/mod.rs @@ -26,8 +26,8 @@ use frame_support::{ dispatch::{DispatchInfo, GetDispatchInfo}, parameter_types, traits::{ - tokens::fungible, ConstU32, ConstU64, ConstU8, Imbalance as ImbalanceT, OnUnbalanced, - StorageMapShim, StoredMap, WhitelistedStorageKeys, + fungible, ConstU32, ConstU64, ConstU8, Imbalance as ImbalanceT, OnUnbalanced, + StorageMapShim, StoredMap, VariantCount, WhitelistedStorageKeys, }, weights::{IdentityFee, Weight}, }; @@ -70,6 +70,10 @@ pub enum TestId { Baz, } +impl VariantCount for TestId { + const VARIANT_COUNT: u32 = 3; +} + frame_support::construct_runtime!( pub struct Test { @@ -132,9 +136,10 @@ impl Config for Test { type ReserveIdentifier = TestId; type WeightInfo = (); type RuntimeHoldReason = TestId; + type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = TestId; type MaxFreezes = ConstU32<2>; - type MaxHolds = ConstU32<2>; + type MaxHolds = ConstU32<3>; } #[derive(Clone)] diff --git a/substrate/frame/beefy-mmr/Cargo.toml b/substrate/frame/beefy-mmr/Cargo.toml 
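Illustrative note (not part of the changeset): the balances hunks above bound `RuntimeHoldReason`/`RuntimeFreezeReason` by `VariantCount` and add `integrity_test` assertions that `MaxHolds` and `MaxFreezes` cover every declared reason variant, which is why the test config implements `VariantCount` for `TestId` and bumps `MaxHolds` to `ConstU32<3>`. Below is a minimal sketch of that invariant with hypothetical stand-in names; it is not the FRAME implementation.

```rust
/// Minimal sketch (plain Rust, not the FRAME types) of the invariant enforced
/// by the new `integrity_test` assertions: the configured maximum number of
/// holds must cover every hold-reason variant the runtime declares.
trait VariantCount {
    const VARIANT_COUNT: u32;
}

/// Hypothetical stand-in for a runtime's hold-reason enum, mirroring the
/// three-variant `TestId` used in the balances tests above.
#[allow(dead_code)]
enum HoldReason {
    Foo,
    Bar,
    Baz,
}

impl VariantCount for HoldReason {
    // The test config in the diff implements this by hand for `TestId`.
    const VARIANT_COUNT: u32 = 3;
}

/// Stand-in for the pallet's `MaxHolds` configuration constant.
const MAX_HOLDS: u32 = 3;

fn main() {
    // Mirrors the assertion added to `integrity_test` in the diff above.
    assert!(
        MAX_HOLDS >= HoldReason::VARIANT_COUNT,
        "MaxHolds should be greater than or equal to the number of hold reasons: {} < {}",
        MAX_HOLDS,
        HoldReason::VARIANT_COUNT
    );
}
```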
index 020ca52a277f68581479c59d9b053df3a2c672ef..fe0321bea5167184eefc9c7e2c0265c3342d9d4d 100644 --- a/substrate/frame/beefy-mmr/Cargo.toml +++ b/substrate/frame/beefy-mmr/Cargo.toml @@ -12,7 +12,7 @@ homepage = "https://substrate.io" array-bytes = { version = "6.1", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.188", optional = true } binary-merkle-tree = { path = "../../utils/binary-merkle-tree", default-features = false} frame-support = { path = "../support", default-features = false} diff --git a/substrate/frame/beefy-mmr/src/mock.rs b/substrate/frame/beefy-mmr/src/mock.rs index b2d8758a04be6f74236d60488760292d1ac87ca8..b85096813decad50226eccf8b3f62be669327dd0 100644 --- a/substrate/frame/beefy-mmr/src/mock.rs +++ b/substrate/frame/beefy-mmr/src/mock.rs @@ -19,16 +19,15 @@ use std::vec; use codec::Encode; use frame_support::{ - construct_runtime, parameter_types, - traits::{ConstU16, ConstU32, ConstU64}, + construct_runtime, derive_impl, parameter_types, + traits::{ConstU32, ConstU64}, }; use sp_consensus_beefy::mmr::MmrLeafVersion; -use sp_core::H256; use sp_io::TestExternalities; use sp_runtime::{ app_crypto::ecdsa::Public, impl_opaque_keys, - traits::{BlakeTwo256, ConvertInto, IdentityLookup, Keccak256, OpaqueKeys}, + traits::{ConvertInto, Keccak256, OpaqueKeys}, BuildStorage, }; use sp_state_machine::BasicExternalities; @@ -58,30 +57,9 @@ construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { - type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type Nonce = u64; - type Hash = H256; - type RuntimeCall = RuntimeCall; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = ConstU16<42>; - type OnSetCode = (); - type MaxConsumers = ConstU32<16>; } impl pallet_session::Config for Test { diff --git a/substrate/frame/beefy/Cargo.toml b/substrate/frame/beefy/Cargo.toml index 1445658bafb53bc857489e9784ef6a8a4c282949..1da09321342e261046b358aab323996f5bd9893f 100644 --- a/substrate/frame/beefy/Cargo.toml +++ b/substrate/frame/beefy/Cargo.toml @@ -11,7 +11,7 @@ homepage = "https://substrate.io" [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive", "serde"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } serde = { version = "1.0.188", optional = true } frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} diff --git a/substrate/frame/beefy/src/mock.rs b/substrate/frame/beefy/src/mock.rs index 
b55a65dbd73aff5210dc6f74127aa8bbcea7c268..53d523cf724d92e2cb4b307fa485f1b02a8ebc31 100644 --- a/substrate/frame/beefy/src/mock.rs +++ b/substrate/frame/beefy/src/mock.rs @@ -22,19 +22,15 @@ use frame_election_provider_support::{ onchain, SequentialPhragmen, }; use frame_support::{ - construct_runtime, parameter_types, - traits::{ConstU16, ConstU32, ConstU64, KeyOwnerProofSystem, OnFinalize, OnInitialize}, + construct_runtime, derive_impl, parameter_types, + traits::{ConstU32, ConstU64, KeyOwnerProofSystem, OnFinalize, OnInitialize}, }; use pallet_session::historical as pallet_session_historical; -use sp_core::{crypto::KeyTypeId, ConstU128, H256}; +use sp_core::{crypto::KeyTypeId, ConstU128}; use sp_io::TestExternalities; use sp_runtime::{ - app_crypto::ecdsa::Public, - curve::PiecewiseLinear, - impl_opaque_keys, - testing::TestXt, - traits::{BlakeTwo256, IdentityLookup, OpaqueKeys}, - BuildStorage, Perbill, + app_crypto::ecdsa::Public, curve::PiecewiseLinear, impl_opaque_keys, testing::TestXt, + traits::OpaqueKeys, BuildStorage, Perbill, }; use sp_staking::{EraIndex, SessionIndex}; use sp_state_machine::BasicExternalities; @@ -69,30 +65,10 @@ construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { - type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type Nonce = u64; - type Hash = H256; - type RuntimeCall = RuntimeCall; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; - type Version = (); - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = ConstU16<42>; - type OnSetCode = (); - type MaxConsumers = ConstU32<16>; } impl frame_system::offchain::SendTransactionTypes for Test @@ -159,6 +135,7 @@ impl pallet_balances::Config for Test { type AccountStore = System; type WeightInfo = (); type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = (); type FreezeIdentifier = (); type MaxFreezes = (); @@ -215,7 +192,7 @@ impl pallet_staking::Config for Test { type SessionInterface = Self; type UnixTime = pallet_timestamp::Pallet; type EraPayout = pallet_staking::ConvertCurve; - type MaxNominatorRewardedPerValidator = ConstU32<64>; + type MaxExposurePageSize = ConstU32<64>; type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type NextNewSession = Session; type ElectionProvider = onchain::OnChainExecution; diff --git a/substrate/frame/beefy/src/tests.rs b/substrate/frame/beefy/src/tests.rs index bf1b204e0260e29a2ce473416a43d0b641f8a7b9..bf5ae19510ce9320c7aac3b705db605db16ecebd 100644 --- a/substrate/frame/beefy/src/tests.rs +++ b/substrate/frame/beefy/src/tests.rs @@ -277,7 +277,7 @@ fn report_equivocation_current_set_works() { assert_eq!(Staking::slashable_balance_of(validator), 10_000); assert_eq!( - Staking::eras_stakers(1, validator), + Staking::eras_stakers(1, &validator), pallet_staking::Exposure { total: 10_000, own: 10_000, others: vec![] }, ); } @@ -314,7 +314,7 @@ fn report_equivocation_current_set_works() { assert_eq!(Balances::total_balance(&equivocation_validator_id), 10_000_000 - 10_000); assert_eq!(Staking::slashable_balance_of(&equivocation_validator_id), 0); assert_eq!( - 
Staking::eras_stakers(2, equivocation_validator_id), + Staking::eras_stakers(2, &equivocation_validator_id), pallet_staking::Exposure { total: 0, own: 0, others: vec![] }, ); @@ -328,7 +328,7 @@ fn report_equivocation_current_set_works() { assert_eq!(Staking::slashable_balance_of(validator), 10_000); assert_eq!( - Staking::eras_stakers(2, validator), + Staking::eras_stakers(2, &validator), pallet_staking::Exposure { total: 10_000, own: 10_000, others: vec![] }, ); } @@ -363,7 +363,7 @@ fn report_equivocation_old_set_works() { assert_eq!(Staking::slashable_balance_of(validator), 10_000); assert_eq!( - Staking::eras_stakers(2, validator), + Staking::eras_stakers(2, &validator), pallet_staking::Exposure { total: 10_000, own: 10_000, others: vec![] }, ); } @@ -397,7 +397,7 @@ fn report_equivocation_old_set_works() { assert_eq!(Balances::total_balance(&equivocation_validator_id), 10_000_000 - 10_000); assert_eq!(Staking::slashable_balance_of(&equivocation_validator_id), 0); assert_eq!( - Staking::eras_stakers(3, equivocation_validator_id), + Staking::eras_stakers(3, &equivocation_validator_id), pallet_staking::Exposure { total: 0, own: 0, others: vec![] }, ); @@ -411,7 +411,7 @@ fn report_equivocation_old_set_works() { assert_eq!(Staking::slashable_balance_of(validator), 10_000); assert_eq!( - Staking::eras_stakers(3, validator), + Staking::eras_stakers(3, &validator), pallet_staking::Exposure { total: 10_000, own: 10_000, others: vec![] }, ); } diff --git a/substrate/frame/benchmarking/Cargo.toml b/substrate/frame/benchmarking/Cargo.toml index 107f3b7d56fef156171de1c19295338b23b427e1..79f35f6262591513ec8c0bf46b36280f03caa255 100644 --- a/substrate/frame/benchmarking/Cargo.toml +++ b/substrate/frame/benchmarking/Cargo.toml @@ -17,7 +17,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = linregress = { version = "0.5.1", optional = true } log = { version = "0.4.17", default-features = false } paste = "1.0" -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.188", optional = true } frame-support = { path = "../support", default-features = false} frame-support-procedural = { path = "../support/procedural", default-features = false} diff --git a/substrate/frame/benchmarking/pov/Cargo.toml b/substrate/frame/benchmarking/pov/Cargo.toml index 3a08c7a67e192a6b8e789409299efe834d82ce38..0d935063e9e466aa6299cb1c9122b6801805ca7f 100644 --- a/substrate/frame/benchmarking/pov/Cargo.toml +++ b/substrate/frame/benchmarking/pov/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-benchmarking = { path = "..", default-features = false} frame-support = { path = "../../support", default-features = false} frame-system = { path = "../../system", default-features = false} diff --git a/substrate/frame/bounties/Cargo.toml b/substrate/frame/bounties/Cargo.toml index 2fab40b3ef5c398aa3319d9e2d4292a556d280d7..7da21140542a8aa760f0d533609c97df07d01764 100644 --- a/substrate/frame/bounties/Cargo.toml +++ b/substrate/frame/bounties/Cargo.toml @@ -17,7 +17,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", ] } log = { 
version = "0.4.17", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} diff --git a/substrate/frame/bounties/src/tests.rs b/substrate/frame/bounties/src/tests.rs index 4083b05b629cd0bff61d0af62e2a940cc7e1c031..233e41b474c7497e17ee78e9f5d64b1bbb254a0e 100644 --- a/substrate/frame/bounties/src/tests.rs +++ b/substrate/frame/bounties/src/tests.rs @@ -98,6 +98,7 @@ impl pallet_balances::Config for Test { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = (); } parameter_types! { diff --git a/substrate/frame/broker/Cargo.toml b/substrate/frame/broker/Cargo.toml index d6c5399d56eb178fe6e35f06638bf4409604ce42..142d0a0e35e0403e1470b320c9ccc1bff22d282c 100644 --- a/substrate/frame/broker/Cargo.toml +++ b/substrate/frame/broker/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive"] } -scale-info = { version = "2.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } bitvec = { version = "1.0.0", default-features = false } sp-std = { path = "../../primitives/std", default-features = false} sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false} diff --git a/substrate/frame/broker/README.md b/substrate/frame/broker/README.md index a3c4b251a452752172c789bcfdab97fa5da6bc9c..eae0442dbf69be30f72ed74d96ef2b052279ed4f 100644 --- a/substrate/frame/broker/README.md +++ b/substrate/frame/broker/README.md @@ -2,7 +2,7 @@ Brokerage tool for managing Polkadot Core scheduling. -Properly described in RFC-0001 Agile Coretime. +Properly described in [RFC-0001 Agile Coretime](https://github.com/polkadot-fellows/RFCs/blob/main/text/0001-agile-coretime.md). 
## Implementation Specifics diff --git a/substrate/frame/broker/src/test_fungibles.rs b/substrate/frame/broker/src/test_fungibles.rs index f6ac5a49dedd28060dae9c4802fcb2842054051e..d18bff1495331e96b699e708975f4942925354fb 100644 --- a/substrate/frame/broker/src/test_fungibles.rs +++ b/substrate/frame/broker/src/test_fungibles.rs @@ -168,7 +168,7 @@ where impl< Instance: Get, - AccountId: Encode, + AccountId: Encode + Eq, AssetId: tokens::AssetId + Copy, MinimumBalance: TypedGet, HoldReason, diff --git a/substrate/frame/child-bounties/Cargo.toml b/substrate/frame/child-bounties/Cargo.toml index b2ca01e3781914fc49a3fd601868544c77a24a58..ac29bc4997ba7a8e61cdccf9839140c2b844a7a3 100644 --- a/substrate/frame/child-bounties/Cargo.toml +++ b/substrate/frame/child-bounties/Cargo.toml @@ -17,7 +17,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", ] } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} diff --git a/substrate/frame/child-bounties/src/tests.rs b/substrate/frame/child-bounties/src/tests.rs index 1fa3d944f3de153117b71817f9e5c5c17cde18e4..46f8fa65dd37749d1c50130f6f24b3c65f0f95c0 100644 --- a/substrate/frame/child-bounties/src/tests.rs +++ b/substrate/frame/child-bounties/src/tests.rs @@ -101,6 +101,7 @@ impl pallet_balances::Config for Test { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = (); } parameter_types! 
{ diff --git a/substrate/frame/collective/Cargo.toml b/substrate/frame/collective/Cargo.toml index c9180d2bc710749372b382b38fb6e65d5f945f3b..7f5e305e4f59b42e67b6aadb7db1402824d032fa 100644 --- a/substrate/frame/collective/Cargo.toml +++ b/substrate/frame/collective/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} diff --git a/substrate/frame/contracts/Cargo.toml b/substrate/frame/contracts/Cargo.toml index d5c809e1bf7cf3df4b3e47c3f0995b429e392320..239b0865e0f0b1f11feaa6262e53fd6dd0e74807 100644 --- a/substrate/frame/contracts/Cargo.toml +++ b/substrate/frame/contracts/Cargo.toml @@ -9,7 +9,7 @@ homepage = "https://substrate.io" repository.workspace = true description = "FRAME pallet for WASM contracts" readme = "README.md" -include = ["src/**/*", "build.rs", "README.md", "CHANGELOG.md"] +include = ["src/**/*", "benchmarks/**", "build.rs", "README.md", "CHANGELOG.md"] [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -20,7 +20,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", "max-encoded-len", ] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } log = { version = "0.4", default-features = false } serde = { version = "1", optional = true, features = ["derive"] } smallvec = { version = "1", default-features = false, features = [ @@ -48,20 +48,30 @@ sp-io = { path = "../../primitives/io", default-features = false} sp-runtime = { path = "../../primitives/runtime", default-features = false} sp-std = { path = "../../primitives/std", default-features = false} +xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false} +xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder", default-features = false} + [dev-dependencies] array-bytes = "6.1" assert_matches = "1" env_logger = "0.9" pretty_assertions = "1" wat = "1" +pallet-contracts-fixtures = { path = "./fixtures" } + +# Polkadot Dependencies +xcm-builder = {package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder"} # Substrate Dependencies pallet-balances = { path = "../balances" } pallet-timestamp = { path = "../timestamp" } +pallet-message-queue = { path = "../message-queue" } pallet-insecure-randomness-collective-flip = { path = "../insecure-randomness-collective-flip" } pallet-utility = { path = "../utility" } +pallet-assets = { path = "../assets" } pallet-proxy = { path = "../proxy" } sp-keystore = { path = "../../primitives/keystore" } +sp-tracing = { path = "../../primitives/tracing" } [features] default = [ "std" ] @@ -73,6 +83,7 @@ std = [ "frame-system/std", "log/std", "pallet-balances?/std", + "pallet-contracts-fixtures/std", "pallet-contracts-primitives/std", "pallet-contracts-proc-macro/full", "pallet-insecure-randomness-collective-flip/std", @@ -90,12 +101,16 @@ std = [ "sp-std/std", 
"wasm-instrument/std", "wasmi/std", + "xcm-builder/std", + "xcm/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", + "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", + "pallet-message-queue/runtime-benchmarks", "pallet-proxy/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "pallet-utility/runtime-benchmarks", @@ -103,12 +118,15 @@ runtime-benchmarks = [ "rand_pcg", "sp-runtime/runtime-benchmarks", "wasm-instrument", + "xcm-builder/runtime-benchmarks", ] try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", + "pallet-assets/try-runtime", "pallet-balances/try-runtime", "pallet-insecure-randomness-collective-flip/try-runtime", + "pallet-message-queue/try-runtime", "pallet-proxy/try-runtime", "pallet-timestamp/try-runtime", "pallet-utility/try-runtime", diff --git a/substrate/frame/contracts/fixtures/Cargo.toml b/substrate/frame/contracts/fixtures/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..b44f36f2a5fe7d141ebb7b262e7d60b6489cabbe --- /dev/null +++ b/substrate/frame/contracts/fixtures/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "pallet-contracts-fixtures" +publish = false +version = "1.0.0" +authors.workspace = true +edition.workspace = true +license.workspace = true +description = "Fixtures for testing contracts pallet." + +[dependencies] +wat = "1" +frame-system = { path = "../../system", default-features = false} +sp-runtime = { path = "../../../primitives/runtime", default-features = false} + +[features] +default = [ "std" ] +std = [ "frame-system/std", "sp-runtime/std" ] + diff --git a/substrate/frame/contracts/fixtures/account_reentrance_count_call.wat b/substrate/frame/contracts/fixtures/data/account_reentrance_count_call.wat similarity index 100% rename from substrate/frame/contracts/fixtures/account_reentrance_count_call.wat rename to substrate/frame/contracts/fixtures/data/account_reentrance_count_call.wat diff --git a/substrate/frame/contracts/fixtures/add_remove_delegate_dependency.wat b/substrate/frame/contracts/fixtures/data/add_remove_delegate_dependency.wat similarity index 100% rename from substrate/frame/contracts/fixtures/add_remove_delegate_dependency.wat rename to substrate/frame/contracts/fixtures/data/add_remove_delegate_dependency.wat diff --git a/substrate/frame/contracts/fixtures/balance.wat b/substrate/frame/contracts/fixtures/data/balance.wat similarity index 100% rename from substrate/frame/contracts/fixtures/balance.wat rename to substrate/frame/contracts/fixtures/data/balance.wat diff --git a/substrate/frame/contracts/fixtures/call.wat b/substrate/frame/contracts/fixtures/data/call.wat similarity index 100% rename from substrate/frame/contracts/fixtures/call.wat rename to substrate/frame/contracts/fixtures/data/call.wat diff --git a/substrate/frame/contracts/fixtures/call_return_code.wat b/substrate/frame/contracts/fixtures/data/call_return_code.wat similarity index 100% rename from substrate/frame/contracts/fixtures/call_return_code.wat rename to substrate/frame/contracts/fixtures/data/call_return_code.wat diff --git a/substrate/frame/contracts/fixtures/call_runtime.wat b/substrate/frame/contracts/fixtures/data/call_runtime.wat similarity index 100% rename from substrate/frame/contracts/fixtures/call_runtime.wat rename to substrate/frame/contracts/fixtures/data/call_runtime.wat diff --git a/substrate/frame/contracts/fixtures/call_runtime_and_call.wat 
b/substrate/frame/contracts/fixtures/data/call_runtime_and_call.wat similarity index 100% rename from substrate/frame/contracts/fixtures/call_runtime_and_call.wat rename to substrate/frame/contracts/fixtures/data/call_runtime_and_call.wat diff --git a/substrate/frame/contracts/fixtures/call_with_limit.wat b/substrate/frame/contracts/fixtures/data/call_with_limit.wat similarity index 100% rename from substrate/frame/contracts/fixtures/call_with_limit.wat rename to substrate/frame/contracts/fixtures/data/call_with_limit.wat diff --git a/substrate/frame/contracts/fixtures/caller_contract.wat b/substrate/frame/contracts/fixtures/data/caller_contract.wat similarity index 100% rename from substrate/frame/contracts/fixtures/caller_contract.wat rename to substrate/frame/contracts/fixtures/data/caller_contract.wat diff --git a/substrate/frame/contracts/fixtures/chain_extension.wat b/substrate/frame/contracts/fixtures/data/chain_extension.wat similarity index 100% rename from substrate/frame/contracts/fixtures/chain_extension.wat rename to substrate/frame/contracts/fixtures/data/chain_extension.wat diff --git a/substrate/frame/contracts/fixtures/chain_extension_temp_storage.wat b/substrate/frame/contracts/fixtures/data/chain_extension_temp_storage.wat similarity index 100% rename from substrate/frame/contracts/fixtures/chain_extension_temp_storage.wat rename to substrate/frame/contracts/fixtures/data/chain_extension_temp_storage.wat diff --git a/substrate/frame/contracts/fixtures/create_storage_and_call.wat b/substrate/frame/contracts/fixtures/data/create_storage_and_call.wat similarity index 100% rename from substrate/frame/contracts/fixtures/create_storage_and_call.wat rename to substrate/frame/contracts/fixtures/data/create_storage_and_call.wat diff --git a/substrate/frame/contracts/fixtures/create_storage_and_instantiate.wat b/substrate/frame/contracts/fixtures/data/create_storage_and_instantiate.wat similarity index 100% rename from substrate/frame/contracts/fixtures/create_storage_and_instantiate.wat rename to substrate/frame/contracts/fixtures/data/create_storage_and_instantiate.wat diff --git a/substrate/frame/contracts/fixtures/crypto_hashes.wat b/substrate/frame/contracts/fixtures/data/crypto_hashes.wat similarity index 100% rename from substrate/frame/contracts/fixtures/crypto_hashes.wat rename to substrate/frame/contracts/fixtures/data/crypto_hashes.wat diff --git a/substrate/frame/contracts/fixtures/debug_message_invalid_utf8.wat b/substrate/frame/contracts/fixtures/data/debug_message_invalid_utf8.wat similarity index 100% rename from substrate/frame/contracts/fixtures/debug_message_invalid_utf8.wat rename to substrate/frame/contracts/fixtures/data/debug_message_invalid_utf8.wat diff --git a/substrate/frame/contracts/fixtures/debug_message_logging_disabled.wat b/substrate/frame/contracts/fixtures/data/debug_message_logging_disabled.wat similarity index 100% rename from substrate/frame/contracts/fixtures/debug_message_logging_disabled.wat rename to substrate/frame/contracts/fixtures/data/debug_message_logging_disabled.wat diff --git a/substrate/frame/contracts/fixtures/debug_message_works.wat b/substrate/frame/contracts/fixtures/data/debug_message_works.wat similarity index 100% rename from substrate/frame/contracts/fixtures/debug_message_works.wat rename to substrate/frame/contracts/fixtures/data/debug_message_works.wat diff --git a/substrate/frame/contracts/fixtures/delegate_call.wat b/substrate/frame/contracts/fixtures/data/delegate_call.wat similarity index 100% rename from 
substrate/frame/contracts/fixtures/delegate_call.wat rename to substrate/frame/contracts/fixtures/data/delegate_call.wat diff --git a/substrate/frame/contracts/fixtures/delegate_call_lib.wat b/substrate/frame/contracts/fixtures/data/delegate_call_lib.wat similarity index 100% rename from substrate/frame/contracts/fixtures/delegate_call_lib.wat rename to substrate/frame/contracts/fixtures/data/delegate_call_lib.wat diff --git a/substrate/frame/contracts/fixtures/delegate_call_simple.wat b/substrate/frame/contracts/fixtures/data/delegate_call_simple.wat similarity index 100% rename from substrate/frame/contracts/fixtures/delegate_call_simple.wat rename to substrate/frame/contracts/fixtures/data/delegate_call_simple.wat diff --git a/substrate/frame/contracts/fixtures/destroy_and_transfer.wat b/substrate/frame/contracts/fixtures/data/destroy_and_transfer.wat similarity index 100% rename from substrate/frame/contracts/fixtures/destroy_and_transfer.wat rename to substrate/frame/contracts/fixtures/data/destroy_and_transfer.wat diff --git a/substrate/frame/contracts/fixtures/drain.wat b/substrate/frame/contracts/fixtures/data/drain.wat similarity index 100% rename from substrate/frame/contracts/fixtures/drain.wat rename to substrate/frame/contracts/fixtures/data/drain.wat diff --git a/substrate/frame/contracts/fixtures/dummy.wat b/substrate/frame/contracts/fixtures/data/dummy.wat similarity index 100% rename from substrate/frame/contracts/fixtures/dummy.wat rename to substrate/frame/contracts/fixtures/data/dummy.wat diff --git a/substrate/frame/contracts/fixtures/ecdsa_recover.wat b/substrate/frame/contracts/fixtures/data/ecdsa_recover.wat similarity index 100% rename from substrate/frame/contracts/fixtures/ecdsa_recover.wat rename to substrate/frame/contracts/fixtures/data/ecdsa_recover.wat diff --git a/substrate/frame/contracts/fixtures/event_and_return_on_deploy.wat b/substrate/frame/contracts/fixtures/data/event_and_return_on_deploy.wat similarity index 100% rename from substrate/frame/contracts/fixtures/event_and_return_on_deploy.wat rename to substrate/frame/contracts/fixtures/data/event_and_return_on_deploy.wat diff --git a/substrate/frame/contracts/fixtures/event_size.wat b/substrate/frame/contracts/fixtures/data/event_size.wat similarity index 100% rename from substrate/frame/contracts/fixtures/event_size.wat rename to substrate/frame/contracts/fixtures/data/event_size.wat diff --git a/substrate/frame/contracts/fixtures/float_instruction.wat b/substrate/frame/contracts/fixtures/data/float_instruction.wat similarity index 100% rename from substrate/frame/contracts/fixtures/float_instruction.wat rename to substrate/frame/contracts/fixtures/data/float_instruction.wat diff --git a/substrate/frame/contracts/fixtures/instantiate_return_code.wat b/substrate/frame/contracts/fixtures/data/instantiate_return_code.wat similarity index 100% rename from substrate/frame/contracts/fixtures/instantiate_return_code.wat rename to substrate/frame/contracts/fixtures/data/instantiate_return_code.wat diff --git a/substrate/frame/contracts/fixtures/invalid_contract_no_call.wat b/substrate/frame/contracts/fixtures/data/invalid_contract_no_call.wat similarity index 100% rename from substrate/frame/contracts/fixtures/invalid_contract_no_call.wat rename to substrate/frame/contracts/fixtures/data/invalid_contract_no_call.wat diff --git a/substrate/frame/contracts/fixtures/invalid_contract_no_memory.wat b/substrate/frame/contracts/fixtures/data/invalid_contract_no_memory.wat similarity index 100% rename from 
substrate/frame/contracts/fixtures/invalid_contract_no_memory.wat rename to substrate/frame/contracts/fixtures/data/invalid_contract_no_memory.wat diff --git a/substrate/frame/contracts/fixtures/invalid_module.wat b/substrate/frame/contracts/fixtures/data/invalid_module.wat similarity index 100% rename from substrate/frame/contracts/fixtures/invalid_module.wat rename to substrate/frame/contracts/fixtures/data/invalid_module.wat diff --git a/substrate/frame/contracts/fixtures/multi_store.wat b/substrate/frame/contracts/fixtures/data/multi_store.wat similarity index 100% rename from substrate/frame/contracts/fixtures/multi_store.wat rename to substrate/frame/contracts/fixtures/data/multi_store.wat diff --git a/substrate/frame/contracts/fixtures/new_set_code_hash_contract.wat b/substrate/frame/contracts/fixtures/data/new_set_code_hash_contract.wat similarity index 100% rename from substrate/frame/contracts/fixtures/new_set_code_hash_contract.wat rename to substrate/frame/contracts/fixtures/data/new_set_code_hash_contract.wat diff --git a/substrate/frame/contracts/fixtures/ok_trap_revert.wat b/substrate/frame/contracts/fixtures/data/ok_trap_revert.wat similarity index 100% rename from substrate/frame/contracts/fixtures/ok_trap_revert.wat rename to substrate/frame/contracts/fixtures/data/ok_trap_revert.wat diff --git a/substrate/frame/contracts/fixtures/reentrance_count_call.wat b/substrate/frame/contracts/fixtures/data/reentrance_count_call.wat similarity index 100% rename from substrate/frame/contracts/fixtures/reentrance_count_call.wat rename to substrate/frame/contracts/fixtures/data/reentrance_count_call.wat diff --git a/substrate/frame/contracts/fixtures/reentrance_count_delegated_call.wat b/substrate/frame/contracts/fixtures/data/reentrance_count_delegated_call.wat similarity index 100% rename from substrate/frame/contracts/fixtures/reentrance_count_delegated_call.wat rename to substrate/frame/contracts/fixtures/data/reentrance_count_delegated_call.wat diff --git a/substrate/frame/contracts/fixtures/return_with_data.wat b/substrate/frame/contracts/fixtures/data/return_with_data.wat similarity index 100% rename from substrate/frame/contracts/fixtures/return_with_data.wat rename to substrate/frame/contracts/fixtures/data/return_with_data.wat diff --git a/substrate/frame/contracts/fixtures/run_out_of_gas.wat b/substrate/frame/contracts/fixtures/data/run_out_of_gas.wat similarity index 100% rename from substrate/frame/contracts/fixtures/run_out_of_gas.wat rename to substrate/frame/contracts/fixtures/data/run_out_of_gas.wat diff --git a/substrate/frame/contracts/fixtures/run_out_of_gas_start_fn.wat b/substrate/frame/contracts/fixtures/data/run_out_of_gas_start_fn.wat similarity index 100% rename from substrate/frame/contracts/fixtures/run_out_of_gas_start_fn.wat rename to substrate/frame/contracts/fixtures/data/run_out_of_gas_start_fn.wat diff --git a/substrate/frame/contracts/fixtures/seal_input_noop.wat b/substrate/frame/contracts/fixtures/data/seal_input_noop.wat similarity index 100% rename from substrate/frame/contracts/fixtures/seal_input_noop.wat rename to substrate/frame/contracts/fixtures/data/seal_input_noop.wat diff --git a/substrate/frame/contracts/fixtures/seal_input_once.wat b/substrate/frame/contracts/fixtures/data/seal_input_once.wat similarity index 100% rename from substrate/frame/contracts/fixtures/seal_input_once.wat rename to substrate/frame/contracts/fixtures/data/seal_input_once.wat diff --git a/substrate/frame/contracts/fixtures/seal_input_twice.wat 
b/substrate/frame/contracts/fixtures/data/seal_input_twice.wat similarity index 100% rename from substrate/frame/contracts/fixtures/seal_input_twice.wat rename to substrate/frame/contracts/fixtures/data/seal_input_twice.wat diff --git a/substrate/frame/contracts/fixtures/self_destruct.wat b/substrate/frame/contracts/fixtures/data/self_destruct.wat similarity index 100% rename from substrate/frame/contracts/fixtures/self_destruct.wat rename to substrate/frame/contracts/fixtures/data/self_destruct.wat diff --git a/substrate/frame/contracts/fixtures/self_destructing_constructor.wat b/substrate/frame/contracts/fixtures/data/self_destructing_constructor.wat similarity index 100% rename from substrate/frame/contracts/fixtures/self_destructing_constructor.wat rename to substrate/frame/contracts/fixtures/data/self_destructing_constructor.wat diff --git a/substrate/frame/contracts/fixtures/set_code_hash.wat b/substrate/frame/contracts/fixtures/data/set_code_hash.wat similarity index 100% rename from substrate/frame/contracts/fixtures/set_code_hash.wat rename to substrate/frame/contracts/fixtures/data/set_code_hash.wat diff --git a/substrate/frame/contracts/fixtures/set_empty_storage.wat b/substrate/frame/contracts/fixtures/data/set_empty_storage.wat similarity index 100% rename from substrate/frame/contracts/fixtures/set_empty_storage.wat rename to substrate/frame/contracts/fixtures/data/set_empty_storage.wat diff --git a/substrate/frame/contracts/fixtures/sr25519_verify.wat b/substrate/frame/contracts/fixtures/data/sr25519_verify.wat similarity index 100% rename from substrate/frame/contracts/fixtures/sr25519_verify.wat rename to substrate/frame/contracts/fixtures/data/sr25519_verify.wat diff --git a/substrate/frame/contracts/fixtures/storage_size.wat b/substrate/frame/contracts/fixtures/data/storage_size.wat similarity index 100% rename from substrate/frame/contracts/fixtures/storage_size.wat rename to substrate/frame/contracts/fixtures/data/storage_size.wat diff --git a/substrate/frame/contracts/fixtures/store_call.wat b/substrate/frame/contracts/fixtures/data/store_call.wat similarity index 100% rename from substrate/frame/contracts/fixtures/store_call.wat rename to substrate/frame/contracts/fixtures/data/store_call.wat diff --git a/substrate/frame/contracts/fixtures/store_deploy.wat b/substrate/frame/contracts/fixtures/data/store_deploy.wat similarity index 100% rename from substrate/frame/contracts/fixtures/store_deploy.wat rename to substrate/frame/contracts/fixtures/data/store_deploy.wat diff --git a/substrate/frame/contracts/fixtures/transfer_return_code.wat b/substrate/frame/contracts/fixtures/data/transfer_return_code.wat similarity index 100% rename from substrate/frame/contracts/fixtures/transfer_return_code.wat rename to substrate/frame/contracts/fixtures/data/transfer_return_code.wat diff --git a/substrate/frame/contracts/fixtures/data/xcm_execute.wat b/substrate/frame/contracts/fixtures/data/xcm_execute.wat new file mode 100644 index 0000000000000000000000000000000000000000..b3459996a2e866a65ab4657ebcd54f9b90a77241 --- /dev/null +++ b/substrate/frame/contracts/fixtures/data/xcm_execute.wat @@ -0,0 +1,52 @@ +;; This passes its input to `seal_xcm_execute` and returns the return value to its caller. 
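For orientation, this fixture is driven from the tests added later in this diff: the caller SCALE-encodes a versioned XCM program and passes it as the call input, and the fixture hands those bytes to the `xcm_execute` host function and returns the encoded outcome. A minimal sketch of building that input (the helper name is hypothetical; the message contents mirror the test further down):

```rust
use codec::Encode;
use xcm::{v3::prelude::*, VersionedXcm};

/// Build the call input for the `xcm_execute` fixture: just the SCALE-encoded,
/// versioned XCM program (hypothetical helper mirroring the tests in this diff).
fn xcm_execute_input(amount: u128, beneficiary: [u8; 32]) -> Vec<u8> {
    let message: Xcm<()> = Xcm(vec![
        WithdrawAsset(vec![(Here, amount).into()].into()),
        DepositAsset {
            assets: All.into(),
            beneficiary: AccountId32 { network: None, id: beneficiary }.into(),
        },
    ]);
    // The fixture forwards exactly these bytes to the `xcm_execute` host function
    // and returns the SCALE-encoded `Outcome` to its caller.
    VersionedXcm::V3(message).encode()
}
```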
+(module + (import "seal0" "xcm_execute" (func $xcm_execute (param i32 i32 i32) (result i32))) + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) + (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) + (import "env" "memory" (memory 1 1)) + + ;; 0x1000 = 4k in little endian + ;; Size of input buffer + (data (i32.const 0) "\00\10") + + (func $assert (param i32) + (block $ok + (br_if $ok + (get_local 0) + ) + (unreachable) + ) + ) + + (func (export "call") + ;; Receive the encoded call + (call $seal_input + (i32.const 4) ;; Pointer to the input buffer + (i32.const 0) ;; Pointer to the buffer length (before call) and to the copied data length (after call) + ) + ;; Input data layout. + ;; [0..4) - size of the call + ;; [4..) - message + + ;; Call xcm_execute with provided input. + (call $assert + (i32.eq + (call $xcm_execute + (i32.const 4) ;; Pointer where the message is stored + (i32.load (i32.const 0)) ;; Size of the message + (i32.const 100) ;; Pointer to the where the outcome is stored + ) + (i32.const 0) + ) + ) + + (call $seal_return + (i32.const 0) ;; flags + (i32.const 100) ;; Pointer to returned value + (i32.const 10) ;; length of returned value + ) + ) + + (func (export "deploy")) +) + diff --git a/substrate/frame/contracts/fixtures/data/xcm_send.wat b/substrate/frame/contracts/fixtures/data/xcm_send.wat new file mode 100644 index 0000000000000000000000000000000000000000..9eec6388de9b71a91e715d20b7d5f450b125d317 --- /dev/null +++ b/substrate/frame/contracts/fixtures/data/xcm_send.wat @@ -0,0 +1,59 @@ +;; This passes its input to `seal_xcm_send` and returns the return value to its caller. +(module + (import "seal0" "xcm_send" (func $xcm_send (param i32 i32 i32 i32) (result i32))) + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) + (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) + (import "env" "memory" (memory 1 1)) + + ;; 0x1000 = 4k in little endian + ;; size of input buffer + (data (i32.const 0) "\00\10") + + (func $assert (param i32) + (block $ok + (br_if $ok + (get_local 0) + ) + (unreachable) + ) + ) + + (func (export "call") + + ;; Receive the encoded call + (call $seal_input + (i32.const 4) ;; Pointer to the input buffer + (i32.const 0) ;; Size of the length buffer + ) + + ;; Input data layout. + ;; [0..4) - size of the call + ;; [4..7) - dest + ;; [7..) - message + + ;; Call xcm_send with provided input. + (call $assert + (i32.eq + (call $xcm_send + (i32.const 4) ;; Pointer where the dest is stored + (i32.const 7) ;; Pointer where the message is stored + (i32.sub + (i32.load (i32.const 0)) ;; length of the input buffer + (i32.const 3) ;; Size of the XCM dest + ) + (i32.const 100) ;; Pointer to the where the message_id is stored + ) + (i32.const 0) + ) + ) + + ;; Return the the message_id + (call $seal_return + (i32.const 0) ;; flags + (i32.const 100) ;; Pointer to returned value + (i32.const 32) ;; length of returned value + ) + ) + + (func (export "deploy")) +) diff --git a/substrate/frame/contracts/fixtures/src/lib.rs b/substrate/frame/contracts/fixtures/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..48117f7ca944f3cb4004d79840d107f0c3108063 --- /dev/null +++ b/substrate/frame/contracts/fixtures/src/lib.rs @@ -0,0 +1,44 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
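The `xcm_send` fixture above expects its input as the SCALE-encoded `(dest, message)` tuple, matching the layout in its comments: the versioned destination first, then the versioned XCM program. A rough sketch of building that buffer, assuming the same shapes as the test later in this diff (the helper name is illustrative):

```rust
use codec::Encode;
use xcm::{v3::prelude::*, VersionedMultiLocation, VersionedXcm};

/// Build the call input for the `xcm_send` fixture: the SCALE-encoded
/// `(dest, message)` tuple (hypothetical helper mirroring the tests in this diff).
fn xcm_send_input(message: Xcm<()>) -> Vec<u8> {
    // `VersionedMultiLocation::V3(Parent)` encodes to exactly 3 bytes, which is the
    // "Size of the XCM dest" that the fixture subtracts from the input length.
    let dest = VersionedMultiLocation::V3(MultiLocation::from(Parent));
    (dest, VersionedXcm::V3(message)).encode()
}
```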
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use sp_runtime::traits::Hash;
+use std::{env::var, path::PathBuf};
+
+fn fixtures_root_dir() -> PathBuf {
+	match (var("CARGO_MANIFEST_DIR"), var("CARGO_PKG_NAME")) {
+		// When `CARGO_MANIFEST_DIR` is not set, Rust resolves relative paths from the root folder
+		(Err(_), _) => "substrate/frame/contracts/fixtures/data".into(),
+		(Ok(path), Ok(s)) if s == "pallet-contracts" => PathBuf::from(path).join("fixtures/data"),
+		(Ok(path), Ok(s)) if s == "pallet-contracts-mock-network" =>
+			PathBuf::from(path).parent().unwrap().join("fixtures/data"),
+		(Ok(_), pkg_name) => panic!("Failed to resolve fixture dir for tests from {pkg_name:?}."),
+	}
+}
+
+/// Load a given wasm module represented by a .wat file and return the wasm binary contents along
+/// with its hash.
+///
+/// The fixture files are located under the `fixtures/data/` directory.
+pub fn compile_module<T>(fixture_name: &str) -> wat::Result<(Vec<u8>, <T::Hashing as Hash>::Output)>
+where
+	T: frame_system::Config,
+{
+	let fixture_path = fixtures_root_dir().join(format!("{fixture_name}.wat"));
+	let wasm_binary = wat::parse_file(fixture_path)?;
+	let code_hash = T::Hashing::hash(&wasm_binary);
+	Ok((wasm_binary, code_hash))
+}
diff --git a/substrate/frame/contracts/mock-network/Cargo.toml b/substrate/frame/contracts/mock-network/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..9d5fe1aaf4ec6385a43e3872f5d98520165ccd0c
--- /dev/null
+++ b/substrate/frame/contracts/mock-network/Cargo.toml
@@ -0,0 +1,91 @@
+[package]
+name = "pallet-contracts-mock-network"
+version = "1.0.0"
+authors.workspace = true
+edition.workspace = true
+license.workspace = true
+homepage = "https://substrate.io"
+repository.workspace = true
+description = "A mock network for testing pallet-contracts"
+
+[dependencies]
+codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", "max-encoded-len"] }
+
+frame-support = { path = "../../support", default-features = false}
+frame-system = { path = "../../system", default-features = false}
+pallet-assets = { path = "../../assets" }
+pallet-balances = { path = "../../balances" }
+pallet-contracts = { path = ".."
} +pallet-contracts-primitives = { path = "../primitives", default-features = false} +pallet-contracts-proc-macro = { path = "../proc-macro" } +pallet-insecure-randomness-collective-flip = { path = "../../insecure-randomness-collective-flip" } +pallet-message-queue = { path = "../../message-queue" } +pallet-proxy = { path = "../../proxy" } +pallet-timestamp = { path = "../../timestamp" } +pallet-utility = { path = "../../utility" } +pallet-xcm = { path = "../../../../polkadot/xcm/pallet-xcm", default-features = false} +polkadot-parachain-primitives = { path = "../../../../polkadot/parachain" } +polkadot-primitives = { path = "../../../../polkadot/primitives" } +polkadot-runtime-parachains = {path = "../../../../polkadot/runtime/parachains"} +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +sp-api = { path = "../../../primitives/api", default-features = false} +sp-core = { path = "../../../primitives/core", default-features = false} +sp-io = { path = "../../../primitives/io", default-features = false} +sp-keystore = { path = "../../../primitives/keystore" } +sp-runtime = { path = "../../../primitives/runtime", default-features = false} +sp-std = { path = "../../../primitives/std", default-features = false} +sp-tracing = { path = "../../../primitives/tracing" } +xcm = { package = "staging-xcm", path = "../../../../polkadot/xcm", default-features = false} +xcm-builder = {package = "staging-xcm-builder", path = "../../../../polkadot/xcm/xcm-builder"} +xcm-executor = { package = "staging-xcm-executor", path = "../../../../polkadot/xcm/xcm-executor", default-features = false} +xcm-simulator = {path = "../../../../polkadot/xcm/xcm-simulator"} + +[dev-dependencies] +assert_matches = "1" +pretty_assertions = "1" +pallet-contracts-fixtures = { path = "../fixtures" } + +[features] +default = [ "std" ] +std = [ + "codec/std", + "frame-support/std", + "frame-system/std", + "pallet-balances/std", + "pallet-contracts-primitives/std", + "pallet-contracts-proc-macro/full", + "pallet-contracts/std", + "pallet-insecure-randomness-collective-flip/std", + "pallet-proxy/std", + "pallet-timestamp/std", + "pallet-utility/std", + "pallet-xcm/std", + "scale-info/std", + "sp-api/std", + "sp-core/std", + "sp-io/std", + "sp-keystore/std", + "sp-runtime/std", + "sp-std/std", + "xcm-executor/std", + "xcm/std", +] + +runtime-benchmarks = [ + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-assets/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-contracts/runtime-benchmarks", + "pallet-message-queue/runtime-benchmarks", + "pallet-proxy/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks", + "pallet-utility/runtime-benchmarks", + "pallet-xcm/runtime-benchmarks", + "polkadot-parachain-primitives/runtime-benchmarks", + "polkadot-primitives/runtime-benchmarks", + "polkadot-runtime-parachains/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", + "xcm-builder/runtime-benchmarks", + "xcm-executor/runtime-benchmarks", +] diff --git a/substrate/frame/contracts/mock-network/src/lib.rs b/substrate/frame/contracts/mock-network/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..345c69541b6f6f4c5b5c65e13c40a9af2d0ebcc5 --- /dev/null +++ b/substrate/frame/contracts/mock-network/src/lib.rs @@ -0,0 +1,151 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Polkadot. 
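The `pallet-contracts-fixtures` crate added above (and listed here as a dev-dependency) replaces the `compile_module` helper that previously lived in the pallet's test module. A minimal usage sketch, assuming a mock runtime type `Test` that implements `frame_system::Config` as in the pallet's own tests:

```rust
use pallet_contracts_fixtures::compile_module;
use sp_runtime::traits::Hash;

#[test]
fn loads_dummy_fixture() {
    // `Test` stands in for any `frame_system::Config` runtime, e.g. the pallet's test runtime.
    let (wasm, code_hash) = compile_module::<Test>("dummy").unwrap();
    assert!(!wasm.is_empty());
    // The returned hash is the wasm blob hashed with the runtime's configured hasher.
    assert_eq!(code_hash, <Test as frame_system::Config>::Hashing::hash(&wasm));
}
```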
+ +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +pub mod mocks; +pub mod parachain; +pub mod primitives; +pub mod relay_chain; + +#[cfg(test)] +mod tests; + +use crate::primitives::{AccountId, UNITS}; +use sp_runtime::BuildStorage; +use xcm::latest::{prelude::*, MultiLocation}; +use xcm_executor::traits::ConvertLocation; +use xcm_simulator::{decl_test_network, decl_test_parachain, decl_test_relay_chain, TestExt}; + +// Accounts +pub const ADMIN: sp_runtime::AccountId32 = sp_runtime::AccountId32::new([0u8; 32]); +pub const ALICE: sp_runtime::AccountId32 = sp_runtime::AccountId32::new([1u8; 32]); +pub const BOB: sp_runtime::AccountId32 = sp_runtime::AccountId32::new([2u8; 32]); + +// Balances +pub const INITIAL_BALANCE: u128 = 1_000_000_000 * UNITS; + +decl_test_parachain! { + pub struct ParaA { + Runtime = parachain::Runtime, + XcmpMessageHandler = parachain::MsgQueue, + DmpMessageHandler = parachain::MsgQueue, + new_ext = para_ext(1), + } +} + +decl_test_relay_chain! { + pub struct Relay { + Runtime = relay_chain::Runtime, + RuntimeCall = relay_chain::RuntimeCall, + RuntimeEvent = relay_chain::RuntimeEvent, + XcmConfig = relay_chain::XcmConfig, + MessageQueue = relay_chain::MessageQueue, + System = relay_chain::System, + new_ext = relay_ext(), + } +} + +decl_test_network! 
{ + pub struct MockNet { + relay_chain = Relay, + parachains = vec![ + (1, ParaA), + ], + } +} + +pub fn relay_sovereign_account_id() -> AccountId { + let location: MultiLocation = (Parent,).into(); + parachain::SovereignAccountOf::convert_location(&location).unwrap() +} + +pub fn parachain_sovereign_account_id(para: u32) -> AccountId { + let location: MultiLocation = (Parachain(para),).into(); + relay_chain::SovereignAccountOf::convert_location(&location).unwrap() +} + +pub fn parachain_account_sovereign_account_id( + para: u32, + who: sp_runtime::AccountId32, +) -> AccountId { + let location: MultiLocation = ( + Parachain(para), + AccountId32 { network: Some(relay_chain::RelayNetwork::get()), id: who.into() }, + ) + .into(); + relay_chain::SovereignAccountOf::convert_location(&location).unwrap() +} + +pub fn para_ext(para_id: u32) -> sp_io::TestExternalities { + use parachain::{MsgQueue, Runtime, System}; + + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + + pallet_balances::GenesisConfig:: { + balances: vec![ + (ALICE, INITIAL_BALANCE), + (relay_sovereign_account_id(), INITIAL_BALANCE), + (BOB, INITIAL_BALANCE), + ], + } + .assimilate_storage(&mut t) + .unwrap(); + + pallet_assets::GenesisConfig:: { + assets: vec![ + (0u128, ADMIN, false, 1u128), // Create derivative asset for relay's native token + ], + metadata: Default::default(), + accounts: vec![ + (0u128, ALICE, INITIAL_BALANCE), + (0u128, relay_sovereign_account_id(), INITIAL_BALANCE), + ], + } + .assimilate_storage(&mut t) + .unwrap(); + + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| { + sp_tracing::try_init_simple(); + System::set_block_number(1); + MsgQueue::set_para_id(para_id.into()); + }); + ext +} + +pub fn relay_ext() -> sp_io::TestExternalities { + use relay_chain::{Runtime, System}; + + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + + pallet_balances::GenesisConfig:: { + balances: vec![ + (ALICE, INITIAL_BALANCE), + (parachain_sovereign_account_id(1), INITIAL_BALANCE), + (parachain_account_sovereign_account_id(1, ALICE), INITIAL_BALANCE), + ], + } + .assimilate_storage(&mut t) + .unwrap(); + + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| { + System::set_block_number(1); + }); + ext +} + +pub type ParachainPalletXcm = pallet_xcm::Pallet; +pub type ParachainBalances = pallet_balances::Pallet; diff --git a/substrate/frame/contracts/mock-network/src/mocks.rs b/substrate/frame/contracts/mock-network/src/mocks.rs new file mode 100644 index 0000000000000000000000000000000000000000..bf3baec7a524abc0a9c5eee8e637b4b9a9cf3e04 --- /dev/null +++ b/substrate/frame/contracts/mock-network/src/mocks.rs @@ -0,0 +1,18 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
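The simulator scaffolding above (`MockNet`, `ParaA`, `Relay`, plus the `para_ext`/`relay_ext` genesis builders) is what the tests at the end of this diff build on. The general pattern looks roughly like the following condensed sketch; the assertions are placeholders chosen to match the genesis balances set up above:

```rust
use pallet_contracts_mock_network::{
    parachain_sovereign_account_id, relay_chain, MockNet, ParaA, ParachainBalances, Relay, ALICE,
    INITIAL_BALANCE,
};
use xcm_simulator::TestExt;

#[test]
fn mock_network_round_trip() {
    // Reset the storage of every chain in the mock network before each test.
    MockNet::reset();

    // Run code in the context of the parachain...
    ParaA::execute_with(|| {
        assert_eq!(ParachainBalances::free_balance(&ALICE), INITIAL_BALANCE);
    });

    // ...and in the context of the relay chain.
    Relay::execute_with(|| {
        let sovereign = parachain_sovereign_account_id(1);
        assert_eq!(relay_chain::Balances::free_balance(&sovereign), INITIAL_BALANCE);
    });
}
```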
+ +pub mod msg_queue; +pub mod relay_message_queue; diff --git a/substrate/frame/contracts/mock-network/src/mocks/msg_queue.rs b/substrate/frame/contracts/mock-network/src/mocks/msg_queue.rs new file mode 100644 index 0000000000000000000000000000000000000000..82fb8590e269083547fd124a9058816107dde811 --- /dev/null +++ b/substrate/frame/contracts/mock-network/src/mocks/msg_queue.rs @@ -0,0 +1,168 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Parachain runtime mock. + +use codec::{Decode, Encode}; + +use frame_support::weights::Weight; +use polkadot_parachain_primitives::primitives::{ + DmpMessageHandler, Id as ParaId, XcmpMessageFormat, XcmpMessageHandler, +}; +use polkadot_primitives::BlockNumber as RelayBlockNumber; +use sp_runtime::traits::{Get, Hash}; + +use sp_std::prelude::*; +use xcm::{latest::prelude::*, VersionedXcm}; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + type XcmExecutor: ExecuteXcm; + } + + #[pallet::call] + impl Pallet {} + + #[pallet::pallet] + #[pallet::without_storage_info] + pub struct Pallet(_); + + #[pallet::storage] + #[pallet::getter(fn parachain_id)] + pub(super) type ParachainId = StorageValue<_, ParaId, ValueQuery>; + + #[pallet::storage] + #[pallet::getter(fn received_dmp)] + /// A queue of received DMP messages + pub(super) type ReceivedDmp = StorageValue<_, Vec>, ValueQuery>; + + impl Get for Pallet { + fn get() -> ParaId { + Self::parachain_id() + } + } + + pub type MessageId = [u8; 32]; + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// Some XCM was executed OK. + Success(Option), + /// Some XCM failed. + Fail(Option, XcmError), + /// Bad XCM version used. + BadVersion(Option), + /// Bad XCM format used. + BadFormat(Option), + + // DMP + /// Downward message is invalid XCM. + InvalidFormat(MessageId), + /// Downward message is unsupported version of XCM. + UnsupportedVersion(MessageId), + /// Downward message executed with the given outcome. 
+ ExecutedDownward(MessageId, Outcome), + } + + impl Pallet { + pub fn set_para_id(para_id: ParaId) { + ParachainId::::put(para_id); + } + + fn handle_xcmp_message( + sender: ParaId, + _sent_at: RelayBlockNumber, + xcm: VersionedXcm, + max_weight: Weight, + ) -> Result { + let hash = Encode::using_encoded(&xcm, T::Hashing::hash); + let message_hash = Encode::using_encoded(&xcm, sp_io::hashing::blake2_256); + let (result, event) = match Xcm::::try_from(xcm) { + Ok(xcm) => { + let location = (Parent, Parachain(sender.into())); + match T::XcmExecutor::execute_xcm(location, xcm, message_hash, max_weight) { + Outcome::Error(e) => (Err(e), Event::Fail(Some(hash), e)), + Outcome::Complete(w) => (Ok(w), Event::Success(Some(hash))), + // As far as the caller is concerned, this was dispatched without error, so + // we just report the weight used. + Outcome::Incomplete(w, e) => (Ok(w), Event::Fail(Some(hash), e)), + } + }, + Err(()) => (Err(XcmError::UnhandledXcmVersion), Event::BadVersion(Some(hash))), + }; + Self::deposit_event(event); + result + } + } + + impl XcmpMessageHandler for Pallet { + fn handle_xcmp_messages<'a, I: Iterator>( + iter: I, + max_weight: Weight, + ) -> Weight { + for (sender, sent_at, data) in iter { + let mut data_ref = data; + let _ = XcmpMessageFormat::decode(&mut data_ref) + .expect("Simulator encodes with versioned xcm format; qed"); + + let mut remaining_fragments = data_ref; + while !remaining_fragments.is_empty() { + if let Ok(xcm) = + VersionedXcm::::decode(&mut remaining_fragments) + { + let _ = Self::handle_xcmp_message(sender, sent_at, xcm, max_weight); + } else { + debug_assert!(false, "Invalid incoming XCMP message data"); + } + } + } + max_weight + } + } + + impl DmpMessageHandler for Pallet { + fn handle_dmp_messages( + iter: impl Iterator)>, + limit: Weight, + ) -> Weight { + for (_i, (_sent_at, data)) in iter.enumerate() { + let id = sp_io::hashing::blake2_256(&data[..]); + let maybe_versioned = VersionedXcm::::decode(&mut &data[..]); + match maybe_versioned { + Err(_) => { + Self::deposit_event(Event::InvalidFormat(id)); + }, + Ok(versioned) => match Xcm::try_from(versioned) { + Err(()) => Self::deposit_event(Event::UnsupportedVersion(id)), + Ok(x) => { + let outcome = T::XcmExecutor::execute_xcm(Parent, x.clone(), id, limit); + >::append(x); + Self::deposit_event(Event::ExecutedDownward(id, outcome)); + }, + }, + } + } + limit + } + } +} diff --git a/substrate/frame/contracts/mock-network/src/mocks/relay_message_queue.rs b/substrate/frame/contracts/mock-network/src/mocks/relay_message_queue.rs new file mode 100644 index 0000000000000000000000000000000000000000..14099965e3f173d7f06b86c08b722ceeded26418 --- /dev/null +++ b/substrate/frame/contracts/mock-network/src/mocks/relay_message_queue.rs @@ -0,0 +1,52 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
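The `DmpMessageHandler` implementation above appends every successfully decoded and executed downward message to `ReceivedDmp`, so tests can inspect exactly what arrived on the parachain. A rough sketch of that pattern (the relay-side send is left as a placeholder):

```rust
use pallet_contracts_mock_network::{parachain, MockNet, ParaA};
use xcm_simulator::TestExt;

#[test]
fn inspect_downward_messages() {
    MockNet::reset();

    // ... have the relay chain enqueue a downward XCM for parachain 1 here ...

    ParaA::execute_with(|| {
        // Executed downward messages are recorded by the mock message queue above.
        for xcm in parachain::MsgQueue::received_dmp() {
            println!("executed downward message with {} instructions", xcm.0.len());
        }
    });
}
```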
+ +use frame_support::{parameter_types, weights::Weight}; +use xcm::latest::prelude::*; +use xcm_simulator::{ + AggregateMessageOrigin, ProcessMessage, ProcessMessageError, UmpQueueId, WeightMeter, +}; + +use crate::relay_chain::{RuntimeCall, XcmConfig}; + +parameter_types! { + /// Amount of weight that can be spent per block to service messages. + pub MessageQueueServiceWeight: Weight = Weight::from_parts(1_000_000_000, 1_000_000); + pub const MessageQueueHeapSize: u32 = 65_536; + pub const MessageQueueMaxStale: u32 = 16; +} + +/// Message processor to handle any messages that were enqueued into the `MessageQueue` pallet. +pub struct MessageProcessor; +impl ProcessMessage for MessageProcessor { + type Origin = AggregateMessageOrigin; + + fn process_message( + message: &[u8], + origin: Self::Origin, + meter: &mut WeightMeter, + id: &mut [u8; 32], + ) -> Result { + let para = match origin { + AggregateMessageOrigin::Ump(UmpQueueId::Para(para)) => para, + }; + xcm_builder::ProcessXcmMessage::< + Junction, + xcm_executor::XcmExecutor, + RuntimeCall, + >::process_message(message, Junction::Parachain(para.into()), meter, id) + } +} diff --git a/substrate/frame/contracts/mock-network/src/parachain.rs b/substrate/frame/contracts/mock-network/src/parachain.rs new file mode 100644 index 0000000000000000000000000000000000000000..1465b02f903be33aa377f1cd6f1d9519c502ed4d --- /dev/null +++ b/substrate/frame/contracts/mock-network/src/parachain.rs @@ -0,0 +1,353 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Parachain runtime mock. + +mod contracts_config; +use crate::{ + mocks::msg_queue::pallet as mock_msg_queue, + primitives::{AccountId, AssetIdForAssets, Balance}, +}; +use core::marker::PhantomData; +use frame_support::{ + construct_runtime, parameter_types, + traits::{AsEnsureOriginWithArg, Contains, ContainsPair, Everything, EverythingBut, Nothing}, + weights::{ + constants::{WEIGHT_PROOF_SIZE_PER_MB, WEIGHT_REF_TIME_PER_SECOND}, + Weight, + }, +}; +use frame_system::{EnsureRoot, EnsureSigned}; +use pallet_xcm::XcmPassthrough; +use sp_core::{ConstU32, ConstU64, H256}; +use sp_runtime::traits::{Get, IdentityLookup, MaybeEquivalence}; + +use sp_std::prelude::*; +use xcm::latest::prelude::*; +use xcm_builder::{ + AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowTopLevelPaidExecutionFrom, + ConvertedConcreteId, CurrencyAdapter as XcmCurrencyAdapter, EnsureXcmOrigin, + FixedRateOfFungible, FixedWeightBounds, FungiblesAdapter, IsConcrete, NativeAsset, NoChecking, + ParentAsSuperuser, ParentIsPreset, SignedAccountId32AsNative, SignedToAccountId32, + SovereignSignedViaLocation, WithComputedOrigin, +}; +use xcm_executor::{traits::JustTry, Config, XcmExecutor}; + +pub type SovereignAccountOf = + (AccountId32Aliases, ParentIsPreset); + +parameter_types! 
{ + pub const BlockHashCount: u64 = 250; +} + +impl frame_system::Config for Runtime { + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type Nonce = u64; + type Block = Block; + type Hash = H256; + type Hashing = ::sp_runtime::traits::BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = BlockHashCount; + type BlockWeights = (); + type BlockLength = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type DbWeight = (); + type BaseCallFilter = Everything; + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = ConstU32<16>; +} + +parameter_types! { + pub ExistentialDeposit: Balance = 1; + pub const MaxLocks: u32 = 50; + pub const MaxReserves: u32 = 50; +} + +impl pallet_balances::Config for Runtime { + type AccountStore = System; + type Balance = Balance; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type FreezeIdentifier = (); + type MaxFreezes = ConstU32<0>; + type MaxHolds = ConstU32<1>; + type MaxLocks = MaxLocks; + type MaxReserves = MaxReserves; + type ReserveIdentifier = [u8; 8]; + type RuntimeEvent = RuntimeEvent; + type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; + type WeightInfo = (); +} + +parameter_types! { + pub const AssetDeposit: u128 = 1_000_000; + pub const MetadataDepositBase: u128 = 1_000_000; + pub const MetadataDepositPerByte: u128 = 100_000; + pub const AssetAccountDeposit: u128 = 1_000_000; + pub const ApprovalDeposit: u128 = 1_000_000; + pub const AssetsStringLimit: u32 = 50; + pub const RemoveItemsLimit: u32 = 50; +} + +impl pallet_assets::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Balance = Balance; + type AssetId = AssetIdForAssets; + type Currency = Balances; + type CreateOrigin = AsEnsureOriginWithArg>; + type ForceOrigin = EnsureRoot; + type AssetDeposit = AssetDeposit; + type MetadataDepositBase = MetadataDepositBase; + type MetadataDepositPerByte = MetadataDepositPerByte; + type AssetAccountDeposit = AssetAccountDeposit; + type ApprovalDeposit = ApprovalDeposit; + type StringLimit = AssetsStringLimit; + type Freezer = (); + type Extra = (); + type WeightInfo = (); + type RemoveItemsLimit = RemoveItemsLimit; + type AssetIdParameter = AssetIdForAssets; + type CallbackHandle = (); + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = (); +} + +parameter_types! { + pub const ReservedXcmpWeight: Weight = Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND.saturating_div(4), 0); + pub const ReservedDmpWeight: Weight = Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND.saturating_div(4), 0); +} + +parameter_types! { + pub const KsmLocation: MultiLocation = MultiLocation::parent(); + pub const TokenLocation: MultiLocation = Here.into_location(); + pub const RelayNetwork: NetworkId = ByGenesis([0; 32]); + pub UniversalLocation: InteriorMultiLocation = Parachain(MsgQueue::parachain_id().into()).into(); +} + +pub type XcmOriginToCallOrigin = ( + SovereignSignedViaLocation, + ParentAsSuperuser, + SignedAccountId32AsNative, + XcmPassthrough, +); + +parameter_types! 
{ + pub const XcmInstructionWeight: Weight = Weight::from_parts(1_000, 1_000); + pub TokensPerSecondPerMegabyte: (AssetId, u128, u128) = (Concrete(Parent.into()), 1_000_000_000_000, 1024 * 1024); + pub const MaxInstructions: u32 = 100; + pub const MaxAssetsIntoHolding: u32 = 64; + pub ForeignPrefix: MultiLocation = (Parent,).into(); + pub CheckingAccount: AccountId = PolkadotXcm::check_account(); + pub TrustedLockPairs: (MultiLocation, MultiAssetFilter) = + (Parent.into(), Wild(AllOf { id: Concrete(Parent.into()), fun: WildFungible })); +} + +pub fn estimate_message_fee(number_of_instructions: u64) -> u128 { + let weight = estimate_weight(number_of_instructions); + + estimate_fee_for_weight(weight) +} + +pub fn estimate_weight(number_of_instructions: u64) -> Weight { + XcmInstructionWeight::get().saturating_mul(number_of_instructions) +} + +pub fn estimate_fee_for_weight(weight: Weight) -> u128 { + let (_, units_per_second, units_per_mb) = TokensPerSecondPerMegabyte::get(); + + units_per_second * (weight.ref_time() as u128) / (WEIGHT_REF_TIME_PER_SECOND as u128) + + units_per_mb * (weight.proof_size() as u128) / (WEIGHT_PROOF_SIZE_PER_MB as u128) +} + +pub type LocalBalancesTransactor = + XcmCurrencyAdapter, SovereignAccountOf, AccountId, ()>; + +pub struct FromMultiLocationToAsset(PhantomData<(MultiLocation, AssetId)>); +impl MaybeEquivalence + for FromMultiLocationToAsset +{ + fn convert(value: &MultiLocation) -> Option { + match *value { + MultiLocation { parents: 1, interior: Here } => Some(0 as AssetIdForAssets), + MultiLocation { parents: 1, interior: X1(Parachain(para_id)) } => + Some(para_id as AssetIdForAssets), + _ => None, + } + } + + fn convert_back(_id: &AssetIdForAssets) -> Option { + None + } +} + +pub type ForeignAssetsTransactor = FungiblesAdapter< + Assets, + ConvertedConcreteId< + AssetIdForAssets, + Balance, + FromMultiLocationToAsset, + JustTry, + >, + SovereignAccountOf, + AccountId, + NoChecking, + CheckingAccount, +>; + +/// Means for transacting assets on this chain +pub type AssetTransactors = (LocalBalancesTransactor, ForeignAssetsTransactor); + +pub struct ParentRelay; +impl Contains for ParentRelay { + fn contains(location: &MultiLocation) -> bool { + location.contains_parents_only(1) + } +} +pub struct ThisParachain; +impl Contains for ThisParachain { + fn contains(location: &MultiLocation) -> bool { + matches!( + location, + MultiLocation { parents: 0, interior: Junctions::X1(Junction::AccountId32 { .. }) } + ) + } +} + +pub type XcmRouter = crate::ParachainXcmRouter; + +pub type Barrier = ( + xcm_builder::AllowUnpaidExecutionFrom, + WithComputedOrigin< + (AllowExplicitUnpaidExecutionFrom, AllowTopLevelPaidExecutionFrom), + UniversalLocation, + ConstU32<1>, + >, +); + +parameter_types! 
{ + pub NftCollectionOne: MultiAssetFilter + = Wild(AllOf { fun: WildNonFungible, id: Concrete((Parent, GeneralIndex(1)).into()) }); + pub NftCollectionOneForRelay: (MultiAssetFilter, MultiLocation) + = (NftCollectionOne::get(), Parent.into()); + pub RelayNativeAsset: MultiAssetFilter = Wild(AllOf { fun: WildFungible, id: Concrete((Parent, Here).into()) }); + pub RelayNativeAssetForRelay: (MultiAssetFilter, MultiLocation) = (RelayNativeAsset::get(), Parent.into()); +} +pub type TrustedTeleporters = + (xcm_builder::Case, xcm_builder::Case); +pub type TrustedReserves = EverythingBut>; + +pub struct XcmConfig; +impl Config for XcmConfig { + type RuntimeCall = RuntimeCall; + type XcmSender = XcmRouter; + type AssetTransactor = AssetTransactors; + type OriginConverter = XcmOriginToCallOrigin; + type IsReserve = (NativeAsset, TrustedReserves); + type IsTeleporter = TrustedTeleporters; + type UniversalLocation = UniversalLocation; + type Barrier = Barrier; + type Weigher = FixedWeightBounds; + type Trader = FixedRateOfFungible; + type ResponseHandler = PolkadotXcm; + type AssetTrap = PolkadotXcm; + type AssetLocker = PolkadotXcm; + type AssetExchanger = (); + type AssetClaims = PolkadotXcm; + type SubscriptionService = PolkadotXcm; + type PalletInstancesInfo = AllPalletsWithSystem; + type FeeManager = (); + type MaxAssetsIntoHolding = MaxAssetsIntoHolding; + type MessageExporter = (); + type UniversalAliases = Nothing; + type CallDispatcher = RuntimeCall; + type SafeCallFilter = Everything; + type Aliasers = Nothing; +} + +impl mock_msg_queue::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type XcmExecutor = XcmExecutor; +} + +pub type LocalOriginToLocation = SignedToAccountId32; + +pub struct TrustedLockerCase(PhantomData); +impl> ContainsPair + for TrustedLockerCase +{ + fn contains(origin: &MultiLocation, asset: &MultiAsset) -> bool { + let (o, a) = T::get(); + a.matches(asset) && &o == origin + } +} + +impl pallet_xcm::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type SendXcmOrigin = EnsureXcmOrigin; + type XcmRouter = XcmRouter; + type ExecuteXcmOrigin = EnsureXcmOrigin; + type XcmExecuteFilter = Everything; + type XcmExecutor = XcmExecutor; + type XcmTeleportFilter = Nothing; + type XcmReserveTransferFilter = Everything; + type Weigher = FixedWeightBounds; + type UniversalLocation = UniversalLocation; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100; + type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion; + type Currency = Balances; + type CurrencyMatcher = IsConcrete; + type TrustedLockers = TrustedLockerCase; + type SovereignAccountOf = SovereignAccountOf; + type MaxLockers = ConstU32<8>; + type MaxRemoteLockConsumers = ConstU32<0>; + type RemoteLockConsumerIdentifier = (); + type WeightInfo = pallet_xcm::TestWeightInfo; + type AdminOrigin = EnsureRoot; +} + +type Block = frame_system::mocking::MockBlock; + +impl pallet_timestamp::Config for Runtime { + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = ConstU64<1>; + type WeightInfo = (); +} + +construct_runtime!( + pub enum Runtime + { + System: frame_system, + Balances: pallet_balances, + Timestamp: pallet_timestamp, + MsgQueue: mock_msg_queue, + PolkadotXcm: pallet_xcm, + Contracts: pallet_contracts, + Assets: pallet_assets, + } +); diff --git a/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs b/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs new file mode 100644 
index 0000000000000000000000000000000000000000..dadba394e26453366d0b90ce8ff18931ab00743f --- /dev/null +++ b/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs @@ -0,0 +1,98 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use super::{Balances, Runtime, RuntimeCall, RuntimeEvent}; +use crate::{ + parachain, + parachain::RuntimeHoldReason, + primitives::{Balance, CENTS}, +}; +use frame_support::{ + parameter_types, + traits::{ConstBool, ConstU32, Contains, Randomness}, + weights::Weight, +}; +use frame_system::pallet_prelude::BlockNumberFor; +use pallet_xcm::BalanceOf; +use sp_runtime::{traits::Convert, Perbill}; + +pub const fn deposit(items: u32, bytes: u32) -> Balance { + items as Balance * 1 * CENTS + (bytes as Balance) * 1 * CENTS +} + +parameter_types! { + pub const DepositPerItem: Balance = deposit(1, 0); + pub const DepositPerByte: Balance = deposit(0, 1); + pub const DefaultDepositLimit: Balance = deposit(1024, 1024 * 1024); + pub Schedule: pallet_contracts::Schedule = Default::default(); + pub const CodeHashLockupDepositPercent: Perbill = Perbill::from_percent(0); + pub const MaxDelegateDependencies: u32 = 32; +} + +pub struct DummyRandomness(sp_std::marker::PhantomData); + +impl Randomness> for DummyRandomness { + fn random(_subject: &[u8]) -> (T::Hash, BlockNumberFor) { + (Default::default(), Default::default()) + } +} + +impl Convert> for Runtime { + fn convert(w: Weight) -> BalanceOf { + w.ref_time().into() + } +} + +#[derive(Clone, Default)] +pub struct Filters; + +impl Contains for Filters { + fn contains(call: &RuntimeCall) -> bool { + match call { + parachain::RuntimeCall::Contracts(_) => true, + _ => false, + } + } +} + +impl pallet_contracts::Config for Runtime { + type AddressGenerator = pallet_contracts::DefaultAddressGenerator; + type CallFilter = Filters; + type CallStack = [pallet_contracts::Frame; 5]; + type ChainExtension = (); + type CodeHashLockupDepositPercent = CodeHashLockupDepositPercent; + type Currency = Balances; + type DefaultDepositLimit = DefaultDepositLimit; + type DepositPerByte = DepositPerByte; + type DepositPerItem = DepositPerItem; + type MaxCodeLen = ConstU32<{ 123 * 1024 }>; + type MaxDebugBufferLen = ConstU32<{ 2 * 1024 * 1024 }>; + type MaxDelegateDependencies = MaxDelegateDependencies; + type MaxStorageKeyLen = ConstU32<128>; + type Migrations = (); + type Randomness = DummyRandomness; + type RuntimeCall = RuntimeCall; + type RuntimeEvent = RuntimeEvent; + type RuntimeHoldReason = RuntimeHoldReason; + type Schedule = Schedule; + type Time = super::Timestamp; + type UnsafeUnstableInterface = ConstBool; + type WeightInfo = (); + type WeightPrice = Self; + type Debug = (); + type Environment = (); + type Xcm = pallet_xcm::Pallet; +} diff --git a/substrate/frame/contracts/mock-network/src/primitives.rs b/substrate/frame/contracts/mock-network/src/primitives.rs new file mode 100644 index 
0000000000000000000000000000000000000000..efc42772f88ade5868492964b1b383c26ee689ac --- /dev/null +++ b/substrate/frame/contracts/mock-network/src/primitives.rs @@ -0,0 +1,23 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +pub type Balance = u128; + +pub const UNITS: Balance = 10_000_000_000; +pub const CENTS: Balance = UNITS / 100; // 100_000_000 + +pub type AccountId = sp_runtime::AccountId32; +pub type AssetIdForAssets = u128; diff --git a/substrate/frame/contracts/mock-network/src/relay_chain.rs b/substrate/frame/contracts/mock-network/src/relay_chain.rs new file mode 100644 index 0000000000000000000000000000000000000000..c59c8e4bfa84355d65582e63786ed4ee105f3952 --- /dev/null +++ b/substrate/frame/contracts/mock-network/src/relay_chain.rs @@ -0,0 +1,236 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Relay chain runtime mock. + +use frame_support::{ + construct_runtime, parameter_types, + traits::{Contains, Everything, Nothing}, + weights::Weight, +}; + +use frame_system::EnsureRoot; +use sp_core::{ConstU32, H256}; +use sp_runtime::traits::IdentityLookup; + +use polkadot_parachain_primitives::primitives::Id as ParaId; +use polkadot_runtime_parachains::{configuration, origin, shared}; +use xcm::latest::prelude::*; +use xcm_builder::{ + AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowSubscriptionsFrom, + AllowTopLevelPaidExecutionFrom, ChildParachainAsNative, ChildParachainConvertsVia, + ChildSystemParachainAsSuperuser, CurrencyAdapter as XcmCurrencyAdapter, DescribeAllTerminal, + DescribeFamily, FixedRateOfFungible, FixedWeightBounds, HashedDescription, IsConcrete, + SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, WithComputedOrigin, +}; +use xcm_executor::{Config, XcmExecutor}; + +use super::{ + mocks::relay_message_queue::*, + primitives::{AccountId, Balance}, +}; + +parameter_types! 
{ + pub const BlockHashCount: u64 = 250; +} + +impl frame_system::Config for Runtime { + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type Block = Block; + type Nonce = u64; + type Hash = H256; + type Hashing = ::sp_runtime::traits::BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = BlockHashCount; + type BlockWeights = (); + type BlockLength = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type DbWeight = (); + type BaseCallFilter = Everything; + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = ConstU32<16>; +} + +parameter_types! { + pub ExistentialDeposit: Balance = 1; + pub const MaxLocks: u32 = 50; + pub const MaxReserves: u32 = 50; +} + +impl pallet_balances::Config for Runtime { + type MaxLocks = MaxLocks; + type Balance = Balance; + type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = (); + type MaxReserves = MaxReserves; + type ReserveIdentifier = [u8; 8]; + type FreezeIdentifier = (); + type MaxHolds = ConstU32<0>; + type MaxFreezes = ConstU32<0>; + type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; +} + +impl shared::Config for Runtime {} + +impl configuration::Config for Runtime { + type WeightInfo = configuration::TestWeightInfo; +} + +parameter_types! { + pub RelayNetwork: NetworkId = ByGenesis([0; 32]); + pub const TokenLocation: MultiLocation = Here.into_location(); + pub UniversalLocation: InteriorMultiLocation = Here; + pub UnitWeightCost: u64 = 1_000; +} + +pub type SovereignAccountOf = ( + HashedDescription>, + AccountId32Aliases, + ChildParachainConvertsVia, +); + +pub type LocalBalancesTransactor = + XcmCurrencyAdapter, SovereignAccountOf, AccountId, ()>; + +pub type AssetTransactors = LocalBalancesTransactor; + +type LocalOriginConverter = ( + SovereignSignedViaLocation, + ChildParachainAsNative, + SignedAccountId32AsNative, + ChildSystemParachainAsSuperuser, +); + +parameter_types! 
{ + pub const XcmInstructionWeight: Weight = Weight::from_parts(1_000, 1_000); + pub TokensPerSecondPerMegabyte: (AssetId, u128, u128) = + (Concrete(TokenLocation::get()), 1_000_000_000_000, 1024 * 1024); + pub const MaxInstructions: u32 = 100; + pub const MaxAssetsIntoHolding: u32 = 64; +} + +pub struct ChildrenParachains; +impl Contains for ChildrenParachains { + fn contains(location: &MultiLocation) -> bool { + matches!(location, MultiLocation { parents: 0, interior: X1(Parachain(_)) }) + } +} + +pub type XcmRouter = crate::RelayChainXcmRouter; +pub type Barrier = WithComputedOrigin< + ( + AllowExplicitUnpaidExecutionFrom, + AllowTopLevelPaidExecutionFrom, + AllowSubscriptionsFrom, + ), + UniversalLocation, + ConstU32<1>, +>; + +pub struct XcmConfig; +impl Config for XcmConfig { + type RuntimeCall = RuntimeCall; + type XcmSender = XcmRouter; + type AssetTransactor = AssetTransactors; + type OriginConverter = LocalOriginConverter; + type IsReserve = (); + type IsTeleporter = (); + type UniversalLocation = UniversalLocation; + type Barrier = Barrier; + type Weigher = FixedWeightBounds; + type Trader = FixedRateOfFungible; + type ResponseHandler = XcmPallet; + type AssetTrap = XcmPallet; + type AssetLocker = XcmPallet; + type AssetExchanger = (); + type AssetClaims = XcmPallet; + type SubscriptionService = XcmPallet; + type PalletInstancesInfo = AllPalletsWithSystem; + type FeeManager = (); + type MaxAssetsIntoHolding = MaxAssetsIntoHolding; + type MessageExporter = (); + type UniversalAliases = Nothing; + type CallDispatcher = RuntimeCall; + type SafeCallFilter = Everything; + type Aliasers = Nothing; +} + +pub type LocalOriginToLocation = SignedToAccountId32; + +impl pallet_xcm::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; + type XcmRouter = XcmRouter; + type ExecuteXcmOrigin = xcm_builder::EnsureXcmOrigin; + type XcmExecuteFilter = Everything; + type XcmExecutor = XcmExecutor; + type XcmTeleportFilter = Everything; + type XcmReserveTransferFilter = Everything; + type Weigher = FixedWeightBounds; + type UniversalLocation = UniversalLocation; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100; + type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion; + type Currency = Balances; + type CurrencyMatcher = IsConcrete; + type TrustedLockers = (); + type SovereignAccountOf = SovereignAccountOf; + type MaxLockers = ConstU32<8>; + type MaxRemoteLockConsumers = ConstU32<0>; + type RemoteLockConsumerIdentifier = (); + type WeightInfo = pallet_xcm::TestWeightInfo; + type AdminOrigin = EnsureRoot; +} + +impl origin::Config for Runtime {} + +type Block = frame_system::mocking::MockBlock; + +impl pallet_message_queue::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Size = u32; + type HeapSize = MessageQueueHeapSize; + type MaxStale = MessageQueueMaxStale; + type ServiceWeight = MessageQueueServiceWeight; + type MessageProcessor = MessageProcessor; + type QueueChangeHandler = (); + type WeightInfo = (); + type QueuePausedQuery = (); +} + +construct_runtime!( + pub enum Runtime { + System: frame_system::{Pallet, Call, Storage, Config, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + ParasOrigin: origin::{Pallet, Origin}, + XcmPallet: pallet_xcm::{Pallet, Call, Storage, Event, Origin}, + MessageQueue: pallet_message_queue::{Pallet, Event}, + } +); diff --git a/substrate/frame/contracts/mock-network/src/tests.rs 
b/substrate/frame/contracts/mock-network/src/tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..5193f6570551673815fb287dfd6db1a2488849ff --- /dev/null +++ b/substrate/frame/contracts/mock-network/src/tests.rs @@ -0,0 +1,239 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{ + parachain::{self, Runtime}, + parachain_account_sovereign_account_id, + primitives::{AccountId, CENTS}, + relay_chain, MockNet, ParaA, ParachainBalances, Relay, ALICE, BOB, INITIAL_BALANCE, +}; +use assert_matches::assert_matches; +use codec::{Decode, Encode}; +use frame_support::{ + assert_err, + pallet_prelude::Weight, + traits::{fungibles::Mutate, Currency}, +}; +use pallet_balances::{BalanceLock, Reasons}; +use pallet_contracts::{CollectEvents, DebugInfo, Determinism}; +use pallet_contracts_fixtures::compile_module; +use pallet_contracts_primitives::Code; +use xcm::{v3::prelude::*, VersionedMultiLocation, VersionedXcm}; +use xcm_simulator::TestExt; + +type ParachainContracts = pallet_contracts::Pallet; + +/// Instantiate the tests contract, and fund it with some balance and assets. +fn instantiate_test_contract(name: &str) -> AccountId { + let (wasm, _) = compile_module::(name).unwrap(); + + // Instantiate contract. + let contract_addr = ParaA::execute_with(|| { + ParachainContracts::bare_instantiate( + ALICE, + 0, + Weight::MAX, + None, + Code::Upload(wasm), + vec![], + vec![], + DebugInfo::UnsafeDebug, + CollectEvents::Skip, + ) + .result + .unwrap() + .account_id + }); + + // Funds contract account with some balance and assets. + ParaA::execute_with(|| { + parachain::Balances::make_free_balance_be(&contract_addr, INITIAL_BALANCE); + parachain::Assets::mint_into(0u32.into(), &contract_addr, INITIAL_BALANCE).unwrap(); + }); + Relay::execute_with(|| { + let sovereign_account = parachain_account_sovereign_account_id(1u32, contract_addr.clone()); + relay_chain::Balances::make_free_balance_be(&sovereign_account, INITIAL_BALANCE); + }); + + contract_addr +} + +#[test] +fn test_xcm_execute() { + MockNet::reset(); + + let contract_addr = instantiate_test_contract("xcm_execute"); + + // Execute XCM instructions through the contract. + ParaA::execute_with(|| { + let amount: u128 = 10 * CENTS; + + // The XCM used to transfer funds to Bob. 
+ let message: xcm_simulator::Xcm<()> = Xcm(vec![ + WithdrawAsset(vec![(Here, amount).into()].into()), + DepositAsset { + assets: All.into(), + beneficiary: AccountId32 { network: None, id: BOB.clone().into() }.into(), + }, + ]); + + let result = ParachainContracts::bare_call( + ALICE, + contract_addr.clone(), + 0, + Weight::MAX, + None, + VersionedXcm::V3(message).encode(), + DebugInfo::UnsafeDebug, + CollectEvents::UnsafeCollect, + Determinism::Enforced, + ) + .result + .unwrap(); + + let mut data = &result.data[..]; + let outcome = Outcome::decode(&mut data).expect("Failed to decode xcm_execute Outcome"); + assert_matches!(outcome, Outcome::Complete(_)); + + // Check if the funds are subtracted from the account of Alice and added to the account of + // Bob. + let initial = INITIAL_BALANCE; + assert_eq!(parachain::Assets::balance(0, contract_addr), initial); + assert_eq!(ParachainBalances::free_balance(BOB), initial + amount); + }); +} + +#[test] +fn test_xcm_execute_filtered_call() { + MockNet::reset(); + + let contract_addr = instantiate_test_contract("xcm_execute"); + + ParaA::execute_with(|| { + // `remark` should be rejected, as it is not allowed by our CallFilter. + let call = parachain::RuntimeCall::System(frame_system::Call::remark { remark: vec![] }); + let message: Xcm = Xcm(vec![Transact { + origin_kind: OriginKind::Native, + require_weight_at_most: Weight::MAX, + call: call.encode().into(), + }]); + + let result = ParachainContracts::bare_call( + ALICE, + contract_addr.clone(), + 0, + Weight::MAX, + None, + VersionedXcm::V3(message).encode(), + DebugInfo::UnsafeDebug, + CollectEvents::UnsafeCollect, + Determinism::Enforced, + ); + + assert_err!(result.result, frame_system::Error::::CallFiltered); + }); +} + +#[test] +fn test_xcm_execute_reentrant_call() { + MockNet::reset(); + + let contract_addr = instantiate_test_contract("xcm_execute"); + + ParaA::execute_with(|| { + let transact_call = parachain::RuntimeCall::Contracts(pallet_contracts::Call::call { + dest: contract_addr.clone(), + gas_limit: 1_000_000.into(), + storage_deposit_limit: None, + data: vec![], + value: 0u128, + }); + + // The XCM used to transfer funds to Bob. + let message: Xcm = Xcm(vec![ + Transact { + origin_kind: OriginKind::Native, + require_weight_at_most: 1_000_000_000.into(), + call: transact_call.encode().into(), + }, + ExpectTransactStatus(MaybeErrorCode::Success), + ]); + + let result = ParachainContracts::bare_call( + ALICE, + contract_addr.clone(), + 0, + Weight::MAX, + None, + VersionedXcm::V3(message).encode(), + DebugInfo::UnsafeDebug, + CollectEvents::UnsafeCollect, + Determinism::Enforced, + ) + .result + .unwrap(); + + let mut data = &result.data[..]; + let outcome = Outcome::decode(&mut data).expect("Failed to decode xcm_execute Outcome"); + assert_matches!(outcome, Outcome::Incomplete(_, XcmError::ExpectationFalse)); + + // Funds should not change hands as the XCM transact failed. + assert_eq!(ParachainBalances::free_balance(BOB), INITIAL_BALANCE); + }); +} + +#[test] +fn test_xcm_send() { + MockNet::reset(); + let contract_addr = instantiate_test_contract("xcm_send"); + let fee = parachain::estimate_message_fee(4); // Accounts for the `DescendOrigin` instruction added by `send_xcm` + + // Send XCM instructions through the contract, to lock some funds on the relay chain. 
+ ParaA::execute_with(|| { + let dest = MultiLocation::from(Parent); + let dest = VersionedMultiLocation::V3(dest); + + let message: xcm_simulator::Xcm<()> = Xcm(vec![ + WithdrawAsset((Here, fee).into()), + BuyExecution { fees: (Here, fee).into(), weight_limit: WeightLimit::Unlimited }, + LockAsset { asset: (Here, 5 * CENTS).into(), unlocker: (Parachain(1)).into() }, + ]); + let message = VersionedXcm::V3(message); + let exec = ParachainContracts::bare_call( + ALICE, + contract_addr.clone(), + 0, + Weight::MAX, + None, + (dest, message).encode(), + DebugInfo::UnsafeDebug, + CollectEvents::UnsafeCollect, + Determinism::Enforced, + ); + + let mut data = &exec.result.unwrap().data[..]; + XcmHash::decode(&mut data).expect("Failed to decode xcm_send message_id"); + }); + + Relay::execute_with(|| { + // Check if the funds are locked on the relay chain. + assert_eq!( + relay_chain::Balances::locks(¶chain_account_sovereign_account_id(1, contract_addr)), + vec![BalanceLock { id: *b"py/xcmlk", amount: 5 * CENTS, reasons: Reasons::All }] + ); + }); +} diff --git a/substrate/frame/contracts/primitives/Cargo.toml b/substrate/frame/contracts/primitives/Cargo.toml index 8a845f6db44bd68c0e8c98b35b3ff918ee23a360..0394841aa1f417f81f566eed052d831f2bec5fcf 100644 --- a/substrate/frame/contracts/primitives/Cargo.toml +++ b/substrate/frame/contracts/primitives/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] bitflags = "1.0" -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } # Substrate Dependencies (This crate should not rely on frame) diff --git a/substrate/frame/contracts/src/exec.rs b/substrate/frame/contracts/src/exec.rs index 9090aa9cb112b39623db93c007c36fffa0e2770c..c26d82f7f110c08e7853acd494774822b9d2ae60 100644 --- a/substrate/frame/contracts/src/exec.rs +++ b/substrate/frame/contracts/src/exec.rs @@ -100,7 +100,7 @@ impl Key { /// Call or instantiate both called into other contracts and pass through errors happening /// in those to the caller. This enum is for the caller to distinguish whether the error /// happened during the execution of the callee or in the current execution context. -#[cfg_attr(test, derive(Debug, PartialEq))] +#[derive(Copy, Clone, PartialEq, Eq, Debug, codec::Decode, codec::Encode)] pub enum ErrorOrigin { /// Caller error origin. /// @@ -112,7 +112,7 @@ pub enum ErrorOrigin { } /// Error returned by contract execution. -#[cfg_attr(test, derive(Debug, PartialEq))] +#[derive(Copy, Clone, PartialEq, Eq, Debug, codec::Decode, codec::Encode)] pub struct ExecError { /// The reason why the execution failed. pub error: DispatchError, diff --git a/substrate/frame/contracts/src/lib.rs b/substrate/frame/contracts/src/lib.rs index 7d516fbe2496cbe5e0ad67cad4e46cae7d9ba447..188679dbf4902c1ab83b1ac6f9887cf3e4dcef2d 100644 --- a/substrate/frame/contracts/src/lib.rs +++ b/substrate/frame/contracts/src/lib.rs @@ -403,6 +403,14 @@ pub mod pallet { /// its type appears in the metadata. Only valid value is `()`. #[pallet::constant] type Environment: Get>; + + /// A type that exposes XCM APIs, allowing contracts to interact with other parachains, and + /// execute XCM programs. 
+ type Xcm: xcm_builder::Controller< + OriginFor, + ::RuntimeCall, + BlockNumberFor, + >; } #[pallet::hooks] @@ -1004,6 +1012,8 @@ pub mod pallet { /// in this error. Note that this usually shouldn't happen as deploying such contracts /// is rejected. NoChainExtension, + /// Failed to decode the XCM program. + XCMDecodeFailed, /// A contract with the same AccountId already exists. DuplicateContract, /// A contract self destructed in its constructor. diff --git a/substrate/frame/contracts/src/migration.rs b/substrate/frame/contracts/src/migration.rs index 2714620731201b225485805e67a5ccc431217a2c..6d61cb6b1e1af158b9d4942efabc73267f8e44f7 100644 --- a/substrate/frame/contracts/src/migration.rs +++ b/substrate/frame/contracts/src/migration.rs @@ -263,14 +263,14 @@ impl Migration { impl OnRuntimeUpgrade for Migration { fn on_runtime_upgrade() -> Weight { let name = >::name(); - let latest_version = >::current_storage_version(); - let storage_version = >::on_chain_storage_version(); + let current_version = >::current_storage_version(); + let on_chain_version = >::on_chain_storage_version(); - if storage_version == latest_version { + if on_chain_version == current_version { log::warn!( target: LOG_TARGET, "{name}: No Migration performed storage_version = latest_version = {:?}", - &storage_version + &on_chain_version ); return T::WeightInfo::on_runtime_upgrade_noop() } @@ -281,7 +281,7 @@ impl OnRuntimeUpgrade for Migration OnRuntimeUpgrade for Migration::set(Some(cursor)); #[cfg(feature = "try-runtime")] @@ -308,24 +308,24 @@ impl OnRuntimeUpgrade for Migration>::on_chain_storage_version(); - let target_version = >::current_storage_version(); + let on_chain_version = >::on_chain_storage_version(); + let current_version = >::current_storage_version(); - ensure!( - storage_version != target_version, - "No upgrade: Please remove this migration from your runtime upgrade configuration." 
- ); + if on_chain_version == current_version { + return Ok(Default::default()) + } log::debug!( target: LOG_TARGET, "Requested migration of {} from {:?}(on-chain storage version) to {:?}(current storage version)", - >::name(), storage_version, target_version + >::name(), on_chain_version, current_version ); ensure!( - T::Migrations::is_upgrade_supported(storage_version, target_version), + T::Migrations::is_upgrade_supported(on_chain_version, current_version), "Unsupported upgrade: VERSION_RANGE should be (on-chain storage version + 1, current storage version)" ); + Ok(Default::default()) } diff --git a/substrate/frame/contracts/src/tests.rs b/substrate/frame/contracts/src/tests.rs index 0fea2b1559509a7719e42c9461f35733e6617efc..76fd012852a90858e6a1ad929d2e009b933e879d 100644 --- a/substrate/frame/contracts/src/tests.rs +++ b/substrate/frame/contracts/src/tests.rs @@ -53,6 +53,7 @@ use frame_support::{ weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight}, }; use frame_system::{EventRecord, Phase}; +use pallet_contracts_fixtures::compile_module; use pallet_contracts_primitives::CodeUploadReturnValue; use pretty_assertions::{assert_eq, assert_ne}; use sp_core::ByteArray; @@ -370,6 +371,7 @@ impl pallet_balances::Config for Test { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type MaxHolds = ConstU32<1>; } @@ -484,6 +486,7 @@ impl Config for Test { type MaxDelegateDependencies = MaxDelegateDependencies; type Debug = TestDebug; type Environment = (); + type Xcm = (); } pub const ALICE: AccountId32 = AccountId32::new([1u8; 32]); @@ -554,29 +557,6 @@ impl ExtBuilder { } } -/// Load a given wasm module represented by a .wat file and returns a wasm binary contents along -/// with it's hash. -/// -/// The fixture files are located under the `fixtures/` directory. -fn compile_module(fixture_name: &str) -> wat::Result<(Vec, ::Output)> -where - T: frame_system::Config, -{ - let fixture_path = [ - // When `CARGO_MANIFEST_DIR` is not set, Rust resolves relative paths from the root folder - std::env::var("CARGO_MANIFEST_DIR") - .as_deref() - .unwrap_or("substrate/frame/contracts"), - "/fixtures/", - fixture_name, - ".wat", - ] - .concat(); - let wasm_binary = wat::parse_file(fixture_path)?; - let code_hash = T::Hashing::hash(&wasm_binary); - Ok((wasm_binary, code_hash)) -} - fn initialize_block(number: u64) { System::reset_events(); System::initialize(&number, &[0u8; 32].into(), &Default::default()); diff --git a/substrate/frame/contracts/src/wasm/prepare.rs b/substrate/frame/contracts/src/wasm/prepare.rs index b129c17e13eca5cbef72c40387a9f86d81fa3515..dfe8c4f8f9b91fe5e7d73032282fe50effd51d38 100644 --- a/substrate/frame/contracts/src/wasm/prepare.rs +++ b/substrate/frame/contracts/src/wasm/prepare.rs @@ -79,8 +79,7 @@ impl LoadedModule { } let engine = Engine::new(&config); - let module = - Module::new(&engine, code.clone()).map_err(|_| "Can't load the module into wasmi!")?; + let module = Module::new(&engine, code).map_err(|_| "Can't load the module into wasmi!")?; // Return a `LoadedModule` instance with // __valid__ module. 
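For orientation before the host-function changes that follow: the mock configuration above wires the new `Config::Xcm` item to `()`, while a runtime that wants contracts to reach XCM points it at a type implementing the `xcm_builder::Controller` traits. A minimal sketch, assuming `pallet_xcm::Pallet<Runtime>` provides those controller implementations (as the simulator runtime used by the tests earlier in this diff appears to do); every other associated type is elided:

impl pallet_contracts::Config for Runtime {
    // ... all other associated types unchanged ...
    type Xcm = pallet_xcm::Pallet<Self>;
}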
diff --git a/substrate/frame/contracts/src/wasm/runtime.rs b/substrate/frame/contracts/src/wasm/runtime.rs index 4fd52b471a0c6b41cabd364d530d2989891591d7..b3013adb790e2ec2e18c3072c36a0dd898fe8eaa 100644 --- a/substrate/frame/contracts/src/wasm/runtime.rs +++ b/substrate/frame/contracts/src/wasm/runtime.rs @@ -23,10 +23,16 @@ use crate::{ schedule::HostFnWeights, BalanceOf, CodeHash, Config, DebugBufferVec, Error, SENTINEL, }; - use bitflags::bitflags; use codec::{Decode, DecodeLimit, Encode, MaxEncodedLen}; -use frame_support::{ensure, traits::Get, weights::Weight}; +use frame_support::{ + dispatch::DispatchInfo, + ensure, + pallet_prelude::{DispatchResult, DispatchResultWithPostInfo}, + parameter_types, + traits::Get, + weights::Weight, +}; use pallet_contracts_primitives::{ExecReturnValue, ReturnFlags}; use pallet_contracts_proc_macro::define_env; use sp_io::hashing::{blake2_128, blake2_256, keccak_256, sha2_256}; @@ -36,6 +42,9 @@ use sp_runtime::{ }; use sp_std::{fmt, prelude::*}; use wasmi::{core::HostError, errors::LinkerError, Linker, Memory, Store}; +use xcm::VersionedXcm; + +type CallOf = ::RuntimeCall; /// The maximum nesting depth a contract can use when encoding types. const MAX_DECODE_NESTING: u32 = 256; @@ -113,6 +122,17 @@ pub enum ReturnCode { EcdsaRecoverFailed = 11, /// sr25519 signature verification failed. Sr25519VerifyFailed = 12, + /// The `xcm_execute` call failed. + XcmExecutionFailed = 13, + /// The `xcm_send` call failed. + XcmSendFailed = 14, +} + +parameter_types! { + /// Getter types used by [`crate::api_doc::Current::call_runtime`] + const CallRuntimeFailed: ReturnCode = ReturnCode::CallRuntimeFailed; + /// Getter types used by [`crate::api_doc::Current::xcm_execute`] + const XcmExecutionFailed: ReturnCode = ReturnCode::XcmExecutionFailed; } impl From for ReturnCode { @@ -461,6 +481,29 @@ fn already_charged(_: u32) -> Option { None } +/// Ensure that the XCM program is executable, by checking that it does not contain any [`Transact`] +/// instruction with a call that is not allowed by the CallFilter. +fn ensure_executable(message: &VersionedXcm>) -> DispatchResult { + use frame_support::traits::Contains; + use xcm::prelude::{Transact, Xcm}; + + let mut message: Xcm> = + message.clone().try_into().map_err(|_| Error::::XCMDecodeFailed)?; + + message.iter_mut().try_for_each(|inst| -> DispatchResult { + let Transact { ref mut call, .. } = inst else { return Ok(()) }; + let call = call.ensure_decoded().map_err(|_| Error::::XCMDecodeFailed)?; + + if !::CallFilter::contains(call) { + return Err(frame_system::Error::::CallFiltered.into()) + } + + Ok(()) + })?; + + Ok(()) +} + /// Can only be used for one call. pub struct Runtime<'a, E: Ext + 'a> { ext: &'a mut E, @@ -558,6 +601,32 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { self.ext.gas_meter_mut().adjust_gas(charged, token); } + /// Charge, Run and adjust gas, for executing the given dispatchable. 
+ fn call_dispatchable< + ErrorReturnCode: Get, + F: FnOnce(&mut Self) -> DispatchResultWithPostInfo, + >( + &mut self, + dispatch_info: DispatchInfo, + run: F, + ) -> Result { + use frame_support::dispatch::extract_actual_weight; + let charged = self.charge_gas(RuntimeCosts::CallRuntime(dispatch_info.weight))?; + let result = run(self); + let actual_weight = extract_actual_weight(&result, &dispatch_info); + self.adjust_gas(charged, RuntimeCosts::CallRuntime(actual_weight)); + match result { + Ok(_) => Ok(ReturnCode::Success), + Err(e) => { + if self.ext.append_debug_buffer("") { + self.ext.append_debug_buffer("call failed with: "); + self.ext.append_debug_buffer(e.into()); + }; + Ok(ErrorReturnCode::get()) + }, + } + } + /// Read designated chunk from the sandbox memory. /// /// Returns `Err` if one of the following conditions occurs: @@ -633,8 +702,10 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { let ptr = ptr as usize; let mut bound_checked = memory.get(ptr..ptr + len as usize).ok_or_else(|| Error::::OutOfBounds)?; + let decoded = D::decode_all_with_depth_limit(MAX_DECODE_NESTING, &mut bound_checked) .map_err(|_| DispatchError::from(Error::::DecodingFailed))?; + Ok(decoded) } @@ -1023,6 +1094,7 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { // for every function. #[define_env(doc)] pub mod env { + /// Set the value at the given key in the contract storage. /// /// Equivalent to the newer [`seal1`][`super::api_doc::Version1::set_storage`] version with the @@ -2584,7 +2656,7 @@ pub mod env { /// # Return Value /// /// Returns `ReturnCode::Success` when the dispatchable was successfully executed and - /// returned `Ok`. When the dispatchable was exeuted but returned an error + /// returned `Ok`. When the dispatchable was executed but returned an error /// `ReturnCode::CallRuntimeFailed` is returned. The full error is not /// provided because it is not guaranteed to be stable. /// @@ -2605,23 +2677,118 @@ pub mod env { call_ptr: u32, call_len: u32, ) -> Result { - use frame_support::dispatch::{extract_actual_weight, GetDispatchInfo}; + use frame_support::dispatch::GetDispatchInfo; ctx.charge_gas(RuntimeCosts::CopyFromContract(call_len))?; let call: ::RuntimeCall = ctx.read_sandbox_memory_as_unbounded(memory, call_ptr, call_len)?; - let dispatch_info = call.get_dispatch_info(); - let charged = ctx.charge_gas(RuntimeCosts::CallRuntime(dispatch_info.weight))?; - let result = ctx.ext.call_runtime(call); - let actual_weight = extract_actual_weight(&result, &dispatch_info); - ctx.adjust_gas(charged, RuntimeCosts::CallRuntime(actual_weight)); - match result { - Ok(_) => Ok(ReturnCode::Success), + ctx.call_dispatchable::(call.get_dispatch_info(), |ctx| { + ctx.ext.call_runtime(call) + }) + } + + /// Execute an XCM program locally, using the contract's address as the origin. + /// This is equivalent to dispatching `pallet_xcm::execute` through call_runtime, except that + /// the function is called directly instead of being dispatched. + /// + /// # Parameters + /// + /// - `msg_ptr`: the pointer into the linear memory where the [`xcm::prelude::VersionedXcm`] is + /// placed. + /// - `msg_len`: the length of the message in bytes. + /// - `output_ptr`: the pointer into the linear memory where the [`xcm::prelude::Outcome`] + /// message id is placed. + /// + /// # Return Value + /// + /// Returns `ReturnCode::Success` when the XCM was successfully executed. When the XCM + /// execution fails, `ReturnCode::XcmExecutionFailed` is returned. 
+ #[unstable] + fn xcm_execute( + ctx: _, + memory: _, + msg_ptr: u32, + msg_len: u32, + output_ptr: u32, + ) -> Result { + use frame_support::dispatch::DispatchInfo; + use xcm::VersionedXcm; + use xcm_builder::{ExecuteController, ExecuteControllerWeightInfo}; + + ctx.charge_gas(RuntimeCosts::CopyFromContract(msg_len))?; + let message: VersionedXcm> = + ctx.read_sandbox_memory_as_unbounded(memory, msg_ptr, msg_len)?; + + let execute_weight = + <::Xcm as ExecuteController<_, _>>::WeightInfo::execute(); + let weight = ctx.ext.gas_meter().gas_left().max(execute_weight); + let dispatch_info = DispatchInfo { weight, ..Default::default() }; + + ensure_executable::(&message)?; + ctx.call_dispatchable::(dispatch_info, |ctx| { + let origin = crate::RawOrigin::Signed(ctx.ext.address().clone()).into(); + let outcome = <::Xcm>::execute( + origin, + Box::new(message), + weight.saturating_sub(execute_weight), + )?; + + ctx.write_sandbox_memory(memory, output_ptr, &outcome.encode())?; + let pre_dispatch_weight = + <::Xcm as ExecuteController<_, _>>::WeightInfo::execute(); + Ok(Some(outcome.weight_used().saturating_add(pre_dispatch_weight)).into()) + }) + } + + /// Send an XCM program from the contract to the specified destination. + /// This is equivalent to dispatching `pallet_xcm::send` through `call_runtime`, except that + /// the function is called directly instead of being dispatched. + /// + /// # Parameters + /// + /// - `dest_ptr`: the pointer into the linear memory where the + /// [`xcm::prelude::VersionedMultiLocation`] is placed. + /// - `msg_ptr`: the pointer into the linear memory where the [`xcm::prelude::VersionedXcm`] is + /// placed. + /// - `msg_len`: the length of the message in bytes. + /// - `output_ptr`: the pointer into the linear memory where the [`xcm::v3::XcmHash`] message id + /// is placed. + /// + /// # Return Value + /// + /// Returns `ReturnCode::Success` when the message was successfully sent. When the XCM + /// execution fails, `ReturnCode::CallRuntimeFailed` is returned. 
+ #[unstable] + fn xcm_send( + ctx: _, + memory: _, + dest_ptr: u32, + msg_ptr: u32, + msg_len: u32, + output_ptr: u32, + ) -> Result { + use xcm::{VersionedMultiLocation, VersionedXcm}; + use xcm_builder::{SendController, SendControllerWeightInfo}; + + ctx.charge_gas(RuntimeCosts::CopyFromContract(msg_len))?; + let dest: VersionedMultiLocation = ctx.read_sandbox_memory_as(memory, dest_ptr)?; + + let message: VersionedXcm<()> = + ctx.read_sandbox_memory_as_unbounded(memory, msg_ptr, msg_len)?; + let weight = <::Xcm as SendController<_>>::WeightInfo::send(); + ctx.charge_gas(RuntimeCosts::CallRuntime(weight))?; + let origin = crate::RawOrigin::Signed(ctx.ext.address().clone()).into(); + + match <::Xcm>::send(origin, dest.into(), message.into()) { + Ok(message_id) => { + ctx.write_sandbox_memory(memory, output_ptr, &message_id.encode())?; + Ok(ReturnCode::Success) + }, Err(e) => { if ctx.ext.append_debug_buffer("") { - ctx.ext.append_debug_buffer("seal0::call_runtime failed with: "); + ctx.ext.append_debug_buffer("seal0::xcm_send failed with: "); ctx.ext.append_debug_buffer(e.into()); }; - Ok(ReturnCode::CallRuntimeFailed) + Ok(ReturnCode::XcmSendFailed) }, } } diff --git a/substrate/frame/conviction-voting/Cargo.toml b/substrate/frame/conviction-voting/Cargo.toml index 666a02e9b2396d83b221cbd3502281d362bba86f..1dc723576dc183aff8e7cb84a77559f104c2cd87 100644 --- a/substrate/frame/conviction-voting/Cargo.toml +++ b/substrate/frame/conviction-voting/Cargo.toml @@ -18,7 +18,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", "max-encoded-len", ] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.188", features = ["derive"], optional = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} diff --git a/substrate/frame/conviction-voting/src/tests.rs b/substrate/frame/conviction-voting/src/tests.rs index 45af8bb0561b12dc591587e7958ca0845aa93be8..9a13e8f97ddd6227caff913694486700c5927e36 100644 --- a/substrate/frame/conviction-voting/src/tests.rs +++ b/substrate/frame/conviction-voting/src/tests.rs @@ -90,6 +90,7 @@ impl pallet_balances::Config for Test { type FreezeIdentifier = FreezeReason; type MaxFreezes = ConstU32<1>; type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = (); } diff --git a/substrate/frame/core-fellowship/Cargo.toml b/substrate/frame/core-fellowship/Cargo.toml index 62e0186cd5c9f7a9c903ab9c2fe66ba8638d0d2b..523a5bb90a016e649eded2a7a72016ff021d375e 100644 --- a/substrate/frame/core-fellowship/Cargo.toml +++ b/substrate/frame/core-fellowship/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.16", default-features = false } -scale-info = { version = "2.0.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} diff --git a/substrate/frame/democracy/Cargo.toml b/substrate/frame/democracy/Cargo.toml index 
038e8d2cef47d7e86d69d932d67e97131b22de5c..870bfaa9b89213a6541a5102f5c85ac545e35e8e 100644 --- a/substrate/frame/democracy/Cargo.toml +++ b/substrate/frame/democracy/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.188", features = ["derive"], optional = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} @@ -39,6 +39,7 @@ std = [ "frame-benchmarking?/std", "frame-support/std", "frame-system/std", + "log/std", "pallet-balances/std", "pallet-preimage/std", "pallet-scheduler/std", diff --git a/substrate/frame/democracy/src/tests.rs b/substrate/frame/democracy/src/tests.rs index ebccf32e342377a56999c49b6cdd6fa95504b943..07a0ef5c3d5a0a729b7bf21dce79984274c76e3c 100644 --- a/substrate/frame/democracy/src/tests.rs +++ b/substrate/frame/democracy/src/tests.rs @@ -140,6 +140,7 @@ impl pallet_balances::Config for Test { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = (); } parameter_types! { diff --git a/substrate/frame/election-provider-multi-phase/Cargo.toml b/substrate/frame/election-provider-multi-phase/Cargo.toml index b4a3337e41826b931a9840ca471d459daeff0e1e..91be97d3e35003737f9fc27ca54732522aaea01b 100644 --- a/substrate/frame/election-provider-multi-phase/Cargo.toml +++ b/substrate/frame/election-provider-multi-phase/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.5.0", default-features = false, features = [ +scale-info = { version = "2.10.0", default-features = false, features = [ "derive", ] } log = { version = "0.4.17", default-features = false } diff --git a/substrate/frame/election-provider-multi-phase/src/mock.rs b/substrate/frame/election-provider-multi-phase/src/mock.rs index d4659eba5661a3059c8ab55b453ab8c9561f4e77..92144351e8f819896015f4ae0e3f7daf8bb3506b 100644 --- a/substrate/frame/election-provider-multi-phase/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/src/mock.rs @@ -253,6 +253,7 @@ impl pallet_balances::Config for Runtime { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = (); } diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml b/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml index f2072b81723ded7be1438c87f29311ea7d3647df..f5d1991d1990c901b24529f2f7bf337bbb11f4a9 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dev-dependencies] parking_lot = "0.12.1" codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } -scale-info = { version = "2.0.1", features = ["derive"] } +scale-info = { version = "2.10.0", features = ["derive"] } log = { version = "0.4.17", default-features = false } sp-runtime = { path = "../../../primitives/runtime" } diff --git 
a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs index e40bac3e9fc44adb37b0d8e527d4ac6eae460ed2..1d3f4712b1d604988047f5643ddd86296f453878 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs @@ -338,7 +338,7 @@ fn ledger_consistency_active_balance_below_ed() { ExtBuilder::default().staking(StakingExtBuilder::default()).build_offchainify(); ext.execute_with(|| { - assert_eq!(Staking::ledger(&11).unwrap().active, 1000); + assert_eq!(Staking::ledger(11.into()).unwrap().active, 1000); // unbonding total of active stake fails because the active ledger balance would fall // below the `MinNominatorBond`. @@ -356,13 +356,13 @@ fn ledger_consistency_active_balance_below_ed() { // the active balance of the ledger entry is 0, while total balance is 1000 until // `withdraw_unbonded` is called. - assert_eq!(Staking::ledger(&11).unwrap().active, 0); - assert_eq!(Staking::ledger(&11).unwrap().total, 1000); + assert_eq!(Staking::ledger(11.into()).unwrap().active, 0); + assert_eq!(Staking::ledger(11.into()).unwrap().total, 1000); // trying to withdraw the unbonded balance won't work yet because not enough bonding // eras have passed. assert_ok!(Staking::withdraw_unbonded(RuntimeOrigin::signed(11), 0)); - assert_eq!(Staking::ledger(&11).unwrap().total, 1000); + assert_eq!(Staking::ledger(11.into()).unwrap().total, 1000); // tries to reap stash after chilling, which fails since the stash total balance is // above ED. @@ -384,6 +384,6 @@ fn ledger_consistency_active_balance_below_ed() { pool_state, ); assert_ok!(Staking::withdraw_unbonded(RuntimeOrigin::signed(11), 0)); - assert_eq!(Staking::ledger(&11), None); + assert!(Staking::ledger(11.into()).is_err()); }); } diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs index 2e3cb15f9a4345f1a2804109c290b95166779dcd..751ffc07aa5dd11b3753957ec94a122b8dd38e44 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs @@ -114,6 +114,7 @@ impl pallet_balances::Config for Runtime { type MaxHolds = ConstU32<1>; type MaxFreezes = traits::ConstU32<1>; type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); type WeightInfo = (); } @@ -236,7 +237,6 @@ parameter_types! { pub const SessionsPerEra: sp_staking::SessionIndex = 2; pub const BondingDuration: sp_staking::EraIndex = 28; pub const SlashDeferDuration: sp_staking::EraIndex = 7; // 1/4 the bonding duration. 
- pub const MaxNominatorRewardedPerValidator: u32 = 256; pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(40); pub HistoryDepth: u32 = 84; } @@ -268,7 +268,7 @@ impl pallet_staking::Config for Runtime { type SessionInterface = Self; type EraPayout = (); type NextNewSession = Session; - type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; + type MaxExposurePageSize = ConstU32<256>; type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type ElectionProvider = ElectionProviderMultiPhase; type GenesisElectionProvider = onchain::OnChainExecution; @@ -808,7 +808,7 @@ pub(crate) fn on_offence_now( pub(crate) fn add_slash(who: &AccountId) { on_offence_now( &[OffenceDetails { - offender: (*who, Staking::eras_stakers(active_era(), *who)), + offender: (*who, Staking::eras_stakers(active_era(), who)), reporters: vec![], }], &[Perbill::from_percent(10)], diff --git a/substrate/frame/election-provider-support/Cargo.toml b/substrate/frame/election-provider-support/Cargo.toml index 09e1794965cd337ee7d0c95777b9a4d666c46800..ed36630d0d04364640765c88906ccd9009d7ff0a 100644 --- a/substrate/frame/election-provider-support/Cargo.toml +++ b/substrate/frame/election-provider-support/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-election-provider-solution-type = { path = "solution-type" } frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} diff --git a/substrate/frame/election-provider-support/solution-type/Cargo.toml b/substrate/frame/election-provider-support/solution-type/Cargo.toml index f4ea4ef6e361f13013a1f434b08ebcbdd6dfe37f..1e3002d5dc49f5f2b272c9fae0a4408e813a95dd 100644 --- a/substrate/frame/election-provider-support/solution-type/Cargo.toml +++ b/substrate/frame/election-provider-support/solution-type/Cargo.toml @@ -22,7 +22,7 @@ proc-macro-crate = "1.1.3" [dev-dependencies] parity-scale-codec = "3.6.1" -scale-info = "2.1.1" +scale-info = "2.10.0" sp-arithmetic = { path = "../../../primitives/arithmetic" } # used by generate_solution_type: frame-election-provider-support = { path = ".." } diff --git a/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml b/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml index e485920145416dce3425b5e5942388c204b4d582..cc90ed119ad7e518a5e22a5816cbc0c1081fa999 100644 --- a/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml +++ b/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml @@ -18,7 +18,7 @@ honggfuzz = "0.5" rand = { version = "0.8", features = ["std", "small_rng"] } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-election-provider-solution-type = { path = ".." } frame-election-provider-support = { path = "../.." 
} sp-arithmetic = { path = "../../../../primitives/arithmetic" } diff --git a/substrate/frame/elections-phragmen/Cargo.toml b/substrate/frame/elections-phragmen/Cargo.toml index 2dfe8e42151ae1ddab784e0a29a74873207bd56f..cb8bc1035a5f3f8350af30b7ce8f8d00fac812a3 100644 --- a/substrate/frame/elections-phragmen/Cargo.toml +++ b/substrate/frame/elections-phragmen/Cargo.toml @@ -17,7 +17,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", ] } log = { version = "0.4.14", default-features = false } -scale-info = { version = "2.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} diff --git a/substrate/frame/elections-phragmen/src/benchmarking.rs b/substrate/frame/elections-phragmen/src/benchmarking.rs index 56ea19578c8f5425bc9f42029659d64ebf9cba84..9878f7fd41c068329574810c3fc8edff0e6b7b4a 100644 --- a/substrate/frame/elections-phragmen/src/benchmarking.rs +++ b/substrate/frame/elections-phragmen/src/benchmarking.rs @@ -379,7 +379,7 @@ benchmarks! { let root = RawOrigin::Root; }: _(root, v, d) verify { - assert_eq!(>::iter().count() as u32, 0); + assert_eq!(>::iter().count() as u32, v - d); } election_phragmen { diff --git a/substrate/frame/elections-phragmen/src/lib.rs b/substrate/frame/elections-phragmen/src/lib.rs index 6912649bd122d8d66014b9f805f54732c01bc18b..e4c56e68f9a5ed5a67ff6376834caf8465dc5971 100644 --- a/substrate/frame/elections-phragmen/src/lib.rs +++ b/substrate/frame/elections-phragmen/src/lib.rs @@ -591,15 +591,18 @@ pub mod pallet { /// ## Complexity /// - Check is_defunct_voter() details. 
#[pallet::call_index(5)] - #[pallet::weight(T::WeightInfo::clean_defunct_voters(*_num_voters, *_num_defunct))] + #[pallet::weight(T::WeightInfo::clean_defunct_voters(*num_voters, *num_defunct))] pub fn clean_defunct_voters( origin: OriginFor, - _num_voters: u32, - _num_defunct: u32, + num_voters: u32, + num_defunct: u32, ) -> DispatchResult { let _ = ensure_root(origin)?; + >::iter() + .take(num_voters as usize) .filter(|(_, x)| Self::is_defunct_voter(&x.votes)) + .take(num_defunct as usize) .for_each(|(dv, _)| Self::do_remove_voter(&dv)); Ok(()) @@ -1357,6 +1360,7 @@ mod tests { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = (); } diff --git a/substrate/frame/examples/Cargo.toml b/substrate/frame/examples/Cargo.toml index 9c47d7442111fdfa0313af5b9d1ae2048ae90b12..1b21502271546c06d04a959c800bd34125ccbdcc 100644 --- a/substrate/frame/examples/Cargo.toml +++ b/substrate/frame/examples/Cargo.toml @@ -7,16 +7,18 @@ license = "Apache-2.0" homepage = "https://substrate.io" repository.workspace = true description = "The single package with examples of various types of FRAME pallets" +publish = false [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -pallet-example-basic = { path = "basic", default-features = false} pallet-default-config-example = { path = "default-config", default-features = false} -pallet-example-offchain-worker = { path = "offchain-worker", default-features = false} -pallet-example-kitchensink = { path = "kitchensink", default-features = false} pallet-dev-mode = { path = "dev-mode", default-features = false} +pallet-example-basic = { path = "basic", default-features = false} +pallet-example-frame-crate = { path = "frame-crate", default-features = false } +pallet-example-kitchensink = { path = "kitchensink", default-features = false} +pallet-example-offchain-worker = { path = "offchain-worker", default-features = false} pallet-example-split = { path = "split", default-features = false} [features] @@ -25,6 +27,7 @@ std = [ "pallet-default-config-example/std", "pallet-dev-mode/std", "pallet-example-basic/std", + "pallet-example-frame-crate/std", "pallet-example-kitchensink/std", "pallet-example-offchain-worker/std", "pallet-example-split/std", diff --git a/substrate/frame/examples/basic/Cargo.toml b/substrate/frame/examples/basic/Cargo.toml index cb3bc3f2c822a521a75f1d01cf44918424441bc5..d39a93e7abb142a881160da43293e5a3d1f6a4db 100644 --- a/substrate/frame/examples/basic/Cargo.toml +++ b/substrate/frame/examples/basic/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true} frame-support = { path = "../../support", default-features = false} frame-system = { path = "../../system", default-features = false} diff --git a/substrate/frame/examples/basic/src/tests.rs b/substrate/frame/examples/basic/src/tests.rs index addf219dc3c3938b41ab831cc42669a0f3af52bf..c7b5b9e9a84511c99d4507d4ab405878df44cf41 100644 --- a/substrate/frame/examples/basic/src/tests.rs +++ b/substrate/frame/examples/basic/src/tests.rs @@ -84,6 +84,7 @@ impl pallet_balances::Config 
for Test { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = (); } diff --git a/substrate/frame/examples/default-config/Cargo.toml b/substrate/frame/examples/default-config/Cargo.toml index 3a6b56b57fad03422b34773ebf53ae6ee3e967dd..13b6ce745437e8b596e1da5c263599fc13d75458 100644 --- a/substrate/frame/examples/default-config/Cargo.toml +++ b/substrate/frame/examples/default-config/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-support = { path = "../../support", default-features = false} frame-system = { path = "../../system", default-features = false} diff --git a/substrate/frame/examples/default-config/src/lib.rs b/substrate/frame/examples/default-config/src/lib.rs index d2eade0ccff1eeab28ada5b4af9ed3b810d0bcb8..8a1f6f9d6a82cffbd557ab2d3f1e92ad966abad0 100644 --- a/substrate/frame/examples/default-config/src/lib.rs +++ b/substrate/frame/examples/default-config/src/lib.rs @@ -26,7 +26,7 @@ //! Study the following types: //! //! - [`pallet::DefaultConfig`], and how it differs from [`pallet::Config`]. -//! - [`pallet::config_preludes::TestDefaultConfig`] and how it implements +//! - [`struct@pallet::config_preludes::TestDefaultConfig`] and how it implements //! [`pallet::DefaultConfig`]. //! - Notice how [`pallet::DefaultConfig`] is independent of [`frame_system::Config`]. @@ -83,11 +83,12 @@ pub mod pallet { // This will help use not need to disambiguate anything when using `derive_impl`. use super::*; use frame_support::derive_impl; + use frame_system::config_preludes::TestDefaultConfig as SystemTestDefaultConfig; /// A type providing default configurations for this pallet in testing environment. pub struct TestDefaultConfig; - #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig, no_aggregated_types)] + #[derive_impl(SystemTestDefaultConfig as frame_system::DefaultConfig, no_aggregated_types)] impl frame_system::DefaultConfig for TestDefaultConfig {} #[frame_support::register_default_impl(TestDefaultConfig)] @@ -109,7 +110,7 @@ pub mod pallet { /// example, we simple derive `frame_system::config_preludes::TestDefaultConfig` again. 
pub struct OtherDefaultConfig; - #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig, no_aggregated_types)] + #[derive_impl(SystemTestDefaultConfig as frame_system::DefaultConfig, no_aggregated_types)] impl frame_system::DefaultConfig for OtherDefaultConfig {} #[frame_support::register_default_impl(OtherDefaultConfig)] diff --git a/substrate/frame/examples/dev-mode/Cargo.toml b/substrate/frame/examples/dev-mode/Cargo.toml index 8cd3fda671204ead77044bb156deeb751dfb162f..806af334bb01d3e184d8919239a4d2caf5446b18 100644 --- a/substrate/frame/examples/dev-mode/Cargo.toml +++ b/substrate/frame/examples/dev-mode/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-support = { path = "../../support", default-features = false} frame-system = { path = "../../system", default-features = false} pallet-balances = { path = "../../balances", default-features = false} diff --git a/substrate/frame/examples/dev-mode/src/tests.rs b/substrate/frame/examples/dev-mode/src/tests.rs index ba98f5174ce2049a31372c8d7ee03eba044556fc..c7722bc0524298565ab3f1c27bf2c56014bb3c12 100644 --- a/substrate/frame/examples/dev-mode/src/tests.rs +++ b/substrate/frame/examples/dev-mode/src/tests.rs @@ -78,6 +78,7 @@ impl pallet_balances::Config for Test { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type MaxHolds = (); } diff --git a/substrate/frame/examples/frame-crate/Cargo.toml b/substrate/frame/examples/frame-crate/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..ceb8c7bfb81931ed8444abcee88b817ec4fcd138 --- /dev/null +++ b/substrate/frame/examples/frame-crate/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "pallet-example-frame-crate" +version = "0.0.1" +authors = ["Parity Technologies "] +edition = "2021" +license = "MIT-0" +homepage = "https://substrate.io" +repository.workspace = true +description = "FRAME example pallet with umbrella crate" +publish = false + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } + +frame = { path = "../..", default-features = false, features = ["runtime", "experimental"] } + + +[features] +default = [ "std" ] +std = [ "codec/std", "frame/std", "scale-info/std" ] diff --git a/substrate/frame/examples/frame-crate/src/lib.rs b/substrate/frame/examples/frame-crate/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..0fea2c22cf5454cac1c767bc356c27ad9591d15a --- /dev/null +++ b/substrate/frame/examples/frame-crate/src/lib.rs @@ -0,0 +1,66 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use frame::prelude::*; + +#[frame::pallet(dev_mode)] +pub mod pallet { + use super::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + type RuntimeEvent: IsType<::RuntimeEvent> + From>; + } + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::event] + pub enum Event {} + + #[pallet::storage] + pub type Value = StorageValue; + + #[pallet::call] + impl Pallet { + pub fn some_dispatchable(_origin: OriginFor) -> DispatchResult { + Ok(()) + } + } +} + +#[cfg(test)] +mod tests { + use crate::pallet as my_pallet; + use frame::testing_prelude::*; + + construct_runtime!( + pub struct Runtime { + System: frame_system, + MyPallet: my_pallet, + } + ); + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + impl frame_system::Config for Runtime { + type Block = MockBlock; + } + + impl my_pallet::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + } +} diff --git a/substrate/frame/examples/kitchensink/Cargo.toml b/substrate/frame/examples/kitchensink/Cargo.toml index 26018ad7d97f871c6e42fe18d079710ccdb5dc96..1275ef0b53f664a4420732c6277a05835c125084 100644 --- a/substrate/frame/examples/kitchensink/Cargo.toml +++ b/substrate/frame/examples/kitchensink/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-support = { path = "../../support", default-features = false} frame-system = { path = "../../system", default-features = false} diff --git a/substrate/frame/examples/kitchensink/src/lib.rs b/substrate/frame/examples/kitchensink/src/lib.rs index 0fbffc971da6225180b1548b3f87c88b8231ca48..89759dd0bf635a8b89bf54435a3975199e8c5a27 100644 --- a/substrate/frame/examples/kitchensink/src/lib.rs +++ b/substrate/frame/examples/kitchensink/src/lib.rs @@ -74,7 +74,7 @@ pub mod pallet { type WeightInfo: WeightInfo; /// This is a normal Rust type, nothing specific to FRAME here. - type Currency: frame_support::traits::tokens::fungible::Inspect; + type Currency: frame_support::traits::fungible::Inspect; /// Similarly, let the runtime decide this. fn some_function() -> u32; @@ -206,6 +206,10 @@ pub mod pallet { impl Pallet { #[pallet::call_index(0)] #[pallet::weight(T::WeightInfo::set_foo_benchmark())] + /// Marks this call as feeless if `new_foo` is zero. 
+ #[pallet::feeless_if(|_origin: &OriginFor, new_foo: &u32, _other_compact: &u128| -> bool { + *new_foo == 0 + })] pub fn set_foo( _: OriginFor, new_foo: u32, diff --git a/substrate/frame/examples/kitchensink/src/tests.rs b/substrate/frame/examples/kitchensink/src/tests.rs index abded83e48201ef0b518ba9f6f7a1e2d11eb3418..3b88f68d1c39cb6420c652cda838acc79a7a8710 100644 --- a/substrate/frame/examples/kitchensink/src/tests.rs +++ b/substrate/frame/examples/kitchensink/src/tests.rs @@ -56,6 +56,7 @@ impl pallet_balances::Config for Test { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = (); } diff --git a/substrate/frame/examples/offchain-worker/Cargo.toml b/substrate/frame/examples/offchain-worker/Cargo.toml index f33de594a2dcde7878b9fb10155182e6947a08f0..e6b7715655d16d7da0ac5a022678141cabb6ad4d 100644 --- a/substrate/frame/examples/offchain-worker/Cargo.toml +++ b/substrate/frame/examples/offchain-worker/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } lite-json = { version = "0.2.0", default-features = false } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-support = { path = "../../support", default-features = false} frame-system = { path = "../../system", default-features = false} sp-core = { path = "../../../primitives/core", default-features = false} diff --git a/substrate/frame/examples/split/Cargo.toml b/substrate/frame/examples/split/Cargo.toml index e8714009c5e80fb1d9016775494a43aac5df1bc2..db2a75e388d5e9e7af4717051f66c909640e5a56 100644 --- a/substrate/frame/examples/split/Cargo.toml +++ b/substrate/frame/examples/split/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-support = { path = "../../support", default-features = false} frame-system = { path = "../../system", default-features = false} diff --git a/substrate/frame/examples/src/lib.rs b/substrate/frame/examples/src/lib.rs index a7084fc6ef9bcb465952ec1203323b3d6db99322..8d65639f835264c1e6220a4e19072cfe913445e0 100644 --- a/substrate/frame/examples/src/lib.rs +++ b/substrate/frame/examples/src/lib.rs @@ -40,4 +40,7 @@ //! - [`pallet_example_split`]: A simple example of a FRAME pallet demonstrating the ability to //! split sections across multiple files. //! +//! - [`pallet_example_frame_crate`]: Example pallet showcasing how one can be +//! built using only the `frame` umbrella crate. +//! //! **Tip**: Use `cargo doc --package --open` to view each pallet's documentation. 
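A note on the `#[pallet::feeless_if]` attribute used in the kitchensink example above: the closure receives a reference to the origin plus a reference to each call argument and returns `bool`; when it evaluates to `true`, the extrinsic can be exempted from fees by a transaction extension that honours the flag (such as `SkipCheckIfFeeless` from `pallet-skip-feeless-payment`). A hedged, hypothetical sketch inside an ordinary `#[frame_support::pallet]` module (names and weights are illustrative, not from this PR):

    #[pallet::call_index(1)]
    #[pallet::weight(Weight::from_parts(10_000, 0))]
    // Hypothetical dispatchable: waive fees whenever the payload is empty.
    #[pallet::feeless_if(|_origin: &OriginFor<T>, payload: &Vec<u8>| -> bool {
        payload.is_empty()
    })]
    pub fn submit_payload(_origin: OriginFor<T>, payload: Vec<u8>) -> DispatchResult {
        Ok(())
    }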
diff --git a/substrate/frame/executive/Cargo.toml b/substrate/frame/executive/Cargo.toml index b396100912344153aceb7aebdc6596cd5731ba9a..32983a32c4fffabdb561e92659ce3cd878573ad9 100644 --- a/substrate/frame/executive/Cargo.toml +++ b/substrate/frame/executive/Cargo.toml @@ -17,7 +17,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", ] } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} frame-try-runtime = { path = "../try-runtime", default-features = false, optional = true } diff --git a/substrate/frame/executive/src/lib.rs b/substrate/frame/executive/src/lib.rs index 1d9afdfa60aa4441a4701822066c21711c41eb0c..dec1fe158bd60925389ceef2f069d2c829103b48 100644 --- a/substrate/frame/executive/src/lib.rs +++ b/substrate/frame/executive/src/lib.rs @@ -121,8 +121,8 @@ use frame_support::{ dispatch::{DispatchClass, DispatchInfo, GetDispatchInfo, PostDispatchInfo}, pallet_prelude::InvalidTransaction, traits::{ - EnsureInherentsAreFirst, ExecuteBlock, OffchainWorker, OnFinalize, OnIdle, OnInitialize, - OnRuntimeUpgrade, + BeforeAllRuntimeMigrations, EnsureInherentsAreFirst, ExecuteBlock, OffchainWorker, + OnFinalize, OnIdle, OnInitialize, OnRuntimeUpgrade, }, weights::Weight, }; @@ -139,9 +139,15 @@ use sp_runtime::{ use sp_std::{marker::PhantomData, prelude::*}; #[cfg(feature = "try-runtime")] -use log; -#[cfg(feature = "try-runtime")] -use sp_runtime::TryRuntimeError; +use ::{ + frame_support::{ + traits::{TryDecodeEntireStorage, TryDecodeEntireStorageError, TryState}, + StorageNoopGuard, + }, + frame_try_runtime::{TryStateSelect, UpgradeCheckSelect}, + log, + sp_runtime::TryRuntimeError, +}; #[allow(dead_code)] const LOG_TARGET: &str = "runtime::executive"; @@ -188,6 +194,7 @@ impl< Context: Default, UnsignedValidator, AllPalletsWithSystem: OnRuntimeUpgrade + + BeforeAllRuntimeMigrations + OnInitialize> + OnIdle> + OnFinalize> @@ -225,11 +232,13 @@ impl< Context: Default, UnsignedValidator, AllPalletsWithSystem: OnRuntimeUpgrade + + BeforeAllRuntimeMigrations + OnInitialize> + OnIdle> + OnFinalize> + OffchainWorker> - + frame_support::traits::TryState>, + + TryState> + + TryDecodeEntireStorage, COnRuntimeUpgrade: OnRuntimeUpgrade, > Executive where @@ -308,11 +317,15 @@ where let _guard = frame_support::StorageNoopGuard::default(); , - >>::try_state(*header.number(), select) + >>::try_state(*header.number(), select.clone()) .map_err(|e| { log::error!(target: LOG_TARGET, "failure: {:?}", e); e })?; + if select.any() { + let res = AllPalletsWithSystem::try_decode_entire_state(); + Self::log_decode_result(res)?; + } drop(_guard); // do some of the checks that would normally happen in `final_checks`, but perhaps skip @@ -349,39 +362,65 @@ where Ok(frame_system::Pallet::::block_weight().total()) } - /// Execute all `OnRuntimeUpgrade` of this runtime, including the pre and post migration checks. + /// Execute all `OnRuntimeUpgrade` of this runtime. /// - /// Runs the try-state code both before and after the migration function if `checks` is set to - /// `true`. Also, if set to `true`, it runs the `pre_upgrade` and `post_upgrade` hooks. 
- pub fn try_runtime_upgrade( - checks: frame_try_runtime::UpgradeCheckSelect, - ) -> Result { - if checks.try_state() { - let _guard = frame_support::StorageNoopGuard::default(); - , - >>::try_state( - frame_system::Pallet::::block_number(), - frame_try_runtime::TryStateSelect::All, - )?; - } - - let weight = + /// The `checks` param determines whether to execute `pre/post_upgrade` and `try_state` hooks. + pub fn try_runtime_upgrade(checks: UpgradeCheckSelect) -> Result { + let before_all_weight = + ::before_all_runtime_migrations(); + let try_on_runtime_upgrade_weight = <(COnRuntimeUpgrade, AllPalletsWithSystem) as OnRuntimeUpgrade>::try_on_runtime_upgrade( checks.pre_and_post(), )?; + // Nothing should modify the state after the migrations ran: + let _guard = StorageNoopGuard::default(); + + // The state must be decodable: + if checks.any() { + let res = AllPalletsWithSystem::try_decode_entire_state(); + Self::log_decode_result(res)?; + } + // Check all storage invariants: if checks.try_state() { - let _guard = frame_support::StorageNoopGuard::default(); - , - >>::try_state( + AllPalletsWithSystem::try_state( frame_system::Pallet::::block_number(), - frame_try_runtime::TryStateSelect::All, + TryStateSelect::All, )?; } - Ok(weight) + Ok(before_all_weight.saturating_add(try_on_runtime_upgrade_weight)) + } + + /// Logs the result of trying to decode the entire state. + fn log_decode_result( + res: Result>, + ) -> Result<(), TryRuntimeError> { + match res { + Ok(bytes) => { + log::debug!( + target: LOG_TARGET, + "decoded the entire state ({bytes} bytes)", + ); + + Ok(()) + }, + Err(errors) => { + log::error!( + target: LOG_TARGET, + "`try_decode_entire_state` failed with {} errors", + errors.len(), + ); + + for (i, err) in errors.iter().enumerate() { + // We log the short version to `error` and then the full debug info to `debug`: + log::error!(target: LOG_TARGET, "- {i}. error: {err}"); + log::debug!(target: LOG_TARGET, "- {i}. error: {err:?}"); + } + + Err("`try_decode_entire_state` failed".into()) + }, + } } } @@ -394,6 +433,7 @@ impl< Context: Default, UnsignedValidator, AllPalletsWithSystem: OnRuntimeUpgrade + + BeforeAllRuntimeMigrations + OnInitialize> + OnIdle> + OnFinalize> @@ -410,7 +450,10 @@ where { /// Execute all `OnRuntimeUpgrade` of this runtime, and return the aggregate weight. pub fn execute_on_runtime_upgrade() -> Weight { + let before_all_weight = + ::before_all_runtime_migrations(); <(COnRuntimeUpgrade, AllPalletsWithSystem) as OnRuntimeUpgrade>::on_runtime_upgrade() + .saturating_add(before_all_weight) } /// Start the execution of a particular block. @@ -444,6 +487,12 @@ where let mut weight = Weight::zero(); if Self::runtime_upgraded() { weight = weight.saturating_add(Self::execute_on_runtime_upgrade()); + + frame_system::LastRuntimeUpgrade::::put( + frame_system::LastRuntimeUpgradeInfo::from( + >::get(), + ), + ); } >::initialize(block_number, parent_hash, digest); weight = weight.saturating_add(::note_finished_initialize(); } - /// Returns if the runtime was upgraded since the last time this function was called. + /// Returns if the runtime has been upgraded, based on [`frame_system::LastRuntimeUpgrade`]. 
fn runtime_upgraded() -> bool { let last = frame_system::LastRuntimeUpgrade::::get(); let current = >::get(); - if last.map(|v| v.was_upgraded(¤t)).unwrap_or(true) { - frame_system::LastRuntimeUpgrade::::put( - frame_system::LastRuntimeUpgradeInfo::from(current), - ); - true - } else { - false - } + last.map(|v| v.was_upgraded(¤t)).unwrap_or(true) } fn initial_checks(block: &Block) { @@ -712,7 +754,7 @@ mod tests { traits::{fungible, ConstU32, ConstU64, ConstU8, Currency}, weights::{ConstantMultiplier, IdentityFee, RuntimeDbWeight, Weight, WeightToFee}, }; - use frame_system::{ChainContext, LastRuntimeUpgradeInfo}; + use frame_system::{ChainContext, LastRuntimeUpgrade, LastRuntimeUpgradeInfo}; use pallet_balances::Call as BalancesCall; use pallet_transaction_payment::CurrencyAdapter; @@ -904,6 +946,7 @@ mod tests { type FreezeIdentifier = (); type MaxFreezes = ConstU32<1>; type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = ConstU32<1>; } @@ -950,6 +993,9 @@ mod tests { sp_io::storage::set(TEST_KEY, "custom_upgrade".as_bytes()); sp_io::storage::set(CUSTOM_ON_RUNTIME_KEY, &true.encode()); System::deposit_event(frame_system::Event::CodeUpdated); + + assert_eq!(0, System::last_runtime_upgrade_spec_version()); + Weight::from_parts(100, 0) } } @@ -1312,17 +1358,13 @@ mod tests { new_test_ext(1).execute_with(|| { RuntimeVersionTestValues::mutate(|v| *v = Default::default()); // It should be added at genesis - assert!(frame_system::LastRuntimeUpgrade::::exists()); + assert!(LastRuntimeUpgrade::::exists()); assert!(!Executive::runtime_upgraded()); RuntimeVersionTestValues::mutate(|v| { *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } }); assert!(Executive::runtime_upgraded()); - assert_eq!( - Some(LastRuntimeUpgradeInfo { spec_version: 1.into(), spec_name: "".into() }), - frame_system::LastRuntimeUpgrade::::get(), - ); RuntimeVersionTestValues::mutate(|v| { *v = sp_version::RuntimeVersion { @@ -1332,27 +1374,18 @@ mod tests { } }); assert!(Executive::runtime_upgraded()); - assert_eq!( - Some(LastRuntimeUpgradeInfo { spec_version: 1.into(), spec_name: "test".into() }), - frame_system::LastRuntimeUpgrade::::get(), - ); RuntimeVersionTestValues::mutate(|v| { *v = sp_version::RuntimeVersion { - spec_version: 1, - spec_name: "test".into(), + spec_version: 0, impl_version: 2, ..Default::default() } }); assert!(!Executive::runtime_upgraded()); - frame_system::LastRuntimeUpgrade::::take(); + LastRuntimeUpgrade::::take(); assert!(Executive::runtime_upgraded()); - assert_eq!( - Some(LastRuntimeUpgradeInfo { spec_version: 1.into(), spec_name: "test".into() }), - frame_system::LastRuntimeUpgrade::::get(), - ); }) } @@ -1400,6 +1433,10 @@ mod tests { assert_eq!(&sp_io::storage::get(TEST_KEY).unwrap()[..], *b"module"); assert_eq!(sp_io::storage::get(CUSTOM_ON_RUNTIME_KEY).unwrap(), true.encode()); + assert_eq!( + Some(RuntimeVersionTestValues::get().into()), + LastRuntimeUpgrade::::get(), + ) }); } @@ -1475,6 +1512,9 @@ mod tests { #[test] fn all_weights_are_recorded_correctly() { + // Reset to get the correct new genesis below. + RuntimeVersionTestValues::take(); + new_test_ext(1).execute_with(|| { // Make sure `on_runtime_upgrade` is called for maximum complexity RuntimeVersionTestValues::mutate(|v| { @@ -1491,6 +1531,10 @@ mod tests { Digest::default(), )); + // Reset the last runtime upgrade info, to make the second call to `on_runtime_upgrade` + // succeed. 
+ LastRuntimeUpgrade::::take(); + // All weights that show up in the `initialize_block_impl` let custom_runtime_upgrade_weight = CustomOnRuntimeUpgrade::on_runtime_upgrade(); let runtime_upgrade_weight = diff --git a/substrate/frame/fast-unstake/Cargo.toml b/substrate/frame/fast-unstake/Cargo.toml index 85548c6600db827f316549b91c4a120be406745d..2aa2e918f3e3c307ca3b90c78d8b6ada4812e6fe 100644 --- a/substrate/frame/fast-unstake/Cargo.toml +++ b/substrate/frame/fast-unstake/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} @@ -27,7 +27,7 @@ frame-election-provider-support = { path = "../election-provider-support", defau frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -docify = "0.2.4" +docify = "0.2.6" [dev-dependencies] pallet-staking-reward-curve = { path = "../staking/reward-curve" } diff --git a/substrate/frame/fast-unstake/src/benchmarking.rs b/substrate/frame/fast-unstake/src/benchmarking.rs index 5ec997e8eaa2ae8acd452cdb158799e3dcca7635..851483e3697bfdffe0e9dcabf8f9a04111a25d0b 100644 --- a/substrate/frame/fast-unstake/src/benchmarking.rs +++ b/substrate/frame/fast-unstake/src/benchmarking.rs @@ -74,9 +74,9 @@ fn setup_staking(v: u32, until: EraIndex) { .collect::>(); for era in 0..=until { - let others = (0..T::MaxBackersPerValidator::get()) + let others = (0..T::Staking::max_exposure_page_size()) .map(|s| { - let who = frame_benchmarking::account::("nominator", era, s); + let who = frame_benchmarking::account::("nominator", era, s.into()); let value = ed; (who, value) }) diff --git a/substrate/frame/fast-unstake/src/lib.rs b/substrate/frame/fast-unstake/src/lib.rs index 2b99ad79a7dfdd16d0aae764c289d36f569011ab..153b6c2c353f6cc40ac0689fc1ea26830653ede9 100644 --- a/substrate/frame/fast-unstake/src/lib.rs +++ b/substrate/frame/fast-unstake/src/lib.rs @@ -203,10 +203,6 @@ pub mod pallet { /// The weight information of this pallet. type WeightInfo: WeightInfo; - - /// Use only for benchmarking. - #[cfg(feature = "runtime-benchmarks")] - type MaxBackersPerValidator: Get; } /// The current "head of the queue" being unstaked. 
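The staking-related hunks above and below (the election-provider e2e mock and the fast-unstake mock) carry the same downstream-facing rename in `pallet_staking::Config`: nominator reward exposure is now paged, so the old `MaxNominatorRewardedPerValidator` cap becomes `MaxExposurePageSize`. A minimal sketch of the change as a runtime author would apply it (`Runtime` and the bound value are placeholders; every other item is unchanged):

    impl pallet_staking::Config for Runtime {
        // ... all other associated types as before ...
        // was: type MaxNominatorRewardedPerValidator = ConstU32<256>;
        type MaxExposurePageSize = ConstU32<256>;
    }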
diff --git a/substrate/frame/fast-unstake/src/mock.rs b/substrate/frame/fast-unstake/src/mock.rs index cf274c784f9f99b87150c1bde0845f2d867cd3a2..df133bdfd47f6e56b81eca11048d53b14bb51319 100644 --- a/substrate/frame/fast-unstake/src/mock.rs +++ b/substrate/frame/fast-unstake/src/mock.rs @@ -76,6 +76,7 @@ impl pallet_balances::Config for Runtime { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = (); } @@ -133,7 +134,7 @@ impl pallet_staking::Config for Runtime { type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = (); type HistoryDepth = ConstU32<84>; - type MaxNominatorRewardedPerValidator = ConstU32<64>; + type MaxExposurePageSize = ConstU32<64>; type OffendingValidatorsThreshold = (); type ElectionProvider = MockElection; type GenesisElectionProvider = Self::ElectionProvider; @@ -174,8 +175,6 @@ impl fast_unstake::Config for Runtime { type BatchSize = BatchSize; type WeightInfo = (); type MaxErasToCheckPerBlock = ConstU32<16>; - #[cfg(feature = "runtime-benchmarks")] - type MaxBackersPerValidator = ConstU32<128>; } type Block = frame_system::mocking::MockBlock; diff --git a/substrate/frame/glutton/Cargo.toml b/substrate/frame/glutton/Cargo.toml index c1dc926ff5ef3efdb971ac5987cfeaf11c7ed779..368fcab65cc2a74a4ddc042f5e544f00410a93ea 100644 --- a/substrate/frame/glutton/Cargo.toml +++ b/substrate/frame/glutton/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] blake2 = { version = "0.10.4", default-features = false } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } log = { version = "0.4.14", default-features = false } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} @@ -36,6 +36,7 @@ std = [ "frame-benchmarking?/std", "frame-support/std", "frame-system/std", + "log/std", "pallet-balances/std", "scale-info/std", "sp-core/std", diff --git a/substrate/frame/glutton/src/lib.rs b/substrate/frame/glutton/src/lib.rs index f5e90a17c735ec9f4c0fcb21b35824eea33d1136..344a70becaeb9eb35cb17c640977f4c8401684ab 100644 --- a/substrate/frame/glutton/src/lib.rs +++ b/substrate/frame/glutton/src/lib.rs @@ -21,9 +21,8 @@ //! //! # Glutton Pallet //! -//! Pallet that consumes `ref_time` and `proof_size` of a block. Based on the -//! `Compute` and `Storage` parameters the pallet consumes the adequate amount -//! of weight. +//! Pallet that consumes `ref_time` and `proof_size` of a block. Based on the `Compute` and +//! `Storage` parameters the pallet consumes the adequate amount of weight. 
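For orientation only, the idea behind that description can be sketched outside the pallet: given a block's weight limit and two fill ratios (stand-ins for the `Compute` and `Storage` parameters; the pallet's real parameter types may differ), the weight to consume per block is just the proportional split of `ref_time` and `proof_size`. This is an illustration, not the pallet's actual code.

use frame_support::weights::Weight;
use sp_runtime::Perbill;

fn target_weight(block_limit: Weight, compute: Perbill, storage: Perbill) -> Weight {
    Weight::from_parts(
        compute.mul_floor(block_limit.ref_time()),    // share of computation to burn
        storage.mul_floor(block_limit.proof_size()),  // share of proof size to fill
    )
}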
#![deny(missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] diff --git a/substrate/frame/grandpa/Cargo.toml b/substrate/frame/grandpa/Cargo.toml index 116e786a6c8918ffefce7606f304b1af5550f141..5eacc21721badedecb02ca5953ecf213bba8ad2a 100644 --- a/substrate/frame/grandpa/Cargo.toml +++ b/substrate/frame/grandpa/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive", "serde"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} diff --git a/substrate/frame/grandpa/src/lib.rs b/substrate/frame/grandpa/src/lib.rs index 95d1c8aa609495f567e23a0650365c924bad040b..0b9f2b3582792e9d93695d34656912a8d70a131f 100644 --- a/substrate/frame/grandpa/src/lib.rs +++ b/substrate/frame/grandpa/src/lib.rs @@ -30,14 +30,13 @@ // Re-export since this is necessary for `impl_apis` in runtime. pub use sp_consensus_grandpa::{ - self as fg_primitives, AuthorityId, AuthorityList, AuthorityWeight, VersionedAuthorityList, + self as fg_primitives, AuthorityId, AuthorityList, AuthorityWeight, }; -use codec::{self as codec, Decode, Encode, MaxEncodedLen}; +use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ dispatch::{DispatchResultWithPostInfo, Pays}, pallet_prelude::Get, - storage, traits::OneSessionHandler, weights::Weight, WeakBoundedVec, @@ -45,8 +44,8 @@ use frame_support::{ use frame_system::pallet_prelude::BlockNumberFor; use scale_info::TypeInfo; use sp_consensus_grandpa::{ - ConsensusLog, EquivocationProof, ScheduledChange, SetId, GRANDPA_AUTHORITIES_KEY, - GRANDPA_ENGINE_ID, RUNTIME_LOG_TARGET as LOG_TARGET, + ConsensusLog, EquivocationProof, ScheduledChange, SetId, GRANDPA_ENGINE_ID, + RUNTIME_LOG_TARGET as LOG_TARGET, }; use sp_runtime::{generic::DigestItem, traits::Zero, DispatchResult}; use sp_session::{GetSessionNumber, GetValidatorCount}; @@ -75,7 +74,7 @@ pub mod pallet { use frame_system::pallet_prelude::*; /// The current storage version. - const STORAGE_VERSION: StorageVersion = StorageVersion::new(4); + const STORAGE_VERSION: StorageVersion = StorageVersion::new(5); #[pallet::pallet] #[pallet::storage_version(STORAGE_VERSION)] @@ -145,7 +144,7 @@ pub mod pallet { // enact the change if we've reached the enacting block if block_number == pending_change.scheduled_at + pending_change.delay { - Self::set_grandpa_authorities(&pending_change.next_authorities); + Authorities::::put(&pending_change.next_authorities); Self::deposit_event(Event::NewAuthorities { authority_set: pending_change.next_authorities.into_inner(), }); @@ -342,6 +341,11 @@ pub mod pallet { #[pallet::getter(fn session_for_set)] pub(super) type SetIdSession = StorageMap<_, Twox64Concat, SetId, SessionIndex>; + /// The current list of authorities. 
+ #[pallet::storage] + pub(crate) type Authorities = + StorageValue<_, BoundedAuthorityList, ValueQuery>; + #[derive(frame_support::DefaultNoBound)] #[pallet::genesis_config] pub struct GenesisConfig { @@ -354,7 +358,7 @@ pub mod pallet { impl BuildGenesisConfig for GenesisConfig { fn build(&self) { CurrentSetId::::put(SetId::default()); - Pallet::::initialize(&self.authorities) + Pallet::::initialize(self.authorities.clone()) } } @@ -428,12 +432,7 @@ pub enum StoredState { impl Pallet { /// Get the current set of authorities, along with their respective weights. pub fn grandpa_authorities() -> AuthorityList { - storage::unhashed::get_or_default::(GRANDPA_AUTHORITIES_KEY).into() - } - - /// Set the current set of authorities, along with their respective weights. - fn set_grandpa_authorities(authorities: &AuthorityList) { - storage::unhashed::put(GRANDPA_AUTHORITIES_KEY, &VersionedAuthorityList::from(authorities)); + Authorities::::get().into_inner() } /// Schedule GRANDPA to pause starting in the given number of blocks. @@ -522,10 +521,14 @@ impl Pallet { // Perform module initialization, abstracted so that it can be called either through genesis // config builder or through `on_genesis_session`. - fn initialize(authorities: &AuthorityList) { + fn initialize(authorities: AuthorityList) { if !authorities.is_empty() { assert!(Self::grandpa_authorities().is_empty(), "Authorities are already initialized!"); - Self::set_grandpa_authorities(authorities); + Authorities::::put( + &BoundedAuthorityList::::try_from(authorities).expect( + "Grandpa: `Config::MaxAuthorities` is smaller than the number of genesis authorities!", + ), + ); } // NOTE: initialize first session of first set. this is necessary for @@ -568,7 +571,7 @@ where I: Iterator, { let authorities = validators.map(|(_, k)| (k, 1)).collect::>(); - Self::initialize(&authorities); + Self::initialize(authorities); } fn on_new_session<'a, I: 'a>(changed: bool, validators: I, _queued_validators: I) diff --git a/substrate/frame/grandpa/src/migrations.rs b/substrate/frame/grandpa/src/migrations.rs index 6307cbdd3b05685906b14a3c519325457bf9545e..3a484eb60d284c3988a1c2e1fad49bdc10d03b6a 100644 --- a/substrate/frame/grandpa/src/migrations.rs +++ b/substrate/frame/grandpa/src/migrations.rs @@ -22,8 +22,11 @@ use frame_support::{ use crate::{Config, CurrentSetId, SetIdSession, LOG_TARGET}; +pub use v5::MigrateV4ToV5; + /// Version 4. pub mod v4; +mod v5; /// This migration will clean up all stale set id -> session entries from the /// `SetIdSession` storage map, only the latest `max_set_id_session_entries` diff --git a/substrate/frame/grandpa/src/migrations/v5.rs b/substrate/frame/grandpa/src/migrations/v5.rs new file mode 100644 index 0000000000000000000000000000000000000000..24cfc34104b5f31cca70f93bc53309ee954a2d42 --- /dev/null +++ b/substrate/frame/grandpa/src/migrations/v5.rs @@ -0,0 +1,96 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{BoundedAuthorityList, Pallet}; +use codec::Decode; +use frame_support::{ + migrations::VersionedMigration, + storage, + traits::{Get, OnRuntimeUpgrade}, + weights::Weight, +}; +use sp_consensus_grandpa::AuthorityList; +use sp_std::{marker::PhantomData, vec::Vec}; + +const GRANDPA_AUTHORITIES_KEY: &[u8] = b":grandpa_authorities"; + +fn load_authority_list() -> AuthorityList { + storage::unhashed::get_raw(GRANDPA_AUTHORITIES_KEY).map_or_else( + || Vec::new(), + |l| <(u8, AuthorityList)>::decode(&mut &l[..]).unwrap_or_default().1, + ) +} + +/// Actual implementation of [`MigrateV4ToV5`]. +pub struct MigrateImpl(PhantomData); + +impl OnRuntimeUpgrade for MigrateImpl { + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + use codec::Encode; + + let authority_list_len = load_authority_list().len() as u32; + + if authority_list_len > T::MaxAuthorities::get() { + return Err( + "Grandpa: `Config::MaxAuthorities` is smaller than the actual number of authorities.".into() + ) + } + + if authority_list_len == 0 { + return Err("Grandpa: Authority list is empty!".into()) + } + + Ok(authority_list_len.encode()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { + let len = u32::decode(&mut &state[..]).unwrap(); + + frame_support::ensure!( + len == crate::Pallet::::grandpa_authorities().len() as u32, + "Grandpa: pre-migrated and post-migrated list should have the same length" + ); + + frame_support::ensure!( + load_authority_list().is_empty(), + "Old authority list shouldn't exist anymore" + ); + + Ok(()) + } + + fn on_runtime_upgrade() -> Weight { + crate::Authorities::::put( + &BoundedAuthorityList::::force_from( + load_authority_list(), + Some("Grandpa: `Config::MaxAuthorities` is smaller than the actual number of authorities.") + ) + ); + + storage::unhashed::kill(GRANDPA_AUTHORITIES_KEY); + + T::DbWeight::get().reads_writes(1, 2) + } +} + +/// Migrate the storage from V4 to V5. +/// +/// Switches from `GRANDPA_AUTHORITIES_KEY` to a normal FRAME storage item. 
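A hedged usage sketch (runtime type names such as `Runtime`, `Block` and `AllPalletsWithSystem` are placeholders): once the `MigrateV4ToV5` alias defined just below is exported from `pallet_grandpa::migrations`, a runtime that still keeps its authority set under the raw key would schedule it like any other versioned migration:

pub type Migrations = (pallet_grandpa::migrations::MigrateV4ToV5<Runtime>,);

pub type Executive = frame_executive::Executive<
    Runtime,
    Block,
    frame_system::ChainContext<Runtime>,
    Runtime,
    AllPalletsWithSystem,
    Migrations,
>;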
+pub type MigrateV4ToV5 = + VersionedMigration<4, 5, MigrateImpl, Pallet, ::DbWeight>; diff --git a/substrate/frame/grandpa/src/mock.rs b/substrate/frame/grandpa/src/mock.rs index fd4d737dc493faebcb311d8a845029deb3b7e233..9afcec1c797a3440cc8c572a77eb16f34d193aac 100644 --- a/substrate/frame/grandpa/src/mock.rs +++ b/substrate/frame/grandpa/src/mock.rs @@ -141,6 +141,7 @@ impl pallet_balances::Config for Test { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = (); } @@ -195,7 +196,7 @@ impl pallet_staking::Config for Test { type SessionInterface = Self; type UnixTime = pallet_timestamp::Pallet; type EraPayout = pallet_staking::ConvertCurve; - type MaxNominatorRewardedPerValidator = ConstU32<64>; + type MaxExposurePageSize = ConstU32<64>; type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type NextNewSession = Session; type ElectionProvider = onchain::OnChainExecution; diff --git a/substrate/frame/grandpa/src/tests.rs b/substrate/frame/grandpa/src/tests.rs index 59d73ee729ee8c9c9b9c04672ba05568986d5987..993d72af6d41062a24362bbbdaf394cb651dd2c7 100644 --- a/substrate/frame/grandpa/src/tests.rs +++ b/substrate/frame/grandpa/src/tests.rs @@ -333,7 +333,7 @@ fn report_equivocation_current_set_works() { assert_eq!(Staking::slashable_balance_of(validator), 10_000); assert_eq!( - Staking::eras_stakers(1, validator), + Staking::eras_stakers(1, &validator), pallet_staking::Exposure { total: 10_000, own: 10_000, others: vec![] }, ); } @@ -371,7 +371,7 @@ fn report_equivocation_current_set_works() { assert_eq!(Balances::total_balance(&equivocation_validator_id), 10_000_000 - 10_000); assert_eq!(Staking::slashable_balance_of(&equivocation_validator_id), 0); assert_eq!( - Staking::eras_stakers(2, equivocation_validator_id), + Staking::eras_stakers(2, &equivocation_validator_id), pallet_staking::Exposure { total: 0, own: 0, others: vec![] }, ); @@ -385,7 +385,7 @@ fn report_equivocation_current_set_works() { assert_eq!(Staking::slashable_balance_of(validator), 10_000); assert_eq!( - Staking::eras_stakers(2, validator), + Staking::eras_stakers(2, &validator), pallet_staking::Exposure { total: 10_000, own: 10_000, others: vec![] }, ); } @@ -417,7 +417,7 @@ fn report_equivocation_old_set_works() { assert_eq!(Staking::slashable_balance_of(validator), 10_000); assert_eq!( - Staking::eras_stakers(2, validator), + Staking::eras_stakers(2, &validator), pallet_staking::Exposure { total: 10_000, own: 10_000, others: vec![] }, ); } @@ -450,7 +450,7 @@ fn report_equivocation_old_set_works() { assert_eq!(Staking::slashable_balance_of(&equivocation_validator_id), 0); assert_eq!( - Staking::eras_stakers(3, equivocation_validator_id), + Staking::eras_stakers(3, &equivocation_validator_id), pallet_staking::Exposure { total: 0, own: 0, others: vec![] }, ); @@ -464,7 +464,7 @@ fn report_equivocation_old_set_works() { assert_eq!(Staking::slashable_balance_of(validator), 10_000); assert_eq!( - Staking::eras_stakers(3, validator), + Staking::eras_stakers(3, &validator), pallet_staking::Exposure { total: 10_000, own: 10_000, others: vec![] }, ); } diff --git a/substrate/frame/identity/Cargo.toml b/substrate/frame/identity/Cargo.toml index 533e8e68374ee7d2396ec5ddb7cbab39b1f7de0f..309c0aab55003fdaa8ea5d2720481710244ab458 100644 --- a/substrate/frame/identity/Cargo.toml +++ b/substrate/frame/identity/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = 
"3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } enumflags2 = { version = "0.7.7" } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} @@ -31,6 +31,7 @@ sp-core = { path = "../../primitives/core" } default = [ "std" ] std = [ "codec/std", + "enumflags2/std", "frame-benchmarking?/std", "frame-support/std", "frame-system/std", diff --git a/substrate/frame/identity/src/benchmarking.rs b/substrate/frame/identity/src/benchmarking.rs index 4b51d23f6b34f99463e29bb8054628ff62bd2945..3d976bd6c8812d4ccb202aa6537692d5201ef86f 100644 --- a/substrate/frame/identity/src/benchmarking.rs +++ b/substrate/frame/identity/src/benchmarking.rs @@ -22,7 +22,9 @@ use super::*; use crate::Pallet as Identity; -use frame_benchmarking::v1::{account, benchmarks, whitelisted_caller, BenchmarkError}; +use frame_benchmarking::{ + account, impl_benchmark_test_suite, v2::*, whitelisted_caller, BenchmarkError, +}; use frame_support::{ ensure, traits::{EnsureOrigin, Get}, @@ -46,14 +48,7 @@ fn add_registrars(r: u32) -> Result<(), &'static str> { .expect("RegistrarOrigin has no successful origin required for the benchmark"); Identity::::add_registrar(registrar_origin, registrar_lookup)?; Identity::::set_fee(RawOrigin::Signed(registrar.clone()).into(), i, 10u32.into())?; - let fields = - IdentityFields( - IdentityField::Display | - IdentityField::Legal | IdentityField::Web | - IdentityField::Riot | IdentityField::Email | - IdentityField::PgpFingerprint | - IdentityField::Image | IdentityField::Twitter, - ); + let fields = T::IdentityInformation::all_fields(); Identity::::set_fields(RawOrigin::Signed(registrar.clone()).into(), i, fields)?; } @@ -79,7 +74,7 @@ fn create_sub_accounts( // Set identity so `set_subs` does not fail. if IdentityOf::::get(who).is_none() { let _ = T::Currency::make_free_balance_be(who, BalanceOf::::max_value() / 2u32.into()); - let info = create_identity_info::(1); + let info = T::IdentityInformation::create_identity_info(); Identity::::set_identity(who_origin.into(), Box::new(info))?; } @@ -100,128 +95,127 @@ fn add_sub_accounts( Ok(subs) } -// This creates an `IdentityInfo` object with `num_fields` extra fields. -// All data is pre-populated with some arbitrary bytes. -fn create_identity_info(num_fields: u32) -> IdentityInfo { - let data = Data::Raw(vec![0; 32].try_into().unwrap()); +#[benchmarks] +mod benchmarks { + use super::*; - IdentityInfo { - additional: vec![(data.clone(), data.clone()); num_fields as usize].try_into().unwrap(), - display: data.clone(), - legal: data.clone(), - web: data.clone(), - riot: data.clone(), - email: data.clone(), - pgp_fingerprint: Some([0; 20]), - image: data.clone(), - twitter: data, - } -} - -benchmarks! { - add_registrar { - let r in 1 .. 
T::MaxRegistrars::get() - 1 => add_registrars::(r)?; + #[benchmark] + fn add_registrar(r: Linear<1, { T::MaxRegistrars::get() - 1 }>) -> Result<(), BenchmarkError> { + add_registrars::(r)?; ensure!(Registrars::::get().len() as u32 == r, "Registrars not set up correctly."); let origin = T::RegistrarOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; let account = T::Lookup::unlookup(account("registrar", r + 1, SEED)); - }: _(origin, account) - verify { + + #[extrinsic_call] + _(origin as T::RuntimeOrigin, account); + ensure!(Registrars::::get().len() as u32 == r + 1, "Registrars not added."); + Ok(()) } - set_identity { - let r in 1 .. T::MaxRegistrars::get() => add_registrars::(r)?; - let x in 0 .. T::MaxAdditionalFields::get(); - let caller = { - // The target user - let caller: T::AccountId = whitelisted_caller(); - let caller_lookup = T::Lookup::unlookup(caller.clone()); - let caller_origin: ::RuntimeOrigin = RawOrigin::Signed(caller.clone()).into(); - let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - - // Add an initial identity - let initial_info = create_identity_info::(1); - Identity::::set_identity(caller_origin.clone(), Box::new(initial_info.clone()))?; - - // User requests judgement from all the registrars, and they approve - for i in 0..r { - let registrar: T::AccountId = account("registrar", i, SEED); - let registrar_lookup = T::Lookup::unlookup(registrar.clone()); - let balance_to_use = T::Currency::minimum_balance() * 10u32.into(); - let _ = T::Currency::make_free_balance_be(®istrar, balance_to_use); - - Identity::::request_judgement(caller_origin.clone(), i, 10u32.into())?; - Identity::::provide_judgement( - RawOrigin::Signed(registrar).into(), - i, - caller_lookup.clone(), - Judgement::Reasonable, - T::Hashing::hash_of(&initial_info), - )?; - } - caller - }; - }: _(RawOrigin::Signed(caller.clone()), Box::new(create_identity_info::(x))) - verify { + #[benchmark] + fn set_identity(r: Linear<1, { T::MaxRegistrars::get() }>) -> Result<(), BenchmarkError> { + add_registrars::(r)?; + + let caller: T::AccountId = whitelisted_caller(); + let caller_lookup = T::Lookup::unlookup(caller.clone()); + let caller_origin: ::RuntimeOrigin = + RawOrigin::Signed(caller.clone()).into(); + let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + + // Add an initial identity + let initial_info = T::IdentityInformation::create_identity_info(); + Identity::::set_identity(caller_origin.clone(), Box::new(initial_info.clone()))?; + + // User requests judgement from all the registrars, and they approve + for i in 0..r { + let registrar: T::AccountId = account("registrar", i, SEED); + let _ = T::Lookup::unlookup(registrar.clone()); + let balance_to_use = T::Currency::minimum_balance() * 10u32.into(); + let _ = T::Currency::make_free_balance_be(®istrar, balance_to_use); + + Identity::::request_judgement(caller_origin.clone(), i, 10u32.into())?; + Identity::::provide_judgement( + RawOrigin::Signed(registrar).into(), + i, + caller_lookup.clone(), + Judgement::Reasonable, + T::Hashing::hash_of(&initial_info), + )?; + } + + #[extrinsic_call] + _( + RawOrigin::Signed(caller.clone()), + Box::new(T::IdentityInformation::create_identity_info()), + ); + assert_last_event::(Event::::IdentitySet { who: caller }.into()); + Ok(()) } // We need to split `set_subs` into two benchmarks to accurately isolate the potential // writes caused by new or old sub accounts. The actual weight should simply be // the sum of these two weights. 
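Grounded in the `set_subs` weight annotation later in this diff, the dispatch simply charges the sum of the two worst cases, roughly:

// Charge removal of up to `MaxSubAccounts` old subs plus insertion of the new ones.
fn set_subs_weight<T: pallet_identity::Config>(new_subs: u32) -> frame_support::weights::Weight {
    use frame_support::traits::Get;
    use pallet_identity::WeightInfo;

    <T as pallet_identity::Config>::WeightInfo::set_subs_old(T::MaxSubAccounts::get())
        .saturating_add(<T as pallet_identity::Config>::WeightInfo::set_subs_new(new_subs))
}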
- set_subs_new { + #[benchmark] + fn set_subs_new(s: Linear<0, { T::MaxSubAccounts::get() }>) -> Result<(), BenchmarkError> { let caller: T::AccountId = whitelisted_caller(); - // Create a new subs vec with s sub accounts - let s in 0 .. T::MaxSubAccounts::get() => (); + + // Create a new subs vec with sub accounts let subs = create_sub_accounts::(&caller, s)?; ensure!(SubsOf::::get(&caller).1.len() == 0, "Caller already has subs"); - }: set_subs(RawOrigin::Signed(caller.clone()), subs) - verify { + + #[extrinsic_call] + set_subs(RawOrigin::Signed(caller.clone()), subs); + ensure!(SubsOf::::get(&caller).1.len() as u32 == s, "Subs not added"); + Ok(()) } - set_subs_old { + #[benchmark] + fn set_subs_old(p: Linear<0, { T::MaxSubAccounts::get() }>) -> Result<(), BenchmarkError> { let caller: T::AccountId = whitelisted_caller(); + // Give them p many previous sub accounts. - let p in 0 .. T::MaxSubAccounts::get() => { - let _ = add_sub_accounts::(&caller, p)?; - }; + let _ = add_sub_accounts::(&caller, p)?; + // Remove all subs. let subs = create_sub_accounts::(&caller, 0)?; - ensure!( - SubsOf::::get(&caller).1.len() as u32 == p, - "Caller does have subs", - ); - }: set_subs(RawOrigin::Signed(caller.clone()), subs) - verify { + ensure!(SubsOf::::get(&caller).1.len() as u32 == p, "Caller does have subs",); + + #[extrinsic_call] + set_subs(RawOrigin::Signed(caller.clone()), subs); + ensure!(SubsOf::::get(&caller).1.len() == 0, "Subs not removed"); + Ok(()) } - clear_identity { + #[benchmark] + fn clear_identity( + r: Linear<1, { T::MaxRegistrars::get() }>, + s: Linear<0, { T::MaxSubAccounts::get() }>, + ) -> Result<(), BenchmarkError> { let caller: T::AccountId = whitelisted_caller(); - let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); + let caller_origin = + ::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); let caller_lookup = ::unlookup(caller.clone()); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - let r in 1 .. T::MaxRegistrars::get() => add_registrars::(r)?; - let s in 0 .. T::MaxSubAccounts::get() => { - // Give them s many sub accounts - let caller: T::AccountId = whitelisted_caller(); - let _ = add_sub_accounts::(&caller, s)?; - }; - let x in 0 .. T::MaxAdditionalFields::get(); + // Register the registrars + add_registrars::(r)?; + + // Add sub accounts + let _ = add_sub_accounts::(&caller, s)?; // Create their main identity with x additional fields - let info = create_identity_info::(x); - let caller: T::AccountId = whitelisted_caller(); - let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); + let info = T::IdentityInformation::create_identity_info(); Identity::::set_identity(caller_origin.clone(), Box::new(info.clone()))?; // User requests judgement from all the registrars, and they approve for i in 0..r { let registrar: T::AccountId = account("registrar", i, SEED); - let balance_to_use = T::Currency::minimum_balance() * 10u32.into(); + let balance_to_use = T::Currency::minimum_balance() * 10u32.into(); let _ = T::Currency::make_free_balance_be(®istrar, balance_to_use); Identity::::request_judgement(caller_origin.clone(), i, 10u32.into())?; @@ -233,111 +227,160 @@ benchmarks! 
{ T::Hashing::hash_of(&info), )?; } + ensure!(IdentityOf::::contains_key(&caller), "Identity does not exist."); - }: _(RawOrigin::Signed(caller.clone())) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone())); + ensure!(!IdentityOf::::contains_key(&caller), "Identity not cleared."); + Ok(()) } - request_judgement { + #[benchmark] + fn request_judgement(r: Linear<1, { T::MaxRegistrars::get() }>) -> Result<(), BenchmarkError> { let caller: T::AccountId = whitelisted_caller(); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - let r in 1 .. T::MaxRegistrars::get() => add_registrars::(r)?; - let x in 0 .. T::MaxAdditionalFields::get() => { - // Create their main identity with x additional fields - let info = create_identity_info::(x); - let caller: T::AccountId = whitelisted_caller(); - let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller)); - Identity::::set_identity(caller_origin, Box::new(info))?; - }; - }: _(RawOrigin::Signed(caller.clone()), r - 1, 10u32.into()) - verify { - assert_last_event::(Event::::JudgementRequested { who: caller, registrar_index: r-1 }.into()); + // Register the registrars + add_registrars::(r)?; + + // Create their main identity with x additional fields + let info = T::IdentityInformation::create_identity_info(); + let caller_origin = + ::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); + Identity::::set_identity(caller_origin.clone(), Box::new(info))?; + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), r - 1, 10u32.into()); + + assert_last_event::( + Event::::JudgementRequested { who: caller, registrar_index: r - 1 }.into(), + ); + + Ok(()) } - cancel_request { + #[benchmark] + fn cancel_request(r: Linear<1, { T::MaxRegistrars::get() }>) -> Result<(), BenchmarkError> { let caller: T::AccountId = whitelisted_caller(); - let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - let r in 1 .. T::MaxRegistrars::get() => add_registrars::(r)?; - let x in 0 .. T::MaxAdditionalFields::get() => { - // Create their main identity with x additional fields - let info = create_identity_info::(x); - let caller: T::AccountId = whitelisted_caller(); - let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller)); - Identity::::set_identity(caller_origin, Box::new(info))?; - }; - - Identity::::request_judgement(caller_origin, r - 1, 10u32.into())?; - }: _(RawOrigin::Signed(caller.clone()), r - 1) - verify { - assert_last_event::(Event::::JudgementUnrequested { who: caller, registrar_index: r-1 }.into()); + // Register the registrars + add_registrars::(r)?; + + // Create their main identity with x additional fields + let info = T::IdentityInformation::create_identity_info(); + let caller_origin = + ::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); + Identity::::set_identity(caller_origin.clone(), Box::new(info))?; + + Identity::::request_judgement(caller_origin.clone(), r - 1, 10u32.into())?; + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), r - 1); + + assert_last_event::( + Event::::JudgementUnrequested { who: caller, registrar_index: r - 1 }.into(), + ); + + Ok(()) } - set_fee { + #[benchmark] + fn set_fee(r: Linear<1, { T::MaxRegistrars::get() - 1 }>) -> Result<(), BenchmarkError> { let caller: T::AccountId = whitelisted_caller(); let caller_lookup = T::Lookup::unlookup(caller.clone()); - let r in 1 .. 
T::MaxRegistrars::get() - 1 => add_registrars::(r)?; + add_registrars::(r)?; let registrar_origin = T::RegistrarOrigin::try_successful_origin() .expect("RegistrarOrigin has no successful origin required for the benchmark"); Identity::::add_registrar(registrar_origin, caller_lookup)?; + let registrars = Registrars::::get(); ensure!(registrars[r as usize].as_ref().unwrap().fee == 0u32.into(), "Fee already set."); - }: _(RawOrigin::Signed(caller), r, 100u32.into()) - verify { - let registrars = Registrars::::get(); - ensure!(registrars[r as usize].as_ref().unwrap().fee == 100u32.into(), "Fee not changed."); + + #[extrinsic_call] + _(RawOrigin::Signed(caller), r, 100u32.into()); + + let updated_registrars = Registrars::::get(); + ensure!( + updated_registrars[r as usize].as_ref().unwrap().fee == 100u32.into(), + "Fee not changed." + ); + + Ok(()) } - set_account_id { + #[benchmark] + fn set_account_id(r: Linear<1, { T::MaxRegistrars::get() - 1 }>) -> Result<(), BenchmarkError> { let caller: T::AccountId = whitelisted_caller(); let caller_lookup = T::Lookup::unlookup(caller.clone()); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - let r in 1 .. T::MaxRegistrars::get() - 1 => add_registrars::(r)?; + add_registrars::(r)?; let registrar_origin = T::RegistrarOrigin::try_successful_origin() .expect("RegistrarOrigin has no successful origin required for the benchmark"); Identity::::add_registrar(registrar_origin, caller_lookup)?; + let registrars = Registrars::::get(); ensure!(registrars[r as usize].as_ref().unwrap().account == caller, "id not set."); + let new_account = T::Lookup::unlookup(account("new", 0, SEED)); - }: _(RawOrigin::Signed(caller), r, new_account) - verify { - let registrars = Registrars::::get(); - ensure!(registrars[r as usize].as_ref().unwrap().account == account("new", 0, SEED), "id not changed."); + + #[extrinsic_call] + _(RawOrigin::Signed(caller), r, new_account); + + let updated_registrars = Registrars::::get(); + ensure!( + updated_registrars[r as usize].as_ref().unwrap().account == account("new", 0, SEED), + "id not changed." + ); + + Ok(()) } - set_fields { + #[benchmark] + fn set_fields(r: Linear<1, { T::MaxRegistrars::get() - 1 }>) -> Result<(), BenchmarkError> { let caller: T::AccountId = whitelisted_caller(); let caller_lookup = T::Lookup::unlookup(caller.clone()); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - let r in 1 .. T::MaxRegistrars::get() - 1 => add_registrars::(r)?; + add_registrars::(r)?; let registrar_origin = T::RegistrarOrigin::try_successful_origin() .expect("RegistrarOrigin has no successful origin required for the benchmark"); Identity::::add_registrar(registrar_origin, caller_lookup)?; - let fields = IdentityFields( - IdentityField::Display | IdentityField::Legal | IdentityField::Web | IdentityField::Riot - | IdentityField::Email | IdentityField::PgpFingerprint | IdentityField::Image | IdentityField::Twitter - ); - let registrars = Registrars::::get(); - ensure!(registrars[r as usize].as_ref().unwrap().fields == Default::default(), "fields already set."); - }: _(RawOrigin::Signed(caller), r, fields) - verify { + let registrars = Registrars::::get(); - ensure!(registrars[r as usize].as_ref().unwrap().fields != Default::default(), "fields not set."); + ensure!( + registrars[r as usize].as_ref().unwrap().fields == Default::default(), + "fields already set." 
+ ); + let fields = T::IdentityInformation::all_fields(); + + #[extrinsic_call] + _(RawOrigin::Signed(caller), r, fields); + + let updated_registrars = Registrars::::get(); + ensure!( + updated_registrars[r as usize].as_ref().unwrap().fields != Default::default(), + "fields not set." + ); + + Ok(()) } - provide_judgement { + #[benchmark] + fn provide_judgement( + r: Linear<1, { T::MaxRegistrars::get() - 1 }>, + ) -> Result<(), BenchmarkError> { // The user let user: T::AccountId = account("user", r, SEED); - let user_origin = ::RuntimeOrigin::from(RawOrigin::Signed(user.clone())); + let user_origin = + ::RuntimeOrigin::from(RawOrigin::Signed(user.clone())); let user_lookup = ::unlookup(user.clone()); let _ = T::Currency::make_free_balance_be(&user, BalanceOf::::max_value()); @@ -345,10 +388,9 @@ benchmarks! { let caller_lookup = T::Lookup::unlookup(caller.clone()); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - let r in 1 .. T::MaxRegistrars::get() - 1 => add_registrars::(r)?; - let x in 0 .. T::MaxAdditionalFields::get(); + add_registrars::(r)?; - let info = create_identity_info::(x); + let info = T::IdentityInformation::create_identity_info(); let info_hash = T::Hashing::hash_of(&info); Identity::::set_identity(user_origin.clone(), Box::new(info))?; @@ -356,29 +398,38 @@ benchmarks! { .expect("RegistrarOrigin has no successful origin required for the benchmark"); Identity::::add_registrar(registrar_origin, caller_lookup)?; Identity::::request_judgement(user_origin, r, 10u32.into())?; - }: _(RawOrigin::Signed(caller), r, user_lookup, Judgement::Reasonable, info_hash) - verify { - assert_last_event::(Event::::JudgementGiven { target: user, registrar_index: r }.into()) + + #[extrinsic_call] + _(RawOrigin::Signed(caller), r, user_lookup, Judgement::Reasonable, info_hash); + + assert_last_event::( + Event::::JudgementGiven { target: user, registrar_index: r }.into(), + ); + + Ok(()) } - kill_identity { - let r in 1 .. T::MaxRegistrars::get() => add_registrars::(r)?; - let s in 0 .. T::MaxSubAccounts::get(); - let x in 0 .. T::MaxAdditionalFields::get(); + #[benchmark] + fn kill_identity( + r: Linear<1, { T::MaxRegistrars::get() }>, + s: Linear<0, { T::MaxSubAccounts::get() }>, + ) -> Result<(), BenchmarkError> { + add_registrars::(r)?; let target: T::AccountId = account("target", 0, SEED); - let target_origin: ::RuntimeOrigin = RawOrigin::Signed(target.clone()).into(); + let target_origin: ::RuntimeOrigin = + RawOrigin::Signed(target.clone()).into(); let target_lookup = T::Lookup::unlookup(target.clone()); let _ = T::Currency::make_free_balance_be(&target, BalanceOf::::max_value()); - let info = create_identity_info::(x); + let info = T::IdentityInformation::create_identity_info(); Identity::::set_identity(target_origin.clone(), Box::new(info.clone()))?; let _ = add_sub_accounts::(&target, s)?; // User requests judgement from all the registrars, and they approve for i in 0..r { let registrar: T::AccountId = account("registrar", i, SEED); - let balance_to_use = T::Currency::minimum_balance() * 10u32.into(); + let balance_to_use = T::Currency::minimum_balance() * 10u32.into(); let _ = T::Currency::make_free_balance_be(®istrar, balance_to_use); Identity::::request_judgement(target_origin.clone(), i, 10u32.into())?; @@ -390,62 +441,86 @@ benchmarks! 
{ T::Hashing::hash_of(&info), )?; } + ensure!(IdentityOf::::contains_key(&target), "Identity not set"); + let origin = T::ForceOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - }: _(origin, target_lookup) - verify { + + #[extrinsic_call] + _(origin as T::RuntimeOrigin, target_lookup); + ensure!(!IdentityOf::::contains_key(&target), "Identity not removed"); - } - add_sub { - let s in 0 .. T::MaxSubAccounts::get() - 1; + Ok(()) + } + #[benchmark] + fn add_sub(s: Linear<0, { T::MaxSubAccounts::get() - 1 }>) -> Result<(), BenchmarkError> { let caller: T::AccountId = whitelisted_caller(); let _ = add_sub_accounts::(&caller, s)?; let sub = account("new_sub", 0, SEED); let data = Data::Raw(vec![0; 32].try_into().unwrap()); + ensure!(SubsOf::::get(&caller).1.len() as u32 == s, "Subs not set."); - }: _(RawOrigin::Signed(caller.clone()), T::Lookup::unlookup(sub), data) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), T::Lookup::unlookup(sub), data); + ensure!(SubsOf::::get(&caller).1.len() as u32 == s + 1, "Subs not added."); - } - rename_sub { - let s in 1 .. T::MaxSubAccounts::get(); + Ok(()) + } + #[benchmark] + fn rename_sub(s: Linear<1, { T::MaxSubAccounts::get() }>) -> Result<(), BenchmarkError> { let caller: T::AccountId = whitelisted_caller(); let (sub, _) = add_sub_accounts::(&caller, s)?.remove(0); let data = Data::Raw(vec![1; 32].try_into().unwrap()); + ensure!(SuperOf::::get(&sub).unwrap().1 != data, "data already set"); - }: _(RawOrigin::Signed(caller), T::Lookup::unlookup(sub.clone()), data.clone()) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(caller), T::Lookup::unlookup(sub.clone()), data.clone()); + ensure!(SuperOf::::get(&sub).unwrap().1 == data, "data not set"); - } - remove_sub { - let s in 1 .. T::MaxSubAccounts::get(); + Ok(()) + } + #[benchmark] + fn remove_sub(s: Linear<1, { T::MaxSubAccounts::get() }>) -> Result<(), BenchmarkError> { let caller: T::AccountId = whitelisted_caller(); let (sub, _) = add_sub_accounts::(&caller, s)?.remove(0); ensure!(SuperOf::::contains_key(&sub), "Sub doesn't exists"); - }: _(RawOrigin::Signed(caller), T::Lookup::unlookup(sub.clone())) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(caller), T::Lookup::unlookup(sub.clone())); + ensure!(!SuperOf::::contains_key(&sub), "Sub not removed"); - } - quit_sub { - let s in 0 .. 
T::MaxSubAccounts::get() - 1; + Ok(()) + } + #[benchmark] + fn quit_sub(s: Linear<0, { T::MaxSubAccounts::get() - 1 }>) -> Result<(), BenchmarkError> { let caller: T::AccountId = whitelisted_caller(); let sup = account("super", 0, SEED); let _ = add_sub_accounts::(&sup, s)?; let sup_origin = RawOrigin::Signed(sup).into(); - Identity::::add_sub(sup_origin, T::Lookup::unlookup(caller.clone()), Data::Raw(vec![0; 32].try_into().unwrap()))?; + Identity::::add_sub( + sup_origin, + T::Lookup::unlookup(caller.clone()), + Data::Raw(vec![0; 32].try_into().unwrap()), + )?; ensure!(SuperOf::::contains_key(&caller), "Sub doesn't exists"); - }: _(RawOrigin::Signed(caller.clone())) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone())); + ensure!(!SuperOf::::contains_key(&caller), "Sub not removed"); + + Ok(()) } impl_benchmark_test_suite!(Identity, crate::tests::new_test_ext(), crate::tests::Test); diff --git a/substrate/frame/identity/src/legacy.rs b/substrate/frame/identity/src/legacy.rs new file mode 100644 index 0000000000000000000000000000000000000000..a7953f7e1786949cacb27a6af13e70ad8c8737c5 --- /dev/null +++ b/substrate/frame/identity/src/legacy.rs @@ -0,0 +1,187 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use codec::{Decode, Encode, MaxEncodedLen}; +#[cfg(feature = "runtime-benchmarks")] +use enumflags2::BitFlag; +use enumflags2::{bitflags, BitFlags}; +use frame_support::{traits::Get, CloneNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound}; +use scale_info::{build::Variants, Path, Type, TypeInfo}; +use sp_runtime::{BoundedVec, RuntimeDebug}; +use sp_std::prelude::*; + +use crate::types::{Data, IdentityInformationProvider}; + +/// The fields that we use to identify the owner of an account with. Each corresponds to a field +/// in the `IdentityInfo` struct. +#[bitflags] +#[repr(u64)] +#[derive(Clone, Copy, PartialEq, Eq, RuntimeDebug)] +pub enum IdentityField { + Display, + Legal, + Web, + Riot, + Email, + PgpFingerprint, + Image, + Twitter, +} + +impl TypeInfo for IdentityField { + type Identity = Self; + + fn type_info() -> scale_info::Type { + Type::builder().path(Path::new("IdentityField", module_path!())).variant( + Variants::new() + .variant("Display", |v| v.index(0)) + .variant("Legal", |v| v.index(1)) + .variant("Web", |v| v.index(2)) + .variant("Riot", |v| v.index(3)) + .variant("Email", |v| v.index(4)) + .variant("PgpFingerprint", |v| v.index(5)) + .variant("Image", |v| v.index(6)) + .variant("Twitter", |v| v.index(7)), + ) + } +} + +/// Information concerning the identity of the controller of an account. +/// +/// NOTE: This should be stored at the end of the storage item to facilitate the addition of extra +/// fields in a backwards compatible way through a specialized `Decode` impl. 
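Before the struct itself (defined next), a small illustration of how the new `FieldsIdentifier = u64` bitmask is built from the `IdentityField` flags declared above; the helper name is illustrative:

use pallet_identity::legacy::IdentityField;

/// A registrar that only attests to display names and email addresses would
/// describe itself with this bitmask.
fn display_and_email_mask() -> u64 {
    (IdentityField::Display | IdentityField::Email).bits()
}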
+#[derive( + CloneNoBound, + Encode, + Decode, + EqNoBound, + MaxEncodedLen, + PartialEqNoBound, + RuntimeDebugNoBound, + TypeInfo, +)] +#[codec(mel_bound())] +#[cfg_attr(test, derive(frame_support::DefaultNoBound))] +#[scale_info(skip_type_params(FieldLimit))] +pub struct IdentityInfo> { + /// Additional fields of the identity that are not catered for with the struct's explicit + /// fields. + pub additional: BoundedVec<(Data, Data), FieldLimit>, + + /// A reasonable display name for the controller of the account. This should be whatever it is + /// that it is typically known as and should not be confusable with other entities, given + /// reasonable context. + /// + /// Stored as UTF-8. + pub display: Data, + + /// The full legal name in the local jurisdiction of the entity. This might be a bit + /// long-winded. + /// + /// Stored as UTF-8. + pub legal: Data, + + /// A representative website held by the controller of the account. + /// + /// NOTE: `https://` is automatically prepended. + /// + /// Stored as UTF-8. + pub web: Data, + + /// The Riot/Matrix handle held by the controller of the account. + /// + /// Stored as UTF-8. + pub riot: Data, + + /// The email address of the controller of the account. + /// + /// Stored as UTF-8. + pub email: Data, + + /// The PGP/GPG public key of the controller of the account. + pub pgp_fingerprint: Option<[u8; 20]>, + + /// A graphic image representing the controller of the account. Should be a company, + /// organization or project logo or a headshot in the case of a human. + pub image: Data, + + /// The Twitter identity. The leading `@` character may be elided. + pub twitter: Data, +} + +impl + 'static> IdentityInformationProvider for IdentityInfo { + type FieldsIdentifier = u64; + + fn has_identity(&self, fields: Self::FieldsIdentifier) -> bool { + self.fields().bits() & fields == fields + } + + #[cfg(feature = "runtime-benchmarks")] + fn create_identity_info() -> Self { + let data = Data::Raw(vec![0; 32].try_into().unwrap()); + + IdentityInfo { + additional: vec![(data.clone(), data.clone()); FieldLimit::get().try_into().unwrap()] + .try_into() + .unwrap(), + display: data.clone(), + legal: data.clone(), + web: data.clone(), + riot: data.clone(), + email: data.clone(), + pgp_fingerprint: Some([0; 20]), + image: data.clone(), + twitter: data, + } + } + + #[cfg(feature = "runtime-benchmarks")] + fn all_fields() -> Self::FieldsIdentifier { + IdentityField::all().bits() + } +} + +impl> IdentityInfo { + pub(crate) fn fields(&self) -> BitFlags { + let mut res = >::empty(); + if !self.display.is_none() { + res.insert(IdentityField::Display); + } + if !self.legal.is_none() { + res.insert(IdentityField::Legal); + } + if !self.web.is_none() { + res.insert(IdentityField::Web); + } + if !self.riot.is_none() { + res.insert(IdentityField::Riot); + } + if !self.email.is_none() { + res.insert(IdentityField::Email); + } + if self.pgp_fingerprint.is_some() { + res.insert(IdentityField::PgpFingerprint); + } + if !self.image.is_none() { + res.insert(IdentityField::Image); + } + if !self.twitter.is_none() { + res.insert(IdentityField::Twitter); + } + res + } +} diff --git a/substrate/frame/identity/src/lib.rs b/substrate/frame/identity/src/lib.rs index f192ee2b461b3f6309fb4001079efd9f80344121..133f9eeb4befcb6a004dfc4792e1227d027d1602 100644 --- a/substrate/frame/identity/src/lib.rs +++ b/substrate/frame/identity/src/lib.rs @@ -73,20 +73,25 @@ #![cfg_attr(not(feature = "std"), no_std)] mod benchmarking; +pub mod legacy; #[cfg(test)] mod tests; mod types; pub mod 
weights; -use frame_support::traits::{BalanceStatus, Currency, OnUnbalanced, ReservableCurrency}; +use codec::Encode; +use frame_support::{ + ensure, + pallet_prelude::{DispatchError, DispatchResult}, + traits::{BalanceStatus, Currency, Get, OnUnbalanced, ReservableCurrency}, +}; use sp_runtime::traits::{AppendZerosInput, Hash, Saturating, StaticLookup, Zero}; use sp_std::prelude::*; pub use weights::WeightInfo; pub use pallet::*; pub use types::{ - Data, IdentityField, IdentityFields, IdentityInfo, Judgement, RegistrarIndex, RegistrarInfo, - Registration, + Data, IdentityInformationProvider, Judgement, RegistrarIndex, RegistrarInfo, Registration, }; type BalanceOf = @@ -114,9 +119,9 @@ pub mod pallet { #[pallet::constant] type BasicDeposit: Get>; - /// The amount held on deposit per additional field for a registered identity. + /// The amount held on deposit per encoded byte for a registered identity. #[pallet::constant] - type FieldDeposit: Get>; + type ByteDeposit: Get>; /// The amount held on deposit for a registered subaccount. This should account for the fact /// that one storage item's value will increase by the size of an account ID, and there will @@ -128,10 +133,8 @@ pub mod pallet { #[pallet::constant] type MaxSubAccounts: Get; - /// Maximum number of additional fields that may be stored in an ID. Needed to bound the I/O - /// required to access an identity, but can be pretty high. - #[pallet::constant] - type MaxAdditionalFields: Get; + /// Structure holding information about an identity. + type IdentityInformation: IdentityInformationProvider; /// Maxmimum number of registrars allowed in the system. Needed to bound the complexity /// of, e.g., updating judgements. @@ -163,7 +166,7 @@ pub mod pallet { _, Twox64Concat, T::AccountId, - Registration, T::MaxRegistrars, T::MaxAdditionalFields>, + Registration, T::MaxRegistrars, T::IdentityInformation>, OptionQuery, >; @@ -197,7 +200,16 @@ pub mod pallet { #[pallet::getter(fn registrars)] pub(super) type Registrars = StorageValue< _, - BoundedVec, T::AccountId>>, T::MaxRegistrars>, + BoundedVec< + Option< + RegistrarInfo< + BalanceOf, + T::AccountId, + ::FieldsIdentifier, + >, + >, + T::MaxRegistrars, + >, ValueQuery, >; @@ -225,8 +237,6 @@ pub mod pallet { InvalidIndex, /// The target is invalid. InvalidTarget, - /// Too many additional fields. - TooManyFields, /// Maximum amount of registrars reached. Cannot add any more. TooManyRegistrars, /// Account ID is already named. @@ -277,9 +287,6 @@ pub mod pallet { /// - `account`: the account of the registrar. /// /// Emits `RegistrarAdded` if successful. - /// - /// ## Complexity - /// - `O(R)` where `R` registrar-count (governance-bounded and code-bounded). #[pallet::call_index(0)] #[pallet::weight(T::WeightInfo::add_registrar(T::MaxRegistrars::get()))] pub fn add_registrar( @@ -317,24 +324,16 @@ pub mod pallet { /// - `info`: The identity information. /// /// Emits `IdentitySet` if successful. 
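To make the trait change concrete, a hedged sketch of how a runtime might now fill in the identity config (every right-hand-side name is a placeholder; the point is that `FieldDeposit`/`MaxAdditionalFields` are gone in favour of `ByteDeposit` and an `IdentityInformation` type):

impl pallet_identity::Config for Runtime {
    type RuntimeEvent = RuntimeEvent;
    type Currency = Balances;
    type BasicDeposit = BasicDeposit;
    type ByteDeposit = ByteDeposit;
    type SubAccountDeposit = SubAccountDeposit;
    type MaxSubAccounts = ConstU32<100>;
    type IdentityInformation = pallet_identity::legacy::IdentityInfo<ConstU32<100>>;
    type MaxRegistrars = ConstU32<20>;
    type Slashed = ();
    type ForceOrigin = EnsureRoot<AccountId>;
    type RegistrarOrigin = EnsureRoot<AccountId>;
    type WeightInfo = pallet_identity::weights::SubstrateWeight<Runtime>;
}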
- /// - /// ## Complexity - /// - `O(X + X' + R)` - /// - where `X` additional-field-count (deposit-bounded and code-bounded) - /// - where `R` judgements-count (registrar-count-bounded) #[pallet::call_index(1)] - #[pallet::weight( T::WeightInfo::set_identity( - T::MaxRegistrars::get(), // R - T::MaxAdditionalFields::get(), // X - ))] + #[pallet::weight(T::WeightInfo::set_identity(T::MaxRegistrars::get()))] pub fn set_identity( origin: OriginFor, - info: Box>, + info: Box, ) -> DispatchResultWithPostInfo { let sender = ensure_signed(origin)?; - let extra_fields = info.additional.len() as u32; - ensure!(extra_fields <= T::MaxAdditionalFields::get(), Error::::TooManyFields); - let fd = >::from(extra_fields) * T::FieldDeposit::get(); + let encoded_byte_size = info.encoded_size() as u32; + let byte_deposit = + T::ByteDeposit::get().saturating_mul(>::from(encoded_byte_size)); let mut id = match >::get(&sender) { Some(mut id) => { @@ -351,7 +350,7 @@ pub mod pallet { }; let old_deposit = id.deposit; - id.deposit = T::BasicDeposit::get() + fd; + id.deposit = T::BasicDeposit::get().saturating_add(byte_deposit); if id.deposit > old_deposit { T::Currency::reserve(&sender, id.deposit - old_deposit)?; } @@ -364,11 +363,7 @@ pub mod pallet { >::insert(&sender, id); Self::deposit_event(Event::IdentitySet { who: sender }); - Ok(Some(T::WeightInfo::set_identity( - judgements as u32, // R - extra_fields, // X - )) - .into()) + Ok(Some(T::WeightInfo::set_identity(judgements as u32)).into()) } /// Set the sub-accounts of the sender. @@ -380,11 +375,6 @@ pub mod pallet { /// identity. /// /// - `subs`: The identity's (new) sub-accounts. - /// - /// ## Complexity - /// - `O(P + S)` - /// - where `P` old-subs-count (hard- and deposit-bounded). - /// - where `S` subs-count (hard- and deposit-bounded). // TODO: This whole extrinsic screams "not optimized". For example we could // filter any overlap between new and old subs, and avoid reading/writing // to those values... We could also ideally avoid needing to write to @@ -392,8 +382,8 @@ pub mod pallet { // is a large overestimate due to the fact that it could potentially write // to 2 x T::MaxSubAccounts::get(). #[pallet::call_index(2)] - #[pallet::weight(T::WeightInfo::set_subs_old(T::MaxSubAccounts::get()) // P: Assume max sub accounts removed. - .saturating_add(T::WeightInfo::set_subs_new(subs.len() as u32)) // S: Assume all subs are new. + #[pallet::weight(T::WeightInfo::set_subs_old(T::MaxSubAccounts::get()) + .saturating_add(T::WeightInfo::set_subs_new(subs.len() as u32)) )] pub fn set_subs( origin: OriginFor, @@ -407,7 +397,7 @@ pub mod pallet { ); let (old_deposit, old_ids) = >::get(&sender); - let new_deposit = T::SubAccountDeposit::get() * >::from(subs.len() as u32); + let new_deposit = Self::subs_deposit(subs.len() as u32); let not_other_sub = subs.iter().filter_map(|i| SuperOf::::get(&i.0)).all(|i| i.0 == sender); @@ -453,24 +443,17 @@ pub mod pallet { /// identity. /// /// Emits `IdentityCleared` if successful. - /// - /// ## Complexity - /// - `O(R + S + X)` - /// - where `R` registrar-count (governance-bounded). - /// - where `S` subs-count (hard- and deposit-bounded). - /// - where `X` additional-field-count (deposit-bounded and code-bounded). 
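The deposit in the `set_identity` hunk above is now purely size-based; a worked, self-contained illustration (the constants are made up):

/// Mirrors `BasicDeposit + ByteDeposit * info.encoded_size()` from the hunk above.
fn identity_deposit(basic_deposit: u128, byte_deposit: u128, encoded_size: u32) -> u128 {
    basic_deposit.saturating_add(byte_deposit.saturating_mul(encoded_size as u128))
}

fn main() {
    // e.g. BasicDeposit = 10, ByteDeposit = 1: a 300-byte encoded identity reserves 310.
    assert_eq!(identity_deposit(10, 1, 300), 310);
}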
#[pallet::call_index(3)] #[pallet::weight(T::WeightInfo::clear_identity( - T::MaxRegistrars::get(), // R - T::MaxSubAccounts::get(), // S - T::MaxAdditionalFields::get(), // X + T::MaxRegistrars::get(), + T::MaxSubAccounts::get(), ))] pub fn clear_identity(origin: OriginFor) -> DispatchResultWithPostInfo { let sender = ensure_signed(origin)?; let (subs_deposit, sub_ids) = >::take(&sender); let id = >::take(&sender).ok_or(Error::::NotNamed)?; - let deposit = id.total_deposit() + subs_deposit; + let deposit = id.total_deposit().saturating_add(subs_deposit); for sub in sub_ids.iter() { >::remove(sub); } @@ -480,10 +463,10 @@ pub mod pallet { Self::deposit_event(Event::IdentityCleared { who: sender, deposit }); + #[allow(deprecated)] Ok(Some(T::WeightInfo::clear_identity( - id.judgements.len() as u32, // R - sub_ids.len() as u32, // S - id.info.additional.len() as u32, // X + id.judgements.len() as u32, + sub_ids.len() as u32, )) .into()) } @@ -504,16 +487,8 @@ pub mod pallet { /// ``` /// /// Emits `JudgementRequested` if successful. - /// - /// ## Complexity - /// - `O(R + X)`. - /// - where `R` registrar-count (governance-bounded). - /// - where `X` additional-field-count (deposit-bounded and code-bounded). #[pallet::call_index(4)] - #[pallet::weight(T::WeightInfo::request_judgement( - T::MaxRegistrars::get(), // R - T::MaxAdditionalFields::get(), // X - ))] + #[pallet::weight(T::WeightInfo::request_judgement(T::MaxRegistrars::get(),))] pub fn request_judgement( origin: OriginFor, #[pallet::compact] reg_index: RegistrarIndex, @@ -543,7 +518,6 @@ pub mod pallet { T::Currency::reserve(&sender, registrar.fee)?; let judgements = id.judgements.len(); - let extra_fields = id.info.additional.len(); >::insert(&sender, id); Self::deposit_event(Event::JudgementRequested { @@ -551,8 +525,7 @@ pub mod pallet { registrar_index: reg_index, }); - Ok(Some(T::WeightInfo::request_judgement(judgements as u32, extra_fields as u32)) - .into()) + Ok(Some(T::WeightInfo::request_judgement(judgements as u32)).into()) } /// Cancel a previous request. @@ -565,16 +538,8 @@ pub mod pallet { /// - `reg_index`: The index of the registrar whose judgement is no longer requested. /// /// Emits `JudgementUnrequested` if successful. - /// - /// ## Complexity - /// - `O(R + X)`. - /// - where `R` registrar-count (governance-bounded). - /// - where `X` additional-field-count (deposit-bounded and code-bounded). #[pallet::call_index(5)] - #[pallet::weight(T::WeightInfo::cancel_request( - T::MaxRegistrars::get(), // R - T::MaxAdditionalFields::get(), // X - ))] + #[pallet::weight(T::WeightInfo::cancel_request(T::MaxRegistrars::get()))] pub fn cancel_request( origin: OriginFor, reg_index: RegistrarIndex, @@ -595,7 +560,6 @@ pub mod pallet { let err_amount = T::Currency::unreserve(&sender, fee); debug_assert!(err_amount.is_zero()); let judgements = id.judgements.len(); - let extra_fields = id.info.additional.len(); >::insert(&sender, id); Self::deposit_event(Event::JudgementUnrequested { @@ -603,7 +567,7 @@ pub mod pallet { registrar_index: reg_index, }); - Ok(Some(T::WeightInfo::cancel_request(judgements as u32, extra_fields as u32)).into()) + Ok(Some(T::WeightInfo::cancel_request(judgements as u32)).into()) } /// Set the fee required for a judgement to be requested from a registrar. @@ -613,12 +577,8 @@ pub mod pallet { /// /// - `index`: the index of the registrar whose fee is to be set. /// - `fee`: the new fee. - /// - /// ## Complexity - /// - `O(R)`. - /// - where `R` registrar-count (governance-bounded). 
#[pallet::call_index(6)] - #[pallet::weight(T::WeightInfo::set_fee(T::MaxRegistrars::get()))] // R + #[pallet::weight(T::WeightInfo::set_fee(T::MaxRegistrars::get()))] pub fn set_fee( origin: OriginFor, #[pallet::compact] index: RegistrarIndex, @@ -640,7 +600,7 @@ pub mod pallet { .ok_or_else(|| DispatchError::from(Error::::InvalidIndex))?; Ok(rs.len()) })?; - Ok(Some(T::WeightInfo::set_fee(registrars as u32)).into()) // R + Ok(Some(T::WeightInfo::set_fee(registrars as u32)).into()) } /// Change the account associated with a registrar. @@ -650,12 +610,8 @@ pub mod pallet { /// /// - `index`: the index of the registrar whose fee is to be set. /// - `new`: the new account ID. - /// - /// ## Complexity - /// - `O(R)`. - /// - where `R` registrar-count (governance-bounded). #[pallet::call_index(7)] - #[pallet::weight(T::WeightInfo::set_account_id(T::MaxRegistrars::get()))] // R + #[pallet::weight(T::WeightInfo::set_account_id(T::MaxRegistrars::get()))] pub fn set_account_id( origin: OriginFor, #[pallet::compact] index: RegistrarIndex, @@ -678,7 +634,7 @@ pub mod pallet { .ok_or_else(|| DispatchError::from(Error::::InvalidIndex))?; Ok(rs.len()) })?; - Ok(Some(T::WeightInfo::set_account_id(registrars as u32)).into()) // R + Ok(Some(T::WeightInfo::set_account_id(registrars as u32)).into()) } /// Set the field information for a registrar. @@ -688,37 +644,27 @@ pub mod pallet { /// /// - `index`: the index of the registrar whose fee is to be set. /// - `fields`: the fields that the registrar concerns themselves with. - /// - /// ## Complexity - /// - `O(R)`. - /// - where `R` registrar-count (governance-bounded). #[pallet::call_index(8)] - #[pallet::weight(T::WeightInfo::set_fields(T::MaxRegistrars::get()))] // R + #[pallet::weight(T::WeightInfo::set_fields(T::MaxRegistrars::get()))] pub fn set_fields( origin: OriginFor, #[pallet::compact] index: RegistrarIndex, - fields: IdentityFields, + fields: ::FieldsIdentifier, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; - let registrars = >::mutate(|rs| -> Result { - rs.get_mut(index as usize) - .and_then(|x| x.as_mut()) - .and_then(|r| { - if r.account == who { - r.fields = fields; - Some(()) - } else { - None - } - }) - .ok_or_else(|| DispatchError::from(Error::::InvalidIndex))?; - Ok(rs.len()) - })?; - Ok(Some(T::WeightInfo::set_fields( - registrars as u32, // R - )) - .into()) + let registrars = + >::mutate(|registrars| -> Result { + let registrar = registrars + .get_mut(index as usize) + .and_then(|r| r.as_mut()) + .filter(|r| r.account == who) + .ok_or_else(|| DispatchError::from(Error::::InvalidIndex))?; + registrar.fields = fields; + + Ok(registrars.len()) + })?; + Ok(Some(T::WeightInfo::set_fields(registrars as u32)).into()) } /// Provide a judgement for an account's identity. @@ -730,19 +676,12 @@ pub mod pallet { /// - `target`: the account whose identity the judgement is upon. This must be an account /// with a registered identity. /// - `judgement`: the judgement of the registrar of index `reg_index` about `target`. - /// - `identity`: The hash of the [`IdentityInfo`] for that the judgement is provided. + /// - `identity`: The hash of the [`IdentityInformationProvider`] for that the judgement is + /// provided. /// /// Emits `JudgementGiven` if successful. - /// - /// ## Complexity - /// - `O(R + X)`. - /// - where `R` registrar-count (governance-bounded). - /// - where `X` additional-field-count (deposit-bounded and code-bounded). 
#[pallet::call_index(9)] - #[pallet::weight(T::WeightInfo::provide_judgement( - T::MaxRegistrars::get(), // R - T::MaxAdditionalFields::get(), // X - ))] + #[pallet::weight(T::WeightInfo::provide_judgement(T::MaxRegistrars::get()))] pub fn provide_judgement( origin: OriginFor, #[pallet::compact] reg_index: RegistrarIndex, @@ -785,12 +724,10 @@ pub mod pallet { } let judgements = id.judgements.len(); - let extra_fields = id.info.additional.len(); >::insert(&target, id); Self::deposit_event(Event::JudgementGiven { target, registrar_index: reg_index }); - Ok(Some(T::WeightInfo::provide_judgement(judgements as u32, extra_fields as u32)) - .into()) + Ok(Some(T::WeightInfo::provide_judgement(judgements as u32)).into()) } /// Remove an account's identity and sub-account information and slash the deposits. @@ -805,17 +742,10 @@ pub mod pallet { /// with a registered identity. /// /// Emits `IdentityKilled` if successful. - /// - /// ## Complexity - /// - `O(R + S + X)` - /// - where `R` registrar-count (governance-bounded). - /// - where `S` subs-count (hard- and deposit-bounded). - /// - where `X` additional-field-count (deposit-bounded and code-bounded). #[pallet::call_index(10)] #[pallet::weight(T::WeightInfo::kill_identity( - T::MaxRegistrars::get(), // R - T::MaxSubAccounts::get(), // S - T::MaxAdditionalFields::get(), // X + T::MaxRegistrars::get(), + T::MaxSubAccounts::get(), ))] pub fn kill_identity( origin: OriginFor, @@ -828,7 +758,7 @@ pub mod pallet { // Grab their deposit (and check that they have one). let (subs_deposit, sub_ids) = >::take(&target); let id = >::take(&target).ok_or(Error::::NotNamed)?; - let deposit = id.total_deposit() + subs_deposit; + let deposit = id.total_deposit().saturating_add(subs_deposit); for sub in sub_ids.iter() { >::remove(sub); } @@ -837,12 +767,9 @@ pub mod pallet { Self::deposit_event(Event::IdentityKilled { who: target, deposit }); - Ok(Some(T::WeightInfo::kill_identity( - id.judgements.len() as u32, // R - sub_ids.len() as u32, // S - id.info.additional.len() as u32, // X - )) - .into()) + #[allow(deprecated)] + Ok(Some(T::WeightInfo::kill_identity(id.judgements.len() as u32, sub_ids.len() as u32)) + .into()) } /// Add the given account to the sender's subs. @@ -972,9 +899,138 @@ impl Pallet { .collect() } + /// Calculate the deposit required for a number of `sub` accounts. + fn subs_deposit(subs: u32) -> BalanceOf { + T::SubAccountDeposit::get().saturating_mul(>::from(subs)) + } + + /// Take the `current` deposit that `who` is holding, and update it to a `new` one. + fn rejig_deposit( + who: &T::AccountId, + current: BalanceOf, + new: BalanceOf, + ) -> DispatchResult { + if new > current { + T::Currency::reserve(who, new - current)?; + } else if new < current { + let err_amount = T::Currency::unreserve(who, current - new); + debug_assert!(err_amount.is_zero()); + } + Ok(()) + } + /// Check if the account has corresponding identity information by the identity field. - pub fn has_identity(who: &T::AccountId, fields: u64) -> bool { + pub fn has_identity( + who: &T::AccountId, + fields: ::FieldsIdentifier, + ) -> bool { IdentityOf::::get(who) - .map_or(false, |registration| (registration.info.fields().0.bits() & fields) == fields) + .map_or(false, |registration| (registration.info.has_identity(fields))) + } + + /// Reap an identity, clearing associated storage items and refunding any deposits. 
This + /// function is very similar to (a) `clear_identity`, but called on a `target` account instead + /// of self; and (b) `kill_identity`, but without imposing a slash. + /// + /// Parameters: + /// - `target`: The account for which to reap identity state. + /// + /// Return type is a tuple of the number of registrars, `IdentityInfo` bytes, and sub accounts, + /// respectively. + /// + /// NOTE: This function is here temporarily for migration of Identity info from the Polkadot + /// Relay Chain into a system parachain. It will be removed after the migration. + pub fn reap_identity(who: &T::AccountId) -> Result<(u32, u32, u32), DispatchError> { + // `take` any storage items keyed by `target` + // identity + let id = >::take(&who).ok_or(Error::::NotNamed)?; + let registrars = id.judgements.len() as u32; + let encoded_byte_size = id.info.encoded_size() as u32; + + // subs + let (subs_deposit, sub_ids) = >::take(&who); + let actual_subs = sub_ids.len() as u32; + for sub in sub_ids.iter() { + >::remove(sub); + } + + // unreserve any deposits + let deposit = id.total_deposit().saturating_add(subs_deposit); + let err_amount = T::Currency::unreserve(&who, deposit); + debug_assert!(err_amount.is_zero()); + Ok((registrars, encoded_byte_size, actual_subs)) + } + + /// Update the deposits held by `target` for its identity info. + /// + /// Parameters: + /// - `target`: The account for which to update deposits. + /// + /// Return type is a tuple of the new Identity and Subs deposits, respectively. + /// + /// NOTE: This function is here temporarily for migration of Identity info from the Polkadot + /// Relay Chain into a system parachain. It will be removed after the migration. + pub fn poke_deposit( + target: &T::AccountId, + ) -> Result<(BalanceOf, BalanceOf), DispatchError> { + // Identity Deposit + let new_id_deposit = IdentityOf::::try_mutate( + &target, + |registration| -> Result, DispatchError> { + let reg = registration.as_mut().ok_or(Error::::NoIdentity)?; + // Calculate what deposit should be + let encoded_byte_size = reg.info.encoded_size() as u32; + let byte_deposit = + T::ByteDeposit::get().saturating_mul(>::from(encoded_byte_size)); + let new_id_deposit = T::BasicDeposit::get().saturating_add(byte_deposit); + + // Update account + Self::rejig_deposit(&target, reg.deposit, new_id_deposit)?; + + reg.deposit = new_id_deposit; + Ok(new_id_deposit) + }, + )?; + + // Subs Deposit + let new_subs_deposit = SubsOf::::try_mutate( + &target, + |(current_subs_deposit, subs_of)| -> Result, DispatchError> { + let new_subs_deposit = Self::subs_deposit(subs_of.len() as u32); + Self::rejig_deposit(&target, *current_subs_deposit, new_subs_deposit)?; + *current_subs_deposit = new_subs_deposit; + Ok(new_subs_deposit) + }, + )?; + Ok((new_id_deposit, new_subs_deposit)) + } + + /// Set an identity with zero deposit. Only used for benchmarking that involves `rejig_deposit`. + #[cfg(feature = "runtime-benchmarks")] + pub fn set_identity_no_deposit( + who: &T::AccountId, + info: T::IdentityInformation, + ) -> DispatchResult { + IdentityOf::::insert( + &who, + Registration { + judgements: Default::default(), + deposit: Zero::zero(), + info: info.clone(), + }, + ); + Ok(()) + } + + /// Set subs with zero deposit. Only used for benchmarking that involves `rejig_deposit`. 
+ #[cfg(feature = "runtime-benchmarks")] + pub fn set_sub_no_deposit(who: &T::AccountId, sub: T::AccountId) -> DispatchResult { + use frame_support::BoundedVec; + let subs = BoundedVec::<_, T::MaxSubAccounts>::try_from(vec![sub]).unwrap(); + SubsOf::::insert::< + &T::AccountId, + (BalanceOf, BoundedVec), + >(&who, (Zero::zero(), subs)); + Ok(()) } } diff --git a/substrate/frame/identity/src/tests.rs b/substrate/frame/identity/src/tests.rs index 1532980574c2a58cb37f81807f306ac6f85ae96d..78074df933a74429cc678c281f795ed62f5fc115 100644 --- a/substrate/frame/identity/src/tests.rs +++ b/substrate/frame/identity/src/tests.rs @@ -18,12 +18,15 @@ // Tests for Identity Pallet use super::*; -use crate as pallet_identity; +use crate::{ + self as pallet_identity, + legacy::{IdentityField, IdentityInfo}, +}; use codec::{Decode, Encode}; use frame_support::{ assert_noop, assert_ok, ord_parameter_types, parameter_types, - traits::{ConstU32, ConstU64, EitherOfDiverse}, + traits::{ConstU32, ConstU64, EitherOfDiverse, Get}, BoundedVec, }; use frame_system::{EnsureRoot, EnsureSignedBy}; @@ -83,6 +86,7 @@ impl pallet_balances::Config for Test { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = (); } @@ -101,11 +105,11 @@ impl pallet_identity::Config for Test { type RuntimeEvent = RuntimeEvent; type Currency = Balances; type Slashed = (); - type BasicDeposit = ConstU64<10>; - type FieldDeposit = ConstU64<10>; - type SubAccountDeposit = ConstU64<10>; + type BasicDeposit = ConstU64<100>; + type ByteDeposit = ConstU64<10>; + type SubAccountDeposit = ConstU64<100>; type MaxSubAccounts = ConstU32<2>; - type MaxAdditionalFields = MaxAdditionalFields; + type IdentityInformation = IdentityInfo; type MaxRegistrars = MaxRegistrars; type RegistrarOrigin = EnsureOneOrRoot; type ForceOrigin = EnsureTwoOrRoot; @@ -115,7 +119,7 @@ impl pallet_identity::Config for Test { pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); pallet_balances::GenesisConfig:: { - balances: vec![(1, 10), (2, 10), (3, 10), (10, 100), (20, 100), (30, 100)], + balances: vec![(1, 100), (2, 100), (3, 100), (10, 1000), (20, 1000), (30, 1000)], } .assimilate_storage(&mut t) .unwrap(); @@ -138,6 +142,47 @@ fn twenty() -> IdentityInfo { } } +fn id_deposit(id: &IdentityInfo) -> u64 { + let base_deposit: u64 = <::BasicDeposit as Get>::get(); + let byte_deposit: u64 = <::ByteDeposit as Get>::get() * + TryInto::::try_into(id.encoded_size()).unwrap(); + base_deposit + byte_deposit +} + +#[test] +fn identity_fields_repr_works() { + // `IdentityField` sanity checks. 
+ assert_eq!(IdentityField::Display as u64, 1 << 0); + assert_eq!(IdentityField::Legal as u64, 1 << 1); + assert_eq!(IdentityField::Web as u64, 1 << 2); + assert_eq!(IdentityField::Riot as u64, 1 << 3); + assert_eq!(IdentityField::Email as u64, 1 << 4); + assert_eq!(IdentityField::PgpFingerprint as u64, 1 << 5); + assert_eq!(IdentityField::Image as u64, 1 << 6); + assert_eq!(IdentityField::Twitter as u64, 1 << 7); + + let fields = IdentityField::Legal | + IdentityField::Web | + IdentityField::Riot | + IdentityField::PgpFingerprint | + IdentityField::Twitter; + + assert!(!fields.contains(IdentityField::Display)); + assert!(fields.contains(IdentityField::Legal)); + assert!(fields.contains(IdentityField::Web)); + assert!(fields.contains(IdentityField::Riot)); + assert!(!fields.contains(IdentityField::Email)); + assert!(fields.contains(IdentityField::PgpFingerprint)); + assert!(!fields.contains(IdentityField::Image)); + assert!(fields.contains(IdentityField::Twitter)); + + // Ensure that the `u64` representation matches what we expect. + assert_eq!( + fields.bits(), + 0b00000000_00000000_00000000_00000000_00000000_00000000_00000000_10101110 + ); +} + #[test] fn editing_subaccounts_should_work() { new_test_ext().execute_with(|| { @@ -148,18 +193,23 @@ fn editing_subaccounts_should_work() { Error::::NoIdentity ); - assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten()))); + let ten = ten(); + assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten.clone()))); + let id_deposit = id_deposit(&ten); + assert_eq!(Balances::free_balance(10), 1000 - id_deposit); + + let sub_deposit: u64 = <::SubAccountDeposit as Get>::get(); // first sub account assert_ok!(Identity::add_sub(RuntimeOrigin::signed(10), 1, data(1))); assert_eq!(SuperOf::::get(1), Some((10, data(1)))); - assert_eq!(Balances::free_balance(10), 80); + assert_eq!(Balances::free_balance(10), 1000 - id_deposit - sub_deposit); // second sub account assert_ok!(Identity::add_sub(RuntimeOrigin::signed(10), 2, data(2))); assert_eq!(SuperOf::::get(1), Some((10, data(1)))); assert_eq!(SuperOf::::get(2), Some((10, data(2)))); - assert_eq!(Balances::free_balance(10), 70); + assert_eq!(Balances::free_balance(10), 1000 - id_deposit - 2 * sub_deposit); // third sub account is too many assert_noop!( @@ -171,20 +221,20 @@ fn editing_subaccounts_should_work() { assert_ok!(Identity::rename_sub(RuntimeOrigin::signed(10), 1, data(11))); assert_eq!(SuperOf::::get(1), Some((10, data(11)))); assert_eq!(SuperOf::::get(2), Some((10, data(2)))); - assert_eq!(Balances::free_balance(10), 70); + assert_eq!(Balances::free_balance(10), 1000 - id_deposit - 2 * sub_deposit); // remove first sub account assert_ok!(Identity::remove_sub(RuntimeOrigin::signed(10), 1)); assert_eq!(SuperOf::::get(1), None); assert_eq!(SuperOf::::get(2), Some((10, data(2)))); - assert_eq!(Balances::free_balance(10), 80); + assert_eq!(Balances::free_balance(10), 1000 - id_deposit - sub_deposit); // add third sub account assert_ok!(Identity::add_sub(RuntimeOrigin::signed(10), 3, data(3))); assert_eq!(SuperOf::::get(1), None); assert_eq!(SuperOf::::get(2), Some((10, data(2)))); assert_eq!(SuperOf::::get(3), Some((10, data(3)))); - assert_eq!(Balances::free_balance(10), 70); + assert_eq!(Balances::free_balance(10), 1000 - id_deposit - 2 * sub_deposit); }); } @@ -192,15 +242,22 @@ fn editing_subaccounts_should_work() { fn resolving_subaccount_ownership_works() { new_test_ext().execute_with(|| { let data = |x| Data::Raw(vec![x; 1].try_into().unwrap()); + let 
sub_deposit: u64 = <::SubAccountDeposit as Get>::get(); - assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten()))); - assert_ok!(Identity::set_identity(RuntimeOrigin::signed(20), Box::new(twenty()))); + let ten = ten(); + let ten_deposit = id_deposit(&ten); + let twenty = twenty(); + let twenty_deposit = id_deposit(&twenty); + assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten))); + assert_eq!(Balances::free_balance(10), 1000 - ten_deposit); + assert_ok!(Identity::set_identity(RuntimeOrigin::signed(20), Box::new(twenty))); + assert_eq!(Balances::free_balance(20), 1000 - twenty_deposit); // 10 claims 1 as a subaccount assert_ok!(Identity::add_sub(RuntimeOrigin::signed(10), 1, data(1))); - assert_eq!(Balances::free_balance(1), 10); - assert_eq!(Balances::free_balance(10), 80); - assert_eq!(Balances::reserved_balance(10), 20); + assert_eq!(Balances::free_balance(1), 100); + assert_eq!(Balances::free_balance(10), 1000 - ten_deposit - sub_deposit); + assert_eq!(Balances::reserved_balance(10), ten_deposit + sub_deposit); // 20 cannot claim 1 now assert_noop!( Identity::add_sub(RuntimeOrigin::signed(20), 1, data(1)), @@ -209,9 +266,9 @@ fn resolving_subaccount_ownership_works() { // 1 wants to be with 20 so it quits from 10 assert_ok!(Identity::quit_sub(RuntimeOrigin::signed(1))); // 1 gets the 10 that 10 paid. - assert_eq!(Balances::free_balance(1), 20); - assert_eq!(Balances::free_balance(10), 80); - assert_eq!(Balances::reserved_balance(10), 10); + assert_eq!(Balances::free_balance(1), 100 + sub_deposit); + assert_eq!(Balances::free_balance(10), 1000 - ten_deposit - sub_deposit); + assert_eq!(Balances::reserved_balance(10), ten_deposit); // 20 can claim 1 now assert_ok!(Identity::add_sub(RuntimeOrigin::signed(20), 1, data(1))); }); @@ -227,16 +284,29 @@ fn trailing_zeros_decodes_into_default_data() { assert_eq!(b, Data::None); } +#[test] +fn adding_registrar_invalid_index() { + new_test_ext().execute_with(|| { + assert_ok!(Identity::add_registrar(RuntimeOrigin::signed(1), 3)); + assert_ok!(Identity::set_fee(RuntimeOrigin::signed(3), 0, 10)); + let fields = IdentityField::Display | IdentityField::Legal; + assert_noop!( + Identity::set_fields(RuntimeOrigin::signed(3), 100, fields.bits()), + Error::::InvalidIndex + ); + }); +} + #[test] fn adding_registrar_should_work() { new_test_ext().execute_with(|| { assert_ok!(Identity::add_registrar(RuntimeOrigin::signed(1), 3)); assert_ok!(Identity::set_fee(RuntimeOrigin::signed(3), 0, 10)); - let fields = IdentityFields(IdentityField::Display | IdentityField::Legal); - assert_ok!(Identity::set_fields(RuntimeOrigin::signed(3), 0, fields)); + let fields = IdentityField::Display | IdentityField::Legal; + assert_ok!(Identity::set_fields(RuntimeOrigin::signed(3), 0, fields.bits())); assert_eq!( Identity::registrars(), - vec![Some(RegistrarInfo { account: 3, fee: 10, fields })] + vec![Some(RegistrarInfo { account: 3, fee: 10, fields: fields.bits() })] ); }); } @@ -264,11 +334,13 @@ fn registration_should_work() { three_fields.additional.try_push(Default::default()).unwrap(); three_fields.additional.try_push(Default::default()).unwrap(); assert!(three_fields.additional.try_push(Default::default()).is_err()); - assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten()))); - assert_eq!(Identity::identity(10).unwrap().info, ten()); - assert_eq!(Balances::free_balance(10), 90); + let ten = ten(); + let id_deposit = id_deposit(&ten); + assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), 
Box::new(ten.clone()))); + assert_eq!(Identity::identity(10).unwrap().info, ten); + assert_eq!(Balances::free_balance(10), 1000 - id_deposit); assert_ok!(Identity::clear_identity(RuntimeOrigin::signed(10))); - assert_eq!(Balances::free_balance(10), 100); + assert_eq!(Balances::free_balance(10), 1000); assert_noop!(Identity::clear_identity(RuntimeOrigin::signed(10)), Error::::NotNamed); }); } @@ -365,11 +437,13 @@ fn clearing_judgement_should_work() { #[test] fn killing_slashing_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten()))); + let ten = ten(); + let id_deposit = id_deposit(&ten); + assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten))); assert_noop!(Identity::kill_identity(RuntimeOrigin::signed(1), 10), BadOrigin); assert_ok!(Identity::kill_identity(RuntimeOrigin::signed(2), 10)); assert_eq!(Identity::identity(10), None); - assert_eq!(Balances::free_balance(10), 90); + assert_eq!(Balances::free_balance(10), 1000 - id_deposit); assert_noop!( Identity::kill_identity(RuntimeOrigin::signed(2), 10), Error::::NotNamed @@ -380,38 +454,43 @@ fn killing_slashing_should_work() { #[test] fn setting_subaccounts_should_work() { new_test_ext().execute_with(|| { + let ten = ten(); + let id_deposit = id_deposit(&ten); + let sub_deposit: u64 = <::SubAccountDeposit as Get>::get(); let mut subs = vec![(20, Data::Raw(vec![40; 1].try_into().unwrap()))]; assert_noop!( Identity::set_subs(RuntimeOrigin::signed(10), subs.clone()), Error::::NotFound ); - assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten()))); + assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten))); + assert_eq!(Balances::free_balance(10), 1000 - id_deposit); assert_ok!(Identity::set_subs(RuntimeOrigin::signed(10), subs.clone())); - assert_eq!(Balances::free_balance(10), 80); - assert_eq!(Identity::subs_of(10), (10, vec![20].try_into().unwrap())); + assert_eq!(Balances::free_balance(10), 1000 - id_deposit - sub_deposit); + assert_eq!(Identity::subs_of(10), (sub_deposit, vec![20].try_into().unwrap())); assert_eq!(Identity::super_of(20), Some((10, Data::Raw(vec![40; 1].try_into().unwrap())))); // push another item and re-set it. subs.push((30, Data::Raw(vec![50; 1].try_into().unwrap()))); assert_ok!(Identity::set_subs(RuntimeOrigin::signed(10), subs.clone())); - assert_eq!(Balances::free_balance(10), 70); - assert_eq!(Identity::subs_of(10), (20, vec![20, 30].try_into().unwrap())); + assert_eq!(Balances::free_balance(10), 1000 - id_deposit - 2 * sub_deposit); + assert_eq!(Identity::subs_of(10), (2 * sub_deposit, vec![20, 30].try_into().unwrap())); assert_eq!(Identity::super_of(20), Some((10, Data::Raw(vec![40; 1].try_into().unwrap())))); assert_eq!(Identity::super_of(30), Some((10, Data::Raw(vec![50; 1].try_into().unwrap())))); // switch out one of the items and re-set. 
subs[0] = (40, Data::Raw(vec![60; 1].try_into().unwrap())); assert_ok!(Identity::set_subs(RuntimeOrigin::signed(10), subs.clone())); - assert_eq!(Balances::free_balance(10), 70); // no change in the balance - assert_eq!(Identity::subs_of(10), (20, vec![40, 30].try_into().unwrap())); + // no change in the balance + assert_eq!(Balances::free_balance(10), 1000 - id_deposit - 2 * sub_deposit); + assert_eq!(Identity::subs_of(10), (2 * sub_deposit, vec![40, 30].try_into().unwrap())); assert_eq!(Identity::super_of(20), None); assert_eq!(Identity::super_of(30), Some((10, Data::Raw(vec![50; 1].try_into().unwrap())))); assert_eq!(Identity::super_of(40), Some((10, Data::Raw(vec![60; 1].try_into().unwrap())))); // clear assert_ok!(Identity::set_subs(RuntimeOrigin::signed(10), vec![])); - assert_eq!(Balances::free_balance(10), 90); + assert_eq!(Balances::free_balance(10), 1000 - id_deposit); assert_eq!(Identity::subs_of(10), (0, BoundedVec::default())); assert_eq!(Identity::super_of(30), None); assert_eq!(Identity::super_of(40), None); @@ -427,13 +506,15 @@ fn setting_subaccounts_should_work() { #[test] fn clearing_account_should_remove_subaccounts_and_refund() { new_test_ext().execute_with(|| { - assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten()))); + let ten = ten(); + assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten.clone()))); + assert_eq!(Balances::free_balance(10), 1000 - id_deposit(&ten)); assert_ok!(Identity::set_subs( RuntimeOrigin::signed(10), vec![(20, Data::Raw(vec![40; 1].try_into().unwrap()))] )); assert_ok!(Identity::clear_identity(RuntimeOrigin::signed(10))); - assert_eq!(Balances::free_balance(10), 100); + assert_eq!(Balances::free_balance(10), 1000); assert!(Identity::super_of(20).is_none()); }); } @@ -441,13 +522,18 @@ fn clearing_account_should_remove_subaccounts_and_refund() { #[test] fn killing_account_should_remove_subaccounts_and_not_refund() { new_test_ext().execute_with(|| { - assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten()))); + let ten = ten(); + let id_deposit = id_deposit(&ten); + let sub_deposit: u64 = <::SubAccountDeposit as Get>::get(); + assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten))); + assert_eq!(Balances::free_balance(10), 1000 - id_deposit); assert_ok!(Identity::set_subs( RuntimeOrigin::signed(10), vec![(20, Data::Raw(vec![40; 1].try_into().unwrap()))] )); + assert_eq!(Balances::free_balance(10), 1000 - id_deposit - sub_deposit); assert_ok!(Identity::kill_identity(RuntimeOrigin::signed(2), 10)); - assert_eq!(Balances::free_balance(10), 80); + assert_eq!(Balances::free_balance(10), 1000 - id_deposit - sub_deposit); assert!(Identity::super_of(20).is_none()); }); } @@ -461,10 +547,12 @@ fn cancelling_requested_judgement_should_work() { Identity::cancel_request(RuntimeOrigin::signed(10), 0), Error::::NoIdentity ); - assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten()))); + let ten = ten(); + assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten.clone()))); + assert_eq!(Balances::free_balance(10), 1000 - id_deposit(&ten)); assert_ok!(Identity::request_judgement(RuntimeOrigin::signed(10), 0, 10)); assert_ok!(Identity::cancel_request(RuntimeOrigin::signed(10), 0)); - assert_eq!(Balances::free_balance(10), 90); + assert_eq!(Balances::free_balance(10), 1000 - id_deposit(&ten)); assert_noop!( Identity::cancel_request(RuntimeOrigin::signed(10), 0), Error::::NotFound @@ -475,7 +563,7 @@ fn 
cancelling_requested_judgement_should_work() { 0, 10, Judgement::Reasonable, - BlakeTwo256::hash_of(&ten()) + BlakeTwo256::hash_of(&ten) )); assert_noop!( Identity::cancel_request(RuntimeOrigin::signed(10), 0), @@ -489,14 +577,17 @@ fn requesting_judgement_should_work() { new_test_ext().execute_with(|| { assert_ok!(Identity::add_registrar(RuntimeOrigin::signed(1), 3)); assert_ok!(Identity::set_fee(RuntimeOrigin::signed(3), 0, 10)); - assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten()))); + let ten = ten(); + let id_deposit = id_deposit(&ten); + assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten.clone()))); + assert_eq!(Balances::free_balance(10), 1000 - id_deposit); assert_noop!( Identity::request_judgement(RuntimeOrigin::signed(10), 0, 9), Error::::FeeChanged ); assert_ok!(Identity::request_judgement(RuntimeOrigin::signed(10), 0, 10)); - // 10 for the judgement request, 10 for the identity. - assert_eq!(Balances::free_balance(10), 80); + // 10 for the judgement request and the deposit for the identity. + assert_eq!(Balances::free_balance(10), 1000 - id_deposit - 10); // Re-requesting won't work as we already paid. assert_noop!( @@ -508,10 +599,11 @@ fn requesting_judgement_should_work() { 0, 10, Judgement::Erroneous, - BlakeTwo256::hash_of(&ten()) + BlakeTwo256::hash_of(&ten) )); // Registrar got their payment now. - assert_eq!(Balances::free_balance(3), 20); + // 100 initial balance and 10 for the judgement. + assert_eq!(Balances::free_balance(3), 100 + 10); // Re-requesting still won't work as it's erroneous. assert_noop!( @@ -529,7 +621,7 @@ fn requesting_judgement_should_work() { 0, 10, Judgement::OutOfDate, - BlakeTwo256::hash_of(&ten()) + BlakeTwo256::hash_of(&ten) )); assert_ok!(Identity::request_judgement(RuntimeOrigin::signed(10), 0, 10)); }); @@ -538,12 +630,14 @@ fn requesting_judgement_should_work() { #[test] fn provide_judgement_should_return_judgement_payment_failed_error() { new_test_ext().execute_with(|| { + let ten = ten(); + let id_deposit = id_deposit(&ten); assert_ok!(Identity::add_registrar(RuntimeOrigin::signed(1), 3)); assert_ok!(Identity::set_fee(RuntimeOrigin::signed(3), 0, 10)); - assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten()))); + assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten.clone()))); assert_ok!(Identity::request_judgement(RuntimeOrigin::signed(10), 0, 10)); - // 10 for the judgement request, 10 for the identity. - assert_eq!(Balances::free_balance(10), 80); + // 10 for the judgement request and the deposit for the identity. 
+ assert_eq!(Balances::free_balance(10), 1000 - id_deposit - 10); // This forces judgement payment failed error Balances::make_free_balance_be(&3, 0); @@ -553,7 +647,7 @@ fn provide_judgement_should_return_judgement_payment_failed_error() { 0, 10, Judgement::Erroneous, - BlakeTwo256::hash_of(&ten()) + BlakeTwo256::hash_of(&ten) ), Error::::JudgementPaymentFailed ); @@ -565,25 +659,24 @@ fn field_deposit_should_work() { new_test_ext().execute_with(|| { assert_ok!(Identity::add_registrar(RuntimeOrigin::signed(1), 3)); assert_ok!(Identity::set_fee(RuntimeOrigin::signed(3), 0, 10)); - assert_ok!(Identity::set_identity( - RuntimeOrigin::signed(10), - Box::new(IdentityInfo { - additional: vec![ - ( - Data::Raw(b"number".to_vec().try_into().unwrap()), - Data::Raw(10u32.encode().try_into().unwrap()) - ), - ( - Data::Raw(b"text".to_vec().try_into().unwrap()), - Data::Raw(b"10".to_vec().try_into().unwrap()) - ), - ] - .try_into() - .unwrap(), - ..Default::default() - }) - )); - assert_eq!(Balances::free_balance(10), 70); + let id = IdentityInfo { + additional: vec![ + ( + Data::Raw(b"number".to_vec().try_into().unwrap()), + Data::Raw(10u32.encode().try_into().unwrap()), + ), + ( + Data::Raw(b"text".to_vec().try_into().unwrap()), + Data::Raw(b"10".to_vec().try_into().unwrap()), + ), + ] + .try_into() + .unwrap(), + ..Default::default() + }; + let id_deposit = id_deposit(&id); + assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(id))); + assert_eq!(Balances::free_balance(10), 1000 - id_deposit); }); } @@ -619,3 +712,70 @@ fn test_has_identity() { )); }); } + +#[test] +fn reap_identity_works() { + new_test_ext().execute_with(|| { + let ten_info = ten(); + assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten_info.clone()))); + assert_ok!(Identity::set_subs( + RuntimeOrigin::signed(10), + vec![(20, Data::Raw(vec![40; 1].try_into().unwrap()))] + )); + // deposit is correct + let id_deposit = id_deposit(&ten_info); + let subs_deposit: u64 = <::SubAccountDeposit as Get>::get(); + assert_eq!(Balances::free_balance(10), 1000 - id_deposit - subs_deposit); + // reap + assert_ok!(Identity::reap_identity(&10)); + // no identity or subs + assert!(Identity::identity(10).is_none()); + assert!(Identity::super_of(20).is_none()); + // balance is unreserved + assert_eq!(Balances::free_balance(10), 1000); + }); +} + +#[test] +fn poke_deposit_works() { + new_test_ext().execute_with(|| { + let ten_info = ten(); + // Set a custom registration with 0 deposit + IdentityOf::::insert( + &10, + Registration { + judgements: BoundedVec::default(), + deposit: Zero::zero(), + info: ten_info.clone(), + }, + ); + assert!(Identity::identity(10).is_some()); + // Set a sub with zero deposit + SubsOf::::insert::<&u64, (u64, BoundedVec>)>( + &10, + (0, vec![20].try_into().unwrap()), + ); + SuperOf::::insert(&20, (&10, Data::Raw(vec![1; 1].try_into().unwrap()))); + // Balance is free + assert_eq!(Balances::free_balance(10), 1000); + + // poke + assert_ok!(Identity::poke_deposit(&10)); + + // free balance reduced correctly + let id_deposit = id_deposit(&ten_info); + let subs_deposit: u64 = <::SubAccountDeposit as Get>::get(); + assert_eq!(Balances::free_balance(10), 1000 - id_deposit - subs_deposit); + // new registration deposit is 10 + assert_eq!( + Identity::identity(&10), + Some(Registration { + judgements: BoundedVec::default(), + deposit: id_deposit, + info: ten() + }) + ); + // new subs deposit is 10 vvvvvvvvvvvv + assert_eq!(Identity::subs_of(10), (subs_deposit, vec![20].try_into().unwrap())); 
+ }); +} diff --git a/substrate/frame/identity/src/types.rs b/substrate/frame/identity/src/types.rs index 1837b30b027f9eca4327919c81c34917ab168f3b..d3e6bf3973f0b3130df4217097ac96015f2e83b3 100644 --- a/substrate/frame/identity/src/types.rs +++ b/substrate/frame/identity/src/types.rs @@ -17,18 +17,23 @@ use super::*; use codec::{Decode, Encode, MaxEncodedLen}; -use enumflags2::{bitflags, BitFlags}; use frame_support::{ traits::{ConstU32, Get}, BoundedVec, CloneNoBound, PartialEqNoBound, RuntimeDebugNoBound, }; use scale_info::{ build::{Fields, Variants}, - meta_type, Path, Type, TypeInfo, TypeParameter, + Path, Type, TypeInfo, +}; +use sp_runtime::{ + traits::{Member, Zero}, + RuntimeDebug, }; -use sp_runtime::{traits::Zero, RuntimeDebug}; use sp_std::{fmt::Debug, iter::once, ops::Add, prelude::*}; +/// An identifier for a single name registrar/identity verification service. +pub type RegistrarIndex = u32; + /// Either underlying data blob if it is at most 32 bytes, or a hash of it. If the data is greater /// than 32-bytes then it will be truncated when encoding. /// @@ -180,9 +185,6 @@ impl Default for Data { } } -/// An identifier for a single name registrar/identity verification service. -pub type RegistrarIndex = u32; - /// An attestation of a registrar over how accurate some `IdentityInfo` is in describing an account. /// /// NOTE: Registrars may pay little attention to some fields. Registrars may want to make clear @@ -228,143 +230,26 @@ impl` that implements `Codec`. -#[derive(Clone, Copy, PartialEq, Default, RuntimeDebug)] -pub struct IdentityFields(pub BitFlags); - -impl MaxEncodedLen for IdentityFields { - fn max_encoded_len() -> usize { - u64::max_encoded_len() - } -} - -impl Eq for IdentityFields {} -impl Encode for IdentityFields { - fn using_encoded R>(&self, f: F) -> R { - self.0.bits().using_encoded(f) - } -} -impl Decode for IdentityFields { - fn decode(input: &mut I) -> sp_std::result::Result { - let field = u64::decode(input)?; - Ok(Self(>::from_bits(field as u64).map_err(|_| "invalid value")?)) - } -} -impl TypeInfo for IdentityFields { - type Identity = Self; +/// Information concerning the identity of the controller of an account. +pub trait IdentityInformationProvider: + Encode + Decode + MaxEncodedLen + Clone + Debug + Eq + PartialEq + TypeInfo +{ + /// Type capable of holding information on which identity fields are set. + type FieldsIdentifier: Member + Encode + Decode + MaxEncodedLen + TypeInfo + Default; - fn type_info() -> Type { - Type::builder() - .path(Path::new("BitFlags", module_path!())) - .type_params(vec![TypeParameter::new("T", Some(meta_type::()))]) - .composite(Fields::unnamed().field(|f| f.ty::().type_name("IdentityField"))) - } -} + /// Check if an identity registered information for some given `fields`. + fn has_identity(&self, fields: Self::FieldsIdentifier) -> bool; -/// Information concerning the identity of the controller of an account. -/// -/// NOTE: This should be stored at the end of the storage item to facilitate the addition of extra -/// fields in a backwards compatible way through a specialized `Decode` impl. -#[derive( - CloneNoBound, Encode, Decode, Eq, MaxEncodedLen, PartialEqNoBound, RuntimeDebugNoBound, TypeInfo, -)] -#[codec(mel_bound())] -#[cfg_attr(test, derive(frame_support::DefaultNoBound))] -#[scale_info(skip_type_params(FieldLimit))] -pub struct IdentityInfo> { - /// Additional fields of the identity that are not catered for with the struct's explicit - /// fields. 
- pub additional: BoundedVec<(Data, Data), FieldLimit>, - - /// A reasonable display name for the controller of the account. This should be whatever it is - /// that it is typically known as and should not be confusable with other entities, given - /// reasonable context. - /// - /// Stored as UTF-8. - pub display: Data, - - /// The full legal name in the local jurisdiction of the entity. This might be a bit - /// long-winded. - /// - /// Stored as UTF-8. - pub legal: Data, - - /// A representative website held by the controller of the account. - /// - /// NOTE: `https://` is automatically prepended. - /// - /// Stored as UTF-8. - pub web: Data, - - /// The Riot/Matrix handle held by the controller of the account. - /// - /// Stored as UTF-8. - pub riot: Data, - - /// The email address of the controller of the account. - /// - /// Stored as UTF-8. - pub email: Data, - - /// The PGP/GPG public key of the controller of the account. - pub pgp_fingerprint: Option<[u8; 20]>, - - /// A graphic image representing the controller of the account. Should be a company, - /// organization or project logo or a headshot in the case of a human. - pub image: Data, - - /// The Twitter identity. The leading `@` character may be elided. - pub twitter: Data, -} + /// Create a basic instance of the identity information. + #[cfg(feature = "runtime-benchmarks")] + fn create_identity_info() -> Self; -impl> IdentityInfo { - pub(crate) fn fields(&self) -> IdentityFields { - let mut res = >::empty(); - if !self.display.is_none() { - res.insert(IdentityField::Display); - } - if !self.legal.is_none() { - res.insert(IdentityField::Legal); - } - if !self.web.is_none() { - res.insert(IdentityField::Web); - } - if !self.riot.is_none() { - res.insert(IdentityField::Riot); - } - if !self.email.is_none() { - res.insert(IdentityField::Email); - } - if self.pgp_fingerprint.is_some() { - res.insert(IdentityField::PgpFingerprint); - } - if !self.image.is_none() { - res.insert(IdentityField::Image); - } - if !self.twitter.is_none() { - res.insert(IdentityField::Twitter); - } - IdentityFields(res) - } + /// The identity information representation for all identity fields enabled. + #[cfg(feature = "runtime-benchmarks")] + fn all_fields() -> Self::FieldsIdentifier; } -/// Information concerning the identity of the controller of an account. +/// Information on an identity along with judgements from registrars. /// /// NOTE: This is stored separately primarily to facilitate the addition of extra fields in a /// backwards compatible way through a specialized `Decode` impl. @@ -372,11 +257,11 @@ impl> IdentityInfo { CloneNoBound, Encode, Eq, MaxEncodedLen, PartialEqNoBound, RuntimeDebugNoBound, TypeInfo, )] #[codec(mel_bound())] -#[scale_info(skip_type_params(MaxJudgements, MaxAdditionalFields))] +#[scale_info(skip_type_params(MaxJudgements))] pub struct Registration< Balance: Encode + Decode + MaxEncodedLen + Copy + Clone + Debug + Eq + PartialEq, MaxJudgements: Get, - MaxAdditionalFields: Get, + IdentityInfo: IdentityInformationProvider, > { /// Judgements from the registrars on this identity. Stored ordered by `RegistrarIndex`. There /// may be only a single judgement from each registrar. @@ -386,14 +271,14 @@ pub struct Registration< pub deposit: Balance, /// Information on the identity. 
- pub info: IdentityInfo, + pub info: IdentityInfo, } impl< Balance: Encode + Decode + MaxEncodedLen + Copy + Clone + Debug + Eq + PartialEq + Zero + Add, MaxJudgements: Get, - MaxAdditionalFields: Get, - > Registration + IdentityInfo: IdentityInformationProvider, + > Registration { pub(crate) fn total_deposit(&self) -> Balance { self.deposit + @@ -407,8 +292,8 @@ impl< impl< Balance: Encode + Decode + MaxEncodedLen + Copy + Clone + Debug + Eq + PartialEq, MaxJudgements: Get, - MaxAdditionalFields: Get, - > Decode for Registration + IdentityInfo: IdentityInformationProvider, + > Decode for Registration { fn decode(input: &mut I) -> sp_std::result::Result { let (judgements, deposit, info) = Decode::decode(&mut AppendZerosInput::new(input))?; @@ -421,6 +306,7 @@ impl< pub struct RegistrarInfo< Balance: Encode + Decode + Clone + Debug + Eq + PartialEq, AccountId: Encode + Decode + Clone + Debug + Eq + PartialEq, + IdField: Encode + Decode + Clone + Debug + Default + Eq + PartialEq + TypeInfo + MaxEncodedLen, > { /// The account of the registrar. pub account: AccountId, @@ -430,7 +316,7 @@ pub struct RegistrarInfo< /// Relevant fields for this registrar. Registrar judgements are limited to attestations on /// these fields. - pub fields: IdentityFields, + pub fields: IdField, } #[cfg(test)] diff --git a/substrate/frame/identity/src/weights.rs b/substrate/frame/identity/src/weights.rs index 02fcd7db3c95371ee32e050f37c2db7a3490063a..95898e6c6cdff0a18ace79b1311d82bb4109e89b 100644 --- a/substrate/frame/identity/src/weights.rs +++ b/substrate/frame/identity/src/weights.rs @@ -53,17 +53,17 @@ use core::marker::PhantomData; /// Weight functions needed for pallet_identity. pub trait WeightInfo { fn add_registrar(r: u32, ) -> Weight; - fn set_identity(r: u32, x: u32, ) -> Weight; + fn set_identity(r: u32, ) -> Weight; fn set_subs_new(s: u32, ) -> Weight; fn set_subs_old(p: u32, ) -> Weight; - fn clear_identity(r: u32, s: u32, x: u32, ) -> Weight; - fn request_judgement(r: u32, x: u32, ) -> Weight; - fn cancel_request(r: u32, x: u32, ) -> Weight; + fn clear_identity(r: u32, s: u32, ) -> Weight; + fn request_judgement(r: u32, ) -> Weight; + fn cancel_request(r: u32, ) -> Weight; fn set_fee(r: u32, ) -> Weight; fn set_account_id(r: u32, ) -> Weight; fn set_fields(r: u32, ) -> Weight; - fn provide_judgement(r: u32, x: u32, ) -> Weight; - fn kill_identity(r: u32, s: u32, x: u32, ) -> Weight; + fn provide_judgement(r: u32, ) -> Weight; + fn kill_identity(r: u32, s: u32, ) -> Weight; fn add_sub(s: u32, ) -> Weight; fn rename_sub(s: u32, ) -> Weight; fn remove_sub(s: u32, ) -> Weight; @@ -90,8 +90,7 @@ impl WeightInfo for SubstrateWeight { /// Storage: Identity IdentityOf (r:1 w:1) /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) /// The range of component `r` is `[1, 20]`. - /// The range of component `x` is `[0, 100]`. 
- fn set_identity(r: u32, x: u32, ) -> Weight { + fn set_identity(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `442 + r * (5 ±0)` // Estimated: `11003` @@ -99,8 +98,6 @@ impl WeightInfo for SubstrateWeight { Weight::from_parts(31_329_634, 11003) // Standard Error: 4_496 .saturating_add(Weight::from_parts(203_570, 0).saturating_mul(r.into())) - // Standard Error: 877 - .saturating_add(Weight::from_parts(429_346, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -152,8 +149,7 @@ impl WeightInfo for SubstrateWeight { /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) /// The range of component `r` is `[1, 20]`. /// The range of component `s` is `[0, 100]`. - /// The range of component `x` is `[0, 100]`. - fn clear_identity(r: u32, s: u32, x: u32, ) -> Weight { + fn clear_identity(r: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `469 + r * (5 ±0) + s * (32 ±0) + x * (66 ±0)` // Estimated: `11003` @@ -163,8 +159,6 @@ impl WeightInfo for SubstrateWeight { .saturating_add(Weight::from_parts(162_357, 0).saturating_mul(r.into())) // Standard Error: 1_937 .saturating_add(Weight::from_parts(1_427_998, 0).saturating_mul(s.into())) - // Standard Error: 1_937 - .saturating_add(Weight::from_parts(247_578, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -174,8 +168,7 @@ impl WeightInfo for SubstrateWeight { /// Storage: Identity IdentityOf (r:1 w:1) /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) /// The range of component `r` is `[1, 20]`. - /// The range of component `x` is `[0, 100]`. - fn request_judgement(r: u32, x: u32, ) -> Weight { + fn request_judgement(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `367 + r * (57 ±0) + x * (66 ±0)` // Estimated: `11003` @@ -183,16 +176,13 @@ impl WeightInfo for SubstrateWeight { Weight::from_parts(32_207_018, 11003) // Standard Error: 5_247 .saturating_add(Weight::from_parts(249_156, 0).saturating_mul(r.into())) - // Standard Error: 1_023 - .saturating_add(Weight::from_parts(458_329, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Identity IdentityOf (r:1 w:1) /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) /// The range of component `r` is `[1, 20]`. - /// The range of component `x` is `[0, 100]`. 
- fn cancel_request(r: u32, x: u32, ) -> Weight { + fn cancel_request(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `398 + x * (66 ±0)` // Estimated: `11003` @@ -200,8 +190,6 @@ impl WeightInfo for SubstrateWeight { Weight::from_parts(31_967_170, 11003) // Standard Error: 5_387 .saturating_add(Weight::from_parts(42_676, 0).saturating_mul(r.into())) - // Standard Error: 1_051 - .saturating_add(Weight::from_parts(446_213, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -252,8 +240,7 @@ impl WeightInfo for SubstrateWeight { /// Storage: Identity IdentityOf (r:1 w:1) /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) /// The range of component `r` is `[1, 19]`. - /// The range of component `x` is `[0, 100]`. - fn provide_judgement(r: u32, x: u32, ) -> Weight { + fn provide_judgement(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `445 + r * (57 ±0) + x * (66 ±0)` // Estimated: `11003` @@ -261,8 +248,6 @@ impl WeightInfo for SubstrateWeight { Weight::from_parts(17_817_684, 11003) // Standard Error: 8_612 .saturating_add(Weight::from_parts(406_251, 0).saturating_mul(r.into())) - // Standard Error: 1_593 - .saturating_add(Weight::from_parts(755_225, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -276,8 +261,7 @@ impl WeightInfo for SubstrateWeight { /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) /// The range of component `r` is `[1, 20]`. /// The range of component `s` is `[0, 100]`. - /// The range of component `x` is `[0, 100]`. - fn kill_identity(r: u32, s: u32, x: u32, ) -> Weight { + fn kill_identity(r: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `676 + r * (5 ±0) + s * (32 ±0) + x * (66 ±0)` // Estimated: `11003` @@ -287,8 +271,6 @@ impl WeightInfo for SubstrateWeight { .saturating_add(Weight::from_parts(145_285, 0).saturating_mul(r.into())) // Standard Error: 2_472 .saturating_add(Weight::from_parts(1_421_039, 0).saturating_mul(s.into())) - // Standard Error: 2_472 - .saturating_add(Weight::from_parts(240_907, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -384,8 +366,7 @@ impl WeightInfo for () { /// Storage: Identity IdentityOf (r:1 w:1) /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) /// The range of component `r` is `[1, 20]`. - /// The range of component `x` is `[0, 100]`. - fn set_identity(r: u32, x: u32, ) -> Weight { + fn set_identity(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `442 + r * (5 ±0)` // Estimated: `11003` @@ -393,8 +374,6 @@ impl WeightInfo for () { Weight::from_parts(31_329_634, 11003) // Standard Error: 4_496 .saturating_add(Weight::from_parts(203_570, 0).saturating_mul(r.into())) - // Standard Error: 877 - .saturating_add(Weight::from_parts(429_346, 0).saturating_mul(x.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -446,8 +425,7 @@ impl WeightInfo for () { /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) /// The range of component `r` is `[1, 20]`. 
/// The range of component `s` is `[0, 100]`. - /// The range of component `x` is `[0, 100]`. - fn clear_identity(r: u32, s: u32, x: u32, ) -> Weight { + fn clear_identity(r: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `469 + r * (5 ±0) + s * (32 ±0) + x * (66 ±0)` // Estimated: `11003` @@ -457,8 +435,6 @@ impl WeightInfo for () { .saturating_add(Weight::from_parts(162_357, 0).saturating_mul(r.into())) // Standard Error: 1_937 .saturating_add(Weight::from_parts(1_427_998, 0).saturating_mul(s.into())) - // Standard Error: 1_937 - .saturating_add(Weight::from_parts(247_578, 0).saturating_mul(x.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -468,8 +444,7 @@ impl WeightInfo for () { /// Storage: Identity IdentityOf (r:1 w:1) /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) /// The range of component `r` is `[1, 20]`. - /// The range of component `x` is `[0, 100]`. - fn request_judgement(r: u32, x: u32, ) -> Weight { + fn request_judgement(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `367 + r * (57 ±0) + x * (66 ±0)` // Estimated: `11003` @@ -477,16 +452,13 @@ impl WeightInfo for () { Weight::from_parts(32_207_018, 11003) // Standard Error: 5_247 .saturating_add(Weight::from_parts(249_156, 0).saturating_mul(r.into())) - // Standard Error: 1_023 - .saturating_add(Weight::from_parts(458_329, 0).saturating_mul(x.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Identity IdentityOf (r:1 w:1) /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) /// The range of component `r` is `[1, 20]`. - /// The range of component `x` is `[0, 100]`. - fn cancel_request(r: u32, x: u32, ) -> Weight { + fn cancel_request(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `398 + x * (66 ±0)` // Estimated: `11003` @@ -494,8 +466,6 @@ impl WeightInfo for () { Weight::from_parts(31_967_170, 11003) // Standard Error: 5_387 .saturating_add(Weight::from_parts(42_676, 0).saturating_mul(r.into())) - // Standard Error: 1_051 - .saturating_add(Weight::from_parts(446_213, 0).saturating_mul(x.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -546,8 +516,7 @@ impl WeightInfo for () { /// Storage: Identity IdentityOf (r:1 w:1) /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) /// The range of component `r` is `[1, 19]`. - /// The range of component `x` is `[0, 100]`. 
- fn provide_judgement(r: u32, x: u32, ) -> Weight { + fn provide_judgement(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `445 + r * (57 ±0) + x * (66 ±0)` // Estimated: `11003` @@ -555,8 +524,6 @@ impl WeightInfo for () { Weight::from_parts(17_817_684, 11003) // Standard Error: 8_612 .saturating_add(Weight::from_parts(406_251, 0).saturating_mul(r.into())) - // Standard Error: 1_593 - .saturating_add(Weight::from_parts(755_225, 0).saturating_mul(x.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -570,8 +537,7 @@ impl WeightInfo for () { /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) /// The range of component `r` is `[1, 20]`. /// The range of component `s` is `[0, 100]`. - /// The range of component `x` is `[0, 100]`. - fn kill_identity(r: u32, s: u32, x: u32, ) -> Weight { + fn kill_identity(r: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `676 + r * (5 ±0) + s * (32 ±0) + x * (66 ±0)` // Estimated: `11003` @@ -581,8 +547,6 @@ impl WeightInfo for () { .saturating_add(Weight::from_parts(145_285, 0).saturating_mul(r.into())) // Standard Error: 2_472 .saturating_add(Weight::from_parts(1_421_039, 0).saturating_mul(s.into())) - // Standard Error: 2_472 - .saturating_add(Weight::from_parts(240_907, 0).saturating_mul(x.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) diff --git a/substrate/frame/im-online/Cargo.toml b/substrate/frame/im-online/Cargo.toml index 5f612c229f077eea56834366265c387afcf39ff8..d83ff540648aaff9205e81893873ce5441bd86d7 100644 --- a/substrate/frame/im-online/Cargo.toml +++ b/substrate/frame/im-online/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive", "serde"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} diff --git a/substrate/frame/indices/Cargo.toml b/substrate/frame/indices/Cargo.toml index 2018c7a063e79048852546359f202db957065c81..d392522718a5b724fb89a2a7c9ee844758595d5b 100644 --- a/substrate/frame/indices/Cargo.toml +++ b/substrate/frame/indices/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} diff --git a/substrate/frame/indices/src/mock.rs b/substrate/frame/indices/src/mock.rs index d63081e0b73f8d3425f0a017cb7544705065d433..7dc6730d34e5ae3f78fbed511adf0bd336787092 100644 --- 
a/substrate/frame/indices/src/mock.rs +++ b/substrate/frame/indices/src/mock.rs @@ -74,6 +74,7 @@ impl pallet_balances::Config for Test { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = (); } diff --git a/substrate/frame/insecure-randomness-collective-flip/Cargo.toml b/substrate/frame/insecure-randomness-collective-flip/Cargo.toml index 57d4da45268d20fd208cb6f6c308701c44a43f13..07c5e3997d2fb608e96462ed441a0871968a7798 100644 --- a/substrate/frame/insecure-randomness-collective-flip/Cargo.toml +++ b/substrate/frame/insecure-randomness-collective-flip/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } safe-mix = { version = "1.0", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} sp-runtime = { path = "../../primitives/runtime", default-features = false} diff --git a/substrate/frame/lottery/Cargo.toml b/substrate/frame/lottery/Cargo.toml index 6b6109fbc53d56acf9952055292f5c4c06caee55..a4942abf243ca7d43fb80e1fadfc7772f9ba332b 100644 --- a/substrate/frame/lottery/Cargo.toml +++ b/substrate/frame/lottery/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} diff --git a/substrate/frame/lottery/src/mock.rs b/substrate/frame/lottery/src/mock.rs index aefb6a1cce2bfc7de0677513c0ef55217e4fdd8c..e50ec3441b2e5e15e84ad33bd6a3cce267a3dfea 100644 --- a/substrate/frame/lottery/src/mock.rs +++ b/substrate/frame/lottery/src/mock.rs @@ -86,6 +86,7 @@ impl pallet_balances::Config for Test { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = (); } diff --git a/substrate/frame/membership/Cargo.toml b/substrate/frame/membership/Cargo.toml index 52d8ee560f73ee26fcb872ab378799ac5fd7025d..18c771bf72c7e8fa0a7ba76bea93a2d048c75021 100644 --- a/substrate/frame/membership/Cargo.toml +++ b/substrate/frame/membership/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive", "serde"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} diff --git a/substrate/frame/merkle-mountain-range/Cargo.toml 
b/substrate/frame/merkle-mountain-range/Cargo.toml index 3a21f5bc86f154c9f11b8b9a032be7e648416c8d..2c30af43b67de254a6b377fded141fee5a608049 100644 --- a/substrate/frame/merkle-mountain-range/Cargo.toml +++ b/substrate/frame/merkle-mountain-range/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} diff --git a/substrate/frame/merkle-mountain-range/src/mock.rs b/substrate/frame/merkle-mountain-range/src/mock.rs index ecc254278bf0fa1661d57e1fd2f1b97edbf181c1..d3cb4e57029727f395dcafb263a2033c5291dbe9 100644 --- a/substrate/frame/merkle-mountain-range/src/mock.rs +++ b/substrate/frame/merkle-mountain-range/src/mock.rs @@ -19,13 +19,9 @@ use crate as pallet_mmr; use crate::*; use codec::{Decode, Encode}; -use frame_support::{ - parameter_types, - traits::{ConstU32, ConstU64}, -}; -use sp_core::H256; +use frame_support::{derive_impl, parameter_types}; use sp_mmr_primitives::{Compact, LeafDataProvider}; -use sp_runtime::traits::{BlakeTwo256, IdentityLookup, Keccak256}; +use sp_runtime::traits::Keccak256; type Block = frame_system::mocking::MockBlock; @@ -37,30 +33,9 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { - type BaseCallFilter = frame_support::traits::Everything; - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - type Nonce = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = sp_core::sr25519::Public; - type Lookup = IdentityLookup; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; - type DbWeight = (); - type BlockWeights = (); - type BlockLength = (); - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = ConstU32<16>; } impl Config for Test { diff --git a/substrate/frame/message-queue/Cargo.toml b/substrate/frame/message-queue/Cargo.toml index 19ea25198f3489d5e6e8d9baf970bbef376bceae..48304cd882c19e53eca51f6b2821657672f150f6 100644 --- a/substrate/frame/message-queue/Cargo.toml +++ b/substrate/frame/message-queue/Cargo.toml @@ -10,7 +10,7 @@ description = "FRAME pallet to queue and process messages" [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.188", optional = true, features = ["derive"] } log = { version = "0.4.17", default-features = false } @@ -37,6 +37,7 @@ std = [ "frame-benchmarking?/std", "frame-support/std", "frame-system/std", + "log/std", "scale-info/std", "sp-arithmetic/std", "sp-core/std", diff --git a/substrate/frame/message-queue/src/integration_test.rs 
b/substrate/frame/message-queue/src/integration_test.rs index a1003edf3c92f20e57142ecb978cc62cfef28a28..965b96a99ca522b11336b9469b6cfde04787ad8f 100644 --- a/substrate/frame/message-queue/src/integration_test.rs +++ b/substrate/frame/message-queue/src/integration_test.rs @@ -17,6 +17,13 @@ //! Stress tests pallet-message-queue. Defines its own runtime config to use larger constants for //! `HeapSize` and `MaxStale`. +//! +//! The tests in this file are ignored by default, since they are quite slow. You can run them +//! manually like this: +//! +//! ```sh +//! RUST_LOG=info cargo test -p pallet-message-queue --profile testnet -- --ignored +//! ``` #![cfg(test)] @@ -96,9 +103,6 @@ impl Config for Test { /// Simulates heavy usage by enqueueing and processing large amounts of messages. /// -/// Best to run with `RUST_LOG=info RUSTFLAGS='-Cdebug-assertions=y' cargo test -r -p -/// pallet-message-queue -- --ignored`. -/// /// # Example output /// /// ```pre @@ -121,7 +125,7 @@ fn stress_test_enqueue_and_service() { let max_queues = 10_000; let max_messages_per_queue = 10_000; let max_msg_len = MaxMessageLenOf::::get(); - let mut rng = StdRng::seed_from_u64(42); + let mut rng = StdRng::seed_from_u64(43); build_and_execute::(|| { let mut msgs_remaining = 0; @@ -145,9 +149,6 @@ fn stress_test_enqueue_and_service() { /// Simulates heavy usage of the suspension logic via `Yield`. /// -/// Best to run with `RUST_LOG=info RUSTFLAGS='-Cdebug-assertions=y' cargo test -r -p -/// pallet-message-queue -- --ignored`. -/// /// # Example output /// /// ```pre @@ -169,7 +170,7 @@ fn stress_test_queue_suspension() { let max_messages_per_queue = 10_000; let (max_suspend_per_block, max_resume_per_block) = (100, 50); let max_msg_len = MaxMessageLenOf::::get(); - let mut rng = StdRng::seed_from_u64(41); + let mut rng = StdRng::seed_from_u64(43); build_and_execute::(|| { let mut suspended = BTreeSet::::new(); diff --git a/substrate/frame/message-queue/src/lib.rs b/substrate/frame/message-queue/src/lib.rs index 7c38dec4b08052f0847204447938bbdac725e422..12d289478b37c5a0eb81aeb5f673a6d34d8dcf4f 100644 --- a/substrate/frame/message-queue/src/lib.rs +++ b/substrate/frame/message-queue/src/lib.rs @@ -195,7 +195,7 @@ use frame_support::{ pallet_prelude::*, traits::{ DefensiveTruncateFrom, EnqueueMessage, ExecuteOverweightError, Footprint, ProcessMessage, - ProcessMessageError, QueuePausedQuery, ServiceQueues, + ProcessMessageError, QueueFootprint, QueuePausedQuery, ServiceQueues, }, BoundedSlice, CloneNoBound, DefaultNoBound, }; @@ -423,14 +423,23 @@ impl Default for BookState { } } +impl From> for QueueFootprint { + fn from(book: BookState) -> Self { + QueueFootprint { + pages: book.count, + storage: Footprint { count: book.message_count, size: book.size }, + } + } +} + /// Handler code for when the items in a queue change. pub trait OnQueueChanged { /// Note that the queue `id` now has `item_count` items in it, taking up `items_size` bytes. - fn on_queue_changed(id: Id, items_count: u64, items_size: u64); + fn on_queue_changed(id: Id, fp: QueueFootprint); } impl OnQueueChanged for () { - fn on_queue_changed(_: Id, _: u64, _: u64) {} + fn on_queue_changed(_: Id, _: QueueFootprint) {} } #[frame_support::pallet] @@ -584,8 +593,9 @@ pub mod pallet { } /// Check all compile-time assumptions about [`crate::Config`]. 
+ #[cfg(test)] fn integrity_test() { - assert!(!MaxMessageLenOf::::get().is_zero(), "HeapSize too low"); + Self::do_integrity_test().expect("Pallet config is valid; qed") } } @@ -759,6 +769,47 @@ impl Pallet { } } + /// The maximal weight that a single message can consume. + /// + /// Any message using more than this will be marked as permanently overweight and not + /// automatically re-attempted. Returns `None` if the servicing of a message cannot begin. + /// `Some(0)` means that only messages with no weight may be served. + fn max_message_weight(limit: Weight) -> Option { + limit.checked_sub(&Self::single_msg_overhead()) + } + + /// The overhead of servicing a single message. + fn single_msg_overhead() -> Weight { + T::WeightInfo::bump_service_head() + .saturating_add(T::WeightInfo::service_queue_base()) + .saturating_add( + T::WeightInfo::service_page_base_completion() + .max(T::WeightInfo::service_page_base_no_completion()), + ) + .saturating_add(T::WeightInfo::service_page_item()) + .saturating_add(T::WeightInfo::ready_ring_unknit()) + } + + /// Checks invariants of the pallet config. + /// + /// The results of this can only be relied upon if the config values are set to constants. + #[cfg(test)] + fn do_integrity_test() -> Result<(), String> { + ensure!(!MaxMessageLenOf::::get().is_zero(), "HeapSize too low"); + + if let Some(service) = T::ServiceWeight::get() { + if Self::max_message_weight(service).is_none() { + return Err(format!( + "ServiceWeight too low: {}. Must be at least {}", + service, + Self::single_msg_overhead(), + )) + } + } + + Ok(()) + } + fn do_enqueue_message( origin: &MessageOriginOf, message: BoundedSlice>, @@ -865,11 +916,7 @@ impl Pallet { T::WeightInfo::execute_overweight_page_updated() }; BookStateFor::::insert(&origin, &book_state); - T::QueueChangeHandler::on_queue_changed( - origin, - book_state.message_count, - book_state.size, - ); + T::QueueChangeHandler::on_queue_changed(origin, book_state.into()); Ok(weight_counter.consumed().saturating_add(page_weight)) }, } @@ -934,11 +981,7 @@ impl Pallet { book_state.message_count.saturating_reduce(page.remaining.into() as u64); book_state.size.saturating_reduce(page.remaining_size.into() as u64); BookStateFor::::insert(origin, &book_state); - T::QueueChangeHandler::on_queue_changed( - origin.clone(), - book_state.message_count, - book_state.size, - ); + T::QueueChangeHandler::on_queue_changed(origin.clone(), book_state.into()); Self::deposit_event(Event::PageReaped { origin: origin.clone(), index: page_index }); Ok(()) @@ -993,11 +1036,7 @@ impl Pallet { } BookStateFor::::insert(&origin, &book_state); if total_processed > 0 { - T::QueueChangeHandler::on_queue_changed( - origin, - book_state.message_count, - book_state.size, - ); + T::QueueChangeHandler::on_queue_changed(origin, book_state.into()); } (total_processed > 0, next_ready) } @@ -1360,10 +1399,14 @@ impl ServiceQueues for Pallet { type OverweightMessageAddress = (MessageOriginOf, PageIndex, T::Size); fn service_queues(weight_limit: Weight) -> Weight { - // The maximum weight that processing a single message may take. 
- let overweight_limit = weight_limit; let mut weight = WeightMeter::with_limit(weight_limit); + // Get the maximum weight that processing a single message may take: + let max_weight = Self::max_message_weight(weight_limit).unwrap_or_else(|| { + defensive!("Not enough weight to service a single message."); + Weight::zero() + }); + let mut next = match Self::bump_service_head(&mut weight) { Some(h) => h, None => return weight.consumed(), @@ -1374,7 +1417,7 @@ impl ServiceQueues for Pallet { let mut last_no_progress = None; loop { - let (progressed, n) = Self::service_queue(next.clone(), &mut weight, overweight_limit); + let (progressed, n) = Self::service_queue(next.clone(), &mut weight, max_weight); next = match n { Some(n) => if !progressed { @@ -1436,7 +1479,7 @@ impl EnqueueMessage> for Pallet { ) { Self::do_enqueue_message(&origin, message); let book_state = BookStateFor::::get(&origin); - T::QueueChangeHandler::on_queue_changed(origin, book_state.message_count, book_state.size); + T::QueueChangeHandler::on_queue_changed(origin, book_state.into()); } fn enqueue_messages<'a>( @@ -1447,7 +1490,7 @@ impl EnqueueMessage> for Pallet { Self::do_enqueue_message(&origin, message); } let book_state = BookStateFor::::get(&origin); - T::QueueChangeHandler::on_queue_changed(origin, book_state.message_count, book_state.size); + T::QueueChangeHandler::on_queue_changed(origin, book_state.into()); } fn sweep_queue(origin: MessageOriginOf) { @@ -1462,8 +1505,7 @@ impl EnqueueMessage> for Pallet { BookStateFor::::insert(&origin, &book_state); } - fn footprint(origin: MessageOriginOf) -> Footprint { - let book_state = BookStateFor::::get(&origin); - Footprint { count: book_state.message_count, size: book_state.size } + fn footprint(origin: MessageOriginOf) -> QueueFootprint { + BookStateFor::::get(&origin).into() } } diff --git a/substrate/frame/message-queue/src/mock.rs b/substrate/frame/message-queue/src/mock.rs index 473c5faac4c5d045cc18cca43b88afd11325c2d0..55a6457435423f541b8f53c4cf003dadfd67bacf 100644 --- a/substrate/frame/message-queue/src/mock.rs +++ b/substrate/frame/message-queue/src/mock.rs @@ -71,7 +71,7 @@ impl frame_system::Config for Test { parameter_types! { pub const HeapSize: u32 = 24; pub const MaxStale: u32 = 2; - pub const ServiceWeight: Option = Some(Weight::from_parts(10, 10)); + pub const ServiceWeight: Option = Some(Weight::from_parts(100, 100)); } impl Config for Test { type RuntimeEvent = RuntimeEvent; @@ -91,6 +91,7 @@ pub struct MockedWeightInfo; parameter_types! { /// Storage for `MockedWeightInfo`, do not use directly. pub static WeightForCall: BTreeMap = Default::default(); + pub static DefaultWeightForCall: Weight = Weight::zero(); } /// Set the return value for a function from the `WeightInfo` trait. 
@@ -111,40 +112,55 @@ impl crate::weights::WeightInfo for MockedWeightInfo { WeightForCall::get() .get("execute_overweight_page_updated") .copied() - .unwrap_or_default() + .unwrap_or(DefaultWeightForCall::get()) } fn execute_overweight_page_removed() -> Weight { WeightForCall::get() .get("execute_overweight_page_removed") .copied() - .unwrap_or_default() + .unwrap_or(DefaultWeightForCall::get()) } fn service_page_base_completion() -> Weight { WeightForCall::get() .get("service_page_base_completion") .copied() - .unwrap_or_default() + .unwrap_or(DefaultWeightForCall::get()) } fn service_page_base_no_completion() -> Weight { WeightForCall::get() .get("service_page_base_no_completion") .copied() - .unwrap_or_default() + .unwrap_or(DefaultWeightForCall::get()) } fn service_queue_base() -> Weight { - WeightForCall::get().get("service_queue_base").copied().unwrap_or_default() + WeightForCall::get() + .get("service_queue_base") + .copied() + .unwrap_or(DefaultWeightForCall::get()) } fn bump_service_head() -> Weight { - WeightForCall::get().get("bump_service_head").copied().unwrap_or_default() + WeightForCall::get() + .get("bump_service_head") + .copied() + .unwrap_or(DefaultWeightForCall::get()) } fn service_page_item() -> Weight { - WeightForCall::get().get("service_page_item").copied().unwrap_or_default() + WeightForCall::get() + .get("service_page_item") + .copied() + .unwrap_or(DefaultWeightForCall::get()) } fn ready_ring_knit() -> Weight { - WeightForCall::get().get("ready_ring_knit").copied().unwrap_or_default() + WeightForCall::get() + .get("ready_ring_knit") + .copied() + .unwrap_or(DefaultWeightForCall::get()) } fn ready_ring_unknit() -> Weight { - WeightForCall::get().get("ready_ring_unknit").copied().unwrap_or_default() + WeightForCall::get() + .get("ready_ring_unknit") + .copied() + .unwrap_or(DefaultWeightForCall::get()) } } @@ -262,8 +278,8 @@ parameter_types! { /// Records all queue changes into [`QueueChanges`]. 
pub struct RecordingQueueChangeHandler; impl OnQueueChanged for RecordingQueueChangeHandler { - fn on_queue_changed(id: MessageOrigin, items_count: u64, items_size: u64) { - QueueChanges::mutate(|cs| cs.push((id, items_count, items_size))); + fn on_queue_changed(id: MessageOrigin, fp: QueueFootprint) { + QueueChanges::mutate(|cs| cs.push((id, fp.storage.count, fp.storage.size))); } } @@ -350,3 +366,7 @@ pub fn num_overweight_enqueued_events() -> u32 { }) .count() as u32 } + +pub fn fp(pages: u32, count: u64, size: u64) -> QueueFootprint { + QueueFootprint { storage: Footprint { count, size }, pages } +} diff --git a/substrate/frame/message-queue/src/tests.rs b/substrate/frame/message-queue/src/tests.rs index 092bd1d833450502f383e30fedca033d28a22179..d94ad581ea0d53a4ee445df25b806dd3ac3417fb 100644 --- a/substrate/frame/message-queue/src/tests.rs +++ b/substrate/frame/message-queue/src/tests.rs @@ -49,6 +49,7 @@ fn enqueue_within_one_page_works() { MessageQueue::enqueue_message(msg("c"), Here); assert_eq!(MessageQueue::service_queues(2.into_weight()), 2.into_weight()); assert_eq!(MessagesProcessed::take(), vec![(b"a".to_vec(), Here), (b"b".to_vec(), Here)]); + assert_eq!(MessageQueue::footprint(Here).pages, 1); assert_eq!(MessageQueue::service_queues(2.into_weight()), 1.into_weight()); assert_eq!(MessagesProcessed::take(), vec![(b"c".to_vec(), Here)]); @@ -266,6 +267,44 @@ fn service_queues_suspension_works() { }); } +#[test] +#[cfg(debug_assertions)] +#[should_panic(expected = "Not enough weight to service a single message.")] +fn service_queues_low_weight_defensive() { + use MessageOrigin::*; + build_and_execute::(|| { + DefaultWeightForCall::set(21.into()); + // Check that the integrity test would catch this: + assert!(MessageQueue::do_integrity_test().is_err()); + + MessageQueue::enqueue_message(msg("weight=0"), Here); + MessageQueue::service_queues(104.into_weight()); + }); +} + +/// Regression test for . +#[test] +fn service_queues_regression_1873() { + use MessageOrigin::*; + build_and_execute::(|| { + DefaultWeightForCall::set(20.into()); + + MessageQueue::enqueue_message(msg("weight=100"), Here); + assert_eq!(MessageQueue::service_queues(100.into_weight()), 100.into()); + + // Before the MQ this would not emit any events: + assert_last_event::( + Event::OverweightEnqueued { + id: blake2_256(b"weight=100"), + origin: MessageOrigin::Here, + message_index: 0, + page_index: 0, + } + .into(), + ); + }); +} + #[test] fn reap_page_permanent_overweight_works() { use MessageOrigin::*; @@ -276,6 +315,7 @@ fn reap_page_permanent_overweight_works() { MessageQueue::enqueue_message(msg("weight=2"), Here); } assert_eq!(Pages::::iter().count(), n); + assert_eq!(MessageQueue::footprint(Here).pages, n as u32); assert_eq!(QueueChanges::take().len(), n); // Mark all pages as stale since their message is permanently overweight. 
MessageQueue::service_queues(1.into_weight()); @@ -301,6 +341,7 @@ fn reap_page_permanent_overweight_works() { assert_noop!(MessageQueue::do_reap_page(&o, i), Error::::NotReapable); assert!(QueueChanges::take().is_empty()); } + assert_eq!(MessageQueue::footprint(Here).pages, 3); }); } @@ -984,8 +1025,9 @@ fn footprint_works() { BookStateFor::::insert(origin, book); let info = MessageQueue::footprint(origin); - assert_eq!(info.count as usize, msgs); - assert_eq!(info.size, page.remaining_size as u64); + assert_eq!(info.storage.count as usize, msgs); + assert_eq!(info.storage.size, page.remaining_size as u64); + assert_eq!(info.pages, 1); // Sweeping a queue never calls OnQueueChanged. assert!(QueueChanges::take().is_empty()); @@ -1006,16 +1048,44 @@ fn footprint_invalid_works() { fn footprint_on_swept_works() { use MessageOrigin::*; build_and_execute::(|| { - let mut book = empty_book::(); - book.message_count = 3; - book.size = 10; - BookStateFor::::insert(Here, &book); - knit(&Here); + build_ring::(&[Here]); MessageQueue::sweep_queue(Here); let fp = MessageQueue::footprint(Here); - assert_eq!(fp.count, 3); - assert_eq!(fp.size, 10); + assert_eq!((1, 1, 1), (fp.storage.count, fp.storage.size, fp.pages)); + }) +} + +/// The number of reported pages takes overweight pages into account. +#[test] +fn footprint_num_pages_works() { + use MessageOrigin::*; + build_and_execute::(|| { + MessageQueue::enqueue_message(msg("weight=2"), Here); + MessageQueue::enqueue_message(msg("weight=3"), Here); + + assert_eq!(MessageQueue::footprint(Here), fp(2, 2, 16)); + + // Mark the messages as overweight. + assert_eq!(MessageQueue::service_queues(1.into_weight()), 0.into_weight()); + assert_eq!(System::events().len(), 2); + // Overweight does not change the footprint. + assert_eq!(MessageQueue::footprint(Here), fp(2, 2, 16)); + + // Now execute the second message. + assert_eq!( + ::execute_overweight(3.into_weight(), (Here, 1, 0)) + .unwrap(), + 3.into_weight() + ); + assert_eq!(MessageQueue::footprint(Here), fp(1, 1, 8)); + // And the first one: + assert_eq!( + ::execute_overweight(2.into_weight(), (Here, 0, 0)) + .unwrap(), + 2.into_weight() + ); + assert_eq!(MessageQueue::footprint(Here), Default::default()); }) } @@ -1105,6 +1175,7 @@ fn permanently_overweight_book_unknits() { assert_ring(&[]); assert_eq!(MessagesProcessed::take().len(), 0); assert_eq!(BookStateFor::::get(Here).message_count, 1); + assert_eq!(MessageQueue::footprint(Here).pages, 1); // Now if we enqueue another message, it will become ready again. MessageQueue::enqueue_messages([msg("weight=1")].into_iter(), Here); assert_ring(&[Here]); @@ -1150,6 +1221,116 @@ fn permanently_overweight_book_unknits_multiple() { }); } +#[test] +fn permanently_overweight_limit_is_valid_basic() { + use MessageOrigin::*; + + for w in 50..300 { + build_and_execute::(|| { + DefaultWeightForCall::set(Weight::MAX); + + set_weight("bump_service_head", 10.into()); + set_weight("service_queue_base", 10.into()); + set_weight("service_page_base_no_completion", 10.into()); + set_weight("service_page_base_completion", 0.into()); + + set_weight("service_page_item", 10.into()); + set_weight("ready_ring_unknit", 10.into()); + + let m = "weight=200".to_string(); + + MessageQueue::enqueue_message(msg(&m), Here); + MessageQueue::service_queues(w.into()); + + let last_event = + frame_system::Pallet::::events().into_iter().last().expect("No event"); + + // The weight overhead for a single message is set to 50. The message itself needs 200. 
+ // Every weight in range `[50, 249]` should result in a permanently overweight message: + if w < 250 { + assert_eq!( + last_event.event, + RuntimeEvent::MessageQueue(Event::OverweightEnqueued { + id: blake2_256(m.as_bytes()), + origin: Here, + message_index: 0, + page_index: 0, + }) + ); + } else { + // Otherwise it is processed as normal: + assert_eq!( + last_event.event, + RuntimeEvent::MessageQueue(Event::Processed { + origin: Here, + weight_used: 200.into(), + id: blake2_256(m.as_bytes()), + success: true, + }) + ); + } + }); + } +} + +#[test] +fn permanently_overweight_limit_is_valid_fuzzy() { + use MessageOrigin::*; + let mut rng = rand::rngs::StdRng::seed_from_u64(42); + + for _ in 0..10 { + // Brainlet code, but works... + let (s1, s2) = (rng.gen_range(0..=10), rng.gen_range(0..=10)); + let (s3, s4) = (rng.gen_range(0..=10), rng.gen_range(0..=10)); + let s5 = rng.gen_range(0..=10); + let o = s1 + s2 + s3 + s4 + s5; + + for w in o..=o + 300 { + build_and_execute::(|| { + DefaultWeightForCall::set(Weight::MAX); + + set_weight("bump_service_head", s1.into()); + set_weight("service_queue_base", s2.into()); + // Only the larger one of these two is taken: + set_weight("service_page_base_no_completion", s3.into()); + set_weight("service_page_base_completion", 0.into()); + set_weight("service_page_item", s4.into()); + set_weight("ready_ring_unknit", s5.into()); + + let m = "weight=200".to_string(); + + MessageQueue::enqueue_message(msg(&m), Here); + MessageQueue::service_queues(w.into()); + + let last_event = + frame_system::Pallet::::events().into_iter().last().expect("No event"); + + if w < o + 200 { + assert_eq!( + last_event.event, + RuntimeEvent::MessageQueue(Event::OverweightEnqueued { + id: blake2_256(m.as_bytes()), + origin: Here, + message_index: 0, + page_index: 0, + }) + ); + } else { + assert_eq!( + last_event.event, + RuntimeEvent::MessageQueue(Event::Processed { + origin: Here, + weight_used: 200.into(), + id: blake2_256(m.as_bytes()), + success: true, + }) + ); + } + }); + } + } +} + /// We don't want empty books in the ready ring, but if they somehow make their way in there, it /// should not panic. 
#[test] @@ -1447,3 +1628,37 @@ fn service_queue_suspension_ready_ring_works() { ); }); } + +#[test] +fn integrity_test_checks_service_weight() { + build_and_execute::(|| { + assert_eq!(::ServiceWeight::get(), Some(100.into()), "precond"); + assert!(MessageQueue::do_integrity_test().is_ok(), "precond"); + + // Enough for all: + DefaultWeightForCall::set(20.into()); + assert!(MessageQueue::do_integrity_test().is_ok()); + + // Not enough for anything: + DefaultWeightForCall::set(101.into()); + assert_eq!(MessageQueue::single_msg_overhead(), 505.into()); + assert!(MessageQueue::do_integrity_test().is_err()); + + // Not enough for a single function: + for f in [ + "bump_service_head", + "service_queue_base", + "service_page_base_completion", + "service_page_base_no_completion", + "service_page_item", + "ready_ring_unknit", + ] { + WeightForCall::take(); + DefaultWeightForCall::set(Zero::zero()); + + assert!(MessageQueue::do_integrity_test().is_ok()); + set_weight(f, 101.into()); + assert!(MessageQueue::do_integrity_test().is_err()); + } + }); +} diff --git a/substrate/frame/mixnet/Cargo.toml b/substrate/frame/mixnet/Cargo.toml index 68ffdad20fcbb2478f0a47f6fccff514d9a8c85f..665c606fc374cce9efe2849d58da058b241b4ad5 100644 --- a/substrate/frame/mixnet/Cargo.toml +++ b/substrate/frame/mixnet/Cargo.toml @@ -18,7 +18,7 @@ frame-benchmarking = { default-features = false, optional = true, path = "../ben frame-support = { default-features = false, path = "../support" } frame-system = { default-features = false, path = "../system" } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.188", default-features = false, features = ["derive"] } sp-application-crypto = { default-features = false, path = "../../primitives/application-crypto" } sp-arithmetic = { default-features = false, path = "../../primitives/arithmetic" } diff --git a/substrate/frame/multisig/Cargo.toml b/substrate/frame/multisig/Cargo.toml index 83b9a09b1f3ce0ce14e7e8926e65f3ad56b8ddd9..a2ee608c33cd03916d9993451737e3ef20381c57 100644 --- a/substrate/frame/multisig/Cargo.toml +++ b/substrate/frame/multisig/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} @@ -35,6 +35,7 @@ std = [ "frame-benchmarking?/std", "frame-support/std", "frame-system/std", + "log/std", "pallet-balances/std", "scale-info/std", "sp-io/std", diff --git a/substrate/frame/nft-fractionalization/Cargo.toml b/substrate/frame/nft-fractionalization/Cargo.toml index 0e6b85ee76ee08c97b17fe8dce3cededa2a693e3..a2cb9a4aec9ad822fc63efafad029dc2fbd17b7d 100644 --- a/substrate/frame/nft-fractionalization/Cargo.toml +++ b/substrate/frame/nft-fractionalization/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { version = "0.4.17", default-features = false } 
-scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} diff --git a/substrate/frame/nft-fractionalization/src/mock.rs b/substrate/frame/nft-fractionalization/src/mock.rs index c690f0e580ed6242cc68066893dc563758984ca0..987c65a8954f60d12c3669b204a2bdc32ca4227b 100644 --- a/substrate/frame/nft-fractionalization/src/mock.rs +++ b/substrate/frame/nft-fractionalization/src/mock.rs @@ -86,6 +86,7 @@ impl pallet_balances::Config for Test { type MaxReserves = ConstU32<50>; type ReserveIdentifier = [u8; 8]; type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type MaxHolds = ConstU32<1>; type FreezeIdentifier = (); type MaxFreezes = (); diff --git a/substrate/frame/nfts/Cargo.toml b/substrate/frame/nfts/Cargo.toml index 67113b656fd68e5a1ef1847600b9c90af9a7f0e8..2a3b2921c75f539b768dc38d704bcdea9cd953b2 100644 --- a/substrate/frame/nfts/Cargo.toml +++ b/substrate/frame/nfts/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } enumflags2 = { version = "0.7.7" } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} @@ -33,6 +33,7 @@ sp-keystore = { path = "../../primitives/keystore" } default = [ "std" ] std = [ "codec/std", + "enumflags2/std", "frame-benchmarking?/std", "frame-support/std", "frame-system/std", diff --git a/substrate/frame/nfts/runtime-api/src/lib.rs b/substrate/frame/nfts/runtime-api/src/lib.rs index cf2d444b42f8c32f3e7a0c90a9a20f0979727c3e..77535c64069ccbae8f62c6a556e2de8a71d39968 100644 --- a/substrate/frame/nfts/runtime-api/src/lib.rs +++ b/substrate/frame/nfts/runtime-api/src/lib.rs @@ -48,7 +48,7 @@ sp_api::decl_runtime_apis! { fn system_attribute( collection: CollectionId, - item: ItemId, + item: Option, key: Vec, ) -> Option>; diff --git a/substrate/frame/nfts/src/impl_nonfungibles.rs b/substrate/frame/nfts/src/impl_nonfungibles.rs index 4e2593b4057d7b59510fc336589d7af77994566c..ee7f42cfc689c5852fcf8b2a35bdb0acb33d4b7e 100644 --- a/substrate/frame/nfts/src/impl_nonfungibles.rs +++ b/substrate/frame/nfts/src/impl_nonfungibles.rs @@ -79,17 +79,19 @@ impl, I: 'static> Inspect<::AccountId> for Palle Attribute::::get((collection, Some(item), namespace, key)).map(|a| a.0.into()) } - /// Returns the system attribute value of `item` of `collection` corresponding to `key`. + /// Returns the system attribute value of `item` of `collection` corresponding to `key` if + /// `item` is `Some`. Otherwise, returns the system attribute value of `collection` + /// corresponding to `key`. /// /// By default this is `None`; no attributes are defined. 
fn system_attribute( collection: &Self::CollectionId, - item: &Self::ItemId, + item: Option<&Self::ItemId>, key: &[u8], ) -> Option> { let namespace = AttributeNamespace::Pallet; let key = BoundedSlice::<_, _>::try_from(key).ok()?; - Attribute::::get((collection, Some(item), namespace, key)).map(|a| a.0.into()) + Attribute::::get((collection, item, namespace, key)).map(|a| a.0.into()) } /// Returns the attribute value of `item` of `collection` corresponding to `key`. @@ -320,6 +322,33 @@ impl, I: 'static> Mutate<::AccountId, ItemConfig }) } + fn set_item_metadata( + who: Option<&T::AccountId>, + collection: &Self::CollectionId, + item: &Self::ItemId, + data: &[u8], + ) -> DispatchResult { + Self::do_set_item_metadata( + who.cloned(), + *collection, + *item, + Self::construct_metadata(data.to_vec())?, + None, + ) + } + + fn set_collection_metadata( + who: Option<&T::AccountId>, + collection: &Self::CollectionId, + data: &[u8], + ) -> DispatchResult { + Self::do_set_collection_metadata( + who.cloned(), + *collection, + Self::construct_metadata(data.to_vec())?, + ) + } + fn clear_attribute( collection: &Self::CollectionId, item: &Self::ItemId, @@ -362,6 +391,21 @@ impl, I: 'static> Mutate<::AccountId, ItemConfig >::clear_collection_attribute(collection, k) }) } + + fn clear_item_metadata( + who: Option<&T::AccountId>, + collection: &Self::CollectionId, + item: &Self::ItemId, + ) -> DispatchResult { + Self::do_clear_item_metadata(who.cloned(), *collection, *item) + } + + fn clear_collection_metadata( + who: Option<&T::AccountId>, + collection: &Self::CollectionId, + ) -> DispatchResult { + Self::do_clear_collection_metadata(who.cloned(), *collection) + } } impl, I: 'static> Transfer for Pallet { @@ -398,6 +442,31 @@ impl, I: 'static> Transfer for Pallet { } } +impl, I: 'static> Trading> for Pallet { + fn buy_item( + collection: &Self::CollectionId, + item: &Self::ItemId, + buyer: &T::AccountId, + bid_price: &ItemPrice, + ) -> DispatchResult { + Self::do_buy_item(*collection, *item, buyer.clone(), *bid_price) + } + + fn set_price( + collection: &Self::CollectionId, + item: &Self::ItemId, + sender: &T::AccountId, + price: Option>, + whitelisted_buyer: Option, + ) -> DispatchResult { + Self::do_set_price(*collection, *item, sender.clone(), price, whitelisted_buyer) + } + + fn item_price(collection: &Self::CollectionId, item: &Self::ItemId) -> Option> { + ItemPriceOf::::get(collection, item).map(|a| a.0) + } +} + impl, I: 'static> InspectEnumerable for Pallet { type CollectionsIterator = KeyPrefixIterator<>::CollectionId>; type ItemsIterator = KeyPrefixIterator<>::ItemId>; diff --git a/substrate/frame/nfts/src/mock.rs b/substrate/frame/nfts/src/mock.rs index f091a53f8d7c7938b80c8fe8406cd0aa309d5d82..248522aafffc3d219364f850d4ce4b8b0e6b95b8 100644 --- a/substrate/frame/nfts/src/mock.rs +++ b/substrate/frame/nfts/src/mock.rs @@ -85,6 +85,7 @@ impl pallet_balances::Config for Test { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = (); } diff --git a/substrate/frame/nfts/src/tests.rs b/substrate/frame/nfts/src/tests.rs index 6e264048f11a6e332447061b808a77d4927e3a2c..aeebf51b7c78a05e1303dfed0722bd9095aafe32 100644 --- a/substrate/frame/nfts/src/tests.rs +++ b/substrate/frame/nfts/src/tests.rs @@ -17,12 +17,12 @@ //! Tests for Nfts pallet. 
-use crate::{mock::*, Event, *}; +use crate::{mock::*, Event, SystemConfig, *}; use enumflags2::BitFlags; use frame_support::{ assert_noop, assert_ok, traits::{ - tokens::nonfungibles_v2::{Create, Destroy, Mutate}, + tokens::nonfungibles_v2::{Create, Destroy, Inspect, Mutate}, Currency, Get, }, }; @@ -982,6 +982,86 @@ fn set_collection_owner_attributes_should_work() { }); } +#[test] +fn set_collection_system_attributes_should_work() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&account(1), 100); + + assert_ok!(Nfts::force_create( + RuntimeOrigin::root(), + account(1), + collection_config_with_all_settings_enabled() + )); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(account(1)), 0, 0, account(1), None)); + + let collection_id = 0; + let attribute_key = [0u8]; + let attribute_value = [0u8]; + + assert_ok!(, ItemConfig>>::set_collection_attribute( + &collection_id, + &attribute_key, + &attribute_value + )); + + assert_eq!(attributes(0), vec![(None, AttributeNamespace::Pallet, bvec![0], bvec![0])]); + + assert_eq!( + >>::system_attribute( + &collection_id, + None, + &attribute_key + ), + Some(attribute_value.to_vec()) + ); + + // test typed system attribute + let typed_attribute_key = [0u8; 32]; + #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] + struct TypedAttributeValue(u32); + let typed_attribute_value = TypedAttributeValue(42); + + assert_ok!( + , ItemConfig>>::set_typed_collection_attribute( + &collection_id, + &typed_attribute_key, + &typed_attribute_value + ) + ); + + assert_eq!( + >>::typed_system_attribute( + &collection_id, + None, + &typed_attribute_key + ), + Some(typed_attribute_value) + ); + + // check storage + assert_eq!( + attributes(collection_id), + [ + (None, AttributeNamespace::Pallet, bvec![0], bvec![0]), + ( + None, + AttributeNamespace::Pallet, + bvec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0 + ], + bvec![42, 0, 0, 0] + ) + ] + ); + + assert_ok!(Nfts::burn(RuntimeOrigin::root(), collection_id, 0)); + let w = Nfts::get_destroy_witness(&0).unwrap(); + assert_ok!(Nfts::destroy(RuntimeOrigin::signed(account(1)), collection_id, w)); + assert_eq!(attributes(collection_id), vec![]); + }) +} + #[test] fn set_item_owner_attributes_should_work() { new_test_ext().execute_with(|| { diff --git a/substrate/frame/nicks/Cargo.toml b/substrate/frame/nicks/Cargo.toml index c8e8fa0467a5a5fdd39a567222577562f4fbea84..b8100d07435c8f4b01f1673b181f43a472934d1a 100644 --- a/substrate/frame/nicks/Cargo.toml +++ b/substrate/frame/nicks/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} sp-io = { path = "../../primitives/io", default-features = false} diff --git a/substrate/frame/nicks/src/lib.rs b/substrate/frame/nicks/src/lib.rs index 0a68f7d7142dc45cebbddcdef8ab90da05a3552c..ad30c628adfbd4f20317faec3caec534b932e21d 100644 --- a/substrate/frame/nicks/src/lib.rs +++ b/substrate/frame/nicks/src/lib.rs @@ -313,6 +313,7 @@ mod tests { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = (); } 
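// A minimal sketch of the pattern behind the repeated one-line additions above:
// `pallet_balances::Config` now also expects a `RuntimeFreezeReason` associated type
// alongside `RuntimeHoldReason`. Mocks that exercise freezes point it at the runtime's
// aggregated `RuntimeFreezeReason` enum; the rest can use the unit type. The `Test`
// runtime name and the surrounding items follow the mock configs shown in these hunks;
// unrelated items are elided.
impl pallet_balances::Config for Test {
	// ...other associated types as in the respective mock...
	type FreezeIdentifier = ();
	type MaxFreezes = ();
	type RuntimeHoldReason = ();
	// The newly required item; `()` wherever the mock does not use freezes.
	type RuntimeFreezeReason = ();
	type MaxHolds = ();
}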
diff --git a/substrate/frame/nis/Cargo.toml b/substrate/frame/nis/Cargo.toml index c8465285ffa325c3ba72a8f61e6483e93dd44485..986568ea722a269631687264ea3099e03d67c5d3 100644 --- a/substrate/frame/nis/Cargo.toml +++ b/substrate/frame/nis/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} diff --git a/substrate/frame/nis/src/lib.rs b/substrate/frame/nis/src/lib.rs index decebbd56762ed19750450d2d2c70b063956729b..5e547b63e5474ea36bdcbdb699200f162ca3ac2e 100644 --- a/substrate/frame/nis/src/lib.rs +++ b/substrate/frame/nis/src/lib.rs @@ -148,7 +148,7 @@ impl fungible::Unbalanced for NoCounterpart { } fn set_total_issuance(_: Self::Balance) {} } -impl FunMutate for NoCounterpart {} +impl FunMutate for NoCounterpart {} impl Convert for NoCounterpart { fn convert(_: Perquintill) -> u32 { 0 diff --git a/substrate/frame/nis/src/mock.rs b/substrate/frame/nis/src/mock.rs index 76fdf5f3f069376f4c42325685fe8d282672c5d1..30f7ef95f331b29966beda4224a33e813d9690e5 100644 --- a/substrate/frame/nis/src/mock.rs +++ b/substrate/frame/nis/src/mock.rs @@ -89,6 +89,7 @@ impl pallet_balances::Config for Test { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type MaxHolds = ConstU32<1>; } @@ -109,6 +110,7 @@ impl pallet_balances::Config for Test { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = (); } diff --git a/substrate/frame/node-authorization/Cargo.toml b/substrate/frame/node-authorization/Cargo.toml index d666437e42eab60e1bfade19eb8ad359e32b1843..e5a504e2a0ff2643c69acf98e535dec77fe80a07 100644 --- a/substrate/frame/node-authorization/Cargo.toml +++ b/substrate/frame/node-authorization/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} sp-core = { path = "../../primitives/core", default-features = false} diff --git a/substrate/frame/nomination-pools/Cargo.toml b/substrate/frame/nomination-pools/Cargo.toml index 4923af0ab0c533ac374cdf0aac0adc15c7af014d..3c55822b9a530f5509e9694dfad9570fe19c4c06 100644 --- a/substrate/frame/nomination-pools/Cargo.toml +++ b/substrate/frame/nomination-pools/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # parity codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = 
false, features = ["derive"] } # FRAME frame-support = { path = "../support", default-features = false} diff --git a/substrate/frame/nomination-pools/benchmarking/Cargo.toml b/substrate/frame/nomination-pools/benchmarking/Cargo.toml index 6375411b7e239e4926b484ab08a4f63cc9c0222d..e8b18666815e8b3cbd51cbf44eaad67f4c4c752e 100644 --- a/substrate/frame/nomination-pools/benchmarking/Cargo.toml +++ b/substrate/frame/nomination-pools/benchmarking/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # parity codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } # FRAME frame-benchmarking = { path = "../../benchmarking", default-features = false} @@ -43,6 +43,7 @@ sp-io = { path = "../../../primitives/io" } default = [ "std" ] std = [ + "codec/std", "frame-benchmarking/std", "frame-election-provider-support/std", "frame-support/std", @@ -52,6 +53,7 @@ std = [ "pallet-nomination-pools/std", "pallet-staking/std", "pallet-timestamp/std", + "scale-info/std", "sp-core/std", "sp-io/std", "sp-runtime-interface/std", diff --git a/substrate/frame/nomination-pools/benchmarking/src/mock.rs b/substrate/frame/nomination-pools/benchmarking/src/mock.rs index 1e6a5c249998bb5ef604eeda69828b0441f5ee99..9a7f2197a7b234e643c067af9633d2a2b100a584 100644 --- a/substrate/frame/nomination-pools/benchmarking/src/mock.rs +++ b/substrate/frame/nomination-pools/benchmarking/src/mock.rs @@ -77,6 +77,7 @@ impl pallet_balances::Config for Runtime { type FreezeIdentifier = RuntimeFreezeReason; type MaxFreezes = ConstU32<1>; type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = (); } @@ -109,7 +110,7 @@ impl pallet_staking::Config for Runtime { type SessionInterface = (); type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = (); - type MaxNominatorRewardedPerValidator = ConstU32<64>; + type MaxExposurePageSize = ConstU32<64>; type OffendingValidatorsThreshold = (); type ElectionProvider = frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking, ())>; diff --git a/substrate/frame/nomination-pools/src/lib.rs b/substrate/frame/nomination-pools/src/lib.rs index 909a930e3821f8611b30ed39415eaab3f4b38636..ab4bd51ffc0ee55be6802cdad5fff82c80911b18 100644 --- a/substrate/frame/nomination-pools/src/lib.rs +++ b/substrate/frame/nomination-pools/src/lib.rs @@ -1626,6 +1626,10 @@ pub mod pallet { #[pallet::constant] type MaxPointsToBalance: Get; + /// The maximum number of simultaneous unbonding chunks that can exist per member. + #[pallet::constant] + type MaxUnbonding: Get; + /// Infallible method for converting `Currency::Balance` to `U256`. type BalanceToU256: Convert, U256>; @@ -1644,9 +1648,6 @@ pub mod pallet { /// The maximum length, in bytes, that a pools metadata maybe. type MaxMetadataLen: Get; - - /// The maximum number of simultaneous unbonding chunks that can exist per member. - type MaxUnbonding: Get; } /// The sum of funds across all pools. 
diff --git a/substrate/frame/nomination-pools/src/migration.rs b/substrate/frame/nomination-pools/src/migration.rs index eef2a976f1a2550081fee5caa28ea36a9c52ee68..3d68fee1dca4dd8e41d7607724b4e4cca8145b98 100644 --- a/substrate/frame/nomination-pools/src/migration.rs +++ b/substrate/frame/nomination-pools/src/migration.rs @@ -24,7 +24,7 @@ use sp_std::{collections::btree_map::BTreeMap, vec::Vec}; use sp_runtime::TryRuntimeError; /// Exports for versioned migration `type`s for this pallet. -pub mod versioned_migrations { +pub mod versioned { use super::*; /// Migration V6 to V7 wrapped in a [`frame_support::migrations::VersionedMigration`], ensuring diff --git a/substrate/frame/nomination-pools/src/mock.rs b/substrate/frame/nomination-pools/src/mock.rs index d806ba071bd41b5abe5a479c44093612dc3451aa..24bea0b87f22378339cb57c15467d05389ad8bdf 100644 --- a/substrate/frame/nomination-pools/src/mock.rs +++ b/substrate/frame/nomination-pools/src/mock.rs @@ -202,6 +202,11 @@ impl sp_staking::StakingInterface for StakingMock { fn set_current_era(_era: EraIndex) { unimplemented!("method currently not used in testing") } + + #[cfg(feature = "runtime-benchmarks")] + fn max_exposure_page_size() -> sp_staking::Page { + unimplemented!("method currently not used in testing") + } } impl frame_system::Config for Runtime { @@ -247,6 +252,7 @@ impl pallet_balances::Config for Runtime { type FreezeIdentifier = RuntimeFreezeReason; type MaxFreezes = ConstU32<1>; type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = (); } diff --git a/substrate/frame/nomination-pools/test-staking/Cargo.toml b/substrate/frame/nomination-pools/test-staking/Cargo.toml index a44d885d653223b912261a0cef39340d41d3d6ce..f0558f8314258990b3501c48f3a9b193e74c56ee 100644 --- a/substrate/frame/nomination-pools/test-staking/Cargo.toml +++ b/substrate/frame/nomination-pools/test-staking/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dev-dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } -scale-info = { version = "2.0.1", features = ["derive"] } +scale-info = { version = "2.10.0", features = ["derive"] } sp-runtime = { path = "../../../primitives/runtime" } sp-io = { path = "../../../primitives/io" } diff --git a/substrate/frame/nomination-pools/test-staking/src/mock.rs b/substrate/frame/nomination-pools/test-staking/src/mock.rs index 54f578f861e87085902573dabd7439c7b1be850e..0db24e9c244181abaf7ed7b9d10404c909f20bd0 100644 --- a/substrate/frame/nomination-pools/test-staking/src/mock.rs +++ b/substrate/frame/nomination-pools/test-staking/src/mock.rs @@ -88,6 +88,7 @@ impl pallet_balances::Config for Runtime { type FreezeIdentifier = RuntimeFreezeReason; type MaxFreezes = ConstU32<1>; type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = (); } @@ -123,7 +124,7 @@ impl pallet_staking::Config for Runtime { type SessionInterface = (); type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = (); - type MaxNominatorRewardedPerValidator = ConstU32<64>; + type MaxExposurePageSize = ConstU32<64>; type OffendingValidatorsThreshold = (); type ElectionProvider = frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking, ())>; diff --git a/substrate/frame/offences/Cargo.toml b/substrate/frame/offences/Cargo.toml index 2cfbfe6b5d0a1a5822cdd4cc133356c60078a5fc..ac204a7813a50c7acd572d8fd9f664a7d1478c7c 100644 --- a/substrate/frame/offences/Cargo.toml +++ b/substrate/frame/offences/Cargo.toml @@ -15,7 +15,7 @@ targets 
= ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.188", optional = true } frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} diff --git a/substrate/frame/offences/benchmarking/Cargo.toml b/substrate/frame/offences/benchmarking/Cargo.toml index 141ea0cb4662951622df320228c893ddf4b4ab2b..acd8447c054a3a052e250cb492dc43e8c86f8a55 100644 --- a/substrate/frame/offences/benchmarking/Cargo.toml +++ b/substrate/frame/offences/benchmarking/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = { version = "2.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../../benchmarking", default-features = false} frame-election-provider-support = { path = "../../election-provider-support", default-features = false} frame-support = { path = "../../support", default-features = false} diff --git a/substrate/frame/offences/benchmarking/src/mock.rs b/substrate/frame/offences/benchmarking/src/mock.rs index 88f418dd3e2e89e8b846df8c8ee4dfd84e7cda95..1a458ec90d584d477d513199171d7fd983357c56 100644 --- a/substrate/frame/offences/benchmarking/src/mock.rs +++ b/substrate/frame/offences/benchmarking/src/mock.rs @@ -79,6 +79,7 @@ impl pallet_balances::Config for Test { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = (); } @@ -175,7 +176,7 @@ impl pallet_staking::Config for Test { type SessionInterface = Self; type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = Session; - type MaxNominatorRewardedPerValidator = ConstU32<64>; + type MaxExposurePageSize = ConstU32<64>; type OffendingValidatorsThreshold = (); type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; diff --git a/substrate/frame/paged-list/Cargo.toml b/substrate/frame/paged-list/Cargo.toml index eee099fff5f062b54085094050f55990018a7890..4bc3dd6a3c7a844ac2afc78dd811c1155a715e0b 100644 --- a/substrate/frame/paged-list/Cargo.toml +++ b/substrate/frame/paged-list/Cargo.toml @@ -13,8 +13,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive"] } -docify = "0.2.4" -scale-info = { version = "2.0.0", default-features = false, features = ["derive"] } +docify = "0.2.6" +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} diff --git a/substrate/frame/preimage/Cargo.toml b/substrate/frame/preimage/Cargo.toml index e56110c1eaea8b75aa634fe9a61b68876fd6f6ec..a80ccd5a40d29edcff841f1de90b04890e1902e1 100644 --- a/substrate/frame/preimage/Cargo.toml +++ b/substrate/frame/preimage/Cargo.toml @@ -10,7 +10,7 @@ description = "FRAME pallet for storing preimages of 
hashes" [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} diff --git a/substrate/frame/preimage/src/migration.rs b/substrate/frame/preimage/src/migration.rs index 821cb01bbaae5784352bec53eecc89ea65dfd3d6..a86109f892a4f5b84b6aa48e89bc869f521bf74c 100644 --- a/substrate/frame/preimage/src/migration.rs +++ b/substrate/frame/preimage/src/migration.rs @@ -133,7 +133,7 @@ pub mod v1 { None => OldRequestStatus::Requested { deposit: None, count: 1, len: Some(len) }, }, - v0::OldRequestStatus::Requested(count) if count == 0 => { + v0::OldRequestStatus::Requested(0) => { log::error!(target: TARGET, "preimage has counter of zero: {:?}", hash); continue }, diff --git a/substrate/frame/preimage/src/mock.rs b/substrate/frame/preimage/src/mock.rs index 2e7051a99a497008c8339251975ff66508ff15f3..0f966312d9e73560e02e6876f452f4593b76d56d 100644 --- a/substrate/frame/preimage/src/mock.rs +++ b/substrate/frame/preimage/src/mock.rs @@ -82,6 +82,7 @@ impl pallet_balances::Config for Test { type FreezeIdentifier = (); type MaxFreezes = ConstU32<1>; type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = ConstU32<2>; } diff --git a/substrate/frame/proxy/Cargo.toml b/substrate/frame/proxy/Cargo.toml index ba30bd147bd388f5b2f12d7d46b775c87f4787a2..647193fad8af5eec1497ee930406b8d80c5e9838 100644 --- a/substrate/frame/proxy/Cargo.toml +++ b/substrate/frame/proxy/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["max-encoded-len"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} diff --git a/substrate/frame/proxy/README.md b/substrate/frame/proxy/README.md index bfe26d9aefbc4329955301f9cbb820d2b74c6ef7..c52a881c5909793d74ed11c8485f6c667445949b 100644 --- a/substrate/frame/proxy/README.md +++ b/substrate/frame/proxy/README.md @@ -2,7 +2,7 @@ A module allowing accounts to give permission to other accounts to dispatch types of calls from their signed origin. -The accounts to which permission is delegated may be requied to announce the action that they +The accounts to which permission is delegated may be required to announce the action that they wish to execute some duration prior to execution happens. In this case, the target account may reject the announcement and in doing so, veto the execution. 
diff --git a/substrate/frame/ranked-collective/Cargo.toml b/substrate/frame/ranked-collective/Cargo.toml index 9788b1df6302aa6ed8af70b3c1564d1c6d6b41df..236489c54b5bd846194ca84e4db234a7697d0d5d 100644 --- a/substrate/frame/ranked-collective/Cargo.toml +++ b/substrate/frame/ranked-collective/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.16", default-features = false } -scale-info = { version = "2.0.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} diff --git a/substrate/frame/recovery/Cargo.toml b/substrate/frame/recovery/Cargo.toml index 38cecd5c44a4842df9c7f028eed23035dce7131b..8e240546fddd2f05ea7334ec93827fcdc0240175 100644 --- a/substrate/frame/recovery/Cargo.toml +++ b/substrate/frame/recovery/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} diff --git a/substrate/frame/recovery/src/mock.rs b/substrate/frame/recovery/src/mock.rs index 2f2bd866a71986be89d49955c984c31e37fedbdc..bc81d07bec236ad03d4cf8f22c28cff4102db956 100644 --- a/substrate/frame/recovery/src/mock.rs +++ b/substrate/frame/recovery/src/mock.rs @@ -84,6 +84,7 @@ impl pallet_balances::Config for Test { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = (); } diff --git a/substrate/frame/referenda/Cargo.toml b/substrate/frame/referenda/Cargo.toml index ac4f46d12df957821bde1cdc626b7ef613401648..4f53e2bc002a7db2e461a44a6e91c3c22ef18bb5 100644 --- a/substrate/frame/referenda/Cargo.toml +++ b/substrate/frame/referenda/Cargo.toml @@ -17,7 +17,7 @@ assert_matches = { version = "1.5", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.188", features = ["derive"], optional = true } sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false} frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} @@ -42,6 +42,7 @@ std = [ "frame-benchmarking?/std", "frame-support/std", "frame-system/std", + "log/std", "pallet-balances/std", "pallet-preimage/std", "pallet-scheduler/std", diff --git a/substrate/frame/referenda/src/lib.rs b/substrate/frame/referenda/src/lib.rs index be21375f526f034d072318afefca1f99d0e99b3d..8912f9ad217371846fc193f495ff1e49b0d18f00 100644 --- a/substrate/frame/referenda/src/lib.rs +++ 
b/substrate/frame/referenda/src/lib.rs @@ -305,7 +305,7 @@ pub mod pallet { /// The amount placed by the account. amount: BalanceOf, }, - /// A deposit has been slashaed. + /// A deposit has been slashed. DepositSlashed { /// The account who placed the deposit. who: T::AccountId, diff --git a/substrate/frame/referenda/src/mock.rs b/substrate/frame/referenda/src/mock.rs index dce91f7dbfd780fbbac5d53afd0d58249853713a..345accbe268f7014a05375cc9fbf0e2a6be03a89 100644 --- a/substrate/frame/referenda/src/mock.rs +++ b/substrate/frame/referenda/src/mock.rs @@ -116,6 +116,7 @@ impl pallet_balances::Config for Test { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = (); } parameter_types! { diff --git a/substrate/frame/remark/Cargo.toml b/substrate/frame/remark/Cargo.toml index ae791011b160c47d7ec74371acccbd837050df09..ad04140ae9f148ad1160a7eeed130812767603b4 100644 --- a/substrate/frame/remark/Cargo.toml +++ b/substrate/frame/remark/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.188", optional = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} diff --git a/substrate/frame/root-offences/Cargo.toml b/substrate/frame/root-offences/Cargo.toml index fa3cf981d7c7935fa0eab8a72f3168c799a19238..8e6fddb43352d581679b6c33a190f91ff988f21f 100644 --- a/substrate/frame/root-offences/Cargo.toml +++ b/substrate/frame/root-offences/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } pallet-session = { path = "../session", default-features = false , features = [ "historical" ]} pallet-staking = { path = "../staking", default-features = false} diff --git a/substrate/frame/root-offences/src/lib.rs b/substrate/frame/root-offences/src/lib.rs index a93e7ff8487180eaa84f260272ec763b4756a00d..e6bb5bb188199c05c75def4cb19adbe4e5e1fecb 100644 --- a/substrate/frame/root-offences/src/lib.rs +++ b/substrate/frame/root-offences/src/lib.rs @@ -111,7 +111,7 @@ pub mod pallet { .clone() .into_iter() .map(|(o, _)| OffenceDetails:: { - offender: (o.clone(), Staking::::eras_stakers(now, o)), + offender: (o.clone(), Staking::::eras_stakers(now, &o)), reporters: vec![], }) .collect()) diff --git a/substrate/frame/root-offences/src/mock.rs b/substrate/frame/root-offences/src/mock.rs index 2d2a5476149f692dabd4bf2e87fcc9778e0cf72f..82da429e00a58ab317670fb4015cdc809de38d66 100644 --- a/substrate/frame/root-offences/src/mock.rs +++ b/substrate/frame/root-offences/src/mock.rs @@ -123,6 +123,7 @@ impl pallet_balances::Config for Test { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = (); } @@ -178,7 +179,7 @@ impl pallet_staking::Config for Test { type SessionInterface = Self; type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = Session; - 
type MaxNominatorRewardedPerValidator = ConstU32<64>; + type MaxExposurePageSize = ConstU32<64>; type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; diff --git a/substrate/frame/root-testing/Cargo.toml b/substrate/frame/root-testing/Cargo.toml index 8c6c2dc46ef12b7f1f96e2babf5de074222a7c7e..7837289cec591ca8907d3da1cd228329c1e93c63 100644 --- a/substrate/frame/root-testing/Cargo.toml +++ b/substrate/frame/root-testing/Cargo.toml @@ -8,14 +8,13 @@ homepage = "https://substrate.io" repository.workspace = true description = "FRAME root testing pallet" readme = "README.md" -publish = false [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} sp-core = { path = "../../primitives/core", default-features = false} diff --git a/substrate/frame/root-testing/README.md b/substrate/frame/root-testing/README.md index aa231e3ef20a06e1afd7d03939d82b4b06853dc9..a0eeda5cc6aa1ca2164c9690ce276a60ec46eb9c 100644 --- a/substrate/frame/root-testing/README.md +++ b/substrate/frame/root-testing/README.md @@ -1,5 +1,5 @@ # Root Testing Pallet -Pallet that contains extrinsics that can be usefull in testing. +Pallet that contains extrinsics that can be useful in testing. NOTE: This pallet should only be used for testing purposes and should not be used in production runtimes! diff --git a/substrate/frame/root-testing/src/lib.rs b/substrate/frame/root-testing/src/lib.rs index e04c7bfa13d26f5e40a8d5625d234c0a7ae93de7..51fd835409aef4bfe58f8f97c4df88e130c965a5 100644 --- a/substrate/frame/root-testing/src/lib.rs +++ b/substrate/frame/root-testing/src/lib.rs @@ -24,23 +24,32 @@ #![cfg_attr(not(feature = "std"), no_std)] -use frame_support::dispatch::DispatchResult; -use sp_runtime::Perbill; +use frame_support::{dispatch::DispatchResult, sp_runtime::Perbill}; pub use pallet::*; -#[frame_support::pallet] +#[frame_support::pallet(dev_mode)] pub mod pallet { use super::*; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; #[pallet::config] - pub trait Config: frame_system::Config {} + pub trait Config: frame_system::Config { + /// The overarching event type. + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + } #[pallet::pallet] pub struct Pallet(_); + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// Event dispatched when the trigger_defensive extrinsic is called. + DefensiveTestCall, + } + #[pallet::call] impl Pallet { /// A dispatch that will fill the block weight up to the given ratio. 
@@ -50,5 +59,14 @@ pub mod pallet { ensure_root(origin)?; Ok(()) } + + #[pallet::call_index(1)] + #[pallet::weight(0)] + pub fn trigger_defensive(origin: OriginFor) -> DispatchResult { + ensure_root(origin)?; + frame_support::defensive!("root_testing::trigger_defensive was called."); + Self::deposit_event(Event::DefensiveTestCall); + Ok(()) + } } } diff --git a/substrate/frame/safe-mode/Cargo.toml b/substrate/frame/safe-mode/Cargo.toml index a934092ee53dfff06e69a616d8de3a9582497d8d..ac469bb385c93cb045a69eea1ae253f4142eb43e 100644 --- a/substrate/frame/safe-mode/Cargo.toml +++ b/substrate/frame/safe-mode/Cargo.toml @@ -16,7 +16,7 @@ codec = { package = "parity-scale-codec", version = "3.2.2", default-features = frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false} sp-runtime = { path = "../../primitives/runtime", default-features = false} sp-std = { path = "../../primitives/std", default-features = false} @@ -30,6 +30,7 @@ sp-io = { path = "../../primitives/io" } pallet-balances = { path = "../balances" } pallet-utility = { path = "../utility" } pallet-proxy = { path = "../proxy" } +frame-support = { path = "../support", features = ["experimental"] } [features] default = [ "std" ] diff --git a/substrate/frame/safe-mode/src/mock.rs b/substrate/frame/safe-mode/src/mock.rs index 635ee0cfedc025ec4796d95dfbdef2cd50275349..10afe5bd4b5ec4457635ec7a7ac29f6d27f7ba73 100644 --- a/substrate/frame/safe-mode/src/mock.rs +++ b/substrate/frame/safe-mode/src/mock.rs @@ -79,6 +79,7 @@ impl pallet_balances::Config for Test { type MaxReserves = ConstU32<10>; type ReserveIdentifier = [u8; 8]; type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); type MaxHolds = ConstU32<10>; type MaxFreezes = ConstU32<0>; diff --git a/substrate/frame/safe-mode/src/tests.rs b/substrate/frame/safe-mode/src/tests.rs index ca1d7eb1d93400b0bad97c45ac6aee00e15e2aa9..b92c5b87a53081e9d51f916ebd78b1f6d1a4ace7 100644 --- a/substrate/frame/safe-mode/src/tests.rs +++ b/substrate/frame/safe-mode/src/tests.rs @@ -22,32 +22,8 @@ use super::*; use crate::mock::{RuntimeCall, *}; -use frame_support::{assert_err, assert_noop, assert_ok, traits::Currency}; -use sp_runtime::{traits::Dispatchable, TransactionOutcome}; - -/// Do something hypothetically by rolling back any changes afterwards. -/// -/// Returns the original result of the closure. -macro_rules! hypothetically { - ( $e:expr ) => { - frame_support::storage::transactional::with_transaction( - || -> TransactionOutcome> { - sp_runtime::TransactionOutcome::Rollback(Ok($e)) - }, - ) - .expect("Always returning Ok; qed") - }; -} - -/// Assert something to be [*hypothetically*] `Ok` without actually committing it. -/// -/// Reverts any storage changes made by the closure. -macro_rules! hypothetically_ok { - ($e:expr $(, $args:expr)* $(,)?) 
=> { - let result = hypothetically!($e); - assert_ok!(result $(, $args)*); - }; -} +use frame_support::{assert_err, assert_noop, assert_ok, hypothetically_ok, traits::Currency}; +use sp_runtime::traits::Dispatchable; #[test] fn fails_to_filter_calls_to_safe_mode_pallet() { diff --git a/substrate/frame/salary/Cargo.toml b/substrate/frame/salary/Cargo.toml index 89b92fa7e7b8cf4294251e4bcf22cfb71f91342b..6c66f01082d268263b39950a2ebc6dc9dc7b9ebb 100644 --- a/substrate/frame/salary/Cargo.toml +++ b/substrate/frame/salary/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.16", default-features = false } -scale-info = { version = "2.0.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} diff --git a/substrate/frame/scheduler/Cargo.toml b/substrate/frame/scheduler/Cargo.toml index e81b4dbeff78c53e645dd4988b86411193127ac7..6aa81baf7ac69e90db864228ac6004a178251f62 100644 --- a/substrate/frame/scheduler/Cargo.toml +++ b/substrate/frame/scheduler/Cargo.toml @@ -12,7 +12,7 @@ readme = "README.md" [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} @@ -20,7 +20,7 @@ sp-io = { path = "../../primitives/io", default-features = false} sp-runtime = { path = "../../primitives/runtime", default-features = false} sp-std = { path = "../../primitives/std", default-features = false} sp-weights = { path = "../../primitives/weights", default-features = false} -docify = "0.2.4" +docify = "0.2.6" [dev-dependencies] pallet-preimage = { path = "../preimage" } diff --git a/substrate/frame/scored-pool/Cargo.toml b/substrate/frame/scored-pool/Cargo.toml index 5bd23ce22ce59f1147c2ad0fde36f5f4cb6ff304..81707382693fbbdc9d4aa8797b32031ead10b834 100644 --- a/substrate/frame/scored-pool/Cargo.toml +++ b/substrate/frame/scored-pool/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} sp-io = { path = "../../primitives/io", default-features = false} diff --git a/substrate/frame/scored-pool/src/mock.rs b/substrate/frame/scored-pool/src/mock.rs index 591c910488b16c59660d354ed361774bf02ff187..32a66c0cdc5cbd9abc43ac31a5f4f8d22567202b 100644 --- a/substrate/frame/scored-pool/src/mock.rs +++ 
b/substrate/frame/scored-pool/src/mock.rs @@ -90,6 +90,7 @@ impl pallet_balances::Config for Test { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = (); } diff --git a/substrate/frame/session/Cargo.toml b/substrate/frame/session/Cargo.toml index fa72d6c72785d4742e4b8605a7ef79a64e676056..246dec63bbab5250762f89e3ac3db0762601e1b0 100644 --- a/substrate/frame/session/Cargo.toml +++ b/substrate/frame/session/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2.2" log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive", "serde"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} pallet-timestamp = { path = "../timestamp", default-features = false} diff --git a/substrate/frame/session/benchmarking/Cargo.toml b/substrate/frame/session/benchmarking/Cargo.toml index df423e162e9e7a29714c5880057b8552e7c21679..87f08985138d4a3f25aaba9dc526deffd0013466 100644 --- a/substrate/frame/session/benchmarking/Cargo.toml +++ b/substrate/frame/session/benchmarking/Cargo.toml @@ -26,7 +26,7 @@ sp-std = { path = "../../../primitives/std", default-features = false} [dev-dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } -scale-info = "2.1.1" +scale-info = "2.10.0" frame-election-provider-support = { path = "../../election-provider-support" } pallet-balances = { path = "../../balances" } pallet-staking-reward-curve = { path = "../../staking/reward-curve" } diff --git a/substrate/frame/session/benchmarking/src/lib.rs b/substrate/frame/session/benchmarking/src/lib.rs index 722bb14fb56edcccf735b6554a50e8ece9023d2d..84258d84994f45233e47a1ca413cb5b89ccf306a 100644 --- a/substrate/frame/session/benchmarking/src/lib.rs +++ b/substrate/frame/session/benchmarking/src/lib.rs @@ -134,7 +134,7 @@ fn check_membership_proof_setup( use rand::{RngCore, SeedableRng}; let validator = T::Lookup::lookup(who).unwrap(); - let controller = pallet_staking::Pallet::::bonded(validator).unwrap(); + let controller = pallet_staking::Pallet::::bonded(&validator).unwrap(); let keys = { let mut keys = [0u8; 128]; diff --git a/substrate/frame/session/benchmarking/src/mock.rs b/substrate/frame/session/benchmarking/src/mock.rs index 24a821ac87af50377d1ed51a65b5400fb3635404..47c337569a0274dc95d885173ccf27f87ffb71e3 100644 --- a/substrate/frame/session/benchmarking/src/mock.rs +++ b/substrate/frame/session/benchmarking/src/mock.rs @@ -84,6 +84,7 @@ impl pallet_balances::Config for Test { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = (); } @@ -172,7 +173,7 @@ impl pallet_staking::Config for Test { type SessionInterface = Self; type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = Session; - type MaxNominatorRewardedPerValidator = ConstU32<64>; + type MaxExposurePageSize = ConstU32<64>; type OffendingValidatorsThreshold = (); type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; diff --git a/substrate/frame/society/Cargo.toml b/substrate/frame/society/Cargo.toml index 
d38875836aa254a345ecf577146f5ff2c919c208..654447e6893bbf748554bf50b4a6892ffff83e63 100644 --- a/substrate/frame/society/Cargo.toml +++ b/substrate/frame/society/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = { version = "0.4.17", default-features = false } rand_chacha = { version = "0.2", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } sp-std = { path = "../../primitives/std", default-features = false} @@ -40,6 +40,7 @@ std = [ "frame-support-test/std", "frame-support/std", "frame-system/std", + "log/std", "pallet-balances/std", "rand_chacha/std", "scale-info/std", diff --git a/substrate/frame/society/src/migrations.rs b/substrate/frame/society/src/migrations.rs index 553eea1a7952f5f2e88fe43f0e972826abd206fd..a995c9d7be7f2a1c46fee6d222862fcd1f03ae58 100644 --- a/substrate/frame/society/src/migrations.rs +++ b/substrate/frame/society/src/migrations.rs @@ -95,14 +95,13 @@ impl< /// [`VersionUncheckedMigrateToV2`] wrapped in a [`frame_support::migrations::VersionedMigration`], /// ensuring the migration is only performed when on-chain version is 0. -pub type VersionCheckedMigrateToV2 = - frame_support::migrations::VersionedMigration< - 0, - 2, - VersionUncheckedMigrateToV2, - crate::pallet::Pallet, - ::DbWeight, - >; +pub type MigrateToV2 = frame_support::migrations::VersionedMigration< + 0, + 2, + VersionUncheckedMigrateToV2, + crate::pallet::Pallet, + ::DbWeight, +>; pub(crate) mod old { use super::*; diff --git a/substrate/frame/society/src/mock.rs b/substrate/frame/society/src/mock.rs index a318c2e794b7a854a03cd8b6052d2070741f41cf..0bee08236f74a1d1e11ab8969047a5bb6966da44 100644 --- a/substrate/frame/society/src/mock.rs +++ b/substrate/frame/society/src/mock.rs @@ -97,6 +97,7 @@ impl pallet_balances::Config for Test { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = (); } diff --git a/substrate/frame/src/lib.rs b/substrate/frame/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..1a8350405a890e32cb32ce862bc76da2569148eb --- /dev/null +++ b/substrate/frame/src/lib.rs @@ -0,0 +1,347 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! > Made for [![polkadot]](https://polkadot.network) +//! +//! [polkadot]: https://img.shields.io/badge/polkadot-E6007A?style=for-the-badge&logo=polkadot&logoColor=white +//! +//! # FRAME +//! +//! ```no_compile +//! ______ ______ ________ ___ __ __ ______ +//! /_____/\ /_____/\ /_______/\ /__//_//_/\ /_____/\ +//! \::::_\/_\:::_ \ \ \::: _ \ \\::\| \| \ \\::::_\/_ +//! \:\/___/\\:(_) ) )_\::(_) \ \\:. \ \\:\/___/\ +//! 
\:::._\/ \: __ `\ \\:: __ \ \\:.\-/\ \ \\::___\/_ +//! \:\ \ \ \ `\ \ \\:.\ \ \ \\. \ \ \ \\:\____/\ +//! \_\/ \_\/ \_\/ \__\/\__\/ \__\/ \__\/ \_____\/ +//! ``` +//! +//! > **F**ramework for **R**untime **A**ggregation of **M**odularized **E**ntities: Substrate's +//! > State Transition Function (Runtime) Framework. +//! +//! ## Warning: Experimental +//! +//! This crate and all of its content are experimental, and should not yet be used in production. +//! +//! ## Getting Started +//! +//! TODO: link to `developer_hub::polkadot_sdk::frame`. The `developer_hub` hasn't been published +//! yet; this can be updated once it is linkable. + +#![cfg_attr(not(feature = "std"), no_std)] +#![cfg(feature = "experimental")] + +/// Exports the main pallet macro. This can wrap a `mod pallet` and will transform it into +/// being a pallet, e.g. `#[frame::pallet] mod pallet { .. }`. +/// +/// Note that this is not part of the prelude, in order to make it such that the common way to +/// define a pallet is `#[frame::pallet] mod pallet { .. }`, followed by `#[pallet::foo]`, +/// `#[pallet::bar]` inside the mod. +pub use frame_support::pallet; + +/// The logging library of the runtime. Can normally be the classic `log` crate. +pub use log; + +/// The main prelude of FRAME. +/// +/// This prelude should almost always be the first line of code in any pallet or runtime. +/// +/// ``` +/// use frame::prelude::*; +/// +/// // rest of your pallet.. +/// mod pallet {} +/// ``` +pub mod prelude { + /// `frame_system`'s parent crate, which is mandatory in all pallets built with this crate. + /// + /// Conveniently, the keyword `frame_system` is in scope when one uses `use + /// frame::prelude::*`. + #[doc(inline)] + pub use frame_system; + + /// Pallet prelude of `frame-support`. + /// + /// Note: this needs to be revised once `frame-support` evolves. + // `frame-support` will be broken down (see https://github.com/paritytech/polkadot-sdk/issues/127) and its re-exports will + // most likely change. These wildcard re-exports can be optimized once `frame-support` has + // changed. + #[doc(no_inline)] + pub use frame_support::pallet_prelude::*; + + /// Pallet prelude of `frame-system`. + #[doc(no_inline)] + pub use frame_system::pallet_prelude::*; + + /// All of the std alternative types. + #[doc(no_inline)] + pub use sp_std::prelude::*; + + /// All FRAME-relevant derive macros. + #[doc(no_inline)] + pub use super::derive::*; +} + +/// The main testing prelude of FRAME. +/// +/// A test setup typically starts with: +/// +/// ``` +/// use frame::testing_prelude::*; +/// // rest of your test setup. +/// ``` +#[cfg(feature = "std")] +pub mod testing_prelude { + pub use super::prelude::*; + /// Testing includes building a runtime, so we bring in all preludes related to runtimes as + /// well. + pub use super::runtime::testing_prelude::*; + + /// Other helper macros from `frame_support` that help with asserting in tests. + pub use frame_support::{ + assert_err, assert_err_ignore_postinfo, assert_error_encoded_size, assert_noop, assert_ok, + assert_storage_noop, storage_alias, + }; + + pub use frame_system::{self, mocking::*}; + pub use sp_io::TestExternalities as TestState; + pub use sp_std::if_std; +} + +/// All of the types and tools needed to build FRAME-based runtimes. +#[cfg(any(feature = "runtime", feature = "std"))] +pub mod runtime { + /// The main prelude of `FRAME` for building runtimes. 
+ /// + /// A runtime typically starts with: + /// + /// ``` + /// use frame::{prelude::*, runtime::prelude::*}; + /// ``` + pub mod prelude { + /// All of the types related to the FRAME runtime executive. + pub use frame_executive::*; + + /// Macro to amalgamate the runtime into `struct Runtime`. + pub use frame_support::construct_runtime; + + /// Macro to easily derive the `Config` trait of various pallets for `Runtime`. + pub use frame_support::derive_impl; + + /// Macros to easily impl traits such as `Get` for types. + // TODO: using an intra-doc link to `Get` in the line above triggers an ICE :/ + pub use frame_support::{ord_parameter_types, parameter_types}; + + /// Const types that can easily be used in conjunction with `Get`. + pub use frame_support::traits::{ + ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstU128, ConstU16, + ConstU32, ConstU64, ConstU8, + }; + + /// Types to define your runtime version. + pub use sp_version::{create_runtime_str, runtime_version, RuntimeVersion}; + + #[cfg(feature = "std")] + pub use sp_version::NativeVersion; + } + + /// Types and traits for runtimes that implement runtime APIs. + /// + /// A testing runtime should not need this. + /// + /// A non-testing runtime should have this enabled, as such: + /// + /// ``` + /// use frame::runtime::{prelude::*, apis::{*,}}; + /// ``` + // TODO: This is because of wildcard imports, and it should not be needed once we can avoid + // that. Imports like that are needed because we seem to need some unknown types in the macro + // expansion. See `sp_session::runtime_api::*;` as one example. All runtime API declarations should be + // moved to a dedicated file similarly. + #[allow(ambiguous_glob_reexports)] + pub mod apis { + // Types often used in the runtime APIs. + pub use sp_core::OpaqueMetadata; + pub use sp_inherents::{CheckInherentsResult, InherentData}; + pub use sp_runtime::ApplyExtrinsicResult; + + /// Macro to implement runtime APIs. + pub use sp_api::impl_runtime_apis; + + pub use frame_system_rpc_runtime_api::*; + pub use sp_api::{self, *}; + pub use sp_block_builder::*; + pub use sp_consensus_aura::*; + pub use sp_consensus_grandpa::*; + pub use sp_offchain::*; + pub use sp_session::runtime_api::*; + pub use sp_transaction_pool::runtime_api::*; + } + + /// A set of opinionated type aliases commonly used in runtimes. + /// + /// This is one set of opinionated types. They are compatible with one another, but are not + /// guaranteed to work if you start tweaking a portion. + /// + /// Some noteworthy opinions in this prelude: + /// + /// - `u32` block number. + /// - [`sp_runtime::MultiAddress`] and [`sp_runtime::MultiSignature`] are used as the account id + /// and signature types. This implies that this prelude can possibly be used with an + /// "account-index" system (e.g. `pallet-indices`). And, in any case, it should be paired with + /// `AccountIdLookup` in [`frame_system::Config::Lookup`]. + pub mod types_common { + use frame_system::Config as SysConfig; + use sp_runtime::{generic, traits, OpaqueExtrinsic}; + + /// A signature type capable of handling multiple crypto-schemes. + pub type Signature = sp_runtime::MultiSignature; + + /// The corresponding account-id type of [`Signature`]. + pub type AccountId = + <::Signer as traits::IdentifyAccount>::AccountId; + + /// The block-number type, which should be fed into [`frame_system::Config`]. + pub type BlockNumber = u32; + + /// TODO: Ideally we want the hashing type to be equal to SysConfig::Hashing? 
+ type HeaderInner = generic::Header; + + // NOTE: `AccountIndex` is provided for future compatibility, if you want to introduce + // something like `pallet-indices`. + type ExtrinsicInner = generic::UncheckedExtrinsic< + sp_runtime::MultiAddress, + ::RuntimeCall, + Signature, + Extra, + >; + + /// The block type, which should be fed into [`frame_system::Config`]. + /// + /// Should be parameterized with `T: frame_system::Config` and a tuple of `SignedExtension`. + /// When in doubt, use [`SystemSignedExtensionsOf`]. + // Note that this cannot be dependent on `T` for block-number because it would lead to a + // circular dependency (self-referential generics). + pub type BlockOf = generic::Block>; + + /// The opaque block type. This is the same [`BlockOf`], but it has + /// [`sp_runtime::OpaqueExtrinsic`] as its final extrinsic type. + /// + /// This should be provided to the client side as the extrinsic type. + pub type OpaqueBlock = generic::Block; + + /// Default set of signed extensions exposed from the `frame_system`. + /// + /// crucially, this does NOT contain any tx-payment extension. + pub type SystemSignedExtensionsOf = ( + frame_system::CheckNonZeroSender, + frame_system::CheckSpecVersion, + frame_system::CheckTxVersion, + frame_system::CheckGenesis, + frame_system::CheckEra, + frame_system::CheckNonce, + frame_system::CheckWeight, + ); + } + + /// The main prelude of FRAME for building runtimes, and in the context of testing. + /// + /// counter part of `runtime::prelude`. + #[cfg(feature = "std")] + pub mod testing_prelude { + pub use super::prelude::*; + pub use sp_core::storage::Storage; + pub use sp_runtime::BuildStorage; + } +} + +/// All traits often used in FRAME pallets. +/// +/// Note that types implementing these traits can also be found in this module. +// TODO: `Hash` and `Bounded` are defined multiple times; should be fixed once these two crates are +// cleaned up. +#[allow(ambiguous_glob_reexports)] +pub mod traits { + pub use frame_support::traits::*; + pub use sp_runtime::traits::*; +} + +/// The arithmetic types used for safe math. +pub mod arithmetic { + pub use sp_arithmetic::{traits::*, *}; +} + +/// Low level primitive types used in FRAME pallets. +pub mod primitives { + pub use sp_core::{H160, H256, H512, U256, U512}; + pub use sp_runtime::traits::{BlakeTwo256, Hash, Keccak256}; +} + +/// All derive macros used in frame. +/// +/// This is already part of the [`prelude`]. +pub mod derive { + pub use frame_support::{ + CloneNoBound, DebugNoBound, DefaultNoBound, EqNoBound, PartialEqNoBound, + RuntimeDebugNoBound, + }; + pub use parity_scale_codec::{Decode, Encode}; + pub use scale_info::TypeInfo; + pub use sp_runtime::RuntimeDebug; + pub use sp_std::fmt::Debug; +} + +/// Access to all of the dependencies of this crate. In case the re-exports are not enough, this +/// module can be used. +/// +/// Any time one uses this module to access a dependency, you can have a moment to think about +/// whether this item could have been placed in any of the other modules and preludes in this crate. +/// In most cases, hopefully the answer is yes. +pub mod deps { + // TODO: It would be great to somehow instruct RA to prefer *not* suggesting auto-imports from + // these. For example, we prefer `frame::derive::CloneNoBound` rather than + // `frame::deps::frame_support::CloneNoBound`. 
+ pub use frame_support; + pub use frame_system; + + pub use sp_arithmetic; + pub use sp_core; + pub use sp_io; + pub use sp_runtime; + pub use sp_std; + + pub use parity_scale_codec as codec; + pub use scale_info; + + #[cfg(feature = "runtime")] + pub use frame_executive; + #[cfg(feature = "runtime")] + pub use sp_api; + #[cfg(feature = "runtime")] + pub use sp_block_builder; + #[cfg(feature = "runtime")] + pub use sp_consensus_aura; + #[cfg(feature = "runtime")] + pub use sp_consensus_grandpa; + #[cfg(feature = "runtime")] + pub use sp_inherents; + #[cfg(feature = "runtime")] + pub use sp_offchain; + #[cfg(feature = "runtime")] + pub use sp_version; +} diff --git a/substrate/frame/staking/CHANGELOG.md b/substrate/frame/staking/CHANGELOG.md new file mode 100644 index 0000000000000000000000000000000000000000..719aa388755fce3523cadb273bb542e48076d3f5 --- /dev/null +++ b/substrate/frame/staking/CHANGELOG.md @@ -0,0 +1,27 @@ +# Changelog + +All notable changes and migrations to pallet-staking will be documented in this file. + +The format is loosely based +on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). We maintain a +single integer version number for staking pallet to keep track of all storage +migrations. + +## [v14] + +### Added + +- New item `ErasStakersPaged` that keeps up to `MaxExposurePageSize` + individual nominator exposures by era, validator and page. +- New item `ErasStakersOverview` complementary to `ErasStakersPaged` which keeps + state of own and total stake of the validator across pages. +- New item `ClaimedRewards` to support paged rewards payout. + +### Deprecated + +- `ErasStakers` and `ErasStakersClipped` is deprecated, will not be used any longer for the exposures of the new era + post v14 and can be removed after 84 eras once all the exposures are stale. +- Field `claimed_rewards` in item `Ledger` is renamed + to `legacy_claimed_rewards` and can be removed after 84 eras. + +[v14]: https://github.com/paritytech/substrate/pull/13498 diff --git a/substrate/frame/staking/Cargo.toml b/substrate/frame/staking/Cargo.toml index 5cd134471ebcd61731d943adee66199d0c6757d4..c5cac9fefa792cc648e13312827c8ffbc5079577 100644 --- a/substrate/frame/staking/Cargo.toml +++ b/substrate/frame/staking/Cargo.toml @@ -17,7 +17,7 @@ serde = { version = "1.0.188", default-features = false, features = ["alloc", "d codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive", "serde"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } sp-io = { path = "../../primitives/io", default-features = false} sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } sp-staking = { path = "../../primitives/staking", default-features = false, features = ["serde"] } diff --git a/substrate/frame/staking/README.md b/substrate/frame/staking/README.md index 387b94b6a681f0e2dacd5c5644ee3795ea8019e7..8c91cfcaa7fa386a2e7f403e7a7854e196888910 100644 --- a/substrate/frame/staking/README.md +++ b/substrate/frame/staking/README.md @@ -14,6 +14,7 @@ funds are rewarded under normal operation but are held at pain of _slash_ (expro be found not to be discharging its duties properly. ### Terminology + - Staking: The process of locking up funds for some time, placing them at risk of slashing (loss) in order to become a @@ -29,6 +30,7 @@ be found not to be discharging its duties properly. 
- Slash: The punishment of a staker by reducing its funds. ### Goals + The staking system in Substrate NPoS is designed to make the following possible: @@ -75,7 +77,7 @@ An account can become a validator candidate via the #### Nomination -A **nominator** does not take any _direct_ role in maintaining the network, instead, it votes on a set of validators to +A **nominator** does not take any _direct_ role in maintaining the network, instead, it votes on a set of validators to be elected. Once interest in nomination is stated by an account, it takes effect at the next election round. The funds in the nominator's stash account indicate the _weight_ of its vote. Both the rewards and any punishment that a validator earns are shared between the validator and its nominators. This rule incentivizes the nominators to NOT vote for the @@ -90,10 +92,12 @@ An account can become a nominator via the The **reward and slashing** procedure is the core of the Staking module, attempting to _embrace valid behavior_ while _punishing any misbehavior or lack of availability_. -Rewards must be claimed for each era before it gets too old by `$HISTORY_DEPTH` using the `payout_stakers` call. Any -account can call `payout_stakers`, which pays the reward to the validator as well as its nominators. Only the -[`Config::MaxNominatorRewardedPerValidator`] biggest stakers can claim their reward. This is to limit the i/o cost to -mutate storage for each nominator's account. +Rewards must be claimed for each era before it gets too old by `$HISTORY_DEPTH` using the `payout_stakers` call. When a +validator has more than [`Config::MaxExposurePageSize`] nominators, nominators are divided into pages with each call to +`payout_stakers` paying rewards to one page of nominators in a sequential and ascending manner. Any account can also +call `payout_stakers_by_page` to explicitly pay rewards for a given page. This means at most +[`Config::MaxExposurePageSize`] nominators are rewarded per call. This is to limit the i/o cost to mutate storage for +each nominator's account. Slashing can occur at any point in time, once misbehavior is reported. Once slashing is determined, a value is deducted from the balance of the validator and all the nominators who voted for this validator (values are deducted from the @@ -173,11 +177,13 @@ such: ```nocompile staker_payout = yearly_inflation(npos_token_staked / total_tokens) * total_tokens / era_per_year ``` + This payout is used to reward stakers as defined in the next section ```nocompile remaining_payout = max_yearly_inflation * total_tokens / era_per_year - staker_payout ``` + The remaining reward is sent to the configurable end-point [`T::RewardRemainder`](https://docs.rs/pallet-staking/latest/pallet_staking/trait.Config.html#associatedtype.RewardRemainder). 
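To make the paged payout flow described above concrete, here is a minimal, hypothetical sketch (not part of this diff) of how a caller might claim every page of a validator's reward for one era. It assumes a runtime with this pallet configured; `Staking`, `RuntimeOrigin`, `caller`, `validator_stash`, `era` and `page_count` are illustrative placeholders, with `page_count` obtainable from the `eras_stakers_page_count` runtime API added elsewhere in this PR.

```nocompile
// Hypothetical sketch: pay out all exposure pages of `validator_stash` for `era`.
for page in 0..page_count {
    // Any signed origin may trigger the payout; each call settles one page of
    // up to `MaxExposurePageSize` nominators.
    assert_ok!(Staking::payout_stakers_by_page(
        RuntimeOrigin::signed(caller),
        validator_stash,
        era,
        page,
    ));
}
// Alternatively, calling `payout_stakers(origin, validator_stash, era)` repeatedly
// pays out the remaining pages one at a time in ascending order.
```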
diff --git a/substrate/frame/staking/reward-curve/src/lib.rs b/substrate/frame/staking/reward-curve/src/lib.rs index ecf3af55379145cc85dea57cc1be998dc4d07079..1986357edabe20cca94d0d0e26e1bee54e08a0ac 100644 --- a/substrate/frame/staking/reward-curve/src/lib.rs +++ b/substrate/frame/staking/reward-curve/src/lib.rs @@ -81,11 +81,12 @@ pub fn build(input: TokenStream) -> TokenStream { let imports = match crate_name("sp-runtime") { Ok(FoundCrate::Itself) => quote!( - extern crate sp_runtime as _sp_runtime; + #[doc(hidden)] + pub use sp_runtime as _sp_runtime; ), Ok(FoundCrate::Name(sp_runtime)) => { let ident = syn::Ident::new(&sp_runtime, Span::call_site()); - quote!( extern crate #ident as _sp_runtime; ) + quote!( #[doc(hidden)] pub use #ident as _sp_runtime; ) }, Err(e) => syn::Error::new(Span::call_site(), e).to_compile_error(), }; diff --git a/substrate/frame/staking/runtime-api/Cargo.toml b/substrate/frame/staking/runtime-api/Cargo.toml index 5f49df254ceb3f521d665948f9d86cb3f4e1c40b..746b463b8ce25d1f72980e269e1fa5e57dab2237 100644 --- a/substrate/frame/staking/runtime-api/Cargo.toml +++ b/substrate/frame/staking/runtime-api/Cargo.toml @@ -14,8 +14,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -sp-api = { path = "../../../primitives/api", default-features = false} +sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/api" } +sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/staking" } [features] default = [ "std" ] -std = [ "codec/std", "sp-api/std" ] +std = [ "codec/std", "sp-api/std", "sp-staking/std" ] diff --git a/substrate/frame/staking/runtime-api/src/lib.rs b/substrate/frame/staking/runtime-api/src/lib.rs index c669d222ec68b2ec6305cbffc3d0003acdc78c94..b04c383a077dc8fcb1c70b2fe6ff6117837c2867 100644 --- a/substrate/frame/staking/runtime-api/src/lib.rs +++ b/substrate/frame/staking/runtime-api/src/lib.rs @@ -22,11 +22,15 @@ use codec::Codec; sp_api::decl_runtime_apis! { - pub trait StakingApi + pub trait StakingApi where Balance: Codec, + AccountId: Codec, { /// Returns the nominations quota for a nominator with a given balance. fn nominations_quota(balance: Balance) -> u32; + + /// Returns the page count of exposures for a validator in a given era. + fn eras_stakers_page_count(era: sp_staking::EraIndex, account: AccountId) -> sp_staking::Page; } } diff --git a/substrate/frame/staking/src/benchmarking.rs b/substrate/frame/staking/src/benchmarking.rs index ce5c35524c747af4fa9face89a2d79be3c90e301..6f60c4909f4d555a352e01a876211b990a6dadc8 100644 --- a/substrate/frame/staking/src/benchmarking.rs +++ b/substrate/frame/staking/src/benchmarking.rs @@ -29,7 +29,7 @@ use frame_support::{ }; use sp_runtime::{ traits::{Bounded, One, StaticLookup, TrailingZeroInput, Zero}, - Perbill, Percent, + Perbill, Percent, Saturating, }; use sp_staking::{currency_to_vote::CurrencyToVote, SessionIndex}; use sp_std::prelude::*; @@ -464,16 +464,28 @@ benchmarks! 
{ } set_payee { - let (stash, controller) = create_stash_controller::(USER_SEED, 100, Default::default())?; + let (stash, controller) = create_stash_controller::(USER_SEED, 100, RewardDestination::Staked)?; assert_eq!(Payee::::get(&stash), RewardDestination::Staked); whitelist_account!(controller); - }: _(RawOrigin::Signed(controller), RewardDestination::Controller) + }: _(RawOrigin::Signed(controller.clone()), RewardDestination::Account(controller.clone())) verify { - assert_eq!(Payee::::get(&stash), RewardDestination::Controller); + assert_eq!(Payee::::get(&stash), RewardDestination::Account(controller)); + } + + update_payee { + let (stash, controller) = create_stash_controller::(USER_SEED, 100, RewardDestination::Staked)?; + Payee::::insert(&stash, { + #[allow(deprecated)] + RewardDestination::Controller + }); + whitelist_account!(controller); + }: _(RawOrigin::Signed(controller.clone()), controller.clone()) + verify { + assert_eq!(Payee::::get(&stash), RewardDestination::Account(controller)); } set_controller { - let (stash, ctlr) = create_unique_stash_controller::(9000, 100, Default::default(), false)?; + let (stash, ctlr) = create_unique_stash_controller::(9000, 100, RewardDestination::Staked, false)?; // ensure `ctlr` is the currently stored controller. assert!(!Ledger::::contains_key(&stash)); assert!(Ledger::::contains_key(&ctlr)); @@ -551,45 +563,11 @@ benchmarks! { assert_eq!(UnappliedSlashes::::get(&era).len(), (MAX_SLASHES - s) as usize); } - payout_stakers_dead_controller { - let n in 0 .. T::MaxNominatorRewardedPerValidator::get() as u32; - let (validator, nominators) = create_validator_with_nominators::( - n, - T::MaxNominatorRewardedPerValidator::get() as u32, - true, - true, - RewardDestination::Controller, - )?; - - let current_era = CurrentEra::::get().unwrap(); - // set the commission for this particular era as well. - >::insert(current_era, validator.clone(), >::validators(&validator)); - - let caller = whitelisted_caller(); - let validator_controller = >::get(&validator).unwrap(); - let balance_before = T::Currency::free_balance(&validator_controller); - for (_, controller) in &nominators { - let balance = T::Currency::free_balance(controller); - ensure!(balance.is_zero(), "Controller has balance, but should be dead."); - } - }: payout_stakers(RawOrigin::Signed(caller), validator, current_era) - verify { - let balance_after = T::Currency::free_balance(&validator_controller); - ensure!( - balance_before < balance_after, - "Balance of validator controller should have increased after payout.", - ); - for (_, controller) in &nominators { - let balance = T::Currency::free_balance(controller); - ensure!(!balance.is_zero(), "Payout not given to controller."); - } - } - payout_stakers_alive_staked { - let n in 0 .. T::MaxNominatorRewardedPerValidator::get() as u32; + let n in 0 .. T::MaxExposurePageSize::get() as u32; let (validator, nominators) = create_validator_with_nominators::( n, - T::MaxNominatorRewardedPerValidator::get() as u32, + T::MaxExposurePageSize::get() as u32, false, true, RewardDestination::Staked, @@ -684,13 +662,10 @@ benchmarks! 
{ let stash = scenario.origin_stash1; add_slashing_spans::(&stash, s); - let l = StakingLedger { - stash: stash.clone(), - active: T::Currency::minimum_balance() - One::one(), - total: T::Currency::minimum_balance() - One::one(), - unlocking: Default::default(), - claimed_rewards: Default::default(), - }; + let l = StakingLedger::::new( + stash.clone(), + T::Currency::minimum_balance() - One::one(), + ); Ledger::::insert(&controller, l); assert!(Bonded::::contains_key(&stash)); @@ -762,7 +737,7 @@ benchmarks! { let caller: T::AccountId = whitelisted_caller(); let origin = RawOrigin::Signed(caller); let calls: Vec<_> = payout_calls_arg.iter().map(|arg| - Call::::payout_stakers { validator_stash: arg.0.clone(), era: arg.1 }.encode() + Call::::payout_stakers_by_page { validator_stash: arg.0.clone(), era: arg.1, page: 0 }.encode() ).collect(); }: { for call in calls { @@ -986,7 +961,7 @@ mod tests { let (validator_stash, nominators) = create_validator_with_nominators::( n, - <::MaxNominatorRewardedPerValidator as Get<_>>::get(), + <::MaxExposurePageSize as Get<_>>::get(), false, false, RewardDestination::Staked, @@ -998,10 +973,11 @@ mod tests { let current_era = CurrentEra::::get().unwrap(); let original_free_balance = Balances::free_balance(&validator_stash); - assert_ok!(Staking::payout_stakers( + assert_ok!(Staking::payout_stakers_by_page( RuntimeOrigin::signed(1337), validator_stash, - current_era + current_era, + 0 )); let new_free_balance = Balances::free_balance(&validator_stash); @@ -1016,7 +992,7 @@ mod tests { let (validator_stash, _nominators) = create_validator_with_nominators::( n, - <::MaxNominatorRewardedPerValidator as Get<_>>::get(), + <::MaxExposurePageSize as Get<_>>::get(), false, false, RewardDestination::Staked, diff --git a/substrate/frame/staking/src/ledger.rs b/substrate/frame/staking/src/ledger.rs new file mode 100644 index 0000000000000000000000000000000000000000..84bb4d167dcb0118ed0f933d044f76d74bc10560 --- /dev/null +++ b/substrate/frame/staking/src/ledger.rs @@ -0,0 +1,254 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! A Ledger implementation for stakers. +//! +//! A [`StakingLedger`] encapsulates all the state and logic related to the stake of bonded +//! stakers, namely, it handles the following storage items: +//! * [`Bonded`]: mutates and reads the state of the controller <> stash bond map (to be deprecated +//! soon); +//! * [`Ledger`]: mutates and reads the state of all the stakers. The [`Ledger`] storage item stores +//! instances of [`StakingLedger`] keyed by the staker's controller account and should be mutated +//! and read through the [`StakingLedger`] API; +//! * [`Payee`]: mutates and reads the reward destination preferences for a bonded stash. +//! * Staking locks: mutates the locks for staking. +//! +//! NOTE: All the storage operations related to the staking ledger (both reads and writes) *MUST* be +//! 
performed through the methods exposed by the [`StakingLedger`] implementation in order to ensure +//! state consistency. + +use frame_support::{ + defensive, + traits::{LockableCurrency, WithdrawReasons}, +}; +use sp_staking::StakingAccount; +use sp_std::prelude::*; + +use crate::{ + BalanceOf, Bonded, Config, Error, Ledger, Payee, RewardDestination, StakingLedger, STAKING_ID, +}; + +#[cfg(any(feature = "runtime-benchmarks", test))] +use sp_runtime::traits::Zero; + +impl StakingLedger { + #[cfg(any(feature = "runtime-benchmarks", test))] + pub fn default_from(stash: T::AccountId) -> Self { + Self { + stash: stash.clone(), + total: Zero::zero(), + active: Zero::zero(), + unlocking: Default::default(), + legacy_claimed_rewards: Default::default(), + controller: Some(stash), + } + } + + /// Returns a new instance of a staking ledger. + /// + /// The [`Ledger`] storage is not mutated. In order to store, `StakingLedger::update` must be + /// called on the returned staking ledger. + /// + /// Note: as the controller accounts are being deprecated, the stash account is the same as the + /// controller account. + pub fn new(stash: T::AccountId, stake: BalanceOf) -> Self { + Self { + stash: stash.clone(), + active: stake, + total: stake, + unlocking: Default::default(), + legacy_claimed_rewards: Default::default(), + // controllers are deprecated and mapped 1-1 to stashes. + controller: Some(stash), + } + } + + /// Returns the paired account, if any. + /// + /// A "pair" refers to the tuple (stash, controller). If the input is a + /// [`StakingAccount::Stash`] variant, its pair account will be of type + /// [`StakingAccount::Controller`] and vice-versa. + /// + /// This method is meant to abstract from the runtime development the difference between stash + /// and controller. This will be deprecated once the controller is fully deprecated as well. + pub(crate) fn paired_account(account: StakingAccount) -> Option { + match account { + StakingAccount::Stash(stash) => >::get(stash), + StakingAccount::Controller(controller) => + >::get(&controller).map(|ledger| ledger.stash), + } + } + + /// Returns whether a given account is bonded. + pub(crate) fn is_bonded(account: StakingAccount) -> bool { + match account { + StakingAccount::Stash(stash) => >::contains_key(stash), + StakingAccount::Controller(controller) => >::contains_key(controller), + } + } + + /// Returns a staking ledger, if it is bonded and it exists in storage. + /// + /// This getter can be called with either a controller or stash account, provided that the + /// account is properly wrapped in the respective [`StakingAccount`] variant. This is meant to + /// abstract the concept of controller/stash accounts from the caller. + pub(crate) fn get(account: StakingAccount) -> Result, Error> { + let controller = match account { + StakingAccount::Stash(stash) => >::get(stash).ok_or(Error::::NotStash), + StakingAccount::Controller(controller) => Ok(controller), + }?; + + >::get(&controller) + .map(|mut ledger| { + ledger.controller = Some(controller.clone()); + ledger + }) + .ok_or(Error::::NotController) + } + + /// Returns the reward destination of a staking ledger, stored in [`Payee`]. + /// + /// Note: if the stash is not bonded and/or does not have an entry in [`Payee`], it returns the + /// default reward destination. 
+ pub(crate) fn reward_destination( + account: StakingAccount, + ) -> RewardDestination { + let stash = match account { + StakingAccount::Stash(stash) => Some(stash), + StakingAccount::Controller(controller) => + Self::paired_account(StakingAccount::Controller(controller)), + }; + + if let Some(stash) = stash { + >::get(stash) + } else { + defensive!("fetched reward destination from unbonded stash {}", stash); + RewardDestination::default() + } + } + + /// Returns the controller account of a staking ledger. + /// + /// Note: it will fallback into querying the [`Bonded`] storage with the ledger stash if the + /// controller is not set in `self`, which most likely means that self was fetched directly from + /// [`Ledger`] instead of through the methods exposed in [`StakingLedger`]. If the ledger does + /// not exist in storage, it returns `None`. + pub(crate) fn controller(&self) -> Option { + self.controller.clone().or_else(|| { + defensive!("fetched a controller on a ledger instance without it."); + Self::paired_account(StakingAccount::Stash(self.stash.clone())) + }) + } + + /// Inserts/updates a staking ledger account. + /// + /// Bonds the ledger if it is not bonded yet, signalling that this is a new ledger. The staking + /// locks of the stash account are updated accordingly. + /// + /// Note: To ensure lock consistency, all the [`Ledger`] storage updates should be made through + /// this helper function. + pub(crate) fn update(self) -> Result<(), Error> { + if !>::contains_key(&self.stash) { + return Err(Error::::NotStash) + } + + T::Currency::set_lock(STAKING_ID, &self.stash, self.total, WithdrawReasons::all()); + Ledger::::insert( + &self.controller().ok_or_else(|| { + defensive!("update called on a ledger that is not bonded."); + Error::::NotController + })?, + &self, + ); + + Ok(()) + } + + /// Bonds a ledger. + /// + /// It sets the reward preferences for the bonded stash. + pub(crate) fn bond(self, payee: RewardDestination) -> Result<(), Error> { + if >::contains_key(&self.stash) { + Err(Error::::AlreadyBonded) + } else { + >::insert(&self.stash, payee); + >::insert(&self.stash, &self.stash); + self.update() + } + } + + /// Sets the ledger Payee. + pub(crate) fn set_payee(self, payee: RewardDestination) -> Result<(), Error> { + if !>::contains_key(&self.stash) { + Err(Error::::NotStash) + } else { + >::insert(&self.stash, payee); + Ok(()) + } + } + + /// Clears all data related to a staking ledger and its bond in both [`Ledger`] and [`Bonded`] + /// storage items and updates the stash staking lock. + pub(crate) fn kill(stash: &T::AccountId) -> Result<(), Error> { + let controller = >::get(stash).ok_or(Error::::NotStash)?; + + >::get(&controller).ok_or(Error::::NotController).map(|ledger| { + T::Currency::remove_lock(STAKING_ID, &ledger.stash); + Ledger::::remove(controller); + + >::remove(&stash); + >::remove(&stash); + + Ok(()) + })? + } +} + +#[cfg(test)] +use { + crate::UnlockChunk, + codec::{Decode, Encode, MaxEncodedLen}, + scale_info::TypeInfo, +}; + +// This structs makes it easy to write tests to compare staking ledgers fetched from storage. This +// is required because the controller field is not stored in storage and it is private. 
+#[cfg(test)] +#[derive(frame_support::DebugNoBound, Clone, Encode, Decode, TypeInfo, MaxEncodedLen)] +pub struct StakingLedgerInspect { + pub stash: T::AccountId, + #[codec(compact)] + pub total: BalanceOf, + #[codec(compact)] + pub active: BalanceOf, + pub unlocking: frame_support::BoundedVec>, T::MaxUnlockingChunks>, + pub legacy_claimed_rewards: frame_support::BoundedVec, +} + +#[cfg(test)] +impl PartialEq> for StakingLedger { + fn eq(&self, other: &StakingLedgerInspect) -> bool { + self.stash == other.stash && + self.total == other.total && + self.active == other.active && + self.unlocking == other.unlocking && + self.legacy_claimed_rewards == other.legacy_claimed_rewards + } +} + +#[cfg(test)] +impl codec::EncodeLike> for StakingLedgerInspect {} diff --git a/substrate/frame/staking/src/lib.rs b/substrate/frame/staking/src/lib.rs index dcf57a4643233c4d199bb5add1a2f57e7ef95eee..2cfee38ab4fa61d5a29e9aa8b1dd9783ce1500a7 100644 --- a/substrate/frame/staking/src/lib.rs +++ b/substrate/frame/staking/src/lib.rs @@ -91,7 +91,7 @@ //! #### Nomination //! //! A **nominator** does not take any _direct_ role in maintaining the network, instead, it votes on -//! a set of validators to be elected. Once interest in nomination is stated by an account, it +//! a set of validators to be elected. Once interest in nomination is stated by an account, it //! takes effect at the next election round. The funds in the nominator's stash account indicate the //! _weight_ of its vote. Both the rewards and any punishment that a validator earns are shared //! between the validator and its nominators. This rule incentivizes the nominators to NOT vote for @@ -112,11 +112,15 @@ //! The **reward and slashing** procedure is the core of the Staking pallet, attempting to _embrace //! valid behavior_ while _punishing any misbehavior or lack of availability_. //! -//! Rewards must be claimed for each era before it gets too old by `$HISTORY_DEPTH` using the -//! `payout_stakers` call. Any account can call `payout_stakers`, which pays the reward to the -//! validator as well as its nominators. Only the [`Config::MaxNominatorRewardedPerValidator`] -//! biggest stakers can claim their reward. This is to limit the i/o cost to mutate storage for each -//! nominator's account. +//! Rewards must be claimed for each era before it gets too old by +//! [`HistoryDepth`](`Config::HistoryDepth`) using the `payout_stakers` call. Any account can call +//! `payout_stakers`, which pays the reward to the validator as well as its nominators. Only +//! [`Config::MaxExposurePageSize`] nominator rewards can be claimed in a single call. When the +//! number of nominators exceeds [`Config::MaxExposurePageSize`], then the exposed nominators are +//! stored in multiple pages, with each page containing up to +//! [`Config::MaxExposurePageSize`] nominators. To pay out all nominators, `payout_stakers` must be +//! called once for each available page. Paging exists to limit the i/o cost to mutate storage for +//! each nominator's account. //! //! Slashing can occur at any point in time, once misbehavior is reported. Once slashing is //! determined, a value is deducted from the balance of the validator and all the nominators who @@ -224,21 +228,21 @@ //! The validator can declare an amount, named [`commission`](ValidatorPrefs::commission), that does //! not get shared with the nominators at each reward payout through its [`ValidatorPrefs`]. This //! value gets deducted from the total reward that is paid to the validator and its nominators. The -//! 
remaining portion is split pro rata among the validator and the top -//! [`Config::MaxNominatorRewardedPerValidator`] nominators that nominated the validator, -//! proportional to the value staked behind the validator (_i.e._ dividing the +//! remaining portion is split pro rata among the validator and the nominators that nominated the +//! validator, proportional to the value staked behind the validator (_i.e._ dividing the //! [`own`](Exposure::own) or [`others`](Exposure::others) by [`total`](Exposure::total) in -//! [`Exposure`]). Note that the pro rata division of rewards uses the total exposure behind the -//! validator, *not* just the exposure of the validator and the top -//! [`Config::MaxNominatorRewardedPerValidator`] nominators. +//! [`Exposure`]). Note that payouts are made in pages with each page capped at +//! [`Config::MaxExposurePageSize`] nominators. The distribution of nominators across +//! pages may be unsorted. The total commission is paid out proportionally across pages based on the +//! total stake of the page. //! //! All entities who receive a reward have the option to choose their reward destination through the //! [`Payee`] storage item (see //! [`set_payee`](Call::set_payee)), to be one of the following: //! -//! - Controller account, (obviously) not increasing the staked value. //! - Stash account, not increasing the staked value. //! - Stash account, also increasing the staked value. +//! - Any other account, sent as free balance. //! //! ### Additional Fund Management Operations //! @@ -294,6 +298,7 @@ mod tests; pub mod election_size_tracker; pub mod inflation; +pub mod ledger; pub mod migrations; pub mod slashing; pub mod weights; @@ -302,26 +307,31 @@ mod pallet; use codec::{Decode, Encode, HasCompact, MaxEncodedLen}; use frame_support::{ - traits::{ConstU32, Currency, Defensive, Get}, + defensive, defensive_assert, + traits::{ + ConstU32, Currency, Defensive, DefensiveMax, DefensiveSaturating, Get, LockIdentifier, + }, weights::Weight, BoundedVec, CloneNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, }; use scale_info::TypeInfo; use sp_runtime::{ curve::PiecewiseLinear, - traits::{AtLeast32BitUnsigned, Convert, Saturating, StaticLookup, Zero}, - Perbill, Perquintill, Rounding, RuntimeDebug, + traits::{AtLeast32BitUnsigned, Convert, StaticLookup, Zero}, + Perbill, Perquintill, Rounding, RuntimeDebug, Saturating, }; -pub use sp_staking::StakerStatus; use sp_staking::{ offence::{Offence, OffenceError, ReportOffence}, - EraIndex, OnStakingUpdate, SessionIndex, + EraIndex, ExposurePage, OnStakingUpdate, Page, PagedExposureMetadata, SessionIndex, + StakingAccount, }; +pub use sp_staking::{Exposure, IndividualExposure, StakerStatus}; use sp_std::{collections::btree_map::BTreeMap, prelude::*}; pub use weights::WeightInfo; pub use pallet::{pallet::*, UseNominatorsAndValidatorsMap, UseValidatorsMap}; +pub(crate) const STAKING_ID: LockIdentifier = *b"staking "; pub(crate) const LOG_TARGET: &str = "runtime::staking"; // syntactic sugar for logging. @@ -394,7 +404,9 @@ pub enum RewardDestination { Staked, /// Pay into the stash account, not increasing the amount at stake. Stash, - /// Pay into the controller account. + #[deprecated( + note = "`Controller` will be removed after January 2024. Use `Account(controller)` instead. This variant now behaves the same as `Stash` variant." + )] Controller, /// Pay into a specified account. Account(AccountId), @@ -433,6 +445,14 @@ pub struct UnlockChunk { } /// The ledger of a (bonded) stash. 
+/// +/// Note: All the reads and mutations to the [`Ledger`], [`Bonded`] and [`Payee`] storage items +/// *MUST* be performed through the methods exposed by this struct, to ensure the consistency of +/// ledger's data and corresponding staking lock +/// +/// TODO: move struct definition and full implementation into `/src/ledger.rs`. Currently +/// leaving here to enforce a clean PR diff, given how critical this logic is. Tracking issue +/// . #[derive( PartialEqNoBound, EqNoBound, @@ -447,35 +467,38 @@ pub struct UnlockChunk { pub struct StakingLedger { /// The stash account whose balance is actually locked and at stake. pub stash: T::AccountId, + /// The total amount of the stash's balance that we are currently accounting for. /// It's just `active` plus all the `unlocking` balances. #[codec(compact)] pub total: BalanceOf, + /// The total amount of the stash's balance that will be at stake in any forthcoming /// rounds. #[codec(compact)] pub active: BalanceOf, + /// Any balance that is becoming free, which may eventually be transferred out of the stash /// (assuming it doesn't get slashed first). It is assumed that this will be treated as a first /// in, first out queue where the new (higher value) eras get pushed on the back. pub unlocking: BoundedVec>, T::MaxUnlockingChunks>, + /// List of eras for which the stakers behind a validator have claimed rewards. Only updated /// for validators. - pub claimed_rewards: BoundedVec, + /// + /// This is deprecated as of V14 in favor of `T::ClaimedRewards` and will be removed in future. + /// Refer to issue + pub legacy_claimed_rewards: BoundedVec, + + /// The controller associated with this ledger's stash. + /// + /// This is not stored on-chain, and is only bundled when the ledger is read from storage. + /// Use [`controller`] function to get the controller associated with the ledger. + #[codec(skip)] + controller: Option, } impl StakingLedger { - /// Initializes the default object using the given `validator`. - pub fn default_from(stash: T::AccountId) -> Self { - Self { - stash, - total: Zero::zero(), - active: Zero::zero(), - unlocking: Default::default(), - claimed_rewards: Default::default(), - } - } - /// Remove entries from `unlocking` that are sufficiently old and reduce the /// total by the sum of their balances. fn consolidate_unlocked(self, current_era: EraIndex) -> Self { @@ -502,7 +525,8 @@ impl StakingLedger { total, active: self.active, unlocking, - claimed_rewards: self.claimed_rewards, + legacy_claimed_rewards: self.legacy_claimed_rewards, + controller: self.controller, } } @@ -702,32 +726,50 @@ pub struct Nominations { pub suppressed: bool, } -/// The amount of exposure (to slashing) than an individual nominator has. -#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Encode, Decode, RuntimeDebug, TypeInfo)] -pub struct IndividualExposure { - /// The stash account of the nominator in question. - pub who: AccountId, - /// Amount of funds exposed. - #[codec(compact)] - pub value: Balance, +/// Facade struct to encapsulate `PagedExposureMetadata` and a single page of `ExposurePage`. +/// +/// This is useful where we need to take into account the validator's own stake and total exposure +/// in consideration, in addition to the individual nominators backing them. +#[derive(Encode, Decode, RuntimeDebug, TypeInfo, PartialEq, Eq)] +struct PagedExposure { + exposure_metadata: PagedExposureMetadata, + exposure_page: ExposurePage, } -/// A snapshot of the stake backing a single validator in the system. 
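The `consolidate_unlocked` behaviour referenced above (drop unlock chunks whose era has passed and shrink `total` accordingly) can be illustrated with a small standalone sketch; the balances and era numbers below are hypothetical.

fn main() {
    // Ledger with total 100: one chunk of 30 unlockable at era 8 and one of 20 at era 12.
    let current_era = 10u32;
    let mut total = 100u64;
    let unlocking = vec![(30u64, 8u32), (20, 12)]; // (value, era)

    // Keep only chunks that are not yet withdrawable; subtract the rest from `total`.
    let remaining: Vec<(u64, u32)> = unlocking
        .into_iter()
        .filter(|&(value, era)| {
            if era > current_era {
                true
            } else {
                total -= value;
                false
            }
        })
        .collect();

    assert_eq!(total, 70);
    assert_eq!(remaining, vec![(20, 12)]);
}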
-#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Encode, Decode, RuntimeDebug, TypeInfo)] -pub struct Exposure { - /// The total balance backing this validator. - #[codec(compact)] - pub total: Balance, - /// The validator's own stash that is exposed. - #[codec(compact)] - pub own: Balance, - /// The portions of nominators stashes that are exposed. - pub others: Vec>, -} +impl + PagedExposure +{ + /// Create a new instance of `PagedExposure` from legacy clipped exposures. + pub fn from_clipped(exposure: Exposure) -> Self { + Self { + exposure_metadata: PagedExposureMetadata { + total: exposure.total, + own: exposure.own, + nominator_count: exposure.others.len() as u32, + page_count: 1, + }, + exposure_page: ExposurePage { page_total: exposure.total, others: exposure.others }, + } + } -impl Default for Exposure { - fn default() -> Self { - Self { total: Default::default(), own: Default::default(), others: vec![] } + /// Returns total exposure of this validator across pages + pub fn total(&self) -> Balance { + self.exposure_metadata.total + } + + /// Returns total exposure of this validator for the current page + pub fn page_total(&self) -> Balance { + self.exposure_page.page_total + self.exposure_metadata.own + } + + /// Returns validator's own stake that is exposed + pub fn own(&self) -> Balance { + self.exposure_metadata.own + } + + /// Returns the portions of nominators stashes that are exposed in this page. + pub fn others(&self) -> &Vec> { + &self.exposure_page.others } } @@ -927,7 +969,7 @@ pub struct StashOf(sp_std::marker::PhantomData); impl Convert> for StashOf { fn convert(controller: T::AccountId) -> Option { - >::ledger(&controller).map(|l| l.stash) + StakingLedger::::paired_account(StakingAccount::Controller(controller)) } } @@ -979,6 +1021,195 @@ where } } +/// Wrapper struct for Era related information. It is not a pure encapsulation as these storage +/// items can be accessed directly but nevertheless, its recommended to use `EraInfo` where we +/// can and add more functions to it as needed. +pub(crate) struct EraInfo(sp_std::marker::PhantomData); +impl EraInfo { + /// Temporary function which looks at both (1) passed param `T::StakingLedger` for legacy + /// non-paged rewards, and (2) `T::ClaimedRewards` for paged rewards. This function can be + /// removed once `T::HistoryDepth` eras have passed and none of the older non-paged rewards + /// are relevant/claimable. + // Refer tracker issue for cleanup: #13034 + pub(crate) fn is_rewards_claimed_with_legacy_fallback( + era: EraIndex, + ledger: &StakingLedger, + validator: &T::AccountId, + page: Page, + ) -> bool { + ledger.legacy_claimed_rewards.binary_search(&era).is_ok() || + Self::is_rewards_claimed(era, validator, page) + } + + /// Check if the rewards for the given era and page index have been claimed. + /// + /// This is only used for paged rewards. Once older non-paged rewards are no longer + /// relevant, `is_rewards_claimed_with_legacy_fallback` can be removed and this function can + /// be made public. + fn is_rewards_claimed(era: EraIndex, validator: &T::AccountId, page: Page) -> bool { + ClaimedRewards::::get(era, validator).contains(&page) + } + + /// Get exposure for a validator at a given era and page. + /// + /// This builds a paged exposure from `PagedExposureMetadata` and `ExposurePage` of the + /// validator. For older non-paged exposure, it returns the clipped exposure directly. 
+ pub(crate) fn get_paged_exposure( + era: EraIndex, + validator: &T::AccountId, + page: Page, + ) -> Option>> { + let overview = >::get(&era, validator); + + // return clipped exposure if page zero and paged exposure does not exist + // exists for backward compatibility and can be removed as part of #13034 + if overview.is_none() && page == 0 { + return Some(PagedExposure::from_clipped(>::get(era, validator))) + } + + // no exposure for this validator + if overview.is_none() { + return None + } + + let overview = overview.expect("checked above; qed"); + + // validator stake is added only in page zero + let validator_stake = if page == 0 { overview.own } else { Zero::zero() }; + + // since overview is present, paged exposure will always be present except when a + // validator has only own stake and no nominator stake. + let exposure_page = >::get((era, validator, page)).unwrap_or_default(); + + // build the exposure + Some(PagedExposure { + exposure_metadata: PagedExposureMetadata { own: validator_stake, ..overview }, + exposure_page, + }) + } + + /// Get full exposure of the validator at a given era. + pub(crate) fn get_full_exposure( + era: EraIndex, + validator: &T::AccountId, + ) -> Exposure> { + let overview = >::get(&era, validator); + + if overview.is_none() { + return ErasStakers::::get(era, validator) + } + + let overview = overview.expect("checked above; qed"); + + let mut others = Vec::with_capacity(overview.nominator_count as usize); + for page in 0..overview.page_count { + let nominators = >::get((era, validator, page)); + others.append(&mut nominators.map(|n| n.others).defensive_unwrap_or_default()); + } + + Exposure { total: overview.total, own: overview.own, others } + } + + /// Returns the number of pages of exposure a validator has for the given era. + /// + /// For eras where paged exposure does not exist, this returns 1 to keep backward compatibility. + pub(crate) fn get_page_count(era: EraIndex, validator: &T::AccountId) -> Page { + >::get(&era, validator) + .map(|overview| { + if overview.page_count == 0 && overview.own > Zero::zero() { + // Even though there are no nominator pages, there is still validator's own + // stake exposed which needs to be paid out in a page. + 1 + } else { + overview.page_count + } + }) + // Always returns 1 page for older non-paged exposure. + // FIXME: Can be cleaned up with issue #13034. + .unwrap_or(1) + } + + /// Returns the next page that can be claimed or `None` if nothing to claim. + pub(crate) fn get_next_claimable_page( + era: EraIndex, + validator: &T::AccountId, + ledger: &StakingLedger, + ) -> Option { + if Self::is_non_paged_exposure(era, validator) { + return match ledger.legacy_claimed_rewards.binary_search(&era) { + // already claimed + Ok(_) => None, + // Non-paged exposure is considered as a single page + Err(_) => Some(0), + } + } + + // Find next claimable page of paged exposure. + let page_count = Self::get_page_count(era, validator); + let all_claimable_pages: Vec = (0..page_count).collect(); + let claimed_pages = ClaimedRewards::::get(era, validator); + + all_claimable_pages.into_iter().find(|p| !claimed_pages.contains(p)) + } + + /// Checks if exposure is paged or not. + fn is_non_paged_exposure(era: EraIndex, validator: &T::AccountId) -> bool { + >::contains_key(&era, validator) + } + + /// Returns validator commission for this era and page. 
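Since `get_next_claimable_page` above boils down to finding the first page index missing from the claimed set, here is a minimal standalone sketch of that selection; the page data is hypothetical and a plain slice stands in for the `ClaimedRewards` entry.

fn next_claimable_page(page_count: u32, claimed: &[u32]) -> Option<u32> {
    // First page index in 0..page_count that has not been claimed yet, if any.
    (0..page_count).find(|page| !claimed.contains(page))
}

fn main() {
    // A validator with 3 exposure pages where pages 0 and 2 were already paid out.
    assert_eq!(next_claimable_page(3, &[0, 2]), Some(1));
    // All pages claimed: nothing left to pay for this era.
    assert_eq!(next_claimable_page(3, &[0, 1, 2]), None);
    // Legacy non-paged exposure is treated as a single page (page 0).
    assert_eq!(next_claimable_page(1, &[]), Some(0));
}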
+ pub(crate) fn get_validator_commission( + era: EraIndex, + validator_stash: &T::AccountId, + ) -> Perbill { + >::get(&era, validator_stash).commission + } + + /// Creates an entry to track validator reward has been claimed for a given era and page. + /// Noop if already claimed. + pub(crate) fn set_rewards_as_claimed(era: EraIndex, validator: &T::AccountId, page: Page) { + let mut claimed_pages = ClaimedRewards::::get(era, validator); + + // this should never be called if the reward has already been claimed + if claimed_pages.contains(&page) { + defensive!("Trying to set an already claimed reward"); + // nevertheless don't do anything since the page already exist in claimed rewards. + return + } + + // add page to claimed entries + claimed_pages.push(page); + ClaimedRewards::::insert(era, validator, claimed_pages); + } + + /// Store exposure for elected validators at start of an era. + pub(crate) fn set_exposure( + era: EraIndex, + validator: &T::AccountId, + exposure: Exposure>, + ) { + let page_size = T::MaxExposurePageSize::get().defensive_max(1); + + let nominator_count = exposure.others.len(); + // expected page count is the number of nominators divided by the page size, rounded up. + let expected_page_count = + nominator_count.defensive_saturating_add(page_size as usize - 1) / page_size as usize; + + let (exposure_metadata, exposure_pages) = exposure.into_pages(page_size); + defensive_assert!(exposure_pages.len() == expected_page_count, "unexpected page count"); + + >::insert(era, &validator, &exposure_metadata); + exposure_pages.iter().enumerate().for_each(|(page, paged_exposure)| { + >::insert((era, &validator, page as Page), &paged_exposure); + }); + } + + /// Store total exposure for all the elected validators in the era. + pub(crate) fn set_total_stake(era: EraIndex, total_stake: BalanceOf) { + >::insert(era, total_stake); + } +} + /// Configurations of the benchmarking of the pallet. pub trait BenchmarkingConfig { /// The maximum number of validators to use. diff --git a/substrate/frame/staking/src/migrations.rs b/substrate/frame/staking/src/migrations.rs index 89520028b901c07599817d371db875fb258f536a..84b00254126f7e1898861aad977850bfd045d0bf 100644 --- a/substrate/frame/staking/src/migrations.rs +++ b/substrate/frame/staking/src/migrations.rs @@ -14,7 +14,8 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and -//! Storage migrations for the Staking pallet. +//! Storage migrations for the Staking pallet. The changelog for this is maintained at +//! [CHANGELOG.md](https://github.com/paritytech/substrate/blob/master/frame/staking/CHANGELOG.md). use super::*; use frame_election_provider_support::SortedListProvider; @@ -58,6 +59,49 @@ impl Default for ObsoleteReleases { #[storage_alias] type StorageVersion = StorageValue, ObsoleteReleases, ValueQuery>; +/// Migration of era exposure storage items to paged exposures. 
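The expected page count computed in `set_exposure` above is a ceiling division of nominator count by page size. The following standalone sketch shows both the split and the count; the numbers are hypothetical and a plain `chunks` call stands in for `Exposure::into_pages`.

fn expected_page_count(nominator_count: usize, page_size: usize) -> usize {
    // ceil(nominator_count / page_size), as in `set_exposure` above.
    (nominator_count + page_size - 1) / page_size
}

fn main() {
    // 7 hypothetical nominator stakes, paged with a page size of 3.
    let others: Vec<u64> = vec![10, 20, 30, 40, 50, 60, 70];
    let page_size = 3;

    let pages: Vec<Vec<u64>> = others.chunks(page_size).map(|c| c.to_vec()).collect();
    assert_eq!(pages.len(), expected_page_count(others.len(), page_size)); // 3 pages

    // With MaxExposurePageSize = 64 (the mock value in this diff), 250 nominators need
    // 4 pages, i.e. `payout_stakers` must be called 4 times to pay everyone for that era.
    assert_eq!(expected_page_count(250, 64), 4);
}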
+/// Changelog: [v14.](https://github.com/paritytech/substrate/blob/ankan/paged-rewards-rebased2/frame/staking/CHANGELOG.md#14) +pub mod v14 { + use super::*; + + pub struct MigrateToV14(sp_std::marker::PhantomData); + impl OnRuntimeUpgrade for MigrateToV14 { + fn on_runtime_upgrade() -> Weight { + let current = Pallet::::current_storage_version(); + let on_chain = Pallet::::on_chain_storage_version(); + + if current == 14 && on_chain == 13 { + current.put::>(); + + log!(info, "v14 applied successfully."); + T::DbWeight::get().reads_writes(1, 1) + } else { + log!(warn, "v14 not applied."); + T::DbWeight::get().reads(1) + } + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, TryRuntimeError> { + frame_support::ensure!( + Pallet::::on_chain_storage_version() == 13, + "Required v13 before upgrading to v14." + ); + + Ok(Default::default()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(_state: Vec) -> Result<(), TryRuntimeError> { + frame_support::ensure!( + Pallet::::on_chain_storage_version() == 14, + "v14 not applied" + ); + Ok(()) + } + } +} + pub mod v13 { use super::*; @@ -113,9 +157,9 @@ pub mod v12 { #[storage_alias] type HistoryDepth = StorageValue, u32, ValueQuery>; - /// Clean up `HistoryDepth` from storage. + /// Clean up `T::HistoryDepth` from storage. /// - /// We will be depending on the configurable value of `HistoryDepth` post + /// We will be depending on the configurable value of `T::HistoryDepth` post /// this release. pub struct MigrateToV12(sp_std::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV12 { diff --git a/substrate/frame/staking/src/mock.rs b/substrate/frame/staking/src/mock.rs index c41144278f2c1749f6c59a7862033f9fce014d67..364d125792b84e0b43f05e44384bd495fabcc521 100644 --- a/substrate/frame/staking/src/mock.rs +++ b/substrate/frame/staking/src/mock.rs @@ -25,8 +25,8 @@ use frame_election_provider_support::{ use frame_support::{ assert_ok, ord_parameter_types, parameter_types, traits::{ - ConstU32, ConstU64, Currency, EitherOfDiverse, FindAuthor, Get, Hooks, Imbalance, - OnUnbalanced, OneSessionHandler, + ConstU64, Currency, EitherOfDiverse, FindAuthor, Get, Hooks, Imbalance, OnUnbalanced, + OneSessionHandler, }, weights::constants::RocksDbWeight, }; @@ -39,7 +39,10 @@ use sp_runtime::{ traits::{IdentityLookup, Zero}, BuildStorage, }; -use sp_staking::offence::{DisableStrategy, OffenceDetails, OnOffenceHandler}; +use sp_staking::{ + offence::{DisableStrategy, OffenceDetails, OnOffenceHandler}, + OnStakingUpdate, +}; pub const INIT_TIMESTAMP: u64 = 30_000; pub const BLOCK_TIME: u64 = 1000; @@ -77,7 +80,7 @@ impl sp_runtime::BoundToRuntimeAppPublic for OtherSessionHandler { } pub fn is_disabled(controller: AccountId) -> bool { - let stash = Staking::ledger(&controller).unwrap().stash; + let stash = Ledger::::get(&controller).unwrap().stash; let validator_index = match Session::validators().iter().position(|v| *v == stash) { Some(index) => index as u32, None => return false, @@ -159,6 +162,7 @@ impl pallet_balances::Config for Test { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = (); } @@ -232,6 +236,7 @@ const THRESHOLDS: [sp_npos_elections::VoteWeight; 9] = parameter_types! 
{ pub static BagThresholds: &'static [sp_npos_elections::VoteWeight] = &THRESHOLDS; pub static HistoryDepth: u32 = 80; + pub static MaxExposurePageSize: u32 = 64; pub static MaxUnlockingChunks: u32 = 32; pub static RewardOnUnbalanceWasCalled: bool = false; pub static MaxWinners: u32 = 100; @@ -300,7 +305,7 @@ impl crate::pallet::pallet::Config for Test { type SessionInterface = Self; type EraPayout = ConvertCurve; type NextNewSession = Session; - type MaxNominatorRewardedPerValidator = ConstU32<64>; + type MaxExposurePageSize = MaxExposurePageSize; type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; @@ -589,7 +594,7 @@ pub(crate) fn current_era() -> EraIndex { pub(crate) fn bond(who: AccountId, val: Balance) { let _ = Balances::make_free_balance_be(&who, val); - assert_ok!(Staking::bond(RuntimeOrigin::signed(who), val, RewardDestination::Controller)); + assert_ok!(Staking::bond(RuntimeOrigin::signed(who), val, RewardDestination::Stash)); } pub(crate) fn bond_validator(who: AccountId, val: Balance) { @@ -756,7 +761,7 @@ pub(crate) fn on_offence_now( pub(crate) fn add_slash(who: &AccountId) { on_offence_now( &[OffenceDetails { - offender: (*who, Staking::eras_stakers(active_era(), *who)), + offender: (*who, Staking::eras_stakers(active_era(), who)), reporters: vec![], }], &[Perbill::from_percent(10)], @@ -774,10 +779,27 @@ pub(crate) fn make_all_reward_payment(era: EraIndex) { // reward validators for validator_controller in validators_with_reward.iter().filter_map(Staking::bonded) { let ledger = >::get(&validator_controller).unwrap(); - assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), ledger.stash, era)); + for page in 0..EraInfo::::get_page_count(era, &ledger.stash) { + assert_ok!(Staking::payout_stakers_by_page( + RuntimeOrigin::signed(1337), + ledger.stash, + era, + page + )); + } } } +pub(crate) fn bond_controller_stash(controller: AccountId, stash: AccountId) -> Result<(), String> { + >::get(&stash).map_or(Ok(()), |_| Err("stash already bonded"))?; + >::get(&controller).map_or(Ok(()), |_| Err("controller already bonded"))?; + + >::insert(stash, controller); + >::insert(controller, StakingLedger::::default_from(stash)); + + Ok(()) +} + #[macro_export] macro_rules! 
assert_session_era { ($session:expr, $era:expr) => { diff --git a/substrate/frame/staking/src/pallet/impls.rs b/substrate/frame/staking/src/pallet/impls.rs index 3d9b1157ca87afaaea80b260ecb8d71179ffe4df..6ea8e4c9d3b2548b542d1ac33dc91b95628ebfac 100644 --- a/substrate/frame/staking/src/pallet/impls.rs +++ b/substrate/frame/staking/src/pallet/impls.rs @@ -27,8 +27,8 @@ use frame_support::{ dispatch::WithPostDispatchInfo, pallet_prelude::*, traits::{ - Currency, Defensive, DefensiveResult, EstimateNextNewSession, Get, Imbalance, - LockableCurrency, OnUnbalanced, TryCollect, UnixTime, WithdrawReasons, + Currency, Defensive, EstimateNextNewSession, Get, Imbalance, Len, OnUnbalanced, TryCollect, + UnixTime, }, weights::Weight, }; @@ -41,18 +41,20 @@ use sp_runtime::{ use sp_staking::{ currency_to_vote::CurrencyToVote, offence::{DisableStrategy, OffenceDetails, OnOffenceHandler}, - EraIndex, SessionIndex, Stake, StakingInterface, + EraIndex, Page, SessionIndex, Stake, + StakingAccount::{self, Controller, Stash}, + StakingInterface, }; use sp_std::prelude::*; use crate::{ election_size_tracker::StaticTracker, log, slashing, weights::WeightInfo, ActiveEraInfo, - BalanceOf, EraPayout, Exposure, ExposureOf, Forcing, IndividualExposure, MaxNominationsOf, - MaxWinnersOf, Nominations, NominationsQuota, PositiveImbalanceOf, RewardDestination, - SessionInterface, StakingLedger, ValidatorPrefs, + BalanceOf, EraInfo, EraPayout, Exposure, ExposureOf, Forcing, IndividualExposure, + MaxNominationsOf, MaxWinnersOf, Nominations, NominationsQuota, PositiveImbalanceOf, + RewardDestination, SessionInterface, StakingLedger, ValidatorPrefs, }; -use super::{pallet::*, STAKING_ID}; +use super::pallet::*; #[cfg(feature = "try-runtime")] use frame_support::ensure; @@ -68,10 +70,24 @@ use sp_runtime::TryRuntimeError; const NPOS_MAX_ITERATIONS_COEFFICIENT: u32 = 2; impl Pallet { + /// Fetches the ledger associated with a controller or stash account, if any. + pub fn ledger(account: StakingAccount) -> Result, Error> { + StakingLedger::::get(account) + } + + pub fn payee(account: StakingAccount) -> RewardDestination { + StakingLedger::::reward_destination(account) + } + + /// Fetches the controller bonded to a stash account, if any. + pub fn bonded(stash: &T::AccountId) -> Option { + StakingLedger::::paired_account(Stash(stash.clone())) + } + /// The total balance that can be slashed from a stash account as of right now. pub fn slashable_balance_of(stash: &T::AccountId) -> BalanceOf { // Weight note: consider making the stake accessible through stash. - Self::bonded(stash).and_then(Self::ledger).map(|l| l.active).unwrap_or_default() + Self::ledger(Stash(stash.clone())).map(|l| l.active).unwrap_or_default() } /// Internal impl of [`Self::slashable_balance_of`] that returns [`VoteWeight`]. @@ -105,25 +121,24 @@ impl Pallet { controller: &T::AccountId, num_slashing_spans: u32, ) -> Result { - let mut ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; + let mut ledger = Self::ledger(Controller(controller.clone()))?; let (stash, old_total) = (ledger.stash.clone(), ledger.total); if let Some(current_era) = Self::current_era() { ledger = ledger.consolidate_unlocked(current_era) } + let new_total = ledger.total; let used_weight = if ledger.unlocking.is_empty() && ledger.active < T::Currency::minimum_balance() { // This account must have called `unbond()` with some value that caused the active // portion to fall below existential deposit + will have no more unlocking chunks // left. 
We can now safely remove all staking-related information. - Self::kill_stash(&stash, num_slashing_spans)?; - // Remove the lock. - T::Currency::remove_lock(STAKING_ID, &stash); + Self::kill_stash(&ledger.stash, num_slashing_spans)?; T::WeightInfo::withdraw_unbonded_kill(num_slashing_spans) } else { // This was the consequence of a partial unbond. just update the ledger and move on. - Self::update_ledger(&controller, &ledger); + ledger.update()?; // This is only an update, so we use less overall weight. T::WeightInfo::withdraw_unbonded_update(num_slashing_spans) @@ -131,9 +146,9 @@ impl Pallet { // `old_total` should never be less than the new total because // `consolidate_unlocked` strictly subtracts balance. - if ledger.total < old_total { + if new_total < old_total { // Already checked that this won't overflow by entry condition. - let value = old_total - ledger.total; + let value = old_total - new_total; Self::deposit_event(Event::::Withdrawn { stash, amount: value }); } @@ -143,12 +158,31 @@ impl Pallet { pub(super) fn do_payout_stakers( validator_stash: T::AccountId, era: EraIndex, + ) -> DispatchResultWithPostInfo { + let controller = Self::bonded(&validator_stash).ok_or_else(|| { + Error::::NotStash.with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) + })?; + let ledger = >::get(&controller).ok_or(Error::::NotController)?; + let page = EraInfo::::get_next_claimable_page(era, &validator_stash, &ledger) + .ok_or_else(|| { + Error::::AlreadyClaimed + .with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) + })?; + + Self::do_payout_stakers_by_page(validator_stash, era, page) + } + + pub(super) fn do_payout_stakers_by_page( + validator_stash: T::AccountId, + era: EraIndex, + page: Page, ) -> DispatchResultWithPostInfo { // Validate input data let current_era = CurrentEra::::get().ok_or_else(|| { Error::::InvalidEraToReward .with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) })?; + let history_depth = T::HistoryDepth::get(); ensure!( era <= current_era && era >= current_era.saturating_sub(history_depth), @@ -156,41 +190,49 @@ impl Pallet { .with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) ); + ensure!( + page < EraInfo::::get_page_count(era, &validator_stash), + Error::::InvalidPage.with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) + ); + // Note: if era has no reward to be claimed, era may be future. better not to update - // `ledger.claimed_rewards` in this case. + // `ledger.legacy_claimed_rewards` in this case. 
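The era bound checked above only admits payouts for eras within the history window; a tiny standalone sketch of that predicate with hypothetical values:

fn main() {
    // An era is claimable only if it is not in the future and not older than HistoryDepth.
    let (current_era, history_depth) = (100u32, 84u32);
    let claimable =
        |era: u32| era <= current_era && era >= current_era.saturating_sub(history_depth);

    assert!(claimable(100)); // the current era itself
    assert!(claimable(16)); // oldest era still within the window (100 - 84)
    assert!(!claimable(15)); // too old, rewards no longer claimable
    assert!(!claimable(101)); // future era
}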
let era_payout = >::get(&era).ok_or_else(|| { Error::::InvalidEraToReward .with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) })?; - let controller = Self::bonded(&validator_stash).ok_or_else(|| { - Error::::NotStash.with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) + let account = StakingAccount::Stash(validator_stash.clone()); + let mut ledger = Self::ledger(account.clone()).or_else(|_| { + if StakingLedger::::is_bonded(account) { + Err(Error::::NotController.into()) + } else { + Err(Error::::NotStash.with_weight(T::WeightInfo::payout_stakers_alive_staked(0))) + } })?; - let mut ledger = >::get(&controller).ok_or(Error::::NotController)?; + // clean up older claimed rewards ledger - .claimed_rewards + .legacy_claimed_rewards .retain(|&x| x >= current_era.saturating_sub(history_depth)); + ledger.clone().update()?; + + let stash = ledger.stash.clone(); - match ledger.claimed_rewards.binary_search(&era) { - Ok(_) => - return Err(Error::::AlreadyClaimed - .with_weight(T::WeightInfo::payout_stakers_alive_staked(0))), - Err(pos) => ledger - .claimed_rewards - .try_insert(pos, era) - // Since we retain era entries in `claimed_rewards` only upto - // `HistoryDepth`, following bound is always expected to be - // satisfied. - .defensive_map_err(|_| Error::::BoundNotMet)?, + if EraInfo::::is_rewards_claimed_with_legacy_fallback(era, &ledger, &stash, page) { + return Err(Error::::AlreadyClaimed + .with_weight(T::WeightInfo::payout_stakers_alive_staked(0))) + } else { + EraInfo::::set_rewards_as_claimed(era, &stash, page); } - let exposure = >::get(&era, &ledger.stash); + let exposure = EraInfo::::get_paged_exposure(era, &stash, page).ok_or_else(|| { + Error::::InvalidEraToReward + .with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) + })?; // Input data seems good, no errors allowed after this point - >::insert(&controller, &ledger); - // Get Era reward points. It has TOTAL and INDIVIDUAL // Find the fraction of the era reward that belongs to the validator // Take that fraction of the eras rewards to split to nominator and validator @@ -200,11 +242,8 @@ impl Pallet { let era_reward_points = >::get(&era); let total_reward_points = era_reward_points.total; - let validator_reward_points = era_reward_points - .individual - .get(&ledger.stash) - .copied() - .unwrap_or_else(Zero::zero); + let validator_reward_points = + era_reward_points.individual.get(&stash).copied().unwrap_or_else(Zero::zero); // Nothing to do if they have no reward points. if validator_reward_points.is_zero() { @@ -219,31 +258,29 @@ impl Pallet { // This is how much validator + nominators are entitled to. let validator_total_payout = validator_total_reward_part * era_payout; - let validator_prefs = Self::eras_validator_prefs(&era, &validator_stash); - // Validator first gets a cut off the top. - let validator_commission = validator_prefs.commission; - let validator_commission_payout = validator_commission * validator_total_payout; + let validator_commission = EraInfo::::get_validator_commission(era, &ledger.stash); + // total commission validator takes across all nominator pages + let validator_total_commission_payout = validator_commission * validator_total_payout; - let validator_leftover_payout = validator_total_payout - validator_commission_payout; + let validator_leftover_payout = validator_total_payout - validator_total_commission_payout; // Now let's calculate how this is split to the validator. 
- let validator_exposure_part = Perbill::from_rational(exposure.own, exposure.total); + let validator_exposure_part = Perbill::from_rational(exposure.own(), exposure.total()); let validator_staking_payout = validator_exposure_part * validator_leftover_payout; + let page_stake_part = Perbill::from_rational(exposure.page_total(), exposure.total()); + // validator commission is paid out in fraction across pages proportional to the page stake. + let validator_commission_payout = page_stake_part * validator_total_commission_payout; Self::deposit_event(Event::::PayoutStarted { era_index: era, - validator_stash: ledger.stash.clone(), + validator_stash: stash.clone(), }); let mut total_imbalance = PositiveImbalanceOf::::zero(); // We can now make total validator payout: if let Some((imbalance, dest)) = - Self::make_payout(&ledger.stash, validator_staking_payout + validator_commission_payout) + Self::make_payout(&stash, validator_staking_payout + validator_commission_payout) { - Self::deposit_event(Event::::Rewarded { - stash: ledger.stash, - dest, - amount: imbalance.peek(), - }); + Self::deposit_event(Event::::Rewarded { stash, dest, amount: imbalance.peek() }); total_imbalance.subsume(imbalance); } @@ -254,8 +291,8 @@ impl Pallet { // Lets now calculate how this is split to the nominators. // Reward only the clipped exposures. Note this is not necessarily sorted. - for nominator in exposure.others.iter() { - let nominator_exposure_part = Perbill::from_rational(nominator.value, exposure.total); + for nominator in exposure.others().iter() { + let nominator_exposure_part = Perbill::from_rational(nominator.value, exposure.total()); let nominator_reward: BalanceOf = nominator_exposure_part * validator_leftover_payout; @@ -274,16 +311,9 @@ impl Pallet { } T::Reward::on_unbalanced(total_imbalance); - debug_assert!(nominator_payout_count <= T::MaxNominatorRewardedPerValidator::get()); - Ok(Some(T::WeightInfo::payout_stakers_alive_staked(nominator_payout_count)).into()) - } + debug_assert!(nominator_payout_count <= T::MaxExposurePageSize::get()); - /// Update the ledger for a controller. - /// - /// This will also update the stash lock. - pub(crate) fn update_ledger(controller: &T::AccountId, ledger: &StakingLedger) { - T::Currency::set_lock(STAKING_ID, &ledger.stash, ledger.total, WithdrawReasons::all()); - >::insert(controller, ledger); + Ok(Some(T::WeightInfo::payout_stakers_alive_staked(nominator_payout_count)).into()) } /// Chill a stash account. 
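Putting the per-page payout path just shown into one standalone numeric sketch; the era reward points, commission, page stake, and nominator stake below are all hypothetical.

use sp_runtime::Perbill;

fn main() {
    // Era-level inputs.
    let era_payout = 10_000u128;
    let (validator_points, total_points) = (20u32, 100u32);

    // Validator + nominators are entitled to their share of the era's reward points.
    let validator_total_payout =
        Perbill::from_rational(validator_points, total_points) * era_payout; // 2_000

    // 5% commission off the top; the rest is split pro rata by exposure.
    let total_commission = Perbill::from_percent(5) * validator_total_payout; // 100
    let leftover = validator_total_payout - total_commission; // 1_900

    // This page holds 600 of the validator's 1_000 total exposure, 100 of which is own stake.
    let (total_exposure, own, page_total) = (1_000u128, 100u128, 600u128);
    let page_commission = Perbill::from_rational(page_total, total_exposure) * total_commission; // 60
    let own_payout = Perbill::from_rational(own, total_exposure) * leftover; // 190
    let nominator_300_payout = Perbill::from_rational(300u128, total_exposure) * leftover; // 570

    assert_eq!((page_commission, own_payout, nominator_300_payout), (60, 190, 570));
}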
@@ -301,24 +331,42 @@ impl Pallet { stash: &T::AccountId, amount: BalanceOf, ) -> Option<(PositiveImbalanceOf, RewardDestination)> { - let maybe_imbalance = match Self::payee(stash) { - RewardDestination::Controller => Self::bonded(stash) - .map(|controller| T::Currency::deposit_creating(&controller, amount)), - RewardDestination::Stash => T::Currency::deposit_into_existing(stash, amount).ok(), - RewardDestination::Staked => Self::bonded(stash) - .and_then(|c| Self::ledger(&c).map(|l| (c, l))) - .and_then(|(controller, mut l)| { - l.active += amount; - l.total += amount; + // noop if amount is zero + if amount.is_zero() { + return None + } + + let dest = Self::payee(StakingAccount::Stash(stash.clone())); + let maybe_imbalance = match dest { + RewardDestination::Stash => + T::Currency::deposit_into_existing(stash, amount).ok(), + RewardDestination::Staked => Self::ledger(Stash(stash.clone())) + .and_then(|mut ledger| { + ledger.active += amount; + ledger.total += amount; let r = T::Currency::deposit_into_existing(stash, amount).ok(); - Self::update_ledger(&controller, &l); - r - }), + + let _ = ledger + .update() + .defensive_proof("ledger fetched from storage, so it exists; qed."); + + Ok(r) + }) + .unwrap_or_default(), RewardDestination::Account(dest_account) => Some(T::Currency::deposit_creating(&dest_account, amount)), RewardDestination::None => None, + #[allow(deprecated)] + RewardDestination::Controller => Self::bonded(stash) + .map(|controller| { + defensive!("Paying out controller as reward destination which is deprecated and should be migrated"); + // This should never happen once payees with a `Controller` variant have been migrated. + // But if it does, just pay the controller account. + T::Currency::deposit_creating(&controller, amount) + }), }; - maybe_imbalance.map(|imbalance| (imbalance, Self::payee(stash))) + maybe_imbalance + .map(|imbalance| (imbalance, Self::payee(StakingAccount::Stash(stash.clone())))) } /// Plan a new session potentially trigger a new era. @@ -407,7 +455,6 @@ impl Pallet { } /// Start a new era. It does: - /// /// * Increment `active_era.index`, /// * reset `active_era.start`, /// * update `BondedEras` and apply slashes. @@ -577,31 +624,24 @@ impl Pallet { >, new_planned_era: EraIndex, ) -> BoundedVec> { - let elected_stashes: BoundedVec<_, MaxWinnersOf> = exposures - .iter() - .cloned() - .map(|(x, _)| x) - .collect::>() - .try_into() - .expect("since we only map through exposures, size of elected_stashes is always same as exposures; qed"); - - // Populate stakers, exposures, and the snapshot of validator prefs. + // Populate elected stash, stakers, exposures, and the snapshot of validator prefs. 
let mut total_stake: BalanceOf = Zero::zero(); + let mut elected_stashes = Vec::with_capacity(exposures.len()); + exposures.into_iter().for_each(|(stash, exposure)| { + // build elected stash + elected_stashes.push(stash.clone()); + // accumulate total stake total_stake = total_stake.saturating_add(exposure.total); - >::insert(new_planned_era, &stash, &exposure); - - let mut exposure_clipped = exposure; - let clipped_max_len = T::MaxNominatorRewardedPerValidator::get() as usize; - if exposure_clipped.others.len() > clipped_max_len { - exposure_clipped.others.sort_by(|a, b| a.value.cmp(&b.value).reverse()); - exposure_clipped.others.truncate(clipped_max_len); - } - >::insert(&new_planned_era, &stash, exposure_clipped); + // store staker exposure for this era + EraInfo::::set_exposure(new_planned_era, &stash, exposure); }); - // Insert current era staking information - >::insert(&new_planned_era, total_stake); + let elected_stashes: BoundedVec<_, MaxWinnersOf> = elected_stashes + .try_into() + .expect("elected_stashes.len() always equal to exposures.len(); qed"); + + EraInfo::::set_total_stake(new_planned_era, total_stake); // Collect the pref of all winners. for stash in &elected_stashes { @@ -666,30 +706,37 @@ impl Pallet { /// - after a `withdraw_unbonded()` call that frees all of a stash's bonded balance. /// - through `reap_stash()` if the balance has fallen to zero (through slashing). pub(crate) fn kill_stash(stash: &T::AccountId, num_slashing_spans: u32) -> DispatchResult { - let controller = >::get(stash).ok_or(Error::::NotStash)?; - - slashing::clear_stash_metadata::(stash, num_slashing_spans)?; + slashing::clear_stash_metadata::(&stash, num_slashing_spans)?; - >::remove(stash); - >::remove(&controller); + // removes controller from `Bonded` and staking ledger from `Ledger`, as well as reward + // setting of the stash in `Payee`. + StakingLedger::::kill(&stash)?; - >::remove(stash); - Self::do_remove_validator(stash); - Self::do_remove_nominator(stash); + Self::do_remove_validator(&stash); + Self::do_remove_nominator(&stash); - frame_system::Pallet::::dec_consumers(stash); + frame_system::Pallet::::dec_consumers(&stash); Ok(()) } /// Clear all era information for given era. pub(crate) fn clear_era_information(era_index: EraIndex) { + // FIXME: We can possibly set a reasonable limit since we do this only once per era and + // clean up state across multiple blocks. let mut cursor = >::clear_prefix(era_index, u32::MAX, None); debug_assert!(cursor.maybe_cursor.is_none()); cursor = >::clear_prefix(era_index, u32::MAX, None); debug_assert!(cursor.maybe_cursor.is_none()); cursor = >::clear_prefix(era_index, u32::MAX, None); debug_assert!(cursor.maybe_cursor.is_none()); + cursor = >::clear_prefix(era_index, u32::MAX, None); + debug_assert!(cursor.maybe_cursor.is_none()); + cursor = >::clear_prefix((era_index,), u32::MAX, None); + debug_assert!(cursor.maybe_cursor.is_none()); + cursor = >::clear_prefix(era_index, u32::MAX, None); + debug_assert!(cursor.maybe_cursor.is_none()); + >::remove(era_index); >::remove(era_index); >::remove(era_index); @@ -754,7 +801,7 @@ impl Pallet { stash: T::AccountId, exposure: Exposure>, ) { - >::insert(¤t_era, &stash, &exposure); + EraInfo::::set_exposure(current_era, &stash, exposure); } #[cfg(feature = "runtime-benchmarks")] @@ -1028,6 +1075,18 @@ impl Pallet { DispatchClass::Mandatory, ); } + + /// Returns full exposure of a validator for a given era. + /// + /// History note: This used to be a getter for old storage item `ErasStakers` deprecated in v14. 
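As a rough standalone illustration of what the backward-compatible full-exposure read amounts to (concatenating every stored page in order, as `EraInfo::get_full_exposure` does earlier in this file), with hypothetical per-page nominator stakes:

fn main() {
    // Nominator stakes of one validator, stored as two pages, plus the validator's own 100.
    let own = 100u64;
    let pages: Vec<Vec<u64>> = vec![vec![300, 200, 150], vec![150, 100]];

    // Full exposure is rebuilt by appending the `others` of every page in page order.
    let mut others: Vec<u64> = Vec::new();
    for mut page in pages {
        others.append(&mut page);
    }

    assert_eq!(others, vec![300, 200, 150, 150, 100]);
    assert_eq!(own + others.iter().sum::<u64>(), 1_000);
}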
+ /// Since this function is used in the codebase at various places, we kept it as a custom getter + /// that takes care of getting the full exposure of the validator in a backward compatible way. + pub fn eras_stakers( + era: EraIndex, + account: &T::AccountId, + ) -> Exposure> { + EraInfo::::get_full_exposure(era, account) + } } impl Pallet { @@ -1037,6 +1096,17 @@ impl Pallet { pub fn api_nominations_quota(balance: BalanceOf) -> u32 { T::NominationsQuota::get_quota(balance) } + + pub fn api_eras_stakers( + era: EraIndex, + account: T::AccountId, + ) -> Exposure> { + Self::eras_stakers(era, &account) + } + + pub fn api_eras_stakers_page_count(era: EraIndex, account: T::AccountId) -> Page { + EraInfo::::get_page_count(era, &account) + } } impl ElectionDataProvider for Pallet { @@ -1121,16 +1191,7 @@ impl ElectionDataProvider for Pallet { panic!("cannot convert a VoteWeight into BalanceOf, benchmark needs reconfiguring.") }); >::insert(voter.clone(), voter.clone()); - >::insert( - voter.clone(), - StakingLedger { - stash: voter.clone(), - active: stake, - total: stake, - unlocking: Default::default(), - claimed_rewards: Default::default(), - }, - ); + >::insert(voter.clone(), StakingLedger::::new(voter.clone(), stake)); Self::do_add_nominator(&voter, Nominations { targets, submitted_in: 0, suppressed: false }); } @@ -1139,16 +1200,7 @@ impl ElectionDataProvider for Pallet { fn add_target(target: T::AccountId) { let stake = MinValidatorBond::::get() * 100u32.into(); >::insert(target.clone(), target.clone()); - >::insert( - target.clone(), - StakingLedger { - stash: target.clone(), - active: stake, - total: stake, - unlocking: Default::default(), - claimed_rewards: Default::default(), - }, - ); + >::insert(target.clone(), StakingLedger::::new(target.clone(), stake)); Self::do_add_validator( &target, ValidatorPrefs { commission: Perbill::zero(), blocked: false }, @@ -1180,16 +1232,7 @@ impl ElectionDataProvider for Pallet { .and_then(|w| >::try_from(w).ok()) .unwrap_or_else(|| MinNominatorBond::::get() * 100u32.into()); >::insert(v.clone(), v.clone()); - >::insert( - v.clone(), - StakingLedger { - stash: v.clone(), - active: stake, - total: stake, - unlocking: Default::default(), - claimed_rewards: Default::default(), - }, - ); + >::insert(v.clone(), StakingLedger::::new(v.clone(), stake)); Self::do_add_validator( &v, ValidatorPrefs { commission: Perbill::zero(), blocked: false }, @@ -1201,16 +1244,7 @@ impl ElectionDataProvider for Pallet { panic!("cannot convert a VoteWeight into BalanceOf, benchmark needs reconfiguring.") }); >::insert(v.clone(), v.clone()); - >::insert( - v.clone(), - StakingLedger { - stash: v.clone(), - active: stake, - total: stake, - unlocking: Default::default(), - claimed_rewards: Default::default(), - }, - ); + >::insert(v.clone(), StakingLedger::::new(v.clone(), stake)); Self::do_add_nominator( &v, Nominations { targets: t, submitted_in: 0, suppressed: false }, @@ -1459,9 +1493,9 @@ impl ScoreProvider for Pallet { // this will clearly results in an inconsistent state, but it should not matter for a // benchmark. 
let active: BalanceOf = weight.try_into().map_err(|_| ()).unwrap(); - let mut ledger = match Self::ledger(who) { - None => StakingLedger::default_from(who.clone()), - Some(l) => l, + let mut ledger = match Self::ledger(StakingAccount::Stash(who.clone())) { + Ok(l) => l, + Err(_) => StakingLedger::default_from(who.clone()), }; ledger.active = active; @@ -1638,29 +1672,10 @@ impl StakingInterface for Pallet { MinValidatorBond::::get() } - fn desired_validator_count() -> u32 { - ValidatorCount::::get() - } - - fn election_ongoing() -> bool { - T::ElectionProvider::ongoing() - } - - fn force_unstake(who: Self::AccountId) -> sp_runtime::DispatchResult { - let num_slashing_spans = Self::slashing_spans(&who).map_or(0, |s| s.iter().count() as u32); - Self::force_unstake(RawOrigin::Root.into(), who.clone(), num_slashing_spans) - } - fn stash_by_ctrl(controller: &Self::AccountId) -> Result { - Self::ledger(controller) + Self::ledger(Controller(controller.clone())) .map(|l| l.stash) - .ok_or(Error::::NotController.into()) - } - - fn is_exposed_in_era(who: &Self::AccountId, era: &EraIndex) -> bool { - ErasStakers::::iter_prefix(era).any(|(validator, exposures)| { - validator == *who || exposures.others.iter().any(|i| i.who == *who) - }) + .map_err(|e| e.into()) } fn bonding_duration() -> EraIndex { @@ -1672,10 +1687,9 @@ impl StakingInterface for Pallet { } fn stake(who: &Self::AccountId) -> Result>, DispatchError> { - Self::bonded(who) - .and_then(|c| Self::ledger(c)) + Self::ledger(Stash(who.clone())) .map(|l| Stake { total: l.total, active: l.active }) - .ok_or(Error::::NotStash.into()) + .map_err(|e| e.into()) } fn bond_extra(who: &Self::AccountId, extra: Self::Balance) -> DispatchResult { @@ -1700,7 +1714,7 @@ impl StakingInterface for Pallet { who: Self::AccountId, num_slashing_spans: u32, ) -> Result { - let ctrl = Self::bonded(who).ok_or(Error::::NotStash)?; + let ctrl = Self::bonded(&who).ok_or(Error::::NotStash)?; Self::withdraw_unbonded(RawOrigin::Signed(ctrl.clone()).into(), num_slashing_spans) .map(|_| !Ledger::::contains_key(&ctrl)) .map_err(|with_post| with_post.error) @@ -1724,11 +1738,35 @@ impl StakingInterface for Pallet { Self::nominate(RawOrigin::Signed(ctrl).into(), targets) } + fn desired_validator_count() -> u32 { + ValidatorCount::::get() + } + + fn election_ongoing() -> bool { + T::ElectionProvider::ongoing() + } + + fn force_unstake(who: Self::AccountId) -> sp_runtime::DispatchResult { + let num_slashing_spans = Self::slashing_spans(&who).map_or(0, |s| s.iter().count() as u32); + Self::force_unstake(RawOrigin::Root.into(), who.clone(), num_slashing_spans) + } + + fn is_exposed_in_era(who: &Self::AccountId, era: &EraIndex) -> bool { + // look in the non paged exposures + // FIXME: Can be cleaned up once non paged exposures are cleared (https://github.com/paritytech/polkadot-sdk/issues/433) + ErasStakers::::iter_prefix(era).any(|(validator, exposures)| { + validator == *who || exposures.others.iter().any(|i| i.who == *who) + }) + || + // look in the paged exposures + ErasStakersPaged::::iter_prefix((era,)).any(|((validator, _), exposure_page)| { + validator == *who || exposure_page.others.iter().any(|i| i.who == *who) + }) + } fn status( who: &Self::AccountId, ) -> Result, DispatchError> { - let is_bonded = Self::bonded(who).is_some(); - if !is_bonded { + if !StakingLedger::::is_bonded(StakingAccount::Stash(who.clone())) { return Err(Error::::NotStash.into()) } @@ -1764,12 +1802,16 @@ impl StakingInterface for Pallet { .map(|(who, value)| IndividualExposure { who: who.clone(), 
value: value.clone() }) .collect::>(); let exposure = Exposure { total: Default::default(), own: Default::default(), others }; - >::insert(¤t_era, &stash, &exposure); + EraInfo::::set_exposure(*current_era, stash, exposure); } fn set_current_era(era: EraIndex) { CurrentEra::::put(era); } + + fn max_exposure_page_size() -> Page { + T::MaxExposurePageSize::get() + } } } @@ -1784,6 +1826,7 @@ impl Pallet { Self::check_nominators()?; Self::check_exposures()?; + Self::check_paged_exposures()?; Self::check_ledgers()?; Self::check_count() } @@ -1832,10 +1875,81 @@ impl Pallet { .collect::>() } + fn check_paged_exposures() -> Result<(), TryRuntimeError> { + use sp_staking::PagedExposureMetadata; + use sp_std::collections::btree_map::BTreeMap; + + // Sanity check for the paged exposure of the active era. + let mut exposures: BTreeMap>> = + BTreeMap::new(); + let era = Self::active_era().unwrap().index; + let accumulator_default = PagedExposureMetadata { + total: Zero::zero(), + own: Zero::zero(), + nominator_count: 0, + page_count: 0, + }; + + ErasStakersPaged::::iter_prefix((era,)) + .map(|((validator, _page), expo)| { + ensure!( + expo.page_total == + expo.others.iter().map(|e| e.value).fold(Zero::zero(), |acc, x| acc + x), + "wrong total exposure for the page.", + ); + + let metadata = exposures.get(&validator).unwrap_or(&accumulator_default); + exposures.insert( + validator, + PagedExposureMetadata { + total: metadata.total + expo.page_total, + own: metadata.own, + nominator_count: metadata.nominator_count + expo.others.len() as u32, + page_count: metadata.page_count + 1, + }, + ); + + Ok(()) + }) + .collect::>()?; + + exposures + .iter() + .map(|(validator, metadata)| { + let actual_overview = ErasStakersOverview::::get(era, validator); + + ensure!(actual_overview.is_some(), "No overview found for a paged exposure"); + let actual_overview = actual_overview.unwrap(); + + ensure!( + actual_overview.total == metadata.total + actual_overview.own, + "Exposure metadata does not have correct total exposed stake." + ); + ensure!( + actual_overview.nominator_count == metadata.nominator_count, + "Exposure metadata does not have correct count of nominators." + ); + ensure!( + actual_overview.page_count == metadata.page_count, + "Exposure metadata does not have correct count of pages." + ); + + Ok(()) + }) + .collect::>() + } + fn check_nominators() -> Result<(), TryRuntimeError> { // a check per nominator to ensure their entire stake is correctly distributed. Will only // kick-in if the nomination was submitted before the current era. let era = Self::active_era().unwrap().index; + + // cache era exposures to avoid too many db reads. + let era_exposures = T::SessionInterface::validators() + .iter() + .map(|v| Self::eras_stakers(era, v)) + .collect::>(); + >::iter() .filter_map( |(nominator, nomination)| { @@ -1850,9 +1964,8 @@ impl Pallet { // must be bonded. Self::ensure_is_stash(&nominator)?; let mut sum = BalanceOf::::zero(); - T::SessionInterface::validators() + era_exposures .iter() - .map(|v| Self::eras_stakers(era, v)) .map(|e| -> Result<(), TryRuntimeError> { let individual = e.others.iter().filter(|e| e.who == nominator).collect::>(); @@ -1868,6 +1981,14 @@ impl Pallet { Ok(()) }) .collect::, _>>()?; + + // We take total instead of active as the nominator might have requested to unbond + // some of their stake that is still exposed in the current era. + if sum <= Self::ledger(Stash(nominator.clone()))?.total { + // This can happen when there is a slash in the current era so we only warn. 
+ log!(warn, "nominator stake exceeds what is bonded."); + } + Ok(()) }) .collect::, _>>()?; @@ -1882,7 +2003,8 @@ impl Pallet { fn ensure_ledger_consistent(ctrl: T::AccountId) -> Result<(), TryRuntimeError> { // ensures ledger.total == ledger.active + sum(ledger.unlocking). - let ledger = Self::ledger(ctrl.clone()).ok_or("Not a controller.")?; + let ledger = Self::ledger(StakingAccount::Controller(ctrl.clone()))?; + let real_total: BalanceOf = ledger.unlocking.iter().fold(ledger.active, |a, c| a + c.value); ensure!(real_total == ledger.total, "ledger.total corrupt"); diff --git a/substrate/frame/staking/src/pallet/mod.rs b/substrate/frame/staking/src/pallet/mod.rs index 0bcf932d90b44d9233515bb312e470fcd4ef1480..a68e9c90da9e660e1d38f67c2ee44c509b324d75 100644 --- a/substrate/frame/staking/src/pallet/mod.rs +++ b/substrate/frame/staking/src/pallet/mod.rs @@ -24,9 +24,8 @@ use frame_election_provider_support::{ use frame_support::{ pallet_prelude::*, traits::{ - Currency, Defensive, DefensiveResult, DefensiveSaturating, EnsureOrigin, - EstimateNextNewSession, Get, LockIdentifier, LockableCurrency, OnUnbalanced, TryCollect, - UnixTime, + Currency, Defensive, DefensiveSaturating, EnsureOrigin, EstimateNextNewSession, Get, + LockableCurrency, OnUnbalanced, UnixTime, }, weights::Weight, BoundedVec, @@ -36,7 +35,11 @@ use sp_runtime::{ traits::{CheckedSub, SaturatedConversion, StaticLookup, Zero}, ArithmeticError, Perbill, Percent, }; -use sp_staking::{EraIndex, SessionIndex}; + +use sp_staking::{ + EraIndex, Page, SessionIndex, + StakingAccount::{self, Controller, Stash}, +}; use sp_std::prelude::*; mod impls; @@ -45,12 +48,11 @@ pub use impls::*; use crate::{ slashing, weights::WeightInfo, AccountIdLookupOf, ActiveEraInfo, BalanceOf, EraPayout, - EraRewardPoints, Exposure, Forcing, MaxNominationsOf, NegativeImbalanceOf, Nominations, - NominationsQuota, PositiveImbalanceOf, RewardDestination, SessionInterface, StakingLedger, - UnappliedSlash, UnlockChunk, ValidatorPrefs, + EraRewardPoints, Exposure, ExposurePage, Forcing, MaxNominationsOf, NegativeImbalanceOf, + Nominations, NominationsQuota, PositiveImbalanceOf, RewardDestination, SessionInterface, + StakingLedger, UnappliedSlash, UnlockChunk, ValidatorPrefs, }; -const STAKING_ID: LockIdentifier = *b"staking "; // The speculative number of spans are used as an input of the weight annotation of // [`Call::unbond`], as the post dipatch weight may depend on the number of slashing span on the // account which is not provided as an input. The value set should be conservative but sensible. @@ -60,12 +62,12 @@ pub(crate) const SPECULATIVE_NUM_SPANS: u32 = 32; pub mod pallet { use frame_election_provider_support::ElectionDataProvider; - use crate::BenchmarkingConfig; + use crate::{BenchmarkingConfig, PagedExposureMetadata}; use super::*; /// The current storage version. - const STORAGE_VERSION: StorageVersion = StorageVersion::new(13); + const STORAGE_VERSION: StorageVersion = StorageVersion::new(14); #[pallet::pallet] #[pallet::storage_version(STORAGE_VERSION)] @@ -137,8 +139,8 @@ pub mod pallet { /// Following information is kept for eras in `[current_era - /// HistoryDepth, current_era]`: `ErasStakers`, `ErasStakersClipped`, /// `ErasValidatorPrefs`, `ErasValidatorReward`, `ErasRewardPoints`, - /// `ErasTotalStake`, `ErasStartSessionIndex`, - /// `StakingLedger.claimed_rewards`. + /// `ErasTotalStake`, `ErasStartSessionIndex`, `ClaimedRewards`, `ErasStakersPaged`, + /// `ErasStakersOverview`. 
/// /// Must be more than the number of eras delayed by session. /// I.e. active era must always be in history. I.e. `active_era > @@ -148,7 +150,7 @@ pub mod pallet { /// this should be set to same value or greater as in storage. /// /// Note: `HistoryDepth` is used as the upper bound for the `BoundedVec` - /// item `StakingLedger.claimed_rewards`. Setting this value lower than + /// item `StakingLedger.legacy_claimed_rewards`. Setting this value lower than /// the existing value can lead to inconsistencies in the /// `StakingLedger` and will need to be handled properly in a migration. /// The test `reducing_history_depth_abrupt` shows this effect. @@ -201,12 +203,19 @@ pub mod pallet { /// guess. type NextNewSession: EstimateNextNewSession>; - /// The maximum number of nominators rewarded for each validator. + /// The maximum size of each `T::ExposurePage`. + /// + /// An `ExposurePage` is weakly bounded to a maximum of `MaxExposurePageSize` + /// nominators. + /// + /// For older non-paged exposure, a reward payout was restricted to the top + /// `MaxExposurePageSize` nominators. This is to limit the i/o cost for the + /// nominator payout. /// - /// For each validator only the `$MaxNominatorRewardedPerValidator` biggest stakers can - /// claim their reward. This used to limit the i/o cost for the nominator payout. + /// Note: `MaxExposurePageSize` is used to bound `ClaimedRewards` and is unsafe to reduce + /// without handling it in a migration. #[pallet::constant] - type MaxNominatorRewardedPerValidator: Get; + type MaxExposurePageSize: Get; /// The fraction of the validator set that is safe to be offending. /// After the threshold is reached a new era will be forced. @@ -295,7 +304,6 @@ pub mod pallet { /// /// TWOX-NOTE: SAFE since `AccountId` is a secure hash. #[pallet::storage] - #[pallet::getter(fn bonded)] pub type Bonded = StorageMap<_, Twox64Concat, T::AccountId, T::AccountId>; /// The minimum active bond to become and maintain the role of a nominator. @@ -317,15 +325,16 @@ pub mod pallet { pub type MinCommission = StorageValue<_, Perbill, ValueQuery>; /// Map from all (unlocked) "controller" accounts to the info regarding the staking. + /// + /// Note: All the reads and mutations to this storage *MUST* be done through the methods exposed + /// by [`StakingLedger`] to ensure data and lock consistency. #[pallet::storage] - #[pallet::getter(fn ledger)] pub type Ledger = StorageMap<_, Blake2_128Concat, T::AccountId, StakingLedger>; /// Where the reward payment should be made. Keyed by stash. /// /// TWOX-NOTE: SAFE since `AccountId` is a secure hash. #[pallet::storage] - #[pallet::getter(fn payee)] pub type Payee = StorageMap<_, Twox64Concat, T::AccountId, RewardDestination, ValueQuery>; @@ -389,7 +398,7 @@ pub mod pallet { #[pallet::getter(fn active_era)] pub type ActiveEra = StorageValue<_, ActiveEraInfo>; - /// The session index at which the era start for the last `HISTORY_DEPTH` eras. + /// The session index at which the era start for the last [`Config::HistoryDepth`] eras. /// /// Note: This tracks the starting session (i.e. session index when era start being active) /// for the eras in `[CurrentEra - HISTORY_DEPTH, CurrentEra]`. @@ -401,10 +410,11 @@ pub mod pallet { /// /// This is keyed first by the era index to allow bulk deletion and then the stash account. /// - /// Is it removed after `HISTORY_DEPTH` eras. + /// Is it removed after [`Config::HistoryDepth`] eras. /// If stakers hasn't been set or has been removed then empty exposure is returned. 
+ /// + /// Note: Deprecated since v14. Use `EraInfo` instead to work with exposures. #[pallet::storage] - #[pallet::getter(fn eras_stakers)] #[pallet::unbounded] pub type ErasStakers = StorageDoubleMap< _, @@ -416,17 +426,45 @@ pub mod pallet { ValueQuery, >; + /// Summary of validator exposure at a given era. + /// + /// This contains the total stake in support of the validator and their own stake. In addition, + /// it can also be used to get the number of nominators backing this validator and the number of + /// exposure pages they are divided into. The page count is useful to determine the number of + /// pages of rewards that needs to be claimed. + /// + /// This is keyed first by the era index to allow bulk deletion and then the stash account. + /// Should only be accessed through `EraInfo`. + /// + /// Is it removed after [`Config::HistoryDepth`] eras. + /// If stakers hasn't been set or has been removed then empty overview is returned. + #[pallet::storage] + pub type ErasStakersOverview = StorageDoubleMap< + _, + Twox64Concat, + EraIndex, + Twox64Concat, + T::AccountId, + PagedExposureMetadata>, + OptionQuery, + >; + /// Clipped Exposure of validator at era. /// + /// Note: This is deprecated, should be used as read-only and will be removed in the future. + /// New `Exposure`s are stored in a paged manner in `ErasStakersPaged` instead. + /// /// This is similar to [`ErasStakers`] but number of nominators exposed is reduced to the - /// `T::MaxNominatorRewardedPerValidator` biggest stakers. + /// `T::MaxExposurePageSize` biggest stakers. /// (Note: the field `total` and `own` of the exposure remains unchanged). /// This is used to limit the i/o cost for the nominator payout. /// /// This is keyed fist by the era index to allow bulk deletion and then the stash account. /// - /// Is it removed after `HISTORY_DEPTH` eras. + /// It is removed after [`Config::HistoryDepth`] eras. /// If stakers hasn't been set or has been removed then empty exposure is returned. + /// + /// Note: Deprecated since v14. Use `EraInfo` instead to work with exposures. #[pallet::storage] #[pallet::unbounded] #[pallet::getter(fn eras_stakers_clipped)] @@ -440,11 +478,49 @@ pub mod pallet { ValueQuery, >; + /// Paginated exposure of a validator at given era. + /// + /// This is keyed first by the era index to allow bulk deletion, then stash account and finally + /// the page. Should only be accessed through `EraInfo`. + /// + /// This is cleared after [`Config::HistoryDepth`] eras. + #[pallet::storage] + #[pallet::unbounded] + pub type ErasStakersPaged = StorageNMap< + _, + ( + NMapKey, + NMapKey, + NMapKey, + ), + ExposurePage>, + OptionQuery, + >; + + /// History of claimed paged rewards by era and validator. + /// + /// This is keyed by era and validator stash which maps to the set of page indexes which have + /// been claimed. + /// + /// It is removed after [`Config::HistoryDepth`] eras. + #[pallet::storage] + #[pallet::getter(fn claimed_rewards)] + #[pallet::unbounded] + pub type ClaimedRewards = StorageDoubleMap< + _, + Twox64Concat, + EraIndex, + Twox64Concat, + T::AccountId, + Vec, + ValueQuery, + >; + /// Similar to `ErasStakers`, this holds the preferences of validators. /// /// This is keyed first by the era index to allow bulk deletion and then the stash account. /// - /// Is it removed after `HISTORY_DEPTH` eras. + /// Is it removed after [`Config::HistoryDepth`] eras. // If prefs hasn't been set or has been removed then 0 commission is returned. 
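For orientation, the new paged storage items defined above relate to each other roughly as in the following stand-in sketch; plain `BTreeMap`s and hypothetical era/stash ids replace the real storage types.

use std::collections::BTreeMap;

type Era = u32;
type Stash = u64;
type Page = u32;

fn main() {
    // ErasStakersOverview: (era, stash) -> (total, own, nominator_count, page_count)
    let mut overview: BTreeMap<(Era, Stash), (u128, u128, u32, u32)> = BTreeMap::new();
    // ErasStakersPaged: (era, stash, page) -> nominator stakes exposed in that page
    let mut paged: BTreeMap<(Era, Stash, Page), Vec<u128>> = BTreeMap::new();
    // ClaimedRewards: (era, stash) -> pages already paid out
    let mut claimed: BTreeMap<(Era, Stash), Vec<Page>> = BTreeMap::new();

    overview.insert((100, 7), (1_000, 100, 5, 2));
    paged.insert((100, 7, 0), vec![300, 200, 150]);
    paged.insert((100, 7, 1), vec![150, 100]);
    claimed.insert((100, 7), vec![0]); // page 0 paid out, page 1 still claimable

    // The overview's totals are consistent with the pages it points to.
    let (total, own, nominator_count, page_count) = overview[&(100, 7)];
    let paged_stake: u128 = (0..page_count).flat_map(|p| paged[&(100, 7, p)].clone()).sum();
    let paged_nominators: usize = (0..page_count).map(|p| paged[&(100, 7, p)].len()).sum();
    assert_eq!(total, own + paged_stake);
    assert_eq!(nominator_count as usize, paged_nominators);
    assert!(!claimed[&(100, 7)].contains(&1)); // page 1 not claimed yet
}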
#[pallet::storage] #[pallet::getter(fn eras_validator_prefs)] @@ -458,14 +534,14 @@ pub mod pallet { ValueQuery, >; - /// The total validator era payout for the last `HISTORY_DEPTH` eras. + /// The total validator era payout for the last [`Config::HistoryDepth`] eras. /// /// Eras that haven't finished yet or has been removed doesn't have reward. #[pallet::storage] #[pallet::getter(fn eras_validator_reward)] pub type ErasValidatorReward = StorageMap<_, Twox64Concat, EraIndex, BalanceOf>; - /// Rewards for the last `HISTORY_DEPTH` eras. + /// Rewards for the last [`Config::HistoryDepth`] eras. /// If reward hasn't been set or has been removed then 0 reward is returned. #[pallet::storage] #[pallet::unbounded] @@ -473,7 +549,7 @@ pub mod pallet { pub type ErasRewardPoints = StorageMap<_, Twox64Concat, EraIndex, EraRewardPoints, ValueQuery>; - /// The total amount staked for the last `HISTORY_DEPTH` eras. + /// The total amount staked for the last [`Config::HistoryDepth`] eras. /// If total hasn't been set or has been removed then 0 stake is returned. #[pallet::storage] #[pallet::getter(fn eras_total_stake)] @@ -742,6 +818,8 @@ pub mod pallet { NotSortedAndUnique, /// Rewards for this era have already been claimed for this validator. AlreadyClaimed, + /// No nominators exist on this page. + InvalidPage, /// Incorrect previous history depth input provided. IncorrectHistoryDepth, /// Incorrect number of slashing spans provided. @@ -764,6 +842,8 @@ pub mod pallet { CommissionTooLow, /// Some bound is not met. BoundNotMet, + /// Used when attempting to use deprecated controller account logic. + ControllerDeprecated, } #[pallet::hooks] @@ -841,16 +921,11 @@ pub mod pallet { payee: RewardDestination, ) -> DispatchResult { let stash = ensure_signed(origin)?; - let controller_to_be_deprecated = stash.clone(); - if >::contains_key(&stash) { + if StakingLedger::::is_bonded(StakingAccount::Stash(stash.clone())) { return Err(Error::::AlreadyBonded.into()) } - if >::contains_key(&controller_to_be_deprecated) { - return Err(Error::::AlreadyPaired.into()) - } - // Reject a bond which is considered to be _dust_. if value < T::Currency::minimum_balance() { return Err(Error::::InsufficientBond.into()) @@ -858,31 +933,15 @@ pub mod pallet { frame_system::Pallet::::inc_consumers(&stash).map_err(|_| Error::::BadState)?; - // You're auto-bonded forever, here. We might improve this by only bonding when - // you actually validate/nominate and remove once you unbond __everything__. - >::insert(&stash, &stash); - >::insert(&stash, payee); - - let current_era = CurrentEra::::get().unwrap_or(0); - let history_depth = T::HistoryDepth::get(); - let last_reward_era = current_era.saturating_sub(history_depth); - let stash_balance = T::Currency::free_balance(&stash); let value = value.min(stash_balance); Self::deposit_event(Event::::Bonded { stash: stash.clone(), amount: value }); - let item = StakingLedger { - stash: stash.clone(), - total: value, - active: value, - unlocking: Default::default(), - claimed_rewards: (last_reward_era..current_era) - .try_collect() - // Since last_reward_era is calculated as `current_era - - // HistoryDepth`, following bound is always expected to be - // satisfied. - .defensive_map_err(|_| Error::::BoundNotMet)?, - }; - Self::update_ledger(&controller_to_be_deprecated, &item); + let ledger = StakingLedger::::new(stash.clone(), value); + + // You're auto-bonded forever, here. We might improve this by only bonding when + // you actually validate/nominate and remove once you unbond __everything__. 
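// Illustrative sketch (plain Rust, hypothetical simplified types; not the pallet code):
// the shape of the reworked `bond` flow above, which rejects double-bonding and dust and
// then creates a fresh ledger and persists it together with the payee in one place
// (analogous to `StakingLedger::new` followed by `ledger.bond(payee)` in this diff),
// rather than writing `Bonded`/`Payee`/`Ledger` by hand inside the extrinsic.
use std::collections::HashMap;

type AccountId = u64;
type Balance = u128;

const MINIMUM_BALANCE: Balance = 10;

struct Ledger { total: Balance, active: Balance }

#[derive(Default)]
struct Staking {
    ledgers: HashMap<AccountId, Ledger>,
    payees: HashMap<AccountId, AccountId>,
}

impl Staking {
    fn bond(&mut self, stash: AccountId, value: Balance, payee: AccountId) -> Result<(), &'static str> {
        if self.ledgers.contains_key(&stash) {
            return Err("AlreadyBonded")
        }
        if value < MINIMUM_BALANCE {
            return Err("InsufficientBond") // reject dust bonds
        }
        // Creating and storing the ledger (plus the payee) is a single, self-contained step.
        self.ledgers.insert(stash, Ledger { total: value, active: value });
        self.payees.insert(stash, payee);
        Ok(())
    }
}

fn main() {
    let mut staking = Staking::default();
    assert!(staking.bond(11, 1_000, 11).is_ok());
    assert_eq!(staking.bond(11, 1_000, 11), Err("AlreadyBonded"));
    assert_eq!(staking.ledgers.get(&11).map(|l| l.active), Some(1_000));
}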
+ ledger.bond(payee)?; + Ok(()) } @@ -908,8 +967,7 @@ pub mod pallet { ) -> DispatchResult { let stash = ensure_signed(origin)?; - let controller = Self::bonded(&stash).ok_or(Error::::NotStash)?; - let mut ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; + let mut ledger = Self::ledger(StakingAccount::Stash(stash.clone()))?; let stash_balance = T::Currency::free_balance(&stash); if let Some(extra) = stash_balance.checked_sub(&ledger.total) { @@ -923,11 +981,10 @@ pub mod pallet { ); // NOTE: ledger must be updated prior to calling `Self::weight_of`. - Self::update_ledger(&controller, &ledger); + ledger.update()?; // update this staker in the sorted list, if they exist in it. if T::VoterList::contains(&stash) { - let _ = - T::VoterList::on_update(&stash, Self::weight_of(&ledger.stash)).defensive(); + let _ = T::VoterList::on_update(&stash, Self::weight_of(&stash)).defensive(); } Self::deposit_event(Event::::Bonded { stash, amount: extra }); @@ -963,9 +1020,8 @@ pub mod pallet { #[pallet::compact] value: BalanceOf, ) -> DispatchResultWithPostInfo { let controller = ensure_signed(origin)?; - let unlocking = Self::ledger(&controller) - .map(|l| l.unlocking.len()) - .ok_or(Error::::NotController)?; + let unlocking = + Self::ledger(Controller(controller.clone())).map(|l| l.unlocking.len())?; // if there are no unlocking chunks available, try to withdraw chunks older than // `BondingDuration` to proceed with the unbonding. @@ -981,8 +1037,9 @@ pub mod pallet { // we need to fetch the ledger again because it may have been mutated in the call // to `Self::do_withdraw_unbonded` above. - let mut ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; + let mut ledger = Self::ledger(Controller(controller))?; let mut value = value.min(ledger.active); + let stash = ledger.stash.clone(); ensure!( ledger.unlocking.len() < T::MaxUnlockingChunks::get() as usize, @@ -998,9 +1055,9 @@ pub mod pallet { ledger.active = Zero::zero(); } - let min_active_bond = if Nominators::::contains_key(&ledger.stash) { + let min_active_bond = if Nominators::::contains_key(&stash) { MinNominatorBond::::get() - } else if Validators::::contains_key(&ledger.stash) { + } else if Validators::::contains_key(&stash) { MinValidatorBond::::get() } else { Zero::zero() @@ -1024,15 +1081,14 @@ pub mod pallet { .map_err(|_| Error::::NoMoreChunks)?; }; // NOTE: ledger must be updated prior to calling `Self::weight_of`. - Self::update_ledger(&controller, &ledger); + ledger.update()?; // update this staker in the sorted list, if they exist in it. 
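// Illustrative sketch (plain Rust, simplified types; not the pallet's `StakingLedger`
// API): the idea behind looking ledgers up through a single `StakingAccount` entry point,
// as the calls above now do, so callers can pass either the stash or the controller and
// resolve to the same ledger. `Ledgers` and its fields are stand-ins for illustration.
use std::collections::HashMap;

type AccountId = u64;

enum StakingAccount {
    Stash(AccountId),
    Controller(AccountId),
}

struct Ledgers {
    // stash -> controller
    bonded: HashMap<AccountId, AccountId>,
    // controller -> ledger (represented here by the active amount only)
    ledger: HashMap<AccountId, u128>,
}

impl Ledgers {
    fn get(&self, account: StakingAccount) -> Result<u128, &'static str> {
        let controller = match account {
            StakingAccount::Controller(c) => c,
            StakingAccount::Stash(s) => *self.bonded.get(&s).ok_or("NotStash")?,
        };
        self.ledger.get(&controller).copied().ok_or("NotController")
    }
}

fn main() {
    let mut bonded = HashMap::new();
    bonded.insert(11, 11); // stash 11 is its own controller
    let mut ledger = HashMap::new();
    ledger.insert(11, 1_000u128);
    let ledgers = Ledgers { bonded, ledger };
    // Both access paths resolve to the same ledger.
    assert_eq!(ledgers.get(StakingAccount::Stash(11)), Ok(1_000));
    assert_eq!(ledgers.get(StakingAccount::Controller(11)), Ok(1_000));
}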
- if T::VoterList::contains(&ledger.stash) { - let _ = T::VoterList::on_update(&ledger.stash, Self::weight_of(&ledger.stash)) - .defensive(); + if T::VoterList::contains(&stash) { + let _ = T::VoterList::on_update(&stash, Self::weight_of(&stash)).defensive(); } - Self::deposit_event(Event::::Unbonded { stash: ledger.stash, amount: value }); + Self::deposit_event(Event::::Unbonded { stash, amount: value }); } let actual_weight = if let Some(withdraw_weight) = maybe_withdraw_weight { @@ -1089,7 +1145,7 @@ pub mod pallet { pub fn validate(origin: OriginFor, prefs: ValidatorPrefs) -> DispatchResult { let controller = ensure_signed(origin)?; - let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; + let ledger = Self::ledger(Controller(controller))?; ensure!(ledger.active >= MinValidatorBond::::get(), Error::::InsufficientBond); let stash = &ledger.stash; @@ -1135,7 +1191,8 @@ pub mod pallet { ) -> DispatchResult { let controller = ensure_signed(origin)?; - let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; + let ledger = Self::ledger(StakingAccount::Controller(controller.clone()))?; + ensure!(ledger.active >= MinNominatorBond::::get(), Error::::InsufficientBond); let stash = &ledger.stash; @@ -1202,7 +1259,9 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::chill())] pub fn chill(origin: OriginFor) -> DispatchResult { let controller = ensure_signed(origin)?; - let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; + + let ledger = Self::ledger(StakingAccount::Controller(controller))?; + Self::chill_stash(&ledger.stash); Ok(()) } @@ -1226,9 +1285,20 @@ pub mod pallet { payee: RewardDestination, ) -> DispatchResult { let controller = ensure_signed(origin)?; - let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; - let stash = &ledger.stash; - >::insert(stash, payee); + let ledger = Self::ledger(Controller(controller.clone()))?; + + ensure!( + (payee != { + #[allow(deprecated)] + RewardDestination::Controller + }), + Error::::ControllerDeprecated + ); + + let _ = ledger + .set_payee(payee) + .defensive_proof("ledger was retrieved from storage, thus its bonded; qed.")?; + Ok(()) } @@ -1250,18 +1320,24 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::set_controller())] pub fn set_controller(origin: OriginFor) -> DispatchResult { let stash = ensure_signed(origin)?; - let old_controller = Self::bonded(&stash).ok_or(Error::::NotStash)?; - if >::contains_key(&stash) { - return Err(Error::::AlreadyPaired.into()) - } - if old_controller != stash { - >::insert(&stash, &stash); - if let Some(l) = >::take(&old_controller) { - >::insert(&stash, l); + // the bonded map and ledger are mutated directly as this extrinsic is related to a + // (temporary) passive migration. + Self::ledger(StakingAccount::Stash(stash.clone())).map(|ledger| { + let controller = ledger.controller() + .defensive_proof("ledger was fetched used the StakingInterface, so controller field must exist; qed.") + .ok_or(Error::::NotController)?; + + if controller == stash { + // stash is already its own controller. + return Err(Error::::AlreadyPaired.into()) } - } - Ok(()) + // update bond and ledger. + >::remove(controller); + >::insert(&stash, &stash); + >::insert(&stash, ledger); + Ok(()) + })? } /// Sets the ideal number of validators. @@ -1409,11 +1485,9 @@ pub mod pallet { ) -> DispatchResult { ensure_root(origin)?; - // Remove all staking-related information. + // Remove all staking-related information and lock. 
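// Illustrative sketch (plain Rust, simplified maps; not the pallet code): the reworked
// `set_controller` above only "migrates" a stash to be its own controller. Conceptually,
// the ledger entry is re-keyed from the old controller to the stash and the bonded
// mapping is pointed at the stash; the types here are stand-ins for illustration.
use std::collections::HashMap;

type AccountId = u64;

struct Staking {
    // stash -> controller
    bonded: HashMap<AccountId, AccountId>,
    // controller -> active stake (stands in for the full ledger)
    ledger: HashMap<AccountId, u128>,
}

impl Staking {
    fn set_controller(&mut self, stash: AccountId) -> Result<(), &'static str> {
        let controller = *self.bonded.get(&stash).ok_or("NotStash")?;
        if controller == stash {
            return Err("AlreadyPaired") // stash is already its own controller
        }
        // Move the ledger under the stash key and point the bonded map at the stash.
        let ledger = self.ledger.remove(&controller).ok_or("NotController")?;
        self.bonded.insert(stash, stash);
        self.ledger.insert(stash, ledger);
        Ok(())
    }
}

fn main() {
    let mut staking = Staking {
        bonded: HashMap::from([(11, 10)]), // stash 11 currently controlled by 10
        ledger: HashMap::from([(10, 1_000u128)]),
    };
    assert!(staking.set_controller(11).is_ok());
    assert_eq!(staking.set_controller(11), Err("AlreadyPaired"));
    assert_eq!(staking.ledger.get(&11), Some(&1_000));
}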
Self::kill_stash(&stash, num_slashing_spans)?; - // Remove the lock. - T::Currency::remove_lock(STAKING_ID, &stash); Ok(()) } @@ -1464,21 +1538,21 @@ pub mod pallet { Ok(()) } - /// Pay out all the stakers behind a single validator for a single era. + /// Pay out the next page of the stakers behind a validator for the given era. /// - /// - `validator_stash` is the stash account of the validator. Their nominators, up to - /// `T::MaxNominatorRewardedPerValidator`, will also receive their rewards. + /// - `validator_stash` is the stash account of the validator. /// - `era` may be any era between `[current_era - history_depth; current_era]`. /// /// The origin of this call must be _Signed_. Any account can call this function, even if /// it is not one of the stakers. /// - /// ## Complexity /// - At most O(MaxNominatorRewardedPerValidator). + /// The reward payout could be paged in case there are too many nominators backing the + /// `validator_stash`. This call will pay out unpaid pages in ascending order. To claim a + /// specific page, use `payout_stakers_by_page`. + /// + /// If all pages are claimed, it returns the error `InvalidPage`. #[pallet::call_index(18)] - #[pallet::weight(T::WeightInfo::payout_stakers_alive_staked( - T::MaxNominatorRewardedPerValidator::get() - ))] + #[pallet::weight(T::WeightInfo::payout_stakers_alive_staked(T::MaxExposurePageSize::get()))] pub fn payout_stakers( origin: OriginFor, validator_stash: T::AccountId, @@ -1502,7 +1576,7 @@ pub mod pallet { #[pallet::compact] value: BalanceOf, ) -> DispatchResultWithPostInfo { let controller = ensure_signed(origin)?; - let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; + let ledger = Self::ledger(Controller(controller))?; ensure!(!ledger.unlocking.is_empty(), Error::::NoUnlockChunk); let initial_unlocking = ledger.unlocking.len() as u32; @@ -1515,16 +1589,18 @@ pub mod pallet { amount: rebonded_value, }); + let stash = ledger.stash.clone(); + let final_unlocking = ledger.unlocking.len(); + // NOTE: ledger must be updated prior to calling `Self::weight_of`. - Self::update_ledger(&controller, &ledger); - if T::VoterList::contains(&ledger.stash) { - let _ = T::VoterList::on_update(&ledger.stash, Self::weight_of(&ledger.stash)) - .defensive(); + ledger.update()?; + if T::VoterList::contains(&stash) { + let _ = T::VoterList::on_update(&stash, Self::weight_of(&stash)).defensive(); } let removed_chunks = 1u32 // for the case where the last iterated chunk is not removed .saturating_add(initial_unlocking) - .saturating_sub(ledger.unlocking.len() as u32); + .saturating_sub(final_unlocking as u32); Ok(Some(T::WeightInfo::rebond(removed_chunks)).into()) } @@ -1556,13 +1632,11 @@ pub mod pallet { let ed = T::Currency::minimum_balance(); let reapable = T::Currency::total_balance(&stash) < ed || - Self::ledger(Self::bonded(stash.clone()).ok_or(Error::::NotStash)?) - .map(|l| l.total) - .unwrap_or_default() < ed; + Self::ledger(Stash(stash.clone())).map(|l| l.total).unwrap_or_default() < ed; ensure!(reapable, Error::::FundedTarget); + // Remove all staking-related information and lock.
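// Illustrative sketch (plain Rust; not the pallet's implementation): picking the next
// unclaimed page in ascending order, which is what the page-less `payout_stakers` call
// above is documented to do, and returning `InvalidPage` once every page has been claimed
// for the given era and validator.
fn next_unclaimed_page(page_count: u32, claimed: &[u32]) -> Result<u32, &'static str> {
    (0..page_count).find(|page| !claimed.contains(page)).ok_or("InvalidPage")
}

fn main() {
    // 3 pages in total, pages 0 and 1 already claimed -> page 2 is paid out next.
    assert_eq!(next_unclaimed_page(3, &[0, 1]), Ok(2));
    // Every page claimed -> nothing left to pay out for this (era, validator).
    assert_eq!(next_unclaimed_page(3, &[0, 1, 2]), Err("InvalidPage"));
}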
Self::kill_stash(&stash, num_slashing_spans)?; - T::Currency::remove_lock(STAKING_ID, &stash); Ok(Pays::No.into()) } @@ -1582,7 +1656,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::kick(who.len() as u32))] pub fn kick(origin: OriginFor, who: Vec>) -> DispatchResult { let controller = ensure_signed(origin)?; - let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; + let ledger = Self::ledger(Controller(controller))?; let stash = &ledger.stash; for nom_stash in who @@ -1691,7 +1765,7 @@ pub mod pallet { pub fn chill_other(origin: OriginFor, controller: T::AccountId) -> DispatchResult { // Anyone can call this function. let caller = ensure_signed(origin)?; - let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; + let ledger = Self::ledger(Controller(controller.clone()))?; let stash = ledger.stash; // In order for one user to chill another user, the following conditions must be met: @@ -1780,6 +1854,65 @@ pub mod pallet { MinCommission::::put(new); Ok(()) } + + /// Pay out a page of the stakers behind a validator for the given era and page. + /// + /// - `validator_stash` is the stash account of the validator. + /// - `era` may be any era between `[current_era - history_depth; current_era]`. + /// - `page` is the page index of nominators to pay out, with a value between 0 and + /// `num_nominators / T::MaxExposurePageSize`. + /// + /// The origin of this call must be _Signed_. Any account can call this function, even if + /// it is not one of the stakers. + /// + /// If a validator has more than [`Config::MaxExposurePageSize`] nominators backing + /// them, then the list of nominators is paged, with each page being capped at + /// [`Config::MaxExposurePageSize`]. If a validator has more than one page of nominators, + /// the call needs to be made for each page separately in order for all the nominators + /// backing a validator to receive the reward. The nominators are not sorted across pages, + /// so it should not be assumed that the highest staker is on the topmost page (or vice + /// versa). If rewards are not claimed in [`Config::HistoryDepth`] eras, they are lost. + #[pallet::call_index(26)] + #[pallet::weight(T::WeightInfo::payout_stakers_alive_staked(T::MaxExposurePageSize::get()))] + pub fn payout_stakers_by_page( + origin: OriginFor, + validator_stash: T::AccountId, + era: EraIndex, + page: Page, + ) -> DispatchResultWithPostInfo { + ensure_signed(origin)?; + Self::do_payout_stakers_by_page(validator_stash, era, page) + } + + /// Migrates an account's `RewardDestination::Controller` to + /// `RewardDestination::Account(controller)`. + /// + /// Effects will be felt instantly (as soon as this function is completed successfully). + /// + /// This will waive the transaction fee if the `payee` is successfully migrated.
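// Illustrative sketch (plain Rust; not pallet code): how many pages a validator's
// nominator set spans for a given page size. Valid `page` arguments for
// `payout_stakers_by_page` then run from 0 up to (but not including) this count.
fn exposure_page_count(num_nominators: u32, max_page_size: u32) -> u32 {
    assert!(max_page_size > 0);
    // Ceiling division: a partially filled last page still counts as a page.
    (num_nominators + max_page_size - 1) / max_page_size
}

fn main() {
    // With a page size of 512, 1300 nominators are paid out over pages 0, 1 and 2.
    assert_eq!(exposure_page_count(1300, 512), 3);
    // An exact multiple needs no extra page.
    assert_eq!(exposure_page_count(1024, 512), 2);
}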
+ #[pallet::call_index(27)] + #[pallet::weight(T::WeightInfo::update_payee())] + pub fn update_payee( + origin: OriginFor, + controller: T::AccountId, + ) -> DispatchResultWithPostInfo { + let _ = ensure_signed(origin)?; + let ledger = Self::ledger(StakingAccount::Controller(controller.clone()))?; + + ensure!( + (Payee::::get(&ledger.stash) == { + #[allow(deprecated)] + RewardDestination::Controller + }), + Error::::NotController + ); + + let _ = ledger + .set_payee(RewardDestination::Account(controller)) + .defensive_proof("ledger should have been previously retrieved from storage.")?; + + Ok(Pays::No.into()) + } } } diff --git a/substrate/frame/staking/src/slashing.rs b/substrate/frame/staking/src/slashing.rs index bb02da73f6e5d5ddb96f23f61a7ac4ca463e0f03..0d84d503733e6451feb69f287d69f8655fe0b20e 100644 --- a/substrate/frame/staking/src/slashing.rs +++ b/substrate/frame/staking/src/slashing.rs @@ -597,15 +597,11 @@ pub fn do_slash( slashed_imbalance: &mut NegativeImbalanceOf, slash_era: EraIndex, ) { - let controller = match >::bonded(stash).defensive() { - None => return, - Some(c) => c, - }; - - let mut ledger = match >::ledger(&controller) { - Some(ledger) => ledger, - None => return, // nothing to do. - }; + let mut ledger = + match Pallet::::ledger(sp_staking::StakingAccount::Stash(stash.clone())).defensive() { + Ok(ledger) => ledger, + Err(_) => return, // nothing to do. + }; let value = ledger.slash(value, T::Currency::minimum_balance(), slash_era); @@ -618,7 +614,9 @@ pub fn do_slash( *reward_payout = reward_payout.saturating_sub(missing); } - >::update_ledger(&controller, &ledger); + let _ = ledger + .update() + .defensive_proof("ledger fetched from storage so it exists in storage; qed."); // trigger the event >::deposit_event(super::Event::::Slashed { diff --git a/substrate/frame/staking/src/tests.rs b/substrate/frame/staking/src/tests.rs index 78183cfde9296bdec25327b39c8fd6691a403296..7d967609f520256573ff85891cd885304dae4f09 100644 --- a/substrate/frame/staking/src/tests.rs +++ b/substrate/frame/staking/src/tests.rs @@ -18,6 +18,7 @@ //! Tests for the module. 
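// Illustrative sketch (plain Rust, simplified types; not the pallet code): the effect of
// the `update_payee` call above. A payee that is still the deprecated `Controller`
// variant is rewritten to `Account(controller)`; anything else is rejected since there is
// nothing to migrate. The enum and function here are stand-ins for illustration.
#[derive(Debug, PartialEq)]
enum RewardDestination {
    Staked,
    Stash,
    Controller, // deprecated
    Account(u64),
}

fn update_payee(current: RewardDestination, controller: u64) -> Result<RewardDestination, &'static str> {
    match current {
        RewardDestination::Controller => Ok(RewardDestination::Account(controller)),
        _ => Err("NotController"), // nothing to migrate for this staker
    }
}

fn main() {
    assert_eq!(update_payee(RewardDestination::Controller, 11), Ok(RewardDestination::Account(11)));
    assert_eq!(update_payee(RewardDestination::Stash, 11), Err("NotController"));
}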
use super::{ConfigOp, Event, *}; +use crate::ledger::StakingLedgerInspect; use frame_election_provider_support::{ bounds::{DataProviderBounds, ElectionBoundsBuilder}, ElectionProvider, SortedListProvider, Support, @@ -28,12 +29,13 @@ use frame_support::{ pallet_prelude::*, traits::{Currency, Get, ReservableCurrency}, }; + use mock::*; use pallet_balances::Error as BalancesError; use sp_runtime::{ assert_eq_error_rate, bounded_vec, traits::{BadOrigin, Dispatchable}, - Perbill, Percent, Rounding, TokenError, + Perbill, Percent, Perquintill, Rounding, TokenError, }; use sp_staking::{ offence::{DisableStrategy, OffenceDetails, OnOffenceHandler}, @@ -151,28 +153,28 @@ fn basic_setup_works() { // Account 11 controls its own stash, which is 100 * balance_factor units assert_eq!( - Staking::ledger(&11).unwrap(), - StakingLedger { + Ledger::get(&11).unwrap(), + StakingLedgerInspect:: { stash: 11, total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], } ); // Account 21 controls its own stash, which is 200 * balance_factor units assert_eq!( - Staking::ledger(&21), - Some(StakingLedger { + Ledger::get(&21).unwrap(), + StakingLedgerInspect:: { stash: 21, total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: bounded_vec![], - }) + legacy_claimed_rewards: bounded_vec![], + } ); // Account 1 does not control any stash - assert_eq!(Staking::ledger(&1), None); + assert!(Staking::ledger(1.into()).is_err()); // ValidatorPrefs are default assert_eq_uvec!( @@ -185,19 +187,19 @@ fn basic_setup_works() { ); assert_eq!( - Staking::ledger(101), - Some(StakingLedger { + Staking::ledger(101.into()).unwrap(), + StakingLedgerInspect { stash: 101, total: 500, active: 500, unlocking: Default::default(), - claimed_rewards: bounded_vec![], - }) + legacy_claimed_rewards: bounded_vec![], + } ); assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); assert_eq!( - Staking::eras_stakers(active_era(), 11), + Staking::eras_stakers(active_era(), &11), Exposure { total: 1125, own: 1000, @@ -205,7 +207,7 @@ fn basic_setup_works() { }, ); assert_eq!( - Staking::eras_stakers(active_era(), 21), + Staking::eras_stakers(active_era(), &21), Exposure { total: 1375, own: 1000, @@ -265,7 +267,7 @@ fn change_controller_works() { #[test] fn change_controller_already_paired_once_stash() { ExtBuilder::default().build_and_execute(|| { - // 10 and 11 are bonded as controller and stash respectively. + // 11 and 11 are bonded as controller and stash respectively. assert_eq!(Staking::bonded(&11), Some(11)); // 11 is initially a validator. @@ -296,9 +298,9 @@ fn rewards_should_work() { let init_balance_101 = Balances::total_balance(&101); // Set payees - Payee::::insert(11, RewardDestination::Controller); - Payee::::insert(21, RewardDestination::Controller); - Payee::::insert(101, RewardDestination::Controller); + Payee::::insert(11, RewardDestination::Account(11)); + Payee::::insert(21, RewardDestination::Account(21)); + Payee::::insert(101, RewardDestination::Account(101)); Pallet::::reward_by_ids(vec![(11, 50)]); Pallet::::reward_by_ids(vec![(11, 50)]); @@ -415,7 +417,7 @@ fn staking_should_work() { // --- Block 2: start_session(2); // add a new candidate for being a validator. account 3 controlled by 4. 
- assert_ok!(Staking::bond(RuntimeOrigin::signed(3), 1500, RewardDestination::Controller)); + assert_ok!(Staking::bond(RuntimeOrigin::signed(3), 1500, RewardDestination::Account(3))); assert_ok!(Staking::validate(RuntimeOrigin::signed(3), ValidatorPrefs::default())); assert_ok!(Session::set_keys( RuntimeOrigin::signed(3), @@ -460,14 +462,14 @@ fn staking_should_work() { // Note: the stashed value of 4 is still lock assert_eq!( - Staking::ledger(&3), - Some(StakingLedger { + Staking::ledger(3.into()).unwrap(), + StakingLedgerInspect { stash: 3, total: 1500, active: 1500, unlocking: Default::default(), - claimed_rewards: bounded_vec![0], - }) + legacy_claimed_rewards: bounded_vec![], + } ); // e.g. it cannot reserve more than 500 that it has free from the total 2000 assert_noop!(Balances::reserve(&3, 501), BalancesError::::LiquidityRestrictions); @@ -521,7 +523,7 @@ fn less_than_needed_candidates_works() { // But the exposure is updated in a simple way. No external votes exists. // This is purely self-vote. - assert!(ErasStakers::::iter_prefix_values(active_era()) + assert!(ErasStakersPaged::::iter_prefix_values((active_era(),)) .all(|exposure| exposure.others.is_empty())); }); } @@ -583,22 +585,10 @@ fn nominating_and_rewards_should_work() { assert_ok!(Staking::validate(RuntimeOrigin::signed(31), Default::default())); // Set payee to controller. - assert_ok!(Staking::set_payee( - RuntimeOrigin::signed(11), - RewardDestination::Controller - )); - assert_ok!(Staking::set_payee( - RuntimeOrigin::signed(21), - RewardDestination::Controller - )); - assert_ok!(Staking::set_payee( - RuntimeOrigin::signed(31), - RewardDestination::Controller - )); - assert_ok!(Staking::set_payee( - RuntimeOrigin::signed(41), - RewardDestination::Controller - )); + assert_ok!(Staking::set_payee(RuntimeOrigin::signed(11), RewardDestination::Stash)); + assert_ok!(Staking::set_payee(RuntimeOrigin::signed(21), RewardDestination::Stash)); + assert_ok!(Staking::set_payee(RuntimeOrigin::signed(31), RewardDestination::Stash)); + assert_ok!(Staking::set_payee(RuntimeOrigin::signed(41), RewardDestination::Stash)); // give the man some money let initial_balance = 1000; @@ -610,14 +600,14 @@ fn nominating_and_rewards_should_work() { assert_ok!(Staking::bond( RuntimeOrigin::signed(1), 1000, - RewardDestination::Controller + RewardDestination::Account(1) )); assert_ok!(Staking::nominate(RuntimeOrigin::signed(1), vec![11, 21, 31])); assert_ok!(Staking::bond( RuntimeOrigin::signed(3), 1000, - RewardDestination::Controller + RewardDestination::Account(3) )); assert_ok!(Staking::nominate(RuntimeOrigin::signed(3), vec![11, 21, 41])); @@ -639,9 +629,9 @@ fn nominating_and_rewards_should_work() { assert_eq!(Balances::total_balance(&21), initial_balance_21 + total_payout_0 / 2); initial_balance_21 = Balances::total_balance(&21); - assert_eq!(ErasStakers::::iter_prefix_values(active_era()).count(), 2); + assert_eq!(ErasStakersPaged::::iter_prefix_values((active_era(),)).count(), 2); assert_eq!( - Staking::eras_stakers(active_era(), 11), + Staking::eras_stakers(active_era(), &11), Exposure { total: 1000 + 800, own: 1000, @@ -652,7 +642,7 @@ fn nominating_and_rewards_should_work() { }, ); assert_eq!( - Staking::eras_stakers(active_era(), 21), + Staking::eras_stakers(active_era(), &21), Exposure { total: 1000 + 1200, own: 1000, @@ -712,14 +702,14 @@ fn nominators_also_get_slashed_pro_rata() { ExtBuilder::default().build_and_execute(|| { mock::start_active_era(1); let slash_percent = Perbill::from_percent(5); - let initial_exposure = 
Staking::eras_stakers(active_era(), 11); + let initial_exposure = Staking::eras_stakers(active_era(), &11); // 101 is a nominator for 11 assert_eq!(initial_exposure.others.first().unwrap().who, 101); // staked values; - let nominator_stake = Staking::ledger(101).unwrap().active; + let nominator_stake = Staking::ledger(101.into()).unwrap().active; let nominator_balance = balances(&101).0; - let validator_stake = Staking::ledger(11).unwrap().active; + let validator_stake = Staking::ledger(11.into()).unwrap().active; let validator_balance = balances(&11).0; let exposed_stake = initial_exposure.total; let exposed_validator = initial_exposure.own; @@ -732,8 +722,8 @@ fn nominators_also_get_slashed_pro_rata() { ); // both stakes must have been decreased. - assert!(Staking::ledger(101).unwrap().active < nominator_stake); - assert!(Staking::ledger(11).unwrap().active < validator_stake); + assert!(Staking::ledger(101.into()).unwrap().active < nominator_stake); + assert!(Staking::ledger(11.into()).unwrap().active < validator_stake); let slash_amount = slash_percent * exposed_stake; let validator_share = @@ -746,8 +736,8 @@ fn nominators_also_get_slashed_pro_rata() { assert!(nominator_share > 0); // both stakes must have been decreased pro-rata. - assert_eq!(Staking::ledger(101).unwrap().active, nominator_stake - nominator_share); - assert_eq!(Staking::ledger(11).unwrap().active, validator_stake - validator_share); + assert_eq!(Staking::ledger(101.into()).unwrap().active, nominator_stake - nominator_share); + assert_eq!(Staking::ledger(11.into()).unwrap().active, validator_stake - validator_share); assert_eq!( balances(&101).0, // free balance nominator_balance - nominator_share, @@ -980,7 +970,7 @@ fn cannot_transfer_staked_balance() { // Confirm account 11 has some free balance assert_eq!(Balances::free_balance(11), 1000); // Confirm account 11 (via controller) is totally staked - assert_eq!(Staking::eras_stakers(active_era(), 11).total, 1000); + assert_eq!(Staking::eras_stakers(active_era(), &11).total, 1000); // Confirm account 11 cannot transfer as a result assert_noop!( Balances::transfer_allow_death(RuntimeOrigin::signed(11), 21, 1), @@ -1005,7 +995,7 @@ fn cannot_transfer_staked_balance_2() { // Confirm account 21 has some free balance assert_eq!(Balances::free_balance(21), 2000); // Confirm account 21 (via controller) is totally staked - assert_eq!(Staking::eras_stakers(active_era(), 21).total, 1000); + assert_eq!(Staking::eras_stakers(active_era(), &21).total, 1000); // Confirm account 21 can transfer at most 1000 assert_noop!( Balances::transfer_allow_death(RuntimeOrigin::signed(21), 21, 1001), @@ -1024,7 +1014,7 @@ fn cannot_reserve_staked_balance() { // Confirm account 11 has some free balance assert_eq!(Balances::free_balance(11), 1000); // Confirm account 11 (via controller 10) is totally staked - assert_eq!(Staking::eras_stakers(active_era(), 11).own, 1000); + assert_eq!(Staking::eras_stakers(active_era(), &11).own, 1000); // Confirm account 11 cannot reserve as a result assert_noop!(Balances::reserve(&11, 1), BalancesError::::LiquidityRestrictions); @@ -1047,14 +1037,14 @@ fn reward_destination_works() { assert_eq!(Balances::free_balance(11), 1000); // Check how much is at stake assert_eq!( - Staking::ledger(&11), - Some(StakingLedger { + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { stash: 11, total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: bounded_vec![], - }) + legacy_claimed_rewards: bounded_vec![], + } ); // Compute total payout now 
for whole duration as other parameter won't change @@ -1065,21 +1055,24 @@ fn reward_destination_works() { mock::make_all_reward_payment(0); // Check that RewardDestination is Staked (default) - assert_eq!(Staking::payee(&11), RewardDestination::Staked); + assert_eq!(Staking::payee(11.into()), RewardDestination::Staked); // Check that reward went to the stash account of validator assert_eq!(Balances::free_balance(11), 1000 + total_payout_0); // Check that amount at stake increased accordingly assert_eq!( - Staking::ledger(&11), - Some(StakingLedger { + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { stash: 11, total: 1000 + total_payout_0, active: 1000 + total_payout_0, unlocking: Default::default(), - claimed_rewards: bounded_vec![0], - }) + legacy_claimed_rewards: bounded_vec![], + } ); + // (era 0, page 0) is claimed + assert_eq!(Staking::claimed_rewards(0, &11), vec![0]); + // Change RewardDestination to Stash >::insert(&11, RewardDestination::Stash); @@ -1091,23 +1084,28 @@ fn reward_destination_works() { mock::make_all_reward_payment(1); // Check that RewardDestination is Stash - assert_eq!(Staking::payee(&11), RewardDestination::Stash); + assert_eq!(Staking::payee(11.into()), RewardDestination::Stash); // Check that reward went to the stash account assert_eq!(Balances::free_balance(11), 1000 + total_payout_0 + total_payout_1); + // Record this value + let recorded_stash_balance = 1000 + total_payout_0 + total_payout_1; // Check that amount at stake is NOT increased assert_eq!( - Staking::ledger(&11), - Some(StakingLedger { + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { stash: 11, total: 1000 + total_payout_0, active: 1000 + total_payout_0, unlocking: Default::default(), - claimed_rewards: bounded_vec![0, 1], - }) + legacy_claimed_rewards: bounded_vec![], + } ); - // Change RewardDestination to Controller - >::insert(&11, RewardDestination::Controller); + // (era 1, page 0) is claimed + assert_eq!(Staking::claimed_rewards(1, &11), vec![0]); + + // Change RewardDestination to Account + >::insert(&11, RewardDestination::Account(11)); // Check controller balance assert_eq!(Balances::free_balance(11), 23150); @@ -1119,21 +1117,24 @@ fn reward_destination_works() { mock::start_active_era(3); mock::make_all_reward_payment(2); - // Check that RewardDestination is Controller - assert_eq!(Staking::payee(&11), RewardDestination::Controller); + // Check that RewardDestination is Account(11) + assert_eq!(Staking::payee(11.into()), RewardDestination::Account(11)); // Check that reward went to the controller account - assert_eq!(Balances::free_balance(11), 23150 + total_payout_2); + assert_eq!(Balances::free_balance(11), recorded_stash_balance + total_payout_2); // Check that amount at stake is NOT increased assert_eq!( - Staking::ledger(&11), - Some(StakingLedger { + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { stash: 11, total: 1000 + total_payout_0, active: 1000 + total_payout_0, unlocking: Default::default(), - claimed_rewards: bounded_vec![0, 1, 2], - }) + legacy_claimed_rewards: bounded_vec![], + } ); + + // (era 2, page 0) is claimed + assert_eq!(Staking::claimed_rewards(2, &11), vec![0]); }); } @@ -1146,9 +1147,9 @@ fn validator_payment_prefs_work() { let commission = Perbill::from_percent(40); >::insert(&11, ValidatorPrefs { commission, ..Default::default() }); - // Reward controller so staked ratio doesn't change. 
- >::insert(&11, RewardDestination::Controller); - >::insert(&101, RewardDestination::Controller); + // Reward stash so staked ratio doesn't change. + >::insert(&11, RewardDestination::Stash); + >::insert(&101, RewardDestination::Stash); mock::start_active_era(1); mock::make_all_reward_payment(0); @@ -1158,7 +1159,7 @@ fn validator_payment_prefs_work() { // Compute total payout now for whole duration as other parameter won't change let total_payout_1 = current_total_payout_for_duration(reward_time_per_era()); - let exposure_1 = Staking::eras_stakers(active_era(), 11); + let exposure_1 = Staking::eras_stakers(active_era(), &11); Pallet::::reward_by_ids(vec![(11, 1)]); mock::start_active_era(2); @@ -1185,14 +1186,14 @@ fn bond_extra_works() { assert_eq!(Staking::bonded(&11), Some(11)); // Check how much is at stake assert_eq!( - Staking::ledger(&11), - Some(StakingLedger { + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { stash: 11, total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: bounded_vec![], - }) + legacy_claimed_rewards: bounded_vec![], + } ); // Give account 11 some large free balance greater than total @@ -1202,28 +1203,28 @@ fn bond_extra_works() { assert_ok!(Staking::bond_extra(RuntimeOrigin::signed(11), 100)); // There should be 100 more `total` and `active` in the ledger assert_eq!( - Staking::ledger(&11), - Some(StakingLedger { + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { stash: 11, total: 1000 + 100, active: 1000 + 100, unlocking: Default::default(), - claimed_rewards: bounded_vec![], - }) + legacy_claimed_rewards: bounded_vec![], + } ); // Call the bond_extra function with a large number, should handle it assert_ok!(Staking::bond_extra(RuntimeOrigin::signed(11), Balance::max_value())); // The full amount of the funds should now be in the total and active assert_eq!( - Staking::ledger(&11), - Some(StakingLedger { + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { stash: 11, total: 1000000, active: 1000000, unlocking: Default::default(), - claimed_rewards: bounded_vec![], - }) + legacy_claimed_rewards: bounded_vec![], + } ); }); } @@ -1237,8 +1238,8 @@ fn bond_extra_and_withdraw_unbonded_works() { // * it can unbond a portion of its funds from the stash account. // * Once the unbonding period is done, it can actually take the funds out of the stash. ExtBuilder::default().nominate(false).build_and_execute(|| { - // Set payee to controller. avoids confusion - assert_ok!(Staking::set_payee(RuntimeOrigin::signed(11), RewardDestination::Controller)); + // Set payee to stash. 
+ assert_ok!(Staking::set_payee(RuntimeOrigin::signed(11), RewardDestination::Stash)); // Give account 11 some large free balance greater than total let _ = Balances::make_free_balance_be(&11, 1000000); @@ -1254,17 +1255,17 @@ fn bond_extra_and_withdraw_unbonded_works() { // Initial state of 11 assert_eq!( - Staking::ledger(&11), - Some(StakingLedger { + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { stash: 11, total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: bounded_vec![], - }) + legacy_claimed_rewards: bounded_vec![], + } ); assert_eq!( - Staking::eras_stakers(active_era(), 11), + Staking::eras_stakers(active_era(), &11), Exposure { total: 1000, own: 1000, others: vec![] } ); @@ -1272,18 +1273,18 @@ fn bond_extra_and_withdraw_unbonded_works() { Staking::bond_extra(RuntimeOrigin::signed(11), 100).unwrap(); assert_eq!( - Staking::ledger(&11), - Some(StakingLedger { + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { stash: 11, total: 1000 + 100, active: 1000 + 100, unlocking: Default::default(), - claimed_rewards: bounded_vec![], - }) + legacy_claimed_rewards: bounded_vec![], + } ); // Exposure is a snapshot! only updated after the next era update. assert_ne!( - Staking::eras_stakers(active_era(), 11), + Staking::eras_stakers(active_era(), &11), Exposure { total: 1000 + 100, own: 1000 + 100, others: vec![] } ); @@ -1293,45 +1294,45 @@ fn bond_extra_and_withdraw_unbonded_works() { // ledger should be the same. assert_eq!( - Staking::ledger(&11), - Some(StakingLedger { + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { stash: 11, total: 1000 + 100, active: 1000 + 100, unlocking: Default::default(), - claimed_rewards: bounded_vec![], - }) + legacy_claimed_rewards: bounded_vec![], + } ); // Exposure is now updated. assert_eq!( - Staking::eras_stakers(active_era(), 11), + Staking::eras_stakers(active_era(), &11), Exposure { total: 1000 + 100, own: 1000 + 100, others: vec![] } ); // Unbond almost all of the funds in stash. Staking::unbond(RuntimeOrigin::signed(11), 1000).unwrap(); assert_eq!( - Staking::ledger(&11), - Some(StakingLedger { + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { stash: 11, total: 1000 + 100, active: 100, unlocking: bounded_vec![UnlockChunk { value: 1000, era: 2 + 3 }], - claimed_rewards: bounded_vec![], - }), + legacy_claimed_rewards: bounded_vec![], + }, ); // Attempting to free the balances now will fail. 2 eras need to pass. assert_ok!(Staking::withdraw_unbonded(RuntimeOrigin::signed(11), 0)); assert_eq!( - Staking::ledger(&11), - Some(StakingLedger { + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { stash: 11, total: 1000 + 100, active: 100, unlocking: bounded_vec![UnlockChunk { value: 1000, era: 2 + 3 }], - claimed_rewards: bounded_vec![], - }), + legacy_claimed_rewards: bounded_vec![], + }, ); // trigger next era. @@ -1340,14 +1341,14 @@ fn bond_extra_and_withdraw_unbonded_works() { // nothing yet assert_ok!(Staking::withdraw_unbonded(RuntimeOrigin::signed(11), 0)); assert_eq!( - Staking::ledger(&11), - Some(StakingLedger { + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { stash: 11, total: 1000 + 100, active: 100, unlocking: bounded_vec![UnlockChunk { value: 1000, era: 2 + 3 }], - claimed_rewards: bounded_vec![], - }), + legacy_claimed_rewards: bounded_vec![], + }, ); // trigger next era. 
@@ -1356,14 +1357,14 @@ fn bond_extra_and_withdraw_unbonded_works() { assert_ok!(Staking::withdraw_unbonded(RuntimeOrigin::signed(11), 0)); // Now the value is free and the staking ledger is updated. assert_eq!( - Staking::ledger(&11), - Some(StakingLedger { + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { stash: 11, total: 100, active: 100, unlocking: Default::default(), - claimed_rewards: bounded_vec![], - }), + legacy_claimed_rewards: bounded_vec![], + }, ); }) } @@ -1390,7 +1391,7 @@ fn many_unbond_calls_should_work() { // `BondingDuration` == 3). assert_ok!(Staking::unbond(RuntimeOrigin::signed(11), 1)); assert_eq!( - Staking::ledger(&11).map(|l| l.unlocking.len()).unwrap(), + Staking::ledger(11.into()).map(|l| l.unlocking.len()).unwrap(), <::MaxUnlockingChunks as Get>::get() as usize ); @@ -1405,7 +1406,7 @@ fn many_unbond_calls_should_work() { // only slots within last `BondingDuration` are filled. assert_eq!( - Staking::ledger(&11).map(|l| l.unlocking.len()).unwrap(), + Staking::ledger(11.into()).map(|l| l.unlocking.len()).unwrap(), <::BondingDuration>::get() as usize ); }) @@ -1448,8 +1449,8 @@ fn rebond_works() { // * it can unbond a portion of its funds from the stash account. // * it can re-bond a portion of the funds scheduled to unlock. ExtBuilder::default().nominate(false).build_and_execute(|| { - // Set payee to controller. avoids confusion - assert_ok!(Staking::set_payee(RuntimeOrigin::signed(11), RewardDestination::Controller)); + // Set payee to stash. + assert_ok!(Staking::set_payee(RuntimeOrigin::signed(11), RewardDestination::Stash)); // Give account 11 some large free balance greater than total let _ = Balances::make_free_balance_be(&11, 1000000); @@ -1459,14 +1460,14 @@ fn rebond_works() { // Initial state of 11 assert_eq!( - Staking::ledger(&11), - Some(StakingLedger { + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { stash: 11, total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: bounded_vec![], - }) + legacy_claimed_rewards: bounded_vec![], + } ); mock::start_active_era(2); @@ -1478,66 +1479,66 @@ fn rebond_works() { // Unbond almost all of the funds in stash. Staking::unbond(RuntimeOrigin::signed(11), 900).unwrap(); assert_eq!( - Staking::ledger(&11), - Some(StakingLedger { + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { stash: 11, total: 1000, active: 100, unlocking: bounded_vec![UnlockChunk { value: 900, era: 2 + 3 }], - claimed_rewards: bounded_vec![], - }) + legacy_claimed_rewards: bounded_vec![], + } ); // Re-bond all the funds unbonded. Staking::rebond(RuntimeOrigin::signed(11), 900).unwrap(); assert_eq!( - Staking::ledger(&11), - Some(StakingLedger { + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { stash: 11, total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: bounded_vec![], - }) + legacy_claimed_rewards: bounded_vec![], + } ); // Unbond almost all of the funds in stash. Staking::unbond(RuntimeOrigin::signed(11), 900).unwrap(); assert_eq!( - Staking::ledger(&11), - Some(StakingLedger { + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { stash: 11, total: 1000, active: 100, unlocking: bounded_vec![UnlockChunk { value: 900, era: 5 }], - claimed_rewards: bounded_vec![], - }) + legacy_claimed_rewards: bounded_vec![], + } ); // Re-bond part of the funds unbonded. 
Staking::rebond(RuntimeOrigin::signed(11), 500).unwrap(); assert_eq!( - Staking::ledger(&11), - Some(StakingLedger { + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { stash: 11, total: 1000, active: 600, unlocking: bounded_vec![UnlockChunk { value: 400, era: 5 }], - claimed_rewards: bounded_vec![], - }) + legacy_claimed_rewards: bounded_vec![], + } ); // Re-bond the remainder of the funds unbonded. Staking::rebond(RuntimeOrigin::signed(11), 500).unwrap(); assert_eq!( - Staking::ledger(&11), - Some(StakingLedger { + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { stash: 11, total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: bounded_vec![], - }) + legacy_claimed_rewards: bounded_vec![], + } ); // Unbond parts of the funds in stash. @@ -1545,27 +1546,27 @@ fn rebond_works() { Staking::unbond(RuntimeOrigin::signed(11), 300).unwrap(); Staking::unbond(RuntimeOrigin::signed(11), 300).unwrap(); assert_eq!( - Staking::ledger(&11), - Some(StakingLedger { + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { stash: 11, total: 1000, active: 100, unlocking: bounded_vec![UnlockChunk { value: 900, era: 5 }], - claimed_rewards: bounded_vec![], - }) + legacy_claimed_rewards: bounded_vec![], + } ); // Re-bond part of the funds unbonded. Staking::rebond(RuntimeOrigin::signed(11), 500).unwrap(); assert_eq!( - Staking::ledger(&11), - Some(StakingLedger { + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { stash: 11, total: 1000, active: 600, unlocking: bounded_vec![UnlockChunk { value: 400, era: 5 }], - claimed_rewards: bounded_vec![], - }) + legacy_claimed_rewards: bounded_vec![], + } ); }) } @@ -1574,8 +1575,8 @@ fn rebond_works() { fn rebond_is_fifo() { // Rebond should proceed by reversing the most recent bond operations. ExtBuilder::default().nominate(false).build_and_execute(|| { - // Set payee to controller. avoids confusion - assert_ok!(Staking::set_payee(RuntimeOrigin::signed(11), RewardDestination::Controller)); + // Set payee to stash. + assert_ok!(Staking::set_payee(RuntimeOrigin::signed(11), RewardDestination::Stash)); // Give account 11 some large free balance greater than total let _ = Balances::make_free_balance_be(&11, 1000000); @@ -1585,14 +1586,14 @@ fn rebond_is_fifo() { // Initial state of 10 assert_eq!( - Staking::ledger(&11), - Some(StakingLedger { + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { stash: 11, total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: bounded_vec![], - }) + legacy_claimed_rewards: bounded_vec![], + } ); mock::start_active_era(2); @@ -1600,14 +1601,14 @@ fn rebond_is_fifo() { // Unbond some of the funds in stash. Staking::unbond(RuntimeOrigin::signed(11), 400).unwrap(); assert_eq!( - Staking::ledger(&11), - Some(StakingLedger { + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { stash: 11, total: 1000, active: 600, unlocking: bounded_vec![UnlockChunk { value: 400, era: 2 + 3 }], - claimed_rewards: bounded_vec![], - }) + legacy_claimed_rewards: bounded_vec![], + } ); mock::start_active_era(3); @@ -1615,8 +1616,8 @@ fn rebond_is_fifo() { // Unbond more of the funds in stash. 
Staking::unbond(RuntimeOrigin::signed(11), 300).unwrap(); assert_eq!( - Staking::ledger(&11), - Some(StakingLedger { + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { stash: 11, total: 1000, active: 300, @@ -1624,8 +1625,8 @@ fn rebond_is_fifo() { UnlockChunk { value: 400, era: 2 + 3 }, UnlockChunk { value: 300, era: 3 + 3 }, ], - claimed_rewards: bounded_vec![], - }) + legacy_claimed_rewards: bounded_vec![], + } ); mock::start_active_era(4); @@ -1633,8 +1634,8 @@ fn rebond_is_fifo() { // Unbond yet more of the funds in stash. Staking::unbond(RuntimeOrigin::signed(11), 200).unwrap(); assert_eq!( - Staking::ledger(&11), - Some(StakingLedger { + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { stash: 11, total: 1000, active: 100, @@ -1643,15 +1644,15 @@ fn rebond_is_fifo() { UnlockChunk { value: 300, era: 3 + 3 }, UnlockChunk { value: 200, era: 4 + 3 }, ], - claimed_rewards: bounded_vec![], - }) + legacy_claimed_rewards: bounded_vec![], + } ); // Re-bond half of the unbonding funds. Staking::rebond(RuntimeOrigin::signed(11), 400).unwrap(); assert_eq!( - Staking::ledger(&11), - Some(StakingLedger { + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { stash: 11, total: 1000, active: 500, @@ -1659,8 +1660,8 @@ fn rebond_is_fifo() { UnlockChunk { value: 400, era: 2 + 3 }, UnlockChunk { value: 100, era: 3 + 3 }, ], - claimed_rewards: bounded_vec![], - }) + legacy_claimed_rewards: bounded_vec![], + } ); }) } @@ -1670,8 +1671,8 @@ fn rebond_emits_right_value_in_event() { // When a user calls rebond with more than can be rebonded, things succeed, // and the rebond event emits the actual value rebonded. ExtBuilder::default().nominate(false).build_and_execute(|| { - // Set payee to controller. avoids confusion - assert_ok!(Staking::set_payee(RuntimeOrigin::signed(11), RewardDestination::Controller)); + // Set payee to stash. + assert_ok!(Staking::set_payee(RuntimeOrigin::signed(11), RewardDestination::Stash)); // Give account 11 some large free balance greater than total let _ = Balances::make_free_balance_be(&11, 1000000); @@ -1682,27 +1683,27 @@ fn rebond_emits_right_value_in_event() { // Unbond almost all of the funds in stash. 
Staking::unbond(RuntimeOrigin::signed(11), 900).unwrap(); assert_eq!( - Staking::ledger(&11), - Some(StakingLedger { + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { stash: 11, total: 1000, active: 100, unlocking: bounded_vec![UnlockChunk { value: 900, era: 1 + 3 }], - claimed_rewards: bounded_vec![], - }) + legacy_claimed_rewards: bounded_vec![], + } ); // Re-bond less than the total Staking::rebond(RuntimeOrigin::signed(11), 100).unwrap(); assert_eq!( - Staking::ledger(&11), - Some(StakingLedger { + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { stash: 11, total: 1000, active: 200, unlocking: bounded_vec![UnlockChunk { value: 800, era: 1 + 3 }], - claimed_rewards: bounded_vec![], - }) + legacy_claimed_rewards: bounded_vec![], + } ); // Event emitted should be correct assert_eq!(*staking_events().last().unwrap(), Event::Bonded { stash: 11, amount: 100 }); @@ -1710,14 +1711,14 @@ fn rebond_emits_right_value_in_event() { // Re-bond way more than available Staking::rebond(RuntimeOrigin::signed(11), 100_000).unwrap(); assert_eq!( - Staking::ledger(&11), - Some(StakingLedger { + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { stash: 11, total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: bounded_vec![], - }) + legacy_claimed_rewards: bounded_vec![], + } ); // Event emitted should be correct, only 800 assert_eq!(*staking_events().last().unwrap(), Event::Bonded { stash: 11, amount: 800 }); @@ -1736,23 +1737,23 @@ fn reward_to_stake_works() { // Confirm account 10 and 20 are validators assert!(>::contains_key(&11) && >::contains_key(&21)); - assert_eq!(Staking::eras_stakers(active_era(), 11).total, 1000); - assert_eq!(Staking::eras_stakers(active_era(), 21).total, 2000); + assert_eq!(Staking::eras_stakers(active_era(), &11).total, 1000); + assert_eq!(Staking::eras_stakers(active_era(), &21).total, 2000); // Give the man some money. let _ = Balances::make_free_balance_be(&10, 1000); let _ = Balances::make_free_balance_be(&20, 1000); // Bypass logic and change current exposure - ErasStakers::::insert(0, 21, Exposure { total: 69, own: 69, others: vec![] }); + EraInfo::::set_exposure(0, &21, Exposure { total: 69, own: 69, others: vec![] }); >::insert( &20, - StakingLedger { + StakingLedgerInspect { stash: 21, total: 69, active: 69, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], }, ); @@ -1765,21 +1766,18 @@ fn reward_to_stake_works() { mock::start_active_era(1); mock::make_all_reward_payment(0); - assert_eq!(Staking::eras_stakers(active_era(), 11).total, 1000); - assert_eq!(Staking::eras_stakers(active_era(), 21).total, 2000); + assert_eq!(Staking::eras_stakers(active_era(), &11).total, 1000); + assert_eq!(Staking::eras_stakers(active_era(), &21).total, 2000); let _11_balance = Balances::free_balance(&11); - let _21_balance = Balances::free_balance(&21); - assert_eq!(_11_balance, 1000 + total_payout_0 / 2); - assert_eq!(_21_balance, 2000 + total_payout_0 / 2); // Trigger another new era as the info are frozen before the era start. 
mock::start_active_era(2); // -- new infos - assert_eq!(Staking::eras_stakers(active_era(), 11).total, _11_balance); - assert_eq!(Staking::eras_stakers(active_era(), 21).total, _21_balance); + assert_eq!(Staking::eras_stakers(active_era(), &11).total, 1000 + total_payout_0 / 2); + assert_eq!(Staking::eras_stakers(active_era(), &21).total, 2000 + total_payout_0 / 2); }); } @@ -1806,16 +1804,7 @@ fn reap_stash_works() { // no easy way to cause an account to go below ED, we tweak their staking ledger // instead. - Ledger::::insert( - 11, - StakingLedger { - stash: 11, - total: 5, - active: 5, - unlocking: Default::default(), - claimed_rewards: bounded_vec![], - }, - ); + Ledger::::insert(11, StakingLedger::::new(11, 5)); // reap-able assert_ok!(Staking::reap_stash(RuntimeOrigin::signed(20), 11, 0)); @@ -1835,10 +1824,7 @@ fn switching_roles() { ExtBuilder::default().nominate(false).build_and_execute(|| { // Reset reward destination for i in &[11, 21] { - assert_ok!(Staking::set_payee( - RuntimeOrigin::signed(*i), - RewardDestination::Controller - )); + assert_ok!(Staking::set_payee(RuntimeOrigin::signed(*i), RewardDestination::Stash)); } assert_eq_uvec!(validator_controllers(), vec![21, 11]); @@ -1849,14 +1835,14 @@ fn switching_roles() { } // add 2 nominators - assert_ok!(Staking::bond(RuntimeOrigin::signed(1), 2000, RewardDestination::Controller)); + assert_ok!(Staking::bond(RuntimeOrigin::signed(1), 2000, RewardDestination::Account(1))); assert_ok!(Staking::nominate(RuntimeOrigin::signed(1), vec![11, 5])); - assert_ok!(Staking::bond(RuntimeOrigin::signed(3), 500, RewardDestination::Controller)); + assert_ok!(Staking::bond(RuntimeOrigin::signed(3), 500, RewardDestination::Account(3))); assert_ok!(Staking::nominate(RuntimeOrigin::signed(3), vec![21, 1])); // add a new validator candidate - assert_ok!(Staking::bond(RuntimeOrigin::signed(5), 1000, RewardDestination::Controller)); + assert_ok!(Staking::bond(RuntimeOrigin::signed(5), 1000, RewardDestination::Account(5))); assert_ok!(Staking::validate(RuntimeOrigin::signed(5), ValidatorPrefs::default())); assert_ok!(Session::set_keys( RuntimeOrigin::signed(5), @@ -1909,8 +1895,8 @@ fn wrong_vote_is_moot() { assert_eq_uvec!(validator_controllers(), vec![21, 11]); // our new voter is taken into account - assert!(Staking::eras_stakers(active_era(), 11).others.iter().any(|i| i.who == 61)); - assert!(Staking::eras_stakers(active_era(), 21).others.iter().any(|i| i.who == 61)); + assert!(Staking::eras_stakers(active_era(), &11).others.iter().any(|i| i.who == 61)); + assert!(Staking::eras_stakers(active_era(), &21).others.iter().any(|i| i.who == 61)); }); } @@ -1927,24 +1913,24 @@ fn bond_with_no_staked_value() { .build_and_execute(|| { // Can't bond with 1 assert_noop!( - Staking::bond(RuntimeOrigin::signed(1), 1, RewardDestination::Controller), + Staking::bond(RuntimeOrigin::signed(1), 1, RewardDestination::Account(1)), Error::::InsufficientBond, ); // bonded with absolute minimum value possible. - assert_ok!(Staking::bond(RuntimeOrigin::signed(1), 5, RewardDestination::Controller)); + assert_ok!(Staking::bond(RuntimeOrigin::signed(1), 5, RewardDestination::Account(1))); assert_eq!(Balances::locks(&1)[0].amount, 5); // unbonding even 1 will cause all to be unbonded. 
assert_ok!(Staking::unbond(RuntimeOrigin::signed(1), 1)); assert_eq!( - Staking::ledger(1), - Some(StakingLedger { + Staking::ledger(1.into()).unwrap(), + StakingLedgerInspect { stash: 1, active: 0, total: 5, unlocking: bounded_vec![UnlockChunk { value: 5, era: 3 }], - claimed_rewards: bounded_vec![], - }) + legacy_claimed_rewards: bounded_vec![], + } ); mock::start_active_era(1); @@ -1952,14 +1938,14 @@ fn bond_with_no_staked_value() { // not yet removed. assert_ok!(Staking::withdraw_unbonded(RuntimeOrigin::signed(1), 0)); - assert!(Staking::ledger(1).is_some()); + assert!(Staking::ledger(1.into()).is_ok()); assert_eq!(Balances::locks(&1)[0].amount, 5); mock::start_active_era(3); // poof. Account 1 is removed from the staking system. assert_ok!(Staking::withdraw_unbonded(RuntimeOrigin::signed(1), 0)); - assert!(Staking::ledger(1).is_none()); + assert!(Staking::ledger(1.into()).is_err()); assert_eq!(Balances::locks(&1).len(), 0); }); } @@ -1973,15 +1959,12 @@ fn bond_with_little_staked_value_bounded() { .build_and_execute(|| { // setup assert_ok!(Staking::chill(RuntimeOrigin::signed(31))); - assert_ok!(Staking::set_payee( - RuntimeOrigin::signed(11), - RewardDestination::Controller - )); + assert_ok!(Staking::set_payee(RuntimeOrigin::signed(11), RewardDestination::Stash)); let init_balance_1 = Balances::free_balance(&1); let init_balance_11 = Balances::free_balance(&11); // Stingy validator. - assert_ok!(Staking::bond(RuntimeOrigin::signed(1), 1, RewardDestination::Controller)); + assert_ok!(Staking::bond(RuntimeOrigin::signed(1), 1, RewardDestination::Account(1))); assert_ok!(Staking::validate(RuntimeOrigin::signed(1), ValidatorPrefs::default())); assert_ok!(Session::set_keys( RuntimeOrigin::signed(1), @@ -1997,9 +1980,9 @@ fn bond_with_little_staked_value_bounded() { mock::start_active_era(1); mock::make_all_reward_payment(0); - // 2 is elected. + // 1 is elected. assert_eq_uvec!(validator_controllers(), vec![21, 11, 1]); - assert_eq!(Staking::eras_stakers(active_era(), 2).total, 0); + assert_eq!(Staking::eras_stakers(active_era(), &2).total, 0); // Old ones are rewarded. assert_eq_error_rate!( @@ -2017,7 +2000,7 @@ fn bond_with_little_staked_value_bounded() { mock::make_all_reward_payment(1); assert_eq_uvec!(validator_controllers(), vec![21, 11, 1]); - assert_eq!(Staking::eras_stakers(active_era(), 2).total, 0); + assert_eq!(Staking::eras_stakers(active_era(), &2).total, 0); // 2 is now rewarded. assert_eq_error_rate!( @@ -2044,7 +2027,7 @@ fn bond_with_duplicate_vote_should_be_ignored_by_election_provider() { // ensure all have equal stake. assert_eq!( >::iter() - .map(|(v, _)| (v, Staking::ledger(v).unwrap().total)) + .map(|(v, _)| (v, Staking::ledger(v.into()).unwrap().total)) .collect::>(), vec![(31, 1000), (21, 1000), (11, 1000)], ); @@ -2060,14 +2043,14 @@ fn bond_with_duplicate_vote_should_be_ignored_by_election_provider() { assert_ok!(Staking::bond( RuntimeOrigin::signed(1), 1000, - RewardDestination::Controller + RewardDestination::Account(1) )); assert_ok!(Staking::nominate(RuntimeOrigin::signed(1), vec![11, 11, 11, 21, 31])); assert_ok!(Staking::bond( RuntimeOrigin::signed(3), 1000, - RewardDestination::Controller + RewardDestination::Account(3) )); assert_ok!(Staking::nominate(RuntimeOrigin::signed(3), vec![21, 31])); @@ -2096,7 +2079,7 @@ fn bond_with_duplicate_vote_should_be_ignored_by_election_provider_elected() { // ensure all have equal stake. 
assert_eq!( >::iter() - .map(|(v, _)| (v, Staking::ledger(v).unwrap().total)) + .map(|(v, _)| (v, Staking::ledger(v.into()).unwrap().total)) .collect::>(), vec![(31, 1000), (21, 1000), (11, 1000)], ); @@ -2113,14 +2096,14 @@ fn bond_with_duplicate_vote_should_be_ignored_by_election_provider_elected() { assert_ok!(Staking::bond( RuntimeOrigin::signed(1), 1000, - RewardDestination::Controller + RewardDestination::Account(1) )); assert_ok!(Staking::nominate(RuntimeOrigin::signed(1), vec![11, 11, 11, 21])); assert_ok!(Staking::bond( RuntimeOrigin::signed(3), 1000, - RewardDestination::Controller + RewardDestination::Account(3) )); assert_ok!(Staking::nominate(RuntimeOrigin::signed(3), vec![21])); @@ -2168,8 +2151,8 @@ fn phragmen_should_not_overflow() { assert_eq_uvec!(validator_controllers(), vec![3, 5]); // We can safely convert back to values within [u64, u128]. - assert!(Staking::eras_stakers(active_era(), 3).total > Votes::max_value() as Balance); - assert!(Staking::eras_stakers(active_era(), 5).total > Votes::max_value() as Balance); + assert!(Staking::eras_stakers(active_era(), &3).total > Votes::max_value() as Balance); + assert!(Staking::eras_stakers(active_era(), &5).total > Votes::max_value() as Balance); }) } @@ -2193,10 +2176,9 @@ fn reward_validator_slashing_validator_does_not_overflow() { // Check reward ErasRewardPoints::::insert(0, reward); - ErasStakers::::insert(0, 11, &exposure); - ErasStakersClipped::::insert(0, 11, exposure); + EraInfo::::set_exposure(0, &11, exposure); ErasValidatorReward::::insert(0, stake); - assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 0)); + assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 0, 0)); assert_eq!(Balances::total_balance(&11), stake * 2); // Set staker @@ -2206,9 +2188,9 @@ fn reward_validator_slashing_validator_does_not_overflow() { // only slashes out of bonded stake are applied. without this line, it is 0. Staking::bond(RuntimeOrigin::signed(2), stake - 1, RewardDestination::default()).unwrap(); // Override exposure of 11 - ErasStakers::::insert( + EraInfo::::set_exposure( 0, - 11, + &11, Exposure { total: stake, own: 1, @@ -2219,7 +2201,7 @@ fn reward_validator_slashing_validator_does_not_overflow() { // Check slashing on_offence_now( &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![], }], &[Perbill::from_percent(100)], @@ -2318,7 +2300,7 @@ fn offence_forces_new_era() { ExtBuilder::default().build_and_execute(|| { on_offence_now( &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![], }], &[Perbill::from_percent(5)], @@ -2336,7 +2318,7 @@ fn offence_ensures_new_era_without_clobbering() { on_offence_now( &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![], }], &[Perbill::from_percent(5)], @@ -2354,7 +2336,7 @@ fn offence_deselects_validator_even_when_slash_is_zero() { on_offence_now( &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![], }], &[Perbill::from_percent(0)], @@ -2375,7 +2357,7 @@ fn slashing_performed_according_exposure() { // This test checks that slashing is performed according the exposure (or more precisely, // historical exposure), not the current balance. 
ExtBuilder::default().build_and_execute(|| { - assert_eq!(Staking::eras_stakers(active_era(), 11).own, 1000); + assert_eq!(Staking::eras_stakers(active_era(), &11).own, 1000); // Handle an offence with a historical exposure. on_offence_now( @@ -2401,7 +2383,7 @@ fn slash_in_old_span_does_not_deselect() { on_offence_now( &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![], }], &[Perbill::from_percent(0)], @@ -2424,7 +2406,7 @@ fn slash_in_old_span_does_not_deselect() { on_offence_in_era( &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![], }], &[Perbill::from_percent(0)], @@ -2440,7 +2422,7 @@ fn slash_in_old_span_does_not_deselect() { on_offence_in_era( &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![], }], // NOTE: A 100% slash here would clean up the account, causing de-registration. @@ -2467,11 +2449,11 @@ fn reporters_receive_their_slice() { // The reporters' reward is calculated from the total exposure. let initial_balance = 1125; - assert_eq!(Staking::eras_stakers(active_era(), 11).total, initial_balance); + assert_eq!(Staking::eras_stakers(active_era(), &11).total, initial_balance); on_offence_now( &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![1, 2], }], &[Perbill::from_percent(50)], @@ -2494,11 +2476,11 @@ fn subsequent_reports_in_same_span_pay_out_less() { // The reporters' reward is calculated from the total exposure. let initial_balance = 1125; - assert_eq!(Staking::eras_stakers(active_era(), 11).total, initial_balance); + assert_eq!(Staking::eras_stakers(active_era(), &11).total, initial_balance); on_offence_now( &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![1], }], &[Perbill::from_percent(20)], @@ -2511,7 +2493,7 @@ fn subsequent_reports_in_same_span_pay_out_less() { on_offence_now( &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![1], }], &[Perbill::from_percent(50)], @@ -2533,7 +2515,7 @@ fn invulnerables_are_not_slashed() { assert_eq!(Balances::free_balance(11), 1000); assert_eq!(Balances::free_balance(21), 2000); - let exposure = Staking::eras_stakers(active_era(), 21); + let exposure = Staking::eras_stakers(active_era(), &21); let initial_balance = Staking::slashable_balance_of(&21); let nominator_balances: Vec<_> = @@ -2542,11 +2524,11 @@ fn invulnerables_are_not_slashed() { on_offence_now( &[ OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![], }, OffenceDetails { - offender: (21, Staking::eras_stakers(active_era(), 21)), + offender: (21, Staking::eras_stakers(active_era(), &21)), reporters: vec![], }, ], @@ -2576,7 +2558,7 @@ fn dont_slash_if_fraction_is_zero() { on_offence_now( &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![], }], &[Perbill::from_percent(0)], @@ -2597,7 +2579,7 @@ fn only_slash_for_max_in_era() { on_offence_now( 
&[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![], }], &[Perbill::from_percent(50)], @@ -2609,7 +2591,7 @@ fn only_slash_for_max_in_era() { on_offence_now( &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![], }], &[Perbill::from_percent(25)], @@ -2620,7 +2602,7 @@ fn only_slash_for_max_in_era() { on_offence_now( &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![], }], &[Perbill::from_percent(60)], @@ -2642,7 +2624,7 @@ fn garbage_collection_after_slashing() { on_offence_now( &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![], }], &[Perbill::from_percent(10)], @@ -2654,7 +2636,7 @@ fn garbage_collection_after_slashing() { on_offence_now( &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![], }], &[Perbill::from_percent(100)], @@ -2691,12 +2673,15 @@ fn garbage_collection_on_window_pruning() { assert_eq!(Balances::free_balance(11), 1000); let now = active_era(); - let exposure = Staking::eras_stakers(now, 11); + let exposure = Staking::eras_stakers(now, &11); assert_eq!(Balances::free_balance(101), 2000); let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value; on_offence_now( - &[OffenceDetails { offender: (11, Staking::eras_stakers(now, 11)), reporters: vec![] }], + &[OffenceDetails { + offender: (11, Staking::eras_stakers(now, &11)), + reporters: vec![], + }], &[Perbill::from_percent(10)], ); @@ -2731,14 +2716,14 @@ fn slashing_nominators_by_span_max() { assert_eq!(Balances::free_balance(101), 2000); assert_eq!(Staking::slashable_balance_of(&21), 1000); - let exposure_11 = Staking::eras_stakers(active_era(), 11); - let exposure_21 = Staking::eras_stakers(active_era(), 21); + let exposure_11 = Staking::eras_stakers(active_era(), &11); + let exposure_21 = Staking::eras_stakers(active_era(), &21); let nominated_value_11 = exposure_11.others.iter().find(|o| o.who == 101).unwrap().value; let nominated_value_21 = exposure_21.others.iter().find(|o| o.who == 101).unwrap().value; on_offence_in_era( &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![], }], &[Perbill::from_percent(10)], @@ -2765,7 +2750,7 @@ fn slashing_nominators_by_span_max() { // second slash: higher era, higher value, same span. on_offence_in_era( &[OffenceDetails { - offender: (21, Staking::eras_stakers(active_era(), 21)), + offender: (21, Staking::eras_stakers(active_era(), &21)), reporters: vec![], }], &[Perbill::from_percent(30)], @@ -2787,7 +2772,7 @@ fn slashing_nominators_by_span_max() { // in-era value, but lower slash value than slash 2. 
on_offence_in_era( &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![], }], &[Perbill::from_percent(20)], @@ -2822,7 +2807,7 @@ fn slashes_are_summed_across_spans() { on_offence_now( &[OffenceDetails { - offender: (21, Staking::eras_stakers(active_era(), 21)), + offender: (21, Staking::eras_stakers(active_era(), &21)), reporters: vec![], }], &[Perbill::from_percent(10)], @@ -2845,7 +2830,7 @@ fn slashes_are_summed_across_spans() { on_offence_now( &[OffenceDetails { - offender: (21, Staking::eras_stakers(active_era(), 21)), + offender: (21, Staking::eras_stakers(active_era(), &21)), reporters: vec![], }], &[Perbill::from_percent(10)], @@ -2869,7 +2854,7 @@ fn deferred_slashes_are_deferred() { assert_eq!(Balances::free_balance(11), 1000); - let exposure = Staking::eras_stakers(active_era(), 11); + let exposure = Staking::eras_stakers(active_era(), &11); assert_eq!(Balances::free_balance(101), 2000); let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value; @@ -2877,7 +2862,7 @@ fn deferred_slashes_are_deferred() { on_offence_now( &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![], }], &[Perbill::from_percent(10)], @@ -2928,7 +2913,7 @@ fn retroactive_deferred_slashes_two_eras_before() { assert_eq!(BondingDuration::get(), 3); mock::start_active_era(1); - let exposure_11_at_era1 = Staking::eras_stakers(active_era(), 11); + let exposure_11_at_era1 = Staking::eras_stakers(active_era(), &11); mock::start_active_era(3); @@ -2964,7 +2949,7 @@ fn retroactive_deferred_slashes_one_before() { assert_eq!(BondingDuration::get(), 3); mock::start_active_era(1); - let exposure_11_at_era1 = Staking::eras_stakers(active_era(), 11); + let exposure_11_at_era1 = Staking::eras_stakers(active_era(), &11); // unbond at slash era. mock::start_active_era(2); @@ -2982,7 +2967,7 @@ fn retroactive_deferred_slashes_one_before() { mock::start_active_era(4); - assert_eq!(Staking::ledger(11).unwrap().total, 1000); + assert_eq!(Staking::ledger(11.into()).unwrap().total, 1000); // slash happens after the next line. mock::start_active_era(5); @@ -2997,9 +2982,9 @@ fn retroactive_deferred_slashes_one_before() { )); // their ledger has already been slashed. 
- assert_eq!(Staking::ledger(11).unwrap().total, 900); + assert_eq!(Staking::ledger(11.into()).unwrap().total, 900); assert_ok!(Staking::unbond(RuntimeOrigin::signed(11), 1000)); - assert_eq!(Staking::ledger(11).unwrap().total, 900); + assert_eq!(Staking::ledger(11.into()).unwrap().total, 900); }) } @@ -3012,12 +2997,12 @@ fn staker_cannot_bail_deferred_slash() { assert_eq!(Balances::free_balance(11), 1000); assert_eq!(Balances::free_balance(101), 2000); - let exposure = Staking::eras_stakers(active_era(), 11); + let exposure = Staking::eras_stakers(active_era(), &11); let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value; on_offence_now( &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![], }], &[Perbill::from_percent(10)], @@ -3032,11 +3017,11 @@ fn staker_cannot_bail_deferred_slash() { assert_eq!( Ledger::::get(101).unwrap(), - StakingLedger { + StakingLedgerInspect { active: 0, total: 500, stash: 101, - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], unlocking: bounded_vec![UnlockChunk { era: 4u32, value: 500 }], } ); @@ -3086,7 +3071,7 @@ fn remove_deferred() { assert_eq!(Balances::free_balance(11), 1000); - let exposure = Staking::eras_stakers(active_era(), 11); + let exposure = Staking::eras_stakers(active_era(), &11); assert_eq!(Balances::free_balance(101), 2000); let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value; @@ -3162,7 +3147,7 @@ fn remove_multi_deferred() { assert_eq!(Balances::free_balance(11), 1000); - let exposure = Staking::eras_stakers(active_era(), 11); + let exposure = Staking::eras_stakers(active_era(), &11); assert_eq!(Balances::free_balance(101), 2000); on_offence_now( @@ -3172,7 +3157,7 @@ fn remove_multi_deferred() { on_offence_now( &[OffenceDetails { - offender: (21, Staking::eras_stakers(active_era(), 21)), + offender: (21, Staking::eras_stakers(active_era(), &21)), reporters: vec![], }], &[Perbill::from_percent(10)], @@ -3527,8 +3512,8 @@ fn claim_reward_at_the_last_era_and_no_double_claim_and_invalid_claim() { let part_for_101 = Perbill::from_rational::(125, 1125); // Check state - Payee::::insert(11, RewardDestination::Controller); - Payee::::insert(101, RewardDestination::Controller); + Payee::::insert(11, RewardDestination::Account(11)); + Payee::::insert(101, RewardDestination::Account(101)); Pallet::::reward_by_ids(vec![(11, 1)]); // Compute total payout now for whole duration as other parameter won't change @@ -3537,8 +3522,9 @@ fn claim_reward_at_the_last_era_and_no_double_claim_and_invalid_claim() { mock::start_active_era(1); Pallet::::reward_by_ids(vec![(11, 1)]); - // Change total issuance in order to modify total payout + // Increase total token issuance to affect the total payout. let _ = Balances::deposit_creating(&999, 1_000_000_000); + // Compute total payout now for whole duration as other parameter won't change let total_payout_1 = current_total_payout_for_duration(reward_time_per_era()); assert!(total_payout_1 != total_payout_0); @@ -3546,7 +3532,7 @@ fn claim_reward_at_the_last_era_and_no_double_claim_and_invalid_claim() { mock::start_active_era(2); Pallet::::reward_by_ids(vec![(11, 1)]); - // Change total issuance in order to modify total payout + // Increase total token issuance to affect the total payout. 
let _ = Balances::deposit_creating(&999, 1_000_000_000); // Compute total payout now for whole duration as other parameter won't change let total_payout_2 = current_total_payout_for_duration(reward_time_per_era()); @@ -3563,19 +3549,19 @@ fn claim_reward_at_the_last_era_and_no_double_claim_and_invalid_claim() { // Last kept is 1: assert!(current_era - HistoryDepth::get() == 1); assert_noop!( - Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 0), + Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 0, 0), // Fail: Era out of history Error::::InvalidEraToReward.with_weight(err_weight) ); - assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 1)); - assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 2)); + assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 1, 0)); + assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 2, 0)); assert_noop!( - Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 2), + Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 2, 0), // Fail: Double claim Error::::AlreadyClaimed.with_weight(err_weight) ); assert_noop!( - Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, active_era), + Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, active_era, 0), // Fail: Era not finished yet Error::::InvalidEraToReward.with_weight(err_weight) ); @@ -3601,7 +3587,7 @@ fn zero_slash_keeps_nominators() { assert_eq!(Balances::free_balance(11), 1000); - let exposure = Staking::eras_stakers(active_era(), 11); + let exposure = Staking::eras_stakers(active_era(), &11); assert_eq!(Balances::free_balance(101), 2000); on_offence_now( @@ -3680,9 +3666,10 @@ fn six_session_delay() { } #[test] -fn test_max_nominator_rewarded_per_validator_and_cant_steal_someone_else_reward() { +fn test_nominators_over_max_exposure_page_size_are_rewarded() { ExtBuilder::default().build_and_execute(|| { - for i in 0..=<::MaxNominatorRewardedPerValidator as Get<_>>::get() { + // bond one nominator more than the max exposure page size to validator 11. 
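The renamed test above bonds one nominator more than `MaxExposurePageSize` and expects the extra nominator to be paid from page 1 instead of being dropped, which is the behavioural change of this PR (previously only the top `MaxNominatorRewardedPerValidator` nominators were rewarded). A rough sketch of that spill-over using plain slices rather than the pallet's storage items:

fn main() {
    // Hypothetical numbers mirroring the test: page size 64, 65 nominators.
    let max_exposure_page_size = 64usize;
    let nominators: Vec<u64> = (0..=64).map(|i| 10_000 + i).collect();

    // Split the nominators into exposure pages.
    let pages: Vec<&[u64]> = nominators.chunks(max_exposure_page_size).collect();

    // Two pages: a full first page and a single spill-over nominator on page 1.
    assert_eq!(pages.len(), 2);
    assert_eq!(pages[0].len(), 64);
    assert_eq!(pages[1].len(), 1);
    assert_eq!(pages[1][0], 10_064);

    // Every nominator lands on exactly one page, so paying out every page rewards all of them.
    let paid: usize = pages.iter().map(|p| p.len()).sum();
    assert_eq!(paid, nominators.len());
}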
+ for i in 0..=MaxExposurePageSize::get() { let stash = 10_000 + i as AccountId; let balance = 10_000 + i as Balance; Balances::make_free_balance_be(&stash, balance); @@ -3702,29 +3689,73 @@ fn test_max_nominator_rewarded_per_validator_and_cant_steal_someone_else_reward( mock::start_active_era(2); mock::make_all_reward_payment(1); - // Assert only nominators from 1 to Max are rewarded - for i in 0..=<::MaxNominatorRewardedPerValidator as Get<_>>::get() { + // Assert nominators from 1 to Max are rewarded + let mut i: u32 = 0; + while i < MaxExposurePageSize::get() { let stash = 10_000 + i as AccountId; let balance = 10_000 + i as Balance; - if stash == 10_000 { - assert!(Balances::free_balance(&stash) == balance); - } else { - assert!(Balances::free_balance(&stash) > balance); - } + assert!(Balances::free_balance(&stash) > balance); + i += 1; + } + + // Assert overflowing nominators from page 1 are also rewarded + let stash = 10_000 + i as AccountId; + assert!(Balances::free_balance(&stash) > (10_000 + i) as Balance); + }); +} + +#[test] +fn test_nominators_are_rewarded_for_all_exposure_page() { + ExtBuilder::default().build_and_execute(|| { + // 3 pages of exposure + let nominator_count = 2 * MaxExposurePageSize::get() + 1; + + for i in 0..nominator_count { + let stash = 10_000 + i as AccountId; + let balance = 10_000 + i as Balance; + Balances::make_free_balance_be(&stash, balance); + assert_ok!(Staking::bond( + RuntimeOrigin::signed(stash), + balance, + RewardDestination::Stash + )); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(stash), vec![11])); + } + mock::start_active_era(1); + + Pallet::::reward_by_ids(vec![(11, 1)]); + // compute and ensure the reward amount is greater than zero. + let _ = current_total_payout_for_duration(reward_time_per_era()); + + mock::start_active_era(2); + mock::make_all_reward_payment(1); + + assert_eq!(EraInfo::::get_page_count(1, &11), 3); + + // Assert all nominators are rewarded according to their stake + for i in 0..nominator_count { + // balance of the nominator after the reward payout. + let current_balance = Balances::free_balance(&((10000 + i) as AccountId)); + // balance of the nominator in the previous iteration. + let previous_balance = Balances::free_balance(&((10000 + i - 1) as AccountId)); + // balance before the reward. + let original_balance = 10_000 + i as Balance; + + assert!(current_balance > original_balance); + // since the stake of the nominator is increasing for each iteration, the final balance + // after the reward should also be higher than the previous iteration. + assert!(current_balance > previous_balance); } }); } #[test] -fn test_payout_stakers() { - // Test that payout_stakers work in general, including that only the top - // `T::MaxNominatorRewardedPerValidator` nominators are rewarded. +fn test_multi_page_payout_stakers_by_page() { + // Test that payout_stakers work in general and that it pays the correct amount of reward. ExtBuilder::default().has_stakers(false).build_and_execute(|| { let balance = 1000; // Track the exposure of the validator and all nominators. let mut total_exposure = balance; - // Track the exposure of the validator and the nominators that will get paid out. 
- let mut payout_exposure = balance; // Create a validator: bond_validator(11, balance); // Default(64) assert_eq!(Validators::::count(), 1); @@ -3733,100 +3764,148 @@ fn test_payout_stakers() { for i in 0..100 { let bond_amount = balance + i as Balance; bond_nominator(1000 + i, bond_amount, vec![11]); + // with multi page reward payout, payout exposure is same as total exposure. total_exposure += bond_amount; - if i >= 36 { - payout_exposure += bond_amount; - }; } - let payout_exposure_part = Perbill::from_rational(payout_exposure, total_exposure); mock::start_active_era(1); Staking::reward_by_ids(vec![(11, 1)]); + // Since `MaxExposurePageSize = 64`, there are two pages of validator exposure. + assert_eq!(EraInfo::::get_page_count(1, &11), 2); + // compute and ensure the reward amount is greater than zero. let payout = current_total_payout_for_duration(reward_time_per_era()); - let actual_paid_out = payout_exposure_part * payout; - mock::start_active_era(2); + // verify the exposures are calculated correctly. + let actual_exposure_0 = EraInfo::::get_paged_exposure(1, &11, 0).unwrap(); + assert_eq!(actual_exposure_0.total(), total_exposure); + assert_eq!(actual_exposure_0.own(), 1000); + assert_eq!(actual_exposure_0.others().len(), 64); + let actual_exposure_1 = EraInfo::::get_paged_exposure(1, &11, 1).unwrap(); + assert_eq!(actual_exposure_1.total(), total_exposure); + // own stake is only included once in the first page + assert_eq!(actual_exposure_1.own(), 0); + assert_eq!(actual_exposure_1.others().len(), 100 - 64); + let pre_payout_total_issuance = Balances::total_issuance(); RewardOnUnbalanceWasCalled::set(false); - assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 1)); - assert_eq_error_rate!( - Balances::total_issuance(), - pre_payout_total_issuance + actual_paid_out, - 1 - ); - assert!(RewardOnUnbalanceWasCalled::get()); + System::reset_events(); - // `Rewarded` events are being executed. + let controller_balance_before_p0_payout = Balances::free_balance(&11); + // Payout rewards for first exposure page + assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 1, 0)); + + // verify `Rewarded` events are being executed assert!(matches!( staking_events_since_last_call().as_slice(), &[ .., - Event::Rewarded { stash: 1037, dest: RewardDestination::Controller, amount: 108 }, - Event::Rewarded { stash: 1036, dest: RewardDestination::Controller, amount: 108 } + Event::Rewarded { stash: 1063, dest: RewardDestination::Stash, amount: 111 }, + Event::Rewarded { stash: 1064, dest: RewardDestination::Stash, amount: 111 }, + ] + )); + + let controller_balance_after_p0_payout = Balances::free_balance(&11); + + // verify rewards have been paid out but still some left + assert!(Balances::total_issuance() > pre_payout_total_issuance); + assert!(Balances::total_issuance() < pre_payout_total_issuance + payout); + + // verify the validator has been rewarded + assert!(controller_balance_after_p0_payout > controller_balance_before_p0_payout); + + // Payout the second and last page of nominators + assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 1, 1)); + + // verify `Rewarded` events are being executed for the second page. + let events = staking_events_since_last_call(); + assert!(matches!( + events.as_slice(), + &[ + Event::PayoutStarted { era_index: 1, validator_stash: 11 }, + Event::Rewarded { stash: 1065, dest: RewardDestination::Stash, amount: 111 }, + Event::Rewarded { stash: 1066, dest: RewardDestination::Stash, amount: 111 }, + .. 
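The assertions above check that both pages report the same overall `total()`, that the validator's own stake is only carried by page 0, and that the 100 nominators split 64/36 across the two pages; the `can_page_exposure` test later in this file exercises the same splitting with a page size of 3. Below is a simplified stand-in for that split (not the pallet's real `Exposure::into_pages`), reusing the numbers from `can_page_exposure`:

/// A simplified stand-in for splitting an exposure into pages.
struct Exposure {
    own: u128,
    others: Vec<u128>,
}

struct Page {
    /// Own stake is only reported on page 0; later pages carry nominator stake only.
    own: u128,
    others: Vec<u128>,
}

fn into_pages(exposure: &Exposure, page_size: usize) -> Vec<Page> {
    exposure
        .others
        .chunks(page_size)
        .enumerate()
        .map(|(i, chunk)| Page {
            own: if i == 0 { exposure.own } else { 0 },
            others: chunk.to_vec(),
        })
        .collect()
}

fn main() {
    // Numbers from `can_page_exposure`: 19 nominators staking 100..=1900,
    // own stake 500, page size 3.
    let exposure = Exposure { own: 500, others: (1..20).map(|i| 100 * i as u128).collect() };
    let pages = into_pages(&exposure, 3);

    assert_eq!(pages.len(), 7); // ceil(19 / 3)
    assert_eq!(pages[0].own, 500);
    assert_eq!(pages[1].own, 0);
    // First page nominator stake: 100 + 200 + 300.
    assert_eq!(pages[0].others.iter().sum::<u128>(), 600);
    // Second page nominator stake: 400 + 500 + 600.
    assert_eq!(pages[1].others.iter().sum::<u128>(), 1500);
}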
] )); + // verify the validator was not rewarded the second time + assert_eq!(Balances::free_balance(&11), controller_balance_after_p0_payout); + + // verify all rewards have been paid out + assert_eq_error_rate!(Balances::total_issuance(), pre_payout_total_issuance + payout, 2); + assert!(RewardOnUnbalanceWasCalled::get()); // Top 64 nominators of validator 11 automatically paid out, including the validator - // Validator payout goes to controller. assert!(Balances::free_balance(&11) > balance); - for i in 36..100 { + for i in 0..100 { assert!(Balances::free_balance(&(1000 + i)) > balance + i as Balance); } - // The bottom 36 do not - for i in 0..36 { - assert_eq!(Balances::free_balance(&(1000 + i)), balance + i as Balance); - } - // We track rewards in `claimed_rewards` vec + // verify we no longer track rewards in `legacy_claimed_rewards` vec assert_eq!( - Staking::ledger(&11), - Some(StakingLedger { + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { stash: 11, total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: bounded_vec![1] - }) + legacy_claimed_rewards: bounded_vec![] + } ); + // verify rewards are tracked to prevent double claims + let ledger = Staking::ledger(11.into()); + for page in 0..EraInfo::::get_page_count(1, &11) { + assert_eq!( + EraInfo::::is_rewards_claimed_with_legacy_fallback( + 1, + ledger.as_ref().unwrap(), + &11, + page + ), + true + ); + } + for i in 3..16 { Staking::reward_by_ids(vec![(11, 1)]); // compute and ensure the reward amount is greater than zero. let payout = current_total_payout_for_duration(reward_time_per_era()); - let actual_paid_out = payout_exposure_part * payout; let pre_payout_total_issuance = Balances::total_issuance(); mock::start_active_era(i); RewardOnUnbalanceWasCalled::set(false); - assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, i - 1)); + mock::make_all_reward_payment(i - 1); assert_eq_error_rate!( Balances::total_issuance(), - pre_payout_total_issuance + actual_paid_out, - 1 + pre_payout_total_issuance + payout, + 2 ); assert!(RewardOnUnbalanceWasCalled::get()); + + // verify we track rewards for each era and page + for page in 0..EraInfo::::get_page_count(i - 1, &11) { + assert_eq!( + EraInfo::::is_rewards_claimed_with_legacy_fallback( + i - 1, + Staking::ledger(11.into()).as_ref().unwrap(), + &11, + page + ), + true + ); + } } - // We track rewards in `claimed_rewards` vec - assert_eq!( - Staking::ledger(&11), - Some(StakingLedger { - stash: 11, - total: 1000, - active: 1000, - unlocking: Default::default(), - claimed_rewards: (1..=14).collect::>().try_into().unwrap() - }) - ); + assert_eq!(Staking::claimed_rewards(14, &11), vec![0, 1]); let last_era = 99; let history_depth = HistoryDepth::get(); - let expected_last_reward_era = last_era - 1; - let expected_start_reward_era = last_era - history_depth; + let last_reward_era = last_era - 1; + let first_claimable_reward_era = last_era - history_depth; for i in 16..=last_era { Staking::reward_by_ids(vec![(11, 1)]); // compute and ensure the reward amount is greater than zero. 
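Instead of the old per-ledger `claimed_rewards` vector, payouts are now tracked per era, validator and page, and claiming the same page twice fails with `AlreadyClaimed`. The following is a deliberately tiny, self-contained model of that bookkeeping (a plain map, not the pallet's `ClaimedRewards` storage), just to show the double-claim check and that pages can be claimed out of order:

use std::collections::BTreeMap;

type EraIndex = u32;
type Page = u32;
type AccountId = u64;

#[derive(Debug, PartialEq)]
enum Error {
    AlreadyClaimed,
}

/// Simplified stand-in for tracking which pages of an era's rewards were paid.
#[derive(Default)]
struct Claims(BTreeMap<(EraIndex, AccountId), Vec<Page>>);

impl Claims {
    fn claim(&mut self, era: EraIndex, validator: AccountId, page: Page) -> Result<(), Error> {
        let pages = self.0.entry((era, validator)).or_default();
        if pages.contains(&page) {
            return Err(Error::AlreadyClaimed);
        }
        pages.push(page);
        Ok(())
    }

    fn claimed(&self, era: EraIndex, validator: AccountId) -> Vec<Page> {
        self.0.get(&(era, validator)).cloned().unwrap_or_default()
    }
}

fn main() {
    let mut claims = Claims::default();

    // Pages can be claimed in any order...
    assert_eq!(claims.claim(1, 11, 1), Ok(()));
    assert_eq!(claims.claim(1, 11, 0), Ok(()));
    assert_eq!(claims.claimed(1, 11), vec![1, 0]);

    // ...but claiming the same (era, page) twice is rejected.
    assert_eq!(claims.claim(1, 11, 0), Err(Error::AlreadyClaimed));
}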
@@ -3834,129 +3913,517 @@ fn test_payout_stakers() { mock::start_active_era(i); } - // We clean it up as history passes - assert_ok!(Staking::payout_stakers( + // verify we clean up history as we go + for era in 0..15 { + assert_eq!(Staking::claimed_rewards(era, &11), Vec::::new()); + } + + // verify only page 0 is marked as claimed + assert_ok!(Staking::payout_stakers_by_page( RuntimeOrigin::signed(1337), 11, - expected_start_reward_era + first_claimable_reward_era, + 0 )); - assert_ok!(Staking::payout_stakers( + assert_eq!(Staking::claimed_rewards(first_claimable_reward_era, &11), vec![0]); + + // verify page 0 and 1 are marked as claimed + assert_ok!(Staking::payout_stakers_by_page( RuntimeOrigin::signed(1337), 11, - expected_last_reward_era + first_claimable_reward_era, + 1 )); - assert_eq!( - Staking::ledger(&11), - Some(StakingLedger { - stash: 11, - total: 1000, - active: 1000, - unlocking: Default::default(), - claimed_rewards: bounded_vec![expected_start_reward_era, expected_last_reward_era] - }) - ); + assert_eq!(Staking::claimed_rewards(first_claimable_reward_era, &11), vec![0, 1]); + + // verify only page 0 is marked as claimed + assert_ok!(Staking::payout_stakers_by_page( + RuntimeOrigin::signed(1337), + 11, + last_reward_era, + 0 + )); + assert_eq!(Staking::claimed_rewards(last_reward_era, &11), vec![0]); + + // verify page 0 and 1 are marked as claimed + assert_ok!(Staking::payout_stakers_by_page( + RuntimeOrigin::signed(1337), + 11, + last_reward_era, + 1 + )); + assert_eq!(Staking::claimed_rewards(last_reward_era, &11), vec![0, 1]); // Out of order claims works. - assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 69)); - assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 23)); - assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 42)); - assert_eq!( - Staking::ledger(&11), - Some(StakingLedger { - stash: 11, - total: 1000, - active: 1000, - unlocking: Default::default(), - claimed_rewards: bounded_vec![ - expected_start_reward_era, - 23, - 42, - 69, - expected_last_reward_era - ] - }) - ); + assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 69, 0)); + assert_eq!(Staking::claimed_rewards(69, &11), vec![0]); + assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 23, 1)); + assert_eq!(Staking::claimed_rewards(23, &11), vec![1]); + assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 42, 0)); + assert_eq!(Staking::claimed_rewards(42, &11), vec![0]); }); } #[test] -fn payout_stakers_handles_basic_errors() { - // Here we will test payouts handle all errors. +fn test_multi_page_payout_stakers_backward_compatible() { + // Test that payout_stakers work in general and that it pays the correct amount of reward. ExtBuilder::default().has_stakers(false).build_and_execute(|| { - // Consumed weight for all payout_stakers dispatches that fail - let err_weight = ::WeightInfo::payout_stakers_alive_staked(0); - - // Same setup as the test above let balance = 1000; + // Track the exposure of the validator and all nominators. 
+ let mut total_exposure = balance; + // Create a validator: bond_validator(11, balance); // Default(64) + assert_eq!(Validators::::count(), 1); - // Create nominators, targeting stash + let err_weight = ::WeightInfo::payout_stakers_alive_staked(0); + + // Create nominators, targeting stash of validators for i in 0..100 { - bond_nominator(1000 + i, balance + i as Balance, vec![11]); + let bond_amount = balance + i as Balance; + bond_nominator(1000 + i, bond_amount, vec![11]); + // with multi page reward payout, payout exposure is same as total exposure. + total_exposure += bond_amount; } mock::start_active_era(1); Staking::reward_by_ids(vec![(11, 1)]); - // compute and ensure the reward amount is greater than zero. - let _ = current_total_payout_for_duration(reward_time_per_era()); + // Since `MaxExposurePageSize = 64`, there are two pages of validator exposure. + assert_eq!(EraInfo::::get_page_count(1, &11), 2); + // compute and ensure the reward amount is greater than zero. + let payout = current_total_payout_for_duration(reward_time_per_era()); mock::start_active_era(2); - // Wrong Era, too big - assert_noop!( - Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 2), - Error::::InvalidEraToReward.with_weight(err_weight) - ); - // Wrong Staker + // verify the exposures are calculated correctly. + let actual_exposure_0 = EraInfo::::get_paged_exposure(1, &11, 0).unwrap(); + assert_eq!(actual_exposure_0.total(), total_exposure); + assert_eq!(actual_exposure_0.own(), 1000); + assert_eq!(actual_exposure_0.others().len(), 64); + let actual_exposure_1 = EraInfo::::get_paged_exposure(1, &11, 1).unwrap(); + assert_eq!(actual_exposure_1.total(), total_exposure); + // own stake is only included once in the first page + assert_eq!(actual_exposure_1.own(), 0); + assert_eq!(actual_exposure_1.others().len(), 100 - 64); + + let pre_payout_total_issuance = Balances::total_issuance(); + RewardOnUnbalanceWasCalled::set(false); + + let controller_balance_before_p0_payout = Balances::free_balance(&11); + // Payout rewards for first exposure page + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 1)); + // page 0 is claimed assert_noop!( - Staking::payout_stakers(RuntimeOrigin::signed(1337), 10, 1), - Error::::NotStash.with_weight(err_weight) + Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 1, 0), + Error::::AlreadyClaimed.with_weight(err_weight) ); - let last_era = 99; - for i in 3..=last_era { - Staking::reward_by_ids(vec![(11, 1)]); - // compute and ensure the reward amount is greater than zero. - let _ = current_total_payout_for_duration(reward_time_per_era()); - mock::start_active_era(i); - } + let controller_balance_after_p0_payout = Balances::free_balance(&11); - let history_depth = HistoryDepth::get(); - let expected_last_reward_era = last_era - 1; - let expected_start_reward_era = last_era - history_depth; + // verify rewards have been paid out but still some left + assert!(Balances::total_issuance() > pre_payout_total_issuance); + assert!(Balances::total_issuance() < pre_payout_total_issuance + payout); - // We are at era last_era=99. Given history_depth=80, we should be able - // to payout era starting from expected_start_reward_era=19 through - // expected_last_reward_era=98 (80 total eras), but not 18 or 99. 
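The comment above spells out the claimable window: at era 99 with `HistoryDepth = 80`, eras 19 through 98 can be paid out, while era 18 is already pruned and era 99 is still active. The arithmetic is simple enough to state directly:

type EraIndex = u32;

/// Claimable payout window: anything older than `history_depth` eras has been
/// pruned, and the active era has not finished yet.
fn claimable_eras(current_era: EraIndex, history_depth: EraIndex) -> std::ops::RangeInclusive<EraIndex> {
    current_era.saturating_sub(history_depth)..=current_era.saturating_sub(1)
}

fn main() {
    // Numbers from the comment above: current era 99, history depth 80.
    let window = claimable_eras(99, 80);
    assert_eq!(window, 19..=98);
    assert!(!window.contains(&18)); // already pruned
    assert!(!window.contains(&99)); // not finished yet
    assert!(window.contains(&19) && window.contains(&98));
}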
- assert_noop!( - Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, expected_start_reward_era - 1), - Error::::InvalidEraToReward.with_weight(err_weight) - ); + // verify the validator has been rewarded + assert!(controller_balance_after_p0_payout > controller_balance_before_p0_payout); + + // This should payout the second and last page of nominators + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 1)); + + // cannot claim any more pages assert_noop!( - Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, expected_last_reward_era + 1), - Error::::InvalidEraToReward.with_weight(err_weight) + Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 1), + Error::::AlreadyClaimed.with_weight(err_weight) ); - assert_ok!(Staking::payout_stakers( - RuntimeOrigin::signed(1337), - 11, - expected_start_reward_era - )); - assert_ok!(Staking::payout_stakers( - RuntimeOrigin::signed(1337), + + // verify the validator was not rewarded the second time + assert_eq!(Balances::free_balance(&11), controller_balance_after_p0_payout); + + // verify all rewards have been paid out + assert_eq_error_rate!(Balances::total_issuance(), pre_payout_total_issuance + payout, 2); + assert!(RewardOnUnbalanceWasCalled::get()); + + // verify all nominators of validator 11 are paid out, including the validator + // Validator payout goes to controller. + assert!(Balances::free_balance(&11) > balance); + for i in 0..100 { + assert!(Balances::free_balance(&(1000 + i)) > balance + i as Balance); + } + + // verify we no longer track rewards in `legacy_claimed_rewards` vec + let ledger = Staking::ledger(11.into()); + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 1000, + unlocking: Default::default(), + legacy_claimed_rewards: bounded_vec![] + } + ); + + // verify rewards are tracked to prevent double claims + for page in 0..EraInfo::::get_page_count(1, &11) { + assert_eq!( + EraInfo::::is_rewards_claimed_with_legacy_fallback( + 1, + ledger.as_ref().unwrap(), + &11, + page + ), + true + ); + } + + for i in 3..16 { + Staking::reward_by_ids(vec![(11, 1)]); + + // compute and ensure the reward amount is greater than zero. + let payout = current_total_payout_for_duration(reward_time_per_era()); + let pre_payout_total_issuance = Balances::total_issuance(); + + mock::start_active_era(i); + RewardOnUnbalanceWasCalled::set(false); + mock::make_all_reward_payment(i - 1); + assert_eq_error_rate!( + Balances::total_issuance(), + pre_payout_total_issuance + payout, + 2 + ); + assert!(RewardOnUnbalanceWasCalled::get()); + + // verify we track rewards for each era and page + for page in 0..EraInfo::::get_page_count(i - 1, &11) { + assert_eq!( + EraInfo::::is_rewards_claimed_with_legacy_fallback( + i - 1, + Staking::ledger(11.into()).as_ref().unwrap(), + &11, + page + ), + true + ); + } + } + + assert_eq!(Staking::claimed_rewards(14, &11), vec![0, 1]); + + let last_era = 99; + let history_depth = HistoryDepth::get(); + let last_reward_era = last_era - 1; + let first_claimable_reward_era = last_era - history_depth; + for i in 16..=last_era { + Staking::reward_by_ids(vec![(11, 1)]); + // compute and ensure the reward amount is greater than zero. 
+ let _ = current_total_payout_for_duration(reward_time_per_era()); + mock::start_active_era(i); + } + + // verify we clean up history as we go + for era in 0..15 { + assert_eq!(Staking::claimed_rewards(era, &11), Vec::::new()); + } + + // verify only page 0 is marked as claimed + assert_ok!(Staking::payout_stakers( + RuntimeOrigin::signed(1337), + 11, + first_claimable_reward_era + )); + assert_eq!(Staking::claimed_rewards(first_claimable_reward_era, &11), vec![0]); + + // verify page 0 and 1 are marked as claimed + assert_ok!(Staking::payout_stakers( + RuntimeOrigin::signed(1337), + 11, + first_claimable_reward_era, + )); + assert_eq!(Staking::claimed_rewards(first_claimable_reward_era, &11), vec![0, 1]); + + // change order and verify only page 1 is marked as claimed + assert_ok!(Staking::payout_stakers_by_page( + RuntimeOrigin::signed(1337), + 11, + last_reward_era, + 1 + )); + assert_eq!(Staking::claimed_rewards(last_reward_era, &11), vec![1]); + + // verify page 0 is claimed even when explicit page is not passed + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, last_reward_era,)); + + assert_eq!(Staking::claimed_rewards(last_reward_era, &11), vec![1, 0]); + + // cannot claim any more pages + assert_noop!( + Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, last_reward_era), + Error::::AlreadyClaimed.with_weight(err_weight) + ); + + // Create 4 nominator pages + for i in 100..200 { + let bond_amount = balance + i as Balance; + bond_nominator(1000 + i, bond_amount, vec![11]); + } + + let test_era = last_era + 1; + mock::start_active_era(test_era); + + Staking::reward_by_ids(vec![(11, 1)]); + // compute and ensure the reward amount is greater than zero. + let _ = current_total_payout_for_duration(reward_time_per_era()); + mock::start_active_era(test_era + 1); + + // Out of order claims works. + assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, test_era, 2)); + assert_eq!(Staking::claimed_rewards(test_era, &11), vec![2]); + + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, test_era)); + assert_eq!(Staking::claimed_rewards(test_era, &11), vec![2, 0]); + + // cannot claim page 2 again + assert_noop!( + Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, test_era, 2), + Error::::AlreadyClaimed.with_weight(err_weight) + ); + + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, test_era)); + assert_eq!(Staking::claimed_rewards(test_era, &11), vec![2, 0, 1]); + + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, test_era)); + assert_eq!(Staking::claimed_rewards(test_era, &11), vec![2, 0, 1, 3]); + }); +} + +#[test] +fn test_page_count_and_size() { + // Test that payout_stakers work in general and that it pays the correct amount of reward. + ExtBuilder::default().has_stakers(false).build_and_execute(|| { + let balance = 1000; + // Track the exposure of the validator and all nominators. + // Create a validator: + bond_validator(11, balance); // Default(64) + assert_eq!(Validators::::count(), 1); + + // Create nominators, targeting stash of validators + for i in 0..100 { + let bond_amount = balance + i as Balance; + bond_nominator(1000 + i, bond_amount, vec![11]); + } + + mock::start_active_era(1); + + // Since max exposure page size is 64, 2 pages of nominators are created. 
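As the claimed-rewards assertions above suggest, the legacy, page-less `payout_stakers` call appears to pay out the lowest page that has not been claimed yet, so repeated calls walk through the remaining pages even when one of them (page 2 here) was already claimed explicitly via `payout_stakers_by_page`. A sketch of that selection rule, under that assumption:

/// Pick the page a legacy, page-less payout would target: the lowest page
/// index that has not been claimed yet, if any remain.
fn next_unclaimed_page(claimed: &[u32], page_count: u32) -> Option<u32> {
    (0..page_count).find(|page| !claimed.contains(page))
}

fn main() {
    // Mirrors the sequence asserted above for a validator with 4 exposure pages,
    // where page 2 was already claimed via `payout_stakers_by_page`.
    let mut claimed = vec![2u32];
    for expected in [0, 1, 3] {
        let page = next_unclaimed_page(&claimed, 4).unwrap();
        assert_eq!(page, expected);
        claimed.push(page);
    }
    assert_eq!(claimed, vec![2, 0, 1, 3]);
    // All pages claimed: a further call would fail with `AlreadyClaimed`.
    assert_eq!(next_unclaimed_page(&claimed, 4), None);
}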
+ assert_eq!(EraInfo::::get_page_count(1, &11), 2); + + // first page has 64 nominators + assert_eq!(EraInfo::::get_paged_exposure(1, &11, 0).unwrap().others().len(), 64); + // second page has 36 nominators + assert_eq!(EraInfo::::get_paged_exposure(1, &11, 1).unwrap().others().len(), 36); + + // now lets decrease page size + MaxExposurePageSize::set(32); + mock::start_active_era(2); + // now we expect 4 pages. + assert_eq!(EraInfo::::get_page_count(2, &11), 4); + // first 3 pages have 32 nominators each + assert_eq!(EraInfo::::get_paged_exposure(2, &11, 0).unwrap().others().len(), 32); + assert_eq!(EraInfo::::get_paged_exposure(2, &11, 1).unwrap().others().len(), 32); + assert_eq!(EraInfo::::get_paged_exposure(2, &11, 2).unwrap().others().len(), 32); + assert_eq!(EraInfo::::get_paged_exposure(2, &11, 3).unwrap().others().len(), 4); + + // now lets decrease page size even more + MaxExposurePageSize::set(5); + mock::start_active_era(3); + + // now we expect the max 20 pages (100/5). + assert_eq!(EraInfo::::get_page_count(3, &11), 20); + }); +} + +#[test] +fn payout_stakers_handles_basic_errors() { + // Here we will test payouts handle all errors. + ExtBuilder::default().has_stakers(false).build_and_execute(|| { + // Consumed weight for all payout_stakers dispatches that fail + let err_weight = ::WeightInfo::payout_stakers_alive_staked(0); + + // Same setup as the test above + let balance = 1000; + bond_validator(11, balance); // Default(64) + + // Create nominators, targeting stash + for i in 0..100 { + bond_nominator(1000 + i, balance + i as Balance, vec![11]); + } + + mock::start_active_era(1); + Staking::reward_by_ids(vec![(11, 1)]); + + // compute and ensure the reward amount is greater than zero. + let _ = current_total_payout_for_duration(reward_time_per_era()); + + mock::start_active_era(2); + + // Wrong Era, too big + assert_noop!( + Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 2, 0), + Error::::InvalidEraToReward.with_weight(err_weight) + ); + // Wrong Staker + assert_noop!( + Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 10, 1, 0), + Error::::NotStash.with_weight(err_weight) + ); + + let last_era = 99; + for i in 3..=last_era { + Staking::reward_by_ids(vec![(11, 1)]); + // compute and ensure the reward amount is greater than zero. + let _ = current_total_payout_for_duration(reward_time_per_era()); + mock::start_active_era(i); + } + + let history_depth = HistoryDepth::get(); + let expected_last_reward_era = last_era - 1; + let expected_start_reward_era = last_era - history_depth; + + // We are at era last_era=99. Given history_depth=80, we should be able + // to payout era starting from expected_start_reward_era=19 through + // expected_last_reward_era=98 (80 total eras), but not 18 or 99. 
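`test_page_count_and_size` pins down the relationship between nominator count, `MaxExposurePageSize` and the number of exposure pages: it is a ceiling division (100 nominators give 2, 4 and 20 pages for page sizes 64, 32 and 5). In code:

/// Number of exposure pages for a validator: a ceiling division of the
/// nominator count by the page size (the validator's own stake rides on page 0).
fn page_count(nominator_count: u32, page_size: u32) -> u32 {
    (nominator_count + page_size - 1) / page_size
}

fn main() {
    // The counts asserted in `test_page_count_and_size` for 100 nominators:
    assert_eq!(page_count(100, 64), 2); // 64 + 36
    assert_eq!(page_count(100, 32), 4); // 32 + 32 + 32 + 4
    assert_eq!(page_count(100, 5), 20); // 20 full pages of 5
}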
+ assert_noop!( + Staking::payout_stakers_by_page( + RuntimeOrigin::signed(1337), + 11, + expected_start_reward_era - 1, + 0 + ), + Error::::InvalidEraToReward.with_weight(err_weight) + ); + assert_noop!( + Staking::payout_stakers_by_page( + RuntimeOrigin::signed(1337), + 11, + expected_last_reward_era + 1, + 0 + ), + Error::::InvalidEraToReward.with_weight(err_weight) + ); + assert_ok!(Staking::payout_stakers_by_page( + RuntimeOrigin::signed(1337), + 11, + expected_start_reward_era, + 0 + )); + assert_ok!(Staking::payout_stakers_by_page( + RuntimeOrigin::signed(1337), + 11, + expected_last_reward_era, + 0 + )); + + // can call page 1 + assert_ok!(Staking::payout_stakers_by_page( + RuntimeOrigin::signed(1337), 11, - expected_last_reward_era + expected_last_reward_era, + 1 )); // Can't claim again assert_noop!( - Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, expected_start_reward_era), + Staking::payout_stakers_by_page( + RuntimeOrigin::signed(1337), + 11, + expected_start_reward_era, + 0 + ), Error::::AlreadyClaimed.with_weight(err_weight) ); + assert_noop!( - Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, expected_last_reward_era), + Staking::payout_stakers_by_page( + RuntimeOrigin::signed(1337), + 11, + expected_last_reward_era, + 0 + ), Error::::AlreadyClaimed.with_weight(err_weight) ); + + assert_noop!( + Staking::payout_stakers_by_page( + RuntimeOrigin::signed(1337), + 11, + expected_last_reward_era, + 1 + ), + Error::::AlreadyClaimed.with_weight(err_weight) + ); + + // invalid page + assert_noop!( + Staking::payout_stakers_by_page( + RuntimeOrigin::signed(1337), + 11, + expected_last_reward_era, + 2 + ), + Error::::InvalidPage.with_weight(err_weight) + ); + }); +} + +#[test] +fn test_commission_paid_across_pages() { + ExtBuilder::default().has_stakers(false).build_and_execute(|| { + let balance = 1; + let commission = 50; + // Create a validator: + bond_validator(11, balance); + assert_ok!(Staking::validate( + RuntimeOrigin::signed(11), + ValidatorPrefs { commission: Perbill::from_percent(commission), blocked: false } + )); + assert_eq!(Validators::::count(), 1); + + // Create nominators, targeting stash of validators + for i in 0..200 { + let bond_amount = balance + i as Balance; + bond_nominator(1000 + i, bond_amount, vec![11]); + } + + mock::start_active_era(1); + Staking::reward_by_ids(vec![(11, 1)]); + + // Since `MaxExposurePageSize = 64`, there are four pages of validator + // exposure. + assert_eq!(EraInfo::::get_page_count(1, &11), 4); + + // compute and ensure the reward amount is greater than zero. 
+ let payout = current_total_payout_for_duration(reward_time_per_era()); + mock::start_active_era(2); + + let initial_balance = Balances::free_balance(&11); + // Payout rewards for first exposure page + assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 1, 0)); + + let controller_balance_after_p0_payout = Balances::free_balance(&11); + + // some commission is paid + assert!(initial_balance < controller_balance_after_p0_payout); + + // payout all pages + for i in 1..4 { + let before_balance = Balances::free_balance(&11); + assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 1, i)); + let after_balance = Balances::free_balance(&11); + // some commission is paid for every page + assert!(before_balance < after_balance); + } + + assert_eq_error_rate!(Balances::free_balance(&11), initial_balance + payout / 2, 1,); }); } @@ -3965,8 +4432,7 @@ fn payout_stakers_handles_weight_refund() { // Note: this test relies on the assumption that `payout_stakers_alive_staked` is solely used by // `payout_stakers` to calculate the weight of each payout op. ExtBuilder::default().has_stakers(false).build_and_execute(|| { - let max_nom_rewarded = - <::MaxNominatorRewardedPerValidator as Get<_>>::get(); + let max_nom_rewarded = MaxExposurePageSize::get(); // Make sure the configured value is meaningful for our use. assert!(max_nom_rewarded >= 4); let half_max_nom_rewarded = max_nom_rewarded / 2; @@ -4002,7 +4468,11 @@ fn payout_stakers_handles_weight_refund() { start_active_era(2); // Collect payouts when there are no nominators - let call = TestCall::Staking(StakingCall::payout_stakers { validator_stash: 11, era: 1 }); + let call = TestCall::Staking(StakingCall::payout_stakers_by_page { + validator_stash: 11, + era: 1, + page: 0, + }); let info = call.get_dispatch_info(); let result = call.dispatch(RuntimeOrigin::signed(20)); assert_ok!(result); @@ -4015,7 +4485,11 @@ fn payout_stakers_handles_weight_refund() { start_active_era(3); // Collect payouts for an era where the validator did not receive any points. - let call = TestCall::Staking(StakingCall::payout_stakers { validator_stash: 11, era: 2 }); + let call = TestCall::Staking(StakingCall::payout_stakers_by_page { + validator_stash: 11, + era: 2, + page: 0, + }); let info = call.get_dispatch_info(); let result = call.dispatch(RuntimeOrigin::signed(20)); assert_ok!(result); @@ -4028,7 +4502,11 @@ fn payout_stakers_handles_weight_refund() { start_active_era(4); // Collect payouts when the validator has `half_max_nom_rewarded` nominators. - let call = TestCall::Staking(StakingCall::payout_stakers { validator_stash: 11, era: 3 }); + let call = TestCall::Staking(StakingCall::payout_stakers_by_page { + validator_stash: 11, + era: 3, + page: 0, + }); let info = call.get_dispatch_info(); let result = call.dispatch(RuntimeOrigin::signed(20)); assert_ok!(result); @@ -4051,14 +4529,22 @@ fn payout_stakers_handles_weight_refund() { start_active_era(6); // Collect payouts when the validator had `half_max_nom_rewarded` nominators. - let call = TestCall::Staking(StakingCall::payout_stakers { validator_stash: 11, era: 5 }); + let call = TestCall::Staking(StakingCall::payout_stakers_by_page { + validator_stash: 11, + era: 5, + page: 0, + }); let info = call.get_dispatch_info(); let result = call.dispatch(RuntimeOrigin::signed(20)); assert_ok!(result); assert_eq!(extract_actual_weight(&result, &info), max_nom_rewarded_weight); // Try and collect payouts for an era that has already been collected. 
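`test_commission_paid_across_pages` only asserts two things: every page payout moves some commission to the validator, and the per-page amounts add up to half of the era payout for a 50% commission. One split that is consistent with both checks, and the one the sketch below assumes (without claiming it is the pallet's exact formula), is commission proportional to each page's share of the exposed stake:

/// Hypothetical split of a validator's commission across exposure pages,
/// proportional to each page's share of the total exposed stake.
fn commission_per_page(total_payout: u128, commission_percent: u128, page_stakes: &[u128]) -> Vec<u128> {
    let total_stake: u128 = page_stakes.iter().sum();
    let total_commission = total_payout * commission_percent / 100;
    page_stakes.iter().map(|stake| total_commission * stake / total_stake).collect()
}

fn main() {
    // Four pages of roughly equal stake and a 50% commission, as in the test above.
    let payout = 8_000u128;
    let per_page = commission_per_page(payout, 50, &[1_000, 1_000, 1_000, 1_000]);

    // Some commission is paid for every page...
    assert!(per_page.iter().all(|c| *c > 0));
    // ...and the pages together add up to half of the era payout.
    assert_eq!(per_page.iter().sum::<u128>(), payout / 2);
}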
- let call = TestCall::Staking(StakingCall::payout_stakers { validator_stash: 11, era: 5 }); + let call = TestCall::Staking(StakingCall::payout_stakers_by_page { + validator_stash: 11, + era: 5, + page: 0, + }); let info = call.get_dispatch_info(); let result = call.dispatch(RuntimeOrigin::signed(20)); assert!(result.is_err()); @@ -4068,50 +4554,46 @@ fn payout_stakers_handles_weight_refund() { } #[test] -fn bond_during_era_correctly_populates_claimed_rewards() { +fn bond_during_era_does_not_populate_legacy_claimed_rewards() { ExtBuilder::default().has_stakers(false).build_and_execute(|| { // Era = None bond_validator(9, 1000); assert_eq!( - Staking::ledger(&9), - Some(StakingLedger { + Staking::ledger(9.into()).unwrap(), + StakingLedgerInspect { stash: 9, total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: bounded_vec![], - }) + legacy_claimed_rewards: bounded_vec![], + } ); mock::start_active_era(5); bond_validator(11, 1000); assert_eq!( - Staking::ledger(&11), - Some(StakingLedger { + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { stash: 11, total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: (0..5).collect::>().try_into().unwrap(), - }) + legacy_claimed_rewards: bounded_vec![], + } ); // make sure only era upto history depth is stored let current_era = 99; - let last_reward_era = 99 - HistoryDepth::get(); mock::start_active_era(current_era); bond_validator(13, 1000); assert_eq!( - Staking::ledger(&13), - Some(StakingLedger { + Staking::ledger(13.into()).unwrap(), + StakingLedgerInspect { stash: 13, total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: (last_reward_era..current_era) - .collect::>() - .try_into() - .unwrap(), - }) + legacy_claimed_rewards: Default::default(), + } ); }); } @@ -4139,7 +4621,7 @@ fn offences_weight_calculated_correctly() { >, > = (1..10) .map(|i| OffenceDetails { - offender: (i, Staking::eras_stakers(active_era(), i)), + offender: (i, Staking::eras_stakers(active_era(), &i)), reporters: vec![], }) .collect(); @@ -4155,7 +4637,7 @@ fn offences_weight_calculated_correctly() { // On Offence with one offenders, Applied let one_offender = [OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![1], }]; @@ -4181,41 +4663,8 @@ fn offences_weight_calculated_correctly() { DisableStrategy::WhenSlashed{} ), one_offence_unapplied_weight - ); - }); -} - -#[test] -fn payout_creates_controller() { - ExtBuilder::default().has_stakers(false).build_and_execute(|| { - let balance = 1000; - // Create a validator: - bond_validator(11, balance); - - // create a stash/controller pair and nominate - let (stash, controller) = testing_utils::create_unique_stash_controller::( - 0, - 100, - RewardDestination::Controller, - false, - ) - .unwrap(); - assert_ok!(Staking::nominate(RuntimeOrigin::signed(controller), vec![11])); - - // kill controller - assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(controller), stash, 100)); - assert_eq!(Balances::free_balance(controller), 0); - - mock::start_active_era(1); - Staking::reward_by_ids(vec![(11, 1)]); - // compute and ensure the reward amount is greater than zero. 
- let _ = current_total_payout_for_duration(reward_time_per_era()); - mock::start_active_era(2); - assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(controller), 11, 1)); - - // Controller is created - assert!(Balances::free_balance(controller) > 0); - }) + ); + }); } #[test] @@ -4239,7 +4688,7 @@ fn payout_to_any_account_works() { // compute and ensure the reward amount is greater than zero. let _ = current_total_payout_for_duration(reward_time_per_era()); mock::start_active_era(2); - assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 1)); + assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 1, 0)); // Payment is successful assert!(Balances::free_balance(42) > 0); @@ -4356,13 +4805,13 @@ fn cannot_rebond_to_lower_than_ed() { .build_and_execute(|| { // initial stuff. assert_eq!( - Staking::ledger(&21).unwrap(), - StakingLedger { + Staking::ledger(21.into()).unwrap(), + StakingLedgerInspect { stash: 21, total: 11 * 1000, active: 11 * 1000, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], } ); @@ -4370,13 +4819,13 @@ fn cannot_rebond_to_lower_than_ed() { assert_ok!(Staking::chill(RuntimeOrigin::signed(21))); assert_ok!(Staking::unbond(RuntimeOrigin::signed(21), 11 * 1000)); assert_eq!( - Staking::ledger(&21).unwrap(), - StakingLedger { + Staking::ledger(21.into()).unwrap(), + StakingLedgerInspect { stash: 21, total: 11 * 1000, active: 0, unlocking: bounded_vec![UnlockChunk { value: 11 * 1000, era: 3 }], - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], } ); @@ -4396,13 +4845,13 @@ fn cannot_bond_extra_to_lower_than_ed() { .build_and_execute(|| { // initial stuff. assert_eq!( - Staking::ledger(&21).unwrap(), - StakingLedger { + Staking::ledger(21.into()).unwrap(), + StakingLedgerInspect { stash: 21, total: 11 * 1000, active: 11 * 1000, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], } ); @@ -4410,13 +4859,13 @@ fn cannot_bond_extra_to_lower_than_ed() { assert_ok!(Staking::chill(RuntimeOrigin::signed(21))); assert_ok!(Staking::unbond(RuntimeOrigin::signed(21), 11 * 1000)); assert_eq!( - Staking::ledger(&21).unwrap(), - StakingLedger { + Staking::ledger(21.into()).unwrap(), + StakingLedgerInspect { stash: 21, total: 11 * 1000, active: 0, unlocking: bounded_vec![UnlockChunk { value: 11 * 1000, era: 3 }], - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], } ); @@ -4437,13 +4886,13 @@ fn do_not_die_when_active_is_ed() { .build_and_execute(|| { // given assert_eq!( - Staking::ledger(&21).unwrap(), - StakingLedger { + Staking::ledger(21.into()).unwrap(), + StakingLedgerInspect { stash: 21, total: 1000 * ed, active: 1000 * ed, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], } ); @@ -4454,13 +4903,13 @@ fn do_not_die_when_active_is_ed() { // then assert_eq!( - Staking::ledger(&21).unwrap(), - StakingLedger { + Staking::ledger(21.into()).unwrap(), + StakingLedgerInspect { stash: 21, total: ed, active: ed, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], } ); }) @@ -4961,7 +5410,7 @@ fn min_bond_checks_work() { .min_validator_bond(1_500) .build_and_execute(|| { // 500 is not enough for any role - assert_ok!(Staking::bond(RuntimeOrigin::signed(3), 500, RewardDestination::Controller)); + assert_ok!(Staking::bond(RuntimeOrigin::signed(3), 500, RewardDestination::Stash)); 
assert_noop!( Staking::nominate(RuntimeOrigin::signed(3), vec![1]), Error::::InsufficientBond @@ -5023,19 +5472,11 @@ fn chill_other_works() { Balances::make_free_balance_be(&c, 100_000); // Nominator - assert_ok!(Staking::bond( - RuntimeOrigin::signed(a), - 1000, - RewardDestination::Controller - )); + assert_ok!(Staking::bond(RuntimeOrigin::signed(a), 1000, RewardDestination::Stash)); assert_ok!(Staking::nominate(RuntimeOrigin::signed(a), vec![1])); // Validator - assert_ok!(Staking::bond( - RuntimeOrigin::signed(b), - 1500, - RewardDestination::Controller - )); + assert_ok!(Staking::bond(RuntimeOrigin::signed(b), 1500, RewardDestination::Stash)); assert_ok!(Staking::validate(RuntimeOrigin::signed(b), ValidatorPrefs::default())); } @@ -5182,7 +5623,7 @@ fn capped_stakers_works() { let (_, controller) = testing_utils::create_stash_controller::( i + 10_000_000, 100, - RewardDestination::Controller, + RewardDestination::Stash, ) .unwrap(); assert_ok!(Staking::validate( @@ -5193,12 +5634,9 @@ fn capped_stakers_works() { } // but no more - let (_, last_validator) = testing_utils::create_stash_controller::( - 1337, - 100, - RewardDestination::Controller, - ) - .unwrap(); + let (_, last_validator) = + testing_utils::create_stash_controller::(1337, 100, RewardDestination::Stash) + .unwrap(); assert_noop!( Staking::validate(RuntimeOrigin::signed(last_validator), ValidatorPrefs::default()), @@ -5211,7 +5649,7 @@ fn capped_stakers_works() { let (_, controller) = testing_utils::create_stash_controller::( i + 20_000_000, 100, - RewardDestination::Controller, + RewardDestination::Stash, ) .unwrap(); assert_ok!(Staking::nominate(RuntimeOrigin::signed(controller), vec![1])); @@ -5222,7 +5660,7 @@ fn capped_stakers_works() { let (_, last_nominator) = testing_utils::create_stash_controller::( 30_000_000, 100, - RewardDestination::Controller, + RewardDestination::Stash, ) .unwrap(); assert_noop!( @@ -5530,15 +5968,14 @@ fn force_apply_min_commission_works() { #[test] fn proportional_slash_stop_slashing_if_remaining_zero() { let c = |era, value| UnlockChunk:: { era, value }; + + // we have some chunks, but they are not affected. + let unlocking = bounded_vec![c(1, 10), c(2, 10)]; + // Given - let mut ledger = StakingLedger:: { - stash: 123, - total: 40, - active: 20, - // we have some chunks, but they are not affected. - unlocking: bounded_vec![c(1, 10), c(2, 10)], - claimed_rewards: bounded_vec![], - }; + let mut ledger = StakingLedger::::new(123, 20); + ledger.total = 40; + ledger.unlocking = unlocking; assert_eq!(BondingDuration::get(), 3); @@ -5550,13 +5987,7 @@ fn proportional_slash_stop_slashing_if_remaining_zero() { fn proportional_ledger_slash_works() { let c = |era, value| UnlockChunk:: { era, value }; // Given - let mut ledger = StakingLedger:: { - stash: 123, - total: 10, - active: 10, - unlocking: bounded_vec![], - claimed_rewards: bounded_vec![], - }; + let mut ledger = StakingLedger::::new(123, 10); assert_eq!(BondingDuration::get(), 3); // When we slash a ledger with no unlocking chunks @@ -5772,149 +6203,6 @@ fn proportional_ledger_slash_works() { ); } -#[test] -fn pre_bonding_era_cannot_be_claimed() { - // Verifies initial conditions of mock - ExtBuilder::default().nominate(false).build_and_execute(|| { - let history_depth = HistoryDepth::get(); - // jump to some era above history_depth - let mut current_era = history_depth + 10; - let last_reward_era = current_era - 1; - let start_reward_era = current_era - history_depth; - - // put some money in stash=3 and controller=4. 
- for i in 3..5 { - let _ = Balances::make_free_balance_be(&i, 2000); - } - - mock::start_active_era(current_era); - - // add a new candidate for being a validator. account 3 controlled by 4. - assert_ok!(Staking::bond(RuntimeOrigin::signed(3), 1500, RewardDestination::Controller)); - - let claimed_rewards: BoundedVec<_, _> = - (start_reward_era..=last_reward_era).collect::>().try_into().unwrap(); - assert_eq!( - Staking::ledger(&3).unwrap(), - StakingLedger { - stash: 3, - total: 1500, - active: 1500, - unlocking: Default::default(), - claimed_rewards, - } - ); - - // start next era - current_era = current_era + 1; - mock::start_active_era(current_era); - - // claiming reward for last era in which validator was active works - assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(3), 3, current_era - 1)); - - // consumed weight for all payout_stakers dispatches that fail - let err_weight = ::WeightInfo::payout_stakers_alive_staked(0); - // cannot claim rewards for an era before bonding occured as it is - // already marked as claimed. - assert_noop!( - Staking::payout_stakers(RuntimeOrigin::signed(3), 3, current_era - 2), - Error::::AlreadyClaimed.with_weight(err_weight) - ); - - // decoding will fail now since Staking Ledger is in corrupt state - HistoryDepth::set(history_depth - 1); - assert_eq!(Staking::ledger(&4), None); - - // make sure stakers still cannot claim rewards that they are not meant to - assert_noop!( - Staking::payout_stakers(RuntimeOrigin::signed(3), 3, current_era - 2), - Error::::NotController - ); - - // fix the corrupted state for post conditions check - HistoryDepth::set(history_depth); - }); -} - -#[test] -fn reducing_history_depth_abrupt() { - // Verifies initial conditions of mock - ExtBuilder::default().nominate(false).build_and_execute(|| { - let original_history_depth = HistoryDepth::get(); - let mut current_era = original_history_depth + 10; - let last_reward_era = current_era - 1; - let start_reward_era = current_era - original_history_depth; - - // put some money in (stash, controller)=(3,3),(5,5). - for i in 3..7 { - let _ = Balances::make_free_balance_be(&i, 2000); - } - - // start current era - mock::start_active_era(current_era); - - // add a new candidate for being a staker. account 3 controlled by 3. - assert_ok!(Staking::bond(RuntimeOrigin::signed(3), 1500, RewardDestination::Controller)); - - // all previous era before the bonding action should be marked as - // claimed. 
- let claimed_rewards: BoundedVec<_, _> = - (start_reward_era..=last_reward_era).collect::>().try_into().unwrap(); - assert_eq!( - Staking::ledger(&3).unwrap(), - StakingLedger { - stash: 3, - total: 1500, - active: 1500, - unlocking: Default::default(), - claimed_rewards, - } - ); - - // next era - current_era = current_era + 1; - mock::start_active_era(current_era); - - // claiming reward for last era in which validator was active works - assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(3), 3, current_era - 1)); - - // next era - current_era = current_era + 1; - mock::start_active_era(current_era); - - // history_depth reduced without migration - let history_depth = original_history_depth - 1; - HistoryDepth::set(history_depth); - // claiming reward does not work anymore - assert_noop!( - Staking::payout_stakers(RuntimeOrigin::signed(3), 3, current_era - 1), - Error::::NotController - ); - - // new stakers can still bond - assert_ok!(Staking::bond(RuntimeOrigin::signed(5), 1200, RewardDestination::Controller)); - - // new staking ledgers created will be bounded by the current history depth - let last_reward_era = current_era - 1; - let start_reward_era = current_era - history_depth; - let claimed_rewards: BoundedVec<_, _> = - (start_reward_era..=last_reward_era).collect::>().try_into().unwrap(); - assert_eq!( - Staking::ledger(&5).unwrap(), - StakingLedger { - stash: 5, - total: 1200, - active: 1200, - unlocking: Default::default(), - claimed_rewards, - } - ); - - // fix the corrupted state for post conditions check - HistoryDepth::set(original_history_depth); - }); -} - #[test] fn reducing_max_unlocking_chunks_abrupt() { // Concern is on validators only @@ -5924,7 +6212,7 @@ fn reducing_max_unlocking_chunks_abrupt() { MaxUnlockingChunks::set(2); start_active_era(10); assert_ok!(Staking::bond(RuntimeOrigin::signed(3), 300, RewardDestination::Staked)); - assert!(matches!(Staking::ledger(3), Some(_))); + assert!(matches!(Staking::ledger(3.into()), Ok(_))); // when staker unbonds assert_ok!(Staking::unbond(RuntimeOrigin::signed(3), 20)); @@ -5933,8 +6221,8 @@ fn reducing_max_unlocking_chunks_abrupt() { // => 10 + 3 = 13 let expected_unlocking: BoundedVec, MaxUnlockingChunks> = bounded_vec![UnlockChunk { value: 20 as Balance, era: 13 as EraIndex }]; - assert!(matches!(Staking::ledger(3), - Some(StakingLedger { + assert!(matches!(Staking::ledger(3.into()), + Ok(StakingLedger { unlocking, .. }) if unlocking==expected_unlocking)); @@ -5945,8 +6233,8 @@ fn reducing_max_unlocking_chunks_abrupt() { // then another unlock chunk is added let expected_unlocking: BoundedVec, MaxUnlockingChunks> = bounded_vec![UnlockChunk { value: 20, era: 13 }, UnlockChunk { value: 50, era: 14 }]; - assert!(matches!(Staking::ledger(3), - Some(StakingLedger { + assert!(matches!(Staking::ledger(3.into()), + Ok(StakingLedger { unlocking, .. 
}) if unlocking==expected_unlocking)); @@ -6061,6 +6349,298 @@ fn set_min_commission_works_with_admin_origin() { }) } +#[test] +fn can_page_exposure() { + let mut others: Vec> = vec![]; + let mut total_stake: Balance = 0; + // 19 nominators + for i in 1..20 { + let individual_stake: Balance = 100 * i as Balance; + others.push(IndividualExposure { who: i, value: individual_stake }); + total_stake += individual_stake; + } + let own_stake: Balance = 500; + total_stake += own_stake; + assert_eq!(total_stake, 19_500); + // build full exposure set + let exposure: Exposure = + Exposure { total: total_stake, own: own_stake, others }; + + // when + let (exposure_metadata, exposure_page): ( + PagedExposureMetadata, + Vec>, + ) = exposure.clone().into_pages(3); + + // then + // 7 pages of nominators. + assert_eq!(exposure_page.len(), 7); + assert_eq!(exposure_metadata.page_count, 7); + // first page stake = 100 + 200 + 300 + assert!(matches!(exposure_page[0], ExposurePage { page_total: 600, .. })); + // second page stake = 0 + 400 + 500 + 600 + assert!(matches!(exposure_page[1], ExposurePage { page_total: 1500, .. })); + // verify overview has the total + assert_eq!(exposure_metadata.total, 19_500); + // verify total stake is same as in the original exposure. + assert_eq!( + exposure_page.iter().map(|a| a.page_total).reduce(|a, b| a + b).unwrap(), + 19_500 - exposure_metadata.own + ); + // verify own stake is correct + assert_eq!(exposure_metadata.own, 500); + // verify number of nominators are same as in the original exposure. + assert_eq!(exposure_page.iter().map(|a| a.others.len()).reduce(|a, b| a + b).unwrap(), 19); + assert_eq!(exposure_metadata.nominator_count, 19); +} + +#[test] +fn should_retain_era_info_only_upto_history_depth() { + ExtBuilder::default().build_and_execute(|| { + // remove existing exposure + Pallet::::clear_era_information(0); + let validator_stash = 10; + + for era in 0..4 { + ClaimedRewards::::insert(era, &validator_stash, vec![0, 1, 2]); + for page in 0..3 { + ErasStakersPaged::::insert( + (era, &validator_stash, page), + ExposurePage { page_total: 100, others: vec![] }, + ); + } + } + + for i in 0..4 { + // Count of entries remaining in ClaimedRewards = total - cleared_count + assert_eq!(ClaimedRewards::::iter().count(), (4 - i)); + // 1 claimed_rewards entry for each era + assert_eq!(ClaimedRewards::::iter_prefix(i as EraIndex).count(), 1); + // 3 entries (pages) for each era + assert_eq!(ErasStakersPaged::::iter_prefix((i as EraIndex,)).count(), 3); + + // when clear era info + Pallet::::clear_era_information(i as EraIndex); + + // then all era entries are cleared + assert_eq!(ClaimedRewards::::iter_prefix(i as EraIndex).count(), 0); + assert_eq!(ErasStakersPaged::::iter_prefix((i as EraIndex,)).count(), 0); + } + }); +} + +#[test] +fn test_legacy_claimed_rewards_is_checked_at_reward_payout() { + ExtBuilder::default().has_stakers(false).build_and_execute(|| { + // Create a validator: + bond_validator(11, 1000); + + // reward validator for next 2 eras + mock::start_active_era(1); + Pallet::::reward_by_ids(vec![(11, 1)]); + mock::start_active_era(2); + Pallet::::reward_by_ids(vec![(11, 1)]); + mock::start_active_era(3); + + //verify rewards are not claimed + assert_eq!( + EraInfo::::is_rewards_claimed_with_legacy_fallback( + 1, + Staking::ledger(11.into()).as_ref().unwrap(), + &11, + 0 + ), + false + ); + assert_eq!( + EraInfo::::is_rewards_claimed_with_legacy_fallback( + 2, + Staking::ledger(11.into()).as_ref().unwrap(), + &11, + 0 + ), + false + ); + + // assume reward 
claim for era 1 was stored in legacy storage + Ledger::::insert( + 11, + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 1000, + unlocking: Default::default(), + legacy_claimed_rewards: bounded_vec![1], + }, + ); + + // verify rewards for era 1 cannot be claimed + assert_noop!( + Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 1, 0), + Error::::AlreadyClaimed + .with_weight(::WeightInfo::payout_stakers_alive_staked(0)), + ); + assert_eq!( + EraInfo::::is_rewards_claimed_with_legacy_fallback( + 1, + Staking::ledger(11.into()).as_ref().unwrap(), + &11, + 0 + ), + true + ); + + // verify rewards for era 2 can be claimed + assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 2, 0)); + assert_eq!( + EraInfo::::is_rewards_claimed_with_legacy_fallback( + 2, + Staking::ledger(11.into()).as_ref().unwrap(), + &11, + 0 + ), + true + ); + // but the new claimed rewards for era 2 is not stored in legacy storage + assert_eq!( + Ledger::::get(11).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 1000, + unlocking: Default::default(), + legacy_claimed_rewards: bounded_vec![1], + }, + ); + // instead it is kept in `ClaimedRewards` + assert_eq!(ClaimedRewards::::get(2, 11), vec![0]); + }); +} + +#[test] +fn test_validator_exposure_is_backward_compatible_with_non_paged_rewards_payout() { + ExtBuilder::default().has_stakers(false).build_and_execute(|| { + // case 1: exposure exist in clipped. + // set page cap to 10 + MaxExposurePageSize::set(10); + bond_validator(11, 1000); + let mut expected_individual_exposures: Vec> = vec![]; + let mut total_exposure: Balance = 0; + // 1st exposure page + for i in 0..10 { + let who = 1000 + i; + let value = 1000 + i as Balance; + bond_nominator(who, value, vec![11]); + expected_individual_exposures.push(IndividualExposure { who, value }); + total_exposure += value; + } + + for i in 10..15 { + let who = 1000 + i; + let value = 1000 + i as Balance; + bond_nominator(who, value, vec![11]); + expected_individual_exposures.push(IndividualExposure { who, value }); + total_exposure += value; + } + + mock::start_active_era(1); + // reward validator for current era + Pallet::::reward_by_ids(vec![(11, 1)]); + + // start new era + mock::start_active_era(2); + // verify exposure for era 1 is stored in paged storage, that each exposure is stored in + // one and only one page, and no exposure is repeated. + let actual_exposure_page_0 = ErasStakersPaged::::get((1, 11, 0)).unwrap(); + let actual_exposure_page_1 = ErasStakersPaged::::get((1, 11, 1)).unwrap(); + expected_individual_exposures.iter().for_each(|exposure| { + assert!( + actual_exposure_page_0.others.contains(exposure) || + actual_exposure_page_1.others.contains(exposure) + ); + }); + assert_eq!( + expected_individual_exposures.len(), + actual_exposure_page_0.others.len() + actual_exposure_page_1.others.len() + ); + // verify `EraInfo` returns page from paged storage + assert_eq!( + EraInfo::::get_paged_exposure(1, &11, 0).unwrap().others(), + &actual_exposure_page_0.others + ); + assert_eq!( + EraInfo::::get_paged_exposure(1, &11, 1).unwrap().others(), + &actual_exposure_page_1.others + ); + assert_eq!(EraInfo::::get_page_count(1, &11), 2); + + // validator is exposed + assert!(::is_exposed_in_era(&11, &1)); + // nominators are exposed + for i in 10..15 { + let who: AccountId = 1000 + i; + assert!(::is_exposed_in_era(&who, &1)); + } + + // case 2: exposure exist in ErasStakers and ErasStakersClipped (legacy). 
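As background for the legacy-fallback case that follows, here is a compact sketch of the paging mechanics exercised by `can_page_exposure` above (the test name `page_exposure_sketch` is hypothetical; `AccountId` and `Balance` are the aliases already used in this file): `into_pages` keeps the validator's own stake in the metadata and chunks `others`, in order, into pages of at most `page_size` entries.

#[test]
fn page_exposure_sketch() {
    // three nominators backing a validator with 500 self-stake
    let exposure: Exposure<AccountId, Balance> = Exposure {
        total: 500 + 100 + 200 + 300,
        own: 500,
        others: vec![
            IndividualExposure { who: 1, value: 100 },
            IndividualExposure { who: 2, value: 200 },
            IndividualExposure { who: 3, value: 300 },
        ],
    };

    // page size 2 => two pages: [100, 200] and [300]
    let (meta, pages) = exposure.into_pages(2);
    assert_eq!(meta.page_count, 2);
    assert_eq!(meta.own, 500);
    assert_eq!(meta.nominator_count, 3);
    assert_eq!(pages[0].page_total, 300);
    assert_eq!(pages[1].page_total, 300);
}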
+ // delete paged storage and add exposure to clipped storage + >::remove((1, 11, 0)); + >::remove((1, 11, 1)); + >::remove(1, 11); + + >::insert( + 1, + 11, + Exposure { + total: total_exposure, + own: 1000, + others: expected_individual_exposures.clone(), + }, + ); + let mut clipped_exposure = expected_individual_exposures.clone(); + clipped_exposure.sort_by(|a, b| b.who.cmp(&a.who)); + clipped_exposure.truncate(10); + >::insert( + 1, + 11, + Exposure { total: total_exposure, own: 1000, others: clipped_exposure.clone() }, + ); + + // verify `EraInfo` returns exposure from clipped storage + let actual_exposure_paged = EraInfo::::get_paged_exposure(1, &11, 0).unwrap(); + assert_eq!(actual_exposure_paged.others(), &clipped_exposure); + assert_eq!(actual_exposure_paged.own(), 1000); + assert_eq!(actual_exposure_paged.exposure_metadata.page_count, 1); + + let actual_exposure_full = EraInfo::::get_full_exposure(1, &11); + assert_eq!(actual_exposure_full.others, expected_individual_exposures); + assert_eq!(actual_exposure_full.own, 1000); + assert_eq!(actual_exposure_full.total, total_exposure); + + // validator is exposed + assert!(::is_exposed_in_era(&11, &1)); + // nominators are exposed + for i in 10..15 { + let who: AccountId = 1000 + i; + assert!(::is_exposed_in_era(&who, &1)); + } + + // for pages other than 0, clipped storage returns empty exposure + assert_eq!(EraInfo::::get_paged_exposure(1, &11, 1), None); + // page size is 1 for clipped storage + assert_eq!(EraInfo::::get_page_count(1, &11), 1); + + // payout for page 0 works + assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 0, 0)); + // payout for page 1 fails + assert_noop!( + Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 0, 1), + Error::::InvalidPage + .with_weight(::WeightInfo::payout_stakers_alive_staked(0)) + ); + }); +} + mod staking_interface { use frame_support::storage::with_storage_layer; use sp_staking::StakingInterface; @@ -6090,7 +6670,7 @@ mod staking_interface { ExtBuilder::default().build_and_execute(|| { on_offence_now( &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![], }], &[Perbill::from_percent(100)], @@ -6134,3 +6714,157 @@ mod staking_interface { }) } } + +mod ledger { + use super::*; + + #[test] + fn paired_account_works() { + ExtBuilder::default().build_and_execute(|| { + assert_ok!(Staking::bond( + RuntimeOrigin::signed(10), + 100, + RewardDestination::Account(10) + )); + + assert_eq!(>::get(&10), Some(10)); + assert_eq!( + StakingLedger::::paired_account(StakingAccount::Controller(10)), + Some(10) + ); + assert_eq!(StakingLedger::::paired_account(StakingAccount::Stash(10)), Some(10)); + + assert_eq!(>::get(&42), None); + assert_eq!(StakingLedger::::paired_account(StakingAccount::Controller(42)), None); + assert_eq!(StakingLedger::::paired_account(StakingAccount::Stash(42)), None); + + // bond manually stash with different controller. 
This is deprecated but the migration + // has not been complete yet (controller: 100, stash: 200) + assert_ok!(bond_controller_stash(100, 200)); + assert_eq!(>::get(&200), Some(100)); + assert_eq!( + StakingLedger::::paired_account(StakingAccount::Controller(100)), + Some(200) + ); + assert_eq!( + StakingLedger::::paired_account(StakingAccount::Stash(200)), + Some(100) + ); + }) + } + + #[test] + fn get_ledger_works() { + ExtBuilder::default().build_and_execute(|| { + // stash does not exist + assert!(StakingLedger::::get(StakingAccount::Stash(42)).is_err()); + + // bonded and paired + assert_eq!(>::get(&11), Some(11)); + + match StakingLedger::::get(StakingAccount::Stash(11)) { + Ok(ledger) => { + assert_eq!(ledger.controller(), Some(11)); + assert_eq!(ledger.stash, 11); + }, + Err(_) => panic!("staking ledger must exist"), + }; + + // bond manually stash with different controller. This is deprecated but the migration + // has not been complete yet (controller: 100, stash: 200) + assert_ok!(bond_controller_stash(100, 200)); + assert_eq!(>::get(&200), Some(100)); + + match StakingLedger::::get(StakingAccount::Stash(200)) { + Ok(ledger) => { + assert_eq!(ledger.controller(), Some(100)); + assert_eq!(ledger.stash, 200); + }, + Err(_) => panic!("staking ledger must exist"), + }; + + match StakingLedger::::get(StakingAccount::Controller(100)) { + Ok(ledger) => { + assert_eq!(ledger.controller(), Some(100)); + assert_eq!(ledger.stash, 200); + }, + Err(_) => panic!("staking ledger must exist"), + }; + }) + } + + #[test] + fn bond_works() { + ExtBuilder::default().build_and_execute(|| { + assert!(!StakingLedger::::is_bonded(StakingAccount::Stash(42))); + assert!(>::get(&42).is_none()); + + let mut ledger: StakingLedger = StakingLedger::default_from(42); + let reward_dest = RewardDestination::Account(10); + + assert_ok!(ledger.clone().bond(reward_dest)); + assert!(StakingLedger::::is_bonded(StakingAccount::Stash(42))); + assert!(>::get(&42).is_some()); + assert_eq!(>::get(&42), reward_dest); + + // cannot bond again. + assert!(ledger.clone().bond(reward_dest).is_err()); + + // once bonded, update works as expected. + ledger.legacy_claimed_rewards = bounded_vec![1]; + assert_ok!(ledger.update()); + }) + } + + #[test] + fn is_bonded_works() { + ExtBuilder::default().build_and_execute(|| { + assert!(!StakingLedger::::is_bonded(StakingAccount::Stash(42))); + assert!(!StakingLedger::::is_bonded(StakingAccount::Controller(42))); + + // adds entry to Bonded without Ledger pair (should not happen). + >::insert(42, 42); + assert!(!StakingLedger::::is_bonded(StakingAccount::Controller(42))); + + assert_eq!(>::get(&11), Some(11)); + assert!(StakingLedger::::is_bonded(StakingAccount::Stash(11))); + assert!(StakingLedger::::is_bonded(StakingAccount::Controller(11))); + + >::remove(42); // ensures try-state checks pass. + }) + } + + #[test] + #[allow(deprecated)] + fn set_payee_errors_on_controller_destination() { + ExtBuilder::default().build_and_execute(|| { + Payee::::insert(11, RewardDestination::Staked); + assert_noop!( + Staking::set_payee(RuntimeOrigin::signed(11), RewardDestination::Controller), + Error::::ControllerDeprecated + ); + assert_eq!(Payee::::get(&11), RewardDestination::Staked); + }) + } + + #[test] + #[allow(deprecated)] + fn update_payee_migration_works() { + ExtBuilder::default().build_and_execute(|| { + // migrate a `Controller` variant to `Account` variant. 
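The `mod ledger` tests above address ledgers through `StakingAccount` keys rather than raw storage reads. A minimal usage sketch before the payee-migration body continues below (the test name `ledger_lookup_sketch` is hypothetical; it relies on the default `ExtBuilder` state from this file, where stash 11 is bonded to controller 11):

#[test]
fn ledger_lookup_sketch() {
    ExtBuilder::default().build_and_execute(|| {
        // lookups return a Result, so a missing ledger is an explicit error
        assert!(StakingLedger::<Test>::get(StakingAccount::Stash(42)).is_err());

        // stash 11 is bonded by the default mock setup; both key flavours resolve
        let ledger =
            StakingLedger::<Test>::get(StakingAccount::Stash(11)).expect("ledger exists");
        assert_eq!(ledger.stash, 11);
        assert_eq!(ledger.controller(), Some(11));
        assert_eq!(
            StakingLedger::<Test>::paired_account(StakingAccount::Controller(11)),
            Some(11)
        );
    });
}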
+ Payee::::insert(11, RewardDestination::Controller); + assert_eq!(Payee::::get(&11), RewardDestination::Controller); + assert_ok!(Staking::update_payee(RuntimeOrigin::signed(11), 11)); + assert_eq!(Payee::::get(&11), RewardDestination::Account(11)); + + // Do not migrate a variant if not `Controller`. + Payee::::insert(21, RewardDestination::Stash); + assert_eq!(Payee::::get(&21), RewardDestination::Stash); + assert_noop!( + Staking::update_payee(RuntimeOrigin::signed(11), 21), + Error::::NotController + ); + assert_eq!(Payee::::get(&21), RewardDestination::Stash); + }) + } +} diff --git a/substrate/frame/staking/src/weights.rs b/substrate/frame/staking/src/weights.rs index f2c65e677cac834a7e2b5d6a6d6ebadb54285014..ad9ca75a0328d77ef0d419ab2d4b30b116ee0de8 100644 --- a/substrate/frame/staking/src/weights.rs +++ b/substrate/frame/staking/src/weights.rs @@ -15,32 +15,29 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_staking +//! Autogenerated weights for `pallet_staking` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-17, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// target/production/substrate-node // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_staking -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/staking/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_staking +// --chain=dev +// --header=./substrate/HEADER-APACHE2 +// --output=./substrate/frame/staking/src/weights.rs +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +47,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_staking. +/// Weight functions needed for `pallet_staking`. 
pub trait WeightInfo { fn bond() -> Weight; fn bond_extra() -> Weight; @@ -62,6 +59,7 @@ pub trait WeightInfo { fn nominate(n: u32, ) -> Weight; fn chill() -> Weight; fn set_payee() -> Weight; + fn update_payee() -> Weight; fn set_controller() -> Weight; fn set_validator_count() -> Weight; fn force_no_eras() -> Weight; @@ -70,7 +68,6 @@ pub trait WeightInfo { fn set_invulnerables(v: u32, ) -> Weight; fn force_unstake(s: u32, ) -> Weight; fn cancel_deferred_slash(s: u32, ) -> Weight; - fn payout_stakers_dead_controller(n: u32, ) -> Weight; fn payout_stakers_alive_staked(n: u32, ) -> Weight; fn rebond(l: u32, ) -> Weight; fn reap_stash(s: u32, ) -> Weight; @@ -84,599 +81,589 @@ pub trait WeightInfo { fn set_min_commission() -> Weight; } -/// Weights for pallet_staking using the Substrate node and recommended hardware. +/// Weights for `pallet_staking` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Staking Bonded (r:1 w:1) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:1 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: Staking Payee (r:0 w:1) - /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:0 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn bond() -> Weight { // Proof Size summary in bytes: - // Measured: `1047` + // Measured: `927` // Estimated: `4764` - // Minimum execution time: 53_983_000 picoseconds. - Weight::from_parts(55_296_000, 4764) - .saturating_add(T::DbWeight::get().reads(5_u64)) + // Minimum execution time: 43_427_000 picoseconds. 
+ Weight::from_parts(45_221_000, 4764) + .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: Staking Bonded (r:1 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:1 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:3 w:3) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:2 w:2) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:3 w:3) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:2 w:2) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) fn bond_extra() -> Weight { // Proof Size summary in bytes: - // Measured: `2028` + // Measured: `1990` // Estimated: `8877` - // Minimum execution time: 96_590_000 picoseconds. - Weight::from_parts(98_921_000, 8877) + // Minimum execution time: 87_100_000 picoseconds. 
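For orientation when reading these regenerated entries: each function combines a benchmarked base weight (ref-time in picoseconds plus proof size in bytes) with per-operation database costs. A minimal sketch using the `bond()` figures above (the helper name `bond_weight_sketch` is hypothetical; `RocksDbWeight` is the constant set this file already imports):

fn bond_weight_sketch() -> Weight {
    // benchmarked base: ~45.2 µs of ref-time and 4764 bytes of proof size ...
    Weight::from_parts(45_221_000, 4764)
        // ... plus 3 storage reads and 4 storage writes at the RocksDB weights
        .saturating_add(RocksDbWeight::get().reads(3_u64))
        .saturating_add(RocksDbWeight::get().writes(4_u64))
}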
+ Weight::from_parts(90_599_000, 8877) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } - /// Storage: Staking Ledger (r:1 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:0) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking MinNominatorBond (r:1 w:0) - /// Proof: Staking MinNominatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:3 w:3) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:1 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:2 w:2) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:1 w:0) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:3 w:3) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:2 w:2) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) fn unbond() -> Weight { // Proof Size summary in bytes: - // Measured: `2233` + // Measured: `2195` // Estimated: `8877` - // Minimum execution time: 99_901_000 picoseconds. - Weight::from_parts(102_919_000, 8877) + // Minimum execution time: 91_488_000 picoseconds. 
+ Weight::from_parts(94_126_000, 8877) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } - /// Storage: Staking Ledger (r:1 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1021` + // Measured: `1115` // Estimated: `4764` - // Minimum execution time: 45_230_000 picoseconds. - Weight::from_parts(47_052_829, 4764) - // Standard Error: 1_044 - .saturating_add(Weight::from_parts(43_887, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(4_u64)) + // Minimum execution time: 42_713_000 picoseconds. 
+ Weight::from_parts(44_530_499, 4764) + // Standard Error: 1_067 + .saturating_add(Weight::from_parts(51_411, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Staking Ledger (r:1 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:1 w:1) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking SlashingSpans (r:1 w:1) - /// Proof Skipped: Staking SlashingSpans (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking Validators (r:1 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:1) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking CounterForNominators (r:1 w:1) - /// Proof: Staking CounterForNominators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:2 w:2) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: Staking Payee (r:0 w:1) - /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Staking SpanSlash (r:0 w:100) - /// Proof: Staking SpanSlash (max_values: None, max_size: Some(76), added: 2551, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::SlashingSpans` (r:1 w:1) + /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), 
added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Staking::SpanSlash` (r:0 w:100) + /// Proof: `Staking::SpanSlash` (`max_values`: None, `max_size`: Some(76), added: 2551, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_kill(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2294 + s * (4 ±0)` + // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 97_534_000 picoseconds. - Weight::from_parts(104_772_163, 6248) - // Standard Error: 3_674 - .saturating_add(Weight::from_parts(1_470_124, 0).saturating_mul(s.into())) + // Minimum execution time: 88_700_000 picoseconds. + Weight::from_parts(98_329_624, 6248) + // Standard Error: 4_477 + .saturating_add(Weight::from_parts(1_347_380, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(13_u64)) .saturating_add(T::DbWeight::get().writes(11_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) } - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking MinValidatorBond (r:1 w:0) - /// Proof: Staking MinValidatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Staking MinCommission (r:1 w:0) - /// Proof: Staking MinCommission (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:1 w:1) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking MaxValidatorsCount (r:1 w:0) - /// Proof: Staking MaxValidatorsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:0) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:1 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:1 w:1) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking CounterForValidators (r:1 w:1) - /// Proof: Staking CounterForValidators 
(max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinValidatorBond` (r:1 w:0) + /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinCommission` (r:1 w:0) + /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:1) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxValidatorsCount` (r:1 w:0) + /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:1 w:1) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForValidators` (r:1 w:1) + /// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn validate() -> Weight { // Proof Size summary in bytes: - // Measured: `1414` + // Measured: `1372` // Estimated: `4556` - // Minimum execution time: 57_467_000 picoseconds. - Weight::from_parts(59_437_000, 4556) + // Minimum execution time: 51_877_000 picoseconds. + Weight::from_parts(53_503_000, 4556) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:128 w:128) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:128 w:128) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) /// The range of component `k` is `[1, 128]`. fn kick(k: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1285 + k * (569 ±0)` + // Measured: `1280 + k * (569 ±0)` // Estimated: `4556 + k * (3033 ±0)` - // Minimum execution time: 32_857_000 picoseconds. - Weight::from_parts(37_116_967, 4556) - // Standard Error: 9_522 - .saturating_add(Weight::from_parts(8_796_167, 0).saturating_mul(k.into())) + // Minimum execution time: 29_604_000 picoseconds. 
+ Weight::from_parts(31_726_296, 4556) + // Standard Error: 6_350 + .saturating_add(Weight::from_parts(6_416_846, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) .saturating_add(Weight::from_parts(0, 3033).saturating_mul(k.into())) } - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking MinNominatorBond (r:1 w:0) - /// Proof: Staking MinNominatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:1) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking MaxNominatorsCount (r:1 w:0) - /// Proof: Staking MaxNominatorsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:17 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:1 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:2 w:2) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking CounterForNominators (r:1 w:1) - /// Proof: Staking CounterForNominators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:1 w:0) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxNominatorsCount` (r:1 w:0) + /// Proof: `Staking::MaxNominatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:17 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: 
Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// The range of component `n` is `[1, 16]`. fn nominate(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1908 + n * (102 ±0)` + // Measured: `1866 + n * (102 ±0)` // Estimated: `6248 + n * (2520 ±0)` - // Minimum execution time: 69_613_000 picoseconds. - Weight::from_parts(68_079_061, 6248) - // Standard Error: 18_554 - .saturating_add(Weight::from_parts(4_012_761, 0).saturating_mul(n.into())) + // Minimum execution time: 64_276_000 picoseconds. + Weight::from_parts(62_615_844, 6248) + // Standard Error: 14_914 + .saturating_add(Weight::from_parts(4_097_019, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(6_u64)) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(n.into())) } - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:1 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:1) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking CounterForNominators (r:1 w:1) - /// Proof: Staking CounterForNominators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:2 w:2) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn chill() -> Weight { 
// Proof Size summary in bytes: - // Measured: `1748` + // Measured: `1650` // Estimated: `6248` - // Minimum execution time: 60_430_000 picoseconds. - Weight::from_parts(62_702_000, 6248) + // Minimum execution time: 55_064_000 picoseconds. + Weight::from_parts(56_566_000, 6248) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking Payee (r:0 w:1) - /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn set_payee() -> Weight { // Proof Size summary in bytes: - // Measured: `808` + // Measured: `902` // Estimated: `4556` - // Minimum execution time: 14_276_000 picoseconds. - Weight::from_parts(14_766_000, 4556) - .saturating_add(T::DbWeight::get().reads(1_u64)) + // Minimum execution time: 16_626_000 picoseconds. + Weight::from_parts(17_519_000, 4556) + .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Staking Bonded (r:1 w:1) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:2 w:2) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - fn set_controller() -> Weight { + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:1 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + fn update_payee() -> Weight { // Proof Size summary in bytes: - // Measured: `907` - // Estimated: `8122` - // Minimum execution time: 21_710_000 picoseconds. - Weight::from_parts(22_430_000, 8122) + // Measured: `969` + // Estimated: `4556` + // Minimum execution time: 20_545_000 picoseconds. + Weight::from_parts(21_395_000, 4556) .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:2) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + fn set_controller() -> Weight { + // Proof Size summary in bytes: + // Measured: `902` + // Estimated: `4556` + // Minimum execution time: 20_179_000 picoseconds. 
+ Weight::from_parts(20_728_000, 4556) + .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Staking ValidatorCount (r:0 w:1) - /// Proof: Staking ValidatorCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Staking::ValidatorCount` (r:0 w:1) + /// Proof: `Staking::ValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn set_validator_count() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_970_000 picoseconds. - Weight::from_parts(3_120_000, 0) + // Minimum execution time: 2_594_000 picoseconds. + Weight::from_parts(2_777_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Staking ForceEra (r:0 w:1) - /// Proof: Staking ForceEra (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) + /// Storage: `Staking::ForceEra` (r:0 w:1) + /// Proof: `Staking::ForceEra` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) fn force_no_eras() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_362_000 picoseconds. - Weight::from_parts(9_785_000, 0) + // Minimum execution time: 8_302_000 picoseconds. + Weight::from_parts(8_741_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Staking ForceEra (r:0 w:1) - /// Proof: Staking ForceEra (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) + /// Storage: `Staking::ForceEra` (r:0 w:1) + /// Proof: `Staking::ForceEra` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) fn force_new_era() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_275_000 picoseconds. - Weight::from_parts(9_678_000, 0) + // Minimum execution time: 8_504_000 picoseconds. + Weight::from_parts(8_774_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Staking ForceEra (r:0 w:1) - /// Proof: Staking ForceEra (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) + /// Storage: `Staking::ForceEra` (r:0 w:1) + /// Proof: `Staking::ForceEra` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) fn force_new_era_always() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_414_000 picoseconds. - Weight::from_parts(9_848_000, 0) + // Minimum execution time: 8_474_000 picoseconds. + Weight::from_parts(8_740_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Staking Invulnerables (r:0 w:1) - /// Proof Skipped: Staking Invulnerables (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Staking::Invulnerables` (r:0 w:1) + /// Proof: `Staking::Invulnerables` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `v` is `[0, 1000]`. fn set_invulnerables(v: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_061_000 picoseconds. - Weight::from_parts(3_618_535, 0) - // Standard Error: 44 - .saturating_add(Weight::from_parts(10_774, 0).saturating_mul(v.into())) + // Minimum execution time: 2_732_000 picoseconds. 
+ Weight::from_parts(3_360_048, 0) + // Standard Error: 34 + .saturating_add(Weight::from_parts(9_964, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Staking Bonded (r:1 w:1) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking SlashingSpans (r:1 w:1) - /// Proof Skipped: Staking SlashingSpans (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking Validators (r:1 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:1) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking CounterForNominators (r:1 w:1) - /// Proof: Staking CounterForNominators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:2 w:2) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:0 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking Payee (r:0 w:1) - /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Staking SpanSlash (r:0 w:100) - /// Proof: Staking SpanSlash (max_values: None, max_size: Some(76), added: 2551, mode: MaxEncodedLen) + /// Storage: `Staking::SlashingSpans` (r:1 w:1) + /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: 
`Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Staking::SpanSlash` (r:0 w:100) + /// Proof: `Staking::SpanSlash` (`max_values`: None, `max_size`: Some(76), added: 2551, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn force_unstake(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2018 + s * (4 ±0)` + // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 87_914_000 picoseconds. - Weight::from_parts(95_688_129, 6248) - // Standard Error: 5_030 - .saturating_add(Weight::from_parts(1_487_249, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(12_u64)) + // Minimum execution time: 87_495_000 picoseconds. + Weight::from_parts(95_710_388, 6248) + // Standard Error: 5_849 + .saturating_add(Weight::from_parts(1_339_335, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(13_u64)) .saturating_add(T::DbWeight::get().writes(12_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) } - /// Storage: Staking UnappliedSlashes (r:1 w:1) - /// Proof Skipped: Staking UnappliedSlashes (max_values: None, max_size: None, mode: Measured) + /// Storage: `Staking::UnappliedSlashes` (r:1 w:1) + /// Proof: `Staking::UnappliedSlashes` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `s` is `[1, 1000]`. fn cancel_deferred_slash(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `66639` - // Estimated: `70104` - // Minimum execution time: 99_269_000 picoseconds. - Weight::from_parts(1_154_264_637, 70104) - // Standard Error: 76_592 - .saturating_add(Weight::from_parts(6_490_888, 0).saturating_mul(s.into())) + // Measured: `66672` + // Estimated: `70137` + // Minimum execution time: 98_255_000 picoseconds. 
+ Weight::from_parts(892_807_378, 70137) + // Standard Error: 57_735 + .saturating_add(Weight::from_parts(4_876_449, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ErasValidatorReward (r:1 w:0) - /// Proof: Staking ErasValidatorReward (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:257 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:1 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking ErasStakersClipped (r:1 w:0) - /// Proof Skipped: Staking ErasStakersClipped (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking ErasRewardPoints (r:1 w:0) - /// Proof Skipped: Staking ErasRewardPoints (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking ErasValidatorPrefs (r:1 w:0) - /// Proof: Staking ErasValidatorPrefs (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Staking Payee (r:257 w:0) - /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: System Account (r:257 w:257) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// The range of component `n` is `[0, 256]`. - fn payout_stakers_dead_controller(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `20217 + n * (143 ±0)` - // Estimated: `19844 + n * (2603 ±1)` - // Minimum execution time: 91_767_000 picoseconds. 
- Weight::from_parts(146_781_264, 19844) - // Standard Error: 31_341 - .saturating_add(Weight::from_parts(30_553_008, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(9_u64)) - .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes(2_u64)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2603).saturating_mul(n.into())) - } - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ErasValidatorReward (r:1 w:0) - /// Proof: Staking ErasValidatorReward (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:257 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:257 w:257) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking ErasStakersClipped (r:1 w:0) - /// Proof Skipped: Staking ErasStakersClipped (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking ErasRewardPoints (r:1 w:0) - /// Proof Skipped: Staking ErasRewardPoints (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking ErasValidatorPrefs (r:1 w:0) - /// Proof: Staking ErasValidatorPrefs (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Staking Payee (r:257 w:0) - /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: System Account (r:257 w:257) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:257 w:257) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:257 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Staking::Bonded` (r:257 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:257 w:257) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStakersClipped` (r:1 w:0) + /// Proof: `Staking::ErasStakersClipped` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ErasStakersOverview` (r:1 w:0) + /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) + /// Storage: `Staking::ClaimedRewards` (r:1 w:1) + /// Proof: `Staking::ClaimedRewards` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasValidatorReward` (r:1 w:0) + /// Proof: `Staking::ErasValidatorReward` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:257 w:257) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:257 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), 
added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:257 w:257) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStakersPaged` (r:1 w:0) + /// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ErasRewardPoints` (r:1 w:0) + /// Proof: `Staking::ErasRewardPoints` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ErasValidatorPrefs` (r:1 w:0) + /// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:257 w:0) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 256]`. fn payout_stakers_alive_staked(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `33190 + n * (377 ±0)` - // Estimated: `30845 + n * (3774 ±0)` - // Minimum execution time: 121_303_000 picoseconds. - Weight::from_parts(151_046_907, 30845) - // Standard Error: 41_899 - .saturating_add(Weight::from_parts(49_837_804, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) + // Measured: `33297 + n * (377 ±0)` + // Estimated: `30944 + n * (3774 ±3)` + // Minimum execution time: 144_258_000 picoseconds. + Weight::from_parts(175_256_796, 30944) + // Standard Error: 24_339 + .saturating_add(Weight::from_parts(46_011_700, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(14_u64)) .saturating_add(T::DbWeight::get().reads((6_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes(3_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 3774).saturating_mul(n.into())) } - /// Storage: Staking Ledger (r:1 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:3 w:3) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:1 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:2 w:2) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:3 w:3) + /// Proof: `VoterList::ListNodes` (`max_values`: None, 
`max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:2 w:2) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) /// The range of component `l` is `[1, 32]`. fn rebond(l: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2029 + l * (7 ±0)` + // Measured: `1991 + l * (7 ±0)` // Estimated: `8877` - // Minimum execution time: 90_068_000 picoseconds. - Weight::from_parts(93_137_456, 8877) - // Standard Error: 4_799 - .saturating_add(Weight::from_parts(54_421, 0).saturating_mul(l.into())) + // Minimum execution time: 83_532_000 picoseconds. + Weight::from_parts(86_757_943, 8877) + // Standard Error: 6_379 + .saturating_add(Weight::from_parts(57_566, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } - /// Storage: Staking Bonded (r:1 w:1) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:1 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking SlashingSpans (r:1 w:1) - /// Proof Skipped: Staking SlashingSpans (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking Validators (r:1 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:1) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking CounterForNominators (r:1 w:1) - /// Proof: Staking CounterForNominators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:2 w:2) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: Staking Payee (r:0 w:1) - /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Staking SpanSlash (r:0 w:100) - /// Proof: Staking SpanSlash (max_values: None, max_size: Some(76), added: 2551, mode: MaxEncodedLen) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::SlashingSpans` (r:1 w:1) + /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: 
None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Staking::SpanSlash` (r:0 w:100) + /// Proof: `Staking::SpanSlash` (`max_values`: None, `max_size`: Some(76), added: 2551, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 100]`. fn reap_stash(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2294 + s * (4 ±0)` + // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 103_139_000 picoseconds. - Weight::from_parts(107_036_296, 6248) - // Standard Error: 3_935 - .saturating_add(Weight::from_parts(1_465_860, 0).saturating_mul(s.into())) + // Minimum execution time: 96_776_000 picoseconds. 
+ Weight::from_parts(100_950_027, 6248) + // Standard Error: 4_719 + .saturating_add(Weight::from_parts(1_432_091, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(11_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) } - /// Storage: VoterList CounterForListNodes (r:1 w:0) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:200 w:0) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:110 w:0) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:110 w:0) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:11 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:110 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:110 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking CounterForValidators (r:1 w:0) - /// Proof: Staking CounterForValidators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ValidatorCount (r:1 w:0) - /// Proof: Staking ValidatorCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking MinimumValidatorCount (r:1 w:0) - /// Proof: Staking MinimumValidatorCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking CurrentEra (r:1 w:1) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ErasStakersClipped (r:0 w:10) - /// Proof Skipped: Staking ErasStakersClipped (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking ErasValidatorPrefs (r:0 w:10) - /// Proof: Staking ErasValidatorPrefs (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Staking ErasStakers (r:0 w:10) - /// Proof Skipped: Staking ErasStakers (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking ErasTotalStake (r:0 w:1) - /// Proof: Staking ErasTotalStake (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen) - /// Storage: Staking ErasStartSessionIndex (r:0 w:1) - /// Proof: Staking ErasStartSessionIndex (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: Staking MinimumActiveStake (r:0 w:1) - /// Proof: Staking MinimumActiveStake (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:0) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:200 w:0) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:110 w:0) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: 
`MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:110 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:110 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:110 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:11 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForValidators` (r:1 w:0) + /// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ValidatorCount` (r:1 w:0) + /// Proof: `Staking::ValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinimumValidatorCount` (r:1 w:0) + /// Proof: `Staking::MinimumValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:1) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasValidatorPrefs` (r:0 w:10) + /// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStakersPaged` (r:0 w:10) + /// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ErasStakersOverview` (r:0 w:10) + /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasTotalStake` (r:0 w:1) + /// Proof: `Staking::ErasTotalStake` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStartSessionIndex` (r:0 w:1) + /// Proof: `Staking::ErasStartSessionIndex` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinimumActiveStake` (r:0 w:1) + /// Proof: `Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// The range of component `v` is `[1, 10]`. /// The range of component `n` is `[0, 100]`. fn new_era(v: u32, n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + n * (720 ±0) + v * (3598 ±0)` // Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 587_156_000 picoseconds. - Weight::from_parts(590_176_000, 512390) - // Standard Error: 2_008_420 - .saturating_add(Weight::from_parts(64_526_052, 0).saturating_mul(v.into())) - // Standard Error: 200_128 - .saturating_add(Weight::from_parts(18_070_222, 0).saturating_mul(n.into())) + // Minimum execution time: 577_699_000 picoseconds. 
+ Weight::from_parts(582_827_000, 512390) + // Standard Error: 2_000_851 + .saturating_add(Weight::from_parts(67_316_744, 0).saturating_mul(v.into())) + // Standard Error: 199_373 + .saturating_add(Weight::from_parts(18_503_387, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(206_u64)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes(4_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(v.into()))) .saturating_add(Weight::from_parts(0, 3566).saturating_mul(n.into())) .saturating_add(Weight::from_parts(0, 3566).saturating_mul(v.into())) } - /// Storage: VoterList CounterForListNodes (r:1 w:0) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:200 w:0) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:2000 w:0) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:2000 w:0) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:1000 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:2000 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:2000 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking MinimumActiveStake (r:0 w:1) - /// Proof: Staking MinimumActiveStake (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:0) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:200 w:0) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2000 w:0) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:2000 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:2000 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:2000 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1000 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinimumActiveStake` (r:0 w:1) + /// Proof: `Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// The range of component `v` is `[500, 1000]`. /// The range of component `n` is `[500, 1000]`. 
fn get_npos_voters(v: u32, n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `3217 + n * (911 ±0) + v * (395 ±0)` + // Measured: `3175 + n * (911 ±0) + v * (395 ±0)` // Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 34_399_721_000 picoseconds. - Weight::from_parts(34_605_803_000, 512390) - // Standard Error: 380_106 - .saturating_add(Weight::from_parts(5_426_220, 0).saturating_mul(v.into())) - // Standard Error: 380_106 - .saturating_add(Weight::from_parts(3_318_197, 0).saturating_mul(n.into())) + // Minimum execution time: 34_048_778_000 picoseconds. + Weight::from_parts(34_397_777_000, 512390) + // Standard Error: 346_115 + .saturating_add(Weight::from_parts(3_704_941, 0).saturating_mul(v.into())) + // Standard Error: 346_115 + .saturating_add(Weight::from_parts(4_064_819, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(201_u64)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -684,709 +671,699 @@ impl WeightInfo for SubstrateWeight { .saturating_add(Weight::from_parts(0, 3566).saturating_mul(n.into())) .saturating_add(Weight::from_parts(0, 3566).saturating_mul(v.into())) } - /// Storage: Staking CounterForValidators (r:1 w:0) - /// Proof: Staking CounterForValidators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:1001 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) + /// Storage: `Staking::CounterForValidators` (r:1 w:0) + /// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1001 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// The range of component `v` is `[500, 1000]`. fn get_npos_targets(v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `983 + v * (50 ±0)` + // Measured: `979 + v * (50 ±0)` // Estimated: `3510 + v * (2520 ±0)` - // Minimum execution time: 2_392_849_000 picoseconds. - Weight::from_parts(64_373_879, 3510) - // Standard Error: 8_995 - .saturating_add(Weight::from_parts(4_721_536, 0).saturating_mul(v.into())) + // Minimum execution time: 2_473_149_000 picoseconds. 
+ Weight::from_parts(84_721_859, 3510) + // Standard Error: 8_690 + .saturating_add(Weight::from_parts(4_870_439, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into()))) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(v.into())) } - /// Storage: Staking MinCommission (r:0 w:1) - /// Proof: Staking MinCommission (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking MinValidatorBond (r:0 w:1) - /// Proof: Staking MinValidatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Staking MaxValidatorsCount (r:0 w:1) - /// Proof: Staking MaxValidatorsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ChillThreshold (r:0 w:1) - /// Proof: Staking ChillThreshold (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) - /// Storage: Staking MaxNominatorsCount (r:0 w:1) - /// Proof: Staking MaxNominatorsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking MinNominatorBond (r:0 w:1) - /// Proof: Staking MinNominatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: `Staking::MinCommission` (r:0 w:1) + /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinValidatorBond` (r:0 w:1) + /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxValidatorsCount` (r:0 w:1) + /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ChillThreshold` (r:0 w:1) + /// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxNominatorsCount` (r:0 w:1) + /// Proof: `Staking::MaxNominatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:0 w:1) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn set_staking_configs_all_set() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_529_000 picoseconds. - Weight::from_parts(7_970_000, 0) + // Minimum execution time: 5_312_000 picoseconds. 
+ Weight::from_parts(5_897_000, 0) .saturating_add(T::DbWeight::get().writes(6_u64)) } - /// Storage: Staking MinCommission (r:0 w:1) - /// Proof: Staking MinCommission (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking MinValidatorBond (r:0 w:1) - /// Proof: Staking MinValidatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Staking MaxValidatorsCount (r:0 w:1) - /// Proof: Staking MaxValidatorsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ChillThreshold (r:0 w:1) - /// Proof: Staking ChillThreshold (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) - /// Storage: Staking MaxNominatorsCount (r:0 w:1) - /// Proof: Staking MaxNominatorsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking MinNominatorBond (r:0 w:1) - /// Proof: Staking MinNominatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: `Staking::MinCommission` (r:0 w:1) + /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinValidatorBond` (r:0 w:1) + /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxValidatorsCount` (r:0 w:1) + /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ChillThreshold` (r:0 w:1) + /// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxNominatorsCount` (r:0 w:1) + /// Proof: `Staking::MaxNominatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:0 w:1) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn set_staking_configs_all_remove() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_011_000 picoseconds. - Weight::from_parts(7_317_000, 0) + // Minimum execution time: 4_676_000 picoseconds. 
+ Weight::from_parts(4_913_000, 0) .saturating_add(T::DbWeight::get().writes(6_u64)) } - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:1) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking ChillThreshold (r:1 w:0) - /// Proof: Staking ChillThreshold (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) - /// Storage: Staking MaxNominatorsCount (r:1 w:0) - /// Proof: Staking MaxNominatorsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking CounterForNominators (r:1 w:1) - /// Proof: Staking CounterForNominators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking MinNominatorBond (r:1 w:0) - /// Proof: Staking MinNominatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:1 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:2 w:2) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::ChillThreshold` (r:1 w:0) + /// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxNominatorsCount` (r:1 w:0) + /// Proof: `Staking::MaxNominatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:1 w:0) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn chill_other() -> Weight { // Proof Size summary in bytes: - // Measured: `1871` + // Measured: `1773` // Estimated: `6248` - // Minimum execution time: 75_982_000 picoseconds. 
- Weight::from_parts(77_412_000, 6248) + // Minimum execution time: 67_286_000 picoseconds. + Weight::from_parts(69_081_000, 6248) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } - /// Storage: Staking MinCommission (r:1 w:0) - /// Proof: Staking MinCommission (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:1 w:1) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) + /// Storage: `Staking::MinCommission` (r:1 w:0) + /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:1) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) fn force_apply_min_commission() -> Weight { // Proof Size summary in bytes: - // Measured: `694` + // Measured: `691` // Estimated: `3510` - // Minimum execution time: 13_923_000 picoseconds. - Weight::from_parts(14_356_000, 3510) + // Minimum execution time: 12_749_000 picoseconds. + Weight::from_parts(13_275_000, 3510) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Staking MinCommission (r:0 w:1) - /// Proof: Staking MinCommission (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Staking::MinCommission` (r:0 w:1) + /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn set_min_commission() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_415_000 picoseconds. - Weight::from_parts(3_679_000, 0) + // Minimum execution time: 3_155_000 picoseconds. + Weight::from_parts(3_319_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. 
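The generated file carries two implementations of the same `WeightInfo` trait: `SubstrateWeight<T>` above prices storage access through the runtime-configured `T::DbWeight`, while the unit impl that follows falls back to `RocksDbWeight` and, per the comment above, is kept for backwards compatibility and tests. As a minimal sketch (not part of this patch; the `_example` helper is purely illustrative), one of the benchmarked weights can be read through that unit impl like so:

    // Illustrative only; assumes the workspace's `pallet-staking` and
    // `frame-support` crates are available as dependencies.
    use frame_support::weights::Weight;
    use pallet_staking::weights::WeightInfo;

    fn _example() -> Weight {
        // `withdraw_unbonded_kill` benchmarked over `s` slashing spans:
        // a base ref-time plus a per-span term, 13 reads and (11 + s)
        // writes priced with `RocksDbWeight`, and a proof-size estimate
        // of 6248 + 4 * s bytes, as recorded in the hunks below.
        <() as WeightInfo>::withdraw_unbonded_kill(2)
    }

A production runtime would instead point its pallet `Config`'s `WeightInfo` associated type at `pallet_staking::weights::SubstrateWeight<Runtime>`, so that the same formulas are charged against the runtime's own `DbWeight`.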
impl WeightInfo for () { - /// Storage: Staking Bonded (r:1 w:1) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:1 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: Staking Payee (r:0 w:1) - /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:0 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn bond() -> Weight { // Proof Size summary in bytes: - // Measured: `1047` + // Measured: `927` // Estimated: `4764` - // Minimum execution time: 53_983_000 picoseconds. - Weight::from_parts(55_296_000, 4764) - .saturating_add(RocksDbWeight::get().reads(5_u64)) + // Minimum execution time: 43_427_000 picoseconds. 
+ Weight::from_parts(45_221_000, 4764) + .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: Staking Bonded (r:1 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:1 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:3 w:3) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:2 w:2) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:3 w:3) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:2 w:2) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) fn bond_extra() -> Weight { // Proof Size summary in bytes: - // Measured: `2028` + // Measured: `1990` // Estimated: `8877` - // Minimum execution time: 96_590_000 picoseconds. - Weight::from_parts(98_921_000, 8877) + // Minimum execution time: 87_100_000 picoseconds. 
+ Weight::from_parts(90_599_000, 8877) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } - /// Storage: Staking Ledger (r:1 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:0) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking MinNominatorBond (r:1 w:0) - /// Proof: Staking MinNominatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:3 w:3) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:1 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:2 w:2) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:1 w:0) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:3 w:3) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:2 w:2) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) fn unbond() -> Weight { // Proof Size summary in bytes: - // Measured: `2233` + // Measured: `2195` // Estimated: `8877` - // Minimum execution time: 99_901_000 picoseconds. - Weight::from_parts(102_919_000, 8877) + // Minimum execution time: 91_488_000 picoseconds. 
+ Weight::from_parts(94_126_000, 8877) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } - /// Storage: Staking Ledger (r:1 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1021` + // Measured: `1115` // Estimated: `4764` - // Minimum execution time: 45_230_000 picoseconds. - Weight::from_parts(47_052_829, 4764) - // Standard Error: 1_044 - .saturating_add(Weight::from_parts(43_887, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(4_u64)) + // Minimum execution time: 42_713_000 picoseconds. 
+ Weight::from_parts(44_530_499, 4764) + // Standard Error: 1_067 + .saturating_add(Weight::from_parts(51_411, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Staking Ledger (r:1 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:1 w:1) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking SlashingSpans (r:1 w:1) - /// Proof Skipped: Staking SlashingSpans (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking Validators (r:1 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:1) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking CounterForNominators (r:1 w:1) - /// Proof: Staking CounterForNominators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:2 w:2) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: Staking Payee (r:0 w:1) - /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Staking SpanSlash (r:0 w:100) - /// Proof: Staking SpanSlash (max_values: None, max_size: Some(76), added: 2551, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::SlashingSpans` (r:1 w:1) + /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), 
added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Staking::SpanSlash` (r:0 w:100) + /// Proof: `Staking::SpanSlash` (`max_values`: None, `max_size`: Some(76), added: 2551, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_kill(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2294 + s * (4 ±0)` + // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 97_534_000 picoseconds. - Weight::from_parts(104_772_163, 6248) - // Standard Error: 3_674 - .saturating_add(Weight::from_parts(1_470_124, 0).saturating_mul(s.into())) + // Minimum execution time: 88_700_000 picoseconds. + Weight::from_parts(98_329_624, 6248) + // Standard Error: 4_477 + .saturating_add(Weight::from_parts(1_347_380, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(13_u64)) .saturating_add(RocksDbWeight::get().writes(11_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) } - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking MinValidatorBond (r:1 w:0) - /// Proof: Staking MinValidatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Staking MinCommission (r:1 w:0) - /// Proof: Staking MinCommission (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:1 w:1) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking MaxValidatorsCount (r:1 w:0) - /// Proof: Staking MaxValidatorsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:0) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:1 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:1 w:1) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking CounterForValidators (r:1 w:1) - /// Proof: Staking CounterForValidators 
(max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinValidatorBond` (r:1 w:0) + /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinCommission` (r:1 w:0) + /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:1) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxValidatorsCount` (r:1 w:0) + /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:1 w:1) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForValidators` (r:1 w:1) + /// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn validate() -> Weight { // Proof Size summary in bytes: - // Measured: `1414` + // Measured: `1372` // Estimated: `4556` - // Minimum execution time: 57_467_000 picoseconds. - Weight::from_parts(59_437_000, 4556) + // Minimum execution time: 51_877_000 picoseconds. + Weight::from_parts(53_503_000, 4556) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:128 w:128) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:128 w:128) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) /// The range of component `k` is `[1, 128]`. fn kick(k: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1285 + k * (569 ±0)` + // Measured: `1280 + k * (569 ±0)` // Estimated: `4556 + k * (3033 ±0)` - // Minimum execution time: 32_857_000 picoseconds. - Weight::from_parts(37_116_967, 4556) - // Standard Error: 9_522 - .saturating_add(Weight::from_parts(8_796_167, 0).saturating_mul(k.into())) + // Minimum execution time: 29_604_000 picoseconds. 
+ Weight::from_parts(31_726_296, 4556) + // Standard Error: 6_350 + .saturating_add(Weight::from_parts(6_416_846, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(k.into()))) .saturating_add(Weight::from_parts(0, 3033).saturating_mul(k.into())) } - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking MinNominatorBond (r:1 w:0) - /// Proof: Staking MinNominatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:1) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking MaxNominatorsCount (r:1 w:0) - /// Proof: Staking MaxNominatorsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:17 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:1 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:2 w:2) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking CounterForNominators (r:1 w:1) - /// Proof: Staking CounterForNominators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:1 w:0) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxNominatorsCount` (r:1 w:0) + /// Proof: `Staking::MaxNominatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:17 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: 
Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// The range of component `n` is `[1, 16]`. fn nominate(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1908 + n * (102 ±0)` + // Measured: `1866 + n * (102 ±0)` // Estimated: `6248 + n * (2520 ±0)` - // Minimum execution time: 69_613_000 picoseconds. - Weight::from_parts(68_079_061, 6248) - // Standard Error: 18_554 - .saturating_add(Weight::from_parts(4_012_761, 0).saturating_mul(n.into())) + // Minimum execution time: 64_276_000 picoseconds. + Weight::from_parts(62_615_844, 6248) + // Standard Error: 14_914 + .saturating_add(Weight::from_parts(4_097_019, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(6_u64)) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(n.into())) } - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:1 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:1) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking CounterForNominators (r:1 w:1) - /// Proof: Staking CounterForNominators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:2 w:2) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn chill() -> 
Weight { // Proof Size summary in bytes: - // Measured: `1748` + // Measured: `1650` // Estimated: `6248` - // Minimum execution time: 60_430_000 picoseconds. - Weight::from_parts(62_702_000, 6248) + // Minimum execution time: 55_064_000 picoseconds. + Weight::from_parts(56_566_000, 6248) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking Payee (r:0 w:1) - /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn set_payee() -> Weight { // Proof Size summary in bytes: - // Measured: `808` + // Measured: `902` // Estimated: `4556` - // Minimum execution time: 14_276_000 picoseconds. - Weight::from_parts(14_766_000, 4556) - .saturating_add(RocksDbWeight::get().reads(1_u64)) + // Minimum execution time: 16_626_000 picoseconds. + Weight::from_parts(17_519_000, 4556) + .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Staking Bonded (r:1 w:1) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:2 w:2) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - fn set_controller() -> Weight { + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:1 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + fn update_payee() -> Weight { // Proof Size summary in bytes: - // Measured: `907` - // Estimated: `8122` - // Minimum execution time: 21_710_000 picoseconds. - Weight::from_parts(22_430_000, 8122) + // Measured: `969` + // Estimated: `4556` + // Minimum execution time: 20_545_000 picoseconds. + Weight::from_parts(21_395_000, 4556) .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:2) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + fn set_controller() -> Weight { + // Proof Size summary in bytes: + // Measured: `902` + // Estimated: `4556` + // Minimum execution time: 20_179_000 picoseconds. 
+ Weight::from_parts(20_728_000, 4556) + .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Staking ValidatorCount (r:0 w:1) - /// Proof: Staking ValidatorCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Staking::ValidatorCount` (r:0 w:1) + /// Proof: `Staking::ValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn set_validator_count() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_970_000 picoseconds. - Weight::from_parts(3_120_000, 0) + // Minimum execution time: 2_594_000 picoseconds. + Weight::from_parts(2_777_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Staking ForceEra (r:0 w:1) - /// Proof: Staking ForceEra (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) + /// Storage: `Staking::ForceEra` (r:0 w:1) + /// Proof: `Staking::ForceEra` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) fn force_no_eras() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_362_000 picoseconds. - Weight::from_parts(9_785_000, 0) + // Minimum execution time: 8_302_000 picoseconds. + Weight::from_parts(8_741_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Staking ForceEra (r:0 w:1) - /// Proof: Staking ForceEra (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) + /// Storage: `Staking::ForceEra` (r:0 w:1) + /// Proof: `Staking::ForceEra` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) fn force_new_era() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_275_000 picoseconds. - Weight::from_parts(9_678_000, 0) + // Minimum execution time: 8_504_000 picoseconds. + Weight::from_parts(8_774_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Staking ForceEra (r:0 w:1) - /// Proof: Staking ForceEra (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) + /// Storage: `Staking::ForceEra` (r:0 w:1) + /// Proof: `Staking::ForceEra` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) fn force_new_era_always() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_414_000 picoseconds. - Weight::from_parts(9_848_000, 0) + // Minimum execution time: 8_474_000 picoseconds. + Weight::from_parts(8_740_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Staking Invulnerables (r:0 w:1) - /// Proof Skipped: Staking Invulnerables (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Staking::Invulnerables` (r:0 w:1) + /// Proof: `Staking::Invulnerables` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `v` is `[0, 1000]`. fn set_invulnerables(v: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_061_000 picoseconds. - Weight::from_parts(3_618_535, 0) - // Standard Error: 44 - .saturating_add(Weight::from_parts(10_774, 0).saturating_mul(v.into())) + // Minimum execution time: 2_732_000 picoseconds. 
+ Weight::from_parts(3_360_048, 0) + // Standard Error: 34 + .saturating_add(Weight::from_parts(9_964, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Staking Bonded (r:1 w:1) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking SlashingSpans (r:1 w:1) - /// Proof Skipped: Staking SlashingSpans (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking Validators (r:1 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:1) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking CounterForNominators (r:1 w:1) - /// Proof: Staking CounterForNominators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:2 w:2) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:0 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking Payee (r:0 w:1) - /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Staking SpanSlash (r:0 w:100) - /// Proof: Staking SpanSlash (max_values: None, max_size: Some(76), added: 2551, mode: MaxEncodedLen) + /// Storage: `Staking::SlashingSpans` (r:1 w:1) + /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: 
`Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Staking::SpanSlash` (r:0 w:100) + /// Proof: `Staking::SpanSlash` (`max_values`: None, `max_size`: Some(76), added: 2551, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn force_unstake(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2018 + s * (4 ±0)` + // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 87_914_000 picoseconds. - Weight::from_parts(95_688_129, 6248) - // Standard Error: 5_030 - .saturating_add(Weight::from_parts(1_487_249, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(12_u64)) + // Minimum execution time: 87_495_000 picoseconds. + Weight::from_parts(95_710_388, 6248) + // Standard Error: 5_849 + .saturating_add(Weight::from_parts(1_339_335, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(13_u64)) .saturating_add(RocksDbWeight::get().writes(12_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) } - /// Storage: Staking UnappliedSlashes (r:1 w:1) - /// Proof Skipped: Staking UnappliedSlashes (max_values: None, max_size: None, mode: Measured) + /// Storage: `Staking::UnappliedSlashes` (r:1 w:1) + /// Proof: `Staking::UnappliedSlashes` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `s` is `[1, 1000]`. fn cancel_deferred_slash(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `66639` - // Estimated: `70104` - // Minimum execution time: 99_269_000 picoseconds. - Weight::from_parts(1_154_264_637, 70104) - // Standard Error: 76_592 - .saturating_add(Weight::from_parts(6_490_888, 0).saturating_mul(s.into())) + // Measured: `66672` + // Estimated: `70137` + // Minimum execution time: 98_255_000 picoseconds. 
+ Weight::from_parts(892_807_378, 70137) + // Standard Error: 57_735 + .saturating_add(Weight::from_parts(4_876_449, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ErasValidatorReward (r:1 w:0) - /// Proof: Staking ErasValidatorReward (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:257 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:1 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking ErasStakersClipped (r:1 w:0) - /// Proof Skipped: Staking ErasStakersClipped (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking ErasRewardPoints (r:1 w:0) - /// Proof Skipped: Staking ErasRewardPoints (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking ErasValidatorPrefs (r:1 w:0) - /// Proof: Staking ErasValidatorPrefs (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Staking Payee (r:257 w:0) - /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: System Account (r:257 w:257) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// The range of component `n` is `[0, 256]`. - fn payout_stakers_dead_controller(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `20217 + n * (143 ±0)` - // Estimated: `19844 + n * (2603 ±1)` - // Minimum execution time: 91_767_000 picoseconds. 
- Weight::from_parts(146_781_264, 19844) - // Standard Error: 31_341 - .saturating_add(Weight::from_parts(30_553_008, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(9_u64)) - .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(n.into()))) - .saturating_add(RocksDbWeight::get().writes(2_u64)) - .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2603).saturating_mul(n.into())) - } - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ErasValidatorReward (r:1 w:0) - /// Proof: Staking ErasValidatorReward (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:257 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:257 w:257) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking ErasStakersClipped (r:1 w:0) - /// Proof Skipped: Staking ErasStakersClipped (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking ErasRewardPoints (r:1 w:0) - /// Proof Skipped: Staking ErasRewardPoints (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking ErasValidatorPrefs (r:1 w:0) - /// Proof: Staking ErasValidatorPrefs (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Staking Payee (r:257 w:0) - /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: System Account (r:257 w:257) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:257 w:257) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:257 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Staking::Bonded` (r:257 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:257 w:257) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStakersClipped` (r:1 w:0) + /// Proof: `Staking::ErasStakersClipped` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ErasStakersOverview` (r:1 w:0) + /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) + /// Storage: `Staking::ClaimedRewards` (r:1 w:1) + /// Proof: `Staking::ClaimedRewards` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasValidatorReward` (r:1 w:0) + /// Proof: `Staking::ErasValidatorReward` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:257 w:257) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:257 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: 
Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:257 w:257) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStakersPaged` (r:1 w:0) + /// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ErasRewardPoints` (r:1 w:0) + /// Proof: `Staking::ErasRewardPoints` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ErasValidatorPrefs` (r:1 w:0) + /// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:257 w:0) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 256]`. fn payout_stakers_alive_staked(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `33190 + n * (377 ±0)` - // Estimated: `30845 + n * (3774 ±0)` - // Minimum execution time: 121_303_000 picoseconds. - Weight::from_parts(151_046_907, 30845) - // Standard Error: 41_899 - .saturating_add(Weight::from_parts(49_837_804, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) + // Measured: `33297 + n * (377 ±0)` + // Estimated: `30944 + n * (3774 ±3)` + // Minimum execution time: 144_258_000 picoseconds. + Weight::from_parts(175_256_796, 30944) + // Standard Error: 24_339 + .saturating_add(Weight::from_parts(46_011_700, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(14_u64)) .saturating_add(RocksDbWeight::get().reads((6_u64).saturating_mul(n.into()))) - .saturating_add(RocksDbWeight::get().writes(3_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 3774).saturating_mul(n.into())) } - /// Storage: Staking Ledger (r:1 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:3 w:3) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:1 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:2 w:2) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:3 w:3) + /// Proof: `VoterList::ListNodes` 
(`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:2 w:2) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) /// The range of component `l` is `[1, 32]`. fn rebond(l: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2029 + l * (7 ±0)` + // Measured: `1991 + l * (7 ±0)` // Estimated: `8877` - // Minimum execution time: 90_068_000 picoseconds. - Weight::from_parts(93_137_456, 8877) - // Standard Error: 4_799 - .saturating_add(Weight::from_parts(54_421, 0).saturating_mul(l.into())) + // Minimum execution time: 83_532_000 picoseconds. + Weight::from_parts(86_757_943, 8877) + // Standard Error: 6_379 + .saturating_add(Weight::from_parts(57_566, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } - /// Storage: Staking Bonded (r:1 w:1) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:1 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking SlashingSpans (r:1 w:1) - /// Proof Skipped: Staking SlashingSpans (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking Validators (r:1 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:1) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking CounterForNominators (r:1 w:1) - /// Proof: Staking CounterForNominators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:2 w:2) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: Staking Payee (r:0 w:1) - /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Staking SpanSlash (r:0 w:100) - /// Proof: Staking SpanSlash (max_values: None, max_size: Some(76), added: 2551, mode: MaxEncodedLen) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::SlashingSpans` (r:1 w:1) + /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: 
`Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Staking::SpanSlash` (r:0 w:100) + /// Proof: `Staking::SpanSlash` (`max_values`: None, `max_size`: Some(76), added: 2551, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 100]`. fn reap_stash(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2294 + s * (4 ±0)` + // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 103_139_000 picoseconds. - Weight::from_parts(107_036_296, 6248) - // Standard Error: 3_935 - .saturating_add(Weight::from_parts(1_465_860, 0).saturating_mul(s.into())) + // Minimum execution time: 96_776_000 picoseconds. 
+ Weight::from_parts(100_950_027, 6248) + // Standard Error: 4_719 + .saturating_add(Weight::from_parts(1_432_091, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(11_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) } - /// Storage: VoterList CounterForListNodes (r:1 w:0) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:200 w:0) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:110 w:0) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:110 w:0) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:11 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:110 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:110 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking CounterForValidators (r:1 w:0) - /// Proof: Staking CounterForValidators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ValidatorCount (r:1 w:0) - /// Proof: Staking ValidatorCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking MinimumValidatorCount (r:1 w:0) - /// Proof: Staking MinimumValidatorCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking CurrentEra (r:1 w:1) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ErasStakersClipped (r:0 w:10) - /// Proof Skipped: Staking ErasStakersClipped (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking ErasValidatorPrefs (r:0 w:10) - /// Proof: Staking ErasValidatorPrefs (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Staking ErasStakers (r:0 w:10) - /// Proof Skipped: Staking ErasStakers (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking ErasTotalStake (r:0 w:1) - /// Proof: Staking ErasTotalStake (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen) - /// Storage: Staking ErasStartSessionIndex (r:0 w:1) - /// Proof: Staking ErasStartSessionIndex (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: Staking MinimumActiveStake (r:0 w:1) - /// Proof: Staking MinimumActiveStake (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:0) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:200 w:0) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:110 w:0) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, 
mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:110 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:110 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:110 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:11 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForValidators` (r:1 w:0) + /// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ValidatorCount` (r:1 w:0) + /// Proof: `Staking::ValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinimumValidatorCount` (r:1 w:0) + /// Proof: `Staking::MinimumValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:1) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasValidatorPrefs` (r:0 w:10) + /// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStakersPaged` (r:0 w:10) + /// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ErasStakersOverview` (r:0 w:10) + /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasTotalStake` (r:0 w:1) + /// Proof: `Staking::ErasTotalStake` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStartSessionIndex` (r:0 w:1) + /// Proof: `Staking::ErasStartSessionIndex` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinimumActiveStake` (r:0 w:1) + /// Proof: `Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// The range of component `v` is `[1, 10]`. /// The range of component `n` is `[0, 100]`. fn new_era(v: u32, n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + n * (720 ±0) + v * (3598 ±0)` // Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 587_156_000 picoseconds. - Weight::from_parts(590_176_000, 512390) - // Standard Error: 2_008_420 - .saturating_add(Weight::from_parts(64_526_052, 0).saturating_mul(v.into())) - // Standard Error: 200_128 - .saturating_add(Weight::from_parts(18_070_222, 0).saturating_mul(n.into())) + // Minimum execution time: 577_699_000 picoseconds. 
+ Weight::from_parts(582_827_000, 512390) + // Standard Error: 2_000_851 + .saturating_add(Weight::from_parts(67_316_744, 0).saturating_mul(v.into())) + // Standard Error: 199_373 + .saturating_add(Weight::from_parts(18_503_387, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(206_u64)) .saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(n.into()))) - .saturating_add(RocksDbWeight::get().writes(4_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(v.into()))) .saturating_add(Weight::from_parts(0, 3566).saturating_mul(n.into())) .saturating_add(Weight::from_parts(0, 3566).saturating_mul(v.into())) } - /// Storage: VoterList CounterForListNodes (r:1 w:0) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:200 w:0) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:2000 w:0) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:2000 w:0) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:1000 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:2000 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:2000 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking MinimumActiveStake (r:0 w:1) - /// Proof: Staking MinimumActiveStake (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:0) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:200 w:0) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2000 w:0) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:2000 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:2000 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:2000 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1000 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinimumActiveStake` (r:0 w:1) + /// Proof: `Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// The range of component `v` is `[500, 1000]`. /// The range of component `n` is `[500, 1000]`. 
fn get_npos_voters(v: u32, n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `3217 + n * (911 ±0) + v * (395 ±0)` + // Measured: `3175 + n * (911 ±0) + v * (395 ±0)` // Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 34_399_721_000 picoseconds. - Weight::from_parts(34_605_803_000, 512390) - // Standard Error: 380_106 - .saturating_add(Weight::from_parts(5_426_220, 0).saturating_mul(v.into())) - // Standard Error: 380_106 - .saturating_add(Weight::from_parts(3_318_197, 0).saturating_mul(n.into())) + // Minimum execution time: 34_048_778_000 picoseconds. + Weight::from_parts(34_397_777_000, 512390) + // Standard Error: 346_115 + .saturating_add(Weight::from_parts(3_704_941, 0).saturating_mul(v.into())) + // Standard Error: 346_115 + .saturating_add(Weight::from_parts(4_064_819, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(201_u64)) .saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -1394,113 +1371,113 @@ impl WeightInfo for () { .saturating_add(Weight::from_parts(0, 3566).saturating_mul(n.into())) .saturating_add(Weight::from_parts(0, 3566).saturating_mul(v.into())) } - /// Storage: Staking CounterForValidators (r:1 w:0) - /// Proof: Staking CounterForValidators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:1001 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) + /// Storage: `Staking::CounterForValidators` (r:1 w:0) + /// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1001 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// The range of component `v` is `[500, 1000]`. fn get_npos_targets(v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `983 + v * (50 ±0)` + // Measured: `979 + v * (50 ±0)` // Estimated: `3510 + v * (2520 ±0)` - // Minimum execution time: 2_392_849_000 picoseconds. - Weight::from_parts(64_373_879, 3510) - // Standard Error: 8_995 - .saturating_add(Weight::from_parts(4_721_536, 0).saturating_mul(v.into())) + // Minimum execution time: 2_473_149_000 picoseconds. 
+ Weight::from_parts(84_721_859, 3510) + // Standard Error: 8_690 + .saturating_add(Weight::from_parts(4_870_439, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(v.into()))) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(v.into())) } - /// Storage: Staking MinCommission (r:0 w:1) - /// Proof: Staking MinCommission (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking MinValidatorBond (r:0 w:1) - /// Proof: Staking MinValidatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Staking MaxValidatorsCount (r:0 w:1) - /// Proof: Staking MaxValidatorsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ChillThreshold (r:0 w:1) - /// Proof: Staking ChillThreshold (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) - /// Storage: Staking MaxNominatorsCount (r:0 w:1) - /// Proof: Staking MaxNominatorsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking MinNominatorBond (r:0 w:1) - /// Proof: Staking MinNominatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: `Staking::MinCommission` (r:0 w:1) + /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinValidatorBond` (r:0 w:1) + /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxValidatorsCount` (r:0 w:1) + /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ChillThreshold` (r:0 w:1) + /// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxNominatorsCount` (r:0 w:1) + /// Proof: `Staking::MaxNominatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:0 w:1) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn set_staking_configs_all_set() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_529_000 picoseconds. - Weight::from_parts(7_970_000, 0) + // Minimum execution time: 5_312_000 picoseconds. 
+ Weight::from_parts(5_897_000, 0) .saturating_add(RocksDbWeight::get().writes(6_u64)) } - /// Storage: Staking MinCommission (r:0 w:1) - /// Proof: Staking MinCommission (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking MinValidatorBond (r:0 w:1) - /// Proof: Staking MinValidatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Staking MaxValidatorsCount (r:0 w:1) - /// Proof: Staking MaxValidatorsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ChillThreshold (r:0 w:1) - /// Proof: Staking ChillThreshold (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) - /// Storage: Staking MaxNominatorsCount (r:0 w:1) - /// Proof: Staking MaxNominatorsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking MinNominatorBond (r:0 w:1) - /// Proof: Staking MinNominatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: `Staking::MinCommission` (r:0 w:1) + /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinValidatorBond` (r:0 w:1) + /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxValidatorsCount` (r:0 w:1) + /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ChillThreshold` (r:0 w:1) + /// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxNominatorsCount` (r:0 w:1) + /// Proof: `Staking::MaxNominatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:0 w:1) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn set_staking_configs_all_remove() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_011_000 picoseconds. - Weight::from_parts(7_317_000, 0) + // Minimum execution time: 4_676_000 picoseconds. 
+ Weight::from_parts(4_913_000, 0) .saturating_add(RocksDbWeight::get().writes(6_u64)) } - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:1) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking ChillThreshold (r:1 w:0) - /// Proof: Staking ChillThreshold (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) - /// Storage: Staking MaxNominatorsCount (r:1 w:0) - /// Proof: Staking MaxNominatorsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking CounterForNominators (r:1 w:1) - /// Proof: Staking CounterForNominators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking MinNominatorBond (r:1 w:0) - /// Proof: Staking MinNominatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:1 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:2 w:2) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::ChillThreshold` (r:1 w:0) + /// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxNominatorsCount` (r:1 w:0) + /// Proof: `Staking::MaxNominatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:1 w:0) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn chill_other() -> Weight { // Proof Size summary in bytes: - // Measured: `1871` + // Measured: `1773` // Estimated: `6248` - // Minimum execution time: 75_982_000 picoseconds. 
- Weight::from_parts(77_412_000, 6248) + // Minimum execution time: 67_286_000 picoseconds. + Weight::from_parts(69_081_000, 6248) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } - /// Storage: Staking MinCommission (r:1 w:0) - /// Proof: Staking MinCommission (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:1 w:1) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) + /// Storage: `Staking::MinCommission` (r:1 w:0) + /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:1) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) fn force_apply_min_commission() -> Weight { // Proof Size summary in bytes: - // Measured: `694` + // Measured: `691` // Estimated: `3510` - // Minimum execution time: 13_923_000 picoseconds. - Weight::from_parts(14_356_000, 3510) + // Minimum execution time: 12_749_000 picoseconds. + Weight::from_parts(13_275_000, 3510) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Staking MinCommission (r:0 w:1) - /// Proof: Staking MinCommission (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Staking::MinCommission` (r:0 w:1) + /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn set_min_commission() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_415_000 picoseconds. - Weight::from_parts(3_679_000, 0) + // Minimum execution time: 3_155_000 picoseconds. 
+ Weight::from_parts(3_319_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } } diff --git a/substrate/frame/state-trie-migration/Cargo.toml b/substrate/frame/state-trie-migration/Cargo.toml index 83218a131361894b955ce1c79085c93c5626c92d..9e81397fadd5c2238794f7561541f5e359779151 100644 --- a/substrate/frame/state-trie-migration/Cargo.toml +++ b/substrate/frame/state-trie-migration/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.188", optional = true } thousands = { version = "0.2.0", optional = true } zstd = { version = "0.12.4", default-features = false, optional = true } diff --git a/substrate/frame/state-trie-migration/src/lib.rs b/substrate/frame/state-trie-migration/src/lib.rs index 3e69b219bb527781ce555d48eea692cbe69f159c..ac3996459cd4ac2b88178c18a537f9f972b20046 100644 --- a/substrate/frame/state-trie-migration/src/lib.rs +++ b/substrate/frame/state-trie-migration/src/lib.rs @@ -1126,6 +1126,7 @@ mod mock { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); type MaxHolds = (); } diff --git a/substrate/frame/statement/Cargo.toml b/substrate/frame/statement/Cargo.toml index eded681c47cffcf1e35afd9a7e88c232409a98be..a5c8cf5b8de78caa988f89a3798ea10f4962bee1 100644 --- a/substrate/frame/statement/Cargo.toml +++ b/substrate/frame/statement/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"]} -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} sp-statement-store = { path = "../../primitives/statement-store", default-features = false} @@ -33,6 +33,7 @@ std = [ "codec/std", "frame-support/std", "frame-system/std", + "log/std", "pallet-balances/std", "scale-info/std", "sp-api/std", diff --git a/substrate/frame/statement/src/mock.rs b/substrate/frame/statement/src/mock.rs index 79d2aa7d891d5015b20d71447e71ef42f287b141..10a74e100df842cf1321c8343a1127fdcca679a1 100644 --- a/substrate/frame/statement/src/mock.rs +++ b/substrate/frame/statement/src/mock.rs @@ -86,6 +86,7 @@ impl pallet_balances::Config for Test { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type MaxHolds = (); } diff --git a/substrate/frame/sudo/Cargo.toml b/substrate/frame/sudo/Cargo.toml index a4934346d5dec87d4c6f8df9a924bb1781b0039e..ef507a953164f7ff1d9aac57089ab01fd51536cd 100644 --- a/substrate/frame/sudo/Cargo.toml +++ b/substrate/frame/sudo/Cargo.toml @@ -15,14 +15,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } 
+scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} sp-io = { path = "../../primitives/io", default-features = false} sp-runtime = { path = "../../primitives/runtime", default-features = false} sp-std = { path = "../../primitives/std", default-features = false} -docify = "0.2.4" +docify = "0.2.6" [dev-dependencies] sp-core = { path = "../../primitives/core" } diff --git a/substrate/frame/sudo/src/benchmarking.rs b/substrate/frame/sudo/src/benchmarking.rs index 6a365c1873c1de4580f0f69310180ddc4df18d58..e64233fe7480a9d82c9c07709dd9b403544bff6a 100644 --- a/substrate/frame/sudo/src/benchmarking.rs +++ b/substrate/frame/sudo/src/benchmarking.rs @@ -22,41 +22,40 @@ use crate::Pallet; use frame_benchmarking::v2::*; use frame_system::RawOrigin; -const SEED: u32 = 0; - -fn assert_last_event<T: Config>(generic_event: <T as Config>::RuntimeEvent) { - frame_system::Pallet::<T>::assert_last_event(generic_event.into()); +fn assert_last_event<T: Config>(generic_event: crate::Event<T>) { + let re: <T as Config>::RuntimeEvent = generic_event.into(); + frame_system::Pallet::<T>::assert_last_event(re.into()); } -#[benchmarks( where <T as Config>::RuntimeCall: From<frame_system::Call<T>>)] +#[benchmarks(where <T as Config>::RuntimeCall: From<frame_system::Call<T>>)] mod benchmarks { use super::*; #[benchmark] fn set_key() { let caller: T::AccountId = whitelisted_caller(); - Key::<T>::put(caller.clone()); + Key::<T>::put(&caller); - let new_sudoer: T::AccountId = account("sudoer", 0, SEED); + let new_sudoer: T::AccountId = account("sudoer", 0, 0); let new_sudoer_lookup = T::Lookup::unlookup(new_sudoer.clone()); #[extrinsic_call] _(RawOrigin::Signed(caller.clone()), new_sudoer_lookup); - assert_last_event::<T>(Event::KeyChanged { old_sudoer: Some(caller) }.into()); + assert_last_event::<T>(Event::KeyChanged { old: Some(caller), new: new_sudoer }); } #[benchmark] fn sudo() { let caller: T::AccountId = whitelisted_caller(); - Key::<T>::put(caller.clone()); + Key::<T>::put(&caller); - let call: <T as Config>::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + let call = frame_system::Call::remark { remark: vec![] }.into(); #[extrinsic_call] - _(RawOrigin::Signed(caller.clone()), Box::new(call.clone())); + _(RawOrigin::Signed(caller), Box::new(call)); - assert_last_event::<T>(Event::Sudid { sudo_result: Ok(()) }.into()) + assert_last_event::<T>(Event::Sudid { sudo_result: Ok(()) }) } #[benchmark] @@ -64,15 +63,26 @@ mod benchmarks { let caller: T::AccountId = whitelisted_caller(); Key::<T>::put(caller.clone()); - let call: <T as Config>::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + let call = frame_system::Call::remark { remark: vec![] }.into(); + + let who: T::AccountId = account("as", 0, 0); + let who_lookup = T::Lookup::unlookup(who); + + #[extrinsic_call] + _(RawOrigin::Signed(caller), who_lookup, Box::new(call)); + + assert_last_event::<T>(Event::SudoAsDone { sudo_result: Ok(()) }) + } - let who: T::AccountId = account("as", 0, SEED); - let who_lookup = T::Lookup::unlookup(who.clone()); + #[benchmark] + fn remove_key() { + let caller: T::AccountId = whitelisted_caller(); + Key::<T>::put(&caller); #[extrinsic_call] - _(RawOrigin::Signed(caller), who_lookup, Box::new(call.clone())); + _(RawOrigin::Signed(caller.clone())); - assert_last_event::<T>(Event::SudoAsDone { sudo_result: Ok(()) }.into()) + assert_last_event::<T>(Event::KeyRemoved {}); } impl_benchmark_test_suite!(Pallet, crate::mock::new_bench_ext(), crate::mock::Test); diff --git a/substrate/frame/sudo/src/lib.rs
b/substrate/frame/sudo/src/lib.rs index 0c869bec7f076f7cc2e240adda1eeb77b8409277..d556c5eb6ae638988307a6d14955051b4f2c0870 100644 --- a/substrate/frame/sudo/src/lib.rs +++ b/substrate/frame/sudo/src/lib.rs @@ -146,14 +146,36 @@ type AccountIdLookupOf<T> = <<T as frame_system::Config>::Lookup as StaticLookup pub mod pallet { use super::{DispatchResult, *}; use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; + use frame_system::{pallet_prelude::*, RawOrigin}; - #[pallet::config] + /// Default preludes for [`Config`]. + pub mod config_preludes { + use super::*; + use frame_support::derive_impl; + + /// Default prelude sensible to be used in a testing environment. + pub struct TestDefaultConfig; + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig, no_aggregated_types)] + impl frame_system::DefaultConfig for TestDefaultConfig {} + + #[frame_support::register_default_impl(TestDefaultConfig)] + impl DefaultConfig for TestDefaultConfig { + type WeightInfo = (); + #[inject_runtime_type] + type RuntimeEvent = (); + #[inject_runtime_type] + type RuntimeCall = (); + } + } + #[pallet::config(with_default)] pub trait Config: frame_system::Config { /// The overarching event type. + #[pallet::no_default_bounds] type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>; /// A sudo-able call. + #[pallet::no_default_bounds] type RuntimeCall: Parameter + UnfilteredDispatchable<RuntimeOrigin = Self::RuntimeOrigin> + GetDispatchInfo; @@ -168,11 +190,6 @@ pub mod pallet { #[pallet::call] impl<T: Config> Pallet<T> { /// Authenticates the sudo key and dispatches a function call with `Root` origin. - /// - /// The dispatch origin for this call must be _Signed_. - /// - /// ## Complexity - /// - O(1). #[pallet::call_index(0)] #[pallet::weight({ let dispatch_info = call.get_dispatch_info(); @@ -185,12 +202,11 @@ pub mod pallet { origin: OriginFor<T>, call: Box<<T as Config>::RuntimeCall>, ) -> DispatchResultWithPostInfo { - // This is a public call, so we ensure that the origin is some signed account. - let sender = ensure_signed(origin)?; - ensure!(Self::key().map_or(false, |k| sender == k), Error::<T>::RequireSudo); + Self::ensure_sudo(origin)?; - let res = call.dispatch_bypass_filter(frame_system::RawOrigin::Root.into()); + let res = call.dispatch_bypass_filter(RawOrigin::Root.into()); Self::deposit_event(Event::Sudid { sudo_result: res.map(|_| ()).map_err(|e| e.error) }); + // Sudo user does not pay a fee. Ok(Pays::No.into()) } @@ -200,46 +216,37 @@ pub mod pallet { /// Sudo user to specify the weight of the call. /// /// The dispatch origin for this call must be _Signed_. - /// - /// ## Complexity - /// - O(1). #[pallet::call_index(1)] - #[pallet::weight((*_weight, call.get_dispatch_info().class))] + #[pallet::weight((*weight, call.get_dispatch_info().class))] pub fn sudo_unchecked_weight( origin: OriginFor<T>, call: Box<<T as Config>::RuntimeCall>, - _weight: Weight, + weight: Weight, ) -> DispatchResultWithPostInfo { - // This is a public call, so we ensure that the origin is some signed account. - let sender = ensure_signed(origin)?; - ensure!(Self::key().map_or(false, |k| sender == k), Error::<T>::RequireSudo); + Self::ensure_sudo(origin)?; + let _ = weight; // We don't check the weight witness since it is a root call. - let res = call.dispatch_bypass_filter(frame_system::RawOrigin::Root.into()); + let res = call.dispatch_bypass_filter(RawOrigin::Root.into()); Self::deposit_event(Event::Sudid { sudo_result: res.map(|_| ()).map_err(|e| e.error) }); + // Sudo user does not pay a fee.
Ok(Pays::No.into()) } /// Authenticates the current sudo key and sets the given AccountId (`new`) as the new sudo /// key. - /// - /// The dispatch origin for this call must be _Signed_. - /// - /// ## Complexity - /// - O(1). #[pallet::call_index(2)] #[pallet::weight(T::WeightInfo::set_key())] pub fn set_key( origin: OriginFor<T>, new: AccountIdLookupOf<T>, ) -> DispatchResultWithPostInfo { - // This is a public call, so we ensure that the origin is some signed account. - let sender = ensure_signed(origin)?; - ensure!(Self::key().map_or(false, |k| sender == k), Error::<T>::RequireSudo); + Self::ensure_sudo(origin)?; + let new = T::Lookup::lookup(new)?; + Self::deposit_event(Event::KeyChanged { old: Key::<T>::get(), new: new.clone() }); + Key::<T>::put(new); - Self::deposit_event(Event::KeyChanged { old_sudoer: Key::<T>::get() }); - Key::<T>::put(&new); // Sudo user does not pay a fee. Ok(Pays::No.into()) } @@ -248,9 +255,6 @@ pub mod pallet { /// a given account. /// /// The dispatch origin for this call must be _Signed_. - /// - /// ## Complexity - /// - O(1). #[pallet::call_index(3)] #[pallet::weight({ let dispatch_info = call.get_dispatch_info(); @@ -264,17 +268,29 @@ pub mod pallet { who: AccountIdLookupOf<T>, call: Box<<T as Config>::RuntimeCall>, ) -> DispatchResultWithPostInfo { - // This is a public call, so we ensure that the origin is some signed account. - let sender = ensure_signed(origin)?; - ensure!(Self::key().map_or(false, |k| sender == k), Error::<T>::RequireSudo); + Self::ensure_sudo(origin)?; let who = T::Lookup::lookup(who)?; - - let res = call.dispatch_bypass_filter(frame_system::RawOrigin::Signed(who).into()); - + let res = call.dispatch_bypass_filter(RawOrigin::Signed(who).into()); Self::deposit_event(Event::SudoAsDone { sudo_result: res.map(|_| ()).map_err(|e| e.error), }); + + // Sudo user does not pay a fee. + Ok(Pays::No.into()) + } + + /// Permanently removes the sudo key. + /// + /// **This cannot be un-done.** + #[pallet::call_index(4)] + #[pallet::weight(T::WeightInfo::remove_key())] + pub fn remove_key(origin: OriginFor<T>) -> DispatchResultWithPostInfo { + Self::ensure_sudo(origin)?; + + Self::deposit_event(Event::KeyRemoved {}); + Key::<T>::kill(); + // Sudo user does not pay a fee. Ok(Pays::No.into()) } @@ -290,9 +306,13 @@ pub mod pallet { }, /// The sudo key has been updated. KeyChanged { - /// The old sudo key if one was previously set. - old_sudoer: Option<T::AccountId>, + /// The old sudo key (if one was previously set). + old: Option<T::AccountId>, + /// The new sudo key (if one was set). + new: T::AccountId, }, + /// The key was permanently removed. + KeyRemoved, /// A [sudo_as](Pallet::sudo_as) call just took place. SudoAsDone { /// The result of the call made by the sudo user. @@ -301,9 +321,9 @@ pub mod pallet { } #[pallet::error] - /// Error for the Sudo pallet + /// Error for the Sudo pallet. pub enum Error<T> { - /// Sender must be the Sudo account + /// Sender must be the Sudo account. RequireSudo, } @@ -322,8 +342,19 @@ pub mod pallet { #[pallet::genesis_build] impl<T: Config> BuildGenesisConfig for GenesisConfig<T> { fn build(&self) { - if let Some(ref key) = self.key { - Key::<T>::put(key); + Key::<T>::set(self.key.clone()); + } + } + + impl<T: Config> Pallet<T> { + /// Ensure that the caller is the sudo key.
+ pub(crate) fn ensure_sudo(origin: OriginFor<T>) -> DispatchResult { + let sender = ensure_signed(origin)?; + + if Self::key().map_or(false, |k| k == sender) { + Ok(()) + } else { + Err(Error::<T>::RequireSudo.into()) } } } diff --git a/substrate/frame/sudo/src/mock.rs b/substrate/frame/sudo/src/mock.rs index 9e78e474f4e5a6dd030b39254f7fd7fb65cc7a54..6f123b7c82b2bc4ac576bfe68b39597b10a33fa9 100644 --- a/substrate/frame/sudo/src/mock.rs +++ b/substrate/frame/sudo/src/mock.rs @@ -19,8 +19,8 @@ use super::*; use crate as sudo; -use frame_support::traits::{ConstU32, ConstU64, Contains}; -use sp_core::H256; +use frame_support::traits::{ConstU32, Contains}; +use sp_core::{ConstU64, H256}; use sp_io; use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup}, @@ -156,7 +156,9 @@ pub fn new_test_ext(root_key: u64) -> sp_io::TestExternalities { sudo::GenesisConfig::<Test> { key: Some(root_key) } .assimilate_storage(&mut t) .unwrap(); - t.into() + let mut ext: sp_io::TestExternalities = t.into(); + ext.execute_with(|| System::set_block_number(1)); + ext } #[cfg(feature = "runtime-benchmarks")] diff --git a/substrate/frame/sudo/src/tests.rs b/substrate/frame/sudo/src/tests.rs index 6963ba2e6a05133b719e546b63f2cd0c722aa264..13dc069ddef1cbbcc1b94b25606bdc519d4b44c6 100644 --- a/substrate/frame/sudo/src/tests.rs +++ b/substrate/frame/sudo/src/tests.rs @@ -59,9 +59,6 @@ fn sudo_basics() { #[test] fn sudo_emits_events_correctly() { new_test_ext(1).execute_with(|| { - // Set block number to 1 because events are not emitted on block 0. - System::set_block_number(1); - // Should emit event to indicate success when called with the root `key` and `call` is `Ok`. let call = Box::new(RuntimeCall::Logger(LoggerCall::privileged_i32_log { i: 42, @@ -118,9 +115,6 @@ fn sudo_unchecked_weight_basics() { #[test] fn sudo_unchecked_weight_emits_events_correctly() { new_test_ext(1).execute_with(|| { - // Set block number to 1 because events are not emitted on block 0. - System::set_block_number(1); - // Should emit event to indicate success when called with the root `key` and `call` is `Ok`. let call = Box::new(RuntimeCall::Logger(LoggerCall::privileged_i32_log { i: 42, @@ -154,15 +148,24 @@ fn set_key_basics() { #[test] fn set_key_emits_events_correctly() { new_test_ext(1).execute_with(|| { - // Set block number to 1 because events are not emitted on block 0. - System::set_block_number(1); - // A root `key` can change the root `key`. assert_ok!(Sudo::set_key(RuntimeOrigin::signed(1), 2)); - System::assert_has_event(TestEvent::Sudo(Event::KeyChanged { old_sudoer: Some(1) })); + System::assert_has_event(TestEvent::Sudo(Event::KeyChanged { old: Some(1), new: 2 })); // Double check. assert_ok!(Sudo::set_key(RuntimeOrigin::signed(2), 4)); - System::assert_has_event(TestEvent::Sudo(Event::KeyChanged { old_sudoer: Some(2) })); + System::assert_has_event(TestEvent::Sudo(Event::KeyChanged { old: Some(2), new: 4 })); + }); +} + +#[test] +fn remove_key_works() { + new_test_ext(1).execute_with(|| { + assert_ok!(Sudo::remove_key(RuntimeOrigin::signed(1))); + assert!(Sudo::key().is_none()); + System::assert_has_event(TestEvent::Sudo(Event::KeyRemoved {})); + + assert_noop!(Sudo::remove_key(RuntimeOrigin::signed(1)), Error::<Test>::RequireSudo); + assert_noop!(Sudo::set_key(RuntimeOrigin::signed(1), 1), Error::<Test>::RequireSudo); }); } @@ -201,9 +204,6 @@ fn sudo_as_basics() { #[test] fn sudo_as_emits_events_correctly() { new_test_ext(1).execute_with(|| { - // Set block number to 1 because events are not emitted on block 0.
- System::set_block_number(1); - // A non-privileged function will work when passed to `sudo_as` with the root `key`. let call = Box::new(RuntimeCall::Logger(LoggerCall::non_privileged_log { i: 42, diff --git a/substrate/frame/sudo/src/weights.rs b/substrate/frame/sudo/src/weights.rs index 0cdd0c8a81f2c94dc8a967d128f21b00bd0f22e2..10d1a9f7a513b7ae98947e27832842a42ab0c589 100644 --- a/substrate/frame/sudo/src/weights.rs +++ b/substrate/frame/sudo/src/weights.rs @@ -15,32 +15,29 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_sudo. +//! Autogenerated weights for `pallet_sudo` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// target/production/substrate-node // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_sudo -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/sudo/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_sudo +// --chain=dev +// --header=./substrate/HEADER-APACHE2 +// --output=./substrate/frame/sudo/src/weights.rs +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,80 +47,103 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_sudo. +/// Weight functions needed for `pallet_sudo`. pub trait WeightInfo { fn set_key() -> Weight; fn sudo() -> Weight; fn sudo_as() -> Weight; + fn remove_key() -> Weight; } -/// Weights for pallet_sudo using the Substrate node and recommended hardware. +/// Weights for `pallet_sudo` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Sudo Key (r:1 w:1) - /// Proof: Sudo Key (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) + /// Storage: `Sudo::Key` (r:1 w:1) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) fn set_key() -> Weight { // Proof Size summary in bytes: // Measured: `165` // Estimated: `1517` - // Minimum execution time: 12_918_000 picoseconds. - Weight::from_parts(13_403_000, 1517) + // Minimum execution time: 9_600_000 picoseconds. 
+ Weight::from_parts(10_076_000, 1517) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Sudo Key (r:1 w:0) - /// Proof: Sudo Key (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) + /// Storage: `Sudo::Key` (r:1 w:0) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) fn sudo() -> Weight { // Proof Size summary in bytes: // Measured: `165` // Estimated: `1517` - // Minimum execution time: 12_693_000 picoseconds. - Weight::from_parts(13_001_000, 1517) + // Minimum execution time: 10_453_000 picoseconds. + Weight::from_parts(10_931_000, 1517) .saturating_add(T::DbWeight::get().reads(1_u64)) } - /// Storage: Sudo Key (r:1 w:0) - /// Proof: Sudo Key (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) + /// Storage: `Sudo::Key` (r:1 w:0) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) fn sudo_as() -> Weight { // Proof Size summary in bytes: // Measured: `165` // Estimated: `1517` - // Minimum execution time: 12_590_000 picoseconds. - Weight::from_parts(12_994_000, 1517) + // Minimum execution time: 10_202_000 picoseconds. + Weight::from_parts(10_800_000, 1517) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } + /// Storage: `Sudo::Key` (r:1 w:1) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + fn remove_key() -> Weight { + // Proof Size summary in bytes: + // Measured: `165` + // Estimated: `1517` + // Minimum execution time: 8_555_000 picoseconds. + Weight::from_parts(8_846_000, 1517) .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: Sudo Key (r:1 w:1) - /// Proof: Sudo Key (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) + /// Storage: `Sudo::Key` (r:1 w:1) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) fn set_key() -> Weight { // Proof Size summary in bytes: // Measured: `165` // Estimated: `1517` - // Minimum execution time: 12_918_000 picoseconds. - Weight::from_parts(13_403_000, 1517) + // Minimum execution time: 9_600_000 picoseconds. + Weight::from_parts(10_076_000, 1517) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Sudo Key (r:1 w:0) - /// Proof: Sudo Key (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) + /// Storage: `Sudo::Key` (r:1 w:0) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) fn sudo() -> Weight { // Proof Size summary in bytes: // Measured: `165` // Estimated: `1517` - // Minimum execution time: 12_693_000 picoseconds. - Weight::from_parts(13_001_000, 1517) + // Minimum execution time: 10_453_000 picoseconds. 
+ Weight::from_parts(10_931_000, 1517) .saturating_add(RocksDbWeight::get().reads(1_u64)) } - /// Storage: Sudo Key (r:1 w:0) - /// Proof: Sudo Key (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) + /// Storage: `Sudo::Key` (r:1 w:0) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) fn sudo_as() -> Weight { // Proof Size summary in bytes: // Measured: `165` // Estimated: `1517` - // Minimum execution time: 12_590_000 picoseconds. - Weight::from_parts(12_994_000, 1517) + // Minimum execution time: 10_202_000 picoseconds. + Weight::from_parts(10_800_000, 1517) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + } + /// Storage: `Sudo::Key` (r:1 w:1) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + fn remove_key() -> Weight { + // Proof Size summary in bytes: + // Measured: `165` + // Estimated: `1517` + // Minimum execution time: 8_555_000 picoseconds. + Weight::from_parts(8_846_000, 1517) .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) } } diff --git a/substrate/frame/support/Cargo.toml b/substrate/frame/support/Cargo.toml index 5cb5d6d12ab784c5fabd205de84bdba227f3edec..b8e21e60761ae30f51a94560e4a345ef98f5db4a 100644 --- a/substrate/frame/support/Cargo.toml +++ b/substrate/frame/support/Cargo.toml @@ -13,9 +13,10 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] +array-bytes = { version = "6.1", default-features = false } serde = { version = "1.0.188", default-features = false, features = ["alloc", "derive"] } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-metadata = { version = "16.0.0", default-features = false, features = ["current"] } sp-api = { path = "../../primitives/api", default-features = false, features = [ "frame-metadata" ] } sp-std = { path = "../../primitives/std", default-features = false} @@ -30,7 +31,7 @@ sp-weights = { path = "../../primitives/weights", default-features = false} sp-debug-derive = { path = "../../primitives/debug-derive", default-features = false} sp-metadata-ir = { path = "../../primitives/metadata-ir", default-features = false} tt-call = "1.0.8" -macro_magic = "0.4.2" +macro_magic = "0.5.0" frame-support-procedural = { path = "procedural", default-features = false} paste = "1.0" sp-state-machine = { path = "../../primitives/state-machine", default-features = false, optional = true} @@ -42,8 +43,8 @@ sp-core-hashing-proc-macro = { path = "../../primitives/core/hashing/proc-macro" k256 = { version = "0.13.1", default-features = false, features = ["ecdsa"] } environmental = { version = "1.1.4", default-features = false } sp-genesis-builder = { path = "../../primitives/genesis-builder", default-features=false} -serde_json = { version = "1.0.107", default-features = false, features = ["alloc"] } -docify = "0.2.4" +serde_json = { version = "1.0.108", default-features = false, features = ["alloc"] } +docify = "0.2.6" static_assertions = "1.1.0" aquamarine = { version = "0.3.2" } @@ -52,7 +53,6 @@ aquamarine = { version = "0.3.2" } assert_matches = "1.3.0" pretty_assertions = "1.2.1" frame-system = { path = "../system" } -array-bytes = "6.1" [features] default = [ "std" ] @@ -66,6 
+66,7 @@ std = [ "log/std", "scale-info/std", "serde/std", + "serde_json/std", "sp-api/std", "sp-arithmetic/std", "sp-core/std", diff --git a/substrate/frame/support/procedural/Cargo.toml b/substrate/frame/support/procedural/Cargo.toml index d2854a2a79f00a0f3176a956507920519e252705..45ed1750a52871f0d43d6e040def2fa8867d4ddd 100644 --- a/substrate/frame/support/procedural/Cargo.toml +++ b/substrate/frame/support/procedural/Cargo.toml @@ -23,8 +23,8 @@ proc-macro2 = "1.0.56" quote = "1.0.28" syn = { version = "2.0.38", features = ["full"] } frame-support-procedural-tools = { path = "tools" } -proc-macro-warning = { version = "0.4.2", default-features = false } -macro_magic = { version = "0.4.2", features = ["proc_support"] } +macro_magic = { version = "0.5.0", features = ["proc_support"] } +proc-macro-warning = { version = "1.0.0", default-features = false } expander = "2.0.0" sp-core-hashing = { path = "../../../primitives/core/hashing" } diff --git a/substrate/frame/support/procedural/src/benchmark.rs b/substrate/frame/support/procedural/src/benchmark.rs index 6f8f1d155e1e5991038349597ee4e00516b92c0f..6ded82d91aa5cd9bb9cceb4594ae9768d4948483 100644 --- a/substrate/frame/support/procedural/src/benchmark.rs +++ b/substrate/frame/support/procedural/src/benchmark.rs @@ -18,7 +18,7 @@ //! Home of the parsing and expansion code for the new pallet benchmarking syntax use derive_syn_parse::Parse; -use frame_support_procedural_tools::generate_crate_access_2018; +use frame_support_procedural_tools::generate_access_from_frame_or_crate; use proc_macro::TokenStream; use proc_macro2::{Ident, Span, TokenStream as TokenStream2}; use quote::{quote, ToTokens}; @@ -418,7 +418,8 @@ pub fn benchmarks( true => quote!(T: Config, I: 'static), }; - let krate = generate_crate_access_2018("frame-benchmarking")?; + let krate = generate_access_from_frame_or_crate("frame-benchmarking")?; + let frame_system = generate_access_from_frame_or_crate("frame-system")?; // benchmark name variables let benchmark_names_str: Vec = benchmark_names.iter().map(|n| n.to_string()).collect(); @@ -488,7 +489,7 @@ pub fn benchmarks( } #[cfg(any(feature = "runtime-benchmarks", test))] impl<#type_impl_generics> #krate::Benchmarking for Pallet<#type_use_generics> - where T: frame_system::Config, #where_clause + where T: #frame_system::Config, #where_clause { fn benchmarks( extra: bool, @@ -516,7 +517,7 @@ pub fn benchmarks( components, // TODO: Not supported by V2 syntax as of yet. // https://github.com/paritytech/substrate/issues/13132 - pov_modes: vec![], + pov_modes: #krate::__private::vec![], } }).collect::<#krate::__private::Vec<_>>() } @@ -535,7 +536,7 @@ pub fn benchmarks( _ => return Err("Could not find extrinsic.".into()), }; let mut whitelist = whitelist.to_vec(); - let whitelisted_caller_key = as #krate::__private::storage::StorageMap<_, _,>>::hashed_key_for( #krate::whitelisted_caller::() @@ -571,8 +572,8 @@ pub fn benchmarks( >::instance(&selected_benchmark, c, verify)?; // Set the block number to at least 1 so events are deposited. - if #krate::__private::Zero::is_zero(&frame_system::Pallet::::block_number()) { - frame_system::Pallet::::set_block_number(1u32.into()); + if #krate::__private::Zero::is_zero(&#frame_system::Pallet::::block_number()) { + #frame_system::Pallet::::set_block_number(1u32.into()); } // Commit the externalities to the database, flushing the DB cache. 
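Aside (not part of the patch): the sudo hunks earlier in this diff replace three copies of the inline "is the signer the stored sudo key" check with a single `ensure_sudo` helper. A minimal, self-contained Rust sketch of that check follows, with the `Key::<T>` storage item and the signed origin replaced by plain values purely for illustration; it is not the pallet code itself.

// Standalone illustration of the `ensure_sudo` pattern from the sudo hunks above.
// `stored_key` stands in for `Key::<T>::get()` and `sender` for the signed origin.
#[derive(Debug, PartialEq)]
enum SudoError {
    RequireSudo,
}

fn ensure_sudo(stored_key: Option<u64>, sender: u64) -> Result<(), SudoError> {
    // Mirrors `Self::key().map_or(false, |k| k == sender)`.
    if stored_key.map_or(false, |k| k == sender) {
        Ok(())
    } else {
        Err(SudoError::RequireSudo)
    }
}

fn main() {
    assert_eq!(ensure_sudo(Some(1), 1), Ok(()));
    assert_eq!(ensure_sudo(Some(1), 2), Err(SudoError::RequireSudo));
    // After `remove_key`, the stored key is `None`, so every caller is rejected.
    assert_eq!(ensure_sudo(None, 1), Err(SudoError::RequireSudo));
}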
@@ -654,7 +655,7 @@ pub fn benchmarks( } #[cfg(test)] - impl<#type_impl_generics> Pallet<#type_use_generics> where T: ::frame_system::Config, #where_clause { + impl<#type_impl_generics> Pallet<#type_use_generics> where T: #frame_system::Config, #where_clause { /// Test a particular benchmark by name. /// /// This isn't called `test_benchmark_by_name` just in case some end-user eventually @@ -719,10 +720,14 @@ fn expand_benchmark( where_clause: TokenStream2, ) -> TokenStream2 { // set up variables needed during quoting - let krate = match generate_crate_access_2018("frame-benchmarking") { + let krate = match generate_access_from_frame_or_crate("frame-benchmarking") { Ok(ident) => ident, Err(err) => return err.to_compile_error().into(), }; + let frame_system = match generate_access_from_frame_or_crate("frame-system") { + Ok(path) => path, + Err(err) => return err.to_compile_error().into(), + }; let codec = quote!(#krate::__private::codec); let traits = quote!(#krate::__private::traits); let setup_stmts = benchmark_def.setup_stmts; @@ -762,7 +767,7 @@ fn expand_benchmark( Expr::Cast(t) => { let ty = t.ty.clone(); quote! { - <::RuntimeOrigin as From<#ty>>::from(#origin); + <::RuntimeOrigin as From<#ty>>::from(#origin); } }, _ => quote! { @@ -932,7 +937,7 @@ fn expand_benchmark( } #[cfg(test)] - impl<#type_impl_generics> Pallet<#type_use_generics> where T: ::frame_system::Config, #where_clause { + impl<#type_impl_generics> Pallet<#type_use_generics> where T: #frame_system::Config, #where_clause { #[allow(unused)] fn #test_ident() -> Result<(), #krate::BenchmarkError> { let selected_benchmark = SelectedBenchmark::#name; @@ -951,8 +956,8 @@ fn expand_benchmark( >::instance(&selected_benchmark, &c, true)?; // Set the block number to at least 1 so events are deposited. - if #krate::__private::Zero::is_zero(&frame_system::Pallet::::block_number()) { - frame_system::Pallet::::set_block_number(1u32.into()); + if #krate::__private::Zero::is_zero(&#frame_system::Pallet::::block_number()) { + #frame_system::Pallet::::set_block_number(1u32.into()); } // Run execution + verification diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/call.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/call.rs index 859b9a327e48dc7ee8cdce9c7ff3caa8c7621457..ce2aa0942794d40cc3bfe0f4d65fdd80f8140d52 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/call.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/call.rs @@ -124,6 +124,18 @@ pub fn expand_outer_dispatch( } } + impl #scrate::dispatch::CheckIfFeeless for RuntimeCall { + type Origin = #system_path::pallet_prelude::OriginFor<#runtime>; + fn is_feeless(&self, origin: &Self::Origin) -> bool { + match self { + #( + #pallet_attrs + #variant_patterns => call.is_feeless(origin), + )* + } + } + } + impl #scrate::traits::GetCallMetadata for RuntimeCall { fn get_call_metadata(&self) -> #scrate::traits::CallMetadata { use #scrate::traits::GetCallName; diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/composite_helper.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/composite_helper.rs new file mode 100644 index 0000000000000000000000000000000000000000..3c81d2360cb7bec33b7da67de08bc24a34d5ea34 --- /dev/null +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/composite_helper.rs @@ -0,0 +1,70 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License + +use crate::construct_runtime::parse::PalletPath; +use proc_macro2::{Ident, TokenStream}; +use quote::quote; + +pub(crate) fn expand_conversion_fn( + composite_name: &str, + path: &PalletPath, + instance: Option<&Ident>, + variant_name: &Ident, +) -> TokenStream { + let composite_name = quote::format_ident!("{}", composite_name); + let runtime_composite_name = quote::format_ident!("Runtime{}", composite_name); + + if let Some(inst) = instance { + quote! { + impl From<#path::#composite_name<#path::#inst>> for #runtime_composite_name { + fn from(hr: #path::#composite_name<#path::#inst>) -> Self { + #runtime_composite_name::#variant_name(hr) + } + } + } + } else { + quote! { + impl From<#path::#composite_name> for #runtime_composite_name { + fn from(hr: #path::#composite_name) -> Self { + #runtime_composite_name::#variant_name(hr) + } + } + } + } +} + +pub(crate) fn expand_variant( + composite_name: &str, + index: u8, + path: &PalletPath, + instance: Option<&Ident>, + variant_name: &Ident, +) -> TokenStream { + let composite_name = quote::format_ident!("{}", composite_name); + + if let Some(inst) = instance { + quote! { + #[codec(index = #index)] + #variant_name(#path::#composite_name<#path::#inst>), + } + } else { + quote! { + #[codec(index = #index)] + #variant_name(#path::#composite_name), + } + } +} diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/freeze_reason.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/freeze_reason.rs index b142f8e84c92f1fb9d09f33b710c724c5a84eff2..55696cc6c6e3c30ef162a05b12992951a652fa2c 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/freeze_reason.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/freeze_reason.rs @@ -15,8 +15,9 @@ // See the License for the specific language governing permissions and // limitations under the License -use crate::construct_runtime::{parse::PalletPath, Pallet}; -use proc_macro2::{Ident, TokenStream}; +use super::composite_helper; +use crate::construct_runtime::Pallet; +use proc_macro2::TokenStream; use quote::quote; pub fn expand_outer_freeze_reason(pallet_decls: &[Pallet], scrate: &TokenStream) -> TokenStream { @@ -27,17 +28,30 @@ pub fn expand_outer_freeze_reason(pallet_decls: &[Pallet], scrate: &TokenStream) let variant_name = &decl.name; let path = &decl.path; let index = decl.index; + let instance = decl.instance.as_ref(); - conversion_fns.push(expand_conversion_fn(path, variant_name)); + conversion_fns.push(composite_helper::expand_conversion_fn( + "FreezeReason", + path, + instance, + variant_name, + )); - freeze_reason_variants.push(expand_variant(index, path, variant_name)); + freeze_reason_variants.push(composite_helper::expand_variant( + "FreezeReason", + index, + path, + instance, + variant_name, + )); } } + let freeze_reason_variants_count = freeze_reason_variants.len() as u32; quote! 
{ /// A reason for placing a freeze on funds. #[derive( - Copy, Clone, Eq, PartialEq, Ord, PartialOrd, + Copy, Clone, Eq, PartialEq, #scrate::__private::codec::Encode, #scrate::__private::codec::Decode, #scrate::__private::codec::MaxEncodedLen, #scrate::__private::scale_info::TypeInfo, #scrate::__private::RuntimeDebug, @@ -46,23 +60,10 @@ pub fn expand_outer_freeze_reason(pallet_decls: &[Pallet], scrate: &TokenStream) #( #freeze_reason_variants )* } - #( #conversion_fns )* - } -} - -fn expand_conversion_fn(path: &PalletPath, variant_name: &Ident) -> TokenStream { - quote! { - impl From<#path::FreezeReason> for RuntimeFreezeReason { - fn from(hr: #path::FreezeReason) -> Self { - RuntimeFreezeReason::#variant_name(hr) - } + impl #scrate::traits::VariantCount for RuntimeFreezeReason { + const VARIANT_COUNT: u32 = #freeze_reason_variants_count; } - } -} -fn expand_variant(index: u8, path: &PalletPath, variant_name: &Ident) -> TokenStream { - quote! { - #[codec(index = #index)] - #variant_name(#path::FreezeReason), + #( #conversion_fns )* } } diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/hold_reason.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/hold_reason.rs index ed7183c4a150d80dcc69ab0b06a4c4fd73ff765b..3856c4a2bb945b6b80a102e4bba5de9300836c43 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/hold_reason.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/hold_reason.rs @@ -15,8 +15,9 @@ // See the License for the specific language governing permissions and // limitations under the License -use crate::construct_runtime::{parse::PalletPath, Pallet}; -use proc_macro2::{Ident, TokenStream}; +use super::composite_helper; +use crate::construct_runtime::Pallet; +use proc_macro2::TokenStream; use quote::quote; pub fn expand_outer_hold_reason(pallet_decls: &[Pallet], scrate: &TokenStream) -> TokenStream { @@ -27,17 +28,30 @@ pub fn expand_outer_hold_reason(pallet_decls: &[Pallet], scrate: &TokenStream) - let variant_name = &decl.name; let path = &decl.path; let index = decl.index; + let instance = decl.instance.as_ref(); - conversion_fns.push(expand_conversion_fn(path, variant_name)); + conversion_fns.push(composite_helper::expand_conversion_fn( + "HoldReason", + path, + instance, + variant_name, + )); - hold_reason_variants.push(expand_variant(index, path, variant_name)); + hold_reason_variants.push(composite_helper::expand_variant( + "HoldReason", + index, + path, + instance, + variant_name, + )); } } + let hold_reason_variants_count = hold_reason_variants.len() as u32; quote! { /// A reason for placing a hold on funds. #[derive( - Copy, Clone, Eq, PartialEq, Ord, PartialOrd, + Copy, Clone, Eq, PartialEq, #scrate::__private::codec::Encode, #scrate::__private::codec::Decode, #scrate::__private::codec::MaxEncodedLen, #scrate::__private::scale_info::TypeInfo, #scrate::__private::RuntimeDebug, @@ -46,23 +60,10 @@ pub fn expand_outer_hold_reason(pallet_decls: &[Pallet], scrate: &TokenStream) - #( #hold_reason_variants )* } - #( #conversion_fns )* - } -} - -fn expand_conversion_fn(path: &PalletPath, variant_name: &Ident) -> TokenStream { - quote! 
{ - impl From<#path::HoldReason> for RuntimeHoldReason { - fn from(hr: #path::HoldReason) -> Self { - RuntimeHoldReason::#variant_name(hr) - } + impl #scrate::traits::VariantCount for RuntimeHoldReason { + const VARIANT_COUNT: u32 = #hold_reason_variants_count; } - } -} -fn expand_variant(index: u8, path: &PalletPath, variant_name: &Ident) -> TokenStream { - quote! { - #[codec(index = #index)] - #variant_name(#path::HoldReason), + #( #conversion_fns )* } } diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/inherent.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/inherent.rs index a77aad66dcfc2db3d25e27d422c60c5aa8a8e2a5..34b9d21d8ce84fafb7958278227a3f572d8d2f74 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/inherent.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/inherent.rs @@ -90,8 +90,7 @@ pub fn expand_outer_inherent( use #scrate::inherent::{ProvideInherent, IsFatalError}; use #scrate::traits::{IsSubType, ExtrinsicCall}; use #scrate::sp_runtime::traits::Block as _; - use #scrate::_private::sp_inherents::Error; - use #scrate::__private::log; + use #scrate::__private::{sp_inherents::Error, log}; let mut result = #scrate::inherent::CheckInherentsResult::new(); diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/lock_id.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/lock_id.rs index ba35147a051fbc7347be8cc45508b975b5245f25..e67c0da00ea15e42efd0bcdddab98be5844b3e3f 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/lock_id.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/lock_id.rs @@ -15,8 +15,9 @@ // See the License for the specific language governing permissions and // limitations under the License -use crate::construct_runtime::{parse::PalletPath, Pallet}; -use proc_macro2::{Ident, TokenStream}; +use super::composite_helper; +use crate::construct_runtime::Pallet; +use proc_macro2::TokenStream; use quote::quote; pub fn expand_outer_lock_id(pallet_decls: &[Pallet], scrate: &TokenStream) -> TokenStream { @@ -27,17 +28,29 @@ pub fn expand_outer_lock_id(pallet_decls: &[Pallet], scrate: &TokenStream) -> To let variant_name = &decl.name; let path = &decl.path; let index = decl.index; + let instance = decl.instance.as_ref(); - conversion_fns.push(expand_conversion_fn(path, variant_name)); + conversion_fns.push(composite_helper::expand_conversion_fn( + "LockId", + path, + instance, + variant_name, + )); - lock_id_variants.push(expand_variant(index, path, variant_name)); + lock_id_variants.push(composite_helper::expand_variant( + "LockId", + index, + path, + instance, + variant_name, + )); } } quote! { /// An identifier for each lock placed on funds. #[derive( - Copy, Clone, Eq, PartialEq, Ord, PartialOrd, + Copy, Clone, Eq, PartialEq, #scrate::__private::codec::Encode, #scrate::__private::codec::Decode, #scrate::__private::codec::MaxEncodedLen, #scrate::__private::scale_info::TypeInfo, #scrate::__private::RuntimeDebug, @@ -49,20 +62,3 @@ pub fn expand_outer_lock_id(pallet_decls: &[Pallet], scrate: &TokenStream) -> To #( #conversion_fns )* } } - -fn expand_conversion_fn(path: &PalletPath, variant_name: &Ident) -> TokenStream { - quote! { - impl From<#path::LockId> for RuntimeLockId { - fn from(hr: #path::LockId) -> Self { - RuntimeLockId::#variant_name(hr) - } - } - } -} - -fn expand_variant(index: u8, path: &PalletPath, variant_name: &Ident) -> TokenStream { - quote! 
{ - #[codec(index = #index)] - #variant_name(#path::LockId), - } -} diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/mod.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/mod.rs index 830338f9265ffa846aee7de1e01bc7175e908c3a..a0fc6b8130b3c0b5b8fc7ce76768bbc9b0dc9908 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/mod.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/mod.rs @@ -16,6 +16,7 @@ // limitations under the License mod call; +pub mod composite_helper; mod config; mod freeze_reason; mod hold_reason; diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/slash_reason.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/slash_reason.rs index 2a3283230ad84baa02d159c74b72ec63a38230e8..892b842b174872c1d15ab39bcf0c7cc5b509c961 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/slash_reason.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/slash_reason.rs @@ -15,8 +15,9 @@ // See the License for the specific language governing permissions and // limitations under the License -use crate::construct_runtime::{parse::PalletPath, Pallet}; -use proc_macro2::{Ident, TokenStream}; +use super::composite_helper; +use crate::construct_runtime::Pallet; +use proc_macro2::TokenStream; use quote::quote; pub fn expand_outer_slash_reason(pallet_decls: &[Pallet], scrate: &TokenStream) -> TokenStream { @@ -27,17 +28,29 @@ pub fn expand_outer_slash_reason(pallet_decls: &[Pallet], scrate: &TokenStream) let variant_name = &decl.name; let path = &decl.path; let index = decl.index; + let instance = decl.instance.as_ref(); - conversion_fns.push(expand_conversion_fn(path, variant_name)); + conversion_fns.push(composite_helper::expand_conversion_fn( + "SlashReason", + path, + instance, + variant_name, + )); - slash_reason_variants.push(expand_variant(index, path, variant_name)); + slash_reason_variants.push(composite_helper::expand_variant( + "SlashReason", + index, + path, + instance, + variant_name, + )); } } quote! { /// A reason for slashing funds. #[derive( - Copy, Clone, Eq, PartialEq, Ord, PartialOrd, + Copy, Clone, Eq, PartialEq, #scrate::__private::codec::Encode, #scrate::__private::codec::Decode, #scrate::__private::codec::MaxEncodedLen, #scrate::__private::scale_info::TypeInfo, #scrate::__private::RuntimeDebug, @@ -49,20 +62,3 @@ pub fn expand_outer_slash_reason(pallet_decls: &[Pallet], scrate: &TokenStream) #( #conversion_fns )* } } - -fn expand_conversion_fn(path: &PalletPath, variant_name: &Ident) -> TokenStream { - quote! { - impl From<#path::SlashReason> for RuntimeSlashReason { - fn from(hr: #path::SlashReason) -> Self { - RuntimeSlashReason::#variant_name(hr) - } - } - } -} - -fn expand_variant(index: u8, path: &PalletPath, variant_name: &Ident) -> TokenStream { - quote! 
{ - #[codec(index = #index)] - #variant_name(#path::SlashReason), - } -} diff --git a/substrate/frame/support/procedural/src/construct_runtime/mod.rs b/substrate/frame/support/procedural/src/construct_runtime/mod.rs index e8c3c0889214c222f05b700b125b45942cd5d585..ce34694275b386e029ef5c928569220c770752cf 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/mod.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/mod.rs @@ -214,7 +214,7 @@ mod parse; use crate::pallet::parse::helper::two128_str; use cfg_expr::Predicate; use frame_support_procedural_tools::{ - generate_crate_access, generate_crate_access_2018, generate_hidden_includes, + generate_access_from_frame_or_crate, generate_crate_access, generate_hidden_includes, }; use itertools::Itertools; use parse::{ExplicitRuntimeDeclaration, ImplicitRuntimeDeclaration, Pallet, RuntimeDeclaration}; @@ -272,7 +272,7 @@ fn construct_runtime_implicit_to_explicit( input: TokenStream2, definition: ImplicitRuntimeDeclaration, ) -> Result { - let frame_support = generate_crate_access_2018("frame-support")?; + let frame_support = generate_access_from_frame_or_crate("frame-support")?; let mut expansion = quote::quote!( #frame_support::construct_runtime! { #input } ); @@ -283,7 +283,7 @@ fn construct_runtime_implicit_to_explicit( expansion = quote::quote!( #frame_support::__private::tt_call! { macro = [{ #pallet_path::tt_default_parts }] - frame_support = [{ #frame_support }] + your_tt_return = [{ #frame_support::__private::tt_return }] ~~> #frame_support::match_and_insert! { target = [{ #expansion }] pattern = [{ #pallet_name: #pallet_path #pallet_instance }] @@ -308,7 +308,7 @@ fn construct_runtime_explicit_to_explicit_expanded( input: TokenStream2, definition: ExplicitRuntimeDeclaration, ) -> Result { - let frame_support = generate_crate_access_2018("frame-support")?; + let frame_support = generate_access_from_frame_or_crate("frame-support")?; let mut expansion = quote::quote!( #frame_support::construct_runtime! { #input } ); @@ -319,7 +319,7 @@ fn construct_runtime_explicit_to_explicit_expanded( expansion = quote::quote!( #frame_support::__private::tt_call! { macro = [{ #pallet_path::tt_extra_parts }] - frame_support = [{ #frame_support }] + your_tt_return = [{ #frame_support::__private::tt_return }] ~~> #frame_support::match_and_insert! { target = [{ #expansion }] pattern = [{ #pallet_name: #pallet_path #pallet_instance }] @@ -372,7 +372,7 @@ fn construct_runtime_final_expansion( let scrate = generate_crate_access(hidden_crate_name, "frame-support"); let scrate_decl = generate_hidden_includes(hidden_crate_name, "frame-support"); - let frame_system = generate_crate_access_2018("frame-system")?; + let frame_system = generate_access_from_frame_or_crate("frame-system")?; let block = quote!(<#name as #frame_system::Config>::Block); let unchecked_extrinsic = quote!(<#block as #scrate::sp_runtime::traits::Block>::Extrinsic); @@ -414,7 +414,7 @@ fn construct_runtime_final_expansion( ) .help_links(&["https://github.com/paritytech/substrate/pull/14437"]) .span(where_section.span) - .build(), + .build_or_panic(), ) }); @@ -799,7 +799,7 @@ fn decl_static_assertions( quote! { #scrate::__private::tt_call! { macro = [{ #path::tt_error_token }] - frame_support = [{ #scrate }] + your_tt_return = [{ #scrate::__private::tt_return }] ~~> #scrate::assert_error_encoded_size! 
{ path = [{ #path }] runtime = [{ #runtime }] diff --git a/substrate/frame/support/procedural/src/crate_version.rs b/substrate/frame/support/procedural/src/crate_version.rs index 3f728abdb0b03ef5c3c2f6aca5746e8bac8a1e66..8c8975a425971978596702af98617d93f5bfb31a 100644 --- a/substrate/frame/support/procedural/src/crate_version.rs +++ b/substrate/frame/support/procedural/src/crate_version.rs @@ -18,7 +18,7 @@ //! Implementation of macros related to crate versioning. use super::get_cargo_env_var; -use frame_support_procedural_tools::generate_crate_access_2018; +use frame_support_procedural_tools::generate_access_from_frame_or_crate; use proc_macro2::{Span, TokenStream}; use syn::{Error, Result}; @@ -42,7 +42,7 @@ pub fn crate_to_crate_version(input: proc_macro::TokenStream) -> Result("CARGO_PKG_VERSION_PATCH") .map_err(|_| create_error("Patch version needs to fit into `u8`"))?; - let crate_ = generate_crate_access_2018("frame-support")?; + let crate_ = generate_access_from_frame_or_crate("frame-support")?; Ok(quote::quote! { #crate_::traits::CrateVersion { diff --git a/substrate/frame/support/procedural/src/key_prefix.rs b/substrate/frame/support/procedural/src/key_prefix.rs index 6f793d0e37bde1a72e1cda44b30ca98a3c771a4c..7f1ab6866d1e869a23623b25c51e5bbb6f297887 100644 --- a/substrate/frame/support/procedural/src/key_prefix.rs +++ b/substrate/frame/support/procedural/src/key_prefix.rs @@ -15,6 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +use frame_support_procedural_tools::generate_access_from_frame_or_crate; use proc_macro2::{Span, TokenStream}; use quote::{format_ident, quote, ToTokens}; use syn::{Ident, Result}; @@ -27,6 +28,7 @@ pub fn impl_key_prefix_for_tuples(input: proc_macro::TokenStream) -> Result Result),* > HasReversibleKeyPrefix<( #( #kargs, )* )> for ( #( Key<#hashers, #current_tuple>, )* ) { - fn decode_partial_key(key_material: &[u8]) -> Result { + fn decode_partial_key(key_material: &[u8]) -> Result< + Self::Suffix, + #frame_support::__private::codec::Error, + > { <#suffix_keygen>::decode_final_key(key_material).map(|k| k.0) } } diff --git a/substrate/frame/support/procedural/src/lib.rs b/substrate/frame/support/procedural/src/lib.rs index da4cb41fe4f2f48db2774f2ba2d10b7b4f16f459..ec4118918856ab1a7aa1140a13df9dfc28e938dd 100644 --- a/substrate/frame/support/procedural/src/lib.rs +++ b/substrate/frame/support/procedural/src/lib.rs @@ -33,8 +33,8 @@ mod storage_alias; mod transactional; mod tt_macro; -use frame_support_procedural_tools::generate_crate_access_2018; -use macro_magic::import_tokens_attr; +use frame_support_procedural_tools::generate_access_from_frame_or_crate; +use macro_magic::{import_tokens_attr, import_tokens_attr_verbatim}; use proc_macro::TokenStream; use quote::{quote, ToTokens}; use std::{cell::RefCell, str::FromStr}; @@ -751,12 +751,12 @@ pub fn storage_alias(attributes: TokenStream, input: TokenStream) -> TokenStream /// Items that lack a `syn::Ident` for whatever reason are first checked to see if they exist, /// verbatim, in the local/destination trait before they are copied over, so you should not need to /// worry about collisions between identical unnamed items. 
-#[import_tokens_attr { +#[import_tokens_attr_verbatim { format!( "{}::macro_magic", - match generate_crate_access_2018("frame-support") { + match generate_access_from_frame_or_crate("frame-support") { Ok(path) => Ok(path), - Err(_) => generate_crate_access_2018("frame"), + Err(_) => generate_access_from_frame_or_crate("frame"), } .expect("Failed to find either `frame-support` or `frame` in `Cargo.toml` dependencies.") .to_token_stream() @@ -864,7 +864,12 @@ pub fn register_default_impl(attrs: TokenStream, tokens: TokenStream) -> TokenSt let item_impl = syn::parse_macro_input!(tokens as ItemImpl); // internally wrap macro_magic's `#[export_tokens]` macro - match macro_magic::mm_core::export_tokens_internal(attrs, item_impl.to_token_stream(), true) { + match macro_magic::mm_core::export_tokens_internal( + attrs, + item_impl.to_token_stream(), + true, + false, + ) { Ok(tokens) => tokens.into(), Err(err) => err.to_compile_error().into(), } @@ -974,21 +979,26 @@ pub fn config(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The `#[pallet::constant]` attribute can be used to add an associated type trait bounded by `Get` -/// from [`pallet::config`](`macro@config`) into metadata, e.g.: /// -/// ```ignore -/// #[pallet::config] -/// pub trait Config: frame_system::Config { -/// #[pallet::constant] -/// type Foo: Get; -/// } -/// ``` +/// --- +/// +/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// `frame_support::pallet_macros::constant`. #[proc_macro_attribute] pub fn constant(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } +/// +/// --- +/// +/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// `frame_support::pallet_macros::constant_name`. +#[proc_macro_attribute] +pub fn constant_name(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + /// To bypass the `frame_system::Config` supertrait check, use the attribute /// `pallet::disable_frame_system_supertrait_check`, e.g.: /// @@ -1094,6 +1104,16 @@ pub fn compact(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } +/// +/// --- +/// +/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// `frame_support::pallet_macros::call`. +#[proc_macro_attribute] +pub fn call(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + /// Each dispatchable may also be annotated with the `#[pallet::call_index($idx)]` attribute, /// which explicitly defines the codec index for the dispatchable function in the `Call` enum. /// @@ -1137,6 +1157,36 @@ pub fn call_index(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } +/// Each dispatchable may be annotated with the `#[pallet::feeless_if($closure)]` attribute, +/// which explicitly defines the condition for the dispatchable to be feeless. +/// +/// The arguments for the closure must be the referenced arguments of the dispatchable function. +/// +/// The closure must return `bool`. +/// +/// ### Example +/// ```ignore +/// #[pallet::feeless_if(|_origin: &OriginFor, something: &u32| -> bool { +/// *something == 0 +/// })] +/// pub fn do_something(origin: OriginFor, something: u32) -> DispatchResult { +/// .... +/// } +/// ``` +/// +/// Please note that this only works for signed dispatchables and requires a signed extension +/// such as `SkipCheckIfFeeless` as defined in `pallet-skip-feeless-payment` to wrap the existing +/// payment extension. Else, this is completely ignored and the dispatchable is still charged. 
+/// +/// ### Macro expansion +/// +/// The macro implements the `CheckIfFeeless` trait on the dispatchable and calls the corresponding +/// closure in the implementation. +#[proc_macro_attribute] +pub fn feeless_if(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + /// Allows you to define some extra constants to be added into constant metadata. /// /// Item must be defined as: @@ -1263,60 +1313,11 @@ pub fn generate_deposit(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The `#[pallet::storage]` attribute lets you define some abstract storage inside of runtime -/// storage and also set its metadata. This attribute can be used multiple times. /// -/// Item should be defined as: +/// --- /// -/// ```ignore -/// #[pallet::storage] -/// #[pallet::getter(fn $getter_name)] // optional -/// $vis type $StorageName<$some_generic> $optional_where_clause -/// = $StorageType<$generic_name = $some_generics, $other_name = $some_other, ...>; -/// ``` -/// -/// or with unnamed generic: -/// -/// ```ignore -/// #[pallet::storage] -/// #[pallet::getter(fn $getter_name)] // optional -/// $vis type $StorageName<$some_generic> $optional_where_clause -/// = $StorageType<_, $some_generics, ...>; -/// ``` -/// -/// I.e. it must be a type alias, with generics: `T` or `T: Config`. The aliased type must be -/// one of `StorageValue`, `StorageMap` or `StorageDoubleMap`. The generic arguments of the -/// storage type can be given in two manners: named and unnamed. For named generic arguments, -/// the name for each argument should match the name defined for it on the storage struct: -/// * `StorageValue` expects `Value` and optionally `QueryKind` and `OnEmpty`, -/// * `StorageMap` expects `Hasher`, `Key`, `Value` and optionally `QueryKind` and `OnEmpty`, -/// * `CountedStorageMap` expects `Hasher`, `Key`, `Value` and optionally `QueryKind` and `OnEmpty`, -/// * `StorageDoubleMap` expects `Hasher1`, `Key1`, `Hasher2`, `Key2`, `Value` and optionally -/// `QueryKind` and `OnEmpty`. -/// -/// For unnamed generic arguments: Their first generic must be `_` as it is replaced by the -/// macro and other generic must declared as a normal generic type declaration. -/// -/// The `Prefix` generic written by the macro is generated using -/// `PalletInfo::name::>()` and the name of the storage type. E.g. if runtime names -/// the pallet "MyExample" then the storage `type Foo = ...` should use the prefix: -/// `Twox128(b"MyExample") ++ Twox128(b"Foo")`. -/// -/// For the `CountedStorageMap` variant, the `Prefix` also implements -/// `CountedStorageMapInstance`. It also associates a `CounterPrefix`, which is implemented the -/// same as above, but the storage prefix is prepend with `"CounterFor"`. E.g. if runtime names -/// the pallet "MyExample" then the storage `type Foo = CountedStorageaMap<...>` will store -/// its counter at the prefix: `Twox128(b"MyExample") ++ Twox128(b"CounterForFoo")`. -/// -/// E.g: -/// -/// ```ignore -/// #[pallet::storage] -/// pub(super) type MyStorage = StorageMap; -/// ``` -/// -/// In this case the final prefix used by the map is `Twox128(b"MyExample") ++ -/// Twox128(b"OtherName")`. +/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// `frame_support::pallet_macros::storage`. 
#[proc_macro_attribute] pub fn storage(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() @@ -1419,6 +1420,9 @@ pub fn type_value(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } +/// +/// --- +/// /// **Rust-Analyzer users**: See the documentation of the Rust item in /// `frame_support::pallet_macros::genesis_config`. #[proc_macro_attribute] @@ -1426,6 +1430,9 @@ pub fn genesis_config(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } +/// +/// --- +/// /// **Rust-Analyzer users**: See the documentation of the Rust item in /// `frame_support::pallet_macros::genesis_build`. #[proc_macro_attribute] @@ -1565,7 +1572,7 @@ pub fn pallet_section(attr: TokenStream, tokens: TokenStream) -> TokenStream { let _mod = parse_macro_input!(tokens_clone as ItemMod); // use macro_magic's export_tokens as the internal implementation otherwise - match macro_magic::mm_core::export_tokens_internal(attr, tokens, false) { + match macro_magic::mm_core::export_tokens_internal(attr, tokens, false, true) { Ok(tokens) => tokens.into(), Err(err) => err.to_compile_error().into(), } @@ -1607,9 +1614,9 @@ pub fn pallet_section(attr: TokenStream, tokens: TokenStream) -> TokenStream { #[import_tokens_attr { format!( "{}::macro_magic", - match generate_crate_access_2018("frame-support") { + match generate_access_from_frame_or_crate("frame-support") { Ok(path) => Ok(path), - Err(_) => generate_crate_access_2018("frame"), + Err(_) => generate_access_from_frame_or_crate("frame"), } .expect("Failed to find either `frame-support` or `frame` in `Cargo.toml` dependencies.") .to_token_stream() diff --git a/substrate/frame/support/procedural/src/no_bound/clone.rs b/substrate/frame/support/procedural/src/no_bound/clone.rs index 2c9037984f59437cf74af7f40948214fad6ccb7c..8e57a10d34c607c0a3d8de29eb6429ce73a4c8b9 100644 --- a/substrate/frame/support/procedural/src/no_bound/clone.rs +++ b/substrate/frame/support/procedural/src/no_bound/clone.rs @@ -98,6 +98,7 @@ pub fn derive_clone_no_bound(input: proc_macro::TokenStream) -> proc_macro::Toke quote::quote!( const _: () = { + #[automatically_derived] impl #impl_generics ::core::clone::Clone for #name #ty_generics #where_clause { fn clone(&self) -> Self { #impl_ diff --git a/substrate/frame/support/procedural/src/no_bound/debug.rs b/substrate/frame/support/procedural/src/no_bound/debug.rs index 88f5dfe7bec47bc90cb106d5855c6b9ae8294f3e..dd14b9cd564c9ccce9a8442b3669ae418fc18027 100644 --- a/substrate/frame/support/procedural/src/no_bound/debug.rs +++ b/substrate/frame/support/procedural/src/no_bound/debug.rs @@ -112,6 +112,7 @@ pub fn derive_debug_no_bound(input: proc_macro::TokenStream) -> proc_macro::Toke quote::quote!( const _: () = { + #[automatically_derived] impl #impl_generics ::core::fmt::Debug for #input_ident #ty_generics #where_clause { fn fmt(&self, fmt: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { #impl_ diff --git a/substrate/frame/support/procedural/src/no_bound/default.rs b/substrate/frame/support/procedural/src/no_bound/default.rs index ddaab26c44159843c3282e0e26f9f095144ad20a..0524247d24ed6baa580928356296169281e45ae8 100644 --- a/substrate/frame/support/procedural/src/no_bound/default.rs +++ b/substrate/frame/support/procedural/src/no_bound/default.rs @@ -149,6 +149,7 @@ pub fn derive_default_no_bound(input: proc_macro::TokenStream) -> proc_macro::To quote!( const _: () = { + #[automatically_derived] impl #impl_generics ::core::default::Default for #name #ty_generics #where_clause { fn default() -> Self { 
#impl_ diff --git a/substrate/frame/support/procedural/src/no_bound/partial_eq.rs b/substrate/frame/support/procedural/src/no_bound/partial_eq.rs index 1a4a4e50b39e0d6224e2f1108c682f2a4b7ccd0d..7cf5701b193b8a31143f3f636eaa2ab511536434 100644 --- a/substrate/frame/support/procedural/src/no_bound/partial_eq.rs +++ b/substrate/frame/support/procedural/src/no_bound/partial_eq.rs @@ -128,6 +128,7 @@ pub fn derive_partial_eq_no_bound(input: proc_macro::TokenStream) -> proc_macro: quote::quote!( const _: () = { + #[automatically_derived] impl #impl_generics ::core::cmp::PartialEq for #name #ty_generics #where_clause { fn eq(&self, other: &Self) -> bool { #impl_ diff --git a/substrate/frame/support/procedural/src/pallet/expand/call.rs b/substrate/frame/support/procedural/src/pallet/expand/call.rs index 3ed5509863e91f940145bf41e2407da08a4e471a..cf302faafc7805bb8bb2f20e5e7feb4444ce3932 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/call.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/call.rs @@ -17,12 +17,14 @@ use crate::{ pallet::{ + expand::warnings::{weight_constant_warning, weight_witness_warning}, parse::call::{CallVariantDef, CallWeightDef}, Def, }, COUNTER, }; use proc_macro2::TokenStream as TokenStream2; +use proc_macro_warning::Warning; use quote::{quote, ToTokens}; use syn::spanned::Spanned; @@ -68,7 +70,7 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { continue } - let warning = proc_macro_warning::Warning::new_deprecated("ImplicitCallIndex") + let warning = Warning::new_deprecated("ImplicitCallIndex") .index(call_index_warnings.len()) .old("use implicit call indices") .new("ensure that all calls have a `pallet::call_index` attribute or put the pallet into `dev` mode") @@ -77,7 +79,7 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { "https://github.com/paritytech/substrate/pull/11381" ]) .span(method.name.span()) - .build(); + .build_or_panic(); call_index_warnings.push(warning); } @@ -86,18 +88,12 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { for method in &methods { match &method.weight { CallWeightDef::DevModeDefault => fn_weight.push(syn::parse_quote!(0)), - CallWeightDef::Immediate(e @ syn::Expr::Lit(lit)) if !def.dev_mode => { - let warning = proc_macro_warning::Warning::new_deprecated("ConstantWeight") - .index(weight_warnings.len()) - .old("use hard-coded constant as call weight") - .new("benchmark all calls or put the pallet into `dev` mode") - .help_link("https://github.com/paritytech/substrate/pull/13798") - .span(lit.span()) - .build(); - weight_warnings.push(warning); + CallWeightDef::Immediate(e) => { + weight_constant_warning(e, def.dev_mode, &mut weight_warnings); + weight_witness_warning(method, def.dev_mode, &mut weight_warnings); + fn_weight.push(e.into_token_stream()); }, - CallWeightDef::Immediate(e) => fn_weight.push(e.into_token_stream()), CallWeightDef::Inherited => { let pallet_weight = def .call @@ -245,6 +241,16 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { }) .collect::>(); + let feeless_check = methods.iter().map(|method| &method.feeless_check).collect::>(); + let feeless_check_result = + feeless_check.iter().zip(args_name.iter()).map(|(feeless_check, arg_name)| { + if let Some(feeless_check) = feeless_check { + quote::quote!(#feeless_check(origin, #( #arg_name, )*)) + } else { + quote::quote!(false) + } + }); + quote::quote_spanned!(span => mod warnings { #( @@ -351,6 +357,23 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { } } + 
impl<#type_impl_gen> #frame_support::dispatch::CheckIfFeeless for #call_ident<#type_use_gen> + #where_clause + { + type Origin = #frame_system::pallet_prelude::OriginFor; + #[allow(unused_variables)] + fn is_feeless(&self, origin: &Self::Origin) -> bool { + match *self { + #( + Self::#fn_name { #( #args_name_pattern_ref, )* } => { + #feeless_check_result + }, + )* + Self::__Ignore(_, _) => unreachable!("__Ignore cannot be used"), + } + } + } + impl<#type_impl_gen> #frame_support::traits::GetCallName for #call_ident<#type_use_gen> #where_clause { diff --git a/substrate/frame/support/procedural/src/pallet/expand/error.rs b/substrate/frame/support/procedural/src/pallet/expand/error.rs index d3aa0b762bc52df2af97079574eb5dbf6616c72a..877489fd6057b85d06ba2477020776c91bf92d3c 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/error.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/error.rs @@ -42,9 +42,9 @@ pub fn expand_error(def: &mut Def) -> proc_macro2::TokenStream { macro_rules! #error_token_unique_id { { $caller:tt - frame_support = [{ $($frame_support:ident)::* }] + your_tt_return = [{ $my_tt_return:path }] } => { - $($frame_support::)*__private::tt_return! { + $my_tt_return! { $caller } }; @@ -170,9 +170,9 @@ pub fn expand_error(def: &mut Def) -> proc_macro2::TokenStream { macro_rules! #error_token_unique_id { { $caller:tt - frame_support = [{ $($frame_support:ident)::* }] + your_tt_return = [{ $my_tt_return:path }] } => { - $($frame_support::)*__private::tt_return! { + $my_tt_return! { $caller error = [{ #error_ident }] } diff --git a/substrate/frame/support/procedural/src/pallet/expand/event.rs b/substrate/frame/support/procedural/src/pallet/expand/event.rs index fbb699b4d41cceb64384a67ad3c472cf6c32c98b..2713f45fc3d54429ccab3ba82972adddd6e13a98 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/event.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/event.rs @@ -127,11 +127,12 @@ pub fn expand_event(def: &mut Def) -> proc_macro2::TokenStream { let trait_use_gen = &def.trait_use_generics(event.attr_span); let type_impl_gen = &def.type_impl_generics(event.attr_span); let type_use_gen = &def.type_use_generics(event.attr_span); + let pallet_ident = &def.pallet_struct.pallet; let PalletEventDepositAttr { fn_vis, fn_span, .. 
} = deposit_event; quote::quote_spanned!(*fn_span => - impl<#type_impl_gen> Pallet<#type_use_gen> #completed_where_clause { + impl<#type_impl_gen> #pallet_ident<#type_use_gen> #completed_where_clause { #fn_vis fn deposit_event(event: Event<#event_use_gen>) { let event = < ::RuntimeEvent as diff --git a/substrate/frame/support/procedural/src/pallet/expand/genesis_config.rs b/substrate/frame/support/procedural/src/pallet/expand/genesis_config.rs index b00f9bcd1a662dd592a7191a80469363f1a5acb4..31d519ef2929397293514fea96d1e21b476fc4df 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/genesis_config.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/genesis_config.rs @@ -17,6 +17,7 @@ use crate::{pallet::Def, COUNTER}; use frame_support_procedural_tools::get_doc_literals; +use quote::ToTokens; use syn::{spanned::Spanned, Ident}; /// @@ -79,7 +80,7 @@ pub fn expand_genesis_config(def: &mut Def) -> proc_macro2::TokenStream { let genesis_config_item = &mut def.item.content.as_mut().expect("Checked by def parser").1[genesis_config.index]; - let serde_crate = format!("{}::__private::serde", frame_support); + let serde_crate = format!("{}::__private::serde", frame_support.to_token_stream()); match genesis_config_item { syn::Item::Enum(syn::ItemEnum { attrs, .. }) | diff --git a/substrate/frame/support/procedural/src/pallet/expand/hooks.rs b/substrate/frame/support/procedural/src/pallet/expand/hooks.rs index 2825756f270f02ba8feb66088811ca21391c5330..5044d4285bb64aceeccd14959743a494f3c94f85 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/hooks.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/hooks.rs @@ -34,6 +34,38 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { let type_use_gen = &def.type_use_generics(span); let pallet_ident = &def.pallet_struct.pallet; let frame_system = &def.frame_system; + let pallet_name = quote::quote! { + < + ::PalletInfo + as + #frame_support::traits::PalletInfo + >::name::().unwrap_or("") + }; + + let initialize_on_chain_storage_version = if let Some(current_version) = + &def.pallet_struct.storage_version + { + quote::quote! { + #frame_support::__private::log::info!( + target: #frame_support::LOG_TARGET, + "🐥 New pallet {:?} detected in the runtime. Initializing the on-chain storage version to match the storage version defined in the pallet: {:?}", + #pallet_name, + #current_version + ); + #current_version.put::(); + } + } else { + quote::quote! { + let default_version = #frame_support::traits::StorageVersion::new(0); + #frame_support::__private::log::info!( + target: #frame_support::LOG_TARGET, + "🐥 New pallet {:?} detected in the runtime. The pallet has no defined storage version, so the on-chain version is being initialized to {:?}.", + #pallet_name, + default_version + ); + default_version.put::(); + } + }; let log_runtime_upgrade = if has_runtime_upgrade { // a migration is defined here. @@ -42,7 +74,7 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { target: #frame_support::LOG_TARGET, "⚠️ {} declares internal migrations (which *might* execute). \ On-chain `{:?}` vs current storage version `{:?}`", - pallet_name, + #pallet_name, ::on_chain_storage_version(), ::current_storage_version(), ); @@ -53,24 +85,11 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { #frame_support::__private::log::debug!( target: #frame_support::LOG_TARGET, "✅ no migration for {}", - pallet_name, + #pallet_name, ); } }; - let log_try_state = quote::quote! 
{ - let pallet_name = < - ::PalletInfo - as - #frame_support::traits::PalletInfo - >::name::().expect("No name found for the pallet! This usually means that the pallet wasn't added to `construct_runtime!`."); - #frame_support::__private::log::debug!( - target: #frame_support::LOG_TARGET, - "🩺 try-state pallet {:?}", - pallet_name, - ); - }; - let hooks_impl = if def.hooks.is_none() { let frame_system = &def.frame_system; quote::quote! { @@ -91,16 +110,10 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { let current_version = ::current_storage_version(); if on_chain_version != current_version { - let pallet_name = < - ::PalletInfo - as - #frame_support::traits::PalletInfo - >::name::().unwrap_or(""); - #frame_support::__private::log::error!( target: #frame_support::LOG_TARGET, "{}: On chain storage version {:?} doesn't match current storage version {:?}.", - pallet_name, + #pallet_name, on_chain_version, current_version, ); @@ -113,17 +126,11 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { let on_chain_version = ::on_chain_storage_version(); if on_chain_version != #frame_support::traits::StorageVersion::new(0) { - let pallet_name = < - ::PalletInfo - as - #frame_support::traits::PalletInfo - >::name::().unwrap_or(""); - #frame_support::__private::log::error!( target: #frame_support::LOG_TARGET, "{}: On chain storage version {:?} is set to non zero, \ while the pallet is missing the `#[pallet::storage_version(VERSION)]` attribute.", - pallet_name, + #pallet_name, on_chain_version, ); @@ -186,6 +193,32 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { } } + impl<#type_impl_gen> + #frame_support::traits::BeforeAllRuntimeMigrations + for #pallet_ident<#type_use_gen> #where_clause + { + fn before_all_runtime_migrations() -> #frame_support::weights::Weight { + use #frame_support::traits::{Get, PalletInfoAccess}; + use #frame_support::__private::hashing::twox_128; + use #frame_support::storage::unhashed::contains_prefixed_key; + #frame_support::__private::sp_tracing::enter_span!( + #frame_support::__private::sp_tracing::trace_span!("before_all") + ); + + // Check if the pallet has any keys set, including the storage version. If there are + // no keys set, the pallet was just added to the runtime and needs to have its + // version initialized. + let pallet_hashed_prefix = ::name_hash(); + let exists = contains_prefixed_key(&pallet_hashed_prefix); + if !exists { + #initialize_on_chain_storage_version + ::DbWeight::get().reads_writes(1, 1) + } else { + ::DbWeight::get().reads(1) + } + } + } + impl<#type_impl_gen> #frame_support::traits::OnRuntimeUpgrade for #pallet_ident<#type_use_gen> #where_clause @@ -196,11 +229,6 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { ); // log info about the upgrade. 
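// Editor's sketch (not part of this diff), relating to the `BeforeAllRuntimeMigrations`
// impl generated above: when a pallet is first detected on-chain (no keys under its
// prefix), the hook writes the version declared via `#[pallet::storage_version]`, or
// `StorageVersion::new(0)` if the attribute is absent. Names below are illustrative
// and assume `use frame_support::traits::StorageVersion;`.
/// The in-code storage version of this (hypothetical) pallet.
const STORAGE_VERSION: StorageVersion = StorageVersion::new(1);

#[pallet::pallet]
#[pallet::storage_version(STORAGE_VERSION)]
pub struct Pallet<T>(_);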
- let pallet_name = < - ::PalletInfo - as - #frame_support::traits::PalletInfo - >::name::().unwrap_or(""); #log_runtime_upgrade < @@ -271,12 +299,25 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { n: #frame_system::pallet_prelude::BlockNumberFor::, _s: #frame_support::traits::TryStateSelect ) -> Result<(), #frame_support::sp_runtime::TryRuntimeError> { - #log_try_state + #frame_support::__private::log::info!( + target: #frame_support::LOG_TARGET, + "🩺 Running {:?} try-state checks", + #pallet_name, + ); < Self as #frame_support::traits::Hooks< #frame_system::pallet_prelude::BlockNumberFor:: > - >::try_state(n) + >::try_state(n).map_err(|err| { + #frame_support::__private::log::error!( + target: #frame_support::LOG_TARGET, + "❌ {:?} try_state checks failed: {:?}", + #pallet_name, + err + ); + + err + }) } } ) diff --git a/substrate/frame/support/procedural/src/pallet/expand/mod.rs b/substrate/frame/support/procedural/src/pallet/expand/mod.rs index 2b998227c1d841a92a2ded9f3f5b681b83d4ef65..6f32e5697512f7e87a0130ec31e69eb5201c4d89 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/mod.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/mod.rs @@ -34,6 +34,7 @@ mod store_trait; mod tt_default_parts; mod type_value; mod validate_unsigned; +mod warnings; use crate::pallet::Def; use quote::ToTokens; diff --git a/substrate/frame/support/procedural/src/pallet/expand/storage.rs b/substrate/frame/support/procedural/src/pallet/expand/storage.rs index e7f7cf548f0ea54008c47a5e0a92a0d0b5dd93ea..96c2c8e3120b8f8fb76e8ee10b067ce3f13f62e9 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/storage.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/storage.rs @@ -822,12 +822,69 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { ) }); + // aggregated where clause of all storage types and the whole pallet. let mut where_clauses = vec![&def.config.where_clause]; where_clauses.extend(def.storages.iter().map(|storage| &storage.where_clause)); let completed_where_clause = super::merge_where_clauses(&where_clauses); let type_impl_gen = &def.type_impl_generics(proc_macro2::Span::call_site()); let type_use_gen = &def.type_use_generics(proc_macro2::Span::call_site()); + let try_decode_entire_state = { + let mut storage_names = def + .storages + .iter() + .filter_map(|storage| { + if storage.cfg_attrs.is_empty() { + let ident = &storage.ident; + let gen = &def.type_use_generics(storage.attr_span); + Some(quote::quote_spanned!(storage.attr_span => #ident<#gen> )) + } else { + None + } + }) + .collect::>(); + storage_names.sort_by_cached_key(|ident| ident.to_string()); + + quote::quote!( + #[cfg(feature = "try-runtime")] + impl<#type_impl_gen> #frame_support::traits::TryDecodeEntireStorage + for #pallet_ident<#type_use_gen> #completed_where_clause + { + fn try_decode_entire_state() -> Result> { + let pallet_name = <::PalletInfo as frame_support::traits::PalletInfo> + ::name::<#pallet_ident<#type_use_gen>>() + .expect("Every active pallet has a name in the runtime; qed"); + + #frame_support::__private::log::debug!(target: "runtime::try-decode-state", "trying to decode pallet: {pallet_name}"); + + // NOTE: for now, we have to exclude storage items that are feature gated. 
+ let mut errors = #frame_support::__private::sp_std::vec::Vec::new(); + let mut decoded = 0usize; + + #( + #frame_support::__private::log::debug!(target: "runtime::try-decode-state", "trying to decode storage: \ + {pallet_name}::{}", stringify!(#storage_names)); + + match <#storage_names as #frame_support::traits::TryDecodeEntireStorage>::try_decode_entire_state() { + Ok(count) => { + decoded += count; + }, + Err(err) => { + errors.extend(err); + }, + } + )* + + if errors.is_empty() { + Ok(decoded) + } else { + Err(errors) + } + } + } + ) + }; + quote::quote!( impl<#type_impl_gen> #pallet_ident<#type_use_gen> #completed_where_clause @@ -853,5 +910,7 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { #( #getters )* #( #prefix_structs )* #( #on_empty_structs )* + + #try_decode_entire_state ) } diff --git a/substrate/frame/support/procedural/src/pallet/expand/tt_default_parts.rs b/substrate/frame/support/procedural/src/pallet/expand/tt_default_parts.rs index 86db56c776dfefbdd393ae090e59ed11472456c1..c9a776ee247527b8e76508144ece3708d110194b 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/tt_default_parts.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/tt_default_parts.rs @@ -85,18 +85,16 @@ pub fn expand_tt_default_parts(def: &mut Def) -> proc_macro2::TokenStream { // wrapped inside of braces and finally prepended with double colons, to the caller inside // of a key named `tokens`. // - // We need to accept a frame_support argument here, because this macro gets expanded on the - // crate that called the `construct_runtime!` macro, and said crate may have renamed - // frame-support, and so we need to pass in the frame-support path that said crate - // recognizes. + // We need to accept a path argument here, because this macro gets expanded on the + // crate that called the `construct_runtime!` macro, and the actual path is unknown. #[macro_export] #[doc(hidden)] macro_rules! #default_parts_unique_id { { $caller:tt - frame_support = [{ $($frame_support:ident)::* }] + your_tt_return = [{ $my_tt_return:path }] } => { - $($frame_support)*::__private::tt_return! { + $my_tt_return! { $caller tokens = [{ expanded::{ @@ -112,7 +110,7 @@ pub fn expand_tt_default_parts(def: &mut Def) -> proc_macro2::TokenStream { pub use #default_parts_unique_id as tt_default_parts; - // This macro is similar to the `tt_default_parts!`. It expands the pallets thare are declared + // This macro is similar to the `tt_default_parts!`. It expands the pallets that are declared // explicitly (`System: frame_system::{Pallet, Call}`) with extra parts. // // For example, after expansion an explicit pallet would look like: @@ -124,9 +122,9 @@ pub fn expand_tt_default_parts(def: &mut Def) -> proc_macro2::TokenStream { macro_rules! #extra_parts_unique_id { { $caller:tt - frame_support = [{ $($frame_support:ident)::* }] + your_tt_return = [{ $my_tt_return:path }] } => { - $($frame_support)*::__private::tt_return! { + $my_tt_return! { $caller tokens = [{ expanded::{ diff --git a/substrate/frame/support/procedural/src/pallet/expand/warnings.rs b/substrate/frame/support/procedural/src/pallet/expand/warnings.rs new file mode 100644 index 0000000000000000000000000000000000000000..030e3ddaf32321da15c5b6df7fe30f0b80d9f6b1 --- /dev/null +++ b/substrate/frame/support/procedural/src/pallet/expand/warnings.rs @@ -0,0 +1,98 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Generates warnings for undesirable pallet code. + +use crate::pallet::parse::call::{CallVariantDef, CallWeightDef}; +use proc_macro_warning::Warning; +use syn::{ + spanned::Spanned, + visit::{self, Visit}, +}; + +/// Warn if any of the call arguments starts with a underscore and is used in a weight formula. +pub(crate) fn weight_witness_warning( + method: &CallVariantDef, + dev_mode: bool, + warnings: &mut Vec, +) { + if dev_mode { + return + } + let CallWeightDef::Immediate(w) = &method.weight else { return }; + + let partial_warning = Warning::new_deprecated("UncheckedWeightWitness") + .old("not check weight witness data") + .new("ensure that all witness data for weight calculation is checked before usage") + .help_link("https://github.com/paritytech/polkadot-sdk/pull/1818"); + + for (_, arg_ident, _) in method.args.iter() { + if !arg_ident.to_string().starts_with('_') || !contains_ident(w.clone(), &arg_ident) { + continue + } + + let warning = partial_warning + .clone() + .index(warnings.len()) + .span(arg_ident.span()) + .build_or_panic(); + + warnings.push(warning); + } +} + +/// Warn if the weight is a constant and the pallet not in `dev_mode`. +pub(crate) fn weight_constant_warning( + weight: &syn::Expr, + dev_mode: bool, + warnings: &mut Vec, +) { + if dev_mode { + return + } + let syn::Expr::Lit(lit) = weight else { return }; + + let warning = Warning::new_deprecated("ConstantWeight") + .index(warnings.len()) + .old("use hard-coded constant as call weight") + .new("benchmark all calls or put the pallet into `dev` mode") + .help_link("https://github.com/paritytech/substrate/pull/13798") + .span(lit.span()) + .build_or_panic(); + + warnings.push(warning); +} + +/// Returns whether `expr` contains `ident`. +fn contains_ident(mut expr: syn::Expr, ident: &syn::Ident) -> bool { + struct ContainsIdent { + ident: syn::Ident, + found: bool, + } + + impl<'a> Visit<'a> for ContainsIdent { + fn visit_ident(&mut self, i: &syn::Ident) { + if *i == self.ident { + self.found = true; + } + } + } + + let mut visitor = ContainsIdent { ident: ident.clone(), found: false }; + visit::visit_expr(&mut visitor, &mut expr); + visitor.found +} diff --git a/substrate/frame/support/procedural/src/pallet/parse/call.rs b/substrate/frame/support/procedural/src/pallet/parse/call.rs index 90631f264b92aba30a60059f0d992583b6253938..519e1e618954f27354531cb6e336b8db4e5d4190 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/call.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/call.rs @@ -17,9 +17,10 @@ use super::{helper, InheritedCallWeightAttr}; use frame_support_procedural_tools::get_doc_literals; +use proc_macro2::Span; use quote::ToTokens; use std::collections::HashMap; -use syn::spanned::Spanned; +use syn::{spanned::Spanned, ExprClosure}; /// List of additional token to be used for parsing. 
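// Editor's sketch (not part of this diff): the pattern that `weight_witness_warning`
// above flags. `_witness` starts with an underscore (so it is never checked in the
// call body) yet is used in the weight expression, which now emits an
// `UncheckedWeightWitness` deprecation warning outside `dev_mode`. The call name,
// weight function and argument are illustrative.
#[pallet::call_index(0)]
#[pallet::weight(T::WeightInfo::do_work(*_witness))]
pub fn do_work(origin: OriginFor<T>, _witness: u32) -> DispatchResult {
	ensure_signed(origin)?;
	Ok(())
}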
mod keyword { @@ -30,6 +31,7 @@ mod keyword { syn::custom_keyword!(compact); syn::custom_keyword!(T); syn::custom_keyword!(pallet); + syn::custom_keyword!(feeless_if); } /// Definition of dispatchables typically `impl Pallet { ... }` @@ -82,13 +84,18 @@ pub struct CallVariantDef { pub docs: Vec, /// Attributes annotated at the top of the dispatchable function. pub attrs: Vec, + /// The optional `feeless_if` attribute on the `pallet::call`. + pub feeless_check: Option, } /// Attributes for functions in call impl block. -/// Parse for `#[pallet::weight(expr)]` or `#[pallet::call_index(expr)] pub enum FunctionAttr { + /// Parse for `#[pallet::call_index(expr)]` CallIndex(u8), + /// Parse for `#[pallet::weight(expr)]` Weight(syn::Expr), + /// Parse for `#[pallet::feeless_if(expr)]` + FeelessIf(Span, syn::ExprClosure), } impl syn::parse::Parse for FunctionAttr { @@ -115,6 +122,19 @@ impl syn::parse::Parse for FunctionAttr { return Err(syn::Error::new(index.span(), msg)) } Ok(FunctionAttr::CallIndex(index.base10_parse()?)) + } else if lookahead.peek(keyword::feeless_if) { + content.parse::()?; + let closure_content; + syn::parenthesized!(closure_content in content); + Ok(FunctionAttr::FeelessIf( + closure_content.span(), + closure_content.parse::().map_err(|e| { + let msg = "Invalid feeless_if attribute: expected a closure"; + let mut err = syn::Error::new(closure_content.span(), msg); + err.combine(e); + err + })?, + )) } else { Err(lookahead.error()) } @@ -138,28 +158,33 @@ impl syn::parse::Parse for ArgAttrIsCompact { } } -/// Check the syntax is `OriginFor` -pub fn check_dispatchable_first_arg_type(ty: &syn::Type) -> syn::Result<()> { - pub struct CheckDispatchableFirstArg; +/// Check the syntax is `OriginFor` or `&OriginFor`. +pub fn check_dispatchable_first_arg_type(ty: &syn::Type, is_ref: bool) -> syn::Result<()> { + pub struct CheckDispatchableFirstArg(bool); impl syn::parse::Parse for CheckDispatchableFirstArg { fn parse(input: syn::parse::ParseStream) -> syn::Result { + let is_ref = input.parse::().is_ok(); input.parse::()?; input.parse::()?; input.parse::()?; input.parse::]>()?; - Ok(Self) + Ok(Self(is_ref)) } } - syn::parse2::(ty.to_token_stream()).map_err(|e| { - let msg = "Invalid type: expected `OriginFor`"; - let mut err = syn::Error::new(ty.span(), msg); - err.combine(e); - err - })?; - - Ok(()) + let result = syn::parse2::(ty.to_token_stream()); + return match result { + Ok(CheckDispatchableFirstArg(has_ref)) if is_ref == has_ref => Ok(()), + _ => { + let msg = if is_ref { + "Invalid type: expected `&OriginFor`" + } else { + "Invalid type: expected `OriginFor`" + }; + return Err(syn::Error::new(ty.span(), msg)) + }, + } } impl CallDef { @@ -215,7 +240,7 @@ impl CallDef { return Err(syn::Error::new(method.sig.span(), msg)) }, Some(syn::FnArg::Typed(arg)) => { - check_dispatchable_first_arg_type(&arg.ty)?; + check_dispatchable_first_arg_type(&arg.ty, false)?; }, } @@ -227,16 +252,22 @@ impl CallDef { return Err(syn::Error::new(method.sig.span(), msg)) } - let (mut weight_attrs, mut call_idx_attrs): (Vec, Vec) = - helper::take_item_pallet_attrs(&mut method.attrs)?.into_iter().partition( - |attr| { - if let FunctionAttr::Weight(_) = attr { - true - } else { - false - } + let mut call_idx_attrs = vec![]; + let mut weight_attrs = vec![]; + let mut feeless_attrs = vec![]; + for attr in helper::take_item_pallet_attrs(&mut method.attrs)?.into_iter() { + match attr { + FunctionAttr::CallIndex(_) => { + call_idx_attrs.push(attr); }, - ); + FunctionAttr::Weight(_) => { + 
weight_attrs.push(attr); + }, + FunctionAttr::FeelessIf(span, _) => { + feeless_attrs.push((span, attr)); + }, + } + } if weight_attrs.is_empty() && dev_mode { // inject a default O(1) weight when dev mode is enabled and no weight has @@ -323,6 +354,73 @@ impl CallDef { let docs = get_doc_literals(&method.attrs); + if feeless_attrs.len() > 1 { + let msg = "Invalid pallet::call, there can only be one feeless_if attribute"; + return Err(syn::Error::new(feeless_attrs[1].0, msg)) + } + let feeless_check: Option = + feeless_attrs.pop().map(|(_, attr)| match attr { + FunctionAttr::FeelessIf(_, closure) => closure, + _ => unreachable!("checked during creation of the let binding"), + }); + + if let Some(ref feeless_check) = feeless_check { + if feeless_check.inputs.len() != args.len() + 1 { + let msg = "Invalid pallet::call, feeless_if closure must have same \ + number of arguments as the dispatchable function"; + return Err(syn::Error::new(feeless_check.span(), msg)) + } + + match feeless_check.inputs.first() { + None => { + let msg = "Invalid pallet::call, feeless_if closure must have at least origin arg"; + return Err(syn::Error::new(feeless_check.span(), msg)) + }, + Some(syn::Pat::Type(arg)) => { + check_dispatchable_first_arg_type(&arg.ty, true)?; + }, + _ => { + let msg = "Invalid pallet::call, feeless_if closure first argument must be a typed argument, \ + e.g. `origin: OriginFor`"; + return Err(syn::Error::new(feeless_check.span(), msg)) + }, + } + + for (feeless_arg, arg) in feeless_check.inputs.iter().skip(1).zip(args.iter()) { + let feeless_arg_type = + if let syn::Pat::Type(syn::PatType { ty, .. }) = feeless_arg.clone() { + if let syn::Type::Reference(pat) = *ty { + pat.elem.clone() + } else { + let msg = "Invalid pallet::call, feeless_if closure argument must be a reference"; + return Err(syn::Error::new(ty.span(), msg)) + } + } else { + let msg = "Invalid pallet::call, feeless_if closure argument must be a type ascription pattern"; + return Err(syn::Error::new(feeless_arg.span(), msg)) + }; + + if feeless_arg_type != arg.2 { + let msg = + "Invalid pallet::call, feeless_if closure argument must have \ + a reference to the same type as the dispatchable function argument"; + return Err(syn::Error::new(feeless_arg.span(), msg)) + } + } + + let valid_return = match &feeless_check.output { + syn::ReturnType::Type(_, type_) => match *(type_.clone()) { + syn::Type::Path(syn::TypePath { path, .. }) => path.is_ident("bool"), + _ => false, + }, + _ => false, + }; + if !valid_return { + let msg = "Invalid pallet::call, feeless_if closure must return `bool`"; + return Err(syn::Error::new(feeless_check.output.span(), msg)) + } + } + methods.push(CallVariantDef { name: method.sig.ident.clone(), weight, @@ -331,6 +429,7 @@ impl CallDef { args, docs, attrs: method.attrs.clone(), + feeless_check, }); } else { let msg = "Invalid pallet::call, only method accepted"; diff --git a/substrate/frame/support/procedural/src/pallet/parse/composite.rs b/substrate/frame/support/procedural/src/pallet/parse/composite.rs index cb554a116175c1f93aa2b7f5b4da447d6efb7741..6e6ea6a795c1403b64891c00507cf0f7d8dffc25 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/composite.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/composite.rs @@ -15,6 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+use super::helper; use quote::ToTokens; use syn::spanned::Spanned; @@ -91,7 +92,7 @@ impl CompositeDef { pub fn try_from( attr_span: proc_macro2::Span, index: usize, - scrate: &proc_macro2::Ident, + scrate: &syn::Path, item: &mut syn::Item, ) -> syn::Result { let item = if let syn::Item::Enum(item) = item { @@ -108,6 +109,13 @@ impl CompositeDef { return Err(syn::Error::new(item.span(), msg)) } + let has_instance = if item.generics.params.first().is_some() { + helper::check_config_def_gen(&item.generics, item.ident.span())?; + true + } else { + false + }; + let has_derive_attr = item.attrs.iter().any(|attr| { if let syn::Meta::List(syn::MetaList { path, .. }) = &attr.meta { path.get_ident().map(|ident| ident == "derive").unwrap_or(false) @@ -119,7 +127,7 @@ impl CompositeDef { if !has_derive_attr { let derive_attr: syn::Attribute = syn::parse_quote! { #[derive( - Copy, Clone, Eq, PartialEq, Ord, PartialOrd, + Copy, Clone, Eq, PartialEq, #scrate::__private::codec::Encode, #scrate::__private::codec::Decode, #scrate::__private::codec::MaxEncodedLen, #scrate::__private::scale_info::TypeInfo, #scrate::__private::RuntimeDebug, @@ -128,6 +136,20 @@ impl CompositeDef { item.attrs.push(derive_attr); } + if has_instance { + item.attrs.push(syn::parse_quote! { + #[scale_info(skip_type_params(I))] + }); + + item.variants.push(syn::parse_quote! { + #[doc(hidden)] + #[codec(skip)] + __Ignore( + #scrate::__private::sp_std::marker::PhantomData, + ) + }); + } + let composite_keyword = syn::parse2::(item.ident.to_token_stream())?; diff --git a/substrate/frame/support/procedural/src/pallet/parse/config.rs b/substrate/frame/support/procedural/src/pallet/parse/config.rs index e505f8b04119bd685bb2c3dfd77521f349eb619f..fbab92db196cb33d90e88413d66e4ff75e8e3cd4 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/config.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/config.rs @@ -16,7 +16,7 @@ // limitations under the License. use super::helper; -use frame_support_procedural_tools::get_doc_literals; +use frame_support_procedural_tools::{get_doc_literals, is_using_frame_crate}; use quote::ToTokens; use syn::{spanned::Spanned, token, Token}; @@ -165,24 +165,8 @@ pub struct PalletAttr { typ: PalletAttrType, } -pub struct ConfigBoundParse(syn::Ident); - -impl syn::parse::Parse for ConfigBoundParse { - fn parse(input: syn::parse::ParseStream) -> syn::Result { - let ident = input.parse::()?; - input.parse::()?; - input.parse::()?; - - if input.peek(syn::token::Lt) { - input.parse::()?; - } - - Ok(Self(ident)) - } -} - -/// Parse for `IsType<::RuntimeEvent>` and retrieve `$ident` -pub struct IsTypeBoundEventParse(syn::Ident); +/// Parse for `IsType<::RuntimeEvent>` and retrieve `$path` +pub struct IsTypeBoundEventParse(syn::Path); impl syn::parse::Parse for IsTypeBoundEventParse { fn parse(input: syn::parse::ParseStream) -> syn::Result { @@ -191,15 +175,13 @@ impl syn::parse::Parse for IsTypeBoundEventParse { input.parse::()?; input.parse::()?; input.parse::()?; - let ident = input.parse::()?; - input.parse::()?; - input.parse::()?; + let config_path = input.parse::()?; input.parse::]>()?; input.parse::()?; input.parse::()?; input.parse::]>()?; - Ok(Self(ident)) + Ok(Self(config_path)) } } @@ -237,7 +219,7 @@ impl syn::parse::Parse for FromEventParse { /// Check if trait_item is `type RuntimeEvent`, if so checks its bounds are those expected. 
/// (Event type is reserved type) fn check_event_type( - frame_system: &syn::Ident, + frame_system: &syn::Path, trait_item: &syn::TraitItem, trait_has_instance: bool, ) -> syn::Result { @@ -249,19 +231,16 @@ fn check_event_type( no generics nor where_clause"; return Err(syn::Error::new(trait_item.span(), msg)) } - // Check bound contains IsType and From + // Check bound contains IsType and From let has_is_type_bound = type_.bounds.iter().any(|s| { syn::parse2::(s.to_token_stream()) - .map_or(false, |b| b.0 == *frame_system) + .map_or(false, |b| has_expected_system_config(b.0, frame_system)) }); if !has_is_type_bound { - let msg = format!( - "Invalid `type RuntimeEvent`, associated type `RuntimeEvent` is reserved and must \ - bound: `IsType<::RuntimeEvent>`", - frame_system, - ); + let msg = "Invalid `type RuntimeEvent`, associated type `RuntimeEvent` is reserved and must \ + bound: `IsType<::RuntimeEvent>`".to_string(); return Err(syn::Error::new(type_.span(), msg)) } @@ -295,6 +274,43 @@ fn check_event_type( } } +/// Check that the path to `frame_system::Config` is valid, this is that the path is just +/// `frame_system::Config` or when using the `frame` crate it is `frame::xyz::frame_system::Config`. +fn has_expected_system_config(path: syn::Path, frame_system: &syn::Path) -> bool { + // Check if `frame_system` is actually 'frame_system'. + if path.segments.iter().all(|s| s.ident != "frame_system") { + return false + } + + let mut expected_system_config = + match (is_using_frame_crate(&path), is_using_frame_crate(&frame_system)) { + (true, false) => + // We can't use the path to `frame_system` from `frame` if `frame_system` is not being + // in scope through `frame`. + return false, + (false, true) => + // We know that the only valid frame_system path is one that is `frame_system`, as + // `frame` re-exports it as such. + syn::parse2::(quote::quote!(frame_system)).expect("is a valid path; qed"), + (_, _) => + // They are either both `frame_system` or both `frame::xyz::frame_system`. + frame_system.clone(), + }; + + expected_system_config + .segments + .push(syn::PathSegment::from(syn::Ident::new("Config", path.span()))); + + // the parse path might be something like `frame_system::Config<...>`, so we + // only compare the idents along the path. + expected_system_config + .segments + .into_iter() + .map(|ps| ps.ident) + .collect::>() == + path.segments.into_iter().map(|ps| ps.ident).collect::>() +} + /// Replace ident `Self` by `T` pub fn replace_self_by_t(input: proc_macro2::TokenStream) -> proc_macro2::TokenStream { input @@ -311,7 +327,7 @@ pub fn replace_self_by_t(input: proc_macro2::TokenStream) -> proc_macro2::TokenS impl ConfigDef { pub fn try_from( - frame_system: &syn::Ident, + frame_system: &syn::Path, attr_span: proc_macro2::Span, index: usize, item: &mut syn::Item, @@ -352,8 +368,8 @@ impl ConfigDef { }; let has_frame_system_supertrait = item.supertraits.iter().any(|s| { - syn::parse2::(s.to_token_stream()) - .map_or(false, |b| b.0 == *frame_system) + syn::parse2::(s.to_token_stream()) + .map_or(false, |b| has_expected_system_config(b, frame_system)) }); let mut has_event_type = false; @@ -461,7 +477,8 @@ impl ConfigDef { (try `pub trait Config: frame_system::Config {{ ...` or \ `pub trait Config: frame_system::Config {{ ...`). 
\ To disable this check, use `#[pallet::disable_frame_system_supertrait_check]`", - frame_system, found, + frame_system.to_token_stream(), + found, ); return Err(syn::Error::new(item.span(), msg)) } @@ -477,3 +494,97 @@ impl ConfigDef { }) } } + +#[cfg(test)] +mod tests { + use super::*; + #[test] + fn has_expected_system_config_works() { + let frame_system = syn::parse2::(quote::quote!(frame_system)).unwrap(); + let path = syn::parse2::(quote::quote!(frame_system::Config)).unwrap(); + assert!(has_expected_system_config(path, &frame_system)); + } + + #[test] + fn has_expected_system_config_works_with_assoc_type() { + let frame_system = syn::parse2::(quote::quote!(frame_system)).unwrap(); + let path = + syn::parse2::(quote::quote!(frame_system::Config)) + .unwrap(); + assert!(has_expected_system_config(path, &frame_system)); + } + + #[test] + fn has_expected_system_config_works_with_frame() { + let frame_system = + syn::parse2::(quote::quote!(frame::deps::frame_system)).unwrap(); + let path = syn::parse2::(quote::quote!(frame_system::Config)).unwrap(); + assert!(has_expected_system_config(path, &frame_system)); + } + + #[test] + fn has_expected_system_config_works_with_frame_full_path() { + let frame_system = + syn::parse2::(quote::quote!(frame::deps::frame_system)).unwrap(); + let path = + syn::parse2::(quote::quote!(frame::deps::frame_system::Config)).unwrap(); + assert!(has_expected_system_config(path, &frame_system)); + } + + #[test] + fn has_expected_system_config_works_with_other_frame_full_path() { + let frame_system = + syn::parse2::(quote::quote!(frame::xyz::frame_system)).unwrap(); + let path = + syn::parse2::(quote::quote!(frame::xyz::frame_system::Config)).unwrap(); + assert!(has_expected_system_config(path, &frame_system)); + } + + #[test] + fn has_expected_system_config_does_not_works_with_mixed_frame_full_path() { + let frame_system = + syn::parse2::(quote::quote!(frame::xyz::frame_system)).unwrap(); + let path = + syn::parse2::(quote::quote!(frame::deps::frame_system::Config)).unwrap(); + assert!(!has_expected_system_config(path, &frame_system)); + } + + #[test] + fn has_expected_system_config_does_not_works_with_other_mixed_frame_full_path() { + let frame_system = + syn::parse2::(quote::quote!(frame::deps::frame_system)).unwrap(); + let path = + syn::parse2::(quote::quote!(frame::xyz::frame_system::Config)).unwrap(); + assert!(!has_expected_system_config(path, &frame_system)); + } + + #[test] + fn has_expected_system_config_does_not_work_with_frame_full_path_if_not_frame_crate() { + let frame_system = syn::parse2::(quote::quote!(frame_system)).unwrap(); + let path = + syn::parse2::(quote::quote!(frame::deps::frame_system::Config)).unwrap(); + assert!(!has_expected_system_config(path, &frame_system)); + } + + #[test] + fn has_expected_system_config_unexpected_frame_system() { + let frame_system = + syn::parse2::(quote::quote!(framez::deps::frame_system)).unwrap(); + let path = syn::parse2::(quote::quote!(frame_system::Config)).unwrap(); + assert!(!has_expected_system_config(path, &frame_system)); + } + + #[test] + fn has_expected_system_config_unexpected_path() { + let frame_system = syn::parse2::(quote::quote!(frame_system)).unwrap(); + let path = syn::parse2::(quote::quote!(frame_system::ConfigSystem)).unwrap(); + assert!(!has_expected_system_config(path, &frame_system)); + } + + #[test] + fn has_expected_system_config_not_frame_system() { + let frame_system = syn::parse2::(quote::quote!(something)).unwrap(); + let path = 
syn::parse2::(quote::quote!(something::Config)).unwrap(); + assert!(!has_expected_system_config(path, &frame_system)); + } +} diff --git a/substrate/frame/support/procedural/src/pallet/parse/mod.rs b/substrate/frame/support/procedural/src/pallet/parse/mod.rs index 0f5e5f1136610e40226ba880a1b47512d2ee34a9..83a881751ef308a948e9fc69a684cd90862af145 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/mod.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/mod.rs @@ -37,7 +37,7 @@ pub mod type_value; pub mod validate_unsigned; use composite::{keyword::CompositeKeyword, CompositeDef}; -use frame_support_procedural_tools::generate_crate_access_2018; +use frame_support_procedural_tools::generate_access_from_frame_or_crate; use syn::spanned::Spanned; /// Parsed definition of a pallet. @@ -60,15 +60,15 @@ pub struct Def { pub extra_constants: Option, pub composites: Vec, pub type_values: Vec, - pub frame_system: syn::Ident, - pub frame_support: syn::Ident, + pub frame_system: syn::Path, + pub frame_support: syn::Path, pub dev_mode: bool, } impl Def { pub fn try_from(mut item: syn::ItemMod, dev_mode: bool) -> syn::Result { - let frame_system = generate_crate_access_2018("frame-system")?; - let frame_support = generate_crate_access_2018("frame-support")?; + let frame_system = generate_access_from_frame_or_crate("frame-system")?; + let frame_support = generate_access_from_frame_or_crate("frame-support")?; let item_span = item.span(); let items = &mut item diff --git a/substrate/frame/support/procedural/src/pallet/parse/storage.rs b/substrate/frame/support/procedural/src/pallet/parse/storage.rs index 3a0ec4747153a512ae642d810624e0487c7699a7..d1c7ba2e5e3c6788827577dfa843205d0be69174 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/storage.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/storage.rs @@ -151,7 +151,7 @@ pub enum QueryKind { /// `type MyStorage = StorageValue` /// The keys and values types are parsed in order to get metadata pub struct StorageDef { - /// The index of error item in pallet module. + /// The index of storage item in pallet module. pub index: usize, /// Visibility of the storage type. pub vis: syn::Visibility, diff --git a/substrate/frame/support/procedural/src/pallet_error.rs b/substrate/frame/support/procedural/src/pallet_error.rs index 7fd02240a628a23fdaee5ff84f89570aa1705fb2..693a1e9821c9728a7992a3d2d6741b855d2af867 100644 --- a/substrate/frame/support/procedural/src/pallet_error.rs +++ b/substrate/frame/support/procedural/src/pallet_error.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use frame_support_procedural_tools::generate_crate_access_2018; +use frame_support_procedural_tools::generate_access_from_frame_or_crate; use quote::ToTokens; // Derive `PalletError` @@ -25,7 +25,7 @@ pub fn derive_pallet_error(input: proc_macro::TokenStream) -> proc_macro::TokenS Err(e) => return e.to_compile_error().into(), }; - let frame_support = match generate_crate_access_2018("frame-support") { + let frame_support = match generate_access_from_frame_or_crate("frame-support") { Ok(c) => c, Err(e) => return e.into_compile_error().into(), }; @@ -111,7 +111,7 @@ pub fn derive_pallet_error(input: proc_macro::TokenStream) -> proc_macro::TokenS fn generate_field_types( field: &syn::Field, - scrate: &syn::Ident, + scrate: &syn::Path, ) -> syn::Result> { let attrs = &field.attrs; @@ -143,7 +143,7 @@ fn generate_field_types( fn generate_variant_field_types( variant: &syn::Variant, - scrate: &syn::Ident, + scrate: &syn::Path, ) -> syn::Result>> { let attrs = &variant.attrs; diff --git a/substrate/frame/support/procedural/src/storage_alias.rs b/substrate/frame/support/procedural/src/storage_alias.rs index 4903fd1c129c7786b19226aacebbb27df7cc0b29..c0b4089a2748f0b8be6683e591a36fb496af16a4 100644 --- a/substrate/frame/support/procedural/src/storage_alias.rs +++ b/substrate/frame/support/procedural/src/storage_alias.rs @@ -18,7 +18,7 @@ //! Implementation of the `storage_alias` attribute macro. use crate::{counter_prefix, pallet::parse::helper}; -use frame_support_procedural_tools::generate_crate_access_2018; +use frame_support_procedural_tools::generate_access_from_frame_or_crate; use proc_macro2::{Span, TokenStream}; use quote::{quote, ToTokens}; use syn::{ @@ -199,7 +199,7 @@ impl StorageType { /// Generate the actual type declaration. fn generate_type_declaration( &self, - crate_: &Ident, + crate_: &syn::Path, storage_instance: &StorageInstance, storage_name: &Ident, storage_generics: Option<&SimpleGenerics>, @@ -475,7 +475,7 @@ enum PrefixType { /// Implementation of the `storage_alias` attribute macro. pub fn storage_alias(attributes: TokenStream, input: TokenStream) -> Result { let input = syn::parse2::(input)?; - let crate_ = generate_crate_access_2018("frame-support")?; + let crate_ = generate_access_from_frame_or_crate("frame-support")?; let prefix_type = if attributes.is_empty() { PrefixType::Compatibility @@ -527,7 +527,7 @@ struct StorageInstance { /// Generate the [`StorageInstance`] for the storage alias. fn generate_storage_instance( - crate_: &Ident, + crate_: &syn::Path, storage_name: &Ident, storage_generics: Option<&SimpleGenerics>, storage_where_clause: Option<&WhereClause>, diff --git a/substrate/frame/support/procedural/src/transactional.rs b/substrate/frame/support/procedural/src/transactional.rs index 23117ffa39c4e6bbd826a885fbaecff5df7f606b..e9d4f84b797a9d7da49639040e1c2658a9cedcdf 100644 --- a/substrate/frame/support/procedural/src/transactional.rs +++ b/substrate/frame/support/procedural/src/transactional.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use frame_support_procedural_tools::generate_crate_access_2018; +use frame_support_procedural_tools::generate_access_from_frame_or_crate; use proc_macro::TokenStream; use quote::quote; use syn::{ItemFn, Result}; @@ -23,7 +23,7 @@ use syn::{ItemFn, Result}; pub fn transactional(_attr: TokenStream, input: TokenStream) -> Result { let ItemFn { attrs, vis, sig, block } = syn::parse(input)?; - let crate_ = generate_crate_access_2018("frame-support")?; + let crate_ = generate_access_from_frame_or_crate("frame-support")?; let output = quote! { #(#attrs)* #vis #sig { @@ -45,7 +45,7 @@ pub fn transactional(_attr: TokenStream, input: TokenStream) -> Result Result { let ItemFn { attrs, vis, sig, block } = syn::parse(input)?; - let crate_ = generate_crate_access_2018("frame-support")?; + let crate_ = generate_access_from_frame_or_crate("frame-support")?; let output = quote! { #(#attrs)* #vis #sig { diff --git a/substrate/frame/support/procedural/src/tt_macro.rs b/substrate/frame/support/procedural/src/tt_macro.rs index 01611f5dc4a4d054de423668af2b56a1b5532035..d3712742189f8cbf78ba2fcfcd266e8e89ab4ec5 100644 --- a/substrate/frame/support/procedural/src/tt_macro.rs +++ b/substrate/frame/support/procedural/src/tt_macro.rs @@ -18,7 +18,6 @@ //! Implementation of the `create_tt_return_macro` macro use crate::COUNTER; -use frame_support_procedural_tools::generate_crate_access_2018; use proc_macro2::{Ident, TokenStream}; use quote::format_ident; @@ -65,9 +64,9 @@ impl syn::parse::Parse for CreateTtReturnMacroDef { /// macro_rules! my_tt_macro { /// { /// $caller:tt -/// $(frame_support = [{ $($frame_support:ident)::* }])? +/// $(your_tt_return = [{ $my_tt_return:path }])? /// } => { -/// frame_support::__private::tt_return! { +/// $my_tt_return! { /// $caller /// foo = [{ bar }] /// } @@ -78,10 +77,6 @@ pub fn create_tt_return_macro(input: proc_macro::TokenStream) -> proc_macro::Tok let CreateTtReturnMacroDef { name, args } = syn::parse_macro_input!(input as CreateTtReturnMacroDef); - let frame_support = match generate_crate_access_2018("frame-support") { - Ok(i) => i, - Err(e) => return e.into_compile_error().into(), - }; let (keys, values): (Vec<_>, Vec<_>) = args.into_iter().unzip(); let count = COUNTER.with(|counter| counter.borrow_mut().inc()); let unique_name = format_ident!("{}_{}", name, count); @@ -92,9 +87,9 @@ pub fn create_tt_return_macro(input: proc_macro::TokenStream) -> proc_macro::Tok macro_rules! #unique_name { { $caller:tt - $(frame_support = [{ $($frame_support:ident)::* }])? + $(your_tt_return = [{ $my_tt_macro:path }])? } => { - #frame_support::__private::tt_return! { + $my_tt_return! { $caller #( #keys = [{ #values }] diff --git a/substrate/frame/support/procedural/tools/src/lib.rs b/substrate/frame/support/procedural/tools/src/lib.rs index 541accc8ab96a3520d6bf9edd03434459ca29bd5..be43961336280673524071441632bbfa7f61de61 100644 --- a/substrate/frame/support/procedural/tools/src/lib.rs +++ b/substrate/frame/support/procedural/tools/src/lib.rs @@ -39,24 +39,44 @@ fn generate_hidden_includes_mod_name(unique_id: &str) -> Ident { /// Generates the access to the `frame-support` crate. 
pub fn generate_crate_access(unique_id: &str, def_crate: &str) -> TokenStream { if std::env::var("CARGO_PKG_NAME").unwrap() == def_crate { - quote::quote!(frame_support) + let frame_support = match generate_access_from_frame_or_crate("frame-support") { + Ok(c) => c, + Err(e) => return e.into_compile_error().into(), + }; + quote::quote!(#frame_support) } else { let mod_name = generate_hidden_includes_mod_name(unique_id); quote::quote!( self::#mod_name::hidden_include ) } } +/// Check if a path is using the `frame` crate or not. +/// +/// This will usually check the output of [`generate_access_from_frame_or_crate`]. +/// We want to know if whatever the `path` takes us to, is exported from `frame` or not. In that +/// case `path` would start with `frame`, something like `frame::x::y:z`. +pub fn is_using_frame_crate(path: &syn::Path) -> bool { + path.segments.first().map(|s| s.ident == "frame").unwrap_or(false) +} + /// Generate the crate access for the crate using 2018 syntax. /// -/// for `frame-support` output will for example be `frame_support`. -pub fn generate_crate_access_2018(def_crate: &str) -> Result { - match crate_name(def_crate) { - Ok(FoundCrate::Itself) => { - let name = def_crate.to_string().replace("-", "_"); - Ok(syn::Ident::new(&name, Span::call_site())) - }, - Ok(FoundCrate::Name(name)) => Ok(Ident::new(&name, Span::call_site())), - Err(e) => Err(Error::new(Span::call_site(), e)), +/// If `frame` is in scope, it will use `frame::deps::`. Else, it will try and find +/// `` directly. +pub fn generate_access_from_frame_or_crate(def_crate: &str) -> Result { + if let Some(path) = get_frame_crate_path(def_crate) { + Ok(path) + } else { + let ident = match crate_name(def_crate) { + Ok(FoundCrate::Itself) => { + let name = def_crate.to_string().replace("-", "_"); + Ok(syn::Ident::new(&name, Span::call_site())) + }, + Ok(FoundCrate::Name(name)) => Ok(Ident::new(&name, Span::call_site())), + Err(e) => Err(Error::new(Span::call_site(), e)), + }?; + + Ok(syn::Path::from(ident)) } } @@ -64,21 +84,41 @@ pub fn generate_crate_access_2018(def_crate: &str) -> Result pub fn generate_hidden_includes(unique_id: &str, def_crate: &str) -> TokenStream { let mod_name = generate_hidden_includes_mod_name(unique_id); - match crate_name(def_crate) { - Ok(FoundCrate::Itself) => quote!(), - Ok(FoundCrate::Name(name)) => { - let name = Ident::new(&name, Span::call_site()); - quote::quote!( - #[doc(hidden)] - mod #mod_name { - pub extern crate #name as hidden_include; - } - ) - }, - Err(e) => { - let err = Error::new(Span::call_site(), e).to_compile_error(); - quote!( #err ) - }, + if let Some(path) = get_frame_crate_path(def_crate) { + quote::quote!( + #[doc(hidden)] + mod #mod_name { + pub use #path as hidden_include; + } + ) + } else { + match crate_name(def_crate) { + Ok(FoundCrate::Itself) => quote!(), + Ok(FoundCrate::Name(name)) => { + let name = Ident::new(&name, Span::call_site()); + quote::quote!( + #[doc(hidden)] + mod #mod_name { + pub use #name as hidden_include; + } + ) + }, + Err(e) => { + let err = Error::new(Span::call_site(), e).to_compile_error(); + quote!( #err ) + }, + } + } +} + +/// Generates the path to the frame crate deps. +fn get_frame_crate_path(def_crate: &str) -> Option { + // This does not work if the frame crate is renamed. 
+ if let Ok(FoundCrate::Name(name)) = crate_name(&"frame") { + let path = format!("{}::deps::{}", name, def_crate.to_string().replace("-", "_")); + Some(syn::parse_str::(&path).expect("is a valid path; qed")) + } else { + None } } diff --git a/substrate/frame/support/src/dispatch.rs b/substrate/frame/support/src/dispatch.rs index 75e94827fea8a142d5038d4685ede440b7b02c5c..e57227f9b401ed92fcdf997ba4b36c7888aa4921 100644 --- a/substrate/frame/support/src/dispatch.rs +++ b/substrate/frame/support/src/dispatch.rs @@ -54,6 +54,20 @@ pub trait Callable { // https://github.com/rust-lang/rust/issues/51331 pub type CallableCallFor =
>::RuntimeCall; +/// Means to checks if the dispatchable is feeless. +/// +/// This is automatically implemented for all dispatchables during pallet expansion. +/// If a call is marked by [`#[pallet::feeless_if]`](`macro@frame_support_procedural::feeless_if`) +/// attribute, the corresponding closure is checked. +pub trait CheckIfFeeless { + /// The Origin type of the runtime. + type Origin; + + /// Checks if the dispatchable satisfies the feeless condition as defined by + /// [`#[pallet::feeless_if]`](`macro@frame_support_procedural::feeless_if`) + fn is_feeless(&self, origin: &Self::Origin) -> bool; +} + /// Origin for the System pallet. #[derive(PartialEq, Eq, Clone, RuntimeDebug, Encode, Decode, TypeInfo, MaxEncodedLen)] pub enum RawOrigin { @@ -390,7 +404,7 @@ impl GetDispatchInfo } /// A struct holding value for each `DispatchClass`. -#[derive(Clone, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo, MaxEncodedLen)] +#[derive(Clone, Eq, PartialEq, Default, Debug, Encode, Decode, TypeInfo, MaxEncodedLen)] pub struct PerDispatchClass { /// Value for `Normal` extrinsics. normal: T, diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs index 8c4b3de49b5d5e6357a3864a9e5d8001c19ffebd..2ec3b24db0ce3b5f9cc9cfdfc6b599169737c6ea 100644 --- a/substrate/frame/support/src/lib.rs +++ b/substrate/frame/support/src/lib.rs @@ -30,6 +30,7 @@ #![cfg_attr(not(feature = "std"), no_std)] /// Export ourself as `frame_support` to make tests happy. +#[doc(hidden)] extern crate self as frame_support; /// Private exports that are being used by macros. @@ -45,11 +46,14 @@ pub mod __private { pub use serde; pub use sp_core::{OpaqueMetadata, Void}; pub use sp_core_hashing_proc_macro; - pub use sp_io::{self, storage::root as storage_root}; + pub use sp_inherents; + pub use sp_io::{self, hashing, storage::root as storage_root}; pub use sp_metadata_ir as metadata_ir; #[cfg(feature = "std")] pub use sp_runtime::{bounded_btree_map, bounded_vec}; - pub use sp_runtime::{traits::Dispatchable, RuntimeDebug, StateVersion}; + pub use sp_runtime::{ + traits::Dispatchable, DispatchError, RuntimeDebug, StateVersion, TransactionOutcome, + }; #[cfg(feature = "std")] pub use sp_state_machine::BasicExternalities; pub use sp_std; @@ -779,6 +783,31 @@ macro_rules! assert_error_encoded_size { } => {}; } +/// Do something hypothetically by rolling back any changes afterwards. +/// +/// Returns the original result of the closure. +#[macro_export] +#[cfg(feature = "experimental")] +macro_rules! hypothetically { + ( $e:expr ) => { + $crate::storage::transactional::with_transaction(|| -> $crate::__private::TransactionOutcome> { + $crate::__private::TransactionOutcome::Rollback(Ok($e)) + }, + ).expect("Always returning Ok; qed") + }; +} + +/// Assert something to be *hypothetically* `Ok`, without actually committing it. +/// +/// Reverts any storage changes made by the closure. +#[macro_export] +#[cfg(feature = "experimental")] +macro_rules! hypothetically_ok { + ($e:expr $(, $args:expr)* $(,)?) => { + $crate::assert_ok!($crate::hypothetically!($e) $(, $args)*); + }; +} + #[doc(hidden)] pub use serde::{Deserialize, Serialize}; @@ -786,12 +815,6 @@ pub use serde::{Deserialize, Serialize}; #[cfg(not(no_std))] pub use macro_magic; -/// Private module re-exporting items used by frame support macros. -#[doc(hidden)] -pub mod _private { - pub use sp_inherents; -} - /// Prelude to be used for pallet testing, for ease of use. 
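A minimal usage sketch for the `hypothetically!` / `hypothetically_ok!` macros introduced above (both are gated behind the `experimental` feature). The mock-runtime pieces, namely `new_test_ext`, a `Balances` pallet and account `1` holding some free balance, are assumptions for illustration only, not part of this patch:

```
#[test]
fn hypothetical_checks_do_not_commit() {
	use frame_support::traits::ReservableCurrency;
	use frame_support::{hypothetically, hypothetically_ok};

	new_test_ext().execute_with(|| {
		// Assert that the call would succeed, then roll every storage change back.
		hypothetically_ok!(Balances::reserve(&1, 10));

		// `hypothetically!` evaluates the expression inside a transaction that is
		// always rolled back, and hands the expression's value back to the caller.
		let reserved = hypothetically!({
			Balances::reserve(&1, 10).unwrap();
			Balances::reserved_balance(&1)
		});
		assert_eq!(reserved, 10);

		// Nothing was committed.
		assert_eq!(Balances::reserved_balance(&1), 0);
	});
}
```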
#[cfg(feature = "std")] pub mod testing_prelude { @@ -806,16 +829,20 @@ pub mod testing_prelude { /// Prelude to be used alongside pallet macro, for ease of use. pub mod pallet_prelude { pub use crate::{ + defensive, defensive_assert, dispatch::{DispatchClass, DispatchResult, DispatchResultWithPostInfo, Parameter, Pays}, ensure, inherent::{InherentData, InherentIdentifier, ProvideInherent}, storage, storage::{ + bounded_btree_map::BoundedBTreeMap, + bounded_btree_set::BoundedBTreeSet, bounded_vec::BoundedVec, types::{ CountedStorageMap, CountedStorageNMap, Key as NMapKey, OptionQuery, ResultQuery, StorageDoubleMap, StorageMap, StorageNMap, StorageValue, ValueQuery, }, + weak_bounded_vec::WeakBoundedVec, StorageList, }, traits::{ @@ -1646,8 +1673,7 @@ pub mod pallet_prelude { /// the enum: /// /// ```ignore -/// Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, MaxEncodedLen, TypeInfo, -/// RuntimeDebug +/// Copy, Clone, Eq, PartialEq, Encode, Decode, MaxEncodedLen, TypeInfo, RuntimeDebug /// ``` /// /// The inverse is also true: if there are any #[derive] attributes present for the enum, then @@ -1815,6 +1841,15 @@ pub mod pallet_prelude { /// #[pallet::origin] /// pub struct Origin(PhantomData); /// +/// // Declare a hold reason (this is optional). +/// // +/// // Creates a hold reason for this pallet that is aggregated by `construct_runtime`. +/// // A similar enum can be defined for `FreezeReason`, `LockId` or `SlashReason`. +/// #[pallet::composite_enum] +/// pub enum HoldReason { +/// SomeHoldReason +/// } +/// /// // Declare validate_unsigned implementation (this is optional). /// #[pallet::validate_unsigned] /// impl ValidateUnsigned for Pallet { @@ -1949,6 +1984,11 @@ pub mod pallet_prelude { /// #[pallet::origin] /// pub struct Origin(PhantomData<(T, I)>); /// +/// #[pallet::composite_enum] +/// pub enum HoldReason { +/// SomeHoldReason +/// } +/// /// #[pallet::validate_unsigned] /// impl, I: 'static> ValidateUnsigned for Pallet { /// type Call = Call; @@ -2186,11 +2226,11 @@ pub use frame_support_procedural::pallet; /// Contains macro stubs for all of the pallet:: macros pub mod pallet_macros { pub use frame_support_procedural::{ - call_index, compact, composite_enum, config, constant, - disable_frame_system_supertrait_check, error, event, extra_constants, generate_deposit, - generate_store, getter, hooks, import_section, inherent, no_default, no_default_bounds, - origin, pallet_section, storage, storage_prefix, storage_version, type_value, unbounded, - validate_unsigned, weight, whitelist_storage, + call_index, compact, composite_enum, config, disable_frame_system_supertrait_check, error, + event, extra_constants, feeless_if, generate_deposit, generate_store, getter, hooks, + import_section, inherent, no_default, no_default_bounds, origin, pallet_section, + storage_prefix, storage_version, type_value, unbounded, validate_unsigned, weight, + whitelist_storage, }; /// Allows you to define the genesis configuration for the pallet. @@ -2207,7 +2247,7 @@ pub mod pallet_macros { /// /// The fields of the `GenesisConfig` can in turn be populated by the chain-spec. /// - /// ## Example: + /// ## Example /// /// ``` /// #[frame_support::pallet] @@ -2262,6 +2302,230 @@ pub mod pallet_macros { /// } /// ``` pub use frame_support_procedural::genesis_build; + + /// The `#[pallet::constant]` attribute can be used to add an associated type trait bounded + /// by [`Get`](frame_support::pallet_prelude::Get) from [`pallet::config`](`macro@config`) + /// into metadata. 
+ /// + /// ## Example + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// use frame_support::pallet_prelude::*; + /// # #[pallet::pallet] + /// # pub struct Pallet(_); + /// #[pallet::config] + /// pub trait Config: frame_system::Config { + /// /// This is like a normal `Get` trait, but it will be added into metadata. + /// #[pallet::constant] + /// type Foo: Get; + /// } + /// } + /// ``` + pub use frame_support_procedural::constant; + + /// Declares a type alias as a storage item. Storage items are pointers to data stored + /// on-chain (the *blockchain state*), under a specific key. The exact key is dependent on + /// the type of the storage. + /// + /// > From the perspective of this pallet, the entire blockchain state is abstracted behind + /// > a key-value api, namely [`sp_io::storage`]. + /// + /// ## Storage Types + /// + /// The following storage types are supported by the `#[storage]` macro. For specific + /// information about each storage type, refer to the documentation of the respective type. + /// + /// * [`StorageValue`](crate::storage::types::StorageValue) + /// * [`StorageMap`](crate::storage::types::StorageMap) + /// * [`CountedStorageMap`](crate::storage::types::CountedStorageMap) + /// * [`StorageDoubleMap`](crate::storage::types::StorageDoubleMap) + /// * [`StorageNMap`](crate::storage::types::StorageNMap) + /// * [`CountedStorageNMap`](crate::storage::types::CountedStorageNMap) + /// + /// ## Storage Type Usage + /// + /// The following details are relevant to all of the aforementioned storage types. + /// Depending on the exact storage type, it may require the following generic parameters: + /// + /// * [`Prefix`](#prefixes) - Used to give the storage item a unique key in the underlying + /// storage. + /// * `Key` - Type of the keys used to store the values, + /// * `Value` - Type of the value being stored, + /// * [`Hasher`](#hashers) - Used to ensure the keys of a map are uniformly distributed, + /// * [`QueryKind`](#querykind) - Used to configure how to handle queries to the underlying + /// storage, + /// * `OnEmpty` - Used to handle missing values when querying the underlying storage, + /// * `MaxValues` - _not currently used_. + /// + /// Each `Key` type requires its own designated `Hasher` declaration, so that + /// [`StorageDoubleMap`](frame_support::storage::types::StorageDoubleMap) needs two of + /// each, and [`StorageNMap`](frame_support::storage::types::StorageNMap) needs `N` such + /// pairs. Since [`StorageValue`](frame_support::storage::types::StorageValue) only stores + /// a single element, no configuration of hashers is needed. + /// + /// ### Syntax + /// + /// Two general syntaxes are supported, as demonstrated below: + /// + /// 1. Named type parameters, e.g., `type Foo = StorageValue`. + /// 2. Positional type parameters, e.g., `type Foo = StorageValue<_, u32>`. + /// + /// In both instances, declaring the generic parameter `` is mandatory. Optionally, it + /// can also be explicitly declared as ``. In the compiled code, `T` will + /// automatically include the trait bound `Config`. + /// + /// Note that in positional syntax, the first generic type parameter must be `_`. + /// + /// #### Example + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// # #[pallet::pallet] + /// # pub struct Pallet(_); + /// /// Positional syntax, without bounding `T`. 
+ /// #[pallet::storage] + /// pub type Foo = StorageValue<_, u32>; + /// + /// /// Positional syntax, with bounding `T`. + /// #[pallet::storage] + /// pub type Bar = StorageValue<_, u32>; + /// + /// /// Named syntax. + /// #[pallet::storage] + /// pub type Baz = StorageMap; + /// } + /// ``` + /// + /// ### QueryKind + /// + /// Every storage type mentioned above has a generic type called + /// [`QueryKind`](frame_support::storage::types::QueryKindTrait) that determines its + /// "query" type. This refers to the kind of value returned when querying the storage, for + /// instance, through a `::get()` method. + /// + /// There are three types of queries: + /// + /// 1. [`OptionQuery`](frame_support::storage::types::OptionQuery): The default query type. + /// It returns `Some(V)` if the value is present, or `None` if it isn't, where `V` is + /// the value type. + /// 2. [`ValueQuery`](frame_support::storage::types::ValueQuery): Returns the value itself + /// if present; otherwise, it returns `Default::default()`. This behavior can be + /// adjusted with the `OnEmpty` generic parameter, which defaults to `OnEmpty = + /// GetDefault`. + /// 3. [`ResultQuery`](frame_support::storage::types::ResultQuery): Returns `Result`, + /// where `V` is the value type. + /// + /// See [`QueryKind`](frame_support::storage::types::QueryKindTrait) for further examples. + /// + /// ### Optimized Appending + /// + /// All storage items — such as + /// [`StorageValue`](frame_support::storage::types::StorageValue), + /// [`StorageMap`](frame_support::storage::types::StorageMap), and their variants—offer an + /// `::append()` method optimized for collections. Using this method avoids the + /// inefficiency of decoding and re-encoding entire collections when adding items. For + /// instance, consider the storage declaration `type MyVal = StorageValue<_, Vec, + /// ValueQuery>`. With `MyVal` storing a large list of bytes, `::append()` lets you + /// directly add bytes to the end in storage without processing the full list. Depending on + /// the storage type, additional key specifications may be needed. + /// + /// #### Example + #[doc = docify::embed!("src/lib.rs", example_storage_value_append)] + /// Similarly, there also exists a `::try_append()` method, which can be used when handling + /// types where an append operation might fail, such as a + /// [`BoundedVec`](frame_support::BoundedVec). + /// + /// #### Example + #[doc = docify::embed!("src/lib.rs", example_storage_value_try_append)] + /// ### Optimized Length Decoding + /// + /// All storage items — such as + /// [`StorageValue`](frame_support::storage::types::StorageValue), + /// [`StorageMap`](frame_support::storage::types::StorageMap), and their counterparts — + /// incorporate the `::decode_len()` method. This method allows for efficient retrieval of + /// a collection's length without the necessity of decoding the entire dataset. + /// #### Example + #[doc = docify::embed!("src/lib.rs", example_storage_value_decode_len)] + /// ### Hashers + /// + /// For all storage types, except + /// [`StorageValue`](frame_support::storage::types::StorageValue), a set of hashers needs + /// to be specified. The choice of hashers is crucial, especially in production chains. The + /// purpose of storage hashers in maps is to ensure the keys of a map are + /// uniformly distributed. An unbalanced map/trie can lead to inefficient performance. + /// + /// In general, hashers are categorized as either cryptographically secure or not. 
The + /// former is slower than the latter. `Blake2` and `Twox` serve as examples of each, + /// respectively. + /// + /// As a rule of thumb: + /// + /// 1. If the map keys are not controlled by end users, or are cryptographically secure by + /// definition (e.g., `AccountId`), then the use of cryptographically secure hashers is NOT + /// required. + /// 2. If the map keys are controllable by the end users, cryptographically secure hashers + /// should be used. + /// + /// For more information, look at the types that implement + /// [`frame_support::StorageHasher`](frame_support::StorageHasher). + /// + /// Lastly, it's recommended for hashers with "concat" to have reversible hashes. Refer to + /// the implementors section of + /// [`hash::ReversibleStorageHasher`](frame_support::hash::ReversibleStorageHasher). + /// + /// ### Prefixes + /// + /// Internally, every storage type generates a "prefix". This prefix serves as the initial + /// segment of the key utilized to store values in the on-chain state (i.e., the final key + /// used in [`sp_io::storage`](sp_io::storage)). For all storage types, the following rule + /// applies: + /// + /// > The storage prefix begins with `twox128(pallet_prefix) ++ twox128(STORAGE_PREFIX)`, + /// > where + /// > `pallet_prefix` is the name assigned to the pallet instance in + /// > [`frame_support::construct_runtime`](frame_support::construct_runtime), and + /// > `STORAGE_PREFIX` is the name of the `type` aliased to a particular storage type, such + /// > as + /// > `Foo` in `type Foo = StorageValue<..>`. + /// + /// For [`StorageValue`](frame_support::storage::types::StorageValue), no additional key is + /// required. For map types, the prefix is extended with one or more keys defined by the + /// map. + /// + /// #### Example + #[doc = docify::embed!("src/lib.rs", example_storage_value_map_prefixes)] + /// ## Related Macros + /// + /// The following attribute macros can be used in conjunction with the `#[storage]` macro: + /// + /// * [`macro@getter`]: Creates a custom getter function. + /// * [`macro@storage_prefix`]: Overrides the default prefix of the storage item. + /// * [`macro@unbounded`]: Declares the storage item as unbounded. + /// + /// #### Example + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// # #[pallet::pallet] + /// # pub struct Pallet(_); + /// /// A kitchen-sink StorageValue, with all possible additional attributes. 
+ /// #[pallet::storage] + /// #[pallet::getter(fn foo)] + /// #[pallet::storage_prefix = "OtherFoo"] + /// #[pallet::unbounded] + /// pub type Foo = StorageValue<_, u32, ValueQuery>; + /// } + /// ``` + pub use frame_support_procedural::storage; } #[deprecated(note = "Will be removed after July 2023; Use `sp_runtime::traits` directly instead.")] @@ -2278,3 +2542,98 @@ sp_core::generate_feature_enabled_macro!(std_enabled, feature = "std", $); // Helper for implementing GenesisBuilder runtime API pub mod genesis_builder_helper; + +#[cfg(test)] +mod test { + // use super::*; + use crate::{ + hash::*, + storage::types::{StorageMap, StorageValue, ValueQuery}, + traits::{ConstU32, StorageInstance}, + BoundedVec, + }; + use sp_io::{hashing::twox_128, TestExternalities}; + + struct Prefix; + impl StorageInstance for Prefix { + fn pallet_prefix() -> &'static str { + "test" + } + const STORAGE_PREFIX: &'static str = "foo"; + } + + struct Prefix1; + impl StorageInstance for Prefix1 { + fn pallet_prefix() -> &'static str { + "test" + } + const STORAGE_PREFIX: &'static str = "MyVal"; + } + struct Prefix2; + impl StorageInstance for Prefix2 { + fn pallet_prefix() -> &'static str { + "test" + } + const STORAGE_PREFIX: &'static str = "MyMap"; + } + + #[docify::export] + #[test] + pub fn example_storage_value_try_append() { + type MyVal = StorageValue>, ValueQuery>; + + TestExternalities::default().execute_with(|| { + MyVal::set(BoundedVec::try_from(vec![42, 43]).unwrap()); + assert_eq!(MyVal::get(), vec![42, 43]); + // Try to append a single u32 to BoundedVec stored in `MyVal` + assert_ok!(MyVal::try_append(40)); + assert_eq!(MyVal::get(), vec![42, 43, 40]); + }); + } + + #[docify::export] + #[test] + pub fn example_storage_value_append() { + type MyVal = StorageValue, ValueQuery>; + + TestExternalities::default().execute_with(|| { + MyVal::set(vec![42, 43]); + assert_eq!(MyVal::get(), vec![42, 43]); + // Append a single u32 to Vec stored in `MyVal` + MyVal::append(40); + assert_eq!(MyVal::get(), vec![42, 43, 40]); + }); + } + + #[docify::export] + #[test] + pub fn example_storage_value_decode_len() { + type MyVal = StorageValue>, ValueQuery>; + + TestExternalities::default().execute_with(|| { + MyVal::set(BoundedVec::try_from(vec![42, 43]).unwrap()); + assert_eq!(MyVal::decode_len().unwrap(), 2); + }); + } + + #[docify::export] + #[test] + pub fn example_storage_value_map_prefixes() { + type MyVal = StorageValue; + type MyMap = StorageMap; + TestExternalities::default().execute_with(|| { + // This example assumes `pallet_prefix` to be "test" + // Get storage key for `MyVal` StorageValue + assert_eq!( + MyVal::hashed_key().to_vec(), + [twox_128(b"test"), twox_128(b"MyVal")].concat() + ); + // Get storage key for `MyMap` StorageMap and `key` = 1 + let mut k: Vec = vec![]; + k.extend(&twox_128(b"test")); + k.extend(&twox_128(b"MyMap")); + k.extend(&1u16.blake2_128_concat()); + assert_eq!(MyMap::hashed_key_for(1).to_vec(), k); + }); + } +} diff --git a/substrate/frame/support/src/migrations.rs b/substrate/frame/support/src/migrations.rs index 8f490030c25b2b53b2c7ec6b3b3debdb61e7b5e8..a9eb460421f18af1f4b5b5b94fd05cd2984fa9ad 100644 --- a/substrate/frame/support/src/migrations.rs +++ b/substrate/frame/support/src/migrations.rs @@ -51,7 +51,7 @@ use sp_std::marker::PhantomData; /// // OnRuntimeUpgrade implementation... 
/// } /// -/// pub type VersionCheckedMigrateV5ToV6 = +/// pub type MigrateV5ToV6 = /// VersionedMigration< /// 5, /// 6, @@ -63,7 +63,7 @@ use sp_std::marker::PhantomData; /// // Migrations tuple to pass to the Executive pallet: /// pub type Migrations = ( /// // other migrations... -/// VersionCheckedMigrateV5ToV6, +/// MigrateV5ToV6, /// // other migrations... /// ); /// ``` @@ -119,7 +119,7 @@ impl< let on_chain_version = Pallet::on_chain_storage_version(); if on_chain_version == FROM { log::info!( - "Running {} VersionedOnRuntimeUpgrade: version {:?} to {:?}.", + "🚚 Pallet {:?} VersionedMigration migrating storage version from {:?} to {:?}.", Pallet::name(), FROM, TO @@ -134,9 +134,10 @@ impl< weight.saturating_add(DbWeight::get().reads_writes(1, 1)) } else { log::warn!( - "{} VersionedOnRuntimeUpgrade for version {:?} skipped because current on-chain version is {:?}.", + "🚚 Pallet {:?} VersionedMigration migration {}->{} can be removed; on-chain is already at {:?}.", Pallet::name(), FROM, + TO, on_chain_version ); DbWeight::get().reads(1) diff --git a/substrate/frame/support/src/storage/bounded_btree_map.rs b/substrate/frame/support/src/storage/bounded_btree_map.rs index f2f32d890b87bca062a4e4f3aec31b7fa3b94127..91196be9e802c2eb9016abbcf86bfc0fb7fcb934 100644 --- a/substrate/frame/support/src/storage/bounded_btree_map.rs +++ b/substrate/frame/support/src/storage/bounded_btree_map.rs @@ -79,5 +79,16 @@ pub mod test { assert!(FooDoubleMap::decode_len(1, 2).is_none()); assert!(FooDoubleMap::decode_len(2, 2).is_none()); }); + + TestExternalities::default().execute_with(|| { + let bounded = boundedmap_from_keys::>(&[1, 2, 3]); + FooDoubleMap::insert(1, 1, bounded.clone()); + FooDoubleMap::insert(2, 2, bounded); // duplicate value + + assert_eq!(FooDoubleMap::decode_len(1, 1).unwrap(), 3); + assert_eq!(FooDoubleMap::decode_len(2, 2).unwrap(), 3); + assert!(FooDoubleMap::decode_len(2, 1).is_none()); + assert!(FooDoubleMap::decode_len(1, 2).is_none()); + }); } } diff --git a/substrate/frame/support/src/storage/bounded_btree_set.rs b/substrate/frame/support/src/storage/bounded_btree_set.rs index 52be1bb99f1015dc63ecd0e5d0bfe343f405e1f2..cf801eb47874f048966aab8adb8a8b3eef5c14f6 100644 --- a/substrate/frame/support/src/storage/bounded_btree_set.rs +++ b/substrate/frame/support/src/storage/bounded_btree_set.rs @@ -17,10 +17,10 @@ //! Traits, types and structs to support a bounded `BTreeSet`. 
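Relating back to the `MigrateV5ToV6` example above: the inner type handed to `VersionedMigration` is a plain, unversioned `OnRuntimeUpgrade` implementation; the wrapper adds the `FROM`/`TO` version check and the storage-version bump. A minimal sketch of such an inner migration (the `crate::Config` bound and the empty body are assumptions, not part of this patch):

```
use frame_support::{traits::OnRuntimeUpgrade, weights::Weight};
use sp_std::marker::PhantomData;

/// The unchecked migration logic that `VersionedMigration<5, 6, ..>` would wrap.
pub struct InnerMigrateV5ToV6<T>(PhantomData<T>);

impl<T: crate::Config> OnRuntimeUpgrade for InnerMigrateV5ToV6<T> {
	fn on_runtime_upgrade() -> Weight {
		// Translate the old storage layout into the new one here and return the
		// weight actually consumed; `VersionedMigration` separately accounts for
		// reading and writing the storage version itself.
		Weight::zero()
	}
}
```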
-use crate::storage::StorageDecodeLength; +use frame_support::storage::StorageDecodeNonDedupLength; pub use sp_runtime::BoundedBTreeSet; -impl StorageDecodeLength for BoundedBTreeSet {} +impl StorageDecodeNonDedupLength for BoundedBTreeSet {} #[cfg(test)] pub mod test { @@ -56,28 +56,28 @@ pub mod test { } #[test] - fn decode_len_works() { + fn decode_non_dedup_len_works() { TestExternalities::default().execute_with(|| { let bounded = boundedset_from_keys::>(&[1, 2, 3]); Foo::put(bounded); - assert_eq!(Foo::decode_len().unwrap(), 3); + assert_eq!(Foo::decode_non_dedup_len().unwrap(), 3); }); TestExternalities::default().execute_with(|| { let bounded = boundedset_from_keys::>(&[1, 2, 3]); FooMap::insert(1, bounded); - assert_eq!(FooMap::decode_len(1).unwrap(), 3); - assert!(FooMap::decode_len(0).is_none()); - assert!(FooMap::decode_len(2).is_none()); + assert_eq!(FooMap::decode_non_dedup_len(1).unwrap(), 3); + assert!(FooMap::decode_non_dedup_len(0).is_none()); + assert!(FooMap::decode_non_dedup_len(2).is_none()); }); TestExternalities::default().execute_with(|| { let bounded = boundedset_from_keys::>(&[1, 2, 3]); FooDoubleMap::insert(1, 1, bounded); - assert_eq!(FooDoubleMap::decode_len(1, 1).unwrap(), 3); - assert!(FooDoubleMap::decode_len(2, 1).is_none()); - assert!(FooDoubleMap::decode_len(1, 2).is_none()); - assert!(FooDoubleMap::decode_len(2, 2).is_none()); + assert_eq!(FooDoubleMap::decode_non_dedup_len(1, 1).unwrap(), 3); + assert!(FooDoubleMap::decode_non_dedup_len(2, 1).is_none()); + assert!(FooDoubleMap::decode_non_dedup_len(1, 2).is_none()); + assert!(FooDoubleMap::decode_non_dedup_len(2, 2).is_none()); }); } } diff --git a/substrate/frame/support/src/storage/generator/mod.rs b/substrate/frame/support/src/storage/generator/mod.rs index bac9f642e37d6d5c01f7ed6cc1301d30e90bd6b9..2b2abdc2e830923c5171703f1dc9173b50726cbc 100644 --- a/substrate/frame/support/src/storage/generator/mod.rs +++ b/substrate/frame/support/src/storage/generator/mod.rs @@ -24,10 +24,10 @@ //! //! This is internal api and is subject to change. -mod double_map; +pub(crate) mod double_map; pub(crate) mod map; -mod nmap; -mod value; +pub(crate) mod nmap; +pub(crate) mod value; pub use double_map::StorageDoubleMap; pub use map::StorageMap; diff --git a/substrate/frame/support/src/storage/mod.rs b/substrate/frame/support/src/storage/mod.rs index 851b0687bd12268019f64987bbb92ac2a3231ccb..7f39a3fdad85e98d4a2cf1d328b55e33f7846438 100644 --- a/substrate/frame/support/src/storage/mod.rs +++ b/substrate/frame/support/src/storage/mod.rs @@ -167,6 +167,32 @@ pub trait StorageValue { { T::decode_len(&Self::hashed_key()) } + + /// Read the length of the storage value without decoding the entire value. + /// + /// `T` is required to implement [`StorageDecodeNonDedupLength`]. + /// + /// If the value does not exists or it fails to decode the length, `None` is returned. + /// Otherwise `Some(len)` is returned. + /// + /// # Warning + /// + /// - The value returned is the non-deduplicated length of the underlying Vector in storage.This + /// means that any duplicate items are included. + /// + /// - `None` does not mean that `get()` does not return a value. The default value is completely + /// ignored by this function. + /// + /// # Example + #[doc = docify::embed!("src/storage/mod.rs", btree_set_decode_non_dedup_len)] + /// This demonstrates how `decode_non_dedup_len` will count even the duplicate values in the + /// storage (in this case, the number `4` is counted twice). 
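To make the deduplication point above concrete, here is a small stand-alone sketch using `parity-scale-codec` directly (not part of this patch): the payload produced by repeated `append`s keeps the duplicates, and only decoding into the `BTreeSet` collapses them, which is exactly the gap between `decode_non_dedup_len` and the decoded set's `len()`.

```
use codec::{Compact, Decode, Encode};
use std::collections::BTreeSet;

fn main() {
	// Emulate what three `append`s (4, 4, 5) leave in storage: a compact length
	// prefix followed by the raw items, duplicates included.
	let raw = [Compact(3u32).encode(), 4u32.encode(), 4u32.encode(), 5u32.encode()].concat();

	// The length prefix, which is what `decode_non_dedup_len` reads, says 3 items ...
	let len = Compact::<u32>::decode(&mut &raw[..]).unwrap().0;
	assert_eq!(len, 3);

	// ... but decoding the whole value into a set deduplicates down to 2.
	let set = BTreeSet::<u32>::decode(&mut &raw[..]).unwrap();
	assert_eq!(set.len(), 2);
}
```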
+ fn decode_non_dedup_len() -> Option + where + T: StorageDecodeNonDedupLength, + { + T::decode_non_dedup_len(&Self::hashed_key()) + } } /// A non-continuous container type. @@ -346,6 +372,27 @@ pub trait StorageMap { V::decode_len(&Self::hashed_key_for(key)) } + /// Read the length of the storage value without decoding the entire value. + /// + /// `V` is required to implement [`StorageDecodeNonDedupLength`]. + /// + /// If the value does not exists or it fails to decode the length, `None` is returned. + /// Otherwise `Some(len)` is returned. + /// + /// # Warning + /// + /// - `None` does not mean that `get()` does not return a value. The default value is completly + /// ignored by this function. + /// + /// - The value returned is the non-deduplicated length of the underlying Vector in storage.This + /// means that any duplicate items are included. + fn decode_non_dedup_len>(key: KeyArg) -> Option + where + V: StorageDecodeNonDedupLength, + { + V::decode_non_dedup_len(&Self::hashed_key_for(key)) + } + /// Migrate an item with the given `key` from a defunct `OldHasher` to the current hasher. /// /// If the key doesn't exist, then it's a no-op. If it does, then it returns its value. @@ -741,6 +788,27 @@ pub trait StorageDoubleMap { V::decode_len(&Self::hashed_key_for(key1, key2)) } + /// Read the length of the storage value without decoding the entire value under the + /// given `key1` and `key2`. + /// + /// `V` is required to implement [`StorageDecodeNonDedupLength`]. + /// + /// If the value does not exists or it fails to decode the length, `None` is returned. + /// Otherwise `Some(len)` is returned. + /// + /// # Warning + /// + /// `None` does not mean that `get()` does not return a value. The default value is completly + /// ignored by this function. + fn decode_non_dedup_len(key1: KArg1, key2: KArg2) -> Option + where + KArg1: EncodeLike, + KArg2: EncodeLike, + V: StorageDecodeNonDedupLength, + { + V::decode_non_dedup_len(&Self::hashed_key_for(key1, key2)) + } + /// Migrate an item with the given `key1` and `key2` from defunct `OldHasher1` and /// `OldHasher2` to the current hashers. /// @@ -1400,8 +1468,7 @@ pub trait StoragePrefixedMap { /// This trait is sealed. pub trait StorageAppend: private::Sealed {} -/// Marker trait that will be implemented for types that support to decode their length in an -/// efficient way. It is expected that the length is at the beginning of the encoded object +/// It is expected that the length is at the beginning of the encoded object /// and that the length is a `Compact`. /// /// This trait is sealed. @@ -1421,6 +1488,29 @@ pub trait StorageDecodeLength: private::Sealed + codec::DecodeLength { } } +/// It is expected that the length is at the beginning of the encoded objectand that the length is a +/// `Compact`. +/// +/// # Note +/// The length returned by this trait is not deduplicated, i.e. it is the length of the underlying +/// stored Vec. +/// +/// This trait is sealed. +pub trait StorageDecodeNonDedupLength: private::Sealed + codec::DecodeLength { + /// Decode the length of the storage value at `key`. + /// + /// This function assumes that the length is at the beginning of the encoded object and is a + /// `Compact`. + /// + /// Returns `None` if the storage value does not exist or the decoding failed. 
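The default body of `decode_non_dedup_len` (directly below) only ever reads the first five bytes of the stored value. That is sufficient because a SCALE `Compact<u32>` length prefix occupies at most five bytes; a small stand-alone check of that assumption:

```
use codec::{Compact, Encode};

fn main() {
	// Small lengths fit in a single byte ...
	assert_eq!(Compact(3u32).encode(), vec![12]);
	// ... and even `u32::MAX` needs no more than five bytes, so reading the first
	// five bytes of a stored value is always enough to recover its length prefix.
	assert_eq!(Compact(u32::MAX).encode().len(), 5);
}
```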
+ fn decode_non_dedup_len(key: &[u8]) -> Option { + let mut data = [0u8; 5]; + let len = sp_io::storage::read(key, &mut data, 0)?; + let len = data.len().min(len as usize); + ::len(&data[..len]).ok() + } +} + /// Provides `Sealed` trait to prevent implementing trait `StorageAppend` & `StorageDecodeLength` /// & `EncodeLikeTuple` outside of this crate. mod private { @@ -1471,7 +1561,14 @@ impl StorageAppend for Vec {} impl StorageDecodeLength for Vec {} impl StorageAppend for BTreeSet {} -impl StorageDecodeLength for BTreeSet {} +impl StorageDecodeNonDedupLength for BTreeSet {} + +// Blanket implementation StorageDecodeNonDedupLength for all types that are StorageDecodeLength. +impl StorageDecodeNonDedupLength for T { + fn decode_non_dedup_len(key: &[u8]) -> Option { + T::decode_len(key) + } +} /// We abuse the fact that SCALE does not put any marker into the encoding, i.e. we only encode the /// internal vec and we can append to this vec. We have a test that ensures that if the `Digest` @@ -2026,7 +2123,24 @@ mod test { FooSet::append(6); FooSet::append(7); - assert_eq!(FooSet::decode_len().unwrap(), 7); + assert_eq!(FooSet::decode_non_dedup_len().unwrap(), 7); + }); + } + + #[docify::export] + #[test] + fn btree_set_decode_non_dedup_len() { + #[crate::storage_alias] + type Store = StorageValue>; + + TestExternalities::default().execute_with(|| { + Store::append(4); + Store::append(4); // duplicate value + Store::append(5); + + let length_with_dup_items = 3; + + assert_eq!(Store::decode_non_dedup_len().unwrap(), length_with_dup_items); }); } } diff --git a/substrate/frame/support/src/storage/types/counted_map.rs b/substrate/frame/support/src/storage/types/counted_map.rs index 50e2c678248c9aa34dddeaaec09b6ddeec0be348..04e69751c16a00e4b89fdf5907bec5be64305014 100644 --- a/substrate/frame/support/src/storage/types/counted_map.rs +++ b/substrate/frame/support/src/storage/types/counted_map.rs @@ -35,8 +35,8 @@ use sp_metadata_ir::StorageEntryMetadataIR; use sp_runtime::traits::Saturating; use sp_std::prelude::*; -/// A wrapper around a `StorageMap` and a `StorageValue` to keep track of how many items -/// are in a map, without needing to iterate all the values. +/// A wrapper around a [`StorageMap`] and a [`StorageValue`] (with the value being `u32`) to keep +/// track of how many items are in a map, without needing to iterate all the values. /// /// This storage item has additional storage read and write overhead when manipulating values /// compared to a regular storage map. @@ -47,6 +47,51 @@ use sp_std::prelude::*; /// /// Whenever the counter needs to be updated, an additional read and write occurs to update that /// counter. +/// +/// The total number of items currently stored in the map can be retrieved with the +/// [`CountedStorageMap::count`] method. +/// +/// For general information regarding the `#[pallet::storage]` attribute, refer to +/// [`crate::pallet_macros::storage`]. +/// +/// # Examples +/// +/// Declaring a counted map: +/// +/// ``` +/// #[frame_support::pallet] +/// mod pallet { +/// # use frame_support::pallet_prelude::*; +/// # #[pallet::config] +/// # pub trait Config: frame_system::Config {} +/// # #[pallet::pallet] +/// # pub struct Pallet(_); +/// /// A kitchen-sink CountedStorageMap, with all possible additional attributes. 
+/// #[pallet::storage] +/// #[pallet::getter(fn foo)] +/// #[pallet::storage_prefix = "OtherFoo"] +/// #[pallet::unbounded] +/// pub type Foo = CountedStorageMap< +/// _, +/// Blake2_128Concat, +/// u32, +/// u32, +/// ValueQuery, +/// >; +/// +/// /// Alternative named syntax. +/// #[pallet::storage] +/// pub type Bar = CountedStorageMap< +/// Hasher = Blake2_128Concat, +/// Key = u32, +/// Value = u32, +/// QueryKind = ValueQuery +/// >; +/// } +/// ``` +/// +/// Using a counted map in action: +#[doc = docify::embed!("src/storage/types/counted_map.rs", test_simple_count_works)] pub struct CountedStorageMap< Prefix, Hasher, @@ -74,7 +119,11 @@ impl MapWrapper type Map = StorageMap; } -type CounterFor